From ecdc749a5e367e3cc0e6f27cdd6aea76875c1262 Mon Sep 17 00:00:00 2001 From: stack72 Date: Sat, 23 Sep 2017 20:04:44 +0300 Subject: [PATCH 001/350] resource/aws_ses_event_destination: Add support for SNS destinations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes: #1697 ``` terraform-provider-aws [master●] % acctests aws TestAccAWSSESEventDestination_basic === RUN TestAccAWSSESEventDestination_basic --- PASS: TestAccAWSSESEventDestination_basic (159.86s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 159.898s ``` --- aws/resource_aws_ses_event_destination.go | 53 ++++++++++++++----- ...resource_aws_ses_event_destination_test.go | 22 +++++++- website/docs/r/ses_event_destination.markdown | 5 ++ 3 files changed, 64 insertions(+), 16 deletions(-) diff --git a/aws/resource_aws_ses_event_destination.go b/aws/resource_aws_ses_event_destination.go index 2dde76e5ae2..0950d11bcc3 100644 --- a/aws/resource_aws_ses_event_destination.go +++ b/aws/resource_aws_ses_event_destination.go @@ -19,26 +19,26 @@ func resourceAwsSesEventDestination() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "configuration_set_name": &schema.Schema{ + "configuration_set_name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "enabled": &schema.Schema{ + "enabled": { Type: schema.TypeBool, Optional: true, Default: false, ForceNew: true, }, - "matching_types": &schema.Schema{ + "matching_types": { Type: schema.TypeSet, Required: true, ForceNew: true, @@ -53,20 +53,21 @@ func resourceAwsSesEventDestination() *schema.Resource { Type: schema.TypeSet, Optional: true, ForceNew: true, - ConflictsWith: []string{"kinesis_destination"}, + MaxItems: 1, + ConflictsWith: []string{"kinesis_destination", "sns_destination"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "default_value": &schema.Schema{ + 
"default_value": { Type: schema.TypeString, Required: true, }, - "dimension_name": &schema.Schema{ + "dimension_name": { Type: schema.TypeString, Required: true, }, - "value_source": &schema.Schema{ + "value_source": { Type: schema.TypeString, Required: true, ValidateFunc: validateDimensionValueSource, @@ -79,15 +80,32 @@ func resourceAwsSesEventDestination() *schema.Resource { Type: schema.TypeSet, Optional: true, ForceNew: true, - ConflictsWith: []string{"cloudwatch_destination"}, + MaxItems: 1, + ConflictsWith: []string{"cloudwatch_destination", "sns_destination"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "stream_arn": &schema.Schema{ + "stream_arn": { Type: schema.TypeString, Required: true, }, - "role_arn": &schema.Schema{ + "role_arn": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "sns_destination": { + Type: schema.TypeSet, + MaxItems: 1, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"cloudwatch_destination", "kinesis_destination"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic_arn": { Type: schema.TypeString, Required: true, }, @@ -125,9 +143,7 @@ func resourceAwsSesEventDestinationCreate(d *schema.ResourceData, meta interface if v, ok := d.GetOk("kinesis_destination"); ok { destination := v.(*schema.Set).List() - if len(destination) > 1 { - return fmt.Errorf("You can only define a single kinesis destination per record") - } + kinesis := destination[0].(map[string]interface{}) createOpts.EventDestination.KinesisFirehoseDestination = &ses.KinesisFirehoseDestination{ DeliveryStreamARN: aws.String(kinesis["stream_arn"].(string)), @@ -136,6 +152,15 @@ func resourceAwsSesEventDestinationCreate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] Creating kinesis destination: %#v", kinesis) } + if v, ok := d.GetOk("sns_destination"); ok { + destination := v.(*schema.Set).List() + sns := destination[0].(map[string]interface{}) + createOpts.EventDestination.SNSDestination = 
&ses.SNSDestination{ + TopicARN: aws.String(sns["topic_arn"].(string)), + } + log.Printf("[DEBUG] Creating sns destination: %#v", sns) + } + _, err := conn.CreateConfigurationSetEventDestination(createOpts) if err != nil { return fmt.Errorf("Error creating SES configuration set event destination: %s", err) diff --git a/aws/resource_aws_ses_event_destination_test.go b/aws/resource_aws_ses_event_destination_test.go index 624ce0c8321..7e357955454 100644 --- a/aws/resource_aws_ses_event_destination_test.go +++ b/aws/resource_aws_ses_event_destination_test.go @@ -26,6 +26,8 @@ func TestAccAWSSESEventDestination_basic(t *testing.T) { "aws_ses_event_destination.kinesis", "name", "event-destination-kinesis"), resource.TestCheckResourceAttr( "aws_ses_event_destination.cloudwatch", "name", "event-destination-cloudwatch"), + resource.TestCheckResourceAttr( + "aws_ses_event_destination.sns", "name", "event-destination-sns"), ), }, }, @@ -156,6 +158,10 @@ data "aws_iam_policy_document" "fh_felivery_document" { } } +resource "aws_sns_topic" "ses_destination" { + name = "ses-destination-test" +} + resource "aws_ses_configuration_set" "test" { name = "some-configuration-set-%d" } @@ -166,7 +172,7 @@ resource "aws_ses_event_destination" "kinesis" { enabled = true, matching_types = ["bounce", "send"], - kinesis_destination = { + kinesis_destination { stream_arn = "${aws_kinesis_firehose_delivery_stream.test_stream.arn}", role_arn = "${aws_iam_role.firehose_role.arn}" } @@ -178,10 +184,22 @@ resource "aws_ses_event_destination" "cloudwatch" { enabled = true, matching_types = ["bounce", "send"], - cloudwatch_destination = { + cloudwatch_destination { default_value = "default" dimension_name = "dimension" value_source = "emailHeader" } } + +resource "aws_ses_event_destination" "sns" { + name = "event-destination-sns", + configuration_set_name = "${aws_ses_configuration_set.test.name}", + enabled = true, + matching_types = ["bounce", "send"], + + sns_destination { + topic_arn = 
"${aws_sns_topic.ses_destination.arn}" + } +} + `, edRandomInteger) diff --git a/website/docs/r/ses_event_destination.markdown b/website/docs/r/ses_event_destination.markdown index 794953bc540..9afb999e6f9 100644 --- a/website/docs/r/ses_event_destination.markdown +++ b/website/docs/r/ses_event_destination.markdown @@ -51,6 +51,7 @@ The following arguments are supported: * `matching_types` - (Required) A list of matching types. May be any of `"send"`, `"reject"`, `"bounce"`, `"complaint"`, or `"delivery"`. * `cloudwatch_destination` - (Optional) CloudWatch destination for the events * `kinesis_destination` - (Optional) Send the events to a kinesis firehose destination +* `sns_destination` - (Optional) Send the events to an SNS Topic destination ~> **NOTE:** You can specify `"cloudwatch_destination"` or `"kinesis_destination"` but not both @@ -65,3 +66,7 @@ Kinesis Destination requires the following: * `stream_arn` - (Required) The ARN of the Kinesis Stream * `role_arn` - (Required) The ARN of the role that has permissions to access the Kinesis Stream +SNS Topic requires the following: + +* `topic_arn` - (Required) The ARN of the SNS topic + From f985602347eea0ac798e45992c9570408d96921f Mon Sep 17 00:00:00 2001 From: Stephen Newey Date: Mon, 13 Nov 2017 16:51:54 +0200 Subject: [PATCH 002/350] aws_route can add IPv6 routes to instances and network interfaces [GH-2264] --- aws/resource_aws_route.go | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/aws/resource_aws_route.go b/aws/resource_aws_route.go index ccacd25e4ff..9cabe1f3abc 100644 --- a/aws/resource_aws_route.go +++ b/aws/resource_aws_route.go @@ -159,16 +159,32 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error { } case "instance_id": createOpts = &ec2.CreateRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - InstanceId: 
aws.String(d.Get("instance_id").(string)), + RouteTableId: aws.String(d.Get("route_table_id").(string)), + InstanceId: aws.String(d.Get("instance_id").(string)), } + + if v, ok := d.GetOk("destination_cidr_block"); ok { + createOpts.DestinationCidrBlock = aws.String(v.(string)) + } + + if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok { + createOpts.DestinationIpv6CidrBlock = aws.String(v.(string)) + } + case "network_interface_id": createOpts = &ec2.CreateRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - NetworkInterfaceId: aws.String(d.Get("network_interface_id").(string)), + RouteTableId: aws.String(d.Get("route_table_id").(string)), + NetworkInterfaceId: aws.String(d.Get("network_interface_id").(string)), + } + + if v, ok := d.GetOk("destination_cidr_block"); ok { + createOpts.DestinationCidrBlock = aws.String(v.(string)) + } + + if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok { + createOpts.DestinationIpv6CidrBlock = aws.String(v.(string)) } + case "vpc_peering_connection_id": createOpts = &ec2.CreateRouteInput{ RouteTableId: aws.String(d.Get("route_table_id").(string)), From 8944b7d518328ae4671868ab026678f9b64fb431 Mon Sep 17 00:00:00 2001 From: Stephen Newey Date: Fri, 17 Nov 2017 19:56:11 +0000 Subject: [PATCH 003/350] Add acceptance tests. 
--- aws/resource_aws_route_test.go | 221 +++++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) diff --git a/aws/resource_aws_route_test.go b/aws/resource_aws_route_test.go index 1dc50a564db..6bda3b45242 100644 --- a/aws/resource_aws_route_test.go +++ b/aws/resource_aws_route_test.go @@ -106,6 +106,46 @@ func TestAccAWSRoute_ipv6ToInternetGateway(t *testing.T) { }) } +func TestAccAWSRoute_ipv6ToInstance(t *testing.T) { + var route ec2.Route + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRouteDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteConfigIpv6Instance, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSRouteExists("aws_route.internal-default-route-ipv6", &route), + ), + }, + }, + }) +} + +func TestAccAWSRoute_ipv6ToNetworkInterface(t *testing.T) { + var route ec2.Route + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRouteDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteConfigIpv6NetworkInterface, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSRouteExists("aws_route.internal-default-route-ipv6", &route), + ), + }, + }, + }) +} + func TestAccAWSRoute_ipv6ToPeeringConnection(t *testing.T) { var route ec2.Route @@ -354,6 +394,187 @@ resource "aws_route" "igw" { `) +var testAccAWSRouteConfigIpv6NetworkInterface = fmt.Sprintf(` +resource "aws_vpc" "examplevpc" { + cidr_block = "10.100.0.0/16" + enable_dns_hostnames = true + assign_generated_ipv6_cidr_block = true +} + +data "aws_availability_zones" "available" {} + +resource "aws_internet_gateway" "internet" { + vpc_id = "${aws_vpc.examplevpc.id}" +} + +resource "aws_route" "igw" { + route_table_id = "${aws_vpc.examplevpc.main_route_table_id}" + destination_cidr_block = "0.0.0.0/0" + gateway_id = "${aws_internet_gateway.internet.id}" +} + +resource 
"aws_route" "igw-ipv6" { + route_table_id = "${aws_vpc.examplevpc.main_route_table_id}" + destination_ipv6_cidr_block = "::/0" + gateway_id = "${aws_internet_gateway.internet.id}" +} + +resource "aws_subnet" "router-network" { + cidr_block = "10.100.1.0/24" + vpc_id = "${aws_vpc.examplevpc.id}" + ipv6_cidr_block = "${cidrsubnet(aws_vpc.examplevpc.ipv6_cidr_block, 8, 1)}" + assign_ipv6_address_on_creation = true + map_public_ip_on_launch = true + availability_zone = "${data.aws_availability_zones.available.names[0]}" +} + +resource "aws_subnet" "client-network" { + cidr_block = "10.100.10.0/24" + vpc_id = "${aws_vpc.examplevpc.id}" + ipv6_cidr_block = "${cidrsubnet(aws_vpc.examplevpc.ipv6_cidr_block, 8, 2)}" + assign_ipv6_address_on_creation = true + map_public_ip_on_launch = false + availability_zone = "${data.aws_availability_zones.available.names[0]}" +} + +resource "aws_route_table" "client-routes" { + vpc_id = "${aws_vpc.examplevpc.id}" +} + +resource "aws_route_table_association" "client-routes" { + route_table_id = "${aws_route_table.client-routes.id}" + subnet_id = "${aws_subnet.client-network.id}" +} + +data "aws_ami" "ubuntu" { + most_recent = true + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*"] + } + filter { + name = "virtualization-type" + values = ["hvm"] + } + owners = ["099720109477"] +} + +resource "aws_instance" "test-router" { + ami = "${data.aws_ami.ubuntu.image_id}" + instance_type = "t2.small" + subnet_id = "${aws_subnet.router-network.id}" +} + +resource "aws_network_interface" "router-internal" { + subnet_id = "${aws_subnet.client-network.id}" + source_dest_check = false +} + +resource "aws_network_interface_attachment" "router-internal" { + device_index = 1 + instance_id = "${aws_instance.test-router.id}" + network_interface_id = "${aws_network_interface.router-internal.id}" +} + +resource "aws_route" "internal-default-route" { + route_table_id = "${aws_route_table.client-routes.id}" + 
destination_cidr_block = "0.0.0.0/0" + network_interface_id = "${aws_network_interface.router-internal.id}" +} + +resource "aws_route" "internal-default-route-ipv6" { + route_table_id = "${aws_route_table.client-routes.id}" + destination_ipv6_cidr_block = "::/0" + network_interface_id = "${aws_network_interface.router-internal.id}" +} + +`) + +var testAccAWSRouteConfigIpv6Instance = fmt.Sprintf(` +resource "aws_vpc" "examplevpc" { + cidr_block = "10.100.0.0/16" + enable_dns_hostnames = true + assign_generated_ipv6_cidr_block = true +} + +data "aws_availability_zones" "available" {} + +resource "aws_internet_gateway" "internet" { + vpc_id = "${aws_vpc.examplevpc.id}" +} + +resource "aws_route" "igw" { + route_table_id = "${aws_vpc.examplevpc.main_route_table_id}" + destination_cidr_block = "0.0.0.0/0" + gateway_id = "${aws_internet_gateway.internet.id}" +} + +resource "aws_route" "igw-ipv6" { + route_table_id = "${aws_vpc.examplevpc.main_route_table_id}" + destination_ipv6_cidr_block = "::/0" + gateway_id = "${aws_internet_gateway.internet.id}" +} + +resource "aws_subnet" "router-network" { + cidr_block = "10.100.1.0/24" + vpc_id = "${aws_vpc.examplevpc.id}" + ipv6_cidr_block = "${cidrsubnet(aws_vpc.examplevpc.ipv6_cidr_block, 8, 1)}" + assign_ipv6_address_on_creation = true + map_public_ip_on_launch = true + availability_zone = "${data.aws_availability_zones.available.names[0]}" +} + +resource "aws_subnet" "client-network" { + cidr_block = "10.100.10.0/24" + vpc_id = "${aws_vpc.examplevpc.id}" + ipv6_cidr_block = "${cidrsubnet(aws_vpc.examplevpc.ipv6_cidr_block, 8, 2)}" + assign_ipv6_address_on_creation = true + map_public_ip_on_launch = false + availability_zone = "${data.aws_availability_zones.available.names[0]}" +} + +resource "aws_route_table" "client-routes" { + vpc_id = "${aws_vpc.examplevpc.id}" +} + +resource "aws_route_table_association" "client-routes" { + route_table_id = "${aws_route_table.client-routes.id}" + subnet_id = 
"${aws_subnet.client-network.id}" +} + +data "aws_ami" "ubuntu" { + most_recent = true + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*"] + } + filter { + name = "virtualization-type" + values = ["hvm"] + } + owners = ["099720109477"] +} + +resource "aws_instance" "test-router" { + ami = "${data.aws_ami.ubuntu.image_id}" + instance_type = "t2.small" + subnet_id = "${aws_subnet.router-network.id}" +} + +resource "aws_route" "internal-default-route" { + route_table_id = "${aws_route_table.client-routes.id}" + destination_cidr_block = "0.0.0.0/0" + instance_id = "${aws_instance.test-router.id}" +} + +resource "aws_route" "internal-default-route-ipv6" { + route_table_id = "${aws_route_table.client-routes.id}" + destination_ipv6_cidr_block = "::/0" + instance_id = "${aws_instance.test-router.id}" +} + +`) + var testAccAWSRouteConfigIpv6PeeringConnection = fmt.Sprintf(` resource "aws_vpc" "foo" { cidr_block = "10.0.0.0/16" From 2ea7b926b28d4fb7f449c48d4b16be6c695d43e3 Mon Sep 17 00:00:00 2001 From: Keith Gable Date: Wed, 6 Dec 2017 11:29:04 -0800 Subject: [PATCH 004/350] Improve the documentation for the aws_ssm_association resource --- website/docs/r/ssm_association.html.markdown | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/website/docs/r/ssm_association.html.markdown b/website/docs/r/ssm_association.html.markdown index 492dc494a68..a16f9b72325 100644 --- a/website/docs/r/ssm_association.html.markdown +++ b/website/docs/r/ssm_association.html.markdown @@ -3,12 +3,12 @@ layout: "aws" page_title: "AWS: aws_ssm_association" sidebar_current: "docs-aws-resource-ssm-association" description: |- - Associates an SSM Document to an instance. + Associates an SSM Document to an instance or EC2 tag. --- # aws_ssm_association -Associates an SSM Document to an instance. +Associates an SSM Document to an instance or EC2 tag. 
## Example Usage @@ -70,18 +70,23 @@ The following arguments are supported: * `name` - (Required) The name of the SSM document to apply. * `association_name` - (Optional) The descriptive name for the association. -* `instance_id` - (Optional) The instance id to apply an SSM document to. -* `parameters` - (Optional) Additional parameters to pass to the SSM document. -* `targets` - (Optional) The targets (either instances or tags). Instances are specified using Key=instanceids,Values=instanceid1,instanceid2. Tags are specified using Key=tag name,Values=tag value. Only 1 target is currently supported by AWS. -* `schedule_expression` - (Optional) A cron expression when the association will be applied to the target(s). -* `output_location` - (Optional) An output location block. OutputLocation documented below. * `document_version` - (Optional) The document version you want to associate with the target(s). Can be a specific version or the default version. +* `instance_id` - (Optional) The instance ID to apply an SSM document to. +* `output_location` - (Optional) An output location block. Output Location is documented below. +* `parameters` - (Optional) A block of arbitrary string parameters to pass to the SSM document. +* `schedule_expression` - (Optional) A cron expression when the association will be applied to the target(s). +* `targets` - (Optional) A block containing the targets of the SSM association. Targets are documented below. Output Location (`output_location`) is an S3 bucket where you want to store the results of this association: * `s3_bucket_name` - (Required) The S3 bucket name. * `s3_key_prefix` - (Optional) The S3 bucket prefix. Results stored in the root if not configured. +Targets specify what instance IDs or tags to apply the document to and has these keys: + +* `key` - (Required) Either `InstanceIds` or `tag:Tag Name` to specify an EC2 tag. +* `values` - (Required) A list of instance IDs or tag values. AWS currently limits this to 1 target value. 
+ ## Attributes Reference The following attributes are exported: From 8e73b6de13dbee2a8893f54368a33738dedea69f Mon Sep 17 00:00:00 2001 From: Kash Date: Fri, 1 Dec 2017 08:18:31 -0500 Subject: [PATCH 005/350] Add peer_region to vpc peering connection --- aws/resource_aws_vpc_peering_connection.go | 11 ++++ ...esource_aws_vpc_peering_connection_test.go | 51 +++++++++++++++++++ 2 files changed, 62 insertions(+) diff --git a/aws/resource_aws_vpc_peering_connection.go b/aws/resource_aws_vpc_peering_connection.go index 24a1912e460..40013f096bb 100644 --- a/aws/resource_aws_vpc_peering_connection.go +++ b/aws/resource_aws_vpc_peering_connection.go @@ -49,6 +49,12 @@ func resourceAwsVpcPeeringConnection() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "peer_region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, "accepter": vpcPeeringConnectionOptionsSchema(), "requester": vpcPeeringConnectionOptionsSchema(), "tags": tagsSchema(), @@ -69,6 +75,10 @@ func resourceAwsVPCPeeringCreate(d *schema.ResourceData, meta interface{}) error createOpts.PeerOwnerId = aws.String(v.(string)) } + if v, ok := d.GetOk("peer_region"); ok { + createOpts.SetPeerRegion(v.(string)) + } + log.Printf("[DEBUG] VPC Peering Create options: %#v", createOpts) resp, err := conn.CreateVpcPeeringConnection(createOpts) @@ -152,6 +162,7 @@ func resourceAwsVPCPeeringRead(d *schema.ResourceData, meta interface{}) error { } d.Set("accept_status", pc.Status.Code) + d.Set("peer_region", pc.AccepterVpcInfo.Region) // When the VPC Peering Connection is pending acceptance, // the details about accepter and/or requester peering diff --git a/aws/resource_aws_vpc_peering_connection_test.go b/aws/resource_aws_vpc_peering_connection_test.go index fe9b6a7c54d..d7a1e5ec0ef 100644 --- a/aws/resource_aws_vpc_peering_connection_test.go +++ b/aws/resource_aws_vpc_peering_connection_test.go @@ -315,6 +315,29 @@ func testAccCheckAWSVpcPeeringConnectionOptions(n, block 
string, options *ec2.Vp } } +func TestAccAWSVPCPeeringConnection_region(t *testing.T) { + var connection ec2.VpcPeeringConnection + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_vpc_peering_connection.foo", + IDRefreshIgnore: []string{"auto_accept"}, + + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSVpcPeeringConnectionDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccVpcPeeringConfigRegion, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSVpcPeeringConnectionExists( + "aws_vpc_peering_connection.foo", + &connection), + ), + }, + }, + }) +} + const testAccVpcPeeringConfig = ` resource "aws_vpc" "foo" { cidr_block = "10.0.0.0/16" @@ -402,3 +425,31 @@ resource "aws_vpc_peering_connection" "foo" { peer_vpc_id = "${aws_vpc.bar.id}" } ` + +const testAccVpcPeeringConfigRegion = ` +provider "aws" { + region = "us-west-2" +} + +provider "aws" { + alias = "us-east-1" + region = "us-east-1" +} + +resource "aws_vpc" "foo" { + provider = "aws" + cidr_block = "10.0.0.0/16" +} + +resource "aws_vpc" "bar" { + provider = "aws.us-east-1" + cidr_block = "10.1.0.0/16" +} + +resource "aws_vpc_peering_connection" "foo" { + provider = "aws" + vpc_id = "${aws_vpc.foo.id}" + peer_vpc_id = "${aws_vpc.bar.id}" + peer_region = "us-east-1" +} +` From 02a0006f43b843b1a654407b9c1e49b7727a978c Mon Sep 17 00:00:00 2001 From: Kash Date: Fri, 1 Dec 2017 08:44:11 -0500 Subject: [PATCH 006/350] Add peer_region to documentation --- website/docs/r/vpc_peering.html.markdown | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/website/docs/r/vpc_peering.html.markdown b/website/docs/r/vpc_peering.html.markdown index c625b7fcdc9..10b711f8531 100644 --- a/website/docs/r/vpc_peering.html.markdown +++ b/website/docs/r/vpc_peering.html.markdown @@ -65,6 +65,29 @@ resource "aws_vpc" "bar" { } ``` +Basic usage with region: + + +```hcl +resource "aws_vpc_peering_connection" "foo" { + 
peer_owner_id = "${var.peer_owner_id}" + peer_vpc_id = "${aws_vpc.bar.id}" + vpc_id = "${aws_vpc.foo.id}" + peer_region = "us-east-1" + auto_accept = true +} + +resource "aws_vpc" "foo" { + provider = "aws.us-west-2" + cidr_block = "10.1.0.0/16" +} + +resource "aws_vpc" "bar" { + provider = "aws.us-east-1" + cidr_block = "10.2.0.0/16" +} +``` + ## Argument Reference -> **Note:** Modifying the VPC Peering Connection options requires peering to be active. An automatic activation @@ -79,6 +102,7 @@ The following arguments are supported: * `peer_vpc_id` - (Required) The ID of the VPC with which you are creating the VPC Peering Connection. * `vpc_id` - (Required) The ID of the requester VPC. * `auto_accept` - (Optional) Accept the peering (both VPCs need to be in the same AWS account). +* `peer_region` - (Optional) The region of the accepter VPC of the [VPC Peering Connection]. * `accepter` (Optional) - An optional configuration block that allows for [VPC Peering Connection] (http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options to be set for the VPC that accepts the peering connection (a maximum of one). 
From 55e91d5ae6e0ba1a3dc9db559ea86c9ff735a706 Mon Sep 17 00:00:00 2001 From: kl4w Date: Sat, 2 Dec 2017 09:25:16 -0500 Subject: [PATCH 007/350] Add peer_region to vpc_peering_connection data source --- aws/data_source_aws_vpc_peering_connection.go | 6 ++++++ website/docs/d/vpc_peering_connection.html.markdown | 2 ++ 2 files changed, 8 insertions(+) diff --git a/aws/data_source_aws_vpc_peering_connection.go b/aws/data_source_aws_vpc_peering_connection.go index 489a7262414..a35283a2bb6 100644 --- a/aws/data_source_aws_vpc_peering_connection.go +++ b/aws/data_source_aws_vpc_peering_connection.go @@ -54,6 +54,11 @@ func dataSourceAwsVpcPeeringConnection() *schema.Resource { Optional: true, Computed: true, }, + "peer_region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, "accepter": { Type: schema.TypeMap, Computed: true, @@ -124,6 +129,7 @@ func dataSourceAwsVpcPeeringConnectionRead(d *schema.ResourceData, meta interfac d.Set("peer_vpc_id", pcx.AccepterVpcInfo.VpcId) d.Set("peer_owner_id", pcx.AccepterVpcInfo.OwnerId) d.Set("peer_cidr_block", pcx.AccepterVpcInfo.CidrBlock) + d.Set("peer_region", pcx.AccepterVpcInfo.Region) d.Set("tags", tagsToMap(pcx.Tags)) if pcx.AccepterVpcInfo.PeeringOptions != nil { diff --git a/website/docs/d/vpc_peering_connection.html.markdown b/website/docs/d/vpc_peering_connection.html.markdown index 535828e5f10..36ee7b15f2a 100644 --- a/website/docs/d/vpc_peering_connection.html.markdown +++ b/website/docs/d/vpc_peering_connection.html.markdown @@ -54,6 +54,8 @@ The given filters must match exactly one VPC peering connection whose data will * `peer_cidr_block` - (Optional) The CIDR block of the accepter VPC of the specific VPC Peering Connection to retrieve. +* `peer_region` - (Optional) The region of the accepter VPC of the specific VPC Peering Connection to retrieve. + * `filter` - (Optional) Custom filter block as described below. 
* `tags` - (Optional) A mapping of tags, each pair of which must exactly match From 310b3536461ce44ce236e153a692dbebd01cfc08 Mon Sep 17 00:00:00 2001 From: kl4w Date: Sat, 2 Dec 2017 09:43:08 -0500 Subject: [PATCH 008/350] Add peer_region to vpc_peering_connection_acceptor --- aws/resource_aws_vpc_peering_connection.go | 3 ++- aws/resource_aws_vpc_peering_connection_accepter.go | 4 ++++ website/docs/r/vpc_peering_accepter.html.markdown | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_vpc_peering_connection.go b/aws/resource_aws_vpc_peering_connection.go index 40013f096bb..24beea2862a 100644 --- a/aws/resource_aws_vpc_peering_connection.go +++ b/aws/resource_aws_vpc_peering_connection.go @@ -153,16 +153,17 @@ func resourceAwsVPCPeeringRead(d *schema.ResourceData, meta interface{}) error { // We're the accepter d.Set("peer_owner_id", pc.RequesterVpcInfo.OwnerId) d.Set("peer_vpc_id", pc.RequesterVpcInfo.VpcId) + d.Set("peer_region", pc.RequesterVpcInfo.Region) d.Set("vpc_id", pc.AccepterVpcInfo.VpcId) } else { // We're the requester d.Set("peer_owner_id", pc.AccepterVpcInfo.OwnerId) d.Set("peer_vpc_id", pc.AccepterVpcInfo.VpcId) + d.Set("peer_region", pc.AccepterVpcInfo.Region) d.Set("vpc_id", pc.RequesterVpcInfo.VpcId) } d.Set("accept_status", pc.Status.Code) - d.Set("peer_region", pc.AccepterVpcInfo.Region) // When the VPC Peering Connection is pending acceptance, // the details about accepter and/or requester peering diff --git a/aws/resource_aws_vpc_peering_connection_accepter.go b/aws/resource_aws_vpc_peering_connection_accepter.go index 8b1efff50c3..41a4df6a06a 100644 --- a/aws/resource_aws_vpc_peering_connection_accepter.go +++ b/aws/resource_aws_vpc_peering_connection_accepter.go @@ -43,6 +43,10 @@ func resourceAwsVpcPeeringConnectionAccepter() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "peer_region": { + Type: schema.TypeString, + Computed: true, + }, "accepter": vpcPeeringConnectionOptionsSchema(), 
"requester": vpcPeeringConnectionOptionsSchema(), "tags": tagsSchema(), diff --git a/website/docs/r/vpc_peering_accepter.html.markdown b/website/docs/r/vpc_peering_accepter.html.markdown index 9e86c5c9e41..2875ddee056 100644 --- a/website/docs/r/vpc_peering_accepter.html.markdown +++ b/website/docs/r/vpc_peering_accepter.html.markdown @@ -91,6 +91,7 @@ All of the argument attributes except `auto_accept` are also exported as result * `vpc_id` - The ID of the accepter VPC. * `peer_vpc_id` - The ID of the requester VPC. * `peer_owner_id` - The AWS account ID of the owner of the requester VPC. +* `peer_region` - The region of the requester VPC. * `accepter` - A configuration block that describes [VPC Peering Connection] (http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options set for the accepter VPC. * `requester` - A configuration block that describes [VPC Peering Connection] From 892fe00b0ba05074433101172a7da0e35aa50f9f Mon Sep 17 00:00:00 2001 From: kl4w Date: Sat, 2 Dec 2017 09:50:33 -0500 Subject: [PATCH 009/350] Add region attribute to identify vpc peer connection requester --- aws/data_source_aws_vpc_peering_connection.go | 6 ++++++ website/docs/d/vpc_peering_connection.html.markdown | 2 ++ 2 files changed, 8 insertions(+) diff --git a/aws/data_source_aws_vpc_peering_connection.go b/aws/data_source_aws_vpc_peering_connection.go index a35283a2bb6..2c5c74c7854 100644 --- a/aws/data_source_aws_vpc_peering_connection.go +++ b/aws/data_source_aws_vpc_peering_connection.go @@ -39,6 +39,11 @@ func dataSourceAwsVpcPeeringConnection() *schema.Resource { Optional: true, Computed: true, }, + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, "peer_vpc_id": { Type: schema.TypeString, Optional: true, @@ -126,6 +131,7 @@ func dataSourceAwsVpcPeeringConnectionRead(d *schema.ResourceData, meta interfac d.Set("vpc_id", pcx.RequesterVpcInfo.VpcId) d.Set("owner_id", pcx.RequesterVpcInfo.OwnerId) d.Set("cidr_block", 
pcx.RequesterVpcInfo.CidrBlock) + d.Set("region", pcx.RequesterVpcInfo.Region) d.Set("peer_vpc_id", pcx.AccepterVpcInfo.VpcId) d.Set("peer_owner_id", pcx.AccepterVpcInfo.OwnerId) d.Set("peer_cidr_block", pcx.AccepterVpcInfo.CidrBlock) diff --git a/website/docs/d/vpc_peering_connection.html.markdown b/website/docs/d/vpc_peering_connection.html.markdown index 36ee7b15f2a..8627135afba 100644 --- a/website/docs/d/vpc_peering_connection.html.markdown +++ b/website/docs/d/vpc_peering_connection.html.markdown @@ -48,6 +48,8 @@ The given filters must match exactly one VPC peering connection whose data will * `cidr_block` - (Optional) The CIDR block of the requester VPC of the specific VPC Peering Connection to retrieve. +* `region` - (Optional) The region of the requester VPC of the specific VPC Peering Connection to retrieve. + * `peer_vpc_id` - (Optional) The ID of the accepter VPC of the specific VPC Peering Connection to retrieve. * `peer_owner_id` - (Optional) The AWS account ID of the owner of the accepter VPC of the specific VPC Peering Connection to retrieve. 
From 8e8ccc133d17529301a6fd624047df864efaf5ac Mon Sep 17 00:00:00 2001 From: kl4w Date: Sat, 2 Dec 2017 12:45:48 -0500 Subject: [PATCH 010/350] peer_region must always be the acceptor's region --- aws/resource_aws_vpc_peering_connection.go | 3 +-- aws/resource_aws_vpc_peering_connection_test.go | 2 -- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/aws/resource_aws_vpc_peering_connection.go b/aws/resource_aws_vpc_peering_connection.go index 24beea2862a..561c454fcaf 100644 --- a/aws/resource_aws_vpc_peering_connection.go +++ b/aws/resource_aws_vpc_peering_connection.go @@ -153,16 +153,15 @@ func resourceAwsVPCPeeringRead(d *schema.ResourceData, meta interface{}) error { // We're the accepter d.Set("peer_owner_id", pc.RequesterVpcInfo.OwnerId) d.Set("peer_vpc_id", pc.RequesterVpcInfo.VpcId) - d.Set("peer_region", pc.RequesterVpcInfo.Region) d.Set("vpc_id", pc.AccepterVpcInfo.VpcId) } else { // We're the requester d.Set("peer_owner_id", pc.AccepterVpcInfo.OwnerId) d.Set("peer_vpc_id", pc.AccepterVpcInfo.VpcId) - d.Set("peer_region", pc.AccepterVpcInfo.Region) d.Set("vpc_id", pc.RequesterVpcInfo.VpcId) } + d.Set("peer_region", pc.AccepterVpcInfo.Region) d.Set("accept_status", pc.Status.Code) // When the VPC Peering Connection is pending acceptance, diff --git a/aws/resource_aws_vpc_peering_connection_test.go b/aws/resource_aws_vpc_peering_connection_test.go index d7a1e5ec0ef..7a560fcbc05 100644 --- a/aws/resource_aws_vpc_peering_connection_test.go +++ b/aws/resource_aws_vpc_peering_connection_test.go @@ -437,7 +437,6 @@ provider "aws" { } resource "aws_vpc" "foo" { - provider = "aws" cidr_block = "10.0.0.0/16" } @@ -447,7 +446,6 @@ resource "aws_vpc" "bar" { } resource "aws_vpc_peering_connection" "foo" { - provider = "aws" vpc_id = "${aws_vpc.foo.id}" peer_vpc_id = "${aws_vpc.bar.id}" peer_region = "us-east-1" From 640f3b8236be014267f7fcab82a4632e61225a08 Mon Sep 17 00:00:00 2001 From: kl4w Date: Sun, 3 Dec 2017 09:42:46 -0500 Subject: [PATCH 011/350] 
auto_accept must be false when peer_region is set --- aws/resource_aws_vpc_peering_connection.go | 3 + ...rce_aws_vpc_peering_connection_accepter.go | 6 - ...ws_vpc_peering_connection_accepter_test.go | 120 +++++++++++++----- ...esource_aws_vpc_peering_connection_test.go | 84 +++++++++++- website/docs/r/vpc_peering.html.markdown | 9 +- .../docs/r/vpc_peering_accepter.html.markdown | 15 ++- 6 files changed, 184 insertions(+), 53 deletions(-) diff --git a/aws/resource_aws_vpc_peering_connection.go b/aws/resource_aws_vpc_peering_connection.go index 561c454fcaf..5573c1af4cd 100644 --- a/aws/resource_aws_vpc_peering_connection.go +++ b/aws/resource_aws_vpc_peering_connection.go @@ -76,6 +76,9 @@ func resourceAwsVPCPeeringCreate(d *schema.ResourceData, meta interface{}) error } if v, ok := d.GetOk("peer_region"); ok { + if _, ok := d.GetOk("auto_accept"); ok { + return fmt.Errorf("peer_region cannot be set whilst auto_accept is true when creating a vpc peering connection") + } createOpts.SetPeerRegion(v.(string)) } diff --git a/aws/resource_aws_vpc_peering_connection_accepter.go b/aws/resource_aws_vpc_peering_connection_accepter.go index 41a4df6a06a..854f8fc1668 100644 --- a/aws/resource_aws_vpc_peering_connection_accepter.go +++ b/aws/resource_aws_vpc_peering_connection_accepter.go @@ -1,7 +1,6 @@ package aws import ( - "errors" "log" "fmt" @@ -65,11 +64,6 @@ func resourceAwsVPCPeeringAccepterCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("VPC Peering Connection %q not found", id) } - // Ensure that this IS as cross-account VPC peering connection. 
- if d.Get("peer_owner_id").(string) == meta.(*AWSClient).accountid { - return errors.New("aws_vpc_peering_connection_accepter can only adopt into management cross-account VPC peering connections") - } - return resourceAwsVPCPeeringUpdate(d, meta) } diff --git a/aws/resource_aws_vpc_peering_connection_accepter_test.go b/aws/resource_aws_vpc_peering_connection_accepter_test.go index f0b287ec3dd..ef88c8085bf 100644 --- a/aws/resource_aws_vpc_peering_connection_accepter_test.go +++ b/aws/resource_aws_vpc_peering_connection_accepter_test.go @@ -1,22 +1,54 @@ package aws import ( - "regexp" "testing" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) -func TestAccAwsVPCPeeringConnectionAccepter_sameAccount(t *testing.T) { +func TestAccAWSVPCPeeringConnectionAccepter_sameRegion(t *testing.T) { + var connection ec2.VpcPeeringConnection + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccAwsVPCPeeringConnectionAccepterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAwsVPCPeeringConnectionAccepterSameRegion, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSVpcPeeringConnectionExists( + "aws_vpc_peering_connection_accepter.peer", + &connection), + resource.TestCheckResourceAttr( + "aws_vpc_peering_connection_accepter.peer", + "accept_status", "active"), + ), + }, + }, + }) +} + +func TestAccAWSVPCPeeringConnectionAccepter_differentRegion(t *testing.T) { + var connection ec2.VpcPeeringConnection + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccAwsVPCPeeringConnectionAccepterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAwsVPCPeeringConnectionAccepterSameAccountConfig, - ExpectError: regexp.MustCompile(`aws_vpc_peering_connection_accepter can only adopt into management 
cross-account VPC peering connections`), + Config: testAccAwsVPCPeeringConnectionAccepterDifferentRegion, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSVpcPeeringConnectionExists( + "aws_vpc_peering_connection_accepter.peer", + &connection), + resource.TestCheckResourceAttr( + "aws_vpc_peering_connection_accepter.peer", + "accept_status", "active"), + ), }, }, }) @@ -27,51 +59,75 @@ func testAccAwsVPCPeeringConnectionAccepterDestroy(s *terraform.State) error { return nil } -const testAccAwsVPCPeeringConnectionAccepterSameAccountConfig = ` +const testAccAwsVPCPeeringConnectionAccepterSameRegion = ` +resource "aws_vpc" "main" { + cidr_block = "10.0.0.0/16" + tags { + Name = "tf-acc-revoke-vpc-peering-connection-accepter-same-region" + } +} + +resource "aws_vpc" "peer" { + cidr_block = "10.1.0.0/16" + tags { + Name = "tf-acc-revoke-vpc-peering-connection-accepter-same-region" + } +} + +// Requester's side of the connection. +resource "aws_vpc_peering_connection" "peer" { + vpc_id = "${aws_vpc.main.id}" + peer_vpc_id = "${aws_vpc.peer.id}" + auto_accept = false +} + +// Accepter's side of the connection. +resource "aws_vpc_peering_connection_accepter" "peer" { + vpc_peering_connection_id = "${aws_vpc_peering_connection.peer.id}" + auto_accept = true +} +` + +const testAccAwsVPCPeeringConnectionAccepterDifferentRegion = ` provider "aws" { - region = "us-west-2" - // Requester's credentials. + alias = "main" + region = "us-west-2" } provider "aws" { - alias = "peer" - region = "us-west-2" - // Accepter's credentials. 
+ alias = "peer" + region = "us-east-1" } resource "aws_vpc" "main" { - cidr_block = "10.0.0.0/16" + provider = "aws.main" + cidr_block = "10.0.0.0/16" + tags { + Name = "tf-acc-revoke-vpc-peering-connection-accepter-different-region" + } } resource "aws_vpc" "peer" { - provider = "aws.peer" - cidr_block = "10.1.0.0/16" -} - -data "aws_caller_identity" "peer" { - provider = "aws.peer" + provider = "aws.peer" + cidr_block = "10.1.0.0/16" + tags { + Name = "tf-acc-revoke-vpc-peering-connection-accepter-different-region" + } } // Requester's side of the connection. resource "aws_vpc_peering_connection" "peer" { - vpc_id = "${aws_vpc.main.id}" - peer_vpc_id = "${aws_vpc.peer.id}" - peer_owner_id = "${data.aws_caller_identity.peer.account_id}" - auto_accept = false - - tags { - Side = "Requester" - } + provider = "aws.main" + vpc_id = "${aws_vpc.main.id}" + peer_vpc_id = "${aws_vpc.peer.id}" + peer_region = "us-east-1" + auto_accept = false } // Accepter's side of the connection. resource "aws_vpc_peering_connection_accepter" "peer" { - provider = "aws.peer" - vpc_peering_connection_id = "${aws_vpc_peering_connection.peer.id}" - auto_accept = true - - tags { - Side = "Accepter" - } + provider = "aws.peer" + vpc_peering_connection_id = "${aws_vpc_peering_connection.peer.id}" + auto_accept = true } ` diff --git a/aws/resource_aws_vpc_peering_connection_test.go b/aws/resource_aws_vpc_peering_connection_test.go index 7a560fcbc05..aecd69c1401 100644 --- a/aws/resource_aws_vpc_peering_connection_test.go +++ b/aws/resource_aws_vpc_peering_connection_test.go @@ -315,6 +315,21 @@ func testAccCheckAWSVpcPeeringConnectionOptions(n, block string, options *ec2.Vp } } +func TestAccAWSVPCPeeringConnection_peerRegionAndAutoAccept(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshIgnore: []string{"auto_accept"}, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSVpcPeeringConnectionDestroy, + Steps: 
[]resource.TestStep{ + resource.TestStep{ + Config: testAccVpcPeeringConfigRegionAutoAccept, + ExpectError: regexp.MustCompile(`.*peer_region cannot be set whilst auto_accept is true when creating a vpc peering connection.*`), + }, + }, + }) +} + func TestAccAWSVPCPeeringConnection_region(t *testing.T) { var connection ec2.VpcPeeringConnection @@ -342,12 +357,15 @@ const testAccVpcPeeringConfig = ` resource "aws_vpc" "foo" { cidr_block = "10.0.0.0/16" tags { - Name = "TestAccAWSVPCPeeringConnection_basic" + Name = "tf-acc-revoke-vpc-peering-connection-basic" } } resource "aws_vpc" "bar" { cidr_block = "10.1.0.0/16" + tags { + Name = "tf-acc-revoke-vpc-peering-connection-basic" + } } resource "aws_vpc_peering_connection" "foo" { @@ -361,12 +379,15 @@ const testAccVpcPeeringConfigTags = ` resource "aws_vpc" "foo" { cidr_block = "10.0.0.0/16" tags { - Name = "TestAccAWSVPCPeeringConnection_tags" + Name = "tf-acc-revoke-vpc-peering-connection-tags" } } resource "aws_vpc" "bar" { cidr_block = "10.1.0.0/16" + tags { + Name = "tf-acc-revoke-vpc-peering-connection-tags" + } } resource "aws_vpc_peering_connection" "foo" { @@ -383,13 +404,16 @@ const testAccVpcPeeringConfigOptions = ` resource "aws_vpc" "foo" { cidr_block = "10.0.0.0/16" tags { - Name = "TestAccAWSVPCPeeringConnection_options" + Name = "tf-acc-revoke-vpc-peering-connection-options" } } resource "aws_vpc" "bar" { cidr_block = "10.1.0.0/16" enable_dns_hostnames = true + tags { + Name = "tf-acc-revoke-vpc-peering-connection-options" + } } resource "aws_vpc_peering_connection" "foo" { @@ -412,40 +436,88 @@ const testAccVpcPeeringConfigFailedState = ` resource "aws_vpc" "foo" { cidr_block = "10.0.0.0/16" tags { - Name = "TestAccAWSVPCPeeringConnection_failedState" + Name = "tf-acc-revoke-vpc-peering-connection-failedState" } } resource "aws_vpc" "bar" { cidr_block = "10.0.0.0/16" + tags { + Name = "tf-acc-revoke-vpc-peering-connection-failedState" + } +} + +resource "aws_vpc_peering_connection" "foo" { + vpc_id = 
"${aws_vpc.foo.id}" + peer_vpc_id = "${aws_vpc.bar.id}" +} +` + +const testAccVpcPeeringConfigRegionAutoAccept = ` +provider "aws" { + alias = "main" + region = "us-west-2" +} + +provider "aws" { + alias = "peer" + region = "us-east-1" +} + +resource "aws_vpc" "foo" { + provider = "aws.main" + cidr_block = "10.0.0.0/16" + tags { + Name = "tf-acc-revoke-vpc-peering-connection-region" + } +} + +resource "aws_vpc" "bar" { + provider = "aws.peer" + cidr_block = "10.1.0.0/16" + tags { + Name = "tf-acc-revoke-vpc-peering-connection-region" + } } resource "aws_vpc_peering_connection" "foo" { + provider = "aws.main" vpc_id = "${aws_vpc.foo.id}" peer_vpc_id = "${aws_vpc.bar.id}" + peer_region = "us-east-1" + auto_accept = true } ` const testAccVpcPeeringConfigRegion = ` provider "aws" { + alias = "main" region = "us-west-2" } provider "aws" { - alias = "us-east-1" + alias = "peer" region = "us-east-1" } resource "aws_vpc" "foo" { + provider = "aws.main" cidr_block = "10.0.0.0/16" + tags { + Name = "tf-acc-revoke-vpc-peering-connection-region" + } } resource "aws_vpc" "bar" { - provider = "aws.us-east-1" + provider = "aws.peer" cidr_block = "10.1.0.0/16" + tags { + Name = "tf-acc-revoke-vpc-peering-connection-region" + } } resource "aws_vpc_peering_connection" "foo" { + provider = "aws.main" vpc_id = "${aws_vpc.foo.id}" peer_vpc_id = "${aws_vpc.bar.id}" peer_region = "us-east-1" diff --git a/website/docs/r/vpc_peering.html.markdown b/website/docs/r/vpc_peering.html.markdown index 10b711f8531..c4912c3526e 100644 --- a/website/docs/r/vpc_peering.html.markdown +++ b/website/docs/r/vpc_peering.html.markdown @@ -10,9 +10,9 @@ description: |- Provides a resource to manage a VPC Peering Connection resource. 
--> **Note:** For cross-account (requester's AWS account differs from the accepter's AWS account) VPC Peering Connections -use the `aws_vpc_peering_connection` resource to manage the requester's side of the connection and -use the `aws_vpc_peering_connection_accepter` resource to manage the accepter's side of the connection. +-> **Note:** For cross-account (requester's AWS account differs from the accepter's AWS account) or inter-region +VPC Peering Connections use the `aws_vpc_peering_connection` resource to manage the requester's side of the +connection and use the `aws_vpc_peering_connection_accepter` resource to manage the accepter's side of the connection. ## Example Usage @@ -102,7 +102,8 @@ The following arguments are supported: * `peer_vpc_id` - (Required) The ID of the VPC with which you are creating the VPC Peering Connection. * `vpc_id` - (Required) The ID of the requester VPC. * `auto_accept` - (Optional) Accept the peering (both VPCs need to be in the same AWS account). -* `peer_region` - (Optional) The region of the accepter VPC of the [VPC Peering Connection]. +* `peer_region` - (Optional) The region of the accepter VPC of the [VPC Peering Connection]. `auto_accept` must be `false`, +and use the `aws_vpc_peering_connection_accepter` to manage the accepter side. * `accepter` (Optional) - An optional configuration block that allows for [VPC Peering Connection] (http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options to be set for the VPC that accepts the peering connection (a maximum of one). 
diff --git a/website/docs/r/vpc_peering_accepter.html.markdown b/website/docs/r/vpc_peering_accepter.html.markdown index 2875ddee056..c69a2403d92 100644 --- a/website/docs/r/vpc_peering_accepter.html.markdown +++ b/website/docs/r/vpc_peering_accepter.html.markdown @@ -3,15 +3,16 @@ layout: "aws" page_title: "AWS: aws_vpc_peering_connection_accepter" sidebar_current: "docs-aws-resource-vpc-peering-accepter" description: |- - Manage the accepter's side of a cross-account VPC Peering Connection. + Manage the accepter's side of a VPC Peering Connection. --- # aws\_vpc\_peering\_connection\_accepter -Provides a resource to manage the accepter's side of a cross-account VPC Peering Connection. +Provides a resource to manage the accepter's side of a VPC Peering Connection. -When a cross-account (requester's AWS account differs from the accepter's AWS account) VPC Peering Connection -is created, a VPC Peering Connection resource is automatically created in the accepter's account. +When a cross-account (requester's AWS account differs from the accepter's AWS account) or an inter-region +VPC Peering Connection is created, a VPC Peering Connection resource is automatically created in the +accepter's account. The requester can use the `aws_vpc_peering_connection` resource to manage its side of the connection and the accepter can use the `aws_vpc_peering_connection_accepter` resource to "adopt" its side of the connection into management. @@ -20,11 +21,14 @@ connection into management. ```hcl provider "aws" { + region = "us-east-1" + # Requester's credentials. } provider "aws" { alias = "peer" + region = "us-west-2" # Accepter's credentials. 
} @@ -47,6 +51,7 @@ resource "aws_vpc_peering_connection" "peer" { vpc_id = "${aws_vpc.main.id}" peer_vpc_id = "${aws_vpc.peer.id}" peer_owner_id = "${data.aws_caller_identity.peer.account_id}" + peer_region = "us-west-2" auto_accept = false tags { @@ -91,7 +96,7 @@ All of the argument attributes except `auto_accept` are also exported as result * `vpc_id` - The ID of the accepter VPC. * `peer_vpc_id` - The ID of the requester VPC. * `peer_owner_id` - The AWS account ID of the owner of the requester VPC. -* `peer_region` - The region of the requester VPC. +* `peer_region` - The region of the accepter VPC. * `accepter` - A configuration block that describes [VPC Peering Connection] (http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options set for the accepter VPC. * `requester` - A configuration block that describes [VPC Peering Connection] From 4f0769ab459f924b5b82d9feb209d1e6e5ff4faf Mon Sep 17 00:00:00 2001 From: Kash Date: Fri, 8 Dec 2017 14:12:13 -0500 Subject: [PATCH 012/350] set up multiple providers for test and wait til accept complete --- aws/resource_aws_vpc_peering_connection.go | 37 +++++++++++++------ ...ws_vpc_peering_connection_accepter_test.go | 16 ++++++-- 2 files changed, 38 insertions(+), 15 deletions(-) diff --git a/aws/resource_aws_vpc_peering_connection.go b/aws/resource_aws_vpc_peering_connection.go index 5573c1af4cd..5480eaf3255 100644 --- a/aws/resource_aws_vpc_peering_connection.go +++ b/aws/resource_aws_vpc_peering_connection.go @@ -94,18 +94,9 @@ func resourceAwsVPCPeeringCreate(d *schema.ResourceData, meta interface{}) error d.SetId(*rt.VpcPeeringConnectionId) log.Printf("[INFO] VPC Peering Connection ID: %s", d.Id()) - // Wait for the vpc peering connection to become available - log.Printf("[DEBUG] Waiting for VPC Peering Connection (%s) to become available.", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"initiating-request", "provisioning", "pending"}, - Target: []string{"pending-acceptance", 
"active"}, - Refresh: resourceAwsVPCPeeringConnectionStateRefreshFunc(conn, d.Id()), - Timeout: 1 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return errwrap.Wrapf(fmt.Sprintf( - "Error waiting for VPC Peering Connection (%s) to become available: {{err}}", - d.Id()), err) + vpcAvailableErr := checkVpcPeeringConnectionAvailable(conn, d.Id()) + if vpcAvailableErr != nil { + return errwrap.Wrapf("Error waiting for VPC Peering Connection to become available: {{err}}", vpcAvailableErr) } return resourceAwsVPCPeeringUpdate(d, meta) @@ -280,6 +271,11 @@ func resourceAwsVPCPeeringUpdate(d *schema.ResourceData, meta interface{}) error } } + vpcAvailableErr := checkVpcPeeringConnectionAvailable(conn, d.Id()) + if vpcAvailableErr != nil { + return errwrap.Wrapf("Error waiting for VPC Peering Connection to become available: {{err}}", vpcAvailableErr) + } + return resourceAwsVPCPeeringRead(d, meta) } @@ -393,3 +389,20 @@ func expandPeeringOptions(m map[string]interface{}) *ec2.PeeringConnectionOption return r } + +func checkVpcPeeringConnectionAvailable(conn *ec2.EC2, id string) error { + // Wait for the vpc peering connection to become available + log.Printf("[DEBUG] Waiting for VPC Peering Connection (%s) to become available.", id) + stateConf := &resource.StateChangeConf{ + Pending: []string{"initiating-request", "provisioning", "pending"}, + Target: []string{"pending-acceptance", "active"}, + Refresh: resourceAwsVPCPeeringConnectionStateRefreshFunc(conn, id), + Timeout: 1 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return errwrap.Wrapf(fmt.Sprintf( + "Error waiting for VPC Peering Connection (%s) to become available: {{err}}", + id), err) + } + return nil +} diff --git a/aws/resource_aws_vpc_peering_connection_accepter_test.go b/aws/resource_aws_vpc_peering_connection_accepter_test.go index ef88c8085bf..56199ef8b45 100644 --- a/aws/resource_aws_vpc_peering_connection_accepter_test.go +++ 
b/aws/resource_aws_vpc_peering_connection_accepter_test.go @@ -5,6 +5,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) @@ -34,10 +35,19 @@ func TestAccAWSVPCPeeringConnectionAccepter_sameRegion(t *testing.T) { func TestAccAWSVPCPeeringConnectionAccepter_differentRegion(t *testing.T) { var connection ec2.VpcPeeringConnection + var providers []*schema.Provider + providerFactories := map[string]terraform.ResourceProviderFactory{ + "aws": func() (terraform.ResourceProvider, error) { + p := Provider() + providers = append(providers, p.(*schema.Provider)) + return p, nil + }, + } + resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccAwsVPCPeeringConnectionAccepterDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: providerFactories, + CheckDestroy: testAccAwsVPCPeeringConnectionAccepterDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccAwsVPCPeeringConnectionAccepterDifferentRegion, From e9152f1270d36673893240ef2cf3a772a9edaff7 Mon Sep 17 00:00:00 2001 From: Kash Date: Fri, 8 Dec 2017 18:23:00 -0500 Subject: [PATCH 013/350] wait for connection to be completely deleted --- aws/resource_aws_vpc_peering_connection.go | 14 ++++++++++++++ aws/resource_aws_vpc_peering_connection_test.go | 16 +++++++++++++--- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_vpc_peering_connection.go b/aws/resource_aws_vpc_peering_connection.go index 5480eaf3255..6424c4cc04c 100644 --- a/aws/resource_aws_vpc_peering_connection.go +++ b/aws/resource_aws_vpc_peering_connection.go @@ -287,6 +287,20 @@ func resourceAwsVPCPeeringDelete(d *schema.ResourceData, meta interface{}) error VpcPeeringConnectionId: aws.String(d.Id()), }) + // Wait for the vpc peering connection to become available + 
log.Printf("[DEBUG] Waiting for VPC Peering Connection (%s) to delete.", d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting"}, + Target: []string{"rejecting", "deleted"}, + Refresh: resourceAwsVPCPeeringConnectionStateRefreshFunc(conn, d.Id()), + Timeout: 1 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return errwrap.Wrapf(fmt.Sprintf( + "Error waiting for VPC Peering Connection (%s) to be deleted: {{err}}", + d.Id()), err) + } + return err } diff --git a/aws/resource_aws_vpc_peering_connection_test.go b/aws/resource_aws_vpc_peering_connection_test.go index aecd69c1401..a8397a7600e 100644 --- a/aws/resource_aws_vpc_peering_connection_test.go +++ b/aws/resource_aws_vpc_peering_connection_test.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) @@ -236,7 +237,7 @@ func testAccCheckAWSVpcPeeringConnectionDestroy(s *terraform.State) error { } if pc.Status != nil { - if *pc.Status.Code == "deleted" { + if *pc.Status.Code == "deleted" || *pc.Status.Code == "rejected" { return nil } return fmt.Errorf("Found the VPC Peering Connection in an unexpected state: %s", pc) @@ -333,13 +334,22 @@ func TestAccAWSVPCPeeringConnection_peerRegionAndAutoAccept(t *testing.T) { func TestAccAWSVPCPeeringConnection_region(t *testing.T) { var connection ec2.VpcPeeringConnection + var providers []*schema.Provider + providerFactories := map[string]terraform.ResourceProviderFactory{ + "aws": func() (terraform.ResourceProvider, error) { + p := Provider() + providers = append(providers, p.(*schema.Provider)) + return p, nil + }, + } + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, IDRefreshName: "aws_vpc_peering_connection.foo", IDRefreshIgnore: []string{"auto_accept"}, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckAWSVpcPeeringConnectionDestroy, + ProviderFactories: providerFactories, + CheckDestroy: testAccCheckAWSVpcPeeringConnectionDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccVpcPeeringConfigRegion, From fc78a55eb8e1503f55f4d676fd613ac5bf80b4cb Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 8 Dec 2017 22:22:49 -0500 Subject: [PATCH 014/350] r/aws_rds_cluster_instance: Set db_subnet_group_name in state on read if available --- aws/resource_aws_rds_cluster_instance.go | 4 ++++ aws/resource_aws_rds_cluster_instance_test.go | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_rds_cluster_instance.go b/aws/resource_aws_rds_cluster_instance.go index 269d28458c7..490485ebb20 100644 --- a/aws/resource_aws_rds_cluster_instance.go +++ b/aws/resource_aws_rds_cluster_instance.go @@ -312,6 +312,10 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) d.Set("port", db.Endpoint.Port) } + if db.DBSubnetGroup != nil { + d.Set("db_subnet_group_name", db.DBSubnetGroup.DBSubnetGroupName) + } + d.Set("publicly_accessible", db.PubliclyAccessible) d.Set("cluster_identifier", db.DBClusterIdentifier) d.Set("engine", db.Engine) diff --git a/aws/resource_aws_rds_cluster_instance_test.go b/aws/resource_aws_rds_cluster_instance_test.go index 0dc51ef4c50..fb4db405b7c 100644 --- a/aws/resource_aws_rds_cluster_instance_test.go +++ b/aws/resource_aws_rds_cluster_instance_test.go @@ -52,6 +52,7 @@ func TestAccAWSRDSClusterInstance_basic(t *testing.T) { func TestAccAWSRDSClusterInstance_namePrefix(t *testing.T) { var v rds.DBInstance + rInt := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -59,10 +60,12 @@ func TestAccAWSRDSClusterInstance_namePrefix(t *testing.T) { CheckDestroy: testAccCheckAWSClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSClusterInstanceConfig_namePrefix(acctest.RandInt()), + Config: 
testAccAWSClusterInstanceConfig_namePrefix(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.test", &v), testAccCheckAWSDBClusterInstanceAttributes(&v), + resource.TestCheckResourceAttr( + "aws_rds_cluster_instance.test", "db_subnet_group_name", fmt.Sprintf("tf-test-%d", rInt)), resource.TestMatchResourceAttr( "aws_rds_cluster_instance.test", "identifier", regexp.MustCompile("^tf-cluster-instance-")), ), From 69a87dc3da9f1fa0d86d4ab6e8b79d7ba8f77811 Mon Sep 17 00:00:00 2001 From: Tom Elliff Date: Mon, 11 Dec 2017 18:34:28 +0000 Subject: [PATCH 015/350] Add encrypt at rest to ES domains For now just adds encrypt at rest when creating an ES domain, doesn't yet handle reading encrypt at rest options so will likely cause Terraform to rebuild the domain on the next operation. The AWS console handily creates a service KMS key for ES when you are creating an encrypted ES domain via the console. This resource doesn't currently do that but that functionality could be added. 
--- aws/resource_aws_elasticsearch_domain.go | 33 +++++++++++++ aws/resource_aws_elasticsearch_domain_test.go | 49 +++++++++++++++++++ aws/structure.go | 13 +++++ 3 files changed, 95 insertions(+) diff --git a/aws/resource_aws_elasticsearch_domain.go b/aws/resource_aws_elasticsearch_domain.go index 8e5ab7e39a2..d0c0e6b9d46 100644 --- a/aws/resource_aws_elasticsearch_domain.go +++ b/aws/resource_aws_elasticsearch_domain.go @@ -89,6 +89,24 @@ func resourceAwsElasticSearchDomain() *schema.Resource { }, }, }, + "encrypt_at_rest": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + }, + "kms_key_id": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "cluster_config": { Type: schema.TypeList, Optional: true, @@ -291,6 +309,21 @@ func resourceAwsElasticSearchDomainCreate(d *schema.ResourceData, meta interface } } + if v, ok := d.GetOk("encrypt_at_rest"); ok { + options := v.([]interface{}) + + if len(options) > 1 { + return fmt.Errorf("Only a single encrypt_at_rest block is expected") + } else if len(options) == 1 { + if options[0] == nil { + return fmt.Errorf("At least one field is expected inside encrypt_at_rest") + } + + s := options[0].(map[string]interface{}) + input.EncryptionAtRestOptions = expandESEncryptAtRestOptions(s) + } + } + if v, ok := d.GetOk("cluster_config"); ok { config := v.([]interface{}) diff --git a/aws/resource_aws_elasticsearch_domain_test.go b/aws/resource_aws_elasticsearch_domain_test.go index 5640fffb8cd..15a7034baed 100644 --- a/aws/resource_aws_elasticsearch_domain_test.go +++ b/aws/resource_aws_elasticsearch_domain_test.go @@ -261,6 +261,24 @@ func TestAccAWSElasticSearchDomain_policy(t *testing.T) { }) } +func TestAccAWSElasticSearchDomain_encrypt_at_rest(t *testing.T) { + var domain elasticsearch.ElasticsearchDomainStatus + + resource.Test(t, resource.TestCase{ + PreCheck: 
func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckESDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccESDomainConfigWithEncryptAtRest(acctest.RandInt()), + Check: resource.ComposeTestCheckFunc( + testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), + ), + }, + }, + }) +} + func TestAccAWSElasticSearchDomain_tags(t *testing.T) { var domain elasticsearch.ElasticsearchDomainStatus var td elasticsearch.ListTagsOutput @@ -411,6 +429,7 @@ func testAccESDomainConfig(randInt int) string { return fmt.Sprintf(` resource "aws_elasticsearch_domain" "example" { domain_name = "tf-test-%d" + ebs_options { ebs_enabled = true volume_size = 10 @@ -504,6 +523,36 @@ data "aws_iam_policy_document" "instance-assume-role-policy" { `, randESId, randRoleId) } +func testAccESDomainConfigWithEncryptAtRest(randESId int) string { + return fmt.Sprintf(` +resource "aws_kms_key" "es" { + description = "kms-key-for-tf-test-%d" + deletion_window_in_days = 7 +} + +resource "aws_elasticsearch_domain" "example" { + domain_name = "tf-test-%d" + + elasticsearch_version = "6.0" + + # Encrypt at rest requires m4/c4/r4/i2 instances. 
See http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-supported-instance-types.html + cluster_config { + instance_type = "m4.large.elasticsearch" + } + + ebs_options { + ebs_enabled = true + volume_size = 10 + } + + encrypt_at_rest { + enabled = true + kms_key_id = "${aws_kms_key.es.key_id}" + } +} +`, randESId, randESId) +} + func testAccESDomainConfig_complex(randInt int) string { return fmt.Sprintf(` resource "aws_elasticsearch_domain" "example" { diff --git a/aws/structure.go b/aws/structure.go index e512f194f05..fc7cee31498 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -1045,6 +1045,19 @@ func expandESEBSOptions(m map[string]interface{}) *elasticsearch.EBSOptions { return &options } +func expandESEncryptAtRestOptions(m map[string]interface{}) *elasticsearch.EncryptionAtRestOptions { + options := elasticsearch.EncryptionAtRestOptions{} + + if v, ok := m["enabled"]; ok { + options.Enabled = aws.Bool(v.(bool)) + } + if v, ok := m["kms_key_id"]; ok && v.(string) != "" { + options.KmsKeyId = aws.String(v.(string)) + } + + return &options +} + func flattenESVPCDerivedInfo(o *elasticsearch.VPCDerivedInfo) []map[string]interface{} { m := map[string]interface{}{} From 793f0f1bfb50dd928ebdce9fcf811ff66e3f5769 Mon Sep 17 00:00:00 2001 From: Tom Elliff Date: Tue, 12 Dec 2017 11:36:00 +0000 Subject: [PATCH 016/350] Allow encryption of ES domain with service KMS key If you don't specify a KMS key then AWS will use the account's service KMS key (which is created for you automatically if you don't already have it). 
--- aws/resource_aws_elasticsearch_domain.go | 2 +- aws/resource_aws_elasticsearch_domain_test.go | 61 ++++++++++++++++++- 2 files changed, 59 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_elasticsearch_domain.go b/aws/resource_aws_elasticsearch_domain.go index d0c0e6b9d46..c48d3c9c608 100644 --- a/aws/resource_aws_elasticsearch_domain.go +++ b/aws/resource_aws_elasticsearch_domain.go @@ -102,7 +102,7 @@ func resourceAwsElasticSearchDomain() *schema.Resource { }, "kms_key_id": { Type: schema.TypeString, - Required: true, + Optional: true, }, }, }, diff --git a/aws/resource_aws_elasticsearch_domain_test.go b/aws/resource_aws_elasticsearch_domain_test.go index 15a7034baed..284239f2b44 100644 --- a/aws/resource_aws_elasticsearch_domain_test.go +++ b/aws/resource_aws_elasticsearch_domain_test.go @@ -261,7 +261,7 @@ func TestAccAWSElasticSearchDomain_policy(t *testing.T) { }) } -func TestAccAWSElasticSearchDomain_encrypt_at_rest(t *testing.T) { +func TestAccAWSElasticSearchDomain_encrypt_at_rest_default_key(t *testing.T) { var domain elasticsearch.ElasticsearchDomainStatus resource.Test(t, resource.TestCase{ @@ -270,9 +270,29 @@ func TestAccAWSElasticSearchDomain_encrypt_at_rest(t *testing.T) { CheckDestroy: testAccCheckESDomainDestroy, Steps: []resource.TestStep{ { - Config: testAccESDomainConfigWithEncryptAtRest(acctest.RandInt()), + Config: testAccESDomainConfigWithEncryptAtRestDefaultKey(acctest.RandInt()), Check: resource.ComposeTestCheckFunc( testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), + testAccCheckESEncrypted(true, &domain), + ), + }, + }, + }) +} + +func TestAccAWSElasticSearchDomain_encrypt_at_rest_specify_key(t *testing.T) { + var domain elasticsearch.ElasticsearchDomainStatus + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckESDomainDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccESDomainConfigWithEncryptAtRestWithKey(acctest.RandInt()), + Check: resource.ComposeTestCheckFunc( + testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), + testAccCheckESEncrypted(true, &domain), ), }, }, @@ -357,6 +377,16 @@ func testAccCheckESNumberOfInstances(numberOfInstances int, status *elasticsearc } } +func testAccCheckESEncrypted(encrypted bool, status *elasticsearch.ElasticsearchDomainStatus) resource.TestCheckFunc { + return func(s *terraform.State) error { + conf := status.EncryptionAtRestOptions + if *conf.Enabled != encrypted { + return fmt.Errorf("Encrypt at rest not set properly. Given: %t, Expected: %t", *conf.Enabled, encrypted) + } + return nil + } +} + func testAccLoadESTags(conf *elasticsearch.ElasticsearchDomainStatus, td *elasticsearch.ListTagsOutput) resource.TestCheckFunc { return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).esconn @@ -523,7 +553,32 @@ data "aws_iam_policy_document" "instance-assume-role-policy" { `, randESId, randRoleId) } -func testAccESDomainConfigWithEncryptAtRest(randESId int) string { +func testAccESDomainConfigWithEncryptAtRestDefaultKey(randESId int) string { + return fmt.Sprintf(` + +resource "aws_elasticsearch_domain" "example" { + domain_name = "tf-test-%d" + + elasticsearch_version = "6.0" + + # Encrypt at rest requires m4/c4/r4/i2 instances. 
See http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-supported-instance-types.html + cluster_config { + instance_type = "m4.large.elasticsearch" + } + + ebs_options { + ebs_enabled = true + volume_size = 10 + } + + encrypt_at_rest { + enabled = true + } +} +`, randESId) +} + +func testAccESDomainConfigWithEncryptAtRestWithKey(randESId int) string { return fmt.Sprintf(` resource "aws_kms_key" "es" { description = "kms-key-for-tf-test-%d" From 5d1b54593247a7d12efd10e2650471eaeae8aa53 Mon Sep 17 00:00:00 2001 From: Tom Elliff Date: Tue, 12 Dec 2017 12:25:38 +0000 Subject: [PATCH 017/350] Add documentation for ES encryption at rest --- website/docs/r/elasticsearch_domain.html.markdown | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/website/docs/r/elasticsearch_domain.html.markdown b/website/docs/r/elasticsearch_domain.html.markdown index dd825dcfc50..3e255df0e60 100644 --- a/website/docs/r/elasticsearch_domain.html.markdown +++ b/website/docs/r/elasticsearch_domain.html.markdown @@ -57,6 +57,7 @@ The following arguments are supported: * `access_policies` - (Optional) IAM policy document specifying the access policies for the domain * `advanced_options` - (Optional) Key-value string pairs to specify advanced configuration options. * `ebs_options` - (Optional) EBS related options, may be required based on chosen [instance size](https://aws.amazon.com/elasticsearch-service/pricing/). See below. +* `encrypt_at_rest` - (Optional) Encrypt at rest options. Only available for [certain instance types](http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-supported-instance-types.html). See below. * `cluster_config` - (Optional) Cluster configuration of the domain, see below. * `snapshot_options` - (Optional) Snapshot related options, see below. * `vpc_options` - (Optional) VPC related options, see below. 
Adding or removing this configuration forces a new resource ([documentation](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-vpc-limitations)). @@ -73,6 +74,11 @@ The following arguments are supported: * `iops` - (Optional) The baseline input/output (I/O) performance of EBS volumes attached to data nodes. Applicable only for the Provisioned IOPS EBS volume type. +**encrypt_at_rest** supports the following attributes: + +* `enabled` - (Required) Whether to enable encryption at rest. If the `encrypt_at_rest` block is not provided then this defaults to `false`. +* `kms_key_id` - (Optional) The KMS key id to encrypt the Elasticsearch domain with. If not specified then it defaults to using the `aws/es` service KMS key. + **cluster_config** supports the following attributes: * `instance_type` - (Optional) Instance type of data nodes in the cluster. From ca73236503971f2c2f0394070d120926a077add4 Mon Sep 17 00:00:00 2001 From: Tom Elliff Date: Tue, 12 Dec 2017 13:37:04 +0000 Subject: [PATCH 018/350] Read KMS key for encrypt at rest from AWS API --- aws/resource_aws_elasticsearch_domain.go | 19 +++++++++++++++++-- aws/structure.go | 13 +++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_elasticsearch_domain.go b/aws/resource_aws_elasticsearch_domain.go index c48d3c9c608..f8753935678 100644 --- a/aws/resource_aws_elasticsearch_domain.go +++ b/aws/resource_aws_elasticsearch_domain.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" + "strings" ) func resourceAwsElasticSearchDomain() *schema.Resource { @@ -101,8 +102,11 @@ func resourceAwsElasticSearchDomain() *schema.Resource { ForceNew: true, }, "kms_key_id": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: suppressEquivalentKmsKeyIds, }, }, }, 
@@ -494,6 +498,10 @@ func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{} if err != nil { return err } + err = d.Set("encrypt_at_rest", flattenESEncryptAtRestOptions(ds.EncryptionAtRestOptions)) + if err != nil { + return err + } err = d.Set("cluster_config", flattenESClusterConfig(ds.ElasticsearchClusterConfig)) if err != nil { return err @@ -707,3 +715,10 @@ func resourceAwsElasticSearchDomainDelete(d *schema.ResourceData, meta interface return err } + +func suppressEquivalentKmsKeyIds(k, old, new string, d *schema.ResourceData) bool { + // The Elasticsearch API accepts a short KMS key id but always returns the ARN of the key. + // The ARN is of the format 'arn:aws:kms:REGION:ACCOUNT_ID:key/KMS_KEY_ID'. + // These should be treated as equivalent. + return strings.Contains(old, new) +} diff --git a/aws/structure.go b/aws/structure.go index fc7cee31498..c0ecb0d9c1e 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -1045,6 +1045,19 @@ func expandESEBSOptions(m map[string]interface{}) *elasticsearch.EBSOptions { return &options } +func flattenESEncryptAtRestOptions(o *elasticsearch.EncryptionAtRestOptions) []map[string]interface{} { + m := map[string]interface{}{} + + if o.Enabled != nil { + m["enabled"] = *o.Enabled + } + if o.KmsKeyId != nil { + m["kms_key_id"] = *o.KmsKeyId + } + + return []map[string]interface{}{m} +} + func expandESEncryptAtRestOptions(m map[string]interface{}) *elasticsearch.EncryptionAtRestOptions { options := elasticsearch.EncryptionAtRestOptions{} From bddcaddf7c228d984782fc84488ce32761235c13 Mon Sep 17 00:00:00 2001 From: rhard7 Date: Wed, 29 Nov 2017 10:34:35 -0800 Subject: [PATCH 019/350] adds apigateway endpoint adds lambda endpoint to endpoints --- aws/config.go | 8 ++++++-- aws/provider.go | 20 ++++++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/aws/config.go b/aws/config.go index 4782f76eafc..b1d73f2edb2 100644 --- a/aws/config.go +++ b/aws/config.go @@ -100,6 +100,7 @@ 
type Config struct { AllowedAccountIds []interface{} ForbiddenAccountIds []interface{} + ApigatewayEndpoint string CloudFormationEndpoint string CloudWatchEndpoint string CloudWatchEventsEndpoint string @@ -111,6 +112,7 @@ type Config struct { IamEndpoint string KinesisEndpoint string KmsEndpoint string + LambdaEndpoint string RdsEndpoint string S3Endpoint string SnsEndpoint string @@ -318,6 +320,7 @@ func (c *Config) Client() (interface{}, error) { r53Sess := sess.Copy(&aws.Config{Region: aws.String("us-east-1")}) // Some services have user-configurable endpoints + awsApigatewaySess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ApigatewayEndpoint)}) awsCfSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudFormationEndpoint)}) awsCwSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchEndpoint)}) awsCweSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchEventsEndpoint)}) @@ -326,6 +329,7 @@ func (c *Config) Client() (interface{}, error) { awsEc2Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.Ec2Endpoint)}) awsElbSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ElbEndpoint)}) awsIamSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)}) + awsLambdaSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.LambdaEndpoint)}) awsKinesisSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KinesisEndpoint)}) awsKmsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KmsEndpoint)}) awsRdsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.RdsEndpoint)}) @@ -375,7 +379,7 @@ func (c *Config) Client() (interface{}, error) { } client.acmconn = acm.New(sess) - client.apigateway = apigateway.New(sess) + client.apigateway = apigateway.New(awsApigatewaySess) client.appautoscalingconn = applicationautoscaling.New(sess) client.autoscalingconn = autoscaling.New(sess) client.cfconn = cloudformation.New(awsCfSess) @@ -410,7 +414,7 @@ func (c *Config) Client() (interface{}, error) { client.iotconn = iot.New(sess) client.kinesisconn = 
kinesis.New(awsKinesisSess) client.kmsconn = kms.New(awsKmsSess) - client.lambdaconn = lambda.New(sess) + client.lambdaconn = lambda.New(awsLambdaSess) client.lightsailconn = lightsail.New(sess) client.mqconn = mq.New(sess) client.opsworksconn = opsworks.New(sess) diff --git a/aws/provider.go b/aws/provider.go index 1a09e135106..e7018e87210 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -563,6 +563,8 @@ func init() { "being executed. If the API request still fails, an error is\n" + "thrown.", + "apigateway_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", + "cloudformation_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", "cloudwatch_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", @@ -583,6 +585,8 @@ func init() { "iam_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", + "lambda_endpoint": "Use this to override the default endpoint URL constructed from the `region`\n", + "ec2_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", "elb_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", @@ -677,6 +681,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { for _, endpointsSetI := range endpointsSet.List() { endpoints := endpointsSetI.(map[string]interface{}) + config.ApigatewayEndpoint = endpoints["apigateway"].(string) config.CloudFormationEndpoint = endpoints["cloudformation"].(string) config.CloudWatchEndpoint = endpoints["cloudwatch"].(string) config.CloudWatchEventsEndpoint = endpoints["cloudwatchevents"].(string) @@ -688,6 +693,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { config.IamEndpoint = endpoints["iam"].(string) config.KinesisEndpoint = endpoints["kinesis"].(string) config.KmsEndpoint = endpoints["kms"].(string) + config.LambdaEndpoint = 
endpoints["lambda"].(string) config.RdsEndpoint = endpoints["rds"].(string) config.S3Endpoint = endpoints["s3"].(string) config.SnsEndpoint = endpoints["sns"].(string) @@ -749,6 +755,12 @@ func endpointsSchema() *schema.Schema { Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "apigateway": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["apigateway_endpoint"], + }, "cloudwatch": { Type: schema.TypeString, Optional: true, @@ -817,6 +829,12 @@ func endpointsSchema() *schema.Schema { Default: "", Description: descriptions["kms_endpoint"], }, + "lambda": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["lambda_endpoint"], + }, "rds": { Type: schema.TypeString, Optional: true, @@ -850,6 +868,7 @@ func endpointsSchema() *schema.Schema { func endpointsToHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["apigateway"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["cloudwatch"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["cloudwatchevents"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["cloudwatchlogs"].(string))) @@ -861,6 +880,7 @@ func endpointsToHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", m["elb"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["kinesis"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["kms"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["lambda"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["rds"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["s3"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["sns"].(string))) From 9c1dfd8888992f0832c9192faa4e846ff3905b5a Mon Sep 17 00:00:00 2001 From: Atsushi Ishibashi Date: Wed, 13 Dec 2017 09:30:03 +0900 Subject: [PATCH 020/350] Add notes --- website/docs/r/cognito_user_pool.markdown | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/docs/r/cognito_user_pool.markdown 
b/website/docs/r/cognito_user_pool.markdown index ea5f0b9b2b3..74d4b3dac25 100644 --- a/website/docs/r/cognito_user_pool.markdown +++ b/website/docs/r/cognito_user_pool.markdown @@ -30,8 +30,8 @@ The following arguments are supported: * `device_configuration` (Optional) - The configuration for the [user pool's device tracking](#device-configuration). * `email_configuration` (Optional) - The [Email Configuration](#email-configuration). * `name` - (Required) The name of the user pool. -* `email_verification_subject` - (Optional) A string representing the email verification subject. -* `email_verification_message` - (Optional) A string representing the email verification message. Must contain the `{####}` placeholder. +* `email_verification_subject` - (Optional) A string representing the email verification subject. **NOTE:** - If `email_verification_subject` and `verification_message_template.email_subject` are specified and the values are different, either one is prioritized and updated. +* `email_verification_message` - (Optional) A string representing the email verification message. Must contain the `{####}` placeholder. **NOTE:** - If `email_verification_message` and `verification_message_template.email_message` are specified and the values are different, either one is prioritized and updated. * `lambda_config` (Optional) - A container for the AWS [Lambda triggers](#lambda-configuration) associated with the user pool. * `mfa_configuration` - (Optional, Default: OFF) Set to enable multi-factor authentication. Must be one of the following values (ON, OFF, OPTIONAL) * `password_policy` (Optional) - A container for information about the [user pool password policy](#password-policy). @@ -112,9 +112,9 @@ The following arguments are supported: #### Verification Message Template * `default_email_option` (Optional) - The default email option. Must be either `CONFIRM_WITH_CODE` or `CONFIRM_WITH_LINK`. Defaults to `CONFIRM_WITH_CODE`. 
- * `email_message` (Optional) - The email message template. Must contain the `{####}` placeholder. + * `email_message` (Optional) - The email message template. Must contain the `{####}` placeholder. **NOTE:** - If `email_verification_message` and `verification_message_template.email_message` are specified and the values are different, either one is prioritized and updated. * `email_message_by_link` (Optional) - The email message template for sending a confirmation link to the user. - * `email_subject` (Optional) - The subject line for the email message template. + * `email_subject` (Optional) - The subject line for the email message template. **NOTE:** - If `email_verification_subject` and `verification_message_template.email_subject` are specified and the values are different, either one is prioritized and updated. * `email_subject_by_link` (Optional) - The subject line for the email message template for sending a confirmation link to the user. * `sms_message` (Optional) - The SMS message template. Must contain the `{####}` placeholder. 
From c2d03ffdadc4ca46e8994600b5655c93d062ccc8 Mon Sep 17 00:00:00 2001 From: Enxebre Date: Thu, 14 Dec 2017 11:22:49 +0100 Subject: [PATCH 021/350] add support for govcloud s3 object tags --- aws/resource_aws_s3_bucket_object.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket_object.go b/aws/resource_aws_s3_bucket_object.go index fb2791ce4a1..516fdd37577 100644 --- a/aws/resource_aws_s3_bucket_object.go +++ b/aws/resource_aws_s3_bucket_object.go @@ -132,7 +132,7 @@ func resourceAwsS3BucketObject() *schema.Resource { func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) error { s3conn := meta.(*AWSClient).s3conn - restricted := meta.(*AWSClient).IsGovCloud() || meta.(*AWSClient).IsChinaCloud() + restricted := meta.(*AWSClient).IsChinaCloud() var body io.ReadSeeker @@ -231,7 +231,7 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) error { s3conn := meta.(*AWSClient).s3conn - restricted := meta.(*AWSClient).IsGovCloud() || meta.(*AWSClient).IsChinaCloud() + restricted := meta.(*AWSClient).IsChinaCloud() bucket := d.Get("bucket").(string) key := d.Get("key").(string) From 08b2479a7e1076b72fe01d3b64f4689e43c4f908 Mon Sep 17 00:00:00 2001 From: Ian Dillon Date: Sun, 17 Dec 2017 12:21:16 -0500 Subject: [PATCH 022/350] Adds security_group_id to directory_service_directory output. 
--- aws/resource_aws_directory_service_directory.go | 8 ++++++++ aws/resource_aws_directory_service_directory_test.go | 1 + website/docs/r/directory_service_directory.html.markdown | 3 ++- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_directory_service_directory.go b/aws/resource_aws_directory_service_directory.go index 54d5839988e..9de42ed3b0b 100644 --- a/aws/resource_aws_directory_service_directory.go +++ b/aws/resource_aws_directory_service_directory.go @@ -134,6 +134,10 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource { Set: schema.HashString, Computed: true, }, + "security_group_id": { + Type: schema.TypeString, + Computed: true, + }, "type": { Type: schema.TypeString, Optional: true, @@ -446,6 +450,10 @@ func resourceAwsDirectoryServiceDirectoryRead(d *schema.ResourceData, meta inter d.Set("connect_settings", flattenDSConnectSettings(dir.DnsIpAddrs, dir.ConnectSettings)) d.Set("enable_sso", *dir.SsoEnabled) + if dir.VpcSettings != nil { + d.Set("security_group_id", *dir.VpcSettings.SecurityGroupId) + } + tagList, err := dsconn.ListTagsForResource(&directoryservice.ListTagsForResourceInput{ ResourceId: aws.String(d.Id()), }) diff --git a/aws/resource_aws_directory_service_directory_test.go b/aws/resource_aws_directory_service_directory_test.go index c294c9e823b..a2004853c34 100644 --- a/aws/resource_aws_directory_service_directory_test.go +++ b/aws/resource_aws_directory_service_directory_test.go @@ -98,6 +98,7 @@ func TestAccAWSDirectoryServiceDirectory_basic(t *testing.T) { Config: testAccDirectoryServiceDirectoryConfig, Check: resource.ComposeTestCheckFunc( testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"), + resource.TestCheckResourceAttrSet("aws_directory_service_directory.bar", "security_group_id"), ), }, }, diff --git a/website/docs/r/directory_service_directory.html.markdown b/website/docs/r/directory_service_directory.html.markdown index 673de123228..99a44bf3f49 100644 --- 
a/website/docs/r/directory_service_directory.html.markdown +++ b/website/docs/r/directory_service_directory.html.markdown @@ -83,6 +83,7 @@ The following attributes are exported: * `id` - The directory identifier. * `access_url` - The access URL for the directory, such as `http://alias.awsapps.com`. * `dns_ip_addresses` - A list of IP addresses of the DNS servers for the directory or connector. +* `security_group_id` - The ID of the security group created by the directory (`SimpleAD` or `MicrosoftAD` only). ## Import @@ -91,4 +92,4 @@ DirectoryService directories can be imported using the directory `id`, e.g. ``` $ terraform import aws_directory_service_directory.sample d-926724cf57 -``` \ No newline at end of file +``` From f7e0ccf4391795bd969e74cf02b2117e423a8007 Mon Sep 17 00:00:00 2001 From: Nathan Smith Date: Mon, 18 Dec 2017 16:34:51 +0000 Subject: [PATCH 023/350] removing duplicate --- website/docs/d/availability_zone.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/d/availability_zone.html.markdown b/website/docs/d/availability_zone.html.markdown index d053672737e..bcf6b99ff75 100644 --- a/website/docs/d/availability_zone.html.markdown +++ b/website/docs/d/availability_zone.html.markdown @@ -77,7 +77,7 @@ zone whose data will be exported as attributes. * `name` - (Optional) The full name of the availability zone to select. * `state` - (Optional) A specific availability zone state to require. May - be any of `"available"`, `"information"`, `"impaired"` or `"available"`. + be any of `"available"`, `"information"` or `"impaired"`. All reasonable uses of this data source will specify `name`, since `state` alone would match a single AZ only in a region that itself has only one AZ. 
From cca30acedfcb54b7c96935078b9dd9353280bf59 Mon Sep 17 00:00:00 2001 From: Puneeth Nanjundaswamy Date: Tue, 19 Dec 2017 09:44:14 +0100 Subject: [PATCH 024/350] Bump aws-sdk-go to v1.12.49 --- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 42 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/cloudwatch/api.go | 2 + vendor/vendor.json | 830 +++++++++--------- 4 files changed, 460 insertions(+), 416 deletions(-) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 295e028638e..c6cf4071acb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -251,6 +251,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -281,6 +282,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -306,6 +308,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -354,6 +357,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -412,6 +416,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -491,6 +496,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, 
"eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -545,6 +551,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -645,6 +652,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -698,6 +706,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -762,6 +771,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -790,6 +800,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -813,12 +824,15 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -854,6 +868,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -873,6 +888,7 @@ var 
awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -905,6 +921,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -929,6 +946,7 @@ var awsPartition = partition{ }, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", @@ -981,6 +999,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1000,6 +1019,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1051,6 +1071,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1060,6 +1081,7 @@ var awsPartition = partition{ "glue": service{ Endpoints: endpoints{ + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1157,6 +1179,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1184,6 +1207,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, 
"us-east-2": endpoint{}, @@ -1203,6 +1227,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1237,6 +1262,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1316,6 +1342,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1344,6 +1371,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1394,6 +1422,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1413,6 +1442,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{dnsSuffix}", @@ -1434,6 +1464,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1511,6 +1542,7 @@ var awsPartition = partition{ SignatureVersions: []string{"s3", "s3v4"}, }, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -1567,6 +1599,7 @@ var awsPartition = partition{ "eu-central-1": 
endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1593,8 +1626,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1628,6 +1663,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1650,6 +1686,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "queue.{dnsSuffix}", @@ -1671,6 +1708,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1703,6 +1741,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1727,6 +1766,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -1765,6 +1805,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -1814,6 +1855,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, 
"sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 4a53cddea33..fdca0c8d26e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.12.48" +const SDKVersion = "1.12.49" diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go index eceb129e510..47c5f7f9417 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go @@ -1300,6 +1300,8 @@ func (c *CloudWatch) PutMetricAlarmRequest(input *PutMetricAlarmInput) (req *req // // If you are an IAM user, you must have Amazon EC2 permissions for some operations: // +// * iam:CreateServiceLinkedRole for all alarms with EC2 actions +// // * ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms // on EC2 instance status metrics // diff --git a/vendor/vendor.json b/vendor/vendor.json index 16fbf45c5ed..7fe1a3badbf 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -141,828 +141,828 @@ "revisionTime": "2017-07-27T15:54:43Z" }, { - "checksumSHA1": "5w4Jb36IWgEo9UEhPDiFNumIbBY=", + "checksumSHA1": "lLLCYYcMsypt77CRhQMbGpJ+ZM4=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "DtuTqKH29YnLjrIJkRYX0HQtXY0=", "path": "github.com/aws/aws-sdk-go/aws/arn", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": 
"2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "9nE/FjZ4pYrT883KtV2/aI+Gayo=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": 
"fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { 
"checksumSHA1": "OnU/n7R33oYXiB4SAGd5pK7I0Bs=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { - "checksumSHA1": "Fh9Vt1+lXYG7lc57koj5Fb+/WV4=", + "checksumSHA1": "StXQWfRcKjNysYZQFm/noJn9xnM=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "9GvAyILJ7g+VUg8Ef5DsT5GuYsg=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": 
"ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "iU00ZjhAml/13g+1YXT21IqoXqg=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "NStHCXEvYqG72GknZyv1jaKaeH0=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "yHfT5DTbeCLs4NE2Rgnqrhe15ls=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - 
"revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "pkeoOfZpHRvFG/AOZeTf0lwtsFg=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=", 
"path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "vnYDXA1NxJ7Hu+DMfXNk1UnmkWg=", "path": "github.com/aws/aws-sdk-go/service/acm", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + 
"version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "8mGhHG7WChxuu8Hu3Vv5j2sMKNQ=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "X8tOI6i+RJwXIgg1qBjDNclyG/0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "aDAaH6YiA50IrJ5Smfg0fovrniA=", "path": "github.com/aws/aws-sdk-go/service/appsync", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "oBXDw1zQTfxcKsK3ZjtKcS7gBLI=", "path": "github.com/aws/aws-sdk-go/service/athena", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "ITAwWyJp4t9AGfUXm9M3pFWTHVA=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + 
"revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "Zz8qI6RloveM1zrXAglLxJZT1ZA=", "path": "github.com/aws/aws-sdk-go/service/batch", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "/nO06EpnD22+Ex80gHi4UYrAvKc=", "path": "github.com/aws/aws-sdk-go/service/budgets", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "6gM3CZZgiB0JvS7EK1c31Q8L09U=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "T80IDetBz1hqJpq5Wqmx3MwCh8w=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "bYrI9mxspB0xDFZEy3OIfWuez5g=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - 
"revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { - "checksumSHA1": "LW6GGO3fnealwJTWtbZk5ShjHdg=", + "checksumSHA1": "oB+M+kOmYG28V0PuI75IF6E+/w8=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "Nc3vXlV7s309PprScYpRDPQWeDQ=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "bPh7NF3mLpGMV0rIakolMPHqMyw=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "OqrWtx9iyIJ9roP2sEcmP9UCfXE=", "path": "github.com/aws/aws-sdk-go/service/codebuild", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": 
"7nW1Ho2X3RcUU8FaFBhJIUeuDNw=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "m19PZt1B51QCWo1jxSbII2zzL6Q=", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "LKw7fnNwq17Eqy0clzS/LK89vS4=", "path": "github.com/aws/aws-sdk-go/service/codepipeline", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "aXh1KIbNX+g+tH+lh3pk++9lm3k=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentity", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "IWi9xZz+OncotjM/vJ87Iffg2Qk=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentityprovider", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + 
"revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "6uudO8hkB5uERXixPA/yL3xcguQ=", "path": "github.com/aws/aws-sdk-go/service/configservice", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "hYCwLQdIjHj8rMHLGVyUVhecI4s=", "path": "github.com/aws/aws-sdk-go/service/databasemigrationservice", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "26CWoHQP/dyL2VzE5ZNd8zNzhko=", "path": "github.com/aws/aws-sdk-go/service/devicefarm", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "6g94rUHAgjcqMMTtMqKUbLU37wY=", "path": "github.com/aws/aws-sdk-go/service/directconnect", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "oFnS6I0u7KqnxK0/r1uoz8rTkxI=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - 
"version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "0TXXUPjrbOCHpX555B6suH36Nnk=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "BOjSO1uO7Coj6o3oqpPUtEhQrPI=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "uEv9kkBsVIjg7K4+Y8TVlU0Cc8o=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "sD9Urgwx7F3ImX+tJg2Q+ME/oFM=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "eoM9nF5iVMbuGOmkY33d19aHt8Y=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": 
"fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "dU5MPXUUOYD/E9sNncpFZ/U86Cw=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "pj8mBWT3HE0Iid6HSmhw7lmyZDU=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "VYGtTaSiajfKOVTbi9/SNmbiIac=", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "SZ7yLDZ6RvMhpWe0Goyem64kgyA=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { 
"checksumSHA1": "WYqHhdRNsiGGBLWlBLbOItZf+zA=", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "ae7VWg/xuXpnSD6wGumN44qEd+Q=", "path": "github.com/aws/aws-sdk-go/service/elbv2", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "NbkH6F+792jQ7BW4lGCb+vJVw58=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "5btWHj2fZrPc/zfYdJLPaOcivxI=", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "oDoGvSfmO2Z099ixV2HXn+SDeHE=", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": 
"2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "HRmbBf3dUEBAfdC2xKaoWAGeM7Y=", "path": "github.com/aws/aws-sdk-go/service/glue", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "6JlxJoy1JCArNK2qBkaJ5IV6qBc=", "path": "github.com/aws/aws-sdk-go/service/guardduty", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "oZaxMqnwl2rA+V/W0tJ3uownORI=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "dnNMSn5aHAtdOks+aWHLpwbi/VE=", "path": "github.com/aws/aws-sdk-go/service/inspector", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "96OBMJ3R9BD402LJsUUA8a82/UY=", "path": "github.com/aws/aws-sdk-go/service/iot", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + 
"revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "IoSyRZhlL0petrB28nXk5jKM9YA=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "oAFLgD0uJiVOZkFkL5dd/wUgBz4=", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "XDVse9fKF0RkAywzzgsO31AV4oc=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "HluEcyZNywrbKnj/aR3tXbu29d8=", "path": "github.com/aws/aws-sdk-go/service/lexmodelbuildingservice", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "wjs9YBsHx0YQH0zKBA7Ibd1UV5Y=", "path": "github.com/aws/aws-sdk-go/service/lightsail", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - 
"revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "4VfB5vMLNYs0y6K159YCBgo9T3c=", "path": "github.com/aws/aws-sdk-go/service/mediaconvert", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "Ox3VWHYSQq0YKmlr0paUPdr5W/0=", "path": "github.com/aws/aws-sdk-go/service/medialive", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "Rs7QtkcLl3XNPnKb8ss/AhF2X50=", "path": "github.com/aws/aws-sdk-go/service/mediapackage", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "QjiIL8LrlhwrQw8FboF+wMNvUF0=", "path": "github.com/aws/aws-sdk-go/service/mediastore", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "+srPYMy6U6+D29GNDM+FEtzj05g=", "path": 
"github.com/aws/aws-sdk-go/service/mediastoredata", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "ynB7Flcudp0VOqBVKZJ+23DtLHU=", "path": "github.com/aws/aws-sdk-go/service/mq", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "fpsBu+F79ktlLRwal1GugVMUDo0=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "Iqkgx2nafQPV7fjw+uP35jtF6t4=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "vP1FcccUZbuUlin7ME89w1GVJtA=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": 
"v1.12.49" }, { "checksumSHA1": "yV47oX5pFLCiMLSlfEPkPY3oqJg=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "sCaHoPWsJXRHFbilUKwN71qFTOI=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "QZU8vR9cOIenYiH+Ywl4Gzfnlp0=", "path": "github.com/aws/aws-sdk-go/service/servicecatalog", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "dk6ebvA0EYgdPyc5HPKLBPEtsm4=", "path": "github.com/aws/aws-sdk-go/service/servicediscovery", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "Ex1Ma0SFGpqeNuPbeXZtsliZ3zo=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + 
"revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "maVXeR3WDAkONlzf04e4mDgCYxo=", "path": "github.com/aws/aws-sdk-go/service/sfn", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "ADoR4mlCW5usH8iOa6mPNSy49LM=", "path": "github.com/aws/aws-sdk-go/service/shield", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "B3CgAFSREebpsFoFOo4vrQ6u04w=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "FfY8w4DM8XIULdRnFhd3Um8Mj8c=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "Wx189wAbIhWChx4kVbvsyqKMF4U=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + 
"revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "ijz0rBDeR6JP/06S+97k84FRYxc=", "path": "github.com/aws/aws-sdk-go/service/ssm", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "W1oFtpaT4TWIIJrAvFcn/XdcT7g=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "Uw4pOUxSMbx4xBHUcOUkNhtnywE=", "path": "github.com/aws/aws-sdk-go/service/swf", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "on6d7Hydx2bM9jkFOf1JZcZZgeY=", "path": "github.com/aws/aws-sdk-go/service/waf", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "rHqjsOndIR82gX5mSKybaRWf3UY=", "path": "github.com/aws/aws-sdk-go/service/wafregional", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": 
"2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "5HDSvmMW7F3xzPAzughe4dEn6RM=", "path": "github.com/aws/aws-sdk-go/service/workspaces", - "revision": "fd9b7491525896e01db35c1e20a5bd94bf11491c", - "revisionTime": "2017-12-15T20:15:07Z", - "version": "v1.12.48", - "versionExact": "v1.12.48" + "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", + "revisionTime": "2017-12-19T05:52:48Z", + "version": "v1.12.49", + "versionExact": "v1.12.49" }, { "checksumSHA1": "usT4LCSQItkFvFOQT7cBlkCuGaE=", From 3916ab7286851e1b245f62a34c71b0078784ec30 Mon Sep 17 00:00:00 2001 From: Xiaowei Date: Tue, 19 Dec 2017 10:25:53 +0100 Subject: [PATCH 025/350] resource/cognito_user_pool: Update Cognito email message length to 20,000 (#2692) --- aws/validators.go | 4 ++-- aws/validators_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/validators.go b/aws/validators.go index 9c5ecb4da85..8aa8b89aefc 100644 --- a/aws/validators.go +++ b/aws/validators.go @@ -1465,8 +1465,8 @@ func validateCognitoUserPoolEmailVerificationMessage(v interface{}, k string) (w es = append(es, fmt.Errorf("%q cannot be less than 6 characters", k)) } - if len(value) > 2000 { - es = append(es, fmt.Errorf("%q cannot be longer than 2000 characters", k)) + if len(value) > 20000 { + es = append(es, fmt.Errorf("%q cannot be longer than 20000 characters", k)) } if !regexp.MustCompile(`[\p{L}\p{M}\p{S}\p{N}\p{P}\s*]*\{####\}[\p{L}\p{M}\p{S}\p{N}\p{P}\s*]*`).MatchString(value) { diff --git a/aws/validators_test.go b/aws/validators_test.go index cf9ec0234bc..b3af78abe15 100644 --- a/aws/validators_test.go +++ b/aws/validators_test.go @@ -2327,7 +2327,7 @@ func TestValidateCognitoUserPoolEmailVerificationMessage(t *testing.T) { "Foo {####}", "{####} Bar", 
"AZERTYUIOPQSDFGHJKLMWXCVBN?./+%£*¨°0987654321&é\"'(§è!çà)-@^'{####},=ù`$|´”’[å»ÛÁØ]–Ô¥#‰±•", - "{####}" + strings.Repeat("W", 1994), // = 2000 + "{####}" + strings.Repeat("W", 19994), // = 20000 } for _, s := range validValues { @@ -2340,7 +2340,7 @@ func TestValidateCognitoUserPoolEmailVerificationMessage(t *testing.T) { invalidValues := []string{ "Foo", "{###}", - "{####}" + strings.Repeat("W", 1995), // > 2000 + "{####}" + strings.Repeat("W", 19995), // > 20000 } for _, s := range invalidValues { From dde11f6b921fcd25cdbc07c79cc253b9179514e5 Mon Sep 17 00:00:00 2001 From: Gauthier Wallet Date: Tue, 19 Dec 2017 10:27:49 +0100 Subject: [PATCH 026/350] Update CHANGELOG.md --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 430da91dd19..61ee2d90506 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,9 @@ ## 1.6.1 (Unreleased) + +BUG FIXES: + +* resource/cognito_user_pool: Update Cognito email message length to 20,000 [GH-2692] + ## 1.6.0 (December 18, 2017) FEATURES: From 5dc836a47aed13eec7bb4862e3c27d4e6eb118da Mon Sep 17 00:00:00 2001 From: Ninir Date: Fri, 15 Dec 2017 17:28:55 +0100 Subject: [PATCH 027/350] Cleaned Data Sources --- aws/data_source_aws_acm_certificate.go | 5 ++- aws/data_source_aws_ami.go | 1 + aws/data_source_aws_ami_ids.go | 3 +- aws/data_source_aws_autoscaling_groups.go | 4 +- aws/data_source_aws_availability_zone.go | 10 ++--- aws/data_source_aws_availability_zones.go | 3 +- ...data_source_aws_billing_service_account.go | 1 - aws/data_source_aws_caller_identity.go | 3 ++ aws/data_source_aws_canonical_user_id.go | 2 +- aws/data_source_aws_cloudformation_stack.go | 6 ++- ...a_source_aws_cloudtrail_service_account.go | 2 +- aws/data_source_aws_db_instance.go | 6 +-- aws/data_source_aws_db_snapshot.go | 1 + aws/data_source_aws_dynamodb_table.go | 3 +- aws/data_source_aws_ebs_snapshot.go | 1 + aws/data_source_aws_ebs_snapshot_ids.go | 4 +- aws/data_source_aws_ebs_volume.go | 1 + 
aws/data_source_aws_ecr_repository.go | 7 ++-- aws/data_source_aws_ecs_cluster.go | 7 +++- ...ata_source_aws_ecs_container_definition.go | 27 ++++++------ aws/data_source_aws_ecs_task_definition.go | 19 +++++---- aws/data_source_aws_efs_file_system.go | 1 + aws/data_source_aws_efs_mount_target.go | 1 + aws/data_source_aws_eip.go | 6 +-- ...ce_aws_elastic_beanstalk_solution_stack.go | 1 + aws/data_source_aws_elasticache_cluster.go | 1 + ...ource_aws_elasticache_replication_group.go | 2 + aws/data_source_aws_elb.go | 2 +- aws/data_source_aws_iam_group.go | 2 + aws/data_source_aws_iam_instance_profile.go | 2 + aws/data_source_aws_iam_policy_document.go | 4 +- aws/data_source_aws_iam_server_certificate.go | 2 + aws/data_source_aws_iam_user.go | 3 ++ aws/data_source_aws_instance.go | 2 +- aws/data_source_aws_instances.go | 2 +- aws/data_source_aws_internet_gateway.go | 2 +- aws/data_source_aws_ip_ranges.go | 10 ++--- aws/data_source_aws_kinesis_stream.go | 18 ++++---- aws/data_source_aws_kms_alias.go | 2 + aws/data_source_aws_kms_ciphertext.go | 3 +- aws/data_source_aws_kms_secret.go | 10 ++--- aws/data_source_aws_lb.go | 2 + aws/data_source_aws_lb_target_group.go | 2 + aws/data_source_aws_nat_gateway.go | 2 +- aws/data_source_aws_network_interface.go | 34 ++++++++------- aws/data_source_aws_prefix_list.go | 8 ++-- aws/data_source_aws_rds_cluster.go | 7 +++- ...ata_source_aws_redshift_service_account.go | 2 +- aws/data_source_aws_region.go | 8 ++-- aws/data_source_aws_route53_zone.go | 15 ++++--- aws/data_source_aws_route_table.go | 2 +- aws/data_source_aws_s3_bucket_object.go | 42 +++++++++---------- aws/data_source_aws_security_group.go | 2 +- aws/data_source_aws_sns.go | 2 + aws/data_source_aws_ssm_parameter.go | 3 +- aws/data_source_aws_subnet.go | 2 +- aws/data_source_aws_subnet_ids.go | 4 +- aws/data_source_aws_vpc.go | 2 +- aws/data_source_aws_vpc_endpoint.go | 3 +- aws/data_source_aws_vpc_endpoint_service.go | 4 +- aws/data_source_aws_vpc_peering_connection.go | 
1 + aws/data_source_aws_vpn_gateway.go | 3 +- 62 files changed, 194 insertions(+), 148 deletions(-) diff --git a/aws/data_source_aws_acm_certificate.go b/aws/data_source_aws_acm_certificate.go index 5b69ed93dfa..12ce7157634 100644 --- a/aws/data_source_aws_acm_certificate.go +++ b/aws/data_source_aws_acm_certificate.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "time" "github.com/aws/aws-sdk-go/aws" @@ -38,10 +39,9 @@ func dataSourceAwsAcmCertificate() *schema.Resource { func dataSourceAwsAcmCertificateRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).acmconn - params := &acm.ListCertificatesInput{} + params := &acm.ListCertificatesInput{} target := d.Get("domain") - statuses, ok := d.GetOk("statuses") if ok { statusStrings := statuses.([]interface{}) @@ -51,6 +51,7 @@ func dataSourceAwsAcmCertificateRead(d *schema.ResourceData, meta interface{}) e } var arns []string + log.Printf("[DEBUG] Reading ACM Certificate: %s", params) err := conn.ListCertificatesPages(params, func(page *acm.ListCertificatesOutput, lastPage bool) bool { for _, cert := range page.CertificateSummaryList { if *cert.DomainName == target { diff --git a/aws/data_source_aws_ami.go b/aws/data_source_aws_ami.go index be9ba72b019..7a1fbecd2b0 100644 --- a/aws/data_source_aws_ami.go +++ b/aws/data_source_aws_ami.go @@ -202,6 +202,7 @@ func dataSourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error { } } + log.Printf("[DEBUG] Reading AMI: %s", params) resp, err := conn.DescribeImages(params) if err != nil { return err diff --git a/aws/data_source_aws_ami_ids.go b/aws/data_source_aws_ami_ids.go index bce1a70a2b2..73ed09d28a2 100644 --- a/aws/data_source_aws_ami_ids.go +++ b/aws/data_source_aws_ami_ids.go @@ -34,7 +34,7 @@ func dataSourceAwsAmiIds() *schema.Resource { ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "ids": &schema.Schema{ + "ids": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ 
-71,6 +71,7 @@ func dataSourceAwsAmiIdsRead(d *schema.ResourceData, meta interface{}) error { } } + log.Printf("[DEBUG] Reading AMI IDs: %s", params) resp, err := conn.DescribeImages(params) if err != nil { return err diff --git a/aws/data_source_aws_autoscaling_groups.go b/aws/data_source_aws_autoscaling_groups.go index f43f21d4e9b..012195e1799 100644 --- a/aws/data_source_aws_autoscaling_groups.go +++ b/aws/data_source_aws_autoscaling_groups.go @@ -26,11 +26,11 @@ func dataSourceAwsAutoscalingGroups() *schema.Resource { Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, }, - "values": &schema.Schema{ + "values": { Type: schema.TypeSet, Required: true, Elem: &schema.Schema{Type: schema.TypeString}, diff --git a/aws/data_source_aws_availability_zone.go b/aws/data_source_aws_availability_zone.go index 6fdf34281df..2d872b133fb 100644 --- a/aws/data_source_aws_availability_zone.go +++ b/aws/data_source_aws_availability_zone.go @@ -14,23 +14,23 @@ func dataSourceAwsAvailabilityZone() *schema.Resource { Read: dataSourceAwsAvailabilityZoneRead, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Optional: true, Computed: true, }, - "region": &schema.Schema{ + "region": { Type: schema.TypeString, Computed: true, }, - "name_suffix": &schema.Schema{ + "name_suffix": { Type: schema.TypeString, Computed: true, }, - "state": &schema.Schema{ + "state": { Type: schema.TypeString, Optional: true, Computed: true, @@ -58,7 +58,7 @@ func dataSourceAwsAvailabilityZoneRead(d *schema.ResourceData, meta interface{}) req.Filters = nil } - log.Printf("[DEBUG] DescribeAvailabilityZones %s\n", req) + log.Printf("[DEBUG] Reading Availability Zone: %s", req) resp, err := conn.DescribeAvailabilityZones(req) if err != nil { return err diff --git a/aws/data_source_aws_availability_zones.go b/aws/data_source_aws_availability_zones.go index 
dcc09438fdc..3cdc3221e02 100644 --- a/aws/data_source_aws_availability_zones.go +++ b/aws/data_source_aws_availability_zones.go @@ -47,8 +47,7 @@ func dataSourceAwsAvailabilityZonesRead(d *schema.ResourceData, meta interface{} } } - log.Printf("[DEBUG] Availability Zones request options: %#v", *request) - + log.Printf("[DEBUG] Reading Availability Zones: %s", request) resp, err := conn.DescribeAvailabilityZones(request) if err != nil { return fmt.Errorf("Error fetching Availability Zones: %s", err) diff --git a/aws/data_source_aws_billing_service_account.go b/aws/data_source_aws_billing_service_account.go index 23ec40843cd..e1425c4ffe5 100644 --- a/aws/data_source_aws_billing_service_account.go +++ b/aws/data_source_aws_billing_service_account.go @@ -24,7 +24,6 @@ func dataSourceAwsBillingServiceAccount() *schema.Resource { func dataSourceAwsBillingServiceAccountRead(d *schema.ResourceData, meta interface{}) error { d.SetId(billingAccountId) - d.Set("arn", fmt.Sprintf("arn:%s:iam::%s:root", meta.(*AWSClient).partition, billingAccountId)) return nil diff --git a/aws/data_source_aws_caller_identity.go b/aws/data_source_aws_caller_identity.go index a2adcef341f..2a87b21f782 100644 --- a/aws/data_source_aws_caller_identity.go +++ b/aws/data_source_aws_caller_identity.go @@ -35,7 +35,9 @@ func dataSourceAwsCallerIdentity() *schema.Resource { func dataSourceAwsCallerIdentityRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*AWSClient).stsconn + log.Printf("[DEBUG] Reading Caller Identity") res, err := client.GetCallerIdentity(&sts.GetCallerIdentityInput{}) + if err != nil { return fmt.Errorf("Error getting Caller Identity: %v", err) } @@ -46,5 +48,6 @@ func dataSourceAwsCallerIdentityRead(d *schema.ResourceData, meta interface{}) e d.Set("account_id", res.Account) d.Set("arn", res.Arn) d.Set("user_id", res.UserId) + return nil } diff --git a/aws/data_source_aws_canonical_user_id.go b/aws/data_source_aws_canonical_user_id.go index 
0c8a89e390a..b5d65950678 100644 --- a/aws/data_source_aws_canonical_user_id.go +++ b/aws/data_source_aws_canonical_user_id.go @@ -25,7 +25,7 @@ func dataSourceAwsCanonicalUserId() *schema.Resource { func dataSourceAwsCanonicalUserIdRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).s3conn - log.Printf("[DEBUG] Listing S3 buckets.") + log.Printf("[DEBUG] Reading S3 Buckets") req := &s3.ListBucketsInput{} resp, err := conn.ListBuckets(req) diff --git a/aws/data_source_aws_cloudformation_stack.go b/aws/data_source_aws_cloudformation_stack.go index b834e0a29b5..b991c96862a 100644 --- a/aws/data_source_aws_cloudformation_stack.go +++ b/aws/data_source_aws_cloudformation_stack.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudformation" @@ -73,11 +74,12 @@ func dataSourceAwsCloudFormationStack() *schema.Resource { func dataSourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cfconn name := d.Get("name").(string) - input := cloudformation.DescribeStacksInput{ + input := &cloudformation.DescribeStacksInput{ StackName: aws.String(name), } - out, err := conn.DescribeStacks(&input) + log.Printf("[DEBUG] Reading CloudFormation Stack: %s", input) + out, err := conn.DescribeStacks(input) if err != nil { return fmt.Errorf("Failed describing CloudFormation stack (%s): %s", name, err) } diff --git a/aws/data_source_aws_cloudtrail_service_account.go b/aws/data_source_aws_cloudtrail_service_account.go index e41b6fb3d93..cbc503ec538 100644 --- a/aws/data_source_aws_cloudtrail_service_account.go +++ b/aws/data_source_aws_cloudtrail_service_account.go @@ -29,7 +29,7 @@ func dataSourceAwsCloudTrailServiceAccount() *schema.Resource { Read: dataSourceAwsCloudTrailServiceAccountRead, Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ + "region": { Type: schema.TypeString, Optional: true, }, diff --git 
a/aws/data_source_aws_db_instance.go b/aws/data_source_aws_db_instance.go index 77521a05270..4e93681be06 100644 --- a/aws/data_source_aws_db_instance.go +++ b/aws/data_source_aws_db_instance.go @@ -205,13 +205,13 @@ func dataSourceAwsDbInstance() *schema.Resource { func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).rdsconn - opts := rds.DescribeDBInstancesInput{ + opts := &rds.DescribeDBInstancesInput{ DBInstanceIdentifier: aws.String(d.Get("db_instance_identifier").(string)), } - log.Printf("[DEBUG] DB Instance describe configuration: %#v", opts) + log.Printf("[DEBUG] Reading DB Instance: %s", opts) - resp, err := conn.DescribeDBInstances(&opts) + resp, err := conn.DescribeDBInstances(opts) if err != nil { return err } diff --git a/aws/data_source_aws_db_snapshot.go b/aws/data_source_aws_db_snapshot.go index 1aa9819eb5e..3b8f756e25a 100644 --- a/aws/data_source_aws_db_snapshot.go +++ b/aws/data_source_aws_db_snapshot.go @@ -152,6 +152,7 @@ func dataSourceAwsDbSnapshotRead(d *schema.ResourceData, meta interface{}) error params.DBSnapshotIdentifier = aws.String(snapshotIdentifier.(string)) } + log.Printf("[DEBUG] Reading DB Snapshot: %s", params) resp, err := conn.DescribeDBSnapshots(params) if err != nil { return err diff --git a/aws/data_source_aws_dynamodb_table.go b/aws/data_source_aws_dynamodb_table.go index 998d022eb5f..844bddfb982 100644 --- a/aws/data_source_aws_dynamodb_table.go +++ b/aws/data_source_aws_dynamodb_table.go @@ -21,7 +21,6 @@ func dataSourceAwsDynamoDbTable() *schema.Resource { Type: schema.TypeString, Required: true, }, - "arn": { Type: schema.TypeString, Computed: true, @@ -178,11 +177,11 @@ func dataSourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) er dynamodbconn := meta.(*AWSClient).dynamodbconn name := d.Get("name").(string) - log.Printf("[DEBUG] Loading data for DynamoDB table %q", name) req := &dynamodb.DescribeTableInput{ TableName: aws.String(name), } + 
log.Printf("[DEBUG] Reading DynamoDB Table: %s", req) result, err := dynamodbconn.DescribeTable(req) if err != nil { diff --git a/aws/data_source_aws_ebs_snapshot.go b/aws/data_source_aws_ebs_snapshot.go index 6d3c95fc2ef..7b7cb29d2df 100644 --- a/aws/data_source_aws_ebs_snapshot.go +++ b/aws/data_source_aws_ebs_snapshot.go @@ -111,6 +111,7 @@ func dataSourceAwsEbsSnapshotRead(d *schema.ResourceData, meta interface{}) erro params.SnapshotIds = expandStringList(snapshotIds.([]interface{})) } + log.Printf("[DEBUG] Reading EBS Snapshot: %s", params) resp, err := conn.DescribeSnapshots(params) if err != nil { return err diff --git a/aws/data_source_aws_ebs_snapshot_ids.go b/aws/data_source_aws_ebs_snapshot_ids.go index 17714acb6ca..65a8ab10123 100644 --- a/aws/data_source_aws_ebs_snapshot_ids.go +++ b/aws/data_source_aws_ebs_snapshot_ids.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform/helper/hashcode" @@ -26,7 +27,7 @@ func dataSourceAwsEbsSnapshotIds() *schema.Resource { ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "ids": &schema.Schema{ + "ids": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -58,6 +59,7 @@ func dataSourceAwsEbsSnapshotIdsRead(d *schema.ResourceData, meta interface{}) e params.OwnerIds = expandStringList(owners.([]interface{})) } + log.Printf("[DEBUG] Reading EBS Snapshot IDs: %s", params) resp, err := conn.DescribeSnapshots(params) if err != nil { return err diff --git a/aws/data_source_aws_ebs_volume.go b/aws/data_source_aws_ebs_volume.go index c09c238b163..f931e3a55a8 100644 --- a/aws/data_source_aws_ebs_volume.go +++ b/aws/data_source_aws_ebs_volume.go @@ -74,6 +74,7 @@ func dataSourceAwsEbsVolumeRead(d *schema.ResourceData, meta interface{}) error params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) } + log.Printf("[DEBUG] Reading EBS Volume: %s", params) resp, err := 
conn.DescribeVolumes(params) if err != nil { return err diff --git a/aws/data_source_aws_ecr_repository.go b/aws/data_source_aws_ecr_repository.go index 1552a71b6e3..e2d25a586aa 100644 --- a/aws/data_source_aws_ecr_repository.go +++ b/aws/data_source_aws_ecr_repository.go @@ -39,10 +39,11 @@ func dataSourceAwsEcrRepositoryRead(d *schema.ResourceData, meta interface{}) er conn := meta.(*AWSClient).ecrconn repositoryName := d.Get("name").(string) - log.Printf("[DEBUG] Reading ECR repository %s", repositoryName) - out, err := conn.DescribeRepositories(&ecr.DescribeRepositoriesInput{ + params := &ecr.DescribeRepositoriesInput{ RepositoryNames: []*string{aws.String(repositoryName)}, - }) + } + log.Printf("[DEBUG] Reading ECR repository: %s", params) + out, err := conn.DescribeRepositories(params) if err != nil { if ecrerr, ok := err.(awserr.Error); ok && ecrerr.Code() == "RepositoryNotFoundException" { log.Printf("[WARN] ECR Repository %s not found, removing from state", d.Id()) diff --git a/aws/data_source_aws_ecs_cluster.go b/aws/data_source_aws_ecs_cluster.go index 8f738ce6b86..b6dbc63f4c9 100644 --- a/aws/data_source_aws_ecs_cluster.go +++ b/aws/data_source_aws_ecs_cluster.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ecs" @@ -50,9 +51,11 @@ func dataSourceAwsEcsCluster() *schema.Resource { func dataSourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ecsconn - desc, err := conn.DescribeClusters(&ecs.DescribeClustersInput{ + params := &ecs.DescribeClustersInput{ Clusters: []*string{aws.String(d.Get("cluster_name").(string))}, - }) + } + log.Printf("[DEBUG] Reading ECS Cluster: %s", params) + desc, err := conn.DescribeClusters(params) if err != nil { return err diff --git a/aws/data_source_aws_ecs_container_definition.go b/aws/data_source_aws_ecs_container_definition.go index 412019ac9c7..9bb99cdaeda 100644 --- 
a/aws/data_source_aws_ecs_container_definition.go +++ b/aws/data_source_aws_ecs_container_definition.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "strings" "github.com/aws/aws-sdk-go/aws" @@ -14,47 +15,47 @@ func dataSourceAwsEcsContainerDefinition() *schema.Resource { Read: dataSourceAwsEcsContainerDefinitionRead, Schema: map[string]*schema.Schema{ - "task_definition": &schema.Schema{ + "task_definition": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "container_name": &schema.Schema{ + "container_name": { Type: schema.TypeString, Required: true, ForceNew: true, }, // Computed values. - "image": &schema.Schema{ + "image": { Type: schema.TypeString, Computed: true, }, - "image_digest": &schema.Schema{ + "image_digest": { Type: schema.TypeString, Computed: true, }, - "cpu": &schema.Schema{ + "cpu": { Type: schema.TypeInt, Computed: true, }, - "memory": &schema.Schema{ + "memory": { Type: schema.TypeInt, Computed: true, }, - "memory_reservation": &schema.Schema{ + "memory_reservation": { Type: schema.TypeInt, Computed: true, }, - "disable_networking": &schema.Schema{ + "disable_networking": { Type: schema.TypeBool, Computed: true, }, - "docker_labels": &schema.Schema{ + "docker_labels": { Type: schema.TypeMap, Computed: true, Elem: schema.TypeString, }, - "environment": &schema.Schema{ + "environment": { Type: schema.TypeMap, Computed: true, Elem: schema.TypeString, @@ -66,9 +67,11 @@ func dataSourceAwsEcsContainerDefinition() *schema.Resource { func dataSourceAwsEcsContainerDefinitionRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ecsconn - desc, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{ + params := &ecs.DescribeTaskDefinitionInput{ TaskDefinition: aws.String(d.Get("task_definition").(string)), - }) + } + log.Printf("[DEBUG] Reading ECS Container Definition: %s", params) + desc, err := conn.DescribeTaskDefinition(params) if err != nil { return err diff --git 
a/aws/data_source_aws_ecs_task_definition.go b/aws/data_source_aws_ecs_task_definition.go index 3a5096a3b96..a734bb0e8af 100644 --- a/aws/data_source_aws_ecs_task_definition.go +++ b/aws/data_source_aws_ecs_task_definition.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ecs" @@ -13,29 +14,29 @@ func dataSourceAwsEcsTaskDefinition() *schema.Resource { Read: dataSourceAwsEcsTaskDefinitionRead, Schema: map[string]*schema.Schema{ - "task_definition": &schema.Schema{ + "task_definition": { Type: schema.TypeString, Required: true, ForceNew: true, }, // Computed values. - "family": &schema.Schema{ + "family": { Type: schema.TypeString, Computed: true, }, - "network_mode": &schema.Schema{ + "network_mode": { Type: schema.TypeString, Computed: true, }, - "revision": &schema.Schema{ + "revision": { Type: schema.TypeInt, Computed: true, }, - "status": &schema.Schema{ + "status": { Type: schema.TypeString, Computed: true, }, - "task_role_arn": &schema.Schema{ + "task_role_arn": { Type: schema.TypeString, Computed: true, }, @@ -46,9 +47,11 @@ func dataSourceAwsEcsTaskDefinition() *schema.Resource { func dataSourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ecsconn - desc, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{ + params := &ecs.DescribeTaskDefinitionInput{ TaskDefinition: aws.String(d.Get("task_definition").(string)), - }) + } + log.Printf("[DEBUG] Reading ECS Task Definition: %s", params) + desc, err := conn.DescribeTaskDefinition(params) if err != nil { return fmt.Errorf("Failed getting task definition %s %q", err, d.Get("task_definition").(string)) diff --git a/aws/data_source_aws_efs_file_system.go b/aws/data_source_aws_efs_file_system.go index 2f91772c0bd..ab518d6f80c 100644 --- a/aws/data_source_aws_efs_file_system.go +++ b/aws/data_source_aws_efs_file_system.go @@ -62,6 +62,7 @@ func 
dataSourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) er describeEfsOpts.FileSystemId = aws.String(v.(string)) } + log.Printf("[DEBUG] Reading EFS File System: %s", describeEfsOpts) describeResp, err := efsconn.DescribeFileSystems(describeEfsOpts) if err != nil { return errwrap.Wrapf("Error retrieving EFS: {{err}}", err) diff --git a/aws/data_source_aws_efs_mount_target.go b/aws/data_source_aws_efs_mount_target.go index fee845f6801..e2c6045d73b 100644 --- a/aws/data_source_aws_efs_mount_target.go +++ b/aws/data_source_aws_efs_mount_target.go @@ -58,6 +58,7 @@ func dataSourceAwsEfsMountTargetRead(d *schema.ResourceData, meta interface{}) e MountTargetId: aws.String(d.Get("mount_target_id").(string)), } + log.Printf("[DEBUG] Reading EFS Mount Target: %s", describeEfsOpts) resp, err := efsconn.DescribeMountTargets(describeEfsOpts) if err != nil { return errwrap.Wrapf("Error retrieving EFS Mount Target: {{err}}", err) diff --git a/aws/data_source_aws_eip.go b/aws/data_source_aws_eip.go index fa6126e57a4..f461a374e8b 100644 --- a/aws/data_source_aws_eip.go +++ b/aws/data_source_aws_eip.go @@ -14,12 +14,12 @@ func dataSourceAwsEip() *schema.Resource { Read: dataSourceAwsEipRead, Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ + "id": { Type: schema.TypeString, Optional: true, Computed: true, }, - "public_ip": &schema.Schema{ + "public_ip": { Type: schema.TypeString, Optional: true, Computed: true, @@ -41,7 +41,7 @@ func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { req.PublicIps = []*string{aws.String(public_ip.(string))} } - log.Printf("[DEBUG] DescribeAddresses %s\n", req) + log.Printf("[DEBUG] Reading EIP: %s", req) resp, err := conn.DescribeAddresses(req) if err != nil { return err diff --git a/aws/data_source_aws_elastic_beanstalk_solution_stack.go b/aws/data_source_aws_elastic_beanstalk_solution_stack.go index f9bec5bcebf..962aebf1084 100644 --- a/aws/data_source_aws_elastic_beanstalk_solution_stack.go +++ 
b/aws/data_source_aws_elastic_beanstalk_solution_stack.go @@ -43,6 +43,7 @@ func dataSourceAwsElasticBeanstalkSolutionStackRead(d *schema.ResourceData, meta var params *elasticbeanstalk.ListAvailableSolutionStacksInput + log.Printf("[DEBUG] Reading Elastic Beanstalk Solution Stack: %s", params) resp, err := conn.ListAvailableSolutionStacks(params) if err != nil { return err diff --git a/aws/data_source_aws_elasticache_cluster.go b/aws/data_source_aws_elasticache_cluster.go index eaa539d3a52..b50c65afb24 100644 --- a/aws/data_source_aws_elasticache_cluster.go +++ b/aws/data_source_aws_elasticache_cluster.go @@ -157,6 +157,7 @@ func dataSourceAwsElastiCacheClusterRead(d *schema.ResourceData, meta interface{ ShowCacheNodeInfo: aws.Bool(true), } + log.Printf("[DEBUG] Reading ElastiCache Cluster: %s", req) resp, err := conn.DescribeCacheClusters(req) if err != nil { return err diff --git a/aws/data_source_aws_elasticache_replication_group.go b/aws/data_source_aws_elasticache_replication_group.go index 02aa41cfe7b..938eea1a613 100644 --- a/aws/data_source_aws_elasticache_replication_group.go +++ b/aws/data_source_aws_elasticache_replication_group.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elasticache" @@ -66,6 +67,7 @@ func dataSourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta i ReplicationGroupId: aws.String(d.Get("replication_group_id").(string)), } + log.Printf("[DEBUG] Reading ElastiCache Replication Group: %s", input) resp, err := conn.DescribeReplicationGroups(input) if err != nil { return err diff --git a/aws/data_source_aws_elb.go b/aws/data_source_aws_elb.go index e997c9ffc25..626c2eaae6e 100644 --- a/aws/data_source_aws_elb.go +++ b/aws/data_source_aws_elb.go @@ -198,7 +198,7 @@ func dataSourceAwsElbRead(d *schema.ResourceData, meta interface{}) error { LoadBalancerNames: []*string{aws.String(lbName)}, } - log.Printf("[DEBUG] Reading ELBs: %#v", input) + 
log.Printf("[DEBUG] Reading ELB: %s", input) resp, err := elbconn.DescribeLoadBalancers(input) if err != nil { return fmt.Errorf("Error retrieving LB: %s", err) diff --git a/aws/data_source_aws_iam_group.go b/aws/data_source_aws_iam_group.go index 53cfecc86ed..942f2a97de4 100644 --- a/aws/data_source_aws_iam_group.go +++ b/aws/data_source_aws_iam_group.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" @@ -43,6 +44,7 @@ func dataSourceAwsIAMGroupRead(d *schema.ResourceData, meta interface{}) error { GroupName: aws.String(groupName), } + log.Printf("[DEBUG] Reading IAM Group: %s", req) resp, err := iamconn.GetGroup(req) if err != nil { return errwrap.Wrapf("Error getting group: {{err}}", err) diff --git a/aws/data_source_aws_iam_instance_profile.go b/aws/data_source_aws_iam_instance_profile.go index 12cec5ac83e..305e2e4e734 100644 --- a/aws/data_source_aws_iam_instance_profile.go +++ b/aws/data_source_aws_iam_instance_profile.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" @@ -47,6 +48,7 @@ func dataSourceAwsIAMInstanceProfileRead(d *schema.ResourceData, meta interface{ InstanceProfileName: aws.String(name), } + log.Printf("[DEBUG] Reading IAM Instance Profile: %s", req) resp, err := iamconn.GetInstanceProfile(req) if err != nil { return errwrap.Wrapf("Error getting instance profiles: {{err}}", err) diff --git a/aws/data_source_aws_iam_policy_document.go b/aws/data_source_aws_iam_policy_document.go index 2366ae4bc7b..2c89355d6d0 100644 --- a/aws/data_source_aws_iam_policy_document.go +++ b/aws/data_source_aws_iam_policy_document.go @@ -215,11 +215,11 @@ func dataSourceAwsIamPolicyPrincipalSchema() *schema.Schema { Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ + "type": { Type: schema.TypeString, Required: true, }, - "identifiers": &schema.Schema{ + 
"identifiers": { Type: schema.TypeSet, Required: true, Elem: &schema.Schema{ diff --git a/aws/data_source_aws_iam_server_certificate.go b/aws/data_source_aws_iam_server_certificate.go index 4be79462334..e39a7843f0e 100644 --- a/aws/data_source_aws_iam_server_certificate.go +++ b/aws/data_source_aws_iam_server_certificate.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "sort" "strings" @@ -98,6 +99,7 @@ func dataSourceAwsIAMServerCertificateRead(d *schema.ResourceData, meta interfac } var metadatas = []*iam.ServerCertificateMetadata{} + log.Printf("[DEBUG] Reading IAM Server Certificate") err := iamconn.ListServerCertificatesPages(&iam.ListServerCertificatesInput{}, func(p *iam.ListServerCertificatesOutput, lastPage bool) bool { for _, cert := range p.ServerCertificateMetadataList { if matcher(cert) { diff --git a/aws/data_source_aws_iam_user.go b/aws/data_source_aws_iam_user.go index e3c82667ed8..72d3e47d938 100644 --- a/aws/data_source_aws_iam_user.go +++ b/aws/data_source_aws_iam_user.go @@ -1,6 +1,8 @@ package aws import ( + "log" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" "github.com/hashicorp/errwrap" @@ -39,6 +41,7 @@ func dataSourceAwsIAMUserRead(d *schema.ResourceData, meta interface{}) error { UserName: aws.String(userName), } + log.Printf("[DEBUG] Reading IAM User: %s", req) resp, err := iamconn.GetUser(req) if err != nil { return errwrap.Wrapf("error getting user: {{err}}", err) diff --git a/aws/data_source_aws_instance.go b/aws/data_source_aws_instance.go index 12ae005043b..7afd83fb958 100644 --- a/aws/data_source_aws_instance.go +++ b/aws/data_source_aws_instance.go @@ -233,7 +233,7 @@ func dataSourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error { )...) 
} - // Perform the lookup + log.Printf("[DEBUG] Reading IAM Instance: %s", params) resp, err := conn.DescribeInstances(params) if err != nil { return err diff --git a/aws/data_source_aws_instances.go b/aws/data_source_aws_instances.go index 52bff321a02..7196e3189e7 100644 --- a/aws/data_source_aws_instances.go +++ b/aws/data_source_aws_instances.go @@ -65,7 +65,7 @@ func dataSourceAwsInstancesRead(d *schema.ResourceData, meta interface{}) error )...) } - log.Printf("[INFO] Describing EC2 instances: %s", params) + log.Printf("[DEBUG] Reading EC2 instances: %s", params) var instanceIds, privateIps, publicIps []string err := conn.DescribeInstancesPages(params, func(resp *ec2.DescribeInstancesOutput, isLast bool) bool { diff --git a/aws/data_source_aws_internet_gateway.go b/aws/data_source_aws_internet_gateway.go index c88ad2ccd83..2c318457747 100644 --- a/aws/data_source_aws_internet_gateway.go +++ b/aws/data_source_aws_internet_gateway.go @@ -60,8 +60,8 @@ func dataSourceAwsInternetGatewayRead(d *schema.ResourceData, meta interface{}) req.Filters = append(req.Filters, buildEC2CustomFilterList( filter.(*schema.Set), )...) 
- log.Printf("[DEBUG] Describe Internet Gateways %v\n", req) + log.Printf("[DEBUG] Reading Internet Gateway: %s", req) resp, err := conn.DescribeInternetGateways(req) if err != nil { diff --git a/aws/data_source_aws_ip_ranges.go b/aws/data_source_aws_ip_ranges.go index 32e9d898832..3e1aa7d1a96 100644 --- a/aws/data_source_aws_ip_ranges.go +++ b/aws/data_source_aws_ip_ranges.go @@ -30,26 +30,26 @@ func dataSourceAwsIPRanges() *schema.Resource { Read: dataSourceAwsIPRangesRead, Schema: map[string]*schema.Schema{ - "cidr_blocks": &schema.Schema{ + "cidr_blocks": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "create_date": &schema.Schema{ + "create_date": { Type: schema.TypeString, Computed: true, }, - "regions": &schema.Schema{ + "regions": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, }, - "services": &schema.Schema{ + "services": { Type: schema.TypeSet, Required: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "sync_token": &schema.Schema{ + "sync_token": { Type: schema.TypeInt, Computed: true, }, diff --git a/aws/data_source_aws_kinesis_stream.go b/aws/data_source_aws_kinesis_stream.go index ebc843d11c8..3c82c1a7081 100644 --- a/aws/data_source_aws_kinesis_stream.go +++ b/aws/data_source_aws_kinesis_stream.go @@ -11,53 +11,53 @@ func dataSourceAwsKinesisStream() *schema.Resource { Read: dataSourceAwsKinesisStreamRead, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, }, - "arn": &schema.Schema{ + "arn": { Type: schema.TypeString, Computed: true, }, - "creation_timestamp": &schema.Schema{ + "creation_timestamp": { Type: schema.TypeInt, Computed: true, }, - "status": &schema.Schema{ + "status": { Type: schema.TypeString, Computed: true, }, - "retention_period": &schema.Schema{ + "retention_period": { Type: schema.TypeInt, Computed: true, }, - "open_shards": &schema.Schema{ + "open_shards": { Type: 
schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "closed_shards": &schema.Schema{ + "closed_shards": { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "shard_level_metrics": &schema.Schema{ + "shard_level_metrics": { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "tags": &schema.Schema{ + "tags": { Type: schema.TypeMap, Computed: true, }, diff --git a/aws/data_source_aws_kms_alias.go b/aws/data_source_aws_kms_alias.go index 41c33b680cf..5117504ea52 100644 --- a/aws/data_source_aws_kms_alias.go +++ b/aws/data_source_aws_kms_alias.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "time" "github.com/aws/aws-sdk-go/service/kms" @@ -36,6 +37,7 @@ func dataSourceAwsKmsAliasRead(d *schema.ResourceData, meta interface{}) error { target := d.Get("name") var alias *kms.AliasListEntry + log.Printf("[DEBUG] Reading KMS Alias: %s", params) err := conn.ListAliasesPages(params, func(page *kms.ListAliasesOutput, lastPage bool) bool { for _, entity := range page.Aliases { if *entity.AliasName == target { diff --git a/aws/data_source_aws_kms_ciphertext.go b/aws/data_source_aws_kms_ciphertext.go index 4ffc3465f14..5125e2b92ce 100644 --- a/aws/data_source_aws_kms_ciphertext.go +++ b/aws/data_source_aws_kms_ciphertext.go @@ -25,7 +25,7 @@ func dataSourceAwsKmsCiphertext() *schema.Resource { Required: true, }, - "context": &schema.Schema{ + "context": { Type: schema.TypeMap, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -54,7 +54,6 @@ func dataSourceAwsKmsCiphertextRead(d *schema.ResourceData, meta interface{}) er } log.Printf("[DEBUG] KMS encrypt for key: %s", d.Get("key_id").(string)) - resp, err := conn.Encrypt(req) if err != nil { return err diff --git a/aws/data_source_aws_kms_secret.go b/aws/data_source_aws_kms_secret.go index 92d5134fd95..3b022dfb5f2 100644 --- 
a/aws/data_source_aws_kms_secret.go +++ b/aws/data_source_aws_kms_secret.go @@ -16,26 +16,26 @@ func dataSourceAwsKmsSecret() *schema.Resource { Read: dataSourceAwsKmsSecretRead, Schema: map[string]*schema.Schema{ - "secret": &schema.Schema{ + "secret": { Type: schema.TypeSet, Required: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, }, - "payload": &schema.Schema{ + "payload": { Type: schema.TypeString, Required: true, }, - "context": &schema.Schema{ + "context": { Type: schema.TypeMap, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "grant_tokens": &schema.Schema{ + "grant_tokens": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, diff --git a/aws/data_source_aws_lb.go b/aws/data_source_aws_lb.go index 2b3864112a7..0d46b4e2b00 100644 --- a/aws/data_source_aws_lb.go +++ b/aws/data_source_aws_lb.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" @@ -136,6 +137,7 @@ func dataSourceAwsLbRead(d *schema.ResourceData, meta interface{}) error { describeLbOpts.Names = []*string{aws.String(lbName)} } + log.Printf("[DEBUG] Reading Load Balancer: %s", describeLbOpts) describeResp, err := elbconn.DescribeLoadBalancers(describeLbOpts) if err != nil { return errwrap.Wrapf("Error retrieving LB: {{err}}", err) diff --git a/aws/data_source_aws_lb_target_group.go b/aws/data_source_aws_lb_target_group.go index 741772ce2ce..f109194489c 100644 --- a/aws/data_source_aws_lb_target_group.go +++ b/aws/data_source_aws_lb_target_group.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" @@ -138,6 +139,7 @@ func dataSourceAwsLbTargetGroupRead(d *schema.ResourceData, meta interface{}) er describeTgOpts.Names = []*string{aws.String(tgName)} } + log.Printf("[DEBUG] Reading Load 
Balancer Target Group: %s", describeTgOpts) describeResp, err := elbconn.DescribeTargetGroups(describeTgOpts) if err != nil { return errwrap.Wrapf("Error retrieving LB Target Group: {{err}}", err) diff --git a/aws/data_source_aws_nat_gateway.go b/aws/data_source_aws_nat_gateway.go index 091f3326277..98947f397c2 100644 --- a/aws/data_source_aws_nat_gateway.go +++ b/aws/data_source_aws_nat_gateway.go @@ -102,7 +102,7 @@ func dataSourceAwsNatGatewayRead(d *schema.ResourceData, meta interface{}) error return err } if resp == nil || len(resp.NatGateways) == 0 { - return fmt.Errorf("no matching NAT gateway found: %#v", req) + return fmt.Errorf("no matching NAT gateway found: %s", req) } if len(resp.NatGateways) > 1 { return fmt.Errorf("multiple NAT gateways matched; use additional constraints to reduce matches to a single NAT gateway") diff --git a/aws/data_source_aws_network_interface.go b/aws/data_source_aws_network_interface.go index 6a8f56729ac..26d4be418bb 100644 --- a/aws/data_source_aws_network_interface.go +++ b/aws/data_source_aws_network_interface.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" @@ -16,52 +17,52 @@ func dataSourceAwsNetworkInterface() *schema.Resource { Type: schema.TypeString, Required: true, }, - "association": &schema.Schema{ + "association": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "allocation_id": &schema.Schema{ + "allocation_id": { Type: schema.TypeString, Computed: true, }, - "association_id": &schema.Schema{ + "association_id": { Type: schema.TypeString, Computed: true, }, - "ip_owner_id": &schema.Schema{ + "ip_owner_id": { Type: schema.TypeString, Computed: true, }, - "public_dns_name": &schema.Schema{ + "public_dns_name": { Type: schema.TypeString, Computed: true, }, - "public_ip": &schema.Schema{ + "public_ip": { Type: schema.TypeString, Computed: true, }, }, }, }, - "attachment": &schema.Schema{ 
+ "attachment": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "attachment_id": &schema.Schema{ + "attachment_id": { Type: schema.TypeString, Computed: true, }, - "device_index": &schema.Schema{ + "device_index": { Type: schema.TypeInt, Computed: true, }, - "instance_id": &schema.Schema{ + "instance_id": { Type: schema.TypeString, Computed: true, }, - "instance_owner_id": &schema.Schema{ + "instance_owner_id": { Type: schema.TypeString, Computed: true, }, @@ -90,23 +91,23 @@ func dataSourceAwsNetworkInterface() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "mac_address": &schema.Schema{ + "mac_address": { Type: schema.TypeString, Computed: true, }, - "owner_id": &schema.Schema{ + "owner_id": { Type: schema.TypeString, Computed: true, }, - "private_dns_name": &schema.Schema{ + "private_dns_name": { Type: schema.TypeString, Computed: true, }, - "private_ip": &schema.Schema{ + "private_ip": { Type: schema.TypeString, Computed: true, }, - "private_ips": &schema.Schema{ + "private_ips": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -135,6 +136,7 @@ func dataSourceAwsNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) NetworkInterfaceIds: []*string{aws.String(d.Get("id").(string))}, } + log.Printf("[DEBUG] Reading Network Interface: %s", input) resp, err := conn.DescribeNetworkInterfaces(input) if err != nil { return err diff --git a/aws/data_source_aws_prefix_list.go b/aws/data_source_aws_prefix_list.go index 876af586db3..5a251a737d3 100644 --- a/aws/data_source_aws_prefix_list.go +++ b/aws/data_source_aws_prefix_list.go @@ -14,16 +14,16 @@ func dataSourceAwsPrefixList() *schema.Resource { Read: dataSourceAwsPrefixListRead, Schema: map[string]*schema.Schema{ - "prefix_list_id": &schema.Schema{ + "prefix_list_id": { Type: schema.TypeString, Optional: true, }, - "name": &schema.Schema{ + "name": { Type: schema.TypeString, 
Optional: true, Computed: true, }, - "cidr_blocks": &schema.Schema{ + "cidr_blocks": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -46,7 +46,7 @@ func dataSourceAwsPrefixListRead(d *schema.ResourceData, meta interface{}) error }, ) - log.Printf("[DEBUG] DescribePrefixLists %s\n", req) + log.Printf("[DEBUG] Reading Prefix List: %s", req) resp, err := conn.DescribePrefixLists(req) if err != nil { return err diff --git a/aws/data_source_aws_rds_cluster.go b/aws/data_source_aws_rds_cluster.go index 0e5e3ee7c75..6fea1159112 100644 --- a/aws/data_source_aws_rds_cluster.go +++ b/aws/data_source_aws_rds_cluster.go @@ -1,6 +1,7 @@ package aws import ( + "log" "strings" "github.com/aws/aws-sdk-go/aws" @@ -150,9 +151,11 @@ func dataSourceAwsRdsCluster() *schema.Resource { func dataSourceAwsRdsClusterRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).rdsconn - resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ + params := &rds.DescribeDBClustersInput{ DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), - }) + } + log.Printf("[DEBUG] Reading RDS Cluster: %s", params) + resp, err := conn.DescribeDBClusters(params) if err != nil { return errwrap.Wrapf("Error retrieving RDS cluster: {{err}}", err) diff --git a/aws/data_source_aws_redshift_service_account.go b/aws/data_source_aws_redshift_service_account.go index b7a9a230774..ad6d6e68f1e 100644 --- a/aws/data_source_aws_redshift_service_account.go +++ b/aws/data_source_aws_redshift_service_account.go @@ -29,7 +29,7 @@ func dataSourceAwsRedshiftServiceAccount() *schema.Resource { Read: dataSourceAwsRedshiftServiceAccountRead, Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ + "region": { Type: schema.TypeString, Optional: true, }, diff --git a/aws/data_source_aws_region.go b/aws/data_source_aws_region.go index 6d2a21d1d6d..018f4138a0f 100644 --- a/aws/data_source_aws_region.go +++ 
b/aws/data_source_aws_region.go @@ -14,19 +14,19 @@ func dataSourceAwsRegion() *schema.Resource { Read: dataSourceAwsRegionRead, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Optional: true, Computed: true, }, - "current": &schema.Schema{ + "current": { Type: schema.TypeBool, Optional: true, Computed: true, }, - "endpoint": &schema.Schema{ + "endpoint": { Type: schema.TypeString, Optional: true, Computed: true, @@ -60,7 +60,7 @@ func dataSourceAwsRegionRead(d *schema.ResourceData, meta interface{}) error { req.Filters = nil } - log.Printf("[DEBUG] DescribeRegions %s\n", req) + log.Printf("[DEBUG] Reading Region: %s", req) resp, err := conn.DescribeRegions(req) if err != nil { return err diff --git a/aws/data_source_aws_route53_zone.go b/aws/data_source_aws_route53_zone.go index b3de4eed49b..55629f5def5 100644 --- a/aws/data_source_aws_route53_zone.go +++ b/aws/data_source_aws_route53_zone.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "strings" "github.com/aws/aws-sdk-go/aws" @@ -63,7 +64,9 @@ func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) erro tags := tagsFromMap(d.Get("tags").(map[string]interface{})) if nameExists && idExists { return fmt.Errorf("zone_id and name arguments can't be used together") - } else if !nameExists && !idExists { + } + + if !nameExists && !idExists { return fmt.Errorf("Either name or zone_id must be set") } @@ -76,6 +79,7 @@ func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) erro if nextMarker != nil { req.Marker = nextMarker } + log.Printf("[DEBUG] Reading Route53 Zone: %s", req) resp, err := conn.ListHostedZones(req) if err != nil { @@ -137,15 +141,14 @@ func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) erro if matchingTags && matchingVPC { if hostedZoneFound != nil { return fmt.Errorf("multiple Route53Zone found please use vpc_id option to filter") - } else { - hostedZoneFound = hostedZone } + + 
hostedZoneFound = hostedZone } } } if *resp.IsTruncated { - nextMarker = resp.NextMarker } else { allHostedZoneListed = true @@ -170,7 +173,7 @@ func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) erro func hostedZoneName(name string) string { if strings.HasSuffix(name, ".") { return name - } else { - return name + "." } + + return name + "." } diff --git a/aws/data_source_aws_route_table.go b/aws/data_source_aws_route_table.go index c332bdd913a..79853a01700 100644 --- a/aws/data_source_aws_route_table.go +++ b/aws/data_source_aws_route_table.go @@ -135,7 +135,7 @@ func dataSourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error filter.(*schema.Set), )...) - log.Printf("[DEBUG] Describe Route Tables %v\n", req) + log.Printf("[DEBUG] Reading Route Table: %s", req) resp, err := conn.DescribeRouteTables(req) if err != nil { return err diff --git a/aws/data_source_aws_s3_bucket_object.go b/aws/data_source_aws_s3_bucket_object.go index 2eff5e6dab2..b501443a7f7 100644 --- a/aws/data_source_aws_s3_bucket_object.go +++ b/aws/data_source_aws_s3_bucket_object.go @@ -18,84 +18,84 @@ func dataSourceAwsS3BucketObject() *schema.Resource { Read: dataSourceAwsS3BucketObjectRead, Schema: map[string]*schema.Schema{ - "body": &schema.Schema{ + "body": { Type: schema.TypeString, Computed: true, }, - "bucket": &schema.Schema{ + "bucket": { Type: schema.TypeString, Required: true, }, - "cache_control": &schema.Schema{ + "cache_control": { Type: schema.TypeString, Computed: true, }, - "content_disposition": &schema.Schema{ + "content_disposition": { Type: schema.TypeString, Computed: true, }, - "content_encoding": &schema.Schema{ + "content_encoding": { Type: schema.TypeString, Computed: true, }, - "content_language": &schema.Schema{ + "content_language": { Type: schema.TypeString, Computed: true, }, - "content_length": &schema.Schema{ + "content_length": { Type: schema.TypeInt, Computed: true, }, - "content_type": &schema.Schema{ + "content_type": 
{ Type: schema.TypeString, Computed: true, }, - "etag": &schema.Schema{ + "etag": { Type: schema.TypeString, Computed: true, }, - "expiration": &schema.Schema{ + "expiration": { Type: schema.TypeString, Computed: true, }, - "expires": &schema.Schema{ + "expires": { Type: schema.TypeString, Computed: true, }, - "key": &schema.Schema{ + "key": { Type: schema.TypeString, Required: true, }, - "last_modified": &schema.Schema{ + "last_modified": { Type: schema.TypeString, Computed: true, }, - "metadata": &schema.Schema{ + "metadata": { Type: schema.TypeMap, Computed: true, }, - "range": &schema.Schema{ + "range": { Type: schema.TypeString, Optional: true, }, - "server_side_encryption": &schema.Schema{ + "server_side_encryption": { Type: schema.TypeString, Computed: true, }, - "sse_kms_key_id": &schema.Schema{ + "sse_kms_key_id": { Type: schema.TypeString, Computed: true, }, - "storage_class": &schema.Schema{ + "storage_class": { Type: schema.TypeString, Computed: true, }, - "version_id": &schema.Schema{ + "version_id": { Type: schema.TypeString, Optional: true, Computed: true, }, - "website_redirect_location": &schema.Schema{ + "website_redirect_location": { Type: schema.TypeString, Computed: true, }, @@ -129,7 +129,7 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e uniqueId += "@" + v.(string) } - log.Printf("[DEBUG] Reading S3 object: %s", input) + log.Printf("[DEBUG] Reading S3 Bucket Object: %s", input) out, err := conn.HeadObject(&input) if err != nil { return fmt.Errorf("Failed getting S3 object: %s Bucket: %q Object: %q", err, bucket, key) diff --git a/aws/data_source_aws_security_group.go b/aws/data_source_aws_security_group.go index ec659e4002b..7402c0ace88 100644 --- a/aws/data_source_aws_security_group.go +++ b/aws/data_source_aws_security_group.go @@ -72,7 +72,7 @@ func dataSourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) er req.Filters = nil } - log.Printf("[DEBUG] Describe Security Groups %v\n", req) + 
log.Printf("[DEBUG] Reading Security Group: %s", req) resp, err := conn.DescribeSecurityGroups(req) if err != nil { return err diff --git a/aws/data_source_aws_sns.go b/aws/data_source_aws_sns.go index c02ec328a8a..19ce56d3e69 100644 --- a/aws/data_source_aws_sns.go +++ b/aws/data_source_aws_sns.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "regexp" "time" @@ -42,6 +43,7 @@ func dataSourceAwsSnsTopicsRead(d *schema.ResourceData, meta interface{}) error target := d.Get("name") var arns []string + log.Printf("[DEBUG] Reading SNS Topic: %s", params) err := conn.ListTopicsPages(params, func(page *sns.ListTopicsOutput, lastPage bool) bool { for _, topic := range page.Topics { topicPattern := fmt.Sprintf(".*:%v$", target) diff --git a/aws/data_source_aws_ssm_parameter.go b/aws/data_source_aws_ssm_parameter.go index 28e1955ffce..f5be43e876e 100644 --- a/aws/data_source_aws_ssm_parameter.go +++ b/aws/data_source_aws_ssm_parameter.go @@ -41,8 +41,6 @@ func dataAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error { name := d.Get("name").(string) - log.Printf("[DEBUG] Reading SSM Parameter: %q", name) - paramInput := &ssm.GetParametersInput{ Names: []*string{ aws.String(name), @@ -50,6 +48,7 @@ func dataAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error { WithDecryption: aws.Bool(true), } + log.Printf("[DEBUG] Reading SSM Parameter: %s", paramInput) resp, err := ssmconn.GetParameters(paramInput) if err != nil { diff --git a/aws/data_source_aws_subnet.go b/aws/data_source_aws_subnet.go index 70cb8fbdfdc..bdea59c4688 100644 --- a/aws/data_source_aws_subnet.go +++ b/aws/data_source_aws_subnet.go @@ -124,7 +124,7 @@ func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { req.Filters = nil } - log.Printf("[DEBUG] DescribeSubnets %s\n", req) + log.Printf("[DEBUG] Reading Subnet: %s", req) resp, err := conn.DescribeSubnets(req) if err != nil { return err diff --git a/aws/data_source_aws_subnet_ids.go 
b/aws/data_source_aws_subnet_ids.go index c1a495aa1ad..a2f5f0c46aa 100644 --- a/aws/data_source_aws_subnet_ids.go +++ b/aws/data_source_aws_subnet_ids.go @@ -15,12 +15,12 @@ func dataSourceAwsSubnetIDs() *schema.Resource { "tags": tagsSchemaComputed(), - "vpc_id": &schema.Schema{ + "vpc_id": { Type: schema.TypeString, Required: true, }, - "ids": &schema.Schema{ + "ids": { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, diff --git a/aws/data_source_aws_vpc.go b/aws/data_source_aws_vpc.go index 1b8852b1790..583d69333fb 100644 --- a/aws/data_source_aws_vpc.go +++ b/aws/data_source_aws_vpc.go @@ -119,7 +119,7 @@ func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { req.Filters = nil } - log.Printf("[DEBUG] DescribeVpcs %s\n", req) + log.Printf("[DEBUG] Reading AWS VPC: %s", req) resp, err := conn.DescribeVpcs(req) if err != nil { return err diff --git a/aws/data_source_aws_vpc_endpoint.go b/aws/data_source_aws_vpc_endpoint.go index 0503fe43e7d..808649e000f 100644 --- a/aws/data_source_aws_vpc_endpoint.go +++ b/aws/data_source_aws_vpc_endpoint.go @@ -57,8 +57,6 @@ func dataSourceAwsVpcEndpoint() *schema.Resource { func dataSourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - log.Printf("[DEBUG] Reading VPC Endpoints.") - req := &ec2.DescribeVpcEndpointsInput{} if id, ok := d.GetOk("id"); ok { @@ -77,6 +75,7 @@ func dataSourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) erro req.Filters = nil } + log.Printf("[DEBUG] Reading VPC Endpoint: %s", req) resp, err := conn.DescribeVpcEndpoints(req) if err != nil { return err diff --git a/aws/data_source_aws_vpc_endpoint_service.go b/aws/data_source_aws_vpc_endpoint_service.go index 8860b39a7df..769b7ec0ea0 100644 --- a/aws/data_source_aws_vpc_endpoint_service.go +++ b/aws/data_source_aws_vpc_endpoint_service.go @@ -33,11 +33,9 @@ func dataSourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta 
interface{ conn := meta.(*AWSClient).ec2conn service := d.Get("service").(string) - - log.Printf("[DEBUG] Reading VPC Endpoint Services.") - request := &ec2.DescribeVpcEndpointServicesInput{} + log.Printf("[DEBUG] Reading VPC Endpoint Service: %s", request) resp, err := conn.DescribeVpcEndpointServices(request) if err != nil { return fmt.Errorf("Error fetching VPC Endpoint Services: %s", err) diff --git a/aws/data_source_aws_vpc_peering_connection.go b/aws/data_source_aws_vpc_peering_connection.go index 489a7262414..9caf9d86683 100644 --- a/aws/data_source_aws_vpc_peering_connection.go +++ b/aws/data_source_aws_vpc_peering_connection.go @@ -103,6 +103,7 @@ func dataSourceAwsVpcPeeringConnectionRead(d *schema.ResourceData, meta interfac req.Filters = nil } + log.Printf("[DEBUG] Reading VPC Peering Connection: %s", req) resp, err := conn.DescribeVpcPeeringConnections(req) if err != nil { return err diff --git a/aws/data_source_aws_vpn_gateway.go b/aws/data_source_aws_vpn_gateway.go index 5d088e54845..10d33360c0f 100644 --- a/aws/data_source_aws_vpn_gateway.go +++ b/aws/data_source_aws_vpn_gateway.go @@ -43,8 +43,6 @@ func dataSourceAwsVpnGateway() *schema.Resource { func dataSourceAwsVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - log.Printf("[DEBUG] Reading VPN Gateways.") - req := &ec2.DescribeVpnGatewaysInput{} if id, ok := d.GetOk("id"); ok { @@ -76,6 +74,7 @@ func dataSourceAwsVpnGatewayRead(d *schema.ResourceData, meta interface{}) error req.Filters = nil } + log.Printf("[DEBUG] Reading VPN Gateway: %s", req) resp, err := conn.DescribeVpnGateways(req) if err != nil { return err From b3bd6ff67bf68e6a3f8eb16bbc3288d8879787bd Mon Sep 17 00:00:00 2001 From: Charlie Duong Date: Tue, 19 Dec 2017 11:22:18 -0500 Subject: [PATCH 028/350] Minor fix for aws_default_security_group resource doc --- website/docs/r/default_security_group.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/website/docs/r/default_security_group.html.markdown b/website/docs/r/default_security_group.html.markdown index d5729fd89a3..631270042d5 100644 --- a/website/docs/r/default_security_group.html.markdown +++ b/website/docs/r/default_security_group.html.markdown @@ -75,7 +75,7 @@ resource "aws_vpc" "mainvpc" { } resource "aws_default_security_group" "default" { - vpc_id = "${aws_vpc.mainvpc.vpc}" + vpc_id = "${aws_vpc.mainvpc.id}" ingress { protocol = -1 From f87b2ac7af0593d3be875eb4b196b91ac040eacd Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 13 Dec 2017 08:39:13 +0000 Subject: [PATCH 029/350] Randomize names in Lambda-related tests --- aws/import_aws_lambda_function_test.go | 28 +- ...s_kinesis_firehose_delivery_stream_test.go | 51 +- aws/resource_aws_lambda_alias_test.go | 29 +- ...ce_aws_lambda_event_source_mapping_test.go | 82 +- aws/resource_aws_lambda_function_test.go | 725 ++++++++++-------- aws/resource_aws_lambda_permission_test.go | 185 +++-- 6 files changed, 630 insertions(+), 470 deletions(-) diff --git a/aws/import_aws_lambda_function_test.go b/aws/import_aws_lambda_function_test.go index 8815c4cef0c..d47a79b7121 100644 --- a/aws/import_aws_lambda_function_test.go +++ b/aws/import_aws_lambda_function_test.go @@ -38,7 +38,7 @@ func testSweepLambdaFunctions(region string) error { for _, f := range resp.Functions { var testOptGroup bool - for _, testName := range []string{"tf_test"} { + for _, testName := range []string{"tf_test", "tf_acc_"} { if strings.HasPrefix(*f.FunctionName, testName) { testOptGroup = true } @@ -63,8 +63,11 @@ func testSweepLambdaFunctions(region string) error { func TestAccAWSLambdaFunction_importLocalFile(t *testing.T) { resourceName := "aws_lambda_function.lambda_function_test" - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) + rString := acctest.RandString(8) + funcName := fmt.Sprintf("tf_acc_lambda_func_import_local_%s", rString) + policyName := 
fmt.Sprintf("tf_acc_policy_lambda_func_import_local_%s", rString) + roleName := fmt.Sprintf("tf_acc_role_lambda_func_import_local_%s", rString) + sgName := fmt.Sprintf("tf_acc_sg_lambda_func_import_local_%s", rString) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -72,7 +75,7 @@ func TestAccAWSLambdaFunction_importLocalFile(t *testing.T) { CheckDestroy: testAccCheckLambdaFunctionDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSLambdaConfigBasic(rName, rSt), + Config: testAccAWSLambdaConfigBasic(funcName, policyName, roleName, sgName), }, resource.TestStep{ @@ -88,8 +91,11 @@ func TestAccAWSLambdaFunction_importLocalFile(t *testing.T) { func TestAccAWSLambdaFunction_importLocalFile_VPC(t *testing.T) { resourceName := "aws_lambda_function.lambda_function_test" - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) + rString := acctest.RandString(8) + funcName := fmt.Sprintf("tf_acc_lambda_func_import_vpc_%s", rString) + policyName := fmt.Sprintf("tf_acc_policy_lambda_func_import_vpc_%s", rString) + roleName := fmt.Sprintf("tf_acc_role_lambda_func_import_vpc_%s", rString) + sgName := fmt.Sprintf("tf_acc_sg_lambda_func_import_vpc_%s", rString) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -97,7 +103,7 @@ func TestAccAWSLambdaFunction_importLocalFile_VPC(t *testing.T) { CheckDestroy: testAccCheckLambdaFunctionDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSLambdaConfigWithVPC(rName, rSt), + Config: testAccAWSLambdaConfigWithVPC(funcName, policyName, roleName, sgName), }, resource.TestStep{ @@ -113,8 +119,10 @@ func TestAccAWSLambdaFunction_importLocalFile_VPC(t *testing.T) { func TestAccAWSLambdaFunction_importS3(t *testing.T) { resourceName := "aws_lambda_function.lambda_function_s3test" - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) + rString := acctest.RandString(8) + bucketName := 
fmt.Sprintf("tf-acc-bucket-lambda-func-import-s3-%s", rString) + roleName := fmt.Sprintf("tf_acc_role_lambda_func_import_s3_%s", rString) + funcName := fmt.Sprintf("tf_acc_lambda_func_import_s3_%s", rString) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -122,7 +130,7 @@ func TestAccAWSLambdaFunction_importS3(t *testing.T) { CheckDestroy: testAccCheckLambdaFunctionDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSLambdaConfigS3(rName, rSt), + Config: testAccAWSLambdaConfigS3(bucketName, roleName, funcName), }, resource.TestStep{ diff --git a/aws/resource_aws_kinesis_firehose_delivery_stream_test.go b/aws/resource_aws_kinesis_firehose_delivery_stream_test.go index 6f8e83e7eac..4f1d80074a9 100644 --- a/aws/resource_aws_kinesis_firehose_delivery_stream_test.go +++ b/aws/resource_aws_kinesis_firehose_delivery_stream_test.go @@ -120,12 +120,14 @@ func TestAccAWSKinesisFirehoseDeliveryStream_s3ConfigUpdates(t *testing.T) { } func TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3basic(t *testing.T) { - rSt := acctest.RandString(5) - rName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rSt) + rString := acctest.RandString(8) + funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) + policyName := fmt.Sprintf("tf_acc_policy_%s", rString) + roleName := fmt.Sprintf("tf_acc_role_%s", rString) var stream firehose.DeliveryStreamDescription ri := acctest.RandInt() - config := testAccFirehoseAWSLambdaConfigBasic(rName, rSt) + + config := testAccFirehoseAWSLambdaConfigBasic(funcName, policyName, roleName) + fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3basic, ri, ri, ri, ri) @@ -146,12 +148,13 @@ func TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3basic(t *testing.T) { } func TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3InvalidProcessorType(t *testing.T) { - - rSt := acctest.RandString(5) - rName := 
fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rSt) + rString := acctest.RandString(8) + funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) + policyName := fmt.Sprintf("tf_acc_policy_%s", rString) + roleName := fmt.Sprintf("tf_acc_role_%s", rString) ri := acctest.RandInt() - config := testAccFirehoseAWSLambdaConfigBasic(rName, rSt) + + config := testAccFirehoseAWSLambdaConfigBasic(funcName, policyName, roleName) + fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3InvalidProcessorType, ri, ri, ri, ri) @@ -169,12 +172,13 @@ func TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3InvalidProcessorType(t *t } func TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3InvalidParameterName(t *testing.T) { - - rSt := acctest.RandString(5) - rName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rSt) + rString := acctest.RandString(8) + funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) + policyName := fmt.Sprintf("tf_acc_policy_%s", rString) + roleName := fmt.Sprintf("tf_acc_role_%s", rString) ri := acctest.RandInt() - config := testAccFirehoseAWSLambdaConfigBasic(rName, rSt) + + config := testAccFirehoseAWSLambdaConfigBasic(funcName, policyName, roleName) + fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3InvalidParameterName, ri, ri, ri, ri) @@ -192,17 +196,18 @@ func TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3InvalidParameterName(t *t } func TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3Updates(t *testing.T) { - - rSt := acctest.RandString(5) - rName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rSt) + rString := acctest.RandString(8) + funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) + policyName := fmt.Sprintf("tf_acc_policy_%s", rString) + roleName := fmt.Sprintf("tf_acc_role_%s", rString) var stream firehose.DeliveryStreamDescription ri := acctest.RandInt() - preConfig := 
testAccFirehoseAWSLambdaConfigBasic(rName, rSt) + + preConfig := testAccFirehoseAWSLambdaConfigBasic(funcName, policyName, roleName) + fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3basic, ri, ri, ri, ri) - postConfig := testAccFirehoseAWSLambdaConfigBasic(rName, rSt) + + postConfig := testAccFirehoseAWSLambdaConfigBasic(funcName, policyName, roleName) + fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3Updates, ri, ri, ri, ri) @@ -524,10 +529,10 @@ func testAccCheckFirehoseLambdaFunctionDestroy(s *terraform.State) error { return nil } -func baseAccFirehoseAWSLambdaConfig(rst string) string { +func baseAccFirehoseAWSLambdaConfig(policyName, roleName string) string { return fmt.Sprintf(` resource "aws_iam_role_policy" "iam_policy_for_lambda" { - name = "iam_policy_for_lambda_%s" + name = "%s" role = "${aws_iam_role.iam_for_lambda.id}" policy = < Date: Wed, 20 Dec 2017 10:38:08 +0100 Subject: [PATCH 030/350] Bump aws-sdk-go to v1.12.50 --- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 13 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/apigateway/api.go | 610 ++++++++++++- .../aws-sdk-go/service/mediastoredata/api.go | 22 +- .../aws/aws-sdk-go/service/route53/api.go | 68 +- .../aws/aws-sdk-go/service/route53/errors.go | 3 +- vendor/vendor.json | 834 +++++++++--------- 7 files changed, 1092 insertions(+), 460 deletions(-) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index c6cf4071acb..f503ced3ae1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -24,6 +24,7 @@ const ( EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). EuWest1RegionID = "eu-west-1" // EU (Ireland). EuWest2RegionID = "eu-west-2" // EU (London). + EuWest3RegionID = "eu-west-3" // EU (Paris). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). 
UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -33,7 +34,8 @@ const ( // AWS China partition's regions. const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). ) // AWS GovCloud (US) partition's regions. @@ -222,6 +224,9 @@ var awsPartition = partition{ "eu-west-2": region{ Description: "EU (London)", }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, "sa-east-1": region{ Description: "South America (Sao Paulo)", }, @@ -844,12 +849,15 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1975,6 +1983,9 @@ var awscnPartition = partition{ "cn-north-1": region{ Description: "China (Beijing)", }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, }, Services: services{ "apigateway": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index fdca0c8d26e..5066825a94b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.12.49" +const SDKVersion = "1.12.50" diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go index 7cd4b73585a..656830a9a16 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go +++ 
b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go @@ -7223,6 +7223,96 @@ func (c *APIGateway) GetStagesWithContext(ctx aws.Context, input *GetStagesInput return out, req.Send() } +const opGetTags = "GetTags" + +// GetTagsRequest generates a "aws/request.Request" representing the +// client's request for the GetTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTags for more information on using the GetTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetTagsRequest method. +// req, resp := client.GetTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *APIGateway) GetTagsRequest(input *GetTagsInput) (req *request.Request, output *GetTagsOutput) { + op := &request.Operation{ + Name: opGetTags, + HTTPMethod: "GET", + HTTPPath: "/tags/{resource_arn}", + } + + if input == nil { + input = &GetTagsInput{} + } + + output = &GetTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTags API operation for Amazon API Gateway. +// +// Gets the Tags collection for a given resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon API Gateway's +// API operation GetTags for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// The submitted request is not valid, for example, the input is incomplete +// or incorrect. See the accompanying error message for details. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The request is denied because the caller has insufficient permissions. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The request has reached its throttling limit. Retry after the specified time +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// The requested resource is not found. Make sure that the request URI is correct. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The request exceeded the rate limit. Retry after the specified time period. +// +func (c *APIGateway) GetTags(input *GetTagsInput) (*GetTagsOutput, error) { + req, out := c.GetTagsRequest(input) + return out, req.Send() +} + +// GetTagsWithContext is the same as GetTags with the addition of +// the ability to pass a context and additional request options. +// +// See GetTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *APIGateway) GetTagsWithContext(ctx aws.Context, input *GetTagsInput, opts ...request.Option) (*GetTagsOutput, error) { + req, out := c.GetTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opGetUsage = "GetUsage" // GetUsageRequest generates a "aws/request.Request" representing the @@ -8889,6 +8979,102 @@ func (c *APIGateway) PutRestApiWithContext(ctx aws.Context, input *PutRestApiInp return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *APIGateway) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "PUT", + HTTPPath: "/tags/{resource_arn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon API Gateway. +// +// Adds or updates Tags on a gievn resource. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon API Gateway's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// The submitted request is not valid, for example, the input is incomplete +// or incorrect. See the accompanying error message for details. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The request is denied because the caller has insufficient permissions. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The request has reached its throttling limit. Retry after the specified time +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// The requested resource is not found. Make sure that the request URI is correct. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The request exceeded the rate limit. Retry after the specified time period. +// +// * ErrCodeConflictException "ConflictException" +// The request configuration has conflicts. For details, see the accompanying +// error message. +// +func (c *APIGateway) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *APIGateway) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTestInvokeAuthorizer = "TestInvokeAuthorizer" // TestInvokeAuthorizerRequest generates a "aws/request.Request" representing the @@ -9067,6 +9253,99 @@ func (c *APIGateway) TestInvokeMethodWithContext(ctx aws.Context, input *TestInv return out, req.Send() } +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *APIGateway) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resource_arn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon API Gateway. +// +// Removes Tags from a given resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon API Gateway's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// The submitted request is not valid, for example, the input is incomplete +// or incorrect. See the accompanying error message for details. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The request is denied because the caller has insufficient permissions. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The request has reached its throttling limit. Retry after the specified time +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// The requested resource is not found. Make sure that the request URI is correct. +// +// * ErrCodeConflictException "ConflictException" +// The request configuration has conflicts. For details, see the accompanying +// error message. 
+// +func (c *APIGateway) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *APIGateway) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateAccount = "UpdateAccount" // UpdateAccountRequest generates a "aws/request.Request" representing the @@ -12558,6 +12837,13 @@ func (s *CreateResourceInput) SetRestApiId(v string) *CreateResourceInput { type CreateRestApiInput struct { _ struct{} `type:"structure"` + // The source of the API key for metring requests according to a usage plan. + // Valid values are HEADER to read the API key from the X-API-Key header of + // a request. + // AUTHORIZER to read the API key from the UsageIdentifierKey from a custom + // authorizer. + ApiKeySource *string `locationName:"apiKeySource" type:"string" enum:"ApiKeySourceType"` + // The list of binary media types supported by the RestApi. By default, the // RestApi supports only UTF-8-encoded text payloads. BinaryMediaTypes []*string `locationName:"binaryMediaTypes" type:"list"` @@ -12572,6 +12858,13 @@ type CreateRestApiInput struct { // the API. 
EndpointConfiguration *EndpointConfiguration `locationName:"endpointConfiguration" type:"structure"` + // A nullable integer used to enable (non-negative between 0 and 10485760 (10M) + // bytes, inclusive) or disable (null) compression on an API. When compression + // is enabled, compression or decompression are not applied on the payload if + // the payload size is smaller than this value. Setting it to zero allows compression + // for any payload size. + MinimumCompressionSize *int64 `locationName:"minimumCompressionSize" type:"integer"` + // The name of the RestApi. // // Name is a required field @@ -12604,6 +12897,12 @@ func (s *CreateRestApiInput) Validate() error { return nil } +// SetApiKeySource sets the ApiKeySource field's value. +func (s *CreateRestApiInput) SetApiKeySource(v string) *CreateRestApiInput { + s.ApiKeySource = &v + return s +} + // SetBinaryMediaTypes sets the BinaryMediaTypes field's value. func (s *CreateRestApiInput) SetBinaryMediaTypes(v []*string) *CreateRestApiInput { s.BinaryMediaTypes = v @@ -12628,6 +12927,12 @@ func (s *CreateRestApiInput) SetEndpointConfiguration(v *EndpointConfiguration) return s } +// SetMinimumCompressionSize sets the MinimumCompressionSize field's value. +func (s *CreateRestApiInput) SetMinimumCompressionSize(v int64) *CreateRestApiInput { + s.MinimumCompressionSize = &v + return s +} + // SetName sets the Name field's value. func (s *CreateRestApiInput) SetName(v string) *CreateRestApiInput { s.Name = &v @@ -12674,6 +12979,11 @@ type CreateStageInput struct { // StageName is a required field StageName *string `locationName:"stageName" type:"string" required:"true"` + // Key/Value map of strings. Valid character set is [a-zA-Z+-=._:/]. Tag key + // can be up to 128 characters and must not start with "aws:". Tag value can + // be up to 256 characters. + Tags map[string]*string `locationName:"tags" type:"map"` + // A map that defines the stage variables for the new Stage resource. 
Variable // names can have alphanumeric and underscore characters, and the values must // match [A-Za-z0-9-._~:/?#&=,]+. @@ -12757,6 +13067,12 @@ func (s *CreateStageInput) SetStageName(v string) *CreateStageInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateStageInput) SetTags(v map[string]*string) *CreateStageInput { + s.Tags = v + return s +} + // SetVariables sets the Variables field's value. func (s *CreateStageInput) SetVariables(v map[string]*string) *CreateStageInput { s.Variables = v @@ -18013,6 +18329,89 @@ func (s *GetStagesOutput) SetItem(v []*Stage) *GetStagesOutput { return s } +// Gets the Tags collection for a given resource. +type GetTagsInput struct { + _ struct{} `type:"structure"` + + // (Not currently supported) The maximum number of returned results per page. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // (Not currently supported) The current pagination position in the paged result + // set. + Position *string `location:"querystring" locationName:"position" type:"string"` + + // [Required] The ARN of a resource that can be tagged. At present, Stage is + // the only taggable resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resource_arn" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTagsInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. 
+func (s *GetTagsInput) SetLimit(v int64) *GetTagsInput { + s.Limit = &v + return s +} + +// SetPosition sets the Position field's value. +func (s *GetTagsInput) SetPosition(v string) *GetTagsInput { + s.Position = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *GetTagsInput) SetResourceArn(v string) *GetTagsInput { + s.ResourceArn = &v + return s +} + +// A collection of Tags associated with a given resource. +type GetTagsOutput struct { + _ struct{} `type:"structure"` + + // A collection of Tags associated with a given resource. + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s GetTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTagsOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *GetTagsOutput) SetTags(v map[string]*string) *GetTagsOutput { + s.Tags = v + return s +} + // The GET request to get the usage data of a usage plan in a specified time // interval. type GetUsageInput struct { @@ -19211,13 +19610,14 @@ type Method struct { // method. ApiKeyRequired *bool `locationName:"apiKeyRequired" type:"boolean"` - // A list authorization scopes configured on the method used with a COGNITO_USER_POOL - // authorizer to authorize the method invocation by matching them against the - // scopes parsed from the access token in the incoming request. The method invocation - // is authorized if any method scopes matches a claimed scope in the access - // token. Otherwise, the invocation is not authorized. When the method scope - // is configured, the client must provide an access token instead of an identity - // token for authorizatinon purposes. + // A list of authorization scopes configured on the method. The scopes are used + // with a COGNITO_USER_POOL authorizer to authorize the method invocation. 
The + // authorization works by matching the method scopes against the scopes parsed + // from the access token in the incoming request. The method invocation is authorized + // if any method scopes matches a claimed scope in the access token. Otherwise, + // the invocation is not authorized. When the method scope is configured, the + // client must provide an access token instead of an identity token for authorization + // purposes. AuthorizationScopes []*string `locationName:"authorizationScopes" type:"list"` // The method's authorization type. Valid values are NONE for open access, AWS_IAM @@ -20286,13 +20686,14 @@ type PutMethodInput struct { // Specifies whether the method required a valid ApiKey. ApiKeyRequired *bool `locationName:"apiKeyRequired" type:"boolean"` - // A list authorization scopes configured on the method used with a COGNITO_USER_POOL - // authorizer to authorize the method invocation by matching them against the - // scopes parsed from the access token in the incoming request. The method invocation - // is authorized if any method scopes matches a claimed scope in the access - // token. Otherwise, the invocation is not authorized. When the method scope - // is configured, the client must provide an access token instead of an identity - // token for authorizatinon purposes. + // A list of authorization scopes configured on the method. The scopes are used + // with a COGNITO_USER_POOL authorizer to authorize the method invocation. The + // authorization works by matching the method scopes against the scopes parsed + // from the access token in the incoming request. The method invocation is authorized + // if any method scopes matches a claimed scope in the access token. Otherwise, + // the invocation is not authorized. When the method scope is configured, the + // client must provide an access token instead of an identity token for authorization + // purposes. 
AuthorizationScopes []*string `locationName:"authorizationScopes" type:"list"` // The method's authorization type. Valid values are NONE for open access, AWS_IAM @@ -20810,6 +21211,13 @@ func (s *Resource) SetResourceMethods(v map[string]*Method) *Resource { type RestApi struct { _ struct{} `type:"structure"` + // The source of the API key for metring requests according to a usage plan. + // Valid values are HEADER to read the API key from the X-API-Key header of + // a request. + // AUTHORIZER to read the API key from the UsageIdentifierKey from a custom + // authorizer. + ApiKeySource *string `locationName:"apiKeySource" type:"string" enum:"ApiKeySourceType"` + // The list of binary media types supported by the RestApi. By default, the // RestApi supports only UTF-8-encoded text payloads. BinaryMediaTypes []*string `locationName:"binaryMediaTypes" type:"list"` @@ -20828,6 +21236,13 @@ type RestApi struct { // API Gateway. Id *string `locationName:"id" type:"string"` + // A nullable integer used to enable (non-negative between 0 and 10485760 (10M) + // bytes, inclusive) or disable (null) compression on an API. When compression + // is enabled, compression or decompression are not applied on the payload if + // the payload size is smaller than this value. Setting it to zero allows compression + // for any payload size. + MinimumCompressionSize *int64 `locationName:"minimumCompressionSize" type:"integer"` + // The API's name. Name *string `locationName:"name" type:"string"` @@ -20849,6 +21264,12 @@ func (s RestApi) GoString() string { return s.String() } +// SetApiKeySource sets the ApiKeySource field's value. +func (s *RestApi) SetApiKeySource(v string) *RestApi { + s.ApiKeySource = &v + return s +} + // SetBinaryMediaTypes sets the BinaryMediaTypes field's value. 
 func (s *RestApi) SetBinaryMediaTypes(v []*string) *RestApi {
 	s.BinaryMediaTypes = v
@@ -20879,6 +21300,12 @@ func (s *RestApi) SetId(v string) *RestApi {
 	return s
 }
 
+// SetMinimumCompressionSize sets the MinimumCompressionSize field's value.
+func (s *RestApi) SetMinimumCompressionSize(v int64) *RestApi {
+	s.MinimumCompressionSize = &v
+	return s
+}
+
 // SetName sets the Name field's value.
 func (s *RestApi) SetName(v string) *RestApi {
 	s.Name = &v
@@ -21059,6 +21486,9 @@ type Stage struct {
 	// (URI) of a call to API Gateway.
 	StageName *string `locationName:"stageName" type:"string"`
 
+	// A collection of Tags associated with a given resource.
+	Tags map[string]*string `locationName:"tags" type:"map"`
+
 	// A map that defines the stage variables for a Stage resource. Variable names
 	// can have alphanumeric and underscore characters, and the values must match
 	// [A-Za-z0-9-._~:/?#&=,]+.
@@ -21153,6 +21583,12 @@ func (s *Stage) SetStageName(v string) *Stage {
 	return s
 }
 
+// SetTags sets the Tags field's value.
+func (s *Stage) SetTags(v map[string]*string) *Stage {
+	s.Tags = v
+	return s
+}
+
 // SetVariables sets the Variables field's value.
 func (s *Stage) SetVariables(v map[string]*string) *Stage {
 	s.Variables = v
@@ -21192,6 +21628,76 @@ func (s *StageKey) SetStageName(v string) *StageKey {
 	return s
 }
 
+// Adds or updates Tags on a given resource.
+type TagResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// [Required] The ARN of a resource that can be tagged. At present, Stage is
+	// the only taggable resource.
+	//
+	// ResourceArn is a required field
+	ResourceArn *string `location:"uri" locationName:"resource_arn" type:"string" required:"true"`
+
+	// [Required] Key/Value map of strings. Valid character set is [a-zA-Z+-=._:/].
+	// Tag key can be up to 128 characters and must not start with "aws:". Tag value
+	// can be up to 256 characters.
+ // + // Tags is a required field + Tags map[string]*string `locationName:"tags" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // Make a request to simulate the execution of an Authorizer. type TestInvokeAuthorizerInput struct { _ struct{} `type:"structure"` @@ -21587,6 +22093,74 @@ func (s *ThrottleSettings) SetRateLimit(v float64) *ThrottleSettings { return s } +// Removes Tags from a given resource. +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // [Required] The ARN of a resource that can be tagged. At present, Stage is + // the only taggable resource. 
+ // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resource_arn" type:"string" required:"true"` + + // The Tag keys to delete. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + // Requests API Gateway to change information about the current Account resource. 
type UpdateAccountInput struct { _ struct{} `type:"structure"` @@ -23433,6 +24007,14 @@ func (s *UsagePlanKey) SetValue(v string) *UsagePlanKey { return s } +const ( + // ApiKeySourceTypeHeader is a ApiKeySourceType enum value + ApiKeySourceTypeHeader = "HEADER" + + // ApiKeySourceTypeAuthorizer is a ApiKeySourceType enum value + ApiKeySourceTypeAuthorizer = "AUTHORIZER" +) + const ( // ApiKeysFormatCsv is a ApiKeysFormat enum value ApiKeysFormatCsv = "csv" diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go index 915c1b0a093..601f06ce87d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go @@ -141,7 +141,7 @@ func (c *MediaStoreData) DescribeObjectRequest(input *DescribeObjectInput) (req // DescribeObject API operation for AWS Elemental MediaStore Data Plane. // -// Gets the header for an object at the specified path. +// Gets the headers for an object at the specified path. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -682,15 +682,7 @@ func (s *GetObjectInput) SetRange(v string) *GetObjectInput { type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` - // The path to the file outside of the container. The file name can include - // or omit an extension. - // - // Example 1: If the file is stored on a remote server that has been mounted - // to the workstation on which the REST API command is being run, the path could - // be the absolute path \mount\assets\mlaw.avi or the relative path ..\..\mount\assets\movies\premium\mlaw.avi. - // - // Example 2: If the file is stored on a remote server that is not mounted, - // the path could be https:\\192.0.2.15\movies\premium\mlaw.avi. + // The bytes of the object. 
Body io.ReadCloser `type:"blob"` // An optional CacheControl header that allows the caller to control the object's @@ -945,15 +937,7 @@ func (s *ListItemsOutput) SetNextToken(v string) *ListItemsOutput { type PutObjectInput struct { _ struct{} `type:"structure" payload:"Body"` - // The path to the file outside of the container. The file name can include - // or omit an extension. - // - // Example 1: If the file is stored on a remote server that has been mounted - // to the workstation on which the REST API command is being run, the path could - // be the absolute path \mount\assets\mlaw.avi or the relative path ..\..\mount\assets\movies\premium\mlaw.avi. - // - // Example 2: If the file is stored on a remote server that is not mounted, - // the path could be https:\\192.0.2.15\movies\premium\mlaw.avi. + // The bytes to be stored. // // Body is a required field Body io.ReadSeeker `type:"blob" required:"true"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go index 5e161817895..5fafaa53e10 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go @@ -101,7 +101,8 @@ func (c *Route53) AssociateVPCWithHostedZoneRequest(input *AssociateVPCWithHoste // have any common name servers. You tried to create a hosted zone that has // the same name as an existing hosted zone or that's the parent or child // of an existing hosted zone, and you specified a delegation set that shares -// one or more name servers with the existing hosted zone. +// one or more name servers with the existing hosted zone. For more information, +// see CreateReusableDelegationSet. 
// // * Private hosted zone: You specified an Amazon VPC that you're already // using for another hosted zone, and the domain that you specified for one @@ -689,7 +690,8 @@ func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *re // have any common name servers. You tried to create a hosted zone that has // the same name as an existing hosted zone or that's the parent or child // of an existing hosted zone, and you specified a delegation set that shares -// one or more name servers with the existing hosted zone. +// one or more name servers with the existing hosted zone. For more information, +// see CreateReusableDelegationSet. // // * Private hosted zone: You specified an Amazon VPC that you're already // using for another hosted zone, and the domain that you specified for one @@ -972,13 +974,49 @@ func (c *Route53) CreateReusableDelegationSetRequest(input *CreateReusableDelega // // Creates a delegation set (a group of four name servers) that can be reused // by multiple hosted zones. If a hosted zoned ID is specified, CreateReusableDelegationSet -// marks the delegation set associated with that zone as reusable +// marks the delegation set associated with that zone as reusable. // -// A reusable delegation set can't be associated with a private hosted zone. +// You can't associate a reusable delegation set with a private hosted zone. // -// For information on how to use a reusable delegation set to configure white +// For information about using a reusable delegation set to configure white // label name servers, see Configuring White Label Name Servers (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/white-label-name-servers.html). // +// The process for migrating existing hosted zones to use a reusable delegation +// set is comparable to the process for configuring white label name servers. +// You need to perform the following steps: +// +// Create a reusable delegation set. 
+// +// Recreate hosted zones, and reduce the TTL to 60 seconds or less. +// +// Recreate resource record sets in the new hosted zones. +// +// Change the registrar's name servers to use the name servers for the new hosted +// zones. +// +// Monitor traffic for the website or application. +// +// Change TTLs back to their original values. +// +// If you want to migrate existing hosted zones to use a reusable delegation +// set, the existing hosted zones can't use any of the name servers that are +// assigned to the reusable delegation set. If one or more hosted zones do use +// one or more name servers that are assigned to the reusable delegation set, +// you can do one of the following: +// +// * For small numbers of hosted zones—up to a few hundred—it's relatively +// easy to create reusable delegation sets until you get one that has four +// name servers that don't overlap with any of the name servers in your hosted +// zones. +// +// * For larger numbers of hosted zones, the easiest solution is to use more +// than one reusable delegation set. +// +// * For larger numbers of hosted zones, you can also migrate hosted zones +// that have overlapping name servers to hosted zones that don't have overlapping +// name servers, then migrate the hosted zones again to use the reusable +// delegation set. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8873,6 +8911,10 @@ type GetHealthCheckLastFailureReasonInput struct { // you created the health check, CreateHealthCheck returned the ID in the response, // in the HealthCheckId element. // + // If you want to get the last failure reason for a calculated health check, + // you must use the Amazon Route 53 console or the CloudWatch console. You can't + // use GetHealthCheckLastFailureReason for a calculated health check. 
+ // // HealthCheckId is a required field HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` } @@ -9138,7 +9180,7 @@ type GetHostedZoneLimitInput struct { // * MAX_RRSETS_BY_ZONE: The maximum number of records that you can create // in the specified hosted zone. // - // * MAX_VPCS_ASSOCIATED_BY_TYPE: The maximum number of Amazon VPCs that + // * MAX_VPCS_ASSOCIATED_BY_ZONE: The maximum number of Amazon VPCs that // you can associate with the specified private hosted zone. // // Type is a required field @@ -10394,7 +10436,7 @@ type HostedZoneLimit struct { // * MAX_RRSETS_BY_ZONE: The maximum number of records that you can create // in the specified hosted zone. // - // * MAX_VPCS_ASSOCIATED_BY_TYPE: The maximum number of Amazon VPCs that + // * MAX_VPCS_ASSOCIATED_BY_ZONE: The maximum number of Amazon VPCs that // you can associate with the specified private hosted zone. // // Type is a required field @@ -14751,6 +14793,9 @@ const ( // CloudWatchRegionEuWest2 is a CloudWatchRegion enum value CloudWatchRegionEuWest2 = "eu-west-2" + // CloudWatchRegionEuWest3 is a CloudWatchRegion enum value + CloudWatchRegionEuWest3 = "eu-west-3" + // CloudWatchRegionApSouth1 is a CloudWatchRegion enum value CloudWatchRegionApSouth1 = "ap-south-1" @@ -14934,6 +14979,9 @@ const ( // ResourceRecordSetRegionEuWest2 is a ResourceRecordSetRegion enum value ResourceRecordSetRegionEuWest2 = "eu-west-2" + // ResourceRecordSetRegionEuWest3 is a ResourceRecordSetRegion enum value + ResourceRecordSetRegionEuWest3 = "eu-west-3" + // ResourceRecordSetRegionEuCentral1 is a ResourceRecordSetRegion enum value ResourceRecordSetRegionEuCentral1 = "eu-central-1" @@ -14955,6 +15003,9 @@ const ( // ResourceRecordSetRegionCnNorth1 is a ResourceRecordSetRegion enum value ResourceRecordSetRegionCnNorth1 = "cn-north-1" + // ResourceRecordSetRegionCnNorthwest1 is a ResourceRecordSetRegion enum value + ResourceRecordSetRegionCnNorthwest1 = "cn-northwest-1" + // 
ResourceRecordSetRegionApSouth1 is a ResourceRecordSetRegion enum value ResourceRecordSetRegionApSouth1 = "ap-south-1" ) @@ -15008,6 +15059,9 @@ const ( // VPCRegionEuWest2 is a VPCRegion enum value VPCRegionEuWest2 = "eu-west-2" + // VPCRegionEuWest3 is a VPCRegion enum value + VPCRegionEuWest3 = "eu-west-3" + // VPCRegionEuCentral1 is a VPCRegion enum value VPCRegionEuCentral1 = "eu-central-1" diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go b/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go index 18032111039..856039e95d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go @@ -22,7 +22,8 @@ const ( // have any common name servers. You tried to create a hosted zone that has // the same name as an existing hosted zone or that's the parent or child // of an existing hosted zone, and you specified a delegation set that shares - // one or more name servers with the existing hosted zone. + // one or more name servers with the existing hosted zone. For more information, + // see CreateReusableDelegationSet. 
// // * Private hosted zone: You specified an Amazon VPC that you're already // using for another hosted zone, and the domain that you specified for one diff --git a/vendor/vendor.json b/vendor/vendor.json index 7fe1a3badbf..e32b249d6dd 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -141,828 +141,828 @@ "revisionTime": "2017-07-27T15:54:43Z" }, { - "checksumSHA1": "lLLCYYcMsypt77CRhQMbGpJ+ZM4=", + "checksumSHA1": "ELwO63Rr9R8wzg2hv25tdUmT1Os=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "DtuTqKH29YnLjrIJkRYX0HQtXY0=", "path": "github.com/aws/aws-sdk-go/aws/arn", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": 
"v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "9nE/FjZ4pYrT883KtV2/aI+Gayo=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": 
"03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "OnU/n7R33oYXiB4SAGd5pK7I0Bs=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { - "checksumSHA1": "StXQWfRcKjNysYZQFm/noJn9xnM=", + "checksumSHA1": "pa4oM3PSwZQIfqcw1JFbd3kv3aQ=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": 
"ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "9GvAyILJ7g+VUg8Ef5DsT5GuYsg=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "iU00ZjhAml/13g+1YXT21IqoXqg=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": 
"NStHCXEvYqG72GknZyv1jaKaeH0=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "yHfT5DTbeCLs4NE2Rgnqrhe15ls=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + 
"revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "pkeoOfZpHRvFG/AOZeTf0lwtsFg=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": 
"2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "vnYDXA1NxJ7Hu+DMfXNk1UnmkWg=", "path": "github.com/aws/aws-sdk-go/service/acm", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { - "checksumSHA1": "8mGhHG7WChxuu8Hu3Vv5j2sMKNQ=", + "checksumSHA1": "DPl/OkvEUjrd+XKqX73l6nUNw3U=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "X8tOI6i+RJwXIgg1qBjDNclyG/0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": 
"aDAaH6YiA50IrJ5Smfg0fovrniA=", "path": "github.com/aws/aws-sdk-go/service/appsync", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "oBXDw1zQTfxcKsK3ZjtKcS7gBLI=", "path": "github.com/aws/aws-sdk-go/service/athena", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "ITAwWyJp4t9AGfUXm9M3pFWTHVA=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "Zz8qI6RloveM1zrXAglLxJZT1ZA=", "path": "github.com/aws/aws-sdk-go/service/batch", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "/nO06EpnD22+Ex80gHi4UYrAvKc=", "path": "github.com/aws/aws-sdk-go/service/budgets", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + 
"version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "6gM3CZZgiB0JvS7EK1c31Q8L09U=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "T80IDetBz1hqJpq5Wqmx3MwCh8w=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "bYrI9mxspB0xDFZEy3OIfWuez5g=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "oB+M+kOmYG28V0PuI75IF6E+/w8=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "Nc3vXlV7s309PprScYpRDPQWeDQ=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + 
"revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "bPh7NF3mLpGMV0rIakolMPHqMyw=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "OqrWtx9iyIJ9roP2sEcmP9UCfXE=", "path": "github.com/aws/aws-sdk-go/service/codebuild", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "7nW1Ho2X3RcUU8FaFBhJIUeuDNw=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "m19PZt1B51QCWo1jxSbII2zzL6Q=", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "LKw7fnNwq17Eqy0clzS/LK89vS4=", "path": "github.com/aws/aws-sdk-go/service/codepipeline", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - 
"revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "aXh1KIbNX+g+tH+lh3pk++9lm3k=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentity", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "IWi9xZz+OncotjM/vJ87Iffg2Qk=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentityprovider", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "6uudO8hkB5uERXixPA/yL3xcguQ=", "path": "github.com/aws/aws-sdk-go/service/configservice", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "hYCwLQdIjHj8rMHLGVyUVhecI4s=", "path": "github.com/aws/aws-sdk-go/service/databasemigrationservice", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": 
"26CWoHQP/dyL2VzE5ZNd8zNzhko=", "path": "github.com/aws/aws-sdk-go/service/devicefarm", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "6g94rUHAgjcqMMTtMqKUbLU37wY=", "path": "github.com/aws/aws-sdk-go/service/directconnect", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "oFnS6I0u7KqnxK0/r1uoz8rTkxI=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "0TXXUPjrbOCHpX555B6suH36Nnk=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "BOjSO1uO7Coj6o3oqpPUtEhQrPI=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": 
"2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "uEv9kkBsVIjg7K4+Y8TVlU0Cc8o=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "sD9Urgwx7F3ImX+tJg2Q+ME/oFM=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "eoM9nF5iVMbuGOmkY33d19aHt8Y=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "dU5MPXUUOYD/E9sNncpFZ/U86Cw=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "pj8mBWT3HE0Iid6HSmhw7lmyZDU=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + 
"revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "VYGtTaSiajfKOVTbi9/SNmbiIac=", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "SZ7yLDZ6RvMhpWe0Goyem64kgyA=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "WYqHhdRNsiGGBLWlBLbOItZf+zA=", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "ae7VWg/xuXpnSD6wGumN44qEd+Q=", "path": "github.com/aws/aws-sdk-go/service/elbv2", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "NbkH6F+792jQ7BW4lGCb+vJVw58=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - 
"revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "5btWHj2fZrPc/zfYdJLPaOcivxI=", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "oDoGvSfmO2Z099ixV2HXn+SDeHE=", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "HRmbBf3dUEBAfdC2xKaoWAGeM7Y=", "path": "github.com/aws/aws-sdk-go/service/glue", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "6JlxJoy1JCArNK2qBkaJ5IV6qBc=", "path": "github.com/aws/aws-sdk-go/service/guardduty", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "oZaxMqnwl2rA+V/W0tJ3uownORI=", "path": 
"github.com/aws/aws-sdk-go/service/iam", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "dnNMSn5aHAtdOks+aWHLpwbi/VE=", "path": "github.com/aws/aws-sdk-go/service/inspector", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "96OBMJ3R9BD402LJsUUA8a82/UY=", "path": "github.com/aws/aws-sdk-go/service/iot", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "IoSyRZhlL0petrB28nXk5jKM9YA=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "oAFLgD0uJiVOZkFkL5dd/wUgBz4=", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" 
}, { "checksumSHA1": "XDVse9fKF0RkAywzzgsO31AV4oc=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "HluEcyZNywrbKnj/aR3tXbu29d8=", "path": "github.com/aws/aws-sdk-go/service/lexmodelbuildingservice", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "wjs9YBsHx0YQH0zKBA7Ibd1UV5Y=", "path": "github.com/aws/aws-sdk-go/service/lightsail", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "4VfB5vMLNYs0y6K159YCBgo9T3c=", "path": "github.com/aws/aws-sdk-go/service/mediaconvert", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "Ox3VWHYSQq0YKmlr0paUPdr5W/0=", "path": "github.com/aws/aws-sdk-go/service/medialive", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + 
"revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "Rs7QtkcLl3XNPnKb8ss/AhF2X50=", "path": "github.com/aws/aws-sdk-go/service/mediapackage", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "QjiIL8LrlhwrQw8FboF+wMNvUF0=", "path": "github.com/aws/aws-sdk-go/service/mediastore", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { - "checksumSHA1": "+srPYMy6U6+D29GNDM+FEtzj05g=", + "checksumSHA1": "ZY1SJNE03I6NL2OBJD9hlwVsqO0=", "path": "github.com/aws/aws-sdk-go/service/mediastoredata", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "ynB7Flcudp0VOqBVKZJ+23DtLHU=", "path": "github.com/aws/aws-sdk-go/service/mq", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "fpsBu+F79ktlLRwal1GugVMUDo0=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": 
"2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "Iqkgx2nafQPV7fjw+uP35jtF6t4=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "vP1FcccUZbuUlin7ME89w1GVJtA=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { - "checksumSHA1": "yV47oX5pFLCiMLSlfEPkPY3oqJg=", + "checksumSHA1": "tKnVaKPOCiU6xl3/AYcdBCLtRdw=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "sCaHoPWsJXRHFbilUKwN71qFTOI=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "QZU8vR9cOIenYiH+Ywl4Gzfnlp0=", "path": 
"github.com/aws/aws-sdk-go/service/servicecatalog", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "dk6ebvA0EYgdPyc5HPKLBPEtsm4=", "path": "github.com/aws/aws-sdk-go/service/servicediscovery", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "Ex1Ma0SFGpqeNuPbeXZtsliZ3zo=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "maVXeR3WDAkONlzf04e4mDgCYxo=", "path": "github.com/aws/aws-sdk-go/service/sfn", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "ADoR4mlCW5usH8iOa6mPNSy49LM=", "path": "github.com/aws/aws-sdk-go/service/shield", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + 
"versionExact": "v1.12.50" }, { "checksumSHA1": "B3CgAFSREebpsFoFOo4vrQ6u04w=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "FfY8w4DM8XIULdRnFhd3Um8Mj8c=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "Wx189wAbIhWChx4kVbvsyqKMF4U=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "ijz0rBDeR6JP/06S+97k84FRYxc=", "path": "github.com/aws/aws-sdk-go/service/ssm", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "W1oFtpaT4TWIIJrAvFcn/XdcT7g=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + 
"revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "Uw4pOUxSMbx4xBHUcOUkNhtnywE=", "path": "github.com/aws/aws-sdk-go/service/swf", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "on6d7Hydx2bM9jkFOf1JZcZZgeY=", "path": "github.com/aws/aws-sdk-go/service/waf", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "rHqjsOndIR82gX5mSKybaRWf3UY=", "path": "github.com/aws/aws-sdk-go/service/wafregional", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "5HDSvmMW7F3xzPAzughe4dEn6RM=", "path": "github.com/aws/aws-sdk-go/service/workspaces", - "revision": "ab772a0ae5d958feb51cba2a65eca3911882d791", - "revisionTime": "2017-12-19T05:52:48Z", - "version": "v1.12.49", - "versionExact": "v1.12.49" + "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", + "revisionTime": "2017-12-19T22:11:17Z", + "version": "v1.12.50", + "versionExact": "v1.12.50" }, { "checksumSHA1": "usT4LCSQItkFvFOQT7cBlkCuGaE=", From 05f6677b1d9c28fa0a709348fea3add619fee2be Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Wed, 20 Dec 2017 14:39:18 +0100 Subject: [PATCH 031/350] Make sure we properly update the 
state Without taking the device name into account, certain changes (were the volume ID and instance ID remain the same) will not work as expected. In those cases you will not be able to get rid of the old resource in your state file, without manual actions. --- aws/resource_aws_volume_attachment.go | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_volume_attachment.go b/aws/resource_aws_volume_attachment.go index 2afcd6c676a..7637fdf07f1 100644 --- a/aws/resource_aws_volume_attachment.go +++ b/aws/resource_aws_volume_attachment.go @@ -117,7 +117,7 @@ func resourceAwsVolumeAttachmentCreate(d *schema.ResourceData, meta interface{}) stateConf := &resource.StateChangeConf{ Pending: []string{"attaching"}, Target: []string{"attached"}, - Refresh: volumeAttachmentStateRefreshFunc(conn, vID, iID), + Refresh: volumeAttachmentStateRefreshFunc(conn, name, vID, iID), Timeout: 5 * time.Minute, Delay: 10 * time.Second, MinTimeout: 3 * time.Second, @@ -134,12 +134,15 @@ func resourceAwsVolumeAttachmentCreate(d *schema.ResourceData, meta interface{}) return resourceAwsVolumeAttachmentRead(d, meta) } -func volumeAttachmentStateRefreshFunc(conn *ec2.EC2, volumeID, instanceID string) resource.StateRefreshFunc { +func volumeAttachmentStateRefreshFunc(conn *ec2.EC2, name, volumeID, instanceID string) resource.StateRefreshFunc { return func() (interface{}, string, error) { - request := &ec2.DescribeVolumesInput{ VolumeIds: []*string{aws.String(volumeID)}, Filters: []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("attachment.device"), + Values: []*string{aws.String(name)}, + }, &ec2.Filter{ Name: aws.String("attachment.instance-id"), Values: []*string{aws.String(instanceID)}, @@ -167,12 +170,17 @@ func volumeAttachmentStateRefreshFunc(conn *ec2.EC2, volumeID, instanceID string return 42, "detached", nil } } + func resourceAwsVolumeAttachmentRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn 
request := &ec2.DescribeVolumesInput{ VolumeIds: []*string{aws.String(d.Get("volume_id").(string))}, Filters: []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("attachment.device"), + Values: []*string{aws.String(d.Get("device_name").(string))}, + }, &ec2.Filter{ Name: aws.String("attachment.instance-id"), Values: []*string{aws.String(d.Get("instance_id").(string))}, @@ -206,11 +214,12 @@ func resourceAwsVolumeAttachmentDelete(d *schema.ResourceData, meta interface{}) return nil } + name := d.Get("device_name").(string) vID := d.Get("volume_id").(string) iID := d.Get("instance_id").(string) opts := &ec2.DetachVolumeInput{ - Device: aws.String(d.Get("device_name").(string)), + Device: aws.String(name), InstanceId: aws.String(iID), VolumeId: aws.String(vID), Force: aws.Bool(d.Get("force_detach").(bool)), @@ -224,7 +233,7 @@ func resourceAwsVolumeAttachmentDelete(d *schema.ResourceData, meta interface{}) stateConf := &resource.StateChangeConf{ Pending: []string{"detaching"}, Target: []string{"detached"}, - Refresh: volumeAttachmentStateRefreshFunc(conn, vID, iID), + Refresh: volumeAttachmentStateRefreshFunc(conn, name, vID, iID), Timeout: 5 * time.Minute, Delay: 10 * time.Second, MinTimeout: 3 * time.Second, From a7831e654f838c5016461786089ac890845fd18a Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 20 Dec 2017 11:25:58 -0500 Subject: [PATCH 032/350] r/aws_cognito_user_pool: Add computed arn attribute --- aws/resource_aws_cognito_user_pool.go | 14 ++++++++++++++ aws/resource_aws_cognito_user_pool_test.go | 2 ++ website/docs/r/cognito_user_pool.markdown | 3 ++- 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_cognito_user_pool.go b/aws/resource_aws_cognito_user_pool.go index 3c49e543d12..095664dbe34 100644 --- a/aws/resource_aws_cognito_user_pool.go +++ b/aws/resource_aws_cognito_user_pool.go @@ -7,6 +7,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" 
"github.com/aws/aws-sdk-go/service/cognitoidentityprovider" "github.com/hashicorp/errwrap" @@ -84,6 +85,11 @@ func resourceAwsCognitoUserPool() *schema.Resource { ConflictsWith: []string{"username_attributes"}, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "auto_verified_attributes": { Type: schema.TypeSet, Optional: true, @@ -606,6 +612,14 @@ func resourceAwsCognitoUserPoolRead(d *schema.ResourceData, meta interface{}) er if resp.UserPool.AliasAttributes != nil { d.Set("alias_attributes", flattenStringList(resp.UserPool.AliasAttributes)) } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Region: meta.(*AWSClient).region, + Service: "cognito-idp", + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("userpool/%s", d.Id()), + } + d.Set("arn", arn.String()) if resp.UserPool.AutoVerifiedAttributes != nil { d.Set("auto_verified_attributes", flattenStringList(resp.UserPool.AutoVerifiedAttributes)) } diff --git a/aws/resource_aws_cognito_user_pool_test.go b/aws/resource_aws_cognito_user_pool_test.go index dc7a4cbc5dd..a2c3aa071d9 100644 --- a/aws/resource_aws_cognito_user_pool_test.go +++ b/aws/resource_aws_cognito_user_pool_test.go @@ -26,6 +26,8 @@ func TestAccAWSCognitoUserPool_basic(t *testing.T) { Config: testAccAWSCognitoUserPoolConfig_basic(name), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), + resource.TestMatchResourceAttr("aws_cognito_user_pool.pool", "arn", + regexp.MustCompile("^arn:aws:cognito-idp:[^:]+:[0-9]{12}:userpool/[\\w-]+_[0-9a-zA-Z]+$")), resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "name", "terraform-test-pool-"+name), resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "creation_date"), resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "last_modified_date"), diff --git a/website/docs/r/cognito_user_pool.markdown b/website/docs/r/cognito_user_pool.markdown index 74d4b3dac25..004325ae722 100644 
--- a/website/docs/r/cognito_user_pool.markdown +++ b/website/docs/r/cognito_user_pool.markdown @@ -120,9 +120,10 @@ The following arguments are supported: ## Attribute Reference -The following attributes are exported: +The following additional attributes are exported: * `id` - The id of the user pool. +* `arn` - The ARN of the user pool. * `creation_date` - The date the user pool was created. * `last_modified_date` - The date the user pool was last modified. From eb3085f31c536956477617ec988fe3efabf28c45 Mon Sep 17 00:00:00 2001 From: Kash Date: Wed, 20 Dec 2017 11:43:41 -0500 Subject: [PATCH 033/350] set field of struct directly --- aws/resource_aws_vpc_peering_connection.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_vpc_peering_connection.go b/aws/resource_aws_vpc_peering_connection.go index 6424c4cc04c..f5357255c7a 100644 --- a/aws/resource_aws_vpc_peering_connection.go +++ b/aws/resource_aws_vpc_peering_connection.go @@ -79,7 +79,7 @@ func resourceAwsVPCPeeringCreate(d *schema.ResourceData, meta interface{}) error if _, ok := d.GetOk("auto_accept"); ok { return fmt.Errorf("peer_region cannot be set whilst auto_accept is true when creating a vpc peering connection") } - createOpts.SetPeerRegion(v.(string)) + createOpts.PeerRegion = aws.String(v.(string)) } log.Printf("[DEBUG] VPC Peering Create options: %#v", createOpts) From 82ccf9215948b49458c8826eab06d8f4b370e85c Mon Sep 17 00:00:00 2001 From: Gopi Date: Wed, 20 Dec 2017 16:44:37 +0000 Subject: [PATCH 034/350] Fix the elasticache documentation about the endpoints - Reference: http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Endpoints.html --- website/docs/r/elasticache_replication_group.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index e4ae4a3fc92..4603c9a5889 100644 --- 
a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -104,8 +104,8 @@ Cluster Mode (`cluster_mode`) supports the following: The following attributes are exported: * `id` - The ID of the ElastiCache Replication Group. -* `configuration_endpoint_address` - The address of the endpoint for the primary node in the replication group. If Redis, only present when cluster mode is disabled. -* `primary_endpoint_address` - (Redis only) The address of the replication group configuration endpoint when cluster mode is enabled. +* `configuration_endpoint_address` - The address of the replication group configuration endpoint when cluster mode is enabled. +* `primary_endpoint_address` - (Redis only) The address of the endpoint for the primary node in the replication group, if the cluster mode is disabled. ## Import From 7de7f436e39145c3186a50f91ee6e947fab8c7c1 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 20 Dec 2017 11:53:14 -0600 Subject: [PATCH 035/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 61ee2d90506..60dd303ee79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ BUG FIXES: * resource/cognito_user_pool: Update Cognito email message length to 20,000 [GH-2692] +* resource/aws_volume_attachment: Changing device name without changing volume or instance ID now correctly produces a diff [GH-2720] ## 1.6.0 (December 18, 2017) From 9eee083057ad7be011b3cbadf71e455683c2369c Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 20 Dec 2017 11:55:45 -0600 Subject: [PATCH 036/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 60dd303ee79..7ec5ea5826f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ BUG FIXES: * resource/cognito_user_pool: Update Cognito email message length to 20,000 [GH-2692] * resource/aws_volume_attachment: 
Changing device name without changing volume or instance ID now correctly produces a diff [GH-2720] +* resource/aws_cognito_user_pool: The ARN for the pool is now computed and exposed as an attribute [GH-2723] ## 1.6.0 (December 18, 2017) From de2cf3d9e67d788990ca098a97198dd6af8de7c2 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 20 Dec 2017 12:00:48 -0600 Subject: [PATCH 037/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ec5ea5826f..fcf2bc9155f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ BUG FIXES: * resource/cognito_user_pool: Update Cognito email message length to 20,000 [GH-2692] * resource/aws_volume_attachment: Changing device name without changing volume or instance ID now correctly produces a diff [GH-2720] * resource/aws_cognito_user_pool: The ARN for the pool is now computed and exposed as an attribute [GH-2723] +* resource/aws_s3_bucket_object: Object tagging is now supported in GovCloud [GH-2665] ## 1.6.0 (December 18, 2017) From 9c46cc6366845b51f6e2f31ffe03a6830141d2dd Mon Sep 17 00:00:00 2001 From: Amanpreet Singh Date: Thu, 21 Dec 2017 00:18:48 +0530 Subject: [PATCH 038/350] Add import support for kinesis firehose delivery stream (#2082) * Add import support for kinesis firehose delivery stream * Add flatteners for kinesis firehose delivery stream and cloudwatch logging options * Add support for elasticsearch and extended_s3 delivery streams * Set cloudwatch logging options only if logging is enabled * Fix processors flattener and handle nil values * Add acceptance test: not working yet * Acc test passes: import works only with resource id, not name * Fix test case breaking due to upstream changes * Run goimports * Return nil in case of an error --- ...s_kinesis_firehose_delivery_stream_test.go | 33 ++++ ...ce_aws_kinesis_firehose_delivery_stream.go | 160 +++++++++++++++++- 2 files changed, 187 insertions(+), 6 deletions(-) create mode 100644 
aws/import_aws_kinesis_firehose_delivery_stream_test.go diff --git a/aws/import_aws_kinesis_firehose_delivery_stream_test.go b/aws/import_aws_kinesis_firehose_delivery_stream_test.go new file mode 100644 index 00000000000..eb6568d7ace --- /dev/null +++ b/aws/import_aws_kinesis_firehose_delivery_stream_test.go @@ -0,0 +1,33 @@ +package aws + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSKinesisFirehoseDeliveryStream_importBasic(t *testing.T) { + resName := "aws_kinesis_firehose_delivery_stream.test_stream" + rInt := acctest.RandInt() + config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3basic, + rInt, os.Getenv("AWS_ACCOUNT_ID"), rInt, rInt, rInt) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/aws/resource_aws_kinesis_firehose_delivery_stream.go b/aws/resource_aws_kinesis_firehose_delivery_stream.go index 2dcacb71b24..eaef7ff3190 100644 --- a/aws/resource_aws_kinesis_firehose_delivery_stream.go +++ b/aws/resource_aws_kinesis_firehose_delivery_stream.go @@ -1,14 +1,17 @@ package aws import ( + "bytes" "fmt" "log" "strings" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/firehose" + "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" ) @@ -161,6 +164,142 @@ func processingConfigurationSchema() *schema.Schema { } } +func cloudwatchLoggingOptionsHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + 
buf.WriteString(fmt.Sprintf("%t-", m["enabled"].(bool))) + if m["enabled"].(bool) { + buf.WriteString(fmt.Sprintf("%s-", m["log_group_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["log_stream_name"].(string))) + } + return hashcode.String(buf.String()) +} + +func flattenCloudwatchLoggingOptions(clo firehose.CloudWatchLoggingOptions) *schema.Set { + cloudwatchLoggingOptions := map[string]interface{}{ + "enabled": *clo.Enabled, + } + if *clo.Enabled { + cloudwatchLoggingOptions["log_group_name"] = *clo.LogGroupName + cloudwatchLoggingOptions["log_stream_name"] = *clo.LogStreamName + } + return schema.NewSet(cloudwatchLoggingOptionsHash, []interface{}{cloudwatchLoggingOptions}) +} + +func flattenFirehoseS3Configuration(s3 firehose.S3DestinationDescription) []map[string]interface{} { + s3Configuration := make([]map[string]interface{}, 1) + s3Configuration[0] = map[string]interface{}{ + "role_arn": *s3.RoleARN, + "bucket_arn": *s3.BucketARN, + "prefix": *s3.Prefix, + "buffer_size": *s3.BufferingHints.SizeInMBs, + "buffer_interval": *s3.BufferingHints.IntervalInSeconds, + "compression_format": *s3.CompressionFormat, + "cloudwatch_logging_options": flattenCloudwatchLoggingOptions(*s3.CloudWatchLoggingOptions), + } + if s3.EncryptionConfiguration.KMSEncryptionConfig != nil { + s3Configuration[0]["kms_key_arn"] = *s3.EncryptionConfiguration.KMSEncryptionConfig + } + return s3Configuration +} + +func flattenProcessingConfiguration(pc firehose.ProcessingConfiguration) []map[string]interface{} { + processingConfiguration := make([]map[string]interface{}, 1) + var processors []map[string]interface{} + for i, p := range pc.Processors { + processors = append(processors, map[string]interface{}{ + "type": p.Type, + }) + for _, params := range p.Parameters { + processors[i]["parameters"] = map[string]interface{}{ + "parameter_name": params.ParameterName, + "parameter_value": params.ParameterValue, + } + } + } + processingConfiguration[0] = map[string]interface{}{ + 
"enabled": *pc.Enabled, + "processors": processors, + } + return processingConfiguration +} + +func flattenKinesisFirehoseDeliveryStream(d *schema.ResourceData, s *firehose.DeliveryStreamDescription) error { + d.Set("version_id", s.VersionId) + d.Set("arn", *s.DeliveryStreamARN) + d.Set("name", s.DeliveryStreamName) + if len(s.Destinations) > 0 { + destination := s.Destinations[0] + if destination.RedshiftDestinationDescription != nil { + d.Set("destination", "redshift") + + redshiftConfiguration := map[string]interface{}{ + "cluster_jdbcurl": *destination.RedshiftDestinationDescription.ClusterJDBCURL, + "role_arn": *destination.RedshiftDestinationDescription.RoleARN, + "username": *destination.RedshiftDestinationDescription.Username, + "data_table_name": *destination.RedshiftDestinationDescription.CopyCommand.DataTableName, + "copy_options": *destination.RedshiftDestinationDescription.CopyCommand.CopyOptions, + "data_table_columns": *destination.RedshiftDestinationDescription.CopyCommand.DataTableColumns, + "s3_backup_mode": *destination.RedshiftDestinationDescription.S3BackupMode, + "s3_backup_configuration": flattenFirehoseS3Configuration(*destination.RedshiftDestinationDescription.S3BackupDescription), + "retry_duration": *destination.RedshiftDestinationDescription.RetryOptions.DurationInSeconds, + "cloudwatch_logging_options": flattenCloudwatchLoggingOptions(*destination.RedshiftDestinationDescription.CloudWatchLoggingOptions), + } + redshiftConfList := make([]map[string]interface{}, 1) + redshiftConfList[0] = redshiftConfiguration + d.Set("redshift_configuration", redshiftConfList) + d.Set("s3_configuration", flattenFirehoseS3Configuration(*destination.RedshiftDestinationDescription.S3DestinationDescription)) + + } else if destination.ElasticsearchDestinationDescription != nil { + d.Set("destination", "elasticsearch") + + elasticsearchConfiguration := map[string]interface{}{ + "buffering_interval": 
*destination.ElasticsearchDestinationDescription.BufferingHints.IntervalInSeconds, + "buffering_size": *destination.ElasticsearchDestinationDescription.BufferingHints.SizeInMBs, + "domain_arn": *destination.ElasticsearchDestinationDescription.DomainARN, + "role_arn": *destination.ElasticsearchDestinationDescription.RoleARN, + "type_name": *destination.ElasticsearchDestinationDescription.TypeName, + "index_name": *destination.ElasticsearchDestinationDescription.IndexName, + "s3_backup_mode": *destination.ElasticsearchDestinationDescription.S3BackupMode, + "retry_duration": *destination.ElasticsearchDestinationDescription.RetryOptions.DurationInSeconds, + "index_rotation_period": *destination.ElasticsearchDestinationDescription.IndexRotationPeriod, + "cloudwatch_logging_options": flattenCloudwatchLoggingOptions(*destination.ElasticsearchDestinationDescription.CloudWatchLoggingOptions), + } + elasticsearchConfList := make([]map[string]interface{}, 1) + elasticsearchConfList[0] = elasticsearchConfiguration + d.Set("elasticsearch_configuration", elasticsearchConfList) + d.Set("s3_configuration", flattenFirehoseS3Configuration(*destination.ElasticsearchDestinationDescription.S3DestinationDescription)) + } else if destination.S3DestinationDescription != nil { + d.Set("destination", "s3") + d.Set("s3_configuration", flattenFirehoseS3Configuration(*destination.S3DestinationDescription)) + } else if destination.ExtendedS3DestinationDescription != nil { + d.Set("destination", "extended_s3") + + extendedS3Configuration := map[string]interface{}{ + "buffering_interval": *destination.ExtendedS3DestinationDescription.BufferingHints.IntervalInSeconds, + "buffering_size": *destination.ExtendedS3DestinationDescription.BufferingHints.SizeInMBs, + "bucket_arn": *destination.ExtendedS3DestinationDescription.BucketARN, + "role_arn": *destination.ExtendedS3DestinationDescription.RoleARN, + "compression_format": *destination.ExtendedS3DestinationDescription.CompressionFormat, + "prefix": 
*destination.ExtendedS3DestinationDescription.Prefix, + "s3_backup_mode": *destination.ExtendedS3DestinationDescription.S3BackupMode, + "cloudwatch_logging_options": flattenCloudwatchLoggingOptions(*destination.ExtendedS3DestinationDescription.CloudWatchLoggingOptions), + } + if destination.ExtendedS3DestinationDescription.EncryptionConfiguration.KMSEncryptionConfig != nil { + extendedS3Configuration["kms_key_arn"] = *destination.ExtendedS3DestinationDescription.EncryptionConfiguration.KMSEncryptionConfig + } + if destination.ExtendedS3DestinationDescription.ProcessingConfiguration != nil { + extendedS3Configuration["processing_configuration"] = flattenProcessingConfiguration(*destination.ExtendedS3DestinationDescription.ProcessingConfiguration) + } + extendedS3ConfList := make([]map[string]interface{}, 1) + extendedS3ConfList[0] = extendedS3Configuration + d.Set("extended_s3_configuration", extendedS3ConfList) + } + d.Set("destination_id", *destination.DestinationId) + } + return nil +} + func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { return &schema.Resource{ Create: resourceAwsKinesisFirehoseDeliveryStreamCreate, @@ -168,6 +307,17 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { Update: resourceAwsKinesisFirehoseDeliveryStreamUpdate, Delete: resourceAwsKinesisFirehoseDeliveryStreamDelete, + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + resARN, err := arn.Parse(d.Id()) + if err != nil { + return nil, err + } + d.Set("name", strings.Split(resARN.Resource, "/")[1]) + return []*schema.ResourceData{d}, nil + }, + }, + SchemaVersion: 1, MigrateState: resourceAwsKinesisFirehoseMigrateState, Schema: map[string]*schema.Schema{ @@ -1103,17 +1253,15 @@ func resourceAwsKinesisFirehoseDeliveryStreamRead(d *schema.ResourceData, meta i d.SetId("") return nil } - return fmt.Errorf("[WARN] Error reading Kinesis Firehose Delivery Stream: \"%s\", code: \"%s\"", 
awsErr.Message(), awsErr.Code()) + return fmt.Errorf("[WARN] Error reading Kinesis Firehose Delivery Stream: %s", awsErr.Error()) } return err } s := resp.DeliveryStreamDescription - d.Set("version_id", s.VersionId) - d.Set("arn", *s.DeliveryStreamARN) - if len(s.Destinations) > 0 { - destination := s.Destinations[0] - d.Set("destination_id", *destination.DestinationId) + err = flattenKinesisFirehoseDeliveryStream(d, s) + if err != nil { + return err } return nil From b384babc632a4ddcd3028e1fccd5d1b566fbe31e Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 20 Dec 2017 12:50:03 -0600 Subject: [PATCH 039/350] Update CHANGELOG.md --- CHANGELOG.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fcf2bc9155f..5e8d0680230 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,14 @@ ## 1.6.1 (Unreleased) +ENHANCEMENTS: + +* resource/aws_kinesis_firehose_delivery_stream: Import is now supported [GH-2707] +* resource/aws_cognito_user_pool: The ARN for the pool is now computed and exposed as an attribute [GH-2723] + BUG FIXES: * resource/cognito_user_pool: Update Cognito email message length to 20,000 [GH-2692] * resource/aws_volume_attachment: Changing device name without changing volume or instance ID now correctly produces a diff [GH-2720] -* resource/aws_cognito_user_pool: The ARN for the pool is now computed and exposed as an attribute [GH-2723] * resource/aws_s3_bucket_object: Object tagging is now supported in GovCloud [GH-2665] ## 1.6.0 (December 18, 2017) From 0c20a25bba4cac44a6f6596834c31e913b83b215 Mon Sep 17 00:00:00 2001 From: Charlie Duong Date: Wed, 20 Dec 2017 13:53:38 -0500 Subject: [PATCH 040/350] Use s3 bucket "id" attribute for "aws_s3_bucket_object" docs (#2725) --- website/docs/r/s3_bucket_object.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/s3_bucket_object.html.markdown b/website/docs/r/s3_bucket_object.html.markdown index 
dc6eeb025b8..eb764154870 100644 --- a/website/docs/r/s3_bucket_object.html.markdown +++ b/website/docs/r/s3_bucket_object.html.markdown @@ -38,7 +38,7 @@ resource "aws_s3_bucket" "examplebucket" { resource "aws_s3_bucket_object" "examplebucket_object" { key = "someobject" - bucket = "${aws_s3_bucket.examplebucket.bucket}" + bucket = "${aws_s3_bucket.examplebucket.id}" source = "index.html" kms_key_id = "${aws_kms_key.examplekms.arn}" } @@ -54,7 +54,7 @@ resource "aws_s3_bucket" "examplebucket" { resource "aws_s3_bucket_object" "examplebucket_object" { key = "someobject" - bucket = "${aws_s3_bucket.examplebucket.bucket}" + bucket = "${aws_s3_bucket.examplebucket.id}" source = "index.html" server_side_encryption = "aws:kms" } From f2c3ec9609a3a25df39b5c544eda121170196c43 Mon Sep 17 00:00:00 2001 From: Puneeth Nanjundaswamy Date: Thu, 21 Dec 2017 11:01:51 +0100 Subject: [PATCH 041/350] Bump aws-sdk-go to v1.12.51 --- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 3 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws-sdk-go/service/configservice/api.go | 24 + .../aws/aws-sdk-go/service/iot/api.go | 2781 ++++++++++++++++- vendor/vendor.json | 832 ++--- 5 files changed, 3106 insertions(+), 536 deletions(-) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index f503ced3ae1..25ca1927f57 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -586,8 +586,10 @@ var awsPartition = partition{ "codestar": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, @@ -749,6 +751,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, 
"ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 5066825a94b..86443c16c79 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.12.50" +const SDKVersion = "1.12.51" diff --git a/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go index 5b55bdf7d18..971fca5f07f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go @@ -6867,4 +6867,28 @@ const ( // ResourceTypeAwsCodeBuildProject is a ResourceType enum value ResourceTypeAwsCodeBuildProject = "AWS::CodeBuild::Project" + + // ResourceTypeAwsWafRateBasedRule is a ResourceType enum value + ResourceTypeAwsWafRateBasedRule = "AWS::WAF::RateBasedRule" + + // ResourceTypeAwsWafRule is a ResourceType enum value + ResourceTypeAwsWafRule = "AWS::WAF::Rule" + + // ResourceTypeAwsWafWebAcl is a ResourceType enum value + ResourceTypeAwsWafWebAcl = "AWS::WAF::WebACL" + + // ResourceTypeAwsWafregionalRateBasedRule is a ResourceType enum value + ResourceTypeAwsWafregionalRateBasedRule = "AWS::WAFRegional::RateBasedRule" + + // ResourceTypeAwsWafregionalRule is a ResourceType enum value + ResourceTypeAwsWafregionalRule = "AWS::WAFRegional::Rule" + + // ResourceTypeAwsWafregionalWebAcl is a ResourceType enum value + ResourceTypeAwsWafregionalWebAcl = "AWS::WAFRegional::WebACL" + + // ResourceTypeAwsCloudFrontDistribution is a ResourceType enum value + ResourceTypeAwsCloudFrontDistribution = "AWS::CloudFront::Distribution" + + // ResourceTypeAwsCloudFrontStreamingDistribution is a ResourceType enum value + 
ResourceTypeAwsCloudFrontStreamingDistribution = "AWS::CloudFront::StreamingDistribution" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iot/api.go b/vendor/github.com/aws/aws-sdk-go/service/iot/api.go index 5de6fe0405d..e9112b4a8a1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iot/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iot/api.go @@ -1272,6 +1272,100 @@ func (c *IoT) CreateKeysAndCertificateWithContext(ctx aws.Context, input *Create return out, req.Send() } +const opCreateOTAUpdate = "CreateOTAUpdate" + +// CreateOTAUpdateRequest generates a "aws/request.Request" representing the +// client's request for the CreateOTAUpdate operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateOTAUpdate for more information on using the CreateOTAUpdate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateOTAUpdateRequest method. +// req, resp := client.CreateOTAUpdateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) CreateOTAUpdateRequest(input *CreateOTAUpdateInput) (req *request.Request, output *CreateOTAUpdateOutput) { + op := &request.Operation{ + Name: opCreateOTAUpdate, + HTTPMethod: "POST", + HTTPPath: "/otaUpdates/{otaUpdateId}", + } + + if input == nil { + input = &CreateOTAUpdateInput{} + } + + output = &CreateOTAUpdateOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateOTAUpdate API operation for AWS IoT. +// +// Creates an AWS IoT OTAUpdate on a target group of things or groups. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation CreateOTAUpdate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The resource already exists. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +func (c *IoT) CreateOTAUpdate(input *CreateOTAUpdateInput) (*CreateOTAUpdateOutput, error) { + req, out := c.CreateOTAUpdateRequest(input) + return out, req.Send() +} + +// CreateOTAUpdateWithContext is the same as CreateOTAUpdate with the addition of +// the ability to pass a context and additional request options. +// +// See CreateOTAUpdate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *IoT) CreateOTAUpdateWithContext(ctx aws.Context, input *CreateOTAUpdateInput, opts ...request.Option) (*CreateOTAUpdateOutput, error) { + req, out := c.CreateOTAUpdateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreatePolicy = "CreatePolicy" // CreatePolicyRequest generates a "aws/request.Request" representing the @@ -1568,6 +1662,106 @@ func (c *IoT) CreateRoleAliasWithContext(ctx aws.Context, input *CreateRoleAlias return out, req.Send() } +const opCreateStream = "CreateStream" + +// CreateStreamRequest generates a "aws/request.Request" representing the +// client's request for the CreateStream operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateStream for more information on using the CreateStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateStreamRequest method. +// req, resp := client.CreateStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) CreateStreamRequest(input *CreateStreamInput) (req *request.Request, output *CreateStreamOutput) { + op := &request.Operation{ + Name: opCreateStream, + HTTPMethod: "POST", + HTTPPath: "/streams/{streamId}", + } + + if input == nil { + input = &CreateStreamInput{} + } + + output = &CreateStreamOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateStream API operation for AWS IoT. +// +// Creates a stream for delivering one or more large files in chunks over MQTT. 
+// A stream transports data bytes in chunks or blocks packaged as MQTT messages +// from a source like S3. You can have one or more files associated with a stream. +// The total size of a file associated with the stream cannot exceed more than +// 2 MB. The stream will be created with version 0. If a stream is created with +// the same streamID as a stream that existed and was deleted within last 90 +// days, we will resurrect that old stream by incrementing the version by 1. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation CreateStream for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The resource already exists. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) CreateStream(input *CreateStreamInput) (*CreateStreamOutput, error) { + req, out := c.CreateStreamRequest(input) + return out, req.Send() +} + +// CreateStreamWithContext is the same as CreateStream with the addition of +// the ability to pass a context and additional request options. +// +// See CreateStream for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) CreateStreamWithContext(ctx aws.Context, input *CreateStreamInput, opts ...request.Option) (*CreateStreamOutput, error) { + req, out := c.CreateStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateThing = "CreateThing" // CreateThingRequest generates a "aws/request.Request" representing the @@ -2222,6 +2416,97 @@ func (c *IoT) DeleteCertificateWithContext(ctx aws.Context, input *DeleteCertifi return out, req.Send() } +const opDeleteOTAUpdate = "DeleteOTAUpdate" + +// DeleteOTAUpdateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteOTAUpdate operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteOTAUpdate for more information on using the DeleteOTAUpdate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteOTAUpdateRequest method. 
+// req, resp := client.DeleteOTAUpdateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) DeleteOTAUpdateRequest(input *DeleteOTAUpdateInput) (req *request.Request, output *DeleteOTAUpdateOutput) { + op := &request.Operation{ + Name: opDeleteOTAUpdate, + HTTPMethod: "DELETE", + HTTPPath: "/otaUpdates/{otaUpdateId}", + } + + if input == nil { + input = &DeleteOTAUpdateInput{} + } + + output = &DeleteOTAUpdateOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteOTAUpdate API operation for AWS IoT. +// +// Delete an OTA update. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation DeleteOTAUpdate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +func (c *IoT) DeleteOTAUpdate(input *DeleteOTAUpdateInput) (*DeleteOTAUpdateOutput, error) { + req, out := c.DeleteOTAUpdateRequest(input) + return out, req.Send() +} + +// DeleteOTAUpdateWithContext is the same as DeleteOTAUpdate with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteOTAUpdate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) DeleteOTAUpdateWithContext(ctx aws.Context, input *DeleteOTAUpdateInput, opts ...request.Option) (*DeleteOTAUpdateOutput, error) { + req, out := c.DeleteOTAUpdateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeletePolicy = "DeletePolicy" // DeletePolicyRequest generates a "aws/request.Request" representing the @@ -2609,64 +2894,63 @@ func (c *IoT) DeleteRoleAliasWithContext(ctx aws.Context, input *DeleteRoleAlias return out, req.Send() } -const opDeleteThing = "DeleteThing" +const opDeleteStream = "DeleteStream" -// DeleteThingRequest generates a "aws/request.Request" representing the -// client's request for the DeleteThing operation. The "output" return +// DeleteStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStream operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteThing for more information on using the DeleteThing +// See DeleteStream for more information on using the DeleteStream // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteThingRequest method. 
-// req, resp := client.DeleteThingRequest(params) +// // Example sending a request using the DeleteStreamRequest method. +// req, resp := client.DeleteStreamRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *IoT) DeleteThingRequest(input *DeleteThingInput) (req *request.Request, output *DeleteThingOutput) { +func (c *IoT) DeleteStreamRequest(input *DeleteStreamInput) (req *request.Request, output *DeleteStreamOutput) { op := &request.Operation{ - Name: opDeleteThing, + Name: opDeleteStream, HTTPMethod: "DELETE", - HTTPPath: "/things/{thingName}", + HTTPPath: "/streams/{streamId}", } if input == nil { - input = &DeleteThingInput{} + input = &DeleteStreamInput{} } - output = &DeleteThingOutput{} + output = &DeleteStreamOutput{} req = c.newRequest(op, input, output) return } -// DeleteThing API operation for AWS IoT. +// DeleteStream API operation for AWS IoT. // -// Deletes the specified thing. +// Deletes a stream. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS IoT's -// API operation DeleteThing for usage and error information. +// API operation DeleteStream for usage and error information. // // Returned Error Codes: // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // The specified resource does not exist. // -// * ErrCodeVersionConflictException "VersionConflictException" -// An exception thrown when the version of a thing passed to a command is different -// than the version specified with the --version parameter. +// * ErrCodeDeleteConflictException "DeleteConflictException" +// You can't delete the resource because it is attached to one or more resources. // // * ErrCodeInvalidRequestException "InvalidRequestException" // The request is not valid. 
@@ -2683,46 +2967,141 @@ func (c *IoT) DeleteThingRequest(input *DeleteThingInput) (req *request.Request, // * ErrCodeInternalFailureException "InternalFailureException" // An unexpected error has occurred. // -func (c *IoT) DeleteThing(input *DeleteThingInput) (*DeleteThingOutput, error) { - req, out := c.DeleteThingRequest(input) +func (c *IoT) DeleteStream(input *DeleteStreamInput) (*DeleteStreamOutput, error) { + req, out := c.DeleteStreamRequest(input) return out, req.Send() } -// DeleteThingWithContext is the same as DeleteThing with the addition of +// DeleteStreamWithContext is the same as DeleteStream with the addition of // the ability to pass a context and additional request options. // -// See DeleteThing for details on how to use this API operation. +// See DeleteStream for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *IoT) DeleteThingWithContext(ctx aws.Context, input *DeleteThingInput, opts ...request.Option) (*DeleteThingOutput, error) { - req, out := c.DeleteThingRequest(input) +func (c *IoT) DeleteStreamWithContext(ctx aws.Context, input *DeleteStreamInput, opts ...request.Option) (*DeleteStreamOutput, error) { + req, out := c.DeleteStreamRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteThingGroup = "DeleteThingGroup" +const opDeleteThing = "DeleteThing" -// DeleteThingGroupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteThingGroup operation. The "output" return +// DeleteThingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteThing operation. 
The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteThingGroup for more information on using the DeleteThingGroup +// See DeleteThing for more information on using the DeleteThing // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteThingGroupRequest method. -// req, resp := client.DeleteThingGroupRequest(params) +// // Example sending a request using the DeleteThingRequest method. +// req, resp := client.DeleteThingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) DeleteThingRequest(input *DeleteThingInput) (req *request.Request, output *DeleteThingOutput) { + op := &request.Operation{ + Name: opDeleteThing, + HTTPMethod: "DELETE", + HTTPPath: "/things/{thingName}", + } + + if input == nil { + input = &DeleteThingInput{} + } + + output = &DeleteThingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteThing API operation for AWS IoT. +// +// Deletes the specified thing. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation DeleteThing for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. 
+// +// * ErrCodeVersionConflictException "VersionConflictException" +// An exception thrown when the version of a thing passed to a command is different +// than the version specified with the --version parameter. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) DeleteThing(input *DeleteThingInput) (*DeleteThingOutput, error) { + req, out := c.DeleteThingRequest(input) + return out, req.Send() +} + +// DeleteThingWithContext is the same as DeleteThing with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteThing for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) DeleteThingWithContext(ctx aws.Context, input *DeleteThingInput, opts ...request.Option) (*DeleteThingOutput, error) { + req, out := c.DeleteThingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteThingGroup = "DeleteThingGroup" + +// DeleteThingGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteThingGroup operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteThingGroup for more information on using the DeleteThingGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteThingGroupRequest method. +// req, resp := client.DeleteThingGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled @@ -4028,6 +4407,97 @@ func (c *IoT) DescribeRoleAliasWithContext(ctx aws.Context, input *DescribeRoleA return out, req.Send() } +const opDescribeStream = "DescribeStream" + +// DescribeStreamRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStream operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeStream for more information on using the DescribeStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeStreamRequest method. 
+// req, resp := client.DescribeStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) DescribeStreamRequest(input *DescribeStreamInput) (req *request.Request, output *DescribeStreamOutput) { + op := &request.Operation{ + Name: opDescribeStream, + HTTPMethod: "GET", + HTTPPath: "/streams/{streamId}", + } + + if input == nil { + input = &DescribeStreamInput{} + } + + output = &DescribeStreamOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeStream API operation for AWS IoT. +// +// Gets information about a stream. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation DescribeStream for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) DescribeStream(input *DescribeStreamInput) (*DescribeStreamOutput, error) { + req, out := c.DescribeStreamRequest(input) + return out, req.Send() +} + +// DescribeStreamWithContext is the same as DescribeStream with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeStream for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) DescribeStreamWithContext(ctx aws.Context, input *DescribeStreamInput, opts ...request.Option) (*DescribeStreamOutput, error) { + req, out := c.DescribeStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeThing = "DescribeThing" // DescribeThingRequest generates a "aws/request.Request" representing the @@ -5188,6 +5658,97 @@ func (c *IoT) GetLoggingOptionsWithContext(ctx aws.Context, input *GetLoggingOpt return out, req.Send() } +const opGetOTAUpdate = "GetOTAUpdate" + +// GetOTAUpdateRequest generates a "aws/request.Request" representing the +// client's request for the GetOTAUpdate operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetOTAUpdate for more information on using the GetOTAUpdate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetOTAUpdateRequest method. 
+// req, resp := client.GetOTAUpdateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) GetOTAUpdateRequest(input *GetOTAUpdateInput) (req *request.Request, output *GetOTAUpdateOutput) { + op := &request.Operation{ + Name: opGetOTAUpdate, + HTTPMethod: "GET", + HTTPPath: "/otaUpdates/{otaUpdateId}", + } + + if input == nil { + input = &GetOTAUpdateInput{} + } + + output = &GetOTAUpdateOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetOTAUpdate API operation for AWS IoT. +// +// Gets an OTA update. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation GetOTAUpdate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +func (c *IoT) GetOTAUpdate(input *GetOTAUpdateInput) (*GetOTAUpdateOutput, error) { + req, out := c.GetOTAUpdateRequest(input) + return out, req.Send() +} + +// GetOTAUpdateWithContext is the same as GetOTAUpdate with the addition of +// the ability to pass a context and additional request options. +// +// See GetOTAUpdate for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) GetOTAUpdateWithContext(ctx aws.Context, input *GetOTAUpdateInput, opts ...request.Option) (*GetOTAUpdateOutput, error) { + req, out := c.GetOTAUpdateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetPolicy = "GetPolicy" // GetPolicyRequest generates a "aws/request.Request" representing the @@ -6421,56 +6982,56 @@ func (c *IoT) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts .. return out, req.Send() } -const opListOutgoingCertificates = "ListOutgoingCertificates" +const opListOTAUpdates = "ListOTAUpdates" -// ListOutgoingCertificatesRequest generates a "aws/request.Request" representing the -// client's request for the ListOutgoingCertificates operation. The "output" return +// ListOTAUpdatesRequest generates a "aws/request.Request" representing the +// client's request for the ListOTAUpdates operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListOutgoingCertificates for more information on using the ListOutgoingCertificates +// See ListOTAUpdates for more information on using the ListOTAUpdates // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListOutgoingCertificatesRequest method. 
-// req, resp := client.ListOutgoingCertificatesRequest(params) +// // Example sending a request using the ListOTAUpdatesRequest method. +// req, resp := client.ListOTAUpdatesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *IoT) ListOutgoingCertificatesRequest(input *ListOutgoingCertificatesInput) (req *request.Request, output *ListOutgoingCertificatesOutput) { +func (c *IoT) ListOTAUpdatesRequest(input *ListOTAUpdatesInput) (req *request.Request, output *ListOTAUpdatesOutput) { op := &request.Operation{ - Name: opListOutgoingCertificates, + Name: opListOTAUpdates, HTTPMethod: "GET", - HTTPPath: "/certificates-out-going", + HTTPPath: "/otaUpdates", } if input == nil { - input = &ListOutgoingCertificatesInput{} + input = &ListOTAUpdatesInput{} } - output = &ListOutgoingCertificatesOutput{} + output = &ListOTAUpdatesOutput{} req = c.newRequest(op, input, output) return } -// ListOutgoingCertificates API operation for AWS IoT. +// ListOTAUpdates API operation for AWS IoT. // -// Lists certificates that are being transferred but not yet accepted. +// Lists OTA updates. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS IoT's -// API operation ListOutgoingCertificates for usage and error information. +// API operation ListOTAUpdates for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidRequestException "InvalidRequestException" @@ -6482,14 +7043,102 @@ func (c *IoT) ListOutgoingCertificatesRequest(input *ListOutgoingCertificatesInp // * ErrCodeUnauthorizedException "UnauthorizedException" // You are not authorized to perform this operation. // -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// The service is temporarily unavailable. 
-// // * ErrCodeInternalFailureException "InternalFailureException" // An unexpected error has occurred. // -func (c *IoT) ListOutgoingCertificates(input *ListOutgoingCertificatesInput) (*ListOutgoingCertificatesOutput, error) { - req, out := c.ListOutgoingCertificatesRequest(input) +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +func (c *IoT) ListOTAUpdates(input *ListOTAUpdatesInput) (*ListOTAUpdatesOutput, error) { + req, out := c.ListOTAUpdatesRequest(input) + return out, req.Send() +} + +// ListOTAUpdatesWithContext is the same as ListOTAUpdates with the addition of +// the ability to pass a context and additional request options. +// +// See ListOTAUpdates for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListOTAUpdatesWithContext(ctx aws.Context, input *ListOTAUpdatesInput, opts ...request.Option) (*ListOTAUpdatesOutput, error) { + req, out := c.ListOTAUpdatesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListOutgoingCertificates = "ListOutgoingCertificates" + +// ListOutgoingCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the ListOutgoingCertificates operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListOutgoingCertificates for more information on using the ListOutgoingCertificates +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListOutgoingCertificatesRequest method. +// req, resp := client.ListOutgoingCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) ListOutgoingCertificatesRequest(input *ListOutgoingCertificatesInput) (req *request.Request, output *ListOutgoingCertificatesOutput) { + op := &request.Operation{ + Name: opListOutgoingCertificates, + HTTPMethod: "GET", + HTTPPath: "/certificates-out-going", + } + + if input == nil { + input = &ListOutgoingCertificatesInput{} + } + + output = &ListOutgoingCertificatesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListOutgoingCertificates API operation for AWS IoT. +// +// Lists certificates that are being transferred but not yet accepted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation ListOutgoingCertificates for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. 
+// +func (c *IoT) ListOutgoingCertificates(input *ListOutgoingCertificatesInput) (*ListOutgoingCertificatesOutput, error) { + req, out := c.ListOutgoingCertificatesRequest(input) return out, req.Send() } @@ -7060,6 +7709,94 @@ func (c *IoT) ListRoleAliasesWithContext(ctx aws.Context, input *ListRoleAliases return out, req.Send() } +const opListStreams = "ListStreams" + +// ListStreamsRequest generates a "aws/request.Request" representing the +// client's request for the ListStreams operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListStreams for more information on using the ListStreams +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListStreamsRequest method. +// req, resp := client.ListStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) ListStreamsRequest(input *ListStreamsInput) (req *request.Request, output *ListStreamsOutput) { + op := &request.Operation{ + Name: opListStreams, + HTTPMethod: "GET", + HTTPPath: "/streams", + } + + if input == nil { + input = &ListStreamsInput{} + } + + output = &ListStreamsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListStreams API operation for AWS IoT. +// +// Lists all of the streams in your AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for AWS IoT's +// API operation ListStreams for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) ListStreams(input *ListStreamsInput) (*ListStreamsOutput, error) { + req, out := c.ListStreamsRequest(input) + return out, req.Send() +} + +// ListStreamsWithContext is the same as ListStreams with the addition of +// the ability to pass a context and additional request options. +// +// See ListStreams for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListStreamsWithContext(ctx aws.Context, input *ListStreamsInput, opts ...request.Option) (*ListStreamsOutput, error) { + req, out := c.ListStreamsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opListTargetsForPolicy = "ListTargetsForPolicy" // ListTargetsForPolicyRequest generates a "aws/request.Request" representing the @@ -10154,6 +10891,97 @@ func (c *IoT) UpdateRoleAliasWithContext(ctx aws.Context, input *UpdateRoleAlias return out, req.Send() } +const opUpdateStream = "UpdateStream" + +// UpdateStreamRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStream operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateStream for more information on using the UpdateStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateStreamRequest method. +// req, resp := client.UpdateStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) UpdateStreamRequest(input *UpdateStreamInput) (req *request.Request, output *UpdateStreamOutput) { + op := &request.Operation{ + Name: opUpdateStream, + HTTPMethod: "PUT", + HTTPPath: "/streams/{streamId}", + } + + if input == nil { + input = &UpdateStreamInput{} + } + + output = &UpdateStreamOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateStream API operation for AWS IoT. +// +// Updates an existing stream. The stream version will be incremented by one. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for AWS IoT's +// API operation UpdateStream for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) UpdateStream(input *UpdateStreamInput) (*UpdateStreamOutput, error) { + req, out := c.UpdateStreamRequest(input) + return out, req.Send() +} + +// UpdateStreamWithContext is the same as UpdateStream with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) UpdateStreamWithContext(ctx aws.Context, input *UpdateStreamInput, opts ...request.Option) (*UpdateStreamOutput, error) { + req, out := c.UpdateStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateThing = "UpdateThing" // UpdateThingRequest generates a "aws/request.Request" representing the @@ -12030,40 +12858,195 @@ func (s *CloudwatchMetricAction) SetRoleArn(v string) *CloudwatchMetricAction { return s } -// Configuration. 
-type Configuration struct { +// Describes the method to use when code signing a file. +type CodeSigning struct { _ struct{} `type:"structure"` - // True to enable the configuration. - Enabled *bool `type:"boolean"` + // The ID of the AWSSignerJob which was created to sign the file. + AwsSignerJobId *string `locationName:"awsSignerJobId" type:"string"` + + // A custom method for code signing a file. + CustomCodeSigning *CustomCodeSigning `locationName:"customCodeSigning" type:"structure"` } // String returns the string representation -func (s Configuration) String() string { +func (s CodeSigning) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Configuration) GoString() string { +func (s CodeSigning) GoString() string { return s.String() } -// SetEnabled sets the Enabled field's value. -func (s *Configuration) SetEnabled(v bool) *Configuration { - s.Enabled = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *CodeSigning) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CodeSigning"} + if s.CustomCodeSigning != nil { + if err := s.CustomCodeSigning.Validate(); err != nil { + invalidParams.AddNested("CustomCodeSigning", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsSignerJobId sets the AwsSignerJobId field's value. +func (s *CodeSigning) SetAwsSignerJobId(v string) *CodeSigning { + s.AwsSignerJobId = &v return s } -type CreateAuthorizerInput struct { +// SetCustomCodeSigning sets the CustomCodeSigning field's value. +func (s *CodeSigning) SetCustomCodeSigning(v *CustomCodeSigning) *CodeSigning { + s.CustomCodeSigning = v + return s +} + +// Describes the certificate chain being used when code signing a file. +type CodeSigningCertificateChain struct { _ struct{} `type:"structure"` - // The ARN of the authorizer's Lambda function. 
- // - // AuthorizerFunctionArn is a required field - AuthorizerFunctionArn *string `locationName:"authorizerFunctionArn" type:"string" required:"true"` + // The name of the certificate. + CertificateName *string `locationName:"certificateName" type:"string"` - // The authorizer name. - // + // A base64 encoded binary representation of the code signing certificate chain. + InlineDocument *string `locationName:"inlineDocument" type:"string"` + + // A stream of the certificate chain files. + Stream *Stream `locationName:"stream" type:"structure"` +} + +// String returns the string representation +func (s CodeSigningCertificateChain) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CodeSigningCertificateChain) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CodeSigningCertificateChain) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CodeSigningCertificateChain"} + if s.Stream != nil { + if err := s.Stream.Validate(); err != nil { + invalidParams.AddNested("Stream", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateName sets the CertificateName field's value. +func (s *CodeSigningCertificateChain) SetCertificateName(v string) *CodeSigningCertificateChain { + s.CertificateName = &v + return s +} + +// SetInlineDocument sets the InlineDocument field's value. +func (s *CodeSigningCertificateChain) SetInlineDocument(v string) *CodeSigningCertificateChain { + s.InlineDocument = &v + return s +} + +// SetStream sets the Stream field's value. +func (s *CodeSigningCertificateChain) SetStream(v *Stream) *CodeSigningCertificateChain { + s.Stream = v + return s +} + +// Describes the signature for a file. 
+type CodeSigningSignature struct { + _ struct{} `type:"structure"` + + // A base64 encoded binary representation of the code signing signature. + // + // InlineDocument is automatically base64 encoded/decoded by the SDK. + InlineDocument []byte `locationName:"inlineDocument" type:"blob"` + + // A stream of the code signing signature. + Stream *Stream `locationName:"stream" type:"structure"` +} + +// String returns the string representation +func (s CodeSigningSignature) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CodeSigningSignature) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CodeSigningSignature) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CodeSigningSignature"} + if s.Stream != nil { + if err := s.Stream.Validate(); err != nil { + invalidParams.AddNested("Stream", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInlineDocument sets the InlineDocument field's value. +func (s *CodeSigningSignature) SetInlineDocument(v []byte) *CodeSigningSignature { + s.InlineDocument = v + return s +} + +// SetStream sets the Stream field's value. +func (s *CodeSigningSignature) SetStream(v *Stream) *CodeSigningSignature { + s.Stream = v + return s +} + +// Configuration. +type Configuration struct { + _ struct{} `type:"structure"` + + // True to enable the configuration. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s Configuration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Configuration) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. 
+func (s *Configuration) SetEnabled(v bool) *Configuration { + s.Enabled = &v + return s +} + +type CreateAuthorizerInput struct { + _ struct{} `type:"structure"` + + // The ARN of the authorizer's Lambda function. + // + // AuthorizerFunctionArn is a required field + AuthorizerFunctionArn *string `locationName:"authorizerFunctionArn" type:"string" required:"true"` + + // The authorizer name. + // // AuthorizerName is a required field AuthorizerName *string `location:"uri" locationName:"authorizerName" min:"1" type:"string" required:"true"` @@ -12534,6 +13517,199 @@ func (s *CreateKeysAndCertificateOutput) SetKeyPair(v *KeyPair) *CreateKeysAndCe return s } +type CreateOTAUpdateInput struct { + _ struct{} `type:"structure"` + + // A list of additional OTA update parameters which are name-value pairs. + AdditionalParameters map[string]*string `locationName:"additionalParameters" type:"map"` + + // The description of the OTA update. + Description *string `locationName:"description" type:"string"` + + // The files to be streamed by the OTA update. + // + // Files is a required field + Files []*OTAUpdateFile `locationName:"files" min:"1" type:"list" required:"true"` + + // The ID of the OTA update to be created. + // + // OtaUpdateId is a required field + OtaUpdateId *string `location:"uri" locationName:"otaUpdateId" min:"1" type:"string" required:"true"` + + // The IAM role that allows access to the AWS IoT Jobs service. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` + + // Specifies whether the update will continue to run (CONTINUOUS), or will be + // complete after all the things specified as targets have completed the update + // (SNAPSHOT). If continuous, the update may also be run on a thing when a change + // is detected in a target. 
For example, an update will run on a thing when + // the thing is added to a target group, even after the update was completed + // by all things originally in the group. Valid values: CONTINUOUS | SNAPSHOT. + TargetSelection *string `locationName:"targetSelection" type:"string" enum:"TargetSelection"` + + // The targeted devices to receive OTA updates. + // + // Targets is a required field + Targets []*string `locationName:"targets" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateOTAUpdateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOTAUpdateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateOTAUpdateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateOTAUpdateInput"} + if s.Files == nil { + invalidParams.Add(request.NewErrParamRequired("Files")) + } + if s.Files != nil && len(s.Files) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Files", 1)) + } + if s.OtaUpdateId == nil { + invalidParams.Add(request.NewErrParamRequired("OtaUpdateId")) + } + if s.OtaUpdateId != nil && len(*s.OtaUpdateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OtaUpdateId", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.Targets == nil { + invalidParams.Add(request.NewErrParamRequired("Targets")) + } + if s.Targets != nil && len(s.Targets) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Targets", 1)) + } + if s.Files != nil { + for i, v := range s.Files { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Files", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() 
> 0 { + return invalidParams + } + return nil +} + +// SetAdditionalParameters sets the AdditionalParameters field's value. +func (s *CreateOTAUpdateInput) SetAdditionalParameters(v map[string]*string) *CreateOTAUpdateInput { + s.AdditionalParameters = v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateOTAUpdateInput) SetDescription(v string) *CreateOTAUpdateInput { + s.Description = &v + return s +} + +// SetFiles sets the Files field's value. +func (s *CreateOTAUpdateInput) SetFiles(v []*OTAUpdateFile) *CreateOTAUpdateInput { + s.Files = v + return s +} + +// SetOtaUpdateId sets the OtaUpdateId field's value. +func (s *CreateOTAUpdateInput) SetOtaUpdateId(v string) *CreateOTAUpdateInput { + s.OtaUpdateId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateOTAUpdateInput) SetRoleArn(v string) *CreateOTAUpdateInput { + s.RoleArn = &v + return s +} + +// SetTargetSelection sets the TargetSelection field's value. +func (s *CreateOTAUpdateInput) SetTargetSelection(v string) *CreateOTAUpdateInput { + s.TargetSelection = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *CreateOTAUpdateInput) SetTargets(v []*string) *CreateOTAUpdateInput { + s.Targets = v + return s +} + +type CreateOTAUpdateOutput struct { + _ struct{} `type:"structure"` + + // The AWS IoT job ARN associated with the OTA update. + AwsIotJobArn *string `locationName:"awsIotJobArn" type:"string"` + + // The AWS IoT job ID associated with the OTA update. + AwsIotJobId *string `locationName:"awsIotJobId" type:"string"` + + // The OTA update ARN. + OtaUpdateArn *string `locationName:"otaUpdateArn" type:"string"` + + // The OTA update ID. + OtaUpdateId *string `locationName:"otaUpdateId" min:"1" type:"string"` + + // The OTA update status. 
+ OtaUpdateStatus *string `locationName:"otaUpdateStatus" type:"string" enum:"OTAUpdateStatus"` +} + +// String returns the string representation +func (s CreateOTAUpdateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOTAUpdateOutput) GoString() string { + return s.String() +} + +// SetAwsIotJobArn sets the AwsIotJobArn field's value. +func (s *CreateOTAUpdateOutput) SetAwsIotJobArn(v string) *CreateOTAUpdateOutput { + s.AwsIotJobArn = &v + return s +} + +// SetAwsIotJobId sets the AwsIotJobId field's value. +func (s *CreateOTAUpdateOutput) SetAwsIotJobId(v string) *CreateOTAUpdateOutput { + s.AwsIotJobId = &v + return s +} + +// SetOtaUpdateArn sets the OtaUpdateArn field's value. +func (s *CreateOTAUpdateOutput) SetOtaUpdateArn(v string) *CreateOTAUpdateOutput { + s.OtaUpdateArn = &v + return s +} + +// SetOtaUpdateId sets the OtaUpdateId field's value. +func (s *CreateOTAUpdateOutput) SetOtaUpdateId(v string) *CreateOTAUpdateOutput { + s.OtaUpdateId = &v + return s +} + +// SetOtaUpdateStatus sets the OtaUpdateStatus field's value. +func (s *CreateOTAUpdateOutput) SetOtaUpdateStatus(v string) *CreateOTAUpdateOutput { + s.OtaUpdateStatus = &v + return s +} + // The input for the CreatePolicy operation. type CreatePolicyInput struct { _ struct{} `type:"structure"` @@ -12864,6 +14040,151 @@ func (s *CreateRoleAliasOutput) SetRoleAliasArn(v string) *CreateRoleAliasOutput return s } +type CreateStreamInput struct { + _ struct{} `type:"structure"` + + // A description of the stream. + Description *string `locationName:"description" type:"string"` + + // The files to stream. + // + // Files is a required field + Files []*StreamFile `locationName:"files" min:"1" type:"list" required:"true"` + + // An IAM role that allows the IoT service principal assumes to access your + // S3 files. 
+ // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` + + // The stream ID. + // + // StreamId is a required field + StreamId *string `location:"uri" locationName:"streamId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStreamInput"} + if s.Files == nil { + invalidParams.Add(request.NewErrParamRequired("Files")) + } + if s.Files != nil && len(s.Files) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Files", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.StreamId == nil { + invalidParams.Add(request.NewErrParamRequired("StreamId")) + } + if s.StreamId != nil && len(*s.StreamId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamId", 1)) + } + if s.Files != nil { + for i, v := range s.Files { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Files", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateStreamInput) SetDescription(v string) *CreateStreamInput { + s.Description = &v + return s +} + +// SetFiles sets the Files field's value. +func (s *CreateStreamInput) SetFiles(v []*StreamFile) *CreateStreamInput { + s.Files = v + return s +} + +// SetRoleArn sets the RoleArn field's value. 
+func (s *CreateStreamInput) SetRoleArn(v string) *CreateStreamInput { + s.RoleArn = &v + return s +} + +// SetStreamId sets the StreamId field's value. +func (s *CreateStreamInput) SetStreamId(v string) *CreateStreamInput { + s.StreamId = &v + return s +} + +type CreateStreamOutput struct { + _ struct{} `type:"structure"` + + // A description of the stream. + Description *string `locationName:"description" type:"string"` + + // The stream ARN. + StreamArn *string `locationName:"streamArn" type:"string"` + + // The stream ID. + StreamId *string `locationName:"streamId" min:"1" type:"string"` + + // The version of the stream. + StreamVersion *int64 `locationName:"streamVersion" type:"integer"` +} + +// String returns the string representation +func (s CreateStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *CreateStreamOutput) SetDescription(v string) *CreateStreamOutput { + s.Description = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *CreateStreamOutput) SetStreamArn(v string) *CreateStreamOutput { + s.StreamArn = &v + return s +} + +// SetStreamId sets the StreamId field's value. +func (s *CreateStreamOutput) SetStreamId(v string) *CreateStreamOutput { + s.StreamId = &v + return s +} + +// SetStreamVersion sets the StreamVersion field's value. +func (s *CreateStreamOutput) SetStreamVersion(v int64) *CreateStreamOutput { + s.StreamVersion = &v + return s +} + type CreateThingGroupInput struct { _ struct{} `type:"structure"` @@ -13245,6 +14566,77 @@ func (s CreateTopicRuleOutput) GoString() string { return s.String() } +// Describes a custom method used to code sign a file. +type CustomCodeSigning struct { + _ struct{} `type:"structure"` + + // The certificate chain. 
+ CertificateChain *CodeSigningCertificateChain `locationName:"certificateChain" type:"structure"` + + // The hash algorithm used to code sign the file. + HashAlgorithm *string `locationName:"hashAlgorithm" type:"string"` + + // The signature for the file. + Signature *CodeSigningSignature `locationName:"signature" type:"structure"` + + // The signature algorithm used to code sign the file. + SignatureAlgorithm *string `locationName:"signatureAlgorithm" type:"string"` +} + +// String returns the string representation +func (s CustomCodeSigning) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomCodeSigning) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomCodeSigning) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomCodeSigning"} + if s.CertificateChain != nil { + if err := s.CertificateChain.Validate(); err != nil { + invalidParams.AddNested("CertificateChain", err.(request.ErrInvalidParams)) + } + } + if s.Signature != nil { + if err := s.Signature.Validate(); err != nil { + invalidParams.AddNested("Signature", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateChain sets the CertificateChain field's value. +func (s *CustomCodeSigning) SetCertificateChain(v *CodeSigningCertificateChain) *CustomCodeSigning { + s.CertificateChain = v + return s +} + +// SetHashAlgorithm sets the HashAlgorithm field's value. +func (s *CustomCodeSigning) SetHashAlgorithm(v string) *CustomCodeSigning { + s.HashAlgorithm = &v + return s +} + +// SetSignature sets the Signature field's value. +func (s *CustomCodeSigning) SetSignature(v *CodeSigningSignature) *CustomCodeSigning { + s.Signature = v + return s +} + +// SetSignatureAlgorithm sets the SignatureAlgorithm field's value. 
+func (s *CustomCodeSigning) SetSignatureAlgorithm(v string) *CustomCodeSigning { + s.SignatureAlgorithm = &v + return s +} + type DeleteAuthorizerInput struct { _ struct{} `type:"structure"` @@ -13422,6 +14814,61 @@ func (s DeleteCertificateOutput) GoString() string { return s.String() } +type DeleteOTAUpdateInput struct { + _ struct{} `type:"structure"` + + // The OTA update ID to delete. + // + // OtaUpdateId is a required field + OtaUpdateId *string `location:"uri" locationName:"otaUpdateId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteOTAUpdateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOTAUpdateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteOTAUpdateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteOTAUpdateInput"} + if s.OtaUpdateId == nil { + invalidParams.Add(request.NewErrParamRequired("OtaUpdateId")) + } + if s.OtaUpdateId != nil && len(*s.OtaUpdateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OtaUpdateId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOtaUpdateId sets the OtaUpdateId field's value. +func (s *DeleteOTAUpdateInput) SetOtaUpdateId(v string) *DeleteOTAUpdateInput { + s.OtaUpdateId = &v + return s +} + +type DeleteOTAUpdateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteOTAUpdateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOTAUpdateOutput) GoString() string { + return s.String() +} + // The input for the DeletePolicy operation. 
type DeletePolicyInput struct { _ struct{} `type:"structure"` @@ -13619,17 +15066,72 @@ func (s *DeleteRoleAliasInput) SetRoleAlias(v string) *DeleteRoleAliasInput { return s } -type DeleteRoleAliasOutput struct { +type DeleteRoleAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRoleAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRoleAliasOutput) GoString() string { + return s.String() +} + +type DeleteStreamInput struct { + _ struct{} `type:"structure"` + + // The stream ID. + // + // StreamId is a required field + StreamId *string `location:"uri" locationName:"streamId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStreamInput"} + if s.StreamId == nil { + invalidParams.Add(request.NewErrParamRequired("StreamId")) + } + if s.StreamId != nil && len(*s.StreamId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStreamId sets the StreamId field's value. 
+func (s *DeleteStreamInput) SetStreamId(v string) *DeleteStreamInput { + s.StreamId = &v + return s +} + +type DeleteStreamOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s DeleteRoleAliasOutput) String() string { +func (s DeleteStreamOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRoleAliasOutput) GoString() string { +func (s DeleteStreamOutput) GoString() string { return s.String() } @@ -14706,6 +16208,70 @@ func (s *DescribeRoleAliasOutput) SetRoleAliasDescription(v *RoleAliasDescriptio return s } +type DescribeStreamInput struct { + _ struct{} `type:"structure"` + + // The stream ID. + // + // StreamId is a required field + StreamId *string `location:"uri" locationName:"streamId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStreamInput"} + if s.StreamId == nil { + invalidParams.Add(request.NewErrParamRequired("StreamId")) + } + if s.StreamId != nil && len(*s.StreamId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStreamId sets the StreamId field's value. +func (s *DescribeStreamInput) SetStreamId(v string) *DescribeStreamInput { + s.StreamId = &v + return s +} + +type DescribeStreamOutput struct { + _ struct{} `type:"structure"` + + // Information about the stream. 
+ StreamInfo *StreamInfo `locationName:"streamInfo" type:"structure"` +} + +// String returns the string representation +func (s DescribeStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStreamOutput) GoString() string { + return s.String() +} + +// SetStreamInfo sets the StreamInfo field's value. +func (s *DescribeStreamOutput) SetStreamInfo(v *StreamInfo) *DescribeStreamOutput { + s.StreamInfo = v + return s +} + type DescribeThingGroupInput struct { _ struct{} `type:"structure"` @@ -15879,6 +17445,39 @@ func (s EnableTopicRuleOutput) GoString() string { return s.String() } +// Error information. +type ErrorInfo struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string"` + + // The error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ErrorInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorInfo) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ErrorInfo) SetCode(v string) *ErrorInfo { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ErrorInfo) SetMessage(v string) *ErrorInfo { + s.Message = &v + return s +} + // Information that explicitly denies authorization. type ExplicitDeny struct { _ struct{} `type:"structure"` @@ -16193,6 +17792,70 @@ func (s *GetLoggingOptionsOutput) SetRoleArn(v string) *GetLoggingOptionsOutput return s } +type GetOTAUpdateInput struct { + _ struct{} `type:"structure"` + + // The OTA update ID. 
+ // + // OtaUpdateId is a required field + OtaUpdateId *string `location:"uri" locationName:"otaUpdateId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOTAUpdateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOTAUpdateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOTAUpdateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOTAUpdateInput"} + if s.OtaUpdateId == nil { + invalidParams.Add(request.NewErrParamRequired("OtaUpdateId")) + } + if s.OtaUpdateId != nil && len(*s.OtaUpdateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OtaUpdateId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOtaUpdateId sets the OtaUpdateId field's value. +func (s *GetOTAUpdateInput) SetOtaUpdateId(v string) *GetOTAUpdateInput { + s.OtaUpdateId = &v + return s +} + +type GetOTAUpdateOutput struct { + _ struct{} `type:"structure"` + + // The OTA update info. + OtaUpdateInfo *OTAUpdateInfo `locationName:"otaUpdateInfo" type:"structure"` +} + +// String returns the string representation +func (s GetOTAUpdateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOTAUpdateOutput) GoString() string { + return s.String() +} + +// SetOtaUpdateInfo sets the OtaUpdateInfo field's value. +func (s *GetOTAUpdateOutput) SetOtaUpdateInfo(v *OTAUpdateInfo) *GetOTAUpdateOutput { + s.OtaUpdateInfo = v + return s +} + // The input for the GetPolicy operation. type GetPolicyInput struct { _ struct{} `type:"structure"` @@ -18270,6 +19933,92 @@ func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { return s } +type ListOTAUpdatesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return at one time. 
+ MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // A token used to retreive the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // The OTA update job status. + OtaUpdateStatus *string `location:"querystring" locationName:"otaUpdateStatus" type:"string" enum:"OTAUpdateStatus"` +} + +// String returns the string representation +func (s ListOTAUpdatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOTAUpdatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListOTAUpdatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListOTAUpdatesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListOTAUpdatesInput) SetMaxResults(v int64) *ListOTAUpdatesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListOTAUpdatesInput) SetNextToken(v string) *ListOTAUpdatesInput { + s.NextToken = &v + return s +} + +// SetOtaUpdateStatus sets the OtaUpdateStatus field's value. +func (s *ListOTAUpdatesInput) SetOtaUpdateStatus(v string) *ListOTAUpdatesInput { + s.OtaUpdateStatus = &v + return s +} + +type ListOTAUpdatesOutput struct { + _ struct{} `type:"structure"` + + // A token to use to get the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of OTA update jobs. 
+ OtaUpdates []*OTAUpdateSummary `locationName:"otaUpdates" type:"list"` +} + +// String returns the string representation +func (s ListOTAUpdatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOTAUpdatesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListOTAUpdatesOutput) SetNextToken(v string) *ListOTAUpdatesOutput { + s.NextToken = &v + return s +} + +// SetOtaUpdates sets the OtaUpdates field's value. +func (s *ListOTAUpdatesOutput) SetOtaUpdates(v []*OTAUpdateSummary) *ListOTAUpdatesOutput { + s.OtaUpdates = v + return s +} + // The input to the ListOutgoingCertificates operation. type ListOutgoingCertificatesInput struct { _ struct{} `type:"structure"` @@ -18821,34 +20570,120 @@ func (s *ListPrincipalThingsOutput) SetThings(v []*string) *ListPrincipalThingsO return s } -type ListRoleAliasesInput struct { +type ListRoleAliasesInput struct { + _ struct{} `type:"structure"` + + // Return the list of role aliases in ascending alphabetical order. + AscendingOrder *bool `location:"querystring" locationName:"isAscendingOrder" type:"boolean"` + + // A marker used to get the next set of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The maximum number of results to return at one time. + PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListRoleAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRoleAliasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListRoleAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRoleAliasesInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAscendingOrder sets the AscendingOrder field's value. +func (s *ListRoleAliasesInput) SetAscendingOrder(v bool) *ListRoleAliasesInput { + s.AscendingOrder = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListRoleAliasesInput) SetMarker(v string) *ListRoleAliasesInput { + s.Marker = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *ListRoleAliasesInput) SetPageSize(v int64) *ListRoleAliasesInput { + s.PageSize = &v + return s +} + +type ListRoleAliasesOutput struct { + _ struct{} `type:"structure"` + + // A marker used to get the next set of results. + NextMarker *string `locationName:"nextMarker" type:"string"` + + // The role aliases. + RoleAliases []*string `locationName:"roleAliases" type:"list"` +} + +// String returns the string representation +func (s ListRoleAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRoleAliasesOutput) GoString() string { + return s.String() +} + +// SetNextMarker sets the NextMarker field's value. +func (s *ListRoleAliasesOutput) SetNextMarker(v string) *ListRoleAliasesOutput { + s.NextMarker = &v + return s +} + +// SetRoleAliases sets the RoleAliases field's value. +func (s *ListRoleAliasesOutput) SetRoleAliases(v []*string) *ListRoleAliasesOutput { + s.RoleAliases = v + return s +} + +type ListStreamsInput struct { _ struct{} `type:"structure"` - // Return the list of role aliases in ascending alphabetical order. + // Set to true to return the list of streams in ascending order. 
AscendingOrder *bool `location:"querystring" locationName:"isAscendingOrder" type:"boolean"` - // A marker used to get the next set of results. - Marker *string `location:"querystring" locationName:"marker" type:"string"` + // The maximum number of results to return at a time. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // The maximum number of results to return at one time. - PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"` + // A token used to get the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s ListRoleAliasesInput) String() string { +func (s ListStreamsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListRoleAliasesInput) GoString() string { +func (s ListStreamsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListRoleAliasesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListRoleAliasesInput"} - if s.PageSize != nil && *s.PageSize < 1 { - invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) +func (s *ListStreamsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListStreamsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -18858,52 +20693,52 @@ func (s *ListRoleAliasesInput) Validate() error { } // SetAscendingOrder sets the AscendingOrder field's value. -func (s *ListRoleAliasesInput) SetAscendingOrder(v bool) *ListRoleAliasesInput { +func (s *ListStreamsInput) SetAscendingOrder(v bool) *ListStreamsInput { s.AscendingOrder = &v return s } -// SetMarker sets the Marker field's value. 
-func (s *ListRoleAliasesInput) SetMarker(v string) *ListRoleAliasesInput { - s.Marker = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListStreamsInput) SetMaxResults(v int64) *ListStreamsInput { + s.MaxResults = &v return s } -// SetPageSize sets the PageSize field's value. -func (s *ListRoleAliasesInput) SetPageSize(v int64) *ListRoleAliasesInput { - s.PageSize = &v +// SetNextToken sets the NextToken field's value. +func (s *ListStreamsInput) SetNextToken(v string) *ListStreamsInput { + s.NextToken = &v return s } -type ListRoleAliasesOutput struct { +type ListStreamsOutput struct { _ struct{} `type:"structure"` - // A marker used to get the next set of results. - NextMarker *string `locationName:"nextMarker" type:"string"` + // A token used to get the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` - // The role aliases. - RoleAliases []*string `locationName:"roleAliases" type:"list"` + // A list of streams. + Streams []*StreamSummary `locationName:"streams" type:"list"` } // String returns the string representation -func (s ListRoleAliasesOutput) String() string { +func (s ListStreamsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListRoleAliasesOutput) GoString() string { +func (s ListStreamsOutput) GoString() string { return s.String() } -// SetNextMarker sets the NextMarker field's value. -func (s *ListRoleAliasesOutput) SetNextMarker(v string) *ListRoleAliasesOutput { - s.NextMarker = &v +// SetNextToken sets the NextToken field's value. +func (s *ListStreamsOutput) SetNextToken(v string) *ListStreamsOutput { + s.NextToken = &v return s } -// SetRoleAliases sets the RoleAliases field's value. -func (s *ListRoleAliasesOutput) SetRoleAliases(v []*string) *ListRoleAliasesOutput { - s.RoleAliases = v +// SetStreams sets the Streams field's value. 
+func (s *ListStreamsOutput) SetStreams(v []*StreamSummary) *ListStreamsOutput { + s.Streams = v return s } @@ -20100,6 +21935,265 @@ func (s *LoggingOptionsPayload) SetRoleArn(v string) *LoggingOptionsPayload { return s } +// Describes a file to be associated with an OTA update. +type OTAUpdateFile struct { + _ struct{} `type:"structure"` + + // A list of name/attribute pairs. + Attributes map[string]*string `locationName:"attributes" type:"map"` + + // The code signing method of the file. + CodeSigning *CodeSigning `locationName:"codeSigning" type:"structure"` + + // The name of the file. + FileName *string `locationName:"fileName" type:"string"` + + // The source of the file. + FileSource *Stream `locationName:"fileSource" type:"structure"` + + // The file version. + FileVersion *string `locationName:"fileVersion" type:"string"` +} + +// String returns the string representation +func (s OTAUpdateFile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OTAUpdateFile) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OTAUpdateFile) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OTAUpdateFile"} + if s.CodeSigning != nil { + if err := s.CodeSigning.Validate(); err != nil { + invalidParams.AddNested("CodeSigning", err.(request.ErrInvalidParams)) + } + } + if s.FileSource != nil { + if err := s.FileSource.Validate(); err != nil { + invalidParams.AddNested("FileSource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *OTAUpdateFile) SetAttributes(v map[string]*string) *OTAUpdateFile { + s.Attributes = v + return s +} + +// SetCodeSigning sets the CodeSigning field's value. 
+func (s *OTAUpdateFile) SetCodeSigning(v *CodeSigning) *OTAUpdateFile { + s.CodeSigning = v + return s +} + +// SetFileName sets the FileName field's value. +func (s *OTAUpdateFile) SetFileName(v string) *OTAUpdateFile { + s.FileName = &v + return s +} + +// SetFileSource sets the FileSource field's value. +func (s *OTAUpdateFile) SetFileSource(v *Stream) *OTAUpdateFile { + s.FileSource = v + return s +} + +// SetFileVersion sets the FileVersion field's value. +func (s *OTAUpdateFile) SetFileVersion(v string) *OTAUpdateFile { + s.FileVersion = &v + return s +} + +// Information about an OTA update. +type OTAUpdateInfo struct { + _ struct{} `type:"structure"` + + // A collection of name/value pairs + AdditionalParameters map[string]*string `locationName:"additionalParameters" type:"map"` + + // The AWS IoT job ARN associated with the OTA update. + AwsIotJobArn *string `locationName:"awsIotJobArn" type:"string"` + + // The AWS IoT job ID associated with the OTA update. + AwsIotJobId *string `locationName:"awsIotJobId" type:"string"` + + // The date when the OTA update was created. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix"` + + // A description of the OTA update. + Description *string `locationName:"description" type:"string"` + + // Error information associated with the OTA update. + ErrorInfo *ErrorInfo `locationName:"errorInfo" type:"structure"` + + // The date when the OTA update was last updated. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp" timestampFormat:"unix"` + + // The OTA update ARN. + OtaUpdateArn *string `locationName:"otaUpdateArn" type:"string"` + + // A list of files associated with the OTA update. + OtaUpdateFiles []*OTAUpdateFile `locationName:"otaUpdateFiles" min:"1" type:"list"` + + // The OTA update ID. + OtaUpdateId *string `locationName:"otaUpdateId" min:"1" type:"string"` + + // The status of the OTA update. 
+ OtaUpdateStatus *string `locationName:"otaUpdateStatus" type:"string" enum:"OTAUpdateStatus"` + + // Specifies whether the OTA update will continue to run (CONTINUOUS), or will + // be complete after all those things specified as targets have completed the + // OTA update (SNAPSHOT). If continuous, the OTA update may also be run on a + // thing when a change is detected in a target. For example, an OTA update will + // run on a thing when the thing is added to a target group, even after the + // OTA update was completed by all things originally in the group. + TargetSelection *string `locationName:"targetSelection" type:"string" enum:"TargetSelection"` + + // The targets of the OTA update. + Targets []*string `locationName:"targets" min:"1" type:"list"` +} + +// String returns the string representation +func (s OTAUpdateInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OTAUpdateInfo) GoString() string { + return s.String() +} + +// SetAdditionalParameters sets the AdditionalParameters field's value. +func (s *OTAUpdateInfo) SetAdditionalParameters(v map[string]*string) *OTAUpdateInfo { + s.AdditionalParameters = v + return s +} + +// SetAwsIotJobArn sets the AwsIotJobArn field's value. +func (s *OTAUpdateInfo) SetAwsIotJobArn(v string) *OTAUpdateInfo { + s.AwsIotJobArn = &v + return s +} + +// SetAwsIotJobId sets the AwsIotJobId field's value. +func (s *OTAUpdateInfo) SetAwsIotJobId(v string) *OTAUpdateInfo { + s.AwsIotJobId = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *OTAUpdateInfo) SetCreationDate(v time.Time) *OTAUpdateInfo { + s.CreationDate = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *OTAUpdateInfo) SetDescription(v string) *OTAUpdateInfo { + s.Description = &v + return s +} + +// SetErrorInfo sets the ErrorInfo field's value. 
+func (s *OTAUpdateInfo) SetErrorInfo(v *ErrorInfo) *OTAUpdateInfo { + s.ErrorInfo = v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *OTAUpdateInfo) SetLastModifiedDate(v time.Time) *OTAUpdateInfo { + s.LastModifiedDate = &v + return s +} + +// SetOtaUpdateArn sets the OtaUpdateArn field's value. +func (s *OTAUpdateInfo) SetOtaUpdateArn(v string) *OTAUpdateInfo { + s.OtaUpdateArn = &v + return s +} + +// SetOtaUpdateFiles sets the OtaUpdateFiles field's value. +func (s *OTAUpdateInfo) SetOtaUpdateFiles(v []*OTAUpdateFile) *OTAUpdateInfo { + s.OtaUpdateFiles = v + return s +} + +// SetOtaUpdateId sets the OtaUpdateId field's value. +func (s *OTAUpdateInfo) SetOtaUpdateId(v string) *OTAUpdateInfo { + s.OtaUpdateId = &v + return s +} + +// SetOtaUpdateStatus sets the OtaUpdateStatus field's value. +func (s *OTAUpdateInfo) SetOtaUpdateStatus(v string) *OTAUpdateInfo { + s.OtaUpdateStatus = &v + return s +} + +// SetTargetSelection sets the TargetSelection field's value. +func (s *OTAUpdateInfo) SetTargetSelection(v string) *OTAUpdateInfo { + s.TargetSelection = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *OTAUpdateInfo) SetTargets(v []*string) *OTAUpdateInfo { + s.Targets = v + return s +} + +// An OTA update summary. +type OTAUpdateSummary struct { + _ struct{} `type:"structure"` + + // The date when the OTA update was created. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix"` + + // The OTA update ARN. + OtaUpdateArn *string `locationName:"otaUpdateArn" type:"string"` + + // The OTA update ID. 
+ OtaUpdateId *string `locationName:"otaUpdateId" min:"1" type:"string"` +} + +// String returns the string representation +func (s OTAUpdateSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OTAUpdateSummary) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *OTAUpdateSummary) SetCreationDate(v time.Time) *OTAUpdateSummary { + s.CreationDate = &v + return s +} + +// SetOtaUpdateArn sets the OtaUpdateArn field's value. +func (s *OTAUpdateSummary) SetOtaUpdateArn(v string) *OTAUpdateSummary { + s.OtaUpdateArn = &v + return s +} + +// SetOtaUpdateId sets the OtaUpdateId field's value. +func (s *OTAUpdateSummary) SetOtaUpdateId(v string) *OTAUpdateSummary { + s.OtaUpdateId = &v + return s +} + // A certificate that has been transferred but not yet accepted. type OutgoingCertificate struct { _ struct{} `type:"structure"` @@ -21063,21 +23157,95 @@ func (s S3Action) String() string { } // GoString returns the string representation -func (s S3Action) GoString() string { +func (s S3Action) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3Action) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3Action"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *S3Action) SetBucketName(v string) *S3Action { + s.BucketName = &v + return s +} + +// SetCannedAcl sets the CannedAcl field's value. 
+func (s *S3Action) SetCannedAcl(v string) *S3Action { + s.CannedAcl = &v + return s +} + +// SetKey sets the Key field's value. +func (s *S3Action) SetKey(v string) *S3Action { + s.Key = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *S3Action) SetRoleArn(v string) *S3Action { + s.RoleArn = &v + return s +} + +// The location in S3 the contains the files to stream. +type S3Location struct { + _ struct{} `type:"structure"` + + // The S3 bucket that contains the file to stream. + // + // Bucket is a required field + Bucket *string `locationName:"bucket" min:"1" type:"string" required:"true"` + + // The name of the file within the S3 bucket to stream. + // + // Key is a required field + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The file version. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s S3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Location) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *S3Action) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "S3Action"} - if s.BucketName == nil { - invalidParams.Add(request.NewErrParamRequired("BucketName")) +func (s *S3Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3Location"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if invalidParams.Len() > 0 { @@ -21086,27 +23254,21 @@ func (s *S3Action) Validate() error { return nil } -// SetBucketName sets the BucketName field's value. -func (s *S3Action) SetBucketName(v string) *S3Action { - s.BucketName = &v - return s -} - -// SetCannedAcl sets the CannedAcl field's value. -func (s *S3Action) SetCannedAcl(v string) *S3Action { - s.CannedAcl = &v +// SetBucket sets the Bucket field's value. +func (s *S3Location) SetBucket(v string) *S3Location { + s.Bucket = &v return s } // SetKey sets the Key field's value. -func (s *S3Action) SetKey(v string) *S3Action { +func (s *S3Location) SetKey(v string) *S3Location { s.Key = &v return s } -// SetRoleArn sets the RoleArn field's value. -func (s *S3Action) SetRoleArn(v string) *S3Action { - s.RoleArn = &v +// SetVersion sets the Version field's value. +func (s *S3Location) SetVersion(v string) *S3Location { + s.Version = &v return s } @@ -21908,6 +24070,238 @@ func (s StopThingRegistrationTaskOutput) GoString() string { return s.String() } +// Describes a group of files that can be streamed. +type Stream struct { + _ struct{} `type:"structure"` + + // The ID of a file associated with a stream. 
+ FileId *int64 `locationName:"fileId" type:"integer"` + + // The stream ID. + StreamId *string `locationName:"streamId" min:"1" type:"string"` +} + +// String returns the string representation +func (s Stream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stream) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Stream) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Stream"} + if s.StreamId != nil && len(*s.StreamId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFileId sets the FileId field's value. +func (s *Stream) SetFileId(v int64) *Stream { + s.FileId = &v + return s +} + +// SetStreamId sets the StreamId field's value. +func (s *Stream) SetStreamId(v string) *Stream { + s.StreamId = &v + return s +} + +// Represents a file to stream. +type StreamFile struct { + _ struct{} `type:"structure"` + + // The file ID. + FileId *int64 `locationName:"fileId" type:"integer"` + + // The location of the file in S3. + S3Location *S3Location `locationName:"s3Location" type:"structure"` +} + +// String returns the string representation +func (s StreamFile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamFile) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StreamFile) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StreamFile"} + if s.S3Location != nil { + if err := s.S3Location.Validate(); err != nil { + invalidParams.AddNested("S3Location", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFileId sets the FileId field's value. 
+func (s *StreamFile) SetFileId(v int64) *StreamFile { + s.FileId = &v + return s +} + +// SetS3Location sets the S3Location field's value. +func (s *StreamFile) SetS3Location(v *S3Location) *StreamFile { + s.S3Location = v + return s +} + +// Information about a stream. +type StreamInfo struct { + _ struct{} `type:"structure"` + + // The date when the stream was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` + + // The description of the stream. + Description *string `locationName:"description" type:"string"` + + // The files to stream. + Files []*StreamFile `locationName:"files" min:"1" type:"list"` + + // The date when the stream was last updated. + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" timestampFormat:"unix"` + + // An IAM role AWS IoT assumes to access your S3 files. + RoleArn *string `locationName:"roleArn" min:"20" type:"string"` + + // The stream ARN. + StreamArn *string `locationName:"streamArn" type:"string"` + + // The stream ID. + StreamId *string `locationName:"streamId" min:"1" type:"string"` + + // The stream version. + StreamVersion *int64 `locationName:"streamVersion" type:"integer"` +} + +// String returns the string representation +func (s StreamInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamInfo) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *StreamInfo) SetCreatedAt(v time.Time) *StreamInfo { + s.CreatedAt = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *StreamInfo) SetDescription(v string) *StreamInfo { + s.Description = &v + return s +} + +// SetFiles sets the Files field's value. +func (s *StreamInfo) SetFiles(v []*StreamFile) *StreamInfo { + s.Files = v + return s +} + +// SetLastUpdatedAt sets the LastUpdatedAt field's value. 
+func (s *StreamInfo) SetLastUpdatedAt(v time.Time) *StreamInfo { + s.LastUpdatedAt = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *StreamInfo) SetRoleArn(v string) *StreamInfo { + s.RoleArn = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *StreamInfo) SetStreamArn(v string) *StreamInfo { + s.StreamArn = &v + return s +} + +// SetStreamId sets the StreamId field's value. +func (s *StreamInfo) SetStreamId(v string) *StreamInfo { + s.StreamId = &v + return s +} + +// SetStreamVersion sets the StreamVersion field's value. +func (s *StreamInfo) SetStreamVersion(v int64) *StreamInfo { + s.StreamVersion = &v + return s +} + +// A summary of a stream. +type StreamSummary struct { + _ struct{} `type:"structure"` + + // A description of the stream. + Description *string `locationName:"description" type:"string"` + + // The stream ARN. + StreamArn *string `locationName:"streamArn" type:"string"` + + // The stream ID. + StreamId *string `locationName:"streamId" min:"1" type:"string"` + + // The stream version. + StreamVersion *int64 `locationName:"streamVersion" type:"integer"` +} + +// String returns the string representation +func (s StreamSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamSummary) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *StreamSummary) SetDescription(v string) *StreamSummary { + s.Description = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *StreamSummary) SetStreamArn(v string) *StreamSummary { + s.StreamArn = &v + return s +} + +// SetStreamId sets the StreamId field's value. +func (s *StreamSummary) SetStreamId(v string) *StreamSummary { + s.StreamId = &v + return s +} + +// SetStreamVersion sets the StreamVersion field's value. 
+func (s *StreamSummary) SetStreamVersion(v int64) *StreamSummary { + s.StreamVersion = &v + return s +} + type TestAuthorizationInput struct { _ struct{} `type:"structure"` @@ -23387,6 +25781,141 @@ func (s *UpdateRoleAliasOutput) SetRoleAliasArn(v string) *UpdateRoleAliasOutput return s } +type UpdateStreamInput struct { + _ struct{} `type:"structure"` + + // The description of the stream. + Description *string `locationName:"description" type:"string"` + + // The files associated with the stream. + Files []*StreamFile `locationName:"files" min:"1" type:"list"` + + // An IAM role that allows the IoT service principal assumes to access your + // S3 files. + RoleArn *string `locationName:"roleArn" min:"20" type:"string"` + + // The stream ID. + // + // StreamId is a required field + StreamId *string `location:"uri" locationName:"streamId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateStreamInput"} + if s.Files != nil && len(s.Files) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Files", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.StreamId == nil { + invalidParams.Add(request.NewErrParamRequired("StreamId")) + } + if s.StreamId != nil && len(*s.StreamId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamId", 1)) + } + if s.Files != nil { + for i, v := range s.Files { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Files", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *UpdateStreamInput) SetDescription(v string) *UpdateStreamInput { + s.Description = &v + return s +} + +// SetFiles sets the Files field's value. +func (s *UpdateStreamInput) SetFiles(v []*StreamFile) *UpdateStreamInput { + s.Files = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *UpdateStreamInput) SetRoleArn(v string) *UpdateStreamInput { + s.RoleArn = &v + return s +} + +// SetStreamId sets the StreamId field's value. +func (s *UpdateStreamInput) SetStreamId(v string) *UpdateStreamInput { + s.StreamId = &v + return s +} + +type UpdateStreamOutput struct { + _ struct{} `type:"structure"` + + // A description of the stream. + Description *string `locationName:"description" type:"string"` + + // The stream ARN. + StreamArn *string `locationName:"streamArn" type:"string"` + + // The stream ID. + StreamId *string `locationName:"streamId" min:"1" type:"string"` + + // The stream version. 
+ StreamVersion *int64 `locationName:"streamVersion" type:"integer"` +} + +// String returns the string representation +func (s UpdateStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStreamOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *UpdateStreamOutput) SetDescription(v string) *UpdateStreamOutput { + s.Description = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *UpdateStreamOutput) SetStreamArn(v string) *UpdateStreamOutput { + s.StreamArn = &v + return s +} + +// SetStreamId sets the StreamId field's value. +func (s *UpdateStreamOutput) SetStreamId(v string) *UpdateStreamOutput { + s.StreamId = &v + return s +} + +// SetStreamVersion sets the StreamVersion field's value. +func (s *UpdateStreamOutput) SetStreamVersion(v int64) *UpdateStreamOutput { + s.StreamVersion = &v + return s +} + type UpdateThingGroupInput struct { _ struct{} `type:"structure"` @@ -23853,6 +26382,20 @@ const ( MessageFormatJson = "JSON" ) +const ( + // OTAUpdateStatusCreatePending is a OTAUpdateStatus enum value + OTAUpdateStatusCreatePending = "CREATE_PENDING" + + // OTAUpdateStatusCreateInProgress is a OTAUpdateStatus enum value + OTAUpdateStatusCreateInProgress = "CREATE_IN_PROGRESS" + + // OTAUpdateStatusCreateComplete is a OTAUpdateStatus enum value + OTAUpdateStatusCreateComplete = "CREATE_COMPLETE" + + // OTAUpdateStatusCreateFailed is a OTAUpdateStatus enum value + OTAUpdateStatusCreateFailed = "CREATE_FAILED" +) + const ( // ReportTypeErrors is a ReportType enum value ReportTypeErrors = "ERRORS" diff --git a/vendor/vendor.json b/vendor/vendor.json index e32b249d6dd..8c54536d6fd 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -141,828 +141,828 @@ "revisionTime": "2017-07-27T15:54:43Z" }, { - "checksumSHA1": "ELwO63Rr9R8wzg2hv25tdUmT1Os=", + "checksumSHA1": "IARbiOsFuZg1G6v9wnl0RWyBMkY=", 
"path": "github.com/aws/aws-sdk-go/aws", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "DtuTqKH29YnLjrIJkRYX0HQtXY0=", "path": "github.com/aws/aws-sdk-go/aws/arn", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "9nE/FjZ4pYrT883KtV2/aI+Gayo=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { 
"checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": 
"1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "OnU/n7R33oYXiB4SAGd5pK7I0Bs=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { - "checksumSHA1": "pa4oM3PSwZQIfqcw1JFbd3kv3aQ=", + "checksumSHA1": "pkjsLFuKd1FMWJXFnsO1XX+JKPs=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "9GvAyILJ7g+VUg8Ef5DsT5GuYsg=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": 
"03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "iU00ZjhAml/13g+1YXT21IqoXqg=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "NStHCXEvYqG72GknZyv1jaKaeH0=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": 
"1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "yHfT5DTbeCLs4NE2Rgnqrhe15ls=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": 
"1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "pkeoOfZpHRvFG/AOZeTf0lwtsFg=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", 
- "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "vnYDXA1NxJ7Hu+DMfXNk1UnmkWg=", "path": "github.com/aws/aws-sdk-go/service/acm", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "DPl/OkvEUjrd+XKqX73l6nUNw3U=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "X8tOI6i+RJwXIgg1qBjDNclyG/0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "aDAaH6YiA50IrJ5Smfg0fovrniA=", "path": "github.com/aws/aws-sdk-go/service/appsync", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "oBXDw1zQTfxcKsK3ZjtKcS7gBLI=", "path": 
"github.com/aws/aws-sdk-go/service/athena", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "ITAwWyJp4t9AGfUXm9M3pFWTHVA=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "Zz8qI6RloveM1zrXAglLxJZT1ZA=", "path": "github.com/aws/aws-sdk-go/service/batch", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "/nO06EpnD22+Ex80gHi4UYrAvKc=", "path": "github.com/aws/aws-sdk-go/service/budgets", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "6gM3CZZgiB0JvS7EK1c31Q8L09U=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + 
"versionExact": "v1.12.51" }, { "checksumSHA1": "T80IDetBz1hqJpq5Wqmx3MwCh8w=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "bYrI9mxspB0xDFZEy3OIfWuez5g=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "oB+M+kOmYG28V0PuI75IF6E+/w8=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "Nc3vXlV7s309PprScYpRDPQWeDQ=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "bPh7NF3mLpGMV0rIakolMPHqMyw=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": 
"1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "OqrWtx9iyIJ9roP2sEcmP9UCfXE=", "path": "github.com/aws/aws-sdk-go/service/codebuild", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "7nW1Ho2X3RcUU8FaFBhJIUeuDNw=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "m19PZt1B51QCWo1jxSbII2zzL6Q=", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "LKw7fnNwq17Eqy0clzS/LK89vS4=", "path": "github.com/aws/aws-sdk-go/service/codepipeline", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "aXh1KIbNX+g+tH+lh3pk++9lm3k=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentity", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": 
"2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "IWi9xZz+OncotjM/vJ87Iffg2Qk=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentityprovider", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { - "checksumSHA1": "6uudO8hkB5uERXixPA/yL3xcguQ=", + "checksumSHA1": "56F6Stg8hQ1kxiAEzqB0TDctW9k=", "path": "github.com/aws/aws-sdk-go/service/configservice", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "hYCwLQdIjHj8rMHLGVyUVhecI4s=", "path": "github.com/aws/aws-sdk-go/service/databasemigrationservice", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "26CWoHQP/dyL2VzE5ZNd8zNzhko=", "path": "github.com/aws/aws-sdk-go/service/devicefarm", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { 
"checksumSHA1": "6g94rUHAgjcqMMTtMqKUbLU37wY=", "path": "github.com/aws/aws-sdk-go/service/directconnect", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "oFnS6I0u7KqnxK0/r1uoz8rTkxI=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "0TXXUPjrbOCHpX555B6suH36Nnk=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "BOjSO1uO7Coj6o3oqpPUtEhQrPI=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "uEv9kkBsVIjg7K4+Y8TVlU0Cc8o=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": 
"2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "sD9Urgwx7F3ImX+tJg2Q+ME/oFM=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "eoM9nF5iVMbuGOmkY33d19aHt8Y=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "dU5MPXUUOYD/E9sNncpFZ/U86Cw=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "pj8mBWT3HE0Iid6HSmhw7lmyZDU=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "VYGtTaSiajfKOVTbi9/SNmbiIac=", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - 
"versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "SZ7yLDZ6RvMhpWe0Goyem64kgyA=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "WYqHhdRNsiGGBLWlBLbOItZf+zA=", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "ae7VWg/xuXpnSD6wGumN44qEd+Q=", "path": "github.com/aws/aws-sdk-go/service/elbv2", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "NbkH6F+792jQ7BW4lGCb+vJVw58=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "5btWHj2fZrPc/zfYdJLPaOcivxI=", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": 
"03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "oDoGvSfmO2Z099ixV2HXn+SDeHE=", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "HRmbBf3dUEBAfdC2xKaoWAGeM7Y=", "path": "github.com/aws/aws-sdk-go/service/glue", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "6JlxJoy1JCArNK2qBkaJ5IV6qBc=", "path": "github.com/aws/aws-sdk-go/service/guardduty", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "oZaxMqnwl2rA+V/W0tJ3uownORI=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "dnNMSn5aHAtdOks+aWHLpwbi/VE=", 
"path": "github.com/aws/aws-sdk-go/service/inspector", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { - "checksumSHA1": "96OBMJ3R9BD402LJsUUA8a82/UY=", + "checksumSHA1": "pZwCI4DpP5hcMa/ItKhiwo/ukd0=", "path": "github.com/aws/aws-sdk-go/service/iot", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "IoSyRZhlL0petrB28nXk5jKM9YA=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "oAFLgD0uJiVOZkFkL5dd/wUgBz4=", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "XDVse9fKF0RkAywzzgsO31AV4oc=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": 
"2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "HluEcyZNywrbKnj/aR3tXbu29d8=", "path": "github.com/aws/aws-sdk-go/service/lexmodelbuildingservice", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "wjs9YBsHx0YQH0zKBA7Ibd1UV5Y=", "path": "github.com/aws/aws-sdk-go/service/lightsail", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "4VfB5vMLNYs0y6K159YCBgo9T3c=", "path": "github.com/aws/aws-sdk-go/service/mediaconvert", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "Ox3VWHYSQq0YKmlr0paUPdr5W/0=", "path": "github.com/aws/aws-sdk-go/service/medialive", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "Rs7QtkcLl3XNPnKb8ss/AhF2X50=", "path": "github.com/aws/aws-sdk-go/service/mediapackage", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - 
"versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "QjiIL8LrlhwrQw8FboF+wMNvUF0=", "path": "github.com/aws/aws-sdk-go/service/mediastore", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "ZY1SJNE03I6NL2OBJD9hlwVsqO0=", "path": "github.com/aws/aws-sdk-go/service/mediastoredata", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "ynB7Flcudp0VOqBVKZJ+23DtLHU=", "path": "github.com/aws/aws-sdk-go/service/mq", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "fpsBu+F79ktlLRwal1GugVMUDo0=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "Iqkgx2nafQPV7fjw+uP35jtF6t4=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": 
"03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "vP1FcccUZbuUlin7ME89w1GVJtA=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "tKnVaKPOCiU6xl3/AYcdBCLtRdw=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "sCaHoPWsJXRHFbilUKwN71qFTOI=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "QZU8vR9cOIenYiH+Ywl4Gzfnlp0=", "path": "github.com/aws/aws-sdk-go/service/servicecatalog", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": 
"dk6ebvA0EYgdPyc5HPKLBPEtsm4=", "path": "github.com/aws/aws-sdk-go/service/servicediscovery", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "Ex1Ma0SFGpqeNuPbeXZtsliZ3zo=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "maVXeR3WDAkONlzf04e4mDgCYxo=", "path": "github.com/aws/aws-sdk-go/service/sfn", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "ADoR4mlCW5usH8iOa6mPNSy49LM=", "path": "github.com/aws/aws-sdk-go/service/shield", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "B3CgAFSREebpsFoFOo4vrQ6u04w=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + 
"version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "FfY8w4DM8XIULdRnFhd3Um8Mj8c=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "Wx189wAbIhWChx4kVbvsyqKMF4U=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "ijz0rBDeR6JP/06S+97k84FRYxc=", "path": "github.com/aws/aws-sdk-go/service/ssm", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "W1oFtpaT4TWIIJrAvFcn/XdcT7g=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "Uw4pOUxSMbx4xBHUcOUkNhtnywE=", "path": "github.com/aws/aws-sdk-go/service/swf", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": 
"1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "on6d7Hydx2bM9jkFOf1JZcZZgeY=", "path": "github.com/aws/aws-sdk-go/service/waf", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "rHqjsOndIR82gX5mSKybaRWf3UY=", "path": "github.com/aws/aws-sdk-go/service/wafregional", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "5HDSvmMW7F3xzPAzughe4dEn6RM=", "path": "github.com/aws/aws-sdk-go/service/workspaces", - "revision": "03f37be7b3c2cec3a839ff74dea774c63819d45d", - "revisionTime": "2017-12-19T22:11:17Z", - "version": "v1.12.50", - "versionExact": "v1.12.50" + "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", + "revisionTime": "2017-12-21T00:21:33Z", + "version": "v1.12.51", + "versionExact": "v1.12.51" }, { "checksumSHA1": "usT4LCSQItkFvFOQT7cBlkCuGaE=", From 158eb15aa08d8568c29b545cc22cf4044479d0ae Mon Sep 17 00:00:00 2001 From: Puneeth Nanjundaswamy Date: Thu, 21 Dec 2017 11:25:44 +0100 Subject: [PATCH 042/350] Fix FARGATE ECS task definition doc --- website/docs/r/ecs_task_definition.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/ecs_task_definition.html.markdown b/website/docs/r/ecs_task_definition.html.markdown index 628bc2191c4..84f419b1e1c 100644 --- a/website/docs/r/ecs_task_definition.html.markdown +++ 
b/website/docs/r/ecs_task_definition.html.markdown @@ -84,8 +84,8 @@ official [Developer Guide](https://docs.aws.amazon.com/AmazonECS/latest/develope * `network_mode` - (Optional) The Docker networking mode to use for the containers in the task. The valid values are `none`, `bridge`, `awsvpc`, and `host`. * `volume` - (Optional) A set of [volume blocks](#volume-block-arguments) that containers in your task may use. * `placement_constraints` - (Optional) A set of [placement constraints](#placement-constraints-arguments) rules that are taken into consideration during task placement. Maximum number of `placement_constraints` is `10`. -* `cpu` - (Optional) The number of cpu units used by the task. If the `launch_type` is `FARGATE` this field is required. -* `memory` - (Optional) The amount (in MiB) of memory used by the task. If the `launch_type` is `FARGATE` this field is required. +* `cpu` - (Optional) The number of cpu units used by the task. If the `requires_compatibilities` is `FARGATE` this field is required. +* `memory` - (Optional) The amount (in MiB) of memory used by the task. If the `requires_compatibilities` is `FARGATE` this field is required. * `requires_compatibilities` - (Optional) A set of launch types required by the task. The valid values are `EC2` and `FARGATE`. 
#### Volume Block Arguments From 63b287b4d32163a4909c8b7f866c2922a8651d43 Mon Sep 17 00:00:00 2001 From: Gauthier Wallet Date: Thu, 21 Dec 2017 16:13:22 +0100 Subject: [PATCH 043/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e8d0680230..7be2662871a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ENHANCEMENTS: * resource/aws_kinesis_firehose_delivery_stream: Import is now supported [GH-2707] * resource/aws_cognito_user_pool: The ARN for the pool is now computed and exposed as an attribute [GH-2723] +* resource/aws_directory_service_directory: Add security_group_id field [GH-2688] BUG FIXES: From 811c4570eebc9cdcd0b287c773b09f758007d0db Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 21 Dec 2017 12:51:05 -0500 Subject: [PATCH 044/350] r/aws_sqs_queue_policy: Add StateChangeConf for create/update, remove missing policy error on read --- aws/resource_aws_sqs_queue_policy.go | 45 +++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_sqs_queue_policy.go b/aws/resource_aws_sqs_queue_policy.go index 2ec1f37821f..d81661288ad 100644 --- a/aws/resource_aws_sqs_queue_policy.go +++ b/aws/resource_aws_sqs_queue_policy.go @@ -3,10 +3,13 @@ package aws import ( "fmt" "log" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sqs" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" + "github.com/jen20/awspolicyequivalence" ) func resourceAwsSqsQueuePolicy() *schema.Resource { @@ -40,18 +43,51 @@ func resourceAwsSqsQueuePolicy() *schema.Resource { func resourceAwsSqsQueuePolicyUpsert(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sqsconn + policy := d.Get("policy").(string) url := d.Get("queue_url").(string) _, err := conn.SetQueueAttributes(&sqs.SetQueueAttributesInput{ QueueUrl: aws.String(url), Attributes: 
aws.StringMap(map[string]string{ - "Policy": d.Get("policy").(string), + "Policy": policy, }), }) if err != nil { return fmt.Errorf("Error updating SQS attributes: %s", err) } + // https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html + // When you change a queue's attributes, the change can take up to 60 seconds + // for most of the attributes to propagate throughout the Amazon SQS system. + wait := resource.StateChangeConf{ + Pending: []string{""}, + Target: []string{"SQS queue policy updated"}, + Timeout: 1 * time.Minute, + MinTimeout: 1 * time.Second, + Refresh: func() (interface{}, string, error) { + out, err := conn.GetQueueAttributes(&sqs.GetQueueAttributesInput{ + QueueUrl: aws.String(url), + AttributeNames: []*string{aws.String("Policy")}, + }) + if err != nil { + return out, "", err + } + queuePolicy, ok := out.Attributes["Policy"] + if ok { + equivalent, err := awspolicy.PoliciesAreEquivalent(*queuePolicy, policy) + if err != nil || !equivalent { + return out, "", nil + } + return out, "SQS queue policy updated", nil + } + return out, "", nil + }, + } + _, err = wait.WaitForState() + if err != nil { + return err + } + d.SetId(url) return resourceAwsSqsQueuePolicyRead(d, meta) @@ -77,11 +113,12 @@ func resourceAwsSqsQueuePolicyRead(d *schema.ResourceData, meta interface{}) err } policy, ok := out.Attributes["Policy"] - if !ok { - return fmt.Errorf("SQS Queue policy not found for %s", d.Id()) + if ok { + d.Set("policy", policy) + } else { + d.Set("policy", "") } - d.Set("policy", policy) d.Set("queue_url", d.Id()) return nil From ca8f8245776e160f607137d0fe7e2f6d541636f2 Mon Sep 17 00:00:00 2001 From: VEBER Arnaud Date: Tue, 19 Dec 2017 10:37:41 +0100 Subject: [PATCH 045/350] feat(eu-west-3): update valid region list --- aws/config.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/config.go b/aws/config.go index 4782f76eafc..e516440ec9d 100644 --- a/aws/config.go +++ b/aws/config.go @@ -487,6 +487,7 @@ func 
(c *Config) ValidateRegion() error { "eu-central-1", "eu-west-1", "eu-west-2", + "eu-west-3", "sa-east-1", "us-east-1", "us-east-2", From f3b5e4fed15e3a735f021704b9dd0583d2d5b0c4 Mon Sep 17 00:00:00 2001 From: VEBER Arnaud Date: Tue, 19 Dec 2017 14:06:27 +0100 Subject: [PATCH 046/350] feat(eu-west-3): update cloudtrail service account region map --- aws/data_source_aws_cloudtrail_service_account.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/data_source_aws_cloudtrail_service_account.go b/aws/data_source_aws_cloudtrail_service_account.go index cbc503ec538..88818fcdc2d 100644 --- a/aws/data_source_aws_cloudtrail_service_account.go +++ b/aws/data_source_aws_cloudtrail_service_account.go @@ -21,6 +21,7 @@ var cloudTrailServiceAccountPerRegionMap = map[string]string{ "eu-central-1": "035351147821", "eu-west-1": "859597730677", "eu-west-2": "282025262664", + "eu-west-3": "262312530599", "sa-east-1": "814480443879", } From 350159b18f1625009d24e3490ee66d83fe519a13 Mon Sep 17 00:00:00 2001 From: VEBER Arnaud Date: Tue, 19 Dec 2017 10:40:11 +0100 Subject: [PATCH 047/350] feat(eu-west-3): update elb hosted zone id region map AWS doc not updated --- aws/data_source_aws_elb_hosted_zone_id.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/aws/data_source_aws_elb_hosted_zone_id.go b/aws/data_source_aws_elb_hosted_zone_id.go index ee75a27bf24..6bcf74bdf99 100644 --- a/aws/data_source_aws_elb_hosted_zone_id.go +++ b/aws/data_source_aws_elb_hosted_zone_id.go @@ -6,9 +6,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" ) -// See https://github.com/fog/fog-aws/pull/332/files -// This list isn't exposed by AWS - it's been found through -// trouble solving +// See http://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region var elbHostedZoneIdPerRegionMap = map[string]string{ "ap-northeast-1": "Z14GRHDCWA56QT", "ap-northeast-2": "ZWKZPGTI48KDX", @@ -19,6 +17,7 @@ var elbHostedZoneIdPerRegionMap = map[string]string{ "eu-central-1": 
"Z215JYRZR1TBD5", "eu-west-1": "Z32O12XQLNTSW2", "eu-west-2": "ZHURV8PSTC4K8", + "eu-west-3": "Z3Q77PNBQS71R4", "us-east-1": "Z35SXDOTRQ7X7K", "us-east-2": "Z3AADJGX6KTTL2", "us-west-1": "Z368ELLRRE2KJ0", From 8d3f279d489f6753bba322fdcf965f4a89fc03d5 Mon Sep 17 00:00:00 2001 From: VEBER Arnaud Date: Tue, 19 Dec 2017 10:41:06 +0100 Subject: [PATCH 048/350] feat(eu-west-3): update elb account id region map --- aws/data_source_aws_elb_service_account.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/data_source_aws_elb_service_account.go b/aws/data_source_aws_elb_service_account.go index a3d6cdd7125..6034ee55c57 100644 --- a/aws/data_source_aws_elb_service_account.go +++ b/aws/data_source_aws_elb_service_account.go @@ -18,6 +18,7 @@ var elbAccountIdPerRegionMap = map[string]string{ "eu-central-1": "054676820928", "eu-west-1": "156460612806", "eu-west-2": "652711504416", + "eu-west-3": "009996457667", "sa-east-1": "507241528517", "us-east-1": "127311923021", "us-east-2": "033677994240", From 425edf2765ee6a323b029073b538673ebbb57513 Mon Sep 17 00:00:00 2001 From: VEBER Arnaud Date: Tue, 19 Dec 2017 10:41:37 +0100 Subject: [PATCH 049/350] feat(eu-west-3): update redshift service account region map --- aws/data_source_aws_redshift_service_account.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/data_source_aws_redshift_service_account.go b/aws/data_source_aws_redshift_service_account.go index ad6d6e68f1e..029f8fe029c 100644 --- a/aws/data_source_aws_redshift_service_account.go +++ b/aws/data_source_aws_redshift_service_account.go @@ -21,6 +21,7 @@ var redshiftServiceAccountPerRegionMap = map[string]string{ "eu-central-1": "053454850223", "eu-west-1": "210876761215", "eu-west-2": "307160386991", + "eu-west-3": "915173422425", "sa-east-1": "075028567923", } From ef9102d4fc14d40101c8cc935511a77b49f3b17b Mon Sep 17 00:00:00 2001 From: VEBER Arnaud Date: Tue, 19 Dec 2017 10:42:19 +0100 Subject: [PATCH 050/350] feat(eu-west-3): update hosted zones region map --- 
aws/hosted_zones.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/hosted_zones.go b/aws/hosted_zones.go index 131f03ebde3..9550e46d60b 100644 --- a/aws/hosted_zones.go +++ b/aws/hosted_zones.go @@ -10,6 +10,7 @@ var hostedZoneIDsMap = map[string]string{ "us-west-1": "Z2F56UZL2M1ACD", "eu-west-1": "Z1BKCTXD74EZPE", "eu-west-2": "Z3GKZC51ZF0DB4", + "eu-west-3": "Z3R1K369G5AVDG", "eu-central-1": "Z21DNDUVLTQW6Q", "ap-south-1": "Z11RGJOFQNVJUP", "ap-southeast-1": "Z3O0J2DXBE1FTB", From ecb8d81cdb6587dfc3752856fac451d1d529e92d Mon Sep 17 00:00:00 2001 From: VEBER Arnaud Date: Tue, 19 Dec 2017 17:52:42 +0100 Subject: [PATCH 051/350] test(eu-west-3): website endpoint url --- aws/website_endpoint_url_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/website_endpoint_url_test.go b/aws/website_endpoint_url_test.go index 67f6b35425c..24d2137b0e0 100644 --- a/aws/website_endpoint_url_test.go +++ b/aws/website_endpoint_url_test.go @@ -11,6 +11,7 @@ var websiteEndpoints = []struct { {"us-west-2", "bucket-name.s3-website-us-west-2.amazonaws.com"}, {"us-west-1", "bucket-name.s3-website-us-west-1.amazonaws.com"}, {"eu-west-1", "bucket-name.s3-website-eu-west-1.amazonaws.com"}, + {"eu-west-3", "bucket-name.s3-website.eu-west-3.amazonaws.com"}, {"eu-central-1", "bucket-name.s3-website.eu-central-1.amazonaws.com"}, {"ap-south-1", "bucket-name.s3-website.ap-south-1.amazonaws.com"}, {"ap-southeast-1", "bucket-name.s3-website-ap-southeast-1.amazonaws.com"}, From c7e748cff0995061b1ceacc444d9e92ece430d77 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 21 Dec 2017 13:22:20 -0600 Subject: [PATCH 052/350] Update CHANGELOG.md --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7be2662871a..7c6ae1e8e87 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 1.6.1 (Unreleased) +FEATURES: + +* **New Region**: `eu-west-3` is now supported [GH-2707] + ENHANCEMENTS: * 
resource/aws_kinesis_firehose_delivery_stream: Import is now supported [GH-2707] From 16029905d8f539234e13e148d38cb4d8cf71d7ad Mon Sep 17 00:00:00 2001 From: Felipe Philipp Date: Thu, 21 Dec 2017 20:46:14 +0100 Subject: [PATCH 053/350] Update formatting of networking_configuration A missing new line makes the rendered markdown to be on the same line --- website/docs/r/ecs_service.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/ecs_service.html.markdown b/website/docs/r/ecs_service.html.markdown index 26be0fa46f7..3c45212e596 100644 --- a/website/docs/r/ecs_service.html.markdown +++ b/website/docs/r/ecs_service.html.markdown @@ -98,6 +98,7 @@ Guide](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query- ## network_configuration `network_configuration` support the following: + * `subnets` - (Required) The subnets associated with the task or service. * `security_groups` - (Optional) The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. 
For more information, see [Task Networking](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) From b5cca8b75eac1dcf95f4759874fa1d6ee6ef38cb Mon Sep 17 00:00:00 2001 From: Puneeth Nanjundaswamy Date: Fri, 22 Dec 2017 01:35:40 +0100 Subject: [PATCH 054/350] Bump aws-sdk-go to v1.12.52 --- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/codebuild/api.go | 19 +- .../aws/aws-sdk-go/service/ec2/api.go | 40 +- vendor/vendor.json | 832 +++++++++--------- 5 files changed, 455 insertions(+), 439 deletions(-) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 25ca1927f57..4fc11726d89 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -513,6 +513,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 86443c16c79..87a281a6d16 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.12.51" +const SDKVersion = "1.12.52" diff --git a/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go b/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go index bc23c2408fb..ce54c7c525c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go @@ -2307,6 +2307,9 @@ type EnvironmentImage struct { // The name of the Docker image. 
Name *string `locationName:"name" type:"string"` + + // A list of environment image versions. + Versions []*string `locationName:"versions" type:"list"` } // String returns the string representation @@ -2331,6 +2334,12 @@ func (s *EnvironmentImage) SetName(v string) *EnvironmentImage { return s } +// SetVersions sets the Versions field's value. +func (s *EnvironmentImage) SetVersions(v []*string) *EnvironmentImage { + s.Versions = v + return s +} + // A set of Docker images that are related by programming language and are managed // by AWS CodeBuild. // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/EnvironmentLanguage @@ -3044,10 +3053,7 @@ type Project struct { // The default is 60 minutes. TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" min:"5" type:"integer"` - // If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide - // this parameter that identifies the VPC ID and the list of security group - // IDs and subnet IDs. The security groups and subnets must belong to the same - // VPC. You must provide at least one security group and one subnet ID. + // Information about the VPC configuration that AWS CodeBuild will access. VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` // Information about a webhook in GitHub that connects repository events to @@ -4220,10 +4226,7 @@ func (s *UpdateProjectOutput) SetProject(v *Project) *UpdateProjectOutput { return s } -// If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide -// this parameter that identifies the VPC ID and the list of security group -// IDs and subnet IDs. The security groups and subnets must belong to the same -// VPC. You must provide at least one security group and one subnet ID. +// Information about the VPC configuration that AWS CodeBuild will access. 
// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/VpcConfig type VpcConfig struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 0cd970b02d9..8966289f14b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -20190,6 +20190,8 @@ func (c *EC2) ReplaceNetworkAclAssociationRequest(input *ReplaceNetworkAclAssoci // For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) // in the Amazon Virtual Private Cloud User Guide. // +// This is an idempotent operation. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -22652,6 +22654,9 @@ type Address struct { // The Elastic IP address. PublicIp *string `locationName:"publicIp" type:"string"` + + // Any tags assigned to the Elastic IP address. + Tags []*Tag `locationName:"tags" locationNameList:"item" type:"list"` } // String returns the string representation @@ -22712,6 +22717,12 @@ func (s *Address) SetPublicIp(v string) *Address { return s } +// SetTags sets the Tags field's value. +func (s *Address) SetTags(v []*Tag) *Address { + s.Tags = v + return s +} + // Contains the parameters for AllocateAddress. // See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateAddressRequest type AllocateAddressInput struct { @@ -41336,14 +41347,14 @@ type DescribeVpcEndpointConnectionsInput struct { // One or more filters. // - // * customer-account-id - The AWS account number of the owner of the endpoint. + // * service-id - The ID of the service. // - // * endpoint-connection-state - The state of the endpoint (PendingAcceptance - // | Pending | Available | Deleting | Deleted | Rejected | Failed). 
+ // * vpc-endpoint-owner - The AWS account number of the owner of the endpoint. // - // * vpc-endpoint-id - The ID of the endpoint. + // * vpc-endpoint-state - The state of the endpoint (pendingAcceptance | + // pending | available | deleting | deleted | rejected | failed). // - // * vpc-endpoint-service-id - The ID of the service. + // * vpc-endpoint-id - The ID of the endpoint. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. @@ -41437,12 +41448,12 @@ type DescribeVpcEndpointServiceConfigurationsInput struct { // One or more filters. // - // * service-name - The ARN of the service. + // * service-name - The name of the service. // - // * vpc-endpoint-service-id - The ID of the service. + // * service-id - The ID of the service. // - // * vpc-endpoint-service-state - The state of the service (Pending | Available - // | Deleting | Deleted | Failed). + // * service-state - The state of the service (Pending | Available | Deleting + // | Deleted | Failed). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. @@ -49577,10 +49588,11 @@ type IpPermission struct { // [EC2-VPC only] One or more IPv6 ranges. Ipv6Ranges []*Ipv6Range `locationName:"ipv6Ranges" locationNameList:"item" type:"list"` - // (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups - // only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress - // request, this is the AWS service that you want to access through a VPC endpoint - // from instances associated with the security group. + // (EC2-VPC only; valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress + // and DescribeSecurityGroups only) One or more prefix list IDs for an AWS service. 
+ // In an AuthorizeSecurityGroupEgress request, this is the AWS service that + // you want to access through a VPC endpoint from instances associated with + // the security group. PrefixListIds []*PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"` // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. @@ -55613,7 +55625,7 @@ func (s *PrefixList) SetPrefixListName(v string) *PrefixList { return s } -// The ID of the prefix. +// [EC2-VPC only] The ID of the prefix. // See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PrefixListId type PrefixListId struct { _ struct{} `type:"structure"` diff --git a/vendor/vendor.json b/vendor/vendor.json index 8c54536d6fd..aeebe6dcb84 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -141,828 +141,828 @@ "revisionTime": "2017-07-27T15:54:43Z" }, { - "checksumSHA1": "IARbiOsFuZg1G6v9wnl0RWyBMkY=", + "checksumSHA1": "zrW2b0liD5UpOFil/Nj7wa7Sp9A=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "DtuTqKH29YnLjrIJkRYX0HQtXY0=", "path": "github.com/aws/aws-sdk-go/aws/arn", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": 
"32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "9nE/FjZ4pYrT883KtV2/aI+Gayo=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - 
"version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "OnU/n7R33oYXiB4SAGd5pK7I0Bs=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": 
"github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { - "checksumSHA1": "pkjsLFuKd1FMWJXFnsO1XX+JKPs=", + "checksumSHA1": "aOgB3+hNeX2svLhaX373ToSkhTg=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "9GvAyILJ7g+VUg8Ef5DsT5GuYsg=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "iU00ZjhAml/13g+1YXT21IqoXqg=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + 
"version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "NStHCXEvYqG72GknZyv1jaKaeH0=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "yHfT5DTbeCLs4NE2Rgnqrhe15ls=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": 
"v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "pkeoOfZpHRvFG/AOZeTf0lwtsFg=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": 
"1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "vnYDXA1NxJ7Hu+DMfXNk1UnmkWg=", "path": "github.com/aws/aws-sdk-go/service/acm", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "DPl/OkvEUjrd+XKqX73l6nUNw3U=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": 
"X8tOI6i+RJwXIgg1qBjDNclyG/0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "aDAaH6YiA50IrJ5Smfg0fovrniA=", "path": "github.com/aws/aws-sdk-go/service/appsync", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "oBXDw1zQTfxcKsK3ZjtKcS7gBLI=", "path": "github.com/aws/aws-sdk-go/service/athena", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "ITAwWyJp4t9AGfUXm9M3pFWTHVA=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "Zz8qI6RloveM1zrXAglLxJZT1ZA=", "path": "github.com/aws/aws-sdk-go/service/batch", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": 
"2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "/nO06EpnD22+Ex80gHi4UYrAvKc=", "path": "github.com/aws/aws-sdk-go/service/budgets", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "6gM3CZZgiB0JvS7EK1c31Q8L09U=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "T80IDetBz1hqJpq5Wqmx3MwCh8w=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "bYrI9mxspB0xDFZEy3OIfWuez5g=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "oB+M+kOmYG28V0PuI75IF6E+/w8=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": 
"v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "Nc3vXlV7s309PprScYpRDPQWeDQ=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "bPh7NF3mLpGMV0rIakolMPHqMyw=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { - "checksumSHA1": "OqrWtx9iyIJ9roP2sEcmP9UCfXE=", + "checksumSHA1": "P6qyaFX9X6Nnvm3avLigjmjfYds=", "path": "github.com/aws/aws-sdk-go/service/codebuild", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "7nW1Ho2X3RcUU8FaFBhJIUeuDNw=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "m19PZt1B51QCWo1jxSbII2zzL6Q=", "path": 
"github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "LKw7fnNwq17Eqy0clzS/LK89vS4=", "path": "github.com/aws/aws-sdk-go/service/codepipeline", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "aXh1KIbNX+g+tH+lh3pk++9lm3k=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentity", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "IWi9xZz+OncotjM/vJ87Iffg2Qk=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentityprovider", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "56F6Stg8hQ1kxiAEzqB0TDctW9k=", "path": "github.com/aws/aws-sdk-go/service/configservice", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + 
"version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "hYCwLQdIjHj8rMHLGVyUVhecI4s=", "path": "github.com/aws/aws-sdk-go/service/databasemigrationservice", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "26CWoHQP/dyL2VzE5ZNd8zNzhko=", "path": "github.com/aws/aws-sdk-go/service/devicefarm", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "6g94rUHAgjcqMMTtMqKUbLU37wY=", "path": "github.com/aws/aws-sdk-go/service/directconnect", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "oFnS6I0u7KqnxK0/r1uoz8rTkxI=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "0TXXUPjrbOCHpX555B6suH36Nnk=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": 
"v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { - "checksumSHA1": "BOjSO1uO7Coj6o3oqpPUtEhQrPI=", + "checksumSHA1": "ygIRwuuaUwheg2sYJkChPRD2JME=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "uEv9kkBsVIjg7K4+Y8TVlU0Cc8o=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "sD9Urgwx7F3ImX+tJg2Q+ME/oFM=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "eoM9nF5iVMbuGOmkY33d19aHt8Y=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "dU5MPXUUOYD/E9sNncpFZ/U86Cw=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": 
"1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "pj8mBWT3HE0Iid6HSmhw7lmyZDU=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "VYGtTaSiajfKOVTbi9/SNmbiIac=", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "SZ7yLDZ6RvMhpWe0Goyem64kgyA=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "WYqHhdRNsiGGBLWlBLbOItZf+zA=", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": 
"ae7VWg/xuXpnSD6wGumN44qEd+Q=", "path": "github.com/aws/aws-sdk-go/service/elbv2", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "NbkH6F+792jQ7BW4lGCb+vJVw58=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "5btWHj2fZrPc/zfYdJLPaOcivxI=", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "oDoGvSfmO2Z099ixV2HXn+SDeHE=", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "HRmbBf3dUEBAfdC2xKaoWAGeM7Y=", "path": "github.com/aws/aws-sdk-go/service/glue", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": 
"v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "6JlxJoy1JCArNK2qBkaJ5IV6qBc=", "path": "github.com/aws/aws-sdk-go/service/guardduty", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "oZaxMqnwl2rA+V/W0tJ3uownORI=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "dnNMSn5aHAtdOks+aWHLpwbi/VE=", "path": "github.com/aws/aws-sdk-go/service/inspector", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "pZwCI4DpP5hcMa/ItKhiwo/ukd0=", "path": "github.com/aws/aws-sdk-go/service/iot", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "IoSyRZhlL0petrB28nXk5jKM9YA=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": 
"32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "oAFLgD0uJiVOZkFkL5dd/wUgBz4=", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "XDVse9fKF0RkAywzzgsO31AV4oc=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "HluEcyZNywrbKnj/aR3tXbu29d8=", "path": "github.com/aws/aws-sdk-go/service/lexmodelbuildingservice", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "wjs9YBsHx0YQH0zKBA7Ibd1UV5Y=", "path": "github.com/aws/aws-sdk-go/service/lightsail", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "4VfB5vMLNYs0y6K159YCBgo9T3c=", "path": "github.com/aws/aws-sdk-go/service/mediaconvert", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": 
"2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "Ox3VWHYSQq0YKmlr0paUPdr5W/0=", "path": "github.com/aws/aws-sdk-go/service/medialive", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "Rs7QtkcLl3XNPnKb8ss/AhF2X50=", "path": "github.com/aws/aws-sdk-go/service/mediapackage", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "QjiIL8LrlhwrQw8FboF+wMNvUF0=", "path": "github.com/aws/aws-sdk-go/service/mediastore", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "ZY1SJNE03I6NL2OBJD9hlwVsqO0=", "path": "github.com/aws/aws-sdk-go/service/mediastoredata", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "ynB7Flcudp0VOqBVKZJ+23DtLHU=", "path": 
"github.com/aws/aws-sdk-go/service/mq", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "fpsBu+F79ktlLRwal1GugVMUDo0=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "Iqkgx2nafQPV7fjw+uP35jtF6t4=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "vP1FcccUZbuUlin7ME89w1GVJtA=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "tKnVaKPOCiU6xl3/AYcdBCLtRdw=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": 
"v1.12.52" }, { "checksumSHA1": "sCaHoPWsJXRHFbilUKwN71qFTOI=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "QZU8vR9cOIenYiH+Ywl4Gzfnlp0=", "path": "github.com/aws/aws-sdk-go/service/servicecatalog", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "dk6ebvA0EYgdPyc5HPKLBPEtsm4=", "path": "github.com/aws/aws-sdk-go/service/servicediscovery", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "Ex1Ma0SFGpqeNuPbeXZtsliZ3zo=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "maVXeR3WDAkONlzf04e4mDgCYxo=", "path": "github.com/aws/aws-sdk-go/service/sfn", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + 
"revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "ADoR4mlCW5usH8iOa6mPNSy49LM=", "path": "github.com/aws/aws-sdk-go/service/shield", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "B3CgAFSREebpsFoFOo4vrQ6u04w=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "FfY8w4DM8XIULdRnFhd3Um8Mj8c=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "Wx189wAbIhWChx4kVbvsyqKMF4U=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "ijz0rBDeR6JP/06S+97k84FRYxc=", "path": "github.com/aws/aws-sdk-go/service/ssm", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + 
"revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "W1oFtpaT4TWIIJrAvFcn/XdcT7g=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "Uw4pOUxSMbx4xBHUcOUkNhtnywE=", "path": "github.com/aws/aws-sdk-go/service/swf", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "on6d7Hydx2bM9jkFOf1JZcZZgeY=", "path": "github.com/aws/aws-sdk-go/service/waf", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "rHqjsOndIR82gX5mSKybaRWf3UY=", "path": "github.com/aws/aws-sdk-go/service/wafregional", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": "2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "5HDSvmMW7F3xzPAzughe4dEn6RM=", "path": "github.com/aws/aws-sdk-go/service/workspaces", - "revision": "1cb9ce3dbddddab4a70e25ec1a5a60f1647b7e0b", - "revisionTime": 
"2017-12-21T00:21:33Z", - "version": "v1.12.51", - "versionExact": "v1.12.51" + "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", + "revisionTime": "2017-12-21T23:11:03Z", + "version": "v1.12.52", + "versionExact": "v1.12.52" }, { "checksumSHA1": "usT4LCSQItkFvFOQT7cBlkCuGaE=", From af666ee919fdf076bbdb7f8d8b56658e3baa82a9 Mon Sep 17 00:00:00 2001 From: Quentin Rousseau Date: Fri, 22 Dec 2017 05:28:45 -0800 Subject: [PATCH 055/350] Support Performance Insights for rds_cluster_instance (#2331) resource/aws_rds_cluster_instance: Support Performance Insights --- aws/resource_aws_rds_cluster_instance.go | 37 ++++++++ aws/resource_aws_rds_cluster_instance_test.go | 89 ++++++++++++++++++- .../docs/r/rds_cluster_instance.html.markdown | 4 + 3 files changed, 128 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_rds_cluster_instance.go b/aws/resource_aws_rds_cluster_instance.go index 9c4b580d067..fd1b2496bee 100644 --- a/aws/resource_aws_rds_cluster_instance.go +++ b/aws/resource_aws_rds_cluster_instance.go @@ -179,6 +179,19 @@ func resourceAwsRDSClusterInstance() *schema.Resource { Computed: true, }, + "performance_insights_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "performance_insights_kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateArn, + }, + "tags": tagsSchema(), }, } @@ -224,6 +237,16 @@ func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{ createOpts.MonitoringRoleArn = aws.String(attr.(string)) } + if attr, _ := d.GetOk("engine"); attr == "aurora-postgresql" { + if attr, ok := d.GetOk("performance_insights_enabled"); ok { + createOpts.EnablePerformanceInsights = aws.Bool(attr.(bool)) + } + + if attr, ok := d.GetOk("performance_insights_kms_key_id"); ok { + createOpts.PerformanceInsightsKMSKeyId = aws.String(attr.(string)) + } + } + if attr, ok := d.GetOk("preferred_backup_window"); ok { createOpts.PreferredBackupWindow = 
aws.String(attr.(string)) } @@ -326,6 +349,8 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) d.Set("preferred_backup_window", db.PreferredBackupWindow) d.Set("preferred_maintenance_window", db.PreferredMaintenanceWindow) d.Set("availability_zone", db.AvailabilityZone) + d.Set("performance_insights_enabled", db.PerformanceInsightsEnabled) + d.Set("performance_insights_kms_key_id", db.PerformanceInsightsKMSKeyId) if db.MonitoringInterval != nil { d.Set("monitoring_interval", db.MonitoringInterval) @@ -377,6 +402,18 @@ func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{ requestUpdate = true } + if d.HasChange("performance_insights_enabled") { + d.SetPartial("performance_insights_enabled") + req.EnablePerformanceInsights = aws.Bool(d.Get("performance_insights_enabled").(bool)) + requestUpdate = true + } + + if d.HasChange("performance_insights_kms_key_id") { + d.SetPartial("performance_insights_kms_key_id") + req.PerformanceInsightsKMSKeyId = aws.String(d.Get("performance_insights_kms_key_id").(string)) + requestUpdate = true + } + if d.HasChange("preferred_backup_window") { d.SetPartial("preferred_backup_window") req.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string)) diff --git a/aws/resource_aws_rds_cluster_instance_test.go b/aws/resource_aws_rds_cluster_instance_test.go index 0dc51ef4c50..4815b4c60b8 100644 --- a/aws/resource_aws_rds_cluster_instance_test.go +++ b/aws/resource_aws_rds_cluster_instance_test.go @@ -138,8 +138,8 @@ func TestAccAWSRDSClusterInstance_disappears(t *testing.T) { func testAccCheckAWSDBClusterInstanceAttributes(v *rds.DBInstance) resource.TestCheckFunc { return func(s *terraform.State) error { - if *v.Engine != "aurora" { - return fmt.Errorf("bad engine, expected \"aurora\": %#v", *v.Engine) + if *v.Engine != "aurora" && *v.Engine != "aurora-postgresql" { + return fmt.Errorf("bad engine, expected \"aurora\" or \"aurora-postgresql\": %#v", *v.Engine) } 
if !strings.HasPrefix(*v.DBClusterIdentifier, "tf-aurora-cluster") { @@ -228,6 +228,30 @@ func TestAccAWSRDSClusterInstance_withInstanceEnhancedMonitor(t *testing.T) { }) } +func TestAccAWSRDSClusterInstance_withInstancePerformanceInsights(t *testing.T) { + var v rds.DBInstance + keyRegex := regexp.MustCompile("^arn:aws:kms:") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSClusterInstancePerformanceInsights(acctest.RandInt()), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v), + testAccCheckAWSDBClusterInstanceAttributes(&v), + resource.TestCheckResourceAttr( + "aws_rds_cluster_instance.cluster_instances", "performance_insights_enabled", "true"), + resource.TestMatchResourceAttr( + "aws_rds_cluster_instance.cluster_instances", "performance_insights_kms_key_id", keyRegex), + ), + }, + }, + }) +} + // Add some random to the name, to avoid collision func testAccAWSClusterInstanceConfig(n int) string { return fmt.Sprintf(` @@ -540,3 +564,64 @@ resource "aws_db_parameter_group" "bar" { } `, n, n, n, n, n, n) } + +func testAccAWSClusterInstancePerformanceInsights(n int) string { + return fmt.Sprintf(` + +resource "aws_kms_key" "foo" { + description = "Terraform acc test %d" + policy = < Date: Fri, 22 Dec 2017 14:29:23 +0100 Subject: [PATCH 056/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c6ae1e8e87..7ab2e97cbc2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ ENHANCEMENTS: * resource/aws_kinesis_firehose_delivery_stream: Import is now supported [GH-2707] * resource/aws_cognito_user_pool: The ARN for the pool is now computed and exposed as an attribute [GH-2723] * resource/aws_directory_service_directory: Add security_group_id field 
[GH-2688] +* resource/aws_rds_cluster_instance: Support Performance Insights [GH-2331] BUG FIXES: From 8372dd84e641d0f6be88123317ae2b48e054a849 Mon Sep 17 00:00:00 2001 From: Gauthier Wallet Date: Fri, 22 Dec 2017 14:44:42 +0100 Subject: [PATCH 057/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ab2e97cbc2..e6aa2aed166 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ ENHANCEMENTS: * resource/aws_cognito_user_pool: The ARN for the pool is now computed and exposed as an attribute [GH-2723] * resource/aws_directory_service_directory: Add security_group_id field [GH-2688] * resource/aws_rds_cluster_instance: Support Performance Insights [GH-2331] +* config: Allow API Gateway and Lambda endpoints configuration [GH-2641] BUG FIXES: From 8ef5c813ad939fb7150298563a709ea56320d68c Mon Sep 17 00:00:00 2001 From: Gauthier Wallet Date: Fri, 22 Dec 2017 17:49:47 +0100 Subject: [PATCH 058/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e6aa2aed166..fa51fcaabf0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ ENHANCEMENTS: * resource/aws_directory_service_directory: Add security_group_id field [GH-2688] * resource/aws_rds_cluster_instance: Support Performance Insights [GH-2331] * config: Allow API Gateway and Lambda endpoints configuration [GH-2641] +* r/aws_rds_cluster_instance: Set db_subnet_group_name in state on read if available [GH-2606] BUG FIXES: From 69e4a976b16ca108cb197210c240e650f35d7727 Mon Sep 17 00:00:00 2001 From: David Lovitch Date: Fri, 22 Dec 2017 14:35:30 -0800 Subject: [PATCH 059/350] Fixed two spelling mistakes in the S3 bucket documentation. 
--- website/docs/r/s3_bucket.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index cf7d514c26e..deda7034dc6 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -331,7 +331,7 @@ Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 buck the costs of any data transfer. See [Requester Pays Buckets](http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) developer guide for more information. * `replication_configuration` - (Optional) A configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) (documented below). -* `server_side_encryption_configuration` - (Optional) A confguration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) (documented blow) +* `server_side_encryption_configuration` - (Optional) A configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) (documented below) ~> **NOTE:** You cannot use `acceleration_status` in `cn-north-1` or `us-gov-west-1` From 0914c21eb1097d294b6c968ea54d762ce542099f Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Sun, 24 Dec 2017 09:58:37 +0100 Subject: [PATCH 060/350] Update CHANGELOG.md --- CHANGELOG.md | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa51fcaabf0..a5ab69a5fe4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,17 +1,14 @@ ## 1.6.1 (Unreleased) -FEATURES: - -* **New Region**: `eu-west-3` is now supported [GH-2707] - ENHANCEMENTS: +* provider: Allow API Gateway and Lambda endpoints configuration [GH-2641] +* provider: `eu-west-3` is now supported [GH-2707] * resource/aws_kinesis_firehose_delivery_stream: Import is now supported [GH-2707] * resource/aws_cognito_user_pool: The ARN for 
the pool is now computed and exposed as an attribute [GH-2723] * resource/aws_directory_service_directory: Add security_group_id field [GH-2688] * resource/aws_rds_cluster_instance: Support Performance Insights [GH-2331] -* config: Allow API Gateway and Lambda endpoints configuration [GH-2641] -* r/aws_rds_cluster_instance: Set db_subnet_group_name in state on read if available [GH-2606] +* resource/aws_rds_cluster_instance: Set db_subnet_group_name in state on read if available [GH-2606] BUG FIXES: From 0fa0484681cba615d3b657343ee67fae93939032 Mon Sep 17 00:00:00 2001 From: "xiaowei.wang" Date: Sun, 24 Dec 2017 11:44:48 +0100 Subject: [PATCH 061/350] r/aws_ecs_cluster: support import --- aws/resource_aws_ecs_cluster.go | 16 ++++++++++++++++ website/docs/r/ecs_cluster.html.markdown | 8 ++++++++ 2 files changed, 24 insertions(+) diff --git a/aws/resource_aws_ecs_cluster.go b/aws/resource_aws_ecs_cluster.go index 426e86fc80a..5b25b9e6aa2 100644 --- a/aws/resource_aws_ecs_cluster.go +++ b/aws/resource_aws_ecs_cluster.go @@ -6,6 +6,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ecs" "github.com/hashicorp/terraform/helper/resource" @@ -17,6 +18,9 @@ func resourceAwsEcsCluster() *schema.Resource { Create: resourceAwsEcsClusterCreate, Read: resourceAwsEcsClusterRead, Delete: resourceAwsEcsClusterDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsEcsClusterImport, + }, Schema: map[string]*schema.Schema{ "name": { @@ -33,6 +37,18 @@ func resourceAwsEcsCluster() *schema.Resource { } } +func resourceAwsEcsClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + d.SetId(arn.ARN{ + Partition: meta.(*AWSClient).partition, + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Service: "ecs", + Resource: fmt.Sprintf("cluster/%s", d.Id()), + }.String()) + return 
[]*schema.ResourceData{d}, nil +} + func resourceAwsEcsClusterCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ecsconn diff --git a/website/docs/r/ecs_cluster.html.markdown b/website/docs/r/ecs_cluster.html.markdown index 2366efdb675..cb65f57e176 100644 --- a/website/docs/r/ecs_cluster.html.markdown +++ b/website/docs/r/ecs_cluster.html.markdown @@ -30,3 +30,11 @@ The following additional attributes are exported: * `id` - The Amazon Resource Name (ARN) that identifies the cluster * `arn` - The Amazon Resource Name (ARN) that identifies the cluster + +## Import + +ECS clusters can be imported using the `name`, e.g. + +``` +$ terraform import aws_ecs_cluster.stateless stateless-app +``` \ No newline at end of file From a0890b0bf419295133cc414885a790a7a7398d11 Mon Sep 17 00:00:00 2001 From: Kentaro Terada Date: Sun, 24 Dec 2017 21:17:48 +0900 Subject: [PATCH 062/350] Fix Argument in Example Usage --- website/docs/d/rds_cluster.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/d/rds_cluster.html.markdown b/website/docs/d/rds_cluster.html.markdown index ef5ae76ffb0..2a65039f24e 100644 --- a/website/docs/d/rds_cluster.html.markdown +++ b/website/docs/d/rds_cluster.html.markdown @@ -14,7 +14,7 @@ Provides information about a RDS cluster. 
```hcl data "aws_rds_cluster" "clusterName" { - name = "clusterName" + cluster_identifier = "clusterName" } ``` From e79d6984e190e18187b55f85df45d2b80b818adb Mon Sep 17 00:00:00 2001 From: "xiaowei.wang" Date: Sun, 24 Dec 2017 19:27:52 +0100 Subject: [PATCH 063/350] r/aws_instance: validate user data size during plan --- aws/resource_aws_instance.go | 1 + aws/validators.go | 9 +++++++++ aws/validators_test.go | 24 ++++++++++++++++++++++++ 3 files changed, 34 insertions(+) diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index 52572d232d1..03a874c1c52 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -116,6 +116,7 @@ func resourceAwsInstance() *schema.Resource { return "" } }, + ValidateFunc: validateInstanceUserDataSize, }, "user_data_base64": { diff --git a/aws/validators.go b/aws/validators.go index 8aa8b89aefc..c502925e31d 100644 --- a/aws/validators.go +++ b/aws/validators.go @@ -15,6 +15,15 @@ import ( "github.com/hashicorp/terraform/helper/schema" ) +func validateInstanceUserDataSize(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 16384 { + errors = append(errors, fmt.Errorf("%q cannot be longer than 16384 bytes", k)) + } + return +} + func validateRdsIdentifier(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { diff --git a/aws/validators_test.go b/aws/validators_test.go index b3af78abe15..9337faacb44 100644 --- a/aws/validators_test.go +++ b/aws/validators_test.go @@ -9,6 +9,30 @@ import ( "github.com/aws/aws-sdk-go/service/s3" ) +func TestValidateInstanceUserDataSize(t *testing.T) { + validValues := []string{ + "#!/bin/bash", + "#!/bin/bash\n" + strings.Repeat("#", 16372), // = 16384 + } + + for _, s := range validValues { + _, errors := validateInstanceUserDataSize(s, "user_data") + if len(errors) > 0 { + t.Fatalf("%q should be valid user data with limited size: %v", 
s, errors) + } + } + + invalidValues := []string{ + "#!/bin/bash\n" + strings.Repeat("#", 16373), // = 16385 + } + + for _, s := range invalidValues { + _, errors := validateInstanceUserDataSize(s, "user_data") + if len(errors) == 0 { + t.Fatalf("%q should not be valid user data with limited size: %v", s, errors) + } + } +} func TestValidateEcrRepositoryName(t *testing.T) { validNames := []string{ "nginx-web-app", From d1132f9e23ccf1e0e64b6cb6004c115643675c62 Mon Sep 17 00:00:00 2001 From: VEBER Arnaud Date: Tue, 26 Dec 2017 10:37:33 +0100 Subject: [PATCH 064/350] chore(vendor): bump aws-sdk-go to v1.12.53 --- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/ec2/api.go | 2 +- .../aws/aws-sdk-go/service/ecs/api.go | 127 ++- .../aws/aws-sdk-go/service/ecs/doc.go | 2 +- .../aws/aws-sdk-go/service/inspector/api.go | 82 +- vendor/vendor.json | 832 +++++++++--------- 6 files changed, 596 insertions(+), 451 deletions(-) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 87a281a6d16..f71be27c6f6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.12.52" +const SDKVersion = "1.12.53" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 8966289f14b..4598ccd820a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -22656,7 +22656,7 @@ type Address struct { PublicIp *string `locationName:"publicIp" type:"string"` // Any tags assigned to the Elastic IP address. 
- Tags []*Tag `locationName:"tags" locationNameList:"item" type:"list"` + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go index 42ccc0ff69f..1b467b89303 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go @@ -2669,6 +2669,27 @@ func (c *ECS) RunTaskRequest(input *RunTaskInput) (req *request.Request, output // Alternatively, you can use StartTask to use your own scheduler or place tasks // manually on specific container instances. // +// The Amazon ECS API follows an eventual consistency model, due to the distributed +// nature of the system supporting the API. This means that the result of an +// API command you run that affects your Amazon ECS resources might not be immediately +// visible to all subsequent commands you run. You should keep this in mind +// when you carry out an API command that immediately follows a previous API +// command. +// +// To manage eventual consistency, you can do the following: +// +// * Confirm the state of the resource before you run a command to modify +// it. Run the DescribeTasks command using an exponential backoff algorithm +// to ensure that you allow enough time for the previous command to propagate +// through the system. To do this, run the DescribeTasks command repeatedly, +// starting with a couple of seconds of wait time, and increasing gradually +// up to five minutes of wait time. +// +// * Add wait time between subsequent commands, even if the DescribeTasks +// command returns an accurate response. Apply an exponential backoff algorithm +// starting with a couple of seconds of wait time, and increase gradually +// up to about five minutes of wait time. +// // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4292,6 +4313,9 @@ type ContainerDefinition struct { // allow the container to only reserve 128 MiB of memory from the remaining // resources on the container instance, but also allow the container to consume // more memory resources when needed. + // + // The Docker daemon reserves a minimum of 4 MiB of memory for a container, + // so you should not specify fewer than 4 MiB of memory for your containers. MemoryReservation *int64 `locationName:"memoryReservation" type:"integer"` // The mount points for data volumes in your container. @@ -4991,6 +5015,17 @@ type CreateServiceInput struct { // DesiredCount is a required field DesiredCount *int64 `locationName:"desiredCount" type:"integer" required:"true"` + // The period of time, in seconds, that the Amazon ECS service scheduler should + // ignore unhealthy Elastic Load Balancing target health checks after a task + // has first started. This is only valid if your service is configured to use + // a load balancer. If your service's tasks take a while to start and respond + // to ELB health checks, you can specify a health check grace period of up to + // 1,800 seconds during which the ECS service scheduler will ignore ELB health + // check status. This grace period can prevent the ECS service scheduler from + // marking tasks as unhealthy and stopping them before they have time to come + // up. + HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"` + // The launch type on which to run your service. LaunchType *string `locationName:"launchType" type:"string" enum:"LaunchType"` @@ -5129,6 +5164,12 @@ func (s *CreateServiceInput) SetDesiredCount(v int64) *CreateServiceInput { return s } +// SetHealthCheckGracePeriodSeconds sets the HealthCheckGracePeriodSeconds field's value. 
+func (s *CreateServiceInput) SetHealthCheckGracePeriodSeconds(v int64) *CreateServiceInput { + s.HealthCheckGracePeriodSeconds = &v + return s +} + // SetLaunchType sets the LaunchType field's value. func (s *CreateServiceInput) SetLaunchType(v string) *CreateServiceInput { s.LaunchType = &v @@ -5768,7 +5809,7 @@ type DescribeClustersInput struct { // // * runningEC2TasksCount // - // * RunningFargateTasksCount + // * runningFargateTasksCount // // * pendingEC2TasksCount // @@ -7810,9 +7851,9 @@ func (s *PlacementStrategy) SetType(v string) *PlacementStrategy { // to send or receive traffic. Port mappings are specified as part of the container // definition. // -// If using containers in a task with the Fargate launch type, exposed ports -// should be specified using containerPort. The hostPort can be left blank or -// it must be the same value as the containerPort. +// If using containers in a task with the awsvpc or host network mode, exposed +// ports should be specified using containerPort. The hostPort can be left blank +// or it must be the same value as the containerPort. // // After a task reaches the RUNNING status, manual and automatic host and container // port assignments are visible in the networkBindings section of DescribeTasks @@ -7824,11 +7865,11 @@ type PortMapping struct { // The port number on the container that is bound to the user-specified or automatically // assigned host port. // - // If using containers in a task with the Fargate launch type, exposed ports - // should be specified using containerPort. + // If using containers in a task with the awsvpc or host network mode, exposed + // ports should be specified using containerPort. 
// - // If using containers in a task with the EC2 launch type and you specify a - // container port and not a host port, your container automatically receives + // If using containers in a task with the bridge network mode and you specify + // a container port and not a host port, your container automatically receives // a host port in the ephemeral port range (for more information, see hostPort). // Port mappings that are automatically assigned in this way do not count toward // the 100 reserved ports limit of a container instance. @@ -7836,12 +7877,12 @@ type PortMapping struct { // The port number on the container instance to reserve for your container. // - // If using containers in a task with the Fargate launch type, the hostPort + // If using containers in a task with the awsvpc or host network mode, the hostPort // can either be left blank or needs to be the same value as the containerPort. // - // If using containers in a task with the EC2 launch type, you can specify a - // non-reserved host port for your container port mapping, or you can omit the - // hostPort (or set it to 0) while specifying a containerPort and your container + // If using containers in a task with the bridge network mode, you can specify + // a non-reserved host port for your container port mapping, or you can omit + // the hostPort (or set it to 0) while specifying a containerPort and your container // automatically receives a port in the ephemeral port range for your container // instance operating system and Docker version. // @@ -8123,11 +8164,16 @@ type RegisterTaskDefinitionInput struct { ContainerDefinitions []*ContainerDefinition `locationName:"containerDefinitions" type:"list" required:"true"` // The number of cpu units used by the task. If using the EC2 launch type, this - // field is optional and any value can be used. 
If you are using the Fargate - // launch type, this field is required and you must use one of the following - // values, which determines your range of valid values for the memory parameter: + // field is optional and any value can be used. + // + // Task-level CPU and memory parameters are ignored for Windows containers. + // We recommend specifying container-level resources for Windows containers. + // + // If you are using the Fargate launch type, this field is required and you + // must use one of the following values, which determines your range of valid + // values for the memory parameter: // - // * 256 (.25 vCPU) - Available memory values: 512MB, 1GB, 2GB + // * 256 (.25 vCPU) - Available memory values: 0.5GB, 1GB, 2GB // // * 512 (.5 vCPU) - Available memory values: 1GB, 2GB, 3GB, 4GB // @@ -8154,11 +8200,16 @@ type RegisterTaskDefinitionInput struct { Family *string `locationName:"family" type:"string" required:"true"` // The amount (in MiB) of memory used by the task. If using the EC2 launch type, - // this field is optional and any value can be used. If you are using the Fargate - // launch type, this field is required and you must use one of the following - // values, which determines your range of valid values for the cpu parameter: + // this field is optional and any value can be used. // - // * 512MB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) + // Task-level CPU and memory parameters are ignored for Windows containers. + // We recommend specifying container-level resources for Windows containers. + // + // If you are using the Fargate launch type, this field is required and you + // must use one of the following values, which determines your range of valid + // values for the cpu parameter: + // + // * 0.5GB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) // // * 1GB, 2GB, 3GB, 4GB - Available cpu values: 512 (.5 vCPU) // @@ -8642,6 +8693,11 @@ type Service struct { // are displayed. 
Events []*ServiceEvent `locationName:"events" type:"list"` + // The period of time, in seconds, that the Amazon ECS service scheduler ignores + // unhealthy Elastic Load Balancing target health checks after a task has first + // started. + HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"` + // The launch type on which your service is running. LaunchType *string `locationName:"launchType" type:"string" enum:"LaunchType"` @@ -8742,6 +8798,12 @@ func (s *Service) SetEvents(v []*ServiceEvent) *Service { return s } +// SetHealthCheckGracePeriodSeconds sets the HealthCheckGracePeriodSeconds field's value. +func (s *Service) SetHealthCheckGracePeriodSeconds(v int64) *Service { + s.HealthCheckGracePeriodSeconds = &v + return s +} + // SetLaunchType sets the LaunchType field's value. func (s *Service) SetLaunchType(v string) *Service { s.LaunchType = &v @@ -9383,7 +9445,7 @@ type Task struct { // type, this field is required and you must use one of the following values, // which determines your range of valid values for the memory parameter: // - // * 256 (.25 vCPU) - Available memory values: 512MB, 1GB, 2GB + // * 256 (.25 vCPU) - Available memory values: 0.5GB, 1GB, 2GB // // * 512 (.5 vCPU) - Available memory values: 1GB, 2GB, 3GB, 4GB // @@ -9421,7 +9483,7 @@ type Task struct { // type, this field is required and you must use one of the following values, // which determines your range of valid values for the cpu parameter: // - // * 512MB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) + // * 0.5GB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) // // * 1GB, 2GB, 3GB, 4GB - Available cpu values: 512 (.5 vCPU) // @@ -9670,7 +9732,7 @@ type TaskDefinition struct { // type, this field is required and you must use one of the following values, // which determines your range of valid values for the memory parameter: // - // * 256 (.25 vCPU) - Available memory values: 512MB, 1GB, 2GB + // * 256 (.25 vCPU) - Available memory 
values: 0.5GB, 1GB, 2GB // // * 512 (.5 vCPU) - Available memory values: 1GB, 2GB, 3GB, 4GB // @@ -9696,7 +9758,7 @@ type TaskDefinition struct { // type, this field is required and you must use one of the following values, // which determines your range of valid values for the cpu parameter: // - // * 512MB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) + // * 0.5GB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) // // * 1GB, 2GB, 3GB, 4GB - Available cpu values: 512 (.5 vCPU) // @@ -10241,6 +10303,17 @@ type UpdateServiceInput struct { // Whether or not to force a new deployment of the service. ForceNewDeployment *bool `locationName:"forceNewDeployment" type:"boolean"` + // The period of time, in seconds, that the Amazon ECS service scheduler should + // ignore unhealthy Elastic Load Balancing target health checks after a task + // has first started. This is only valid if your service is configured to use + // a load balancer. If your service's tasks take a while to start and respond + // to ELB health checks, you can specify a health check grace period of up to + // 1,800 seconds during which the ECS service scheduler will ignore ELB health + // check status. This grace period can prevent the ECS service scheduler from + // marking tasks as unhealthy and stopping them before they have time to come + // up. + HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"` + // The network configuration for the service. This parameter is required for // task definitions that use the awsvpc network mode to receive their own Elastic // Network Interface, and it is not supported for other network modes. For more @@ -10321,6 +10394,12 @@ func (s *UpdateServiceInput) SetForceNewDeployment(v bool) *UpdateServiceInput { return s } +// SetHealthCheckGracePeriodSeconds sets the HealthCheckGracePeriodSeconds field's value. 
+func (s *UpdateServiceInput) SetHealthCheckGracePeriodSeconds(v int64) *UpdateServiceInput { + s.HealthCheckGracePeriodSeconds = &v + return s +} + // SetNetworkConfiguration sets the NetworkConfiguration field's value. func (s *UpdateServiceInput) SetNetworkConfiguration(v *NetworkConfiguration) *UpdateServiceInput { s.NetworkConfiguration = v diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/doc.go index a78e75e86f7..1d59f5b640e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecs/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/doc.go @@ -10,7 +10,7 @@ // tasks using the Fargate launch type. For more control, you can host your // tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances // that you manage by using the EC2 launch type. For more information about -// launch types, see Amazon ECS Launch Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguidelaunch_types.html). +// launch types, see Amazon ECS Launch Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html). // // Amazon ECS lets you launch and stop container-based applications with simple // API calls, allows you to get the state of your cluster from a centralized diff --git a/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go b/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go index 7097f2fe681..f9ec51dd5c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go @@ -149,9 +149,12 @@ func (c *Inspector) CreateAssessmentTargetRequest(input *CreateAssessmentTargetI // CreateAssessmentTarget API operation for Amazon Inspector. // // Creates a new assessment target using the ARN of the resource group that -// is generated by CreateResourceGroup. You can create up to 50 assessment targets -// per AWS account. You can run up to 500 concurrent agents per AWS account. 
-// For more information, see Amazon Inspector Assessment Targets (http://docs.aws.amazon.com/inspector/latest/userguide/inspector_applications.html). +// is generated by CreateResourceGroup. If the service-linked role (https://docs.aws.amazon.com/inspector/latest/userguide/inspector_slr.html) +// isn’t already registered, also creates and registers a service-linked role +// to grant Amazon Inspector access to AWS Services needed to perform security +// assessments. You can create up to 50 assessment targets per AWS account. +// You can run up to 500 concurrent agents per AWS account. For more information, +// see Amazon Inspector Assessment Targets (http://docs.aws.amazon.com/inspector/latest/userguide/inspector_applications.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -246,7 +249,10 @@ func (c *Inspector) CreateAssessmentTemplateRequest(input *CreateAssessmentTempl // CreateAssessmentTemplate API operation for Amazon Inspector. // // Creates an assessment template for the assessment target that is specified -// by the ARN of the assessment target. +// by the ARN of the assessment target. If the service-linked role (https://docs.aws.amazon.com/inspector/latest/userguide/inspector_slr.html) +// isn’t already registered, also creates and registers a service-linked role +// to grant Amazon Inspector access to AWS Services needed to perform security +// assessments. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2764,8 +2770,8 @@ func (c *Inspector) RegisterCrossAccountAccessRoleRequest(input *RegisterCrossAc // RegisterCrossAccountAccessRole API operation for Amazon Inspector. 
// -// Registers the IAM role that Amazon Inspector uses to list your EC2 instances -// at the start of the assessment run or when you call the PreviewAgents action. +// Registers the IAM role that grants Amazon Inspector access to AWS Services +// needed to perform security assessments. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3670,13 +3676,34 @@ func (s *AgentFilter) SetAgentHealths(v []*string) *AgentFilter { type AgentPreview struct { _ struct{} `type:"structure"` + // The health status of the Amazon Inspector Agent. + AgentHealth *string `locationName:"agentHealth" type:"string" enum:"AgentHealth"` + // The ID of the EC2 instance where the agent is installed. // // AgentId is a required field AgentId *string `locationName:"agentId" min:"1" type:"string" required:"true"` + // The version of the Amazon Inspector Agent. + AgentVersion *string `locationName:"agentVersion" min:"1" type:"string"` + // The Auto Scaling group for the EC2 instance where the agent is installed. AutoScalingGroup *string `locationName:"autoScalingGroup" min:"1" type:"string"` + + // The hostname of the EC2 instance on which the Amazon Inspector Agent is installed. + Hostname *string `locationName:"hostname" type:"string"` + + // The IP address of the EC2 instance on which the Amazon Inspector Agent is + // installed. + Ipv4Address *string `locationName:"ipv4Address" min:"7" type:"string"` + + // The kernel version of the operating system running on the EC2 instance on + // which the Amazon Inspector Agent is installed. + KernelVersion *string `locationName:"kernelVersion" min:"1" type:"string"` + + // The operating system running on the EC2 instance on which the Amazon Inspector + // Agent is installed. 
+ OperatingSystem *string `locationName:"operatingSystem" min:"1" type:"string"` } // String returns the string representation @@ -3689,18 +3716,54 @@ func (s AgentPreview) GoString() string { return s.String() } +// SetAgentHealth sets the AgentHealth field's value. +func (s *AgentPreview) SetAgentHealth(v string) *AgentPreview { + s.AgentHealth = &v + return s +} + // SetAgentId sets the AgentId field's value. func (s *AgentPreview) SetAgentId(v string) *AgentPreview { s.AgentId = &v return s } +// SetAgentVersion sets the AgentVersion field's value. +func (s *AgentPreview) SetAgentVersion(v string) *AgentPreview { + s.AgentVersion = &v + return s +} + // SetAutoScalingGroup sets the AutoScalingGroup field's value. func (s *AgentPreview) SetAutoScalingGroup(v string) *AgentPreview { s.AutoScalingGroup = &v return s } +// SetHostname sets the Hostname field's value. +func (s *AgentPreview) SetHostname(v string) *AgentPreview { + s.Hostname = &v + return s +} + +// SetIpv4Address sets the Ipv4Address field's value. +func (s *AgentPreview) SetIpv4Address(v string) *AgentPreview { + s.Ipv4Address = &v + return s +} + +// SetKernelVersion sets the KernelVersion field's value. +func (s *AgentPreview) SetKernelVersion(v string) *AgentPreview { + s.KernelVersion = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *AgentPreview) SetOperatingSystem(v string) *AgentPreview { + s.OperatingSystem = &v + return s +} + // A snapshot of an Amazon Inspector assessment run that contains the findings // of the assessment run . // @@ -7234,8 +7297,8 @@ func (s *PreviewAgentsOutput) SetNextToken(v string) *PreviewAgentsOutput { type RegisterCrossAccountAccessRoleInput struct { _ struct{} `type:"structure"` - // The ARN of the IAM role that Amazon Inspector uses to list your EC2 instances - // during the assessment run or when you call the PreviewAgents action. 
+ // The ARN of the IAM role that grants Amazon Inspector access to AWS Services + // needed to perform security assessments. // // RoleArn is a required field RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` @@ -8317,6 +8380,9 @@ const ( // AgentHealthUnhealthy is a AgentHealth enum value AgentHealthUnhealthy = "UNHEALTHY" + + // AgentHealthUnknown is a AgentHealth enum value + AgentHealthUnknown = "UNKNOWN" ) const ( diff --git a/vendor/vendor.json b/vendor/vendor.json index aeebe6dcb84..5b75513c61c 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -141,828 +141,828 @@ "revisionTime": "2017-07-27T15:54:43Z" }, { - "checksumSHA1": "zrW2b0liD5UpOFil/Nj7wa7Sp9A=", + "checksumSHA1": "rRZbZbRFXiRN25up38799OeziZw=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "DtuTqKH29YnLjrIJkRYX0HQtXY0=", "path": "github.com/aws/aws-sdk-go/aws/arn", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": 
"github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "9nE/FjZ4pYrT883KtV2/aI+Gayo=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": 
"v1.12.53" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "OnU/n7R33oYXiB4SAGd5pK7I0Bs=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": 
"82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "aOgB3+hNeX2svLhaX373ToSkhTg=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "9GvAyILJ7g+VUg8Ef5DsT5GuYsg=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "iU00ZjhAml/13g+1YXT21IqoXqg=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - 
"version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "NStHCXEvYqG72GknZyv1jaKaeH0=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "yHfT5DTbeCLs4NE2Rgnqrhe15ls=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": 
"github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "pkeoOfZpHRvFG/AOZeTf0lwtsFg=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": 
"2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "vnYDXA1NxJ7Hu+DMfXNk1UnmkWg=", "path": "github.com/aws/aws-sdk-go/service/acm", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "DPl/OkvEUjrd+XKqX73l6nUNw3U=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "X8tOI6i+RJwXIgg1qBjDNclyG/0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - 
"versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "aDAaH6YiA50IrJ5Smfg0fovrniA=", "path": "github.com/aws/aws-sdk-go/service/appsync", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "oBXDw1zQTfxcKsK3ZjtKcS7gBLI=", "path": "github.com/aws/aws-sdk-go/service/athena", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "ITAwWyJp4t9AGfUXm9M3pFWTHVA=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "Zz8qI6RloveM1zrXAglLxJZT1ZA=", "path": "github.com/aws/aws-sdk-go/service/batch", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "/nO06EpnD22+Ex80gHi4UYrAvKc=", "path": "github.com/aws/aws-sdk-go/service/budgets", - "revision": 
"32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "6gM3CZZgiB0JvS7EK1c31Q8L09U=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "T80IDetBz1hqJpq5Wqmx3MwCh8w=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "bYrI9mxspB0xDFZEy3OIfWuez5g=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "oB+M+kOmYG28V0PuI75IF6E+/w8=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": 
"Nc3vXlV7s309PprScYpRDPQWeDQ=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "bPh7NF3mLpGMV0rIakolMPHqMyw=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "P6qyaFX9X6Nnvm3avLigjmjfYds=", "path": "github.com/aws/aws-sdk-go/service/codebuild", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "7nW1Ho2X3RcUU8FaFBhJIUeuDNw=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "m19PZt1B51QCWo1jxSbII2zzL6Q=", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": 
"2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "LKw7fnNwq17Eqy0clzS/LK89vS4=", "path": "github.com/aws/aws-sdk-go/service/codepipeline", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "aXh1KIbNX+g+tH+lh3pk++9lm3k=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentity", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "IWi9xZz+OncotjM/vJ87Iffg2Qk=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentityprovider", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "56F6Stg8hQ1kxiAEzqB0TDctW9k=", "path": "github.com/aws/aws-sdk-go/service/configservice", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "hYCwLQdIjHj8rMHLGVyUVhecI4s=", "path": "github.com/aws/aws-sdk-go/service/databasemigrationservice", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - 
"version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "26CWoHQP/dyL2VzE5ZNd8zNzhko=", "path": "github.com/aws/aws-sdk-go/service/devicefarm", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "6g94rUHAgjcqMMTtMqKUbLU37wY=", "path": "github.com/aws/aws-sdk-go/service/directconnect", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "oFnS6I0u7KqnxK0/r1uoz8rTkxI=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "0TXXUPjrbOCHpX555B6suH36Nnk=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { - "checksumSHA1": "ygIRwuuaUwheg2sYJkChPRD2JME=", + "checksumSHA1": "INaeHZ2L5x6RlrcQBm4q1hFqNRM=", "path": 
"github.com/aws/aws-sdk-go/service/ec2", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "uEv9kkBsVIjg7K4+Y8TVlU0Cc8o=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { - "checksumSHA1": "sD9Urgwx7F3ImX+tJg2Q+ME/oFM=", + "checksumSHA1": "3B3RtWG7IY9qhFhWGEwroeMxnPI=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "eoM9nF5iVMbuGOmkY33d19aHt8Y=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "dU5MPXUUOYD/E9sNncpFZ/U86Cw=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + 
"version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "pj8mBWT3HE0Iid6HSmhw7lmyZDU=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "VYGtTaSiajfKOVTbi9/SNmbiIac=", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "SZ7yLDZ6RvMhpWe0Goyem64kgyA=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "WYqHhdRNsiGGBLWlBLbOItZf+zA=", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "ae7VWg/xuXpnSD6wGumN44qEd+Q=", "path": "github.com/aws/aws-sdk-go/service/elbv2", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + 
"revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "NbkH6F+792jQ7BW4lGCb+vJVw58=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "5btWHj2fZrPc/zfYdJLPaOcivxI=", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "oDoGvSfmO2Z099ixV2HXn+SDeHE=", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "HRmbBf3dUEBAfdC2xKaoWAGeM7Y=", "path": "github.com/aws/aws-sdk-go/service/glue", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "6JlxJoy1JCArNK2qBkaJ5IV6qBc=", "path": "github.com/aws/aws-sdk-go/service/guardduty", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": 
"2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "oZaxMqnwl2rA+V/W0tJ3uownORI=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { - "checksumSHA1": "dnNMSn5aHAtdOks+aWHLpwbi/VE=", + "checksumSHA1": "Pg4c7tUVP15Ry9uPA3qixJXSd4I=", "path": "github.com/aws/aws-sdk-go/service/inspector", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "pZwCI4DpP5hcMa/ItKhiwo/ukd0=", "path": "github.com/aws/aws-sdk-go/service/iot", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "IoSyRZhlL0petrB28nXk5jKM9YA=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "oAFLgD0uJiVOZkFkL5dd/wUgBz4=", "path": 
"github.com/aws/aws-sdk-go/service/kms", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "XDVse9fKF0RkAywzzgsO31AV4oc=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "HluEcyZNywrbKnj/aR3tXbu29d8=", "path": "github.com/aws/aws-sdk-go/service/lexmodelbuildingservice", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "wjs9YBsHx0YQH0zKBA7Ibd1UV5Y=", "path": "github.com/aws/aws-sdk-go/service/lightsail", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "4VfB5vMLNYs0y6K159YCBgo9T3c=", "path": "github.com/aws/aws-sdk-go/service/mediaconvert", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + 
"versionExact": "v1.12.53" }, { "checksumSHA1": "Ox3VWHYSQq0YKmlr0paUPdr5W/0=", "path": "github.com/aws/aws-sdk-go/service/medialive", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "Rs7QtkcLl3XNPnKb8ss/AhF2X50=", "path": "github.com/aws/aws-sdk-go/service/mediapackage", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "QjiIL8LrlhwrQw8FboF+wMNvUF0=", "path": "github.com/aws/aws-sdk-go/service/mediastore", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "ZY1SJNE03I6NL2OBJD9hlwVsqO0=", "path": "github.com/aws/aws-sdk-go/service/mediastoredata", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "ynB7Flcudp0VOqBVKZJ+23DtLHU=", "path": "github.com/aws/aws-sdk-go/service/mq", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": 
"82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "fpsBu+F79ktlLRwal1GugVMUDo0=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "Iqkgx2nafQPV7fjw+uP35jtF6t4=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "vP1FcccUZbuUlin7ME89w1GVJtA=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "tKnVaKPOCiU6xl3/AYcdBCLtRdw=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "sCaHoPWsJXRHFbilUKwN71qFTOI=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - 
"version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "QZU8vR9cOIenYiH+Ywl4Gzfnlp0=", "path": "github.com/aws/aws-sdk-go/service/servicecatalog", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "dk6ebvA0EYgdPyc5HPKLBPEtsm4=", "path": "github.com/aws/aws-sdk-go/service/servicediscovery", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "Ex1Ma0SFGpqeNuPbeXZtsliZ3zo=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "maVXeR3WDAkONlzf04e4mDgCYxo=", "path": "github.com/aws/aws-sdk-go/service/sfn", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "ADoR4mlCW5usH8iOa6mPNSy49LM=", "path": "github.com/aws/aws-sdk-go/service/shield", - "revision": 
"32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "B3CgAFSREebpsFoFOo4vrQ6u04w=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "FfY8w4DM8XIULdRnFhd3Um8Mj8c=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "Wx189wAbIhWChx4kVbvsyqKMF4U=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "ijz0rBDeR6JP/06S+97k84FRYxc=", "path": "github.com/aws/aws-sdk-go/service/ssm", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "W1oFtpaT4TWIIJrAvFcn/XdcT7g=", "path": 
"github.com/aws/aws-sdk-go/service/sts", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "Uw4pOUxSMbx4xBHUcOUkNhtnywE=", "path": "github.com/aws/aws-sdk-go/service/swf", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "on6d7Hydx2bM9jkFOf1JZcZZgeY=", "path": "github.com/aws/aws-sdk-go/service/waf", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "rHqjsOndIR82gX5mSKybaRWf3UY=", "path": "github.com/aws/aws-sdk-go/service/wafregional", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" }, { "checksumSHA1": "5HDSvmMW7F3xzPAzughe4dEn6RM=", "path": "github.com/aws/aws-sdk-go/service/workspaces", - "revision": "32d0e45c3f93cd20c25614183246d7e34bc7385c", - "revisionTime": "2017-12-21T23:11:03Z", - "version": "v1.12.52", - "versionExact": "v1.12.52" + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": 
"v1.12.53" }, { "checksumSHA1": "usT4LCSQItkFvFOQT7cBlkCuGaE=", From 0e4b88b44c4482fe80952c830a749f1f78b40b97 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 26 Dec 2017 09:08:39 -0500 Subject: [PATCH 065/350] r/aws_eip: Support tags --- aws/resource_aws_eip.go | 17 +++++++++++ aws/resource_aws_eip_test.go | 48 ++++++++++++++++++++++++++++++++ website/docs/r/eip.html.markdown | 4 +-- 3 files changed, 67 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_eip.go b/aws/resource_aws_eip.go index c85c0655c96..67c99a909b2 100644 --- a/aws/resource_aws_eip.go +++ b/aws/resource_aws_eip.go @@ -73,6 +73,8 @@ func resourceAwsEip() *schema.Resource { Type: schema.TypeString, Optional: true, }, + + "tags": tagsSchema(), }, } } @@ -111,6 +113,13 @@ func resourceAwsEipCreate(d *schema.ResourceData, meta interface{}) error { } log.Printf("[INFO] EIP ID: %s (domain: %v)", d.Id(), *allocResp.Domain) + + if _, ok := d.GetOk("tags"); ok { + if err := setTags(ec2conn, d); err != nil { + return fmt.Errorf("Error creating EIP tags: %s", err) + } + } + return resourceAwsEipUpdate(d, meta) } @@ -206,6 +215,8 @@ func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { d.SetId(*address.AllocationId) } + d.Set("tags", tagsToMap(address.Tags)) + return nil } @@ -270,6 +281,12 @@ func resourceAwsEipUpdate(d *schema.ResourceData, meta interface{}) error { } } + if _, ok := d.GetOk("tags"); ok { + if err := setTags(ec2conn, d); err != nil { + return fmt.Errorf("Error updating EIP tags: %s", err) + } + } + return resourceAwsEipRead(d, meta) } diff --git a/aws/resource_aws_eip_test.go b/aws/resource_aws_eip_test.go index 053f9432f75..9f65f31bceb 100644 --- a/aws/resource_aws_eip_test.go +++ b/aws/resource_aws_eip_test.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" 
"github.com/hashicorp/terraform/terraform" ) @@ -265,6 +266,42 @@ func TestAccAWSEIPAssociate_not_associated(t *testing.T) { }) } +func TestAccAWSEIP_tags(t *testing.T) { + var conf ec2.Address + resourceName := "aws_eip.bar" + rName1 := fmt.Sprintf("%s-%d", t.Name(), acctest.RandInt()) + rName2 := fmt.Sprintf("%s-%d", t.Name(), acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_eip.bar", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSEIPDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSEIPConfig_tags(rName1, t.Name()), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSEIPExists(resourceName, &conf), + testAccCheckAWSEIPAttributes(&conf), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.RandomName", rName1), + resource.TestCheckResourceAttr(resourceName, "tags.TestName", t.Name()), + ), + }, + resource.TestStep{ + Config: testAccAWSEIPConfig_tags(rName2, t.Name()), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSEIPExists(resourceName, &conf), + testAccCheckAWSEIPAttributes(&conf), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.RandomName", rName2), + resource.TestCheckResourceAttr(resourceName, "tags.TestName", t.Name()), + ), + }, + }, + }) +} + func testAccCheckAWSEIPDisappears(v *ec2.Address) resource.TestCheckFunc { return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ec2conn @@ -402,6 +439,17 @@ resource "aws_eip" "bar" { } ` +func testAccAWSEIPConfig_tags(rName, testName string) string { + return fmt.Sprintf(` +resource "aws_eip" "bar" { + tags { + RandomName = "%[1]s" + TestName = "%[2]s" + } +} +`, rName, testName) +} + const testAccAWSEIPInstanceEc2Classic = ` provider "aws" { region = "us-east-1" diff --git a/website/docs/r/eip.html.markdown 
b/website/docs/r/eip.html.markdown index e828b11c36b..374bcf11ff9 100644 --- a/website/docs/r/eip.html.markdown +++ b/website/docs/r/eip.html.markdown @@ -92,6 +92,7 @@ The following arguments are supported: * `associate_with_private_ip` - (Optional) A user specified primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address. +* `tags` - (Optional) A mapping of tags to assign to the resource. ~> **NOTE:** You can specify either the `instance` ID or the `network_interface` ID, but not both. Including both will **not** return an error from the AWS API, but will @@ -100,7 +101,7 @@ more information. ## Attributes Reference -The following attributes are exported: +The following additional attributes are exported: * `id` - Contains the EIP allocation ID. * `private_ip` - Contains the private IP address (if in VPC). @@ -110,7 +111,6 @@ The following attributes are exported: * `instance` - Contains the ID of the attached instance. * `network_interface` - Contains the ID of the attached network interface. - ## Import EIPs in a VPC can be imported using their Allocation ID, e.g. 
From 2eba2869f943b4a1c5e3b37f074c93d4d94c4ad9 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 26 Dec 2017 15:05:01 -0500 Subject: [PATCH 066/350] New Resource: aws_route53_query_log --- aws/provider.go | 1 + aws/resource_aws_route53_query_log.go | 91 ++++++++++ aws/resource_aws_route53_query_log_test.go | 164 ++++++++++++++++++ website/aws.erb | 4 + .../docs/r/route53_query_log.html.markdown | 93 ++++++++++ 5 files changed, 353 insertions(+) create mode 100644 aws/resource_aws_route53_query_log.go create mode 100644 aws/resource_aws_route53_query_log_test.go create mode 100644 website/docs/r/route53_query_log.html.markdown diff --git a/aws/provider.go b/aws/provider.go index 8fb671307d5..bb511d39827 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -438,6 +438,7 @@ func Provider() terraform.ResourceProvider { "aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(), "aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(), "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(), + "aws_route53_query_log": resourceAwsRoute53QueryLog(), "aws_route53_record": resourceAwsRoute53Record(), "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(), "aws_route53_zone": resourceAwsRoute53Zone(), diff --git a/aws/resource_aws_route53_query_log.go b/aws/resource_aws_route53_query_log.go new file mode 100644 index 00000000000..f992141f205 --- /dev/null +++ b/aws/resource_aws_route53_query_log.go @@ -0,0 +1,91 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53" +) + +func resourceAwsRoute53QueryLog() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRoute53QueryLogCreate, + Read: resourceAwsRoute53QueryLogRead, + Delete: resourceAwsRoute53QueryLogDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + 
"cloudwatch_log_group_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "zone_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsRoute53QueryLogCreate(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).r53conn + + input := &route53.CreateQueryLoggingConfigInput{ + CloudWatchLogsLogGroupArn: aws.String(d.Get("cloudwatch_log_group_arn").(string)), + HostedZoneId: aws.String(d.Get("zone_id").(string)), + } + + log.Printf("[DEBUG] Creating Route53 query logging configuration: %#v", input) + out, err := r53.CreateQueryLoggingConfig(input) + if err != nil { + return fmt.Errorf("Error creating Route53 query logging configuration: %s", err) + } + log.Printf("[DEBUG] Route53 query logging configuration created: %#v", out) + + d.SetId(*out.QueryLoggingConfig.Id) + + return resourceAwsRoute53QueryLogRead(d, meta) +} + +func resourceAwsRoute53QueryLogRead(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).r53conn + + input := &route53.GetQueryLoggingConfigInput{ + Id: aws.String(d.Id()), + } + log.Printf("[DEBUG] Reading Route53 query logging configuration: %#v", input) + out, err := r53.GetQueryLoggingConfig(input) + if err != nil { + return fmt.Errorf("Error reading Route53 query logging configuration: %s", err) + } + log.Printf("[DEBUG] Route53 query logging configuration received: %#v", out) + + d.Set("cloudwatch_log_group_arn", out.QueryLoggingConfig.CloudWatchLogsLogGroupArn) + d.Set("zone_id", out.QueryLoggingConfig.HostedZoneId) + + return nil +} + +func resourceAwsRoute53QueryLogDelete(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).r53conn + + input := &route53.DeleteQueryLoggingConfigInput{ + Id: aws.String(d.Id()), + } + log.Printf("[DEBUG] Deleting Route53 query logging configuration: %#v", input) + _, err := r53.DeleteQueryLoggingConfig(input) + if err != nil { + return 
fmt.Errorf("Error deleting Route53 query logging configuration: %s", err) + } + + return nil +} diff --git a/aws/resource_aws_route53_query_log_test.go b/aws/resource_aws_route53_query_log_test.go new file mode 100644 index 00000000000..411476b4812 --- /dev/null +++ b/aws/resource_aws_route53_query_log_test.go @@ -0,0 +1,164 @@ +package aws + +import ( + "fmt" + "os" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSRoute53QueryLog_Basic(t *testing.T) { + // The underlying resources are sensitive to where they are located + // Use us-east-1 for testing + oldRegion := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldRegion) + + resourceName := "aws_route53_query_log.test" + rName := fmt.Sprintf("%s-%s", t.Name(), acctest.RandString(5)) + + var queryLoggingConfig route53.QueryLoggingConfig + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckRoute53QueryLogDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAWSRoute53QueryLogResourceConfigBasic1(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRoute53QueryLogExists(resourceName, &queryLoggingConfig), + resource.TestMatchResourceAttr(resourceName, "cloudwatch_log_group_arn", + regexp.MustCompile(fmt.Sprintf(`^arn:aws:logs:[^:]+:[0-9]{12}:log-group:/aws/route53/%s.com:\*$`, rName))), + resource.TestCheckResourceAttrSet(resourceName, "zone_id"), + ), + }, + }, + }) +} + +func TestAccAWSRoute53QueryLog_Import(t *testing.T) { + // The underlying resources are sensitive to where they are located + // Use us-east-1 for testing + oldRegion := os.Getenv("AWS_DEFAULT_REGION") + 
os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldRegion) + + resourceName := "aws_route53_query_log.test" + rName := fmt.Sprintf("%s-%s", t.Name(), acctest.RandString(5)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckRoute53QueryLogDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAWSRoute53QueryLogResourceConfigBasic1(rName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckRoute53QueryLogExists(pr string, queryLoggingConfig *route53.QueryLoggingConfig) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).r53conn + rs, ok := s.RootModule().Resources[pr] + if !ok { + return fmt.Errorf("Not found: %s", pr) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + out, err := conn.GetQueryLoggingConfig(&route53.GetQueryLoggingConfigInput{ + Id: aws.String(rs.Primary.ID), + }) + if err != nil { + return err + } + if out.QueryLoggingConfig == nil { + return fmt.Errorf("Route53 query logging configuration does not exist: %q", rs.Primary.ID) + } + + *queryLoggingConfig = *out.QueryLoggingConfig + + return nil + } +} + +func testAccCheckRoute53QueryLogDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).r53conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_route53_query_log" { + continue + } + + out, err := conn.GetQueryLoggingConfig(&route53.GetQueryLoggingConfigInput{ + Id: aws.String(rs.Primary.ID), + }) + if err != nil { + return nil + } + + if out.QueryLoggingConfig != nil { + return fmt.Errorf("Route53 query logging configuration exists: %q", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckAWSRoute53QueryLogResourceConfigBasic1(rName string) string { + return 
fmt.Sprintf(` +resource "aws_cloudwatch_log_group" "test" { + name = "/aws/route53/${aws_route53_zone.test.name}" + retention_in_days = 1 +} + +data "aws_iam_policy_document" "test" { + statement { + actions = [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ] + + resources = ["arn:aws:logs:*:*:log-group:/aws/route53/*"] + + principals { + identifiers = ["route53.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_cloudwatch_log_resource_policy" "test" { + policy_name = "%[1]s" + policy_document = "${data.aws_iam_policy_document.test.json}" +} + +resource "aws_route53_zone" "test" { + name = "%[1]s.com" +} + +resource "aws_route53_query_log" "test" { + depends_on = ["aws_cloudwatch_log_resource_policy.test"] + + cloudwatch_log_group_arn = "${aws_cloudwatch_log_group.test.arn}" + zone_id = "${aws_route53_zone.test.zone_id}" +} +`, rName) +} diff --git a/website/aws.erb b/website/aws.erb index 5bf9ba7af53..7b64f20da87 100644 --- a/website/aws.erb +++ b/website/aws.erb @@ -1357,6 +1357,10 @@ aws_route53_health_check + > + aws_route53_query_log + + > aws_route53_record diff --git a/website/docs/r/route53_query_log.html.markdown b/website/docs/r/route53_query_log.html.markdown new file mode 100644 index 00000000000..e3704b149ef --- /dev/null +++ b/website/docs/r/route53_query_log.html.markdown @@ -0,0 +1,93 @@ +--- +layout: "aws" +page_title: "AWS: aws_route53_query_log" +sidebar_current: "docs-aws-resource-route53-query-log" +description: |- + Provides a Route53 query logging configuration resource. +--- + +# aws_route53_query_log + +Provides a Route53 query logging configuration resource. + +~> **NOTE:** There are restrictions on the configuration of query logging. Notably, +the CloudWatch log group must be in the `us-east-1` region, +a permissive CloudWatch log resource policy must be in place, and +the Route53 hosted zone must be public. 
+See [Configuring Logging for DNS Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html?console_help=true#query-logs-configuring) for additional details. + +## Example Usage + +```hcl +# Example CloudWatch log group in us-east-1 + +provider "aws" { + alias = "us-east-1" + region = "us-east-1" +} + +resource "aws_cloudwatch_log_group" "aws_route53_example_com" { + provider = "aws.us-east-1" + + name = "/aws/route53/${aws_route53_zone.example_com.name}" + retention_in_days = 30 +} + +# Example CloudWatch log resource policy to allow Route53 to write logs +# to any log group under /aws/route53/* + +data "aws_iam_policy_document" "route53-query-logging-policy" { + statement { + actions = [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ] + + resources = ["arn:aws:logs:*:*:log-group:/aws/route53/*"] + + principals { + identifiers = ["route53.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_cloudwatch_log_resource_policy" "route53-query-logging-policy" { + policy_document = "${data.aws_iam_policy_document.route53-query-logging-policy.json}" + policy_name = "route53-query-logging-policy" +} + +# Example Route53 zone with query logging + +resource "aws_route53_zone" "example_com" { + name = "example.com" +} + +resource "aws_route53_query_log" "example_com" { + depends_on = ["aws_cloudwatch_log_resource_policy.route53-query-logging-policy"] + + cloudwatch_log_group_arn = "${aws_cloudwatch_log_group.aws_route53_example_com.arn}" + zone_id = "${aws_route53_zone.example_com.zone_id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `cloudwatch_log_group_arn` - (Required) CloudWatch log group ARN to send query logs. +* `zone_id` - (Required) Route53 hosted zone ID to enable query logs. + +## Attributes Reference + +The following additional attributes are exported: + +* `id` - The query logging configuration ID + +## Import + +Route53 query logging configurations can be imported using their ID, e.g. 
+ +``` +$ terraform import aws_route53_query_log.example_com xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +``` From 5cf15f7cc39319ade989272e732bc7c092779822 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Tue, 26 Dec 2017 14:33:22 -0600 Subject: [PATCH 067/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a5ab69a5fe4..6f0e703762f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ ENHANCEMENTS: * resource/aws_directory_service_directory: Add security_group_id field [GH-2688] * resource/aws_rds_cluster_instance: Support Performance Insights [GH-2331] * resource/aws_rds_cluster_instance: Set db_subnet_group_name in state on read if available [GH-2606] +* resource/aws_eip: Tagging is now supported [GH-2768] BUG FIXES: From bec6d7996e1e3876ee59056e35a673ddaabef870 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 26 Dec 2017 15:37:10 -0500 Subject: [PATCH 068/350] r/aws_route53_record: Documentation note about longer than 255 character record values (e.g. DKIM TXT records) --- website/docs/r/route53_record.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/route53_record.html.markdown b/website/docs/r/route53_record.html.markdown index 094e97e0f26..46406071fa4 100644 --- a/website/docs/r/route53_record.html.markdown +++ b/website/docs/r/route53_record.html.markdown @@ -98,7 +98,7 @@ The following arguments are supported: * `name` - (Required) The name of the record. * `type` - (Required) The record type. Valid values are `A`, `AAAA`, `CAA`, `CNAME`, `MX`, `NAPTR`, `NS`, `PTR`, `SOA`, `SPF`, `SRV` and `TXT`. * `ttl` - (Required for non-alias records) The TTL of the record. -* `records` - (Required for non-alias records) A string list of records. +* `records` - (Required for non-alias records) A string list of records. 
To specify a single record value longer than 255 characters such as a TXT record for DKIM, add `\"\"` inside the Terraform configuration string (e.g. `"first255characters\"\"morecharacters"`). * `set_identifier` - (Optional) Unique identifier to differentiate records with routing policies from one another. Required if using `failover`, `geolocation`, `latency`, or `weighted` routing policies documented below. * `health_check_id` - (Optional) The health check the record should be associated with. * `alias` - (Optional) An alias block. Conflicts with `ttl` & `records`. From 1c58036baf6cec2c921307e3e2eb28c941369164 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 26 Dec 2017 16:19:12 -0500 Subject: [PATCH 069/350] r/aws_codepipeline: Add arn attribute --- aws/resource_aws_codepipeline.go | 7 +++++++ aws/resource_aws_codepipeline_test.go | 2 ++ website/docs/r/codepipeline.markdown | 3 ++- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_codepipeline.go b/aws/resource_aws_codepipeline.go index 29866cb1917..8cb011634e8 100644 --- a/aws/resource_aws_codepipeline.go +++ b/aws/resource_aws_codepipeline.go @@ -24,6 +24,11 @@ func resourceAwsCodePipeline() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { Type: schema.TypeString, Required: true, @@ -459,6 +464,7 @@ func resourceAwsCodePipelineRead(d *schema.ResourceData, meta interface{}) error } return fmt.Errorf("[ERROR] Error retreiving Pipeline: %q", err) } + metadata := resp.Metadata pipeline := resp.Pipeline if err := d.Set("artifact_store", flattenAwsCodePipelineArtifactStore(pipeline.ArtifactStore)); err != nil { @@ -469,6 +475,7 @@ func resourceAwsCodePipelineRead(d *schema.ResourceData, meta interface{}) error return err } + d.Set("arn", metadata.PipelineArn) d.Set("name", pipeline.Name) d.Set("role_arn", pipeline.RoleArn) return nil diff --git a/aws/resource_aws_codepipeline_test.go 
b/aws/resource_aws_codepipeline_test.go index a377f5ac7c2..0951a28b7e6 100644 --- a/aws/resource_aws_codepipeline_test.go +++ b/aws/resource_aws_codepipeline_test.go @@ -29,6 +29,8 @@ func TestAccAWSCodePipeline_basic(t *testing.T) { Config: testAccAWSCodePipelineConfig_basic(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists("aws_codepipeline.bar"), + resource.TestMatchResourceAttr("aws_codepipeline.bar", "arn", + regexp.MustCompile(fmt.Sprintf("^arn:aws:codepipeline:[^:]+:[0-9]{12}:test-pipeline-%s", name))), resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.type", "S3"), resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.encryption_key.0.id", "1234"), resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.encryption_key.0.type", "KMS"), diff --git a/website/docs/r/codepipeline.markdown b/website/docs/r/codepipeline.markdown index 0edb912c7fb..dd092a37be5 100644 --- a/website/docs/r/codepipeline.markdown +++ b/website/docs/r/codepipeline.markdown @@ -157,9 +157,10 @@ A `action` block supports the following arguments: ## Attributes Reference -The following attributes are exported: +The following additional attributes are exported: * `id` - The codepipeline ID. +* `arn` - The codepipeline ARN. ## Import From f761ffffbc357ec19f684828493779f941fefe6b Mon Sep 17 00:00:00 2001 From: Royce Remer Date: Tue, 26 Dec 2017 14:25:32 -0800 Subject: [PATCH 070/350] document import for ENIs --- website/docs/d/network_interface.html.markdown | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/website/docs/d/network_interface.html.markdown b/website/docs/d/network_interface.html.markdown index 14def5dca25..f64274af7f8 100644 --- a/website/docs/d/network_interface.html.markdown +++ b/website/docs/d/network_interface.html.markdown @@ -45,3 +45,11 @@ Additionally, the following attributes are exported: * `ip_owner_id` - The ID of the Elastic IP address owner. 
* `public_dns_name` - The public DNS name. * `public_ip` - The address of the Elastic IP address bound to the network interface. + +## Import + +Elastic Network Interfaces can be imported using the `id`, e.g. + +``` +$ terraform import aws_network_interface.test eni-12345 +``` From 4fbf7db3f572c38933358af3e8bb5ab8c43115b4 Mon Sep 17 00:00:00 2001 From: Sebastian Wahn Date: Wed, 27 Dec 2017 19:33:28 +0100 Subject: [PATCH 071/350] Update the ecs documentation by moving the "launch_type" property to the correct place in aws_ecs_service --- website/docs/r/ecs_service.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/ecs_service.html.markdown b/website/docs/r/ecs_service.html.markdown index 3c45212e596..2aafe1d4a7b 100644 --- a/website/docs/r/ecs_service.html.markdown +++ b/website/docs/r/ecs_service.html.markdown @@ -50,6 +50,7 @@ The following arguments are supported: * `name` - (Required) The name of the service (up to 255 letters, numbers, hyphens, and underscores) * `task_definition` - (Required) The family and revision (`family:revision`) or full ARN of the task definition that you want to run in your service. * `desired_count` - (Required) The number of instances of the task definition to place and keep running +* `launch_type` - (Optional) The launch type on which to run your service. The valid values are `EC2` and `FARGATE`. Defaults to `EC2`. * `cluster` - (Optional) ARN of an ECS cluster * `iam_role` - (Optional) The ARN of IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service. * `deployment_maximum_percent` - (Optional) The upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. 
@@ -70,7 +71,6 @@ Load balancers support the following: * `target_group_arn` - (Required for ALB) The ARN of the ALB target group to associate with the service. * `container_name` - (Required) The name of the container to associate with the load balancer (as it appears in a container definition). * `container_port` - (Required) The port on the container to associate with the load balancer. -* `launch_type` - (Optional) The launch type on which to run your service. The valid values are `EC2` and `FARGATE`. Defaults to `EC2`. ## placement_strategy From ce17236c741532f329759fe97b7f30c0584be470 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 27 Dec 2017 13:50:08 -0600 Subject: [PATCH 072/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f0e703762f..40f8383fba8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ ENHANCEMENTS: * resource/aws_rds_cluster_instance: Support Performance Insights [GH-2331] * resource/aws_rds_cluster_instance: Set db_subnet_group_name in state on read if available [GH-2606] * resource/aws_eip: Tagging is now supported [GH-2768] +* resource/aws_codepipeline: ARN is now exposed as an attribute [GH-2773] BUG FIXES: From 3b78617c275d1b80d40cb73543465a6496204272 Mon Sep 17 00:00:00 2001 From: Atsushi Ishibashi Date: Thu, 28 Dec 2017 12:25:04 +0900 Subject: [PATCH 073/350] avoid nil pointer when Enabled=false --- aws/resource_aws_elasticsearch_domain.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticsearch_domain.go b/aws/resource_aws_elasticsearch_domain.go index 8e5ab7e39a2..86f1e9c7b45 100644 --- a/aws/resource_aws_elasticsearch_domain.go +++ b/aws/resource_aws_elasticsearch_domain.go @@ -497,7 +497,9 @@ func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{} for k, val := range ds.LogPublishingOptions { mm := map[string]interface{}{} mm["log_type"] = k - 
mm["cloudwatch_log_group_arn"] = *val.CloudWatchLogsLogGroupArn + if val.CloudWatchLogsLogGroupArn != nil { + mm["cloudwatch_log_group_arn"] = *val.CloudWatchLogsLogGroupArn + } mm["enabled"] = *val.Enabled m = append(m, mm) } From bec308b77da61d8a7f80395869ba52e1aed71946 Mon Sep 17 00:00:00 2001 From: Colin Hebert Date: Thu, 28 Dec 2017 10:28:47 +0100 Subject: [PATCH 074/350] r/aws_lb_target_group: Fix max prefix length check --- aws/validators.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/validators.go b/aws/validators.go index c502925e31d..4009e32c29d 100644 --- a/aws/validators.go +++ b/aws/validators.go @@ -1364,7 +1364,7 @@ func validateAwsLbTargetGroupName(v interface{}, k string) (ws []string, errors func validateAwsLbTargetGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) { name := v.(string) - if len(name) > 32 { + if len(name) > 6 { errors = append(errors, fmt.Errorf("%q (%q) cannot be longer than '6' characters", k, name)) } return From 81d3ade4c219641ec920a7ac718fcc0efb7b8699 Mon Sep 17 00:00:00 2001 From: Joshua Johnston Date: Thu, 28 Dec 2017 10:37:44 -0500 Subject: [PATCH 075/350] Handle RDS Cluster state `resetting-master-credentials` Addresses error when creating a new aws_rds_cluster using the `snapshot_identifier` option. ``` * aws_rds_cluster.aurora_postgres: [WARN] Error waiting for RDS Cluster state to be "available": unexpected state 'resetting-master-credentials', wanted target 'available'. 
last error: %!s() ``` --- aws/resource_aws_rds_cluster.go | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_rds_cluster.go b/aws/resource_aws_rds_cluster.go index 80cd7abab71..299153809bb 100644 --- a/aws/resource_aws_rds_cluster.go +++ b/aws/resource_aws_rds_cluster.go @@ -340,7 +340,7 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error log.Println("[INFO] Waiting for RDS Cluster to be available") stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "backing-up", "modifying", "preparing-data-migration", "migrating"}, + Pending: resourceAwsRdsClusterCreatePendingStates, Target: []string{"available"}, Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), Timeout: d.Timeout(schema.TimeoutCreate), @@ -497,7 +497,7 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error "[INFO] Waiting for RDS Cluster to be available") stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "backing-up", "modifying"}, + Pending: resourceAwsRdsClusterCreatePendingStates, Target: []string{"available"}, Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), Timeout: d.Timeout(schema.TimeoutCreate), @@ -765,7 +765,7 @@ func resourceAwsRDSClusterDelete(d *schema.ResourceData, meta interface{}) error } stateConf := &resource.StateChangeConf{ - Pending: []string{"available", "deleting", "backing-up", "modifying"}, + Pending: resourceAwsRdsClusterDeletePendingStates, Target: []string{"destroyed"}, Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), Timeout: d.Timeout(schema.TimeoutDelete), @@ -859,3 +859,19 @@ func removeIamRoleFromRdsCluster(clusterIdentifier string, roleArn string, conn return nil } + +var resourceAwsRdsClusterCreatePendingStates = []string{ + "creating", + "backing-up", + "modifying", + "preparing-data-migration", + "migrating", + "resetting-master-credentials", +} + +var resourceAwsRdsClusterDeletePendingStates = 
[]string{ + "available", + "deleting", + "backing-up", + "modifying", +} From dddc588b53d0de7421040313d2ec7d54ca55bd4a Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 28 Dec 2017 10:32:36 -0600 Subject: [PATCH 076/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 40f8383fba8..d64b0194819 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ ENHANCEMENTS: * resource/aws_rds_cluster_instance: Set db_subnet_group_name in state on read if available [GH-2606] * resource/aws_eip: Tagging is now supported [GH-2768] * resource/aws_codepipeline: ARN is now exposed as an attribute [GH-2773] +* resource/elasticsearch_domain: Fixed a crash when no Cloudwatch log group is configured [GH-2787] BUG FIXES: From 895d1f1421578a87133a4234721db323569c792f Mon Sep 17 00:00:00 2001 From: David Dvorak Date: Thu, 28 Dec 2017 20:08:42 +0100 Subject: [PATCH 077/350] Fixes skipped min_capacity argument in scalable_target_action --- aws/resource_aws_appautoscaling_scheduled_action.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_appautoscaling_scheduled_action.go b/aws/resource_aws_appautoscaling_scheduled_action.go index ec479da6de6..71613ea5566 100644 --- a/aws/resource_aws_appautoscaling_scheduled_action.go +++ b/aws/resource_aws_appautoscaling_scheduled_action.go @@ -104,7 +104,7 @@ func resourceAwsAppautoscalingScheduledActionPut(d *schema.ResourceData, meta in sta.MaxCapacity = aws.Int64(int64(max.(int))) } if min, ok := raw["min_capacity"]; ok { - sta.MaxCapacity = aws.Int64(int64(min.(int))) + sta.MinCapacity = aws.Int64(int64(min.(int))) } input.ScalableTargetAction = sta } From b1cdc723f705a37b00993370d6c5a7f12c3e81d2 Mon Sep 17 00:00:00 2001 From: Gordon Irving Date: Thu, 28 Dec 2017 19:46:03 +0000 Subject: [PATCH 078/350] allow setting endpoints for acm,ecr,ecs,sts,r53 --- aws/config.go | 19 ++++++++++++++----- aws/provider.go | 37 
+++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 5 deletions(-) diff --git a/aws/config.go b/aws/config.go index 1331e826ba8..86eb60f4f9b 100644 --- a/aws/config.go +++ b/aws/config.go @@ -100,6 +100,7 @@ type Config struct { AllowedAccountIds []interface{} ForbiddenAccountIds []interface{} + AcmEndpoint string ApigatewayEndpoint string CloudFormationEndpoint string CloudWatchEndpoint string @@ -108,15 +109,19 @@ type Config struct { DynamoDBEndpoint string DeviceFarmEndpoint string Ec2Endpoint string + EcsEndpoint string + EcrEndpoint string ElbEndpoint string IamEndpoint string KinesisEndpoint string KmsEndpoint string LambdaEndpoint string RdsEndpoint string + R53Endpoint string S3Endpoint string SnsEndpoint string SqsEndpoint string + StsEndpoint string Insecure bool SkipCredsValidation bool @@ -317,9 +322,10 @@ func (c *Config) Client() (interface{}, error) { // Other resources that have restrictions should allow the API to fail, rather // than Terraform abstracting the region for the user. This can lead to breaking // changes if that resource is ever opened up to more regions. 
- r53Sess := sess.Copy(&aws.Config{Region: aws.String("us-east-1")}) + r53Sess := sess.Copy(&aws.Config{Region: aws.String("us-east-1"), Endpoint: aws.String(c.R53Endpoint)}) // Some services have user-configurable endpoints + awsAcmSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.AcmEndpoint)}) awsApigatewaySess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ApigatewayEndpoint)}) awsCfSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudFormationEndpoint)}) awsCwSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchEndpoint)}) @@ -327,6 +333,8 @@ func (c *Config) Client() (interface{}, error) { awsCwlSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchLogsEndpoint)}) awsDynamoSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DynamoDBEndpoint)}) awsEc2Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.Ec2Endpoint)}) + awsEcrSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.EcrEndpoint)}) + awsEcsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.EcsEndpoint)}) awsElbSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ElbEndpoint)}) awsIamSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)}) awsLambdaSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.LambdaEndpoint)}) @@ -336,6 +344,7 @@ func (c *Config) Client() (interface{}, error) { awsS3Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.S3Endpoint)}) awsSnsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.SnsEndpoint)}) awsSqsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.SqsEndpoint)}) + awsStsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.StsEndpoint)}) awsDeviceFarmSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DeviceFarmEndpoint)}) log.Println("[INFO] Initializing DeviceFarm SDK connection") @@ -343,7 +352,7 @@ func (c *Config) Client() (interface{}, error) { // These two services need to be set up early so we can check on AccountID client.iamconn = iam.New(awsIamSess) - client.stsconn = sts.New(sess) + client.stsconn = 
sts.New(awsStsSess) if !c.SkipCredsValidation { err = c.ValidateCredentials(client.stsconn) @@ -378,7 +387,7 @@ func (c *Config) Client() (interface{}, error) { } } - client.acmconn = acm.New(sess) + client.acmconn = acm.New(awsAcmSess) client.apigateway = apigateway.New(awsApigatewaySess) client.appautoscalingconn = applicationautoscaling.New(sess) client.autoscalingconn = autoscaling.New(sess) @@ -398,8 +407,8 @@ func (c *Config) Client() (interface{}, error) { client.codepipelineconn = codepipeline.New(sess) client.dsconn = directoryservice.New(sess) client.dynamodbconn = dynamodb.New(awsDynamoSess) - client.ecrconn = ecr.New(sess) - client.ecsconn = ecs.New(sess) + client.ecrconn = ecr.New(awsEcrSess) + client.ecsconn = ecs.New(awsEcsSess) client.efsconn = efs.New(sess) client.elasticacheconn = elasticache.New(sess) client.elasticbeanstalkconn = elasticbeanstalk.New(sess) diff --git a/aws/provider.go b/aws/provider.go index 8fb671307d5..e4c80bfcceb 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -682,6 +682,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { for _, endpointsSetI := range endpointsSet.List() { endpoints := endpointsSetI.(map[string]interface{}) + config.AcmEndpoint = endpoints["acm"].(string) config.ApigatewayEndpoint = endpoints["apigateway"].(string) config.CloudFormationEndpoint = endpoints["cloudformation"].(string) config.CloudWatchEndpoint = endpoints["cloudwatch"].(string) @@ -690,15 +691,19 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { config.DeviceFarmEndpoint = endpoints["devicefarm"].(string) config.DynamoDBEndpoint = endpoints["dynamodb"].(string) config.Ec2Endpoint = endpoints["ec2"].(string) + config.EcrEndpoint = endpoints["ecr"].(string) + config.EcsEndpoint = endpoints["ecs"].(string) config.ElbEndpoint = endpoints["elb"].(string) config.IamEndpoint = endpoints["iam"].(string) config.KinesisEndpoint = endpoints["kinesis"].(string) config.KmsEndpoint = 
endpoints["kms"].(string) config.LambdaEndpoint = endpoints["lambda"].(string) + config.R53Endpoint = endpoints["r53"].(string) config.RdsEndpoint = endpoints["rds"].(string) config.S3Endpoint = endpoints["s3"].(string) config.SnsEndpoint = endpoints["sns"].(string) config.SqsEndpoint = endpoints["sqs"].(string) + config.StsEndpoint = endpoints["sts"].(string) } if v, ok := d.GetOk("allowed_account_ids"); ok { @@ -756,6 +761,12 @@ func endpointsSchema() *schema.Schema { Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "acm": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["acm_endpoint"], + }, "apigateway": { Type: schema.TypeString, Optional: true, @@ -812,6 +823,20 @@ func endpointsSchema() *schema.Schema { Description: descriptions["ec2_endpoint"], }, + "ecr": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["ecr_endpoint"], + }, + + "ecs": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["ecs_endpoint"], + }, + "elb": { Type: schema.TypeString, Optional: true, @@ -836,6 +861,12 @@ func endpointsSchema() *schema.Schema { Default: "", Description: descriptions["lambda_endpoint"], }, + "r53": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["r53_endpoint"], + }, "rds": { Type: schema.TypeString, Optional: true, @@ -860,6 +891,12 @@ func endpointsSchema() *schema.Schema { Default: "", Description: descriptions["sqs_endpoint"], }, + "sts": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["sts_endpoint"], + }, }, }, Set: endpointsToHash, From 419877ebfa54dc04e3b151c15930b62b32270bed Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 29 Dec 2017 14:07:59 -0600 Subject: [PATCH 079/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d64b0194819..15f4a01c758 100644 
--- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ENHANCEMENTS: * provider: Allow API Gateway and Lambda endpoints configuration [GH-2641] * provider: `eu-west-3` is now supported [GH-2707] +* provider: Endpoints can now be specified for ACM, ECR, ECS, STS and Route 53 [GH-2795] * resource/aws_kinesis_firehose_delivery_stream: Import is now supported [GH-2707] * resource/aws_cognito_user_pool: The ARN for the pool is now computed and exposed as an attribute [GH-2723] * resource/aws_directory_service_directory: Add security_group_id field [GH-2688] From db4da47e29dc92e820b11bf4a408e121b8b8304b Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 29 Dec 2017 14:10:05 -0600 Subject: [PATCH 080/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15f4a01c758..c6f82f12842 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ ENHANCEMENTS: * resource/aws_eip: Tagging is now supported [GH-2768] * resource/aws_codepipeline: ARN is now exposed as an attribute [GH-2773] * resource/elasticsearch_domain: Fixed a crash when no Cloudwatch log group is configured [GH-2787] +* resource/aws_appautoscaling_scheduled_action: min_capcity argument is now honoured [GH-2794] BUG FIXES: From 755e04d2b2c8660316f9477b9085b589a768a17c Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 29 Dec 2017 14:20:05 -0600 Subject: [PATCH 081/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c6f82f12842..9f04d0cd25e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ ENHANCEMENTS: * resource/aws_codepipeline: ARN is now exposed as an attribute [GH-2773] * resource/elasticsearch_domain: Fixed a crash when no Cloudwatch log group is configured [GH-2787] * resource/aws_appautoscaling_scheduled_action: min_capcity argument is now honoured [GH-2794] +* resource/aws_rds_cluster: Clusters in the `resetting-master-credentials` state no 
longer cause an error [GH-2791] BUG FIXES: From c3b13edf8b9d27c1a9ccfa37d57e6c4041270813 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Sun, 31 Dec 2017 09:29:41 +0100 Subject: [PATCH 082/350] Update CHANGELOG.md --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f04d0cd25e..89becf30407 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,9 @@ ENHANCEMENTS: -* provider: Allow API Gateway and Lambda endpoints configuration [GH-2641] * provider: `eu-west-3` is now supported [GH-2707] * provider: Endpoints can now be specified for ACM, ECR, ECS, STS and Route 53 [GH-2795] +* provider: Endpoints can now be specified for API Gateway and Lambda [GH-2641] * resource/aws_kinesis_firehose_delivery_stream: Import is now supported [GH-2707] * resource/aws_cognito_user_pool: The ARN for the pool is now computed and exposed as an attribute [GH-2723] * resource/aws_directory_service_directory: Add security_group_id field [GH-2688] @@ -12,15 +12,15 @@ ENHANCEMENTS: * resource/aws_rds_cluster_instance: Set db_subnet_group_name in state on read if available [GH-2606] * resource/aws_eip: Tagging is now supported [GH-2768] * resource/aws_codepipeline: ARN is now exposed as an attribute [GH-2773] -* resource/elasticsearch_domain: Fixed a crash when no Cloudwatch log group is configured [GH-2787] * resource/aws_appautoscaling_scheduled_action: min_capcity argument is now honoured [GH-2794] * resource/aws_rds_cluster: Clusters in the `resetting-master-credentials` state no longer cause an error [GH-2791] BUG FIXES: -* resource/cognito_user_pool: Update Cognito email message length to 20,000 [GH-2692] +* resource/aws_cognito_user_pool: Update Cognito email message length to 20,000 [GH-2692] * resource/aws_volume_attachment: Changing device name without changing volume or instance ID now correctly produces a diff [GH-2720] * resource/aws_s3_bucket_object: Object tagging is now supported in GovCloud [GH-2665] +* 
resource/aws_elasticsearch_domain: Fixed a crash when no Cloudwatch log group is configured [GH-2787] ## 1.6.0 (December 18, 2017) From a7cd48417127df03a22c4eae2b9120ca442fd364 Mon Sep 17 00:00:00 2001 From: Phil Porada Date: Sun, 31 Dec 2017 13:57:04 -0500 Subject: [PATCH 083/350] Updated documentation for egress_only_internet_gateway to use the proper ipv6 variable --- .../r/egress_only_internet_gateway.html.markdown | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/docs/r/egress_only_internet_gateway.html.markdown b/website/docs/r/egress_only_internet_gateway.html.markdown index 1124607e4a8..b19c2a4eff2 100644 --- a/website/docs/r/egress_only_internet_gateway.html.markdown +++ b/website/docs/r/egress_only_internet_gateway.html.markdown @@ -8,17 +8,17 @@ description: |- # aws_egress_only_internet_gateway -[IPv6 only] Creates an egress-only Internet gateway for your VPC. -An egress-only Internet gateway is used to enable outbound communication -over IPv6 from instances in your VPC to the Internet, and prevents hosts -outside of your VPC from initiating an IPv6 connection with your instance. +[IPv6 only] Creates an egress-only Internet gateway for your VPC. +An egress-only Internet gateway is used to enable outbound communication +over IPv6 from instances in your VPC to the Internet, and prevents hosts +outside of your VPC from initiating an IPv6 connection with your instance. ## Example Usage ```hcl resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" - assign_amazon_ipv6_cidr_block = true + assign_generated_ipv6_cidr_block = true } resource "aws_egress_only_internet_gateway" "foo" { @@ -36,4 +36,4 @@ The following arguments are supported: The following attributes are exported: -* `id` - The ID of the Egress Only Internet Gateway. \ No newline at end of file +* `id` - The ID of the Egress Only Internet Gateway. 
From e21ad5c3be149b71aae8372bb5e5a07687b425d3 Mon Sep 17 00:00:00 2001 From: Stuart Auld Date: Tue, 2 Jan 2018 15:08:08 +1100 Subject: [PATCH 084/350] Set the resource ID after successful creation --- aws/resource_aws_s3_bucket_policy.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket_policy.go b/aws/resource_aws_s3_bucket_policy.go index 593d144fbd7..b7f7fed8f43 100644 --- a/aws/resource_aws_s3_bucket_policy.go +++ b/aws/resource_aws_s3_bucket_policy.go @@ -42,8 +42,6 @@ func resourceAwsS3BucketPolicyPut(d *schema.ResourceData, meta interface{}) erro bucket := d.Get("bucket").(string) policy := d.Get("policy").(string) - d.SetId(bucket) - log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy) params := &s3.PutBucketPolicyInput{ @@ -67,6 +65,8 @@ func resourceAwsS3BucketPolicyPut(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error putting S3 policy: %s", err) } + d.SetId(bucket) + return nil } From 19a7f4f8b36b4b0b84eb9020537d1f1109aec6ca Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 2 Jan 2018 09:03:10 +0100 Subject: [PATCH 085/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 89becf30407..ddcf45c1fa6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ BUG FIXES: * resource/aws_volume_attachment: Changing device name without changing volume or instance ID now correctly produces a diff [GH-2720] * resource/aws_s3_bucket_object: Object tagging is now supported in GovCloud [GH-2665] * resource/aws_elasticsearch_domain: Fixed a crash when no Cloudwatch log group is configured [GH-2787] +* resource/aws_s3_bucket_policy: Set the resource ID after successful creation [GH-2820] ## 1.6.0 (December 18, 2017) From 97a0cab36e44cb4b5a7e790541aa7f23a8d95baf Mon Sep 17 00:00:00 2001 From: Argishti Rostamian Date: Tue, 2 Jan 2018 16:12:58 -0800 Subject: [PATCH 086/350] rds event subscription set source 
type when updating categories --- aws/resource_aws_db_event_subscription.go | 1 + ...resource_aws_db_event_subscription_test.go | 54 +++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/aws/resource_aws_db_event_subscription.go b/aws/resource_aws_db_event_subscription.go index 9e725ce2d89..4415bbcc1b2 100644 --- a/aws/resource_aws_db_event_subscription.go +++ b/aws/resource_aws_db_event_subscription.go @@ -215,6 +215,7 @@ func resourceAwsDbEventSubscriptionUpdate(d *schema.ResourceData, meta interface for i, eventCategory := range eventCategoriesSet.List() { req.EventCategories[i] = aws.String(eventCategory.(string)) } + req.SourceType = aws.String(d.Get("source_type").(string)) requestUpdate = true } diff --git a/aws/resource_aws_db_event_subscription_test.go b/aws/resource_aws_db_event_subscription_test.go index c6dfde77374..97bd8df1d60 100644 --- a/aws/resource_aws_db_event_subscription_test.go +++ b/aws/resource_aws_db_event_subscription_test.go @@ -92,6 +92,41 @@ func TestAccAWSDBEventSubscription_withSourceIds(t *testing.T) { }) } +func TestAccAWSDBEventSubscription_categoryUpdate(t *testing.T) { + var v rds.EventSubscription + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDBEventSubscriptionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSDBEventSubscriptionConfig(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), + resource.TestCheckResourceAttr( + "aws_db_event_subscription.bar", "enabled", "true"), + resource.TestCheckResourceAttr( + "aws_db_event_subscription.bar", "source_type", "db-instance"), + resource.TestCheckResourceAttr( + "aws_db_event_subscription.bar", "name", fmt.Sprintf("tf-acc-test-rds-event-subs-%d", rInt)), + ), + }, + { + Config: testAccAWSDBEventSubscriptionConfigUpdateCategories(rInt), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), + resource.TestCheckResourceAttr( + "aws_db_event_subscription.bar", "enabled", "true"), + resource.TestCheckResourceAttr( + "aws_db_event_subscription.bar", "source_type", "db-instance"), + ), + }, + }, + }) +} + func testAccCheckAWSDBEventSubscriptionExists(n string, v *rds.EventSubscription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -263,3 +298,22 @@ func testAccAWSDBEventSubscriptionConfigUpdateSourceIds(rInt int) string { } }`, rInt, rInt, rInt, rInt) } + +func testAccAWSDBEventSubscriptionConfigUpdateCategories(rInt int) string { + return fmt.Sprintf(` +resource "aws_sns_topic" "aws_sns_topic" { + name = "tf-acc-test-rds-event-subs-sns-topic-%d" +} + +resource "aws_db_event_subscription" "bar" { + name = "tf-acc-test-rds-event-subs-%d" + sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" + source_type = "db-instance" + event_categories = [ + "availability", + ] + tags { + Name = "name" + } +}`, rInt, rInt) +} From 4091606b6a02de8df62f2635133a6fff3e8553ac Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 3 Jan 2018 08:57:06 +0100 Subject: [PATCH 087/350] docs/provider: Sort list of endpoints --- website/docs/index.html.markdown | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 92e7154de2c..81193d944be 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -251,6 +251,10 @@ in excess of those allowed by the access policy of the role that is being assume Nested `endpoints` block supports the following: +* `cloudformation` - (Optional) Use this to override the default endpoint + URL constructed from the `region`. It's typically used to connect to + custom CloudFormation endpoints. 
+ * `cloudwatch` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to custom CloudWatch endpoints. @@ -263,33 +267,29 @@ Nested `endpoints` block supports the following: URL constructed from the `region`. It's typically used to connect to custom CloudWatchLogs endpoints. -* `cloudformation` - (Optional) Use this to override the default endpoint - URL constructed from the `region`. It's typically used to connect to - custom CloudFormation endpoints. - * `dynamodb` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to `dynamodb-local`. -* `kinesis` - (Optional) Use this to override the default endpoint +* `ec2` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to - `kinesalite`. + custom EC2 endpoints. -* `kms` - (Optional) Use this to override the default endpoint +* `elb` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to - custom KMS endpoints. + custom ELB endpoints. * `iam` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to custom IAM endpoints. -* `ec2` - (Optional) Use this to override the default endpoint +* `kinesis` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to - custom EC2 endpoints. + `kinesalite`. -* `elb` - (Optional) Use this to override the default endpoint +* `kms` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to - custom ELB endpoints. + custom KMS endpoints. * `rds` - (Optional) Use this to override the default endpoint URL constructed from the `region`. 
It's typically used to connect to From 2ec0b20ef7b90fac08033a1bc34ae2658325c207 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 3 Jan 2018 09:00:52 +0100 Subject: [PATCH 088/350] docs/provider: Add missing endpoints --- website/docs/index.html.markdown | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 81193d944be..b768c12f40e 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -251,6 +251,14 @@ in excess of those allowed by the access policy of the role that is being assume Nested `endpoints` block supports the following: +* `acm` - (Optional) Use this to override the default endpoint + URL constructed from the `region`. It's typically used to connect to + custom ACM endpoints. + +* `apigateway` - (Optional) Use this to override the default endpoint + URL constructed from the `region`. It's typically used to connect to + custom API Gateway endpoints. + * `cloudformation` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to custom CloudFormation endpoints. @@ -267,6 +275,10 @@ Nested `endpoints` block supports the following: URL constructed from the `region`. It's typically used to connect to custom CloudWatchLogs endpoints. +* `devicefarm` - (Optional) Use this to override the default endpoint + URL constructed from the `region`. It's typically used to connect to + custom DeviceFarm endpoints. + * `dynamodb` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to `dynamodb-local`. @@ -275,6 +287,14 @@ Nested `endpoints` block supports the following: URL constructed from the `region`. It's typically used to connect to custom EC2 endpoints. +* `ecr` - (Optional) Use this to override the default endpoint + URL constructed from the `region`. 
It's typically used to connect to + custom ECR endpoints. + +* `ecs` - (Optional) Use this to override the default endpoint + URL constructed from the `region`. It's typically used to connect to + custom ECS endpoints. + * `elb` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to custom ELB endpoints. @@ -291,6 +311,14 @@ Nested `endpoints` block supports the following: URL constructed from the `region`. It's typically used to connect to custom KMS endpoints. +* `lambda` - (Optional) Use this to override the default endpoint + URL constructed from the `region`. It's typically used to connect to + custom Lambda endpoints. + +* `r53` - (Optional) Use this to override the default endpoint + URL constructed from the `region`. It's typically used to connect to + custom Route53 endpoints. + * `rds` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to custom RDS endpoints. @@ -307,6 +335,10 @@ Nested `endpoints` block supports the following: URL constructed from the `region`. It's typically used to connect to custom SQS endpoints. +* `sts` - (Optional) Use this to override the default endpoint + URL constructed from the `region`. It's typically used to connect to + custom STS endpoints. 
+ ## Getting the Account ID If you use either `allowed_account_ids` or `forbidden_account_ids`, From e5a3f14cc51005f390bbdd36d872a4e16024f4ed Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 3 Jan 2018 09:05:04 +0100 Subject: [PATCH 089/350] vendor: Add Gamelift --- .../aws/aws-sdk-go/service/gamelift/api.go | 19092 ++++++++++++++++ .../aws/aws-sdk-go/service/gamelift/doc.go | 304 + .../aws/aws-sdk-go/service/gamelift/errors.go | 102 + .../aws-sdk-go/service/gamelift/service.go | 95 + vendor/vendor.json | 8 + 5 files changed, 19601 insertions(+) create mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go new file mode 100644 index 00000000000..7aa6023571f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go @@ -0,0 +1,19092 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package gamelift + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAcceptMatch = "AcceptMatch" + +// AcceptMatchRequest generates a "aws/request.Request" representing the +// client's request for the AcceptMatch operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See AcceptMatch for more information on using the AcceptMatch +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AcceptMatchRequest method. +// req, resp := client.AcceptMatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AcceptMatch +func (c *GameLift) AcceptMatchRequest(input *AcceptMatchInput) (req *request.Request, output *AcceptMatchOutput) { + op := &request.Operation{ + Name: opAcceptMatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcceptMatchInput{} + } + + output = &AcceptMatchOutput{} + req = c.newRequest(op, input, output) + return +} + +// AcceptMatch API operation for Amazon GameLift. +// +// Registers a player's acceptance or rejection of a proposed FlexMatch match. +// A matchmaking configuration may require player acceptance; if so, then matches +// built with that configuration cannot be completed unless all players accept +// the proposed match within a specified time limit. +// +// When FlexMatch builds a match, all the matchmaking tickets involved in the +// proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger +// for your game to get acceptance from all players in the ticket. Acceptances +// are only valid for tickets when they are in this status; all other acceptances +// result in an error. +// +// To register acceptance, specify the ticket ID, a response, and one or more +// players. Once all players have registered acceptance, the matchmaking tickets +// advance to status PLACING, where a new game session is created for the match. 
+// +// If any player rejects the match, or if acceptances are not received before +// a specified timeout, the proposed match is dropped. The matchmaking tickets +// are then handled in one of two ways: For tickets where all players accepted +// the match, the ticket status is returned to SEARCHING to find a new match. +// For tickets where one or more players failed to accept the match, the ticket +// status is set to FAILED, and processing is terminated. A new matchmaking +// request for these players can be submitted as needed. +// +// Matchmaking-related operations include: +// +// * StartMatchmaking +// +// * DescribeMatchmaking +// +// * StopMatchmaking +// +// * AcceptMatch +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation AcceptMatch for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" +// The requested operation is not supported in the region specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AcceptMatch +func (c *GameLift) AcceptMatch(input *AcceptMatchInput) (*AcceptMatchOutput, error) { + req, out := c.AcceptMatchRequest(input) + return out, req.Send() +} + +// AcceptMatchWithContext is the same as AcceptMatch with the addition of +// the ability to pass a context and additional request options. +// +// See AcceptMatch for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) AcceptMatchWithContext(ctx aws.Context, input *AcceptMatchInput, opts ...request.Option) (*AcceptMatchOutput, error) { + req, out := c.AcceptMatchRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateAlias = "CreateAlias" + +// CreateAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateAlias operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateAlias for more information on using the CreateAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateAliasRequest method. 
+// req, resp := client.CreateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateAlias +func (c *GameLift) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) { + op := &request.Operation{ + Name: opCreateAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAliasInput{} + } + + output = &CreateAliasOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateAlias API operation for Amazon GameLift. +// +// Creates an alias for a fleet. In most situations, you can use an alias ID +// in place of a fleet ID. By using a fleet alias instead of a specific fleet +// ID, you can switch gameplay and players to a new fleet without changing your +// game client or other game components. For example, for games in production, +// using an alias allows you to seamlessly redirect your player base to a new +// game server update. +// +// Amazon GameLift supports two types of routing strategies for aliases: simple +// and terminal. A simple alias points to an active fleet. A terminal alias +// is used to display messaging or link to a URL instead of routing players +// to an active fleet. For example, you might use a terminal alias when a game +// version is no longer supported and you want to direct players to an upgrade +// site. +// +// To create a fleet alias, specify an alias name, routing strategy, and optional +// description. Each simple alias can point to only one fleet, but a fleet can +// have multiple aliases. If successful, a new alias record is returned, including +// an alias ID, which you can reference when creating a game session. You can +// reassign an alias to another fleet by calling UpdateAlias. 
+// +// Alias-related operations include: +// +// * CreateAlias +// +// * ListAliases +// +// * DescribeAlias +// +// * UpdateAlias +// +// * DeleteAlias +// +// * ResolveAlias +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation CreateAlias for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeConflictException "ConflictException" +// The requested operation would cause a conflict with the current state of +// a service resource associated with the request. Resolve the conflict before +// retrying this request. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested operation would cause the resource to exceed the allowed service +// limit. Resolve the issue before retrying. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateAlias +func (c *GameLift) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) { + req, out := c.CreateAliasRequest(input) + return out, req.Send() +} + +// CreateAliasWithContext is the same as CreateAlias with the addition of +// the ability to pass a context and additional request options. +// +// See CreateAlias for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) CreateAliasWithContext(ctx aws.Context, input *CreateAliasInput, opts ...request.Option) (*CreateAliasOutput, error) { + req, out := c.CreateAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBuild = "CreateBuild" + +// CreateBuildRequest generates a "aws/request.Request" representing the +// client's request for the CreateBuild operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBuild for more information on using the CreateBuild +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBuildRequest method. +// req, resp := client.CreateBuildRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateBuild +func (c *GameLift) CreateBuildRequest(input *CreateBuildInput) (req *request.Request, output *CreateBuildOutput) { + op := &request.Operation{ + Name: opCreateBuild, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBuildInput{} + } + + output = &CreateBuildOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBuild API operation for Amazon GameLift. 
+// +// Creates a new Amazon GameLift build from a set of game server binary files +// stored in an Amazon Simple Storage Service (Amazon S3) location. To use this +// API call, create a .zip file containing all of the files for the build and +// store it in an Amazon S3 bucket under your AWS account. For help on packaging +// your build files and creating a build, see Uploading Your Game to Amazon +// GameLift (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html). +// +// Use this API action ONLY if you are storing your game build files in an Amazon +// S3 bucket. To create a build using files stored locally, use the CLI command +// upload-build (http://docs.aws.amazon.com/cli/latest/reference/gamelift/upload-build.html), +// which uploads the build files from a file location you specify. +// +// To create a new build using CreateBuild, identify the storage location and +// operating system of your game build. You also have the option of specifying +// a build name and version. If successful, this action creates a new build +// record with an unique build ID and in INITIALIZED status. Use the API call +// DescribeBuild to check the status of your build. A build must be in READY +// status before it can be used to create fleets to host your game. +// +// Build-related operations include: +// +// * CreateBuild +// +// * ListBuilds +// +// * DescribeBuild +// +// * UpdateBuild +// +// * DeleteBuild +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation CreateBuild for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. 
+// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeConflictException "ConflictException" +// The requested operation would cause a conflict with the current state of +// a service resource associated with the request. Resolve the conflict before +// retrying this request. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateBuild +func (c *GameLift) CreateBuild(input *CreateBuildInput) (*CreateBuildOutput, error) { + req, out := c.CreateBuildRequest(input) + return out, req.Send() +} + +// CreateBuildWithContext is the same as CreateBuild with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBuild for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) CreateBuildWithContext(ctx aws.Context, input *CreateBuildInput, opts ...request.Option) (*CreateBuildOutput, error) { + req, out := c.CreateBuildRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateFleet = "CreateFleet" + +// CreateFleetRequest generates a "aws/request.Request" representing the +// client's request for the CreateFleet operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateFleet for more information on using the CreateFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateFleetRequest method. +// req, resp := client.CreateFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateFleet +func (c *GameLift) CreateFleetRequest(input *CreateFleetInput) (req *request.Request, output *CreateFleetOutput) { + op := &request.Operation{ + Name: opCreateFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFleetInput{} + } + + output = &CreateFleetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateFleet API operation for Amazon GameLift. +// +// Creates a new fleet to run your game servers. A fleet is a set of Amazon +// Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple +// server processes to host game sessions. You configure a fleet to create instances +// with certain hardware specifications (see Amazon EC2 Instance Types (http://aws.amazon.com/ec2/instance-types/) +// for more information), and deploy a specified game build to each instance. +// A newly created fleet passes through several statuses; once it reaches the +// ACTIVE status, it can begin hosting game sessions. +// +// To create a new fleet, you must specify the following: (1) fleet name, (2) +// build ID of an uploaded game build, (3) an EC2 instance type, and (4) a run-time +// configuration that describes which server processes to run on each instance +// in the fleet. 
(Although the run-time configuration is not a required parameter, +// the fleet cannot be successfully activated without it.) +// +// You can also configure the new fleet with the following settings: +// +// * Fleet description +// +// * Access permissions for inbound traffic +// +// * Fleet-wide game session protection +// +// * Resource creation limit +// +// If you use Amazon CloudWatch for metrics, you can add the new fleet to a +// metric group. This allows you to view aggregated metrics for a set of fleets. +// Once you specify a metric group, the new fleet's metrics are included in +// the metric group's data. +// +// You have the option of creating a VPC peering connection with the new fleet. +// For more information, see VPC Peering with Amazon GameLift Fleets (http://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html). +// +// If the CreateFleet call is successful, Amazon GameLift performs the following +// tasks: +// +// * Creates a fleet record and sets the status to NEW (followed by other +// statuses as the fleet is activated). +// +// * Sets the fleet's target capacity to 1 (desired instances), which causes +// Amazon GameLift to start one new EC2 instance. +// +// * Starts launching server processes on the instance. If the fleet is configured +// to run multiple server processes per instance, Amazon GameLift staggers +// each launch by a few seconds. +// +// * Begins writing events to the fleet event log, which can be accessed +// in the Amazon GameLift console. +// +// * Sets the fleet's status to ACTIVE as soon as one server process in the +// fleet is ready to host a game session. 
+// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation CreateFleet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeConflictException "ConflictException" +// The requested operation would cause a conflict with the current state of +// a service resource associated with the request. Resolve the conflict before +// retrying this request. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested operation would cause the resource to exceed the allowed service +// limit. Resolve the issue before retrying. 
+// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateFleet +func (c *GameLift) CreateFleet(input *CreateFleetInput) (*CreateFleetOutput, error) { + req, out := c.CreateFleetRequest(input) + return out, req.Send() +} + +// CreateFleetWithContext is the same as CreateFleet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) CreateFleetWithContext(ctx aws.Context, input *CreateFleetInput, opts ...request.Option) (*CreateFleetOutput, error) { + req, out := c.CreateFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateGameSession = "CreateGameSession" + +// CreateGameSessionRequest generates a "aws/request.Request" representing the +// client's request for the CreateGameSession operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateGameSession for more information on using the CreateGameSession +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateGameSessionRequest method. +// req, resp := client.CreateGameSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSession +func (c *GameLift) CreateGameSessionRequest(input *CreateGameSessionInput) (req *request.Request, output *CreateGameSessionOutput) { + op := &request.Operation{ + Name: opCreateGameSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGameSessionInput{} + } + + output = &CreateGameSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateGameSession API operation for Amazon GameLift. +// +// Creates a multiplayer game session for players. This action creates a game +// session record and assigns an available server process in the specified fleet +// to host the game session. A fleet must have an ACTIVE status before a game +// session can be created in it. +// +// To create a game session, specify either fleet ID or alias ID and indicate +// a maximum number of players to allow in the game session. You can also provide +// a name and game-specific properties for this game session. If successful, +// a GameSession object is returned containing the game session properties and +// other settings you specified. +// +// Idempotency tokens. You can add a token that uniquely identifies game session +// requests. This is useful for ensuring that game session requests are idempotent. +// Multiple requests with the same idempotency token are processed only once; +// subsequent requests return the original result. All response values are the +// same with the exception of game session status, which may change. 
+// +// Resource creation limits. If you are creating a game session on a fleet with +// a resource creation limit policy in force, then you must specify a creator +// ID. Without this ID, Amazon GameLift has no way to evaluate the policy for +// this new game session request. +// +// Player acceptance policy. By default, newly created game sessions are open +// to new players. You can restrict new player access by using UpdateGameSession +// to change the game session's player session creation policy. +// +// Game session logs. Logs are retained for all active game sessions for 14 +// days. To access the logs, call GetGameSessionLogUrl to download the log files. +// +// Available in Amazon GameLift Local. +// +// Game-session-related operations include: +// +// * CreateGameSession +// +// * DescribeGameSessions +// +// * DescribeGameSessionDetails +// +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation CreateGameSession for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConflictException "ConflictException" +// The requested operation would cause a conflict with the current state of +// a service resource associated with the request. Resolve the conflict before +// retrying this request. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. 
+// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException" +// The requested operation would cause a conflict with the current state of +// a resource associated with the request and/or the fleet. Resolve the conflict +// before retrying. +// +// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" +// The service is unable to resolve the routing for a particular alias because +// it has a terminal RoutingStrategy associated with it. The message returned +// in this exception is the message defined in the routing strategy itself. +// Such requests should only be retried if the routing strategy for the specified +// alias is modified. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeFleetCapacityExceededException "FleetCapacityExceededException" +// The specified fleet has no available instances to fulfill a CreateGameSession +// request. Clients can retry such requests immediately or after a waiting period. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested operation would cause the resource to exceed the allowed service +// limit. Resolve the issue before retrying. +// +// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException" +// A game session with this custom ID string already exists in this fleet. Resolve +// this conflict before retrying this request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSession +func (c *GameLift) CreateGameSession(input *CreateGameSessionInput) (*CreateGameSessionOutput, error) { + req, out := c.CreateGameSessionRequest(input) + return out, req.Send() +} + +// CreateGameSessionWithContext is the same as CreateGameSession with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGameSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) CreateGameSessionWithContext(ctx aws.Context, input *CreateGameSessionInput, opts ...request.Option) (*CreateGameSessionOutput, error) { + req, out := c.CreateGameSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateGameSessionQueue = "CreateGameSessionQueue" + +// CreateGameSessionQueueRequest generates a "aws/request.Request" representing the +// client's request for the CreateGameSessionQueue operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateGameSessionQueue for more information on using the CreateGameSessionQueue +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateGameSessionQueueRequest method. 
+// req, resp := client.CreateGameSessionQueueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionQueue +func (c *GameLift) CreateGameSessionQueueRequest(input *CreateGameSessionQueueInput) (req *request.Request, output *CreateGameSessionQueueOutput) { + op := &request.Operation{ + Name: opCreateGameSessionQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGameSessionQueueInput{} + } + + output = &CreateGameSessionQueueOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateGameSessionQueue API operation for Amazon GameLift. +// +// Establishes a new queue for processing requests to place new game sessions. +// A queue identifies where new game sessions can be hosted -- by specifying +// a list of destinations (fleets or aliases) -- and how long requests can wait +// in the queue before timing out. You can set up a queue to try to place game +// sessions on fleets in multiple regions. To add placement requests to a queue, +// call StartGameSessionPlacement and reference the queue name. +// +// Destination order. When processing a request for a game session, Amazon GameLift +// tries each destination in order until it finds one with available resources +// to host the new game session. A queue's default order is determined by how +// destinations are listed. The default order is overridden when a game session +// placement request provides player latency information. Player latency information +// enables Amazon GameLift to prioritize destinations where players report the +// lowest average latency, as a result placing the new game session where the +// majority of players will have the best possible gameplay experience. +// +// Player latency policies. 
For placement requests containing player latency +// information, use player latency policies to protect individual players from +// very high latencies. With a latency cap, even when a destination can deliver +// a low latency for most players, the game is not placed where any individual +// player is reporting latency higher than a policy's maximum. A queue can have +// multiple latency policies, which are enforced consecutively starting with +// the policy with the lowest latency cap. Use multiple policies to gradually +// relax latency controls; for example, you might set a policy with a low latency +// cap for the first 60 seconds, a second policy with a higher cap for the next +// 60 seconds, etc. +// +// To create a new queue, provide a name, timeout value, a list of destinations +// and, if desired, a set of latency policies. If successful, a new queue object +// is returned. +// +// Queue-related operations include: +// +// * CreateGameSessionQueue +// +// * DescribeGameSessionQueues +// +// * UpdateGameSessionQueue +// +// * DeleteGameSessionQueue +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation CreateGameSessionQueue for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. 
+// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested operation would cause the resource to exceed the allowed service +// limit. Resolve the issue before retrying. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionQueue +func (c *GameLift) CreateGameSessionQueue(input *CreateGameSessionQueueInput) (*CreateGameSessionQueueOutput, error) { + req, out := c.CreateGameSessionQueueRequest(input) + return out, req.Send() +} + +// CreateGameSessionQueueWithContext is the same as CreateGameSessionQueue with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGameSessionQueue for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) CreateGameSessionQueueWithContext(ctx aws.Context, input *CreateGameSessionQueueInput, opts ...request.Option) (*CreateGameSessionQueueOutput, error) { + req, out := c.CreateGameSessionQueueRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateMatchmakingConfiguration = "CreateMatchmakingConfiguration" + +// CreateMatchmakingConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the CreateMatchmakingConfiguration operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMatchmakingConfiguration for more information on using the CreateMatchmakingConfiguration +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateMatchmakingConfigurationRequest method. +// req, resp := client.CreateMatchmakingConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingConfiguration +func (c *GameLift) CreateMatchmakingConfigurationRequest(input *CreateMatchmakingConfigurationInput) (req *request.Request, output *CreateMatchmakingConfigurationOutput) { + op := &request.Operation{ + Name: opCreateMatchmakingConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateMatchmakingConfigurationInput{} + } + + output = &CreateMatchmakingConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMatchmakingConfiguration API operation for Amazon GameLift. +// +// Defines a new matchmaking configuration for use with FlexMatch. A matchmaking +// configuration sets out guidelines for matching players and getting the matches +// into games. You can set up multiple matchmaking configurations to handle +// the scenarios needed for your game. Each matchmaking request (StartMatchmaking) +// specifies a configuration for the match and provides player attributes to +// support the configuration being used. +// +// To create a matchmaking configuration, at a minimum you must specify the +// following: configuration name; a rule set that governs how to evaluate players +// and find acceptable matches; a game session queue to use when placing a new +// game session for the match; and the maximum time allowed for a matchmaking +// attempt. 
+// +// Player acceptance -- In each configuration, you have the option to require +// that all players accept participation in a proposed match. To enable this +// feature, set AcceptanceRequired to true and specify a time limit for player +// acceptance. Players have the option to accept or reject a proposed match, +// and a match does not move ahead to game session placement unless all matched +// players accept. +// +// Matchmaking status notification -- There are two ways to track the progress +// of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; +// or (2) receiving notifications with Amazon Simple Notification Service (SNS). +// To use notifications, you first need to set up an SNS topic to receive the +// notifications, and provide the topic ARN in the matchmaking configuration +// (see Setting up Notifications for Matchmaking (http://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html)). +// Since notifications promise only "best effort" delivery, we recommend calling +// DescribeMatchmaking if no notifications are received within 30 seconds. +// +// Operations related to match configurations and rule sets include: +// +// * CreateMatchmakingConfiguration +// +// * DescribeMatchmakingConfigurations +// +// * UpdateMatchmakingConfiguration +// +// * DeleteMatchmakingConfiguration +// +// * CreateMatchmakingRuleSet +// +// * DescribeMatchmakingRuleSets +// +// * ValidateMatchmakingRuleSet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation CreateMatchmakingConfiguration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. 
Correct the invalid +// parameter values before retrying. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested operation would cause the resource to exceed the allowed service +// limit. Resolve the issue before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" +// The requested operation is not supported in the region specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingConfiguration +func (c *GameLift) CreateMatchmakingConfiguration(input *CreateMatchmakingConfigurationInput) (*CreateMatchmakingConfigurationOutput, error) { + req, out := c.CreateMatchmakingConfigurationRequest(input) + return out, req.Send() +} + +// CreateMatchmakingConfigurationWithContext is the same as CreateMatchmakingConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMatchmakingConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) CreateMatchmakingConfigurationWithContext(ctx aws.Context, input *CreateMatchmakingConfigurationInput, opts ...request.Option) (*CreateMatchmakingConfigurationOutput, error) { + req, out := c.CreateMatchmakingConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCreateMatchmakingRuleSet = "CreateMatchmakingRuleSet" + +// CreateMatchmakingRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateMatchmakingRuleSet operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMatchmakingRuleSet for more information on using the CreateMatchmakingRuleSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateMatchmakingRuleSetRequest method. +// req, resp := client.CreateMatchmakingRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingRuleSet +func (c *GameLift) CreateMatchmakingRuleSetRequest(input *CreateMatchmakingRuleSetInput) (req *request.Request, output *CreateMatchmakingRuleSetOutput) { + op := &request.Operation{ + Name: opCreateMatchmakingRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateMatchmakingRuleSetInput{} + } + + output = &CreateMatchmakingRuleSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMatchmakingRuleSet API operation for Amazon GameLift. +// +// Creates a new rule set for FlexMatch matchmaking. A rule set describes the +// type of match to create, such as the number and size of teams, and sets the +// parameters for acceptable player matches, such as minimum skill level or +// character type. 
Rule sets are used in matchmaking configurations, which define
+// how matchmaking requests are handled. Each MatchmakingConfiguration uses
+// one rule set; you can set up multiple rule sets to handle the scenarios that
+// suit your game (such as for different game modes), and create a separate
+// matchmaking configuration for each rule set. See additional information on
+// rule set content in the MatchmakingRuleSet structure. For help creating rule
+// sets, including useful examples, see the topic Adding FlexMatch to Your
+// Game (http://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html).
+//
+// Once created, matchmaking rule sets cannot be changed or deleted, so we recommend
+// checking the rule set syntax using ValidateMatchmakingRuleSet before creating
+// the rule set.
+//
+// To create a matchmaking rule set, provide the set of rules and a unique name.
+// Rule sets must be defined in the same region as the matchmaking configuration
+// they will be used with. Rule sets cannot be edited or deleted. If you need
+// to change a rule set, create a new one with the necessary edits and then
+// update matchmaking configurations to use the new rule set.
+//
+// Operations related to match configurations and rule sets include:
+//
+// * CreateMatchmakingConfiguration
+//
+// * DescribeMatchmakingConfigurations
+//
+// * UpdateMatchmakingConfiguration
+//
+// * DeleteMatchmakingConfiguration
+//
+// * CreateMatchmakingRuleSet
+//
+// * DescribeMatchmakingRuleSets
+//
+// * ValidateMatchmakingRuleSet
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon GameLift's
+// API operation CreateMatchmakingRuleSet for usage and error information.
+// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" +// The requested operation is not supported in the region specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingRuleSet +func (c *GameLift) CreateMatchmakingRuleSet(input *CreateMatchmakingRuleSetInput) (*CreateMatchmakingRuleSetOutput, error) { + req, out := c.CreateMatchmakingRuleSetRequest(input) + return out, req.Send() +} + +// CreateMatchmakingRuleSetWithContext is the same as CreateMatchmakingRuleSet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMatchmakingRuleSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) CreateMatchmakingRuleSetWithContext(ctx aws.Context, input *CreateMatchmakingRuleSetInput, opts ...request.Option) (*CreateMatchmakingRuleSetOutput, error) { + req, out := c.CreateMatchmakingRuleSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreatePlayerSession = "CreatePlayerSession" + +// CreatePlayerSessionRequest generates a "aws/request.Request" representing the +// client's request for the CreatePlayerSession operation. 
The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreatePlayerSession for more information on using the CreatePlayerSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreatePlayerSessionRequest method. +// req, resp := client.CreatePlayerSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSession +func (c *GameLift) CreatePlayerSessionRequest(input *CreatePlayerSessionInput) (req *request.Request, output *CreatePlayerSessionOutput) { + op := &request.Operation{ + Name: opCreatePlayerSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlayerSessionInput{} + } + + output = &CreatePlayerSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreatePlayerSession API operation for Amazon GameLift. +// +// Adds a player to a game session and creates a player session record. Before +// a player can be added, a game session must have an ACTIVE status, have a +// creation policy of ALLOW_ALL, and have an open player slot. To add a group +// of players to a game session, use CreatePlayerSessions. +// +// To create a player session, specify a game session ID, player ID, and optionally +// a string of player data. If successful, the player is added to the game session +// and a new PlayerSession object is returned. Player sessions cannot be updated. +// +// Available in Amazon GameLift Local. 
+// +// Player-session-related operations include: +// +// * CreatePlayerSession +// +// * CreatePlayerSessions +// +// * DescribePlayerSessions +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation CreatePlayerSession for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidGameSessionStatusException "InvalidGameSessionStatusException" +// The requested operation would cause a conflict with the current state of +// a resource associated with the request and/or the game instance. Resolve +// the conflict before retrying. +// +// * ErrCodeGameSessionFullException "GameSessionFullException" +// The game instance is currently full and cannot allow the requested player(s) +// to join. Clients can retry such requests immediately or after a waiting period. +// +// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" +// The service is unable to resolve the routing for a particular alias because +// it has a terminal RoutingStrategy associated with it. The message returned +// in this exception is the message defined in the routing strategy itself. +// Such requests should only be retried if the routing strategy for the specified +// alias is modified. 
+// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSession +func (c *GameLift) CreatePlayerSession(input *CreatePlayerSessionInput) (*CreatePlayerSessionOutput, error) { + req, out := c.CreatePlayerSessionRequest(input) + return out, req.Send() +} + +// CreatePlayerSessionWithContext is the same as CreatePlayerSession with the addition of +// the ability to pass a context and additional request options. +// +// See CreatePlayerSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) CreatePlayerSessionWithContext(ctx aws.Context, input *CreatePlayerSessionInput, opts ...request.Option) (*CreatePlayerSessionOutput, error) { + req, out := c.CreatePlayerSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreatePlayerSessions = "CreatePlayerSessions" + +// CreatePlayerSessionsRequest generates a "aws/request.Request" representing the +// client's request for the CreatePlayerSessions operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CreatePlayerSessions for more information on using the CreatePlayerSessions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreatePlayerSessionsRequest method. +// req, resp := client.CreatePlayerSessionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessions +func (c *GameLift) CreatePlayerSessionsRequest(input *CreatePlayerSessionsInput) (req *request.Request, output *CreatePlayerSessionsOutput) { + op := &request.Operation{ + Name: opCreatePlayerSessions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlayerSessionsInput{} + } + + output = &CreatePlayerSessionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreatePlayerSessions API operation for Amazon GameLift. +// +// Adds a group of players to a game session. This action is useful with a team +// matching feature. Before players can be added, a game session must have an +// ACTIVE status, have a creation policy of ALLOW_ALL, and have an open player +// slot. To add a single player to a game session, use CreatePlayerSession. +// +// To create player sessions, specify a game session ID, a list of player IDs, +// and optionally a set of player data strings. If successful, the players are +// added to the game session and a set of new PlayerSession objects is returned. +// Player sessions cannot be updated. +// +// Available in Amazon GameLift Local. 
+// +// Player-session-related operations include: +// +// * CreatePlayerSession +// +// * CreatePlayerSessions +// +// * DescribePlayerSessions +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation CreatePlayerSessions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidGameSessionStatusException "InvalidGameSessionStatusException" +// The requested operation would cause a conflict with the current state of +// a resource associated with the request and/or the game instance. Resolve +// the conflict before retrying. +// +// * ErrCodeGameSessionFullException "GameSessionFullException" +// The game instance is currently full and cannot allow the requested player(s) +// to join. Clients can retry such requests immediately or after a waiting period. +// +// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" +// The service is unable to resolve the routing for a particular alias because +// it has a terminal RoutingStrategy associated with it. The message returned +// in this exception is the message defined in the routing strategy itself. +// Such requests should only be retried if the routing strategy for the specified +// alias is modified. 
+//
+// * ErrCodeInvalidRequestException "InvalidRequestException"
+// One or more parameter values in the request are invalid. Correct the invalid
+// parameter values before retrying.
+//
+// * ErrCodeNotFoundException "NotFoundException"
+// A service resource associated with the request could not be found. Clients
+// should not retry such requests.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessions
+func (c *GameLift) CreatePlayerSessions(input *CreatePlayerSessionsInput) (*CreatePlayerSessionsOutput, error) {
+	req, out := c.CreatePlayerSessionsRequest(input)
+	return out, req.Send()
+}
+
+// CreatePlayerSessionsWithContext is the same as CreatePlayerSessions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreatePlayerSessions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) CreatePlayerSessionsWithContext(ctx aws.Context, input *CreatePlayerSessionsInput, opts ...request.Option) (*CreatePlayerSessionsOutput, error) {
+	req, out := c.CreatePlayerSessionsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opCreateVpcPeeringAuthorization = "CreateVpcPeeringAuthorization"
+
+// CreateVpcPeeringAuthorizationRequest generates a "aws/request.Request" representing the
+// client's request for the CreateVpcPeeringAuthorization operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+// +// See CreateVpcPeeringAuthorization for more information on using the CreateVpcPeeringAuthorization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateVpcPeeringAuthorizationRequest method. +// req, resp := client.CreateVpcPeeringAuthorizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringAuthorization +func (c *GameLift) CreateVpcPeeringAuthorizationRequest(input *CreateVpcPeeringAuthorizationInput) (req *request.Request, output *CreateVpcPeeringAuthorizationOutput) { + op := &request.Operation{ + Name: opCreateVpcPeeringAuthorization, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcPeeringAuthorizationInput{} + } + + output = &CreateVpcPeeringAuthorizationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateVpcPeeringAuthorization API operation for Amazon GameLift. +// +// Requests authorization to create or delete a peer connection between the +// VPC for your Amazon GameLift fleet and a virtual private cloud (VPC) in your +// AWS account. VPC peering enables the game servers on your fleet to communicate +// directly with other AWS resources. Once you've received authorization, call +// CreateVpcPeeringConnection to establish the peering connection. For more +// information, see VPC Peering with Amazon GameLift Fleets (http://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html). +// +// You can peer with VPCs that are owned by any AWS account you have access +// to, including the account that you use to manage your Amazon GameLift fleets. +// You cannot peer with VPCs that are in different regions. 
+// +// To request authorization to create a connection, call this operation from +// the AWS account with the VPC that you want to peer to your Amazon GameLift +// fleet. For example, to enable your game servers to retrieve data from a DynamoDB +// table, use the account that manages that DynamoDB resource. Identify the +// following values: (1) The ID of the VPC that you want to peer with, and (2) +// the ID of the AWS account that you use to manage Amazon GameLift. If successful, +// VPC peering is authorized for the specified VPC. +// +// To request authorization to delete a connection, call this operation from +// the AWS account with the VPC that is peered with your Amazon GameLift fleet. +// Identify the following values: (1) VPC ID that you want to delete the peering +// connection for, and (2) ID of the AWS account that you use to manage Amazon +// GameLift. +// +// The authorization remains valid for 24 hours unless it is canceled by a call +// to DeleteVpcPeeringAuthorization. You must create or delete the peering connection +// while the authorization is valid. +// +// VPC peering connection operations include: +// +// * CreateVpcPeeringAuthorization +// +// * DescribeVpcPeeringAuthorizations +// +// * DeleteVpcPeeringAuthorization +// +// * CreateVpcPeeringConnection +// +// * DescribeVpcPeeringConnections +// +// * DeleteVpcPeeringConnection +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation CreateVpcPeeringAuthorization for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. 
+// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringAuthorization +func (c *GameLift) CreateVpcPeeringAuthorization(input *CreateVpcPeeringAuthorizationInput) (*CreateVpcPeeringAuthorizationOutput, error) { + req, out := c.CreateVpcPeeringAuthorizationRequest(input) + return out, req.Send() +} + +// CreateVpcPeeringAuthorizationWithContext is the same as CreateVpcPeeringAuthorization with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpcPeeringAuthorization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) CreateVpcPeeringAuthorizationWithContext(ctx aws.Context, input *CreateVpcPeeringAuthorizationInput, opts ...request.Option) (*CreateVpcPeeringAuthorizationOutput, error) { + req, out := c.CreateVpcPeeringAuthorizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+	return out, req.Send()
+}
+
+const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection"
+
+// CreateVpcPeeringConnectionRequest generates a "aws/request.Request" representing the
+// client's request for the CreateVpcPeeringConnection operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateVpcPeeringConnection for more information on using the CreateVpcPeeringConnection
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the CreateVpcPeeringConnectionRequest method.
+//    req, resp := client.CreateVpcPeeringConnectionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringConnection
+func (c *GameLift) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectionInput) (req *request.Request, output *CreateVpcPeeringConnectionOutput) {
+	op := &request.Operation{
+		Name:       opCreateVpcPeeringConnection,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateVpcPeeringConnectionInput{}
+	}
+
+	output = &CreateVpcPeeringConnectionOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateVpcPeeringConnection API operation for Amazon GameLift.
+//
+// Establishes a VPC peering connection between a virtual private cloud (VPC)
+// in an AWS account with the VPC for your Amazon GameLift fleet. VPC peering
+// enables the game servers on your fleet to communicate directly with other
+// AWS resources.
You can peer with VPCs in any AWS account that you have access +// to, including the account that you use to manage your Amazon GameLift fleets. +// You cannot peer with VPCs that are in different regions. For more information, +// see VPC Peering with Amazon GameLift Fleets (http://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html). +// +// Before calling this operation to establish the peering connection, you first +// need to call CreateVpcPeeringAuthorization and identify the VPC you want +// to peer with. Once the authorization for the specified VPC is issued, you +// have 24 hours to establish the connection. These two operations handle all +// tasks necessary to peer the two VPCs, including acceptance, updating routing +// tables, etc. +// +// To establish the connection, call this operation from the AWS account that +// is used to manage the Amazon GameLift fleets. Identify the following values: +// (1) The ID of the fleet you want to be enable a VPC peering connection for; +// (2) The AWS account with the VPC that you want to peer with; and (3) The +// ID of the VPC you want to peer with. This operation is asynchronous. If successful, +// a VpcPeeringConnection request is created. You can use continuous polling +// to track the request's status using DescribeVpcPeeringConnections, or by +// monitoring fleet events for success or failure using DescribeFleetEvents. +// +// VPC peering connection operations include: +// +// * CreateVpcPeeringAuthorization +// +// * DescribeVpcPeeringAuthorizations +// +// * DeleteVpcPeeringAuthorization +// +// * CreateVpcPeeringConnection +// +// * DescribeVpcPeeringConnections +// +// * DeleteVpcPeeringConnection +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon GameLift's +// API operation CreateVpcPeeringConnection for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringConnection +func (c *GameLift) CreateVpcPeeringConnection(input *CreateVpcPeeringConnectionInput) (*CreateVpcPeeringConnectionOutput, error) { + req, out := c.CreateVpcPeeringConnectionRequest(input) + return out, req.Send() +} + +// CreateVpcPeeringConnectionWithContext is the same as CreateVpcPeeringConnection with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpcPeeringConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *GameLift) CreateVpcPeeringConnectionWithContext(ctx aws.Context, input *CreateVpcPeeringConnectionInput, opts ...request.Option) (*CreateVpcPeeringConnectionOutput, error) {
+	req, out := c.CreateVpcPeeringConnectionRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteAlias = "DeleteAlias"
+
+// DeleteAliasRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteAlias operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteAlias for more information on using the DeleteAlias
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteAliasRequest method.
+//    req, resp := client.DeleteAliasRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteAlias
+func (c *GameLift) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) {
+	op := &request.Operation{
+		Name:       opDeleteAlias,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteAliasInput{}
+	}
+
+	output = &DeleteAliasOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// DeleteAlias API operation for Amazon GameLift.
+//
+// Deletes an alias. This action removes all record of the alias.
Game clients +// attempting to access a server process using the deleted alias receive an +// error. To delete an alias, specify the alias ID to be deleted. +// +// Alias-related operations include: +// +// * CreateAlias +// +// * ListAliases +// +// * DescribeAlias +// +// * UpdateAlias +// +// * DeleteAlias +// +// * ResolveAlias +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DeleteAlias for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteAlias +func (c *GameLift) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { + req, out := c.DeleteAliasRequest(input) + return out, req.Send() +} + +// DeleteAliasWithContext is the same as DeleteAlias with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) DeleteAliasWithContext(ctx aws.Context, input *DeleteAliasInput, opts ...request.Option) (*DeleteAliasOutput, error) {
+	req, out := c.DeleteAliasRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteBuild = "DeleteBuild"
+
+// DeleteBuildRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBuild operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBuild for more information on using the DeleteBuild
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteBuildRequest method.
+//    req, resp := client.DeleteBuildRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteBuild
+func (c *GameLift) DeleteBuildRequest(input *DeleteBuildInput) (req *request.Request, output *DeleteBuildOutput) {
+	op := &request.Operation{
+		Name:       opDeleteBuild,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteBuildInput{}
+	}
+
+	output = &DeleteBuildOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// DeleteBuild API operation for Amazon GameLift.
+// +// Deletes a build. This action permanently deletes the build record and any +// uploaded build files. +// +// To delete a build, specify its ID. Deleting a build does not affect the status +// of any active fleets using the build, but you can no longer create new fleets +// with the deleted build. +// +// Build-related operations include: +// +// * CreateBuild +// +// * ListBuilds +// +// * DescribeBuild +// +// * UpdateBuild +// +// * DeleteBuild +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DeleteBuild for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteBuild +func (c *GameLift) DeleteBuild(input *DeleteBuildInput) (*DeleteBuildOutput, error) { + req, out := c.DeleteBuildRequest(input) + return out, req.Send() +} + +// DeleteBuildWithContext is the same as DeleteBuild with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBuild for details on how to use this API operation. 
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) DeleteBuildWithContext(ctx aws.Context, input *DeleteBuildInput, opts ...request.Option) (*DeleteBuildOutput, error) {
+	req, out := c.DeleteBuildRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteFleet = "DeleteFleet"
+
+// DeleteFleetRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteFleet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteFleet for more information on using the DeleteFleet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteFleetRequest method.
+// req, resp := client.DeleteFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteFleet +func (c *GameLift) DeleteFleetRequest(input *DeleteFleetInput) (req *request.Request, output *DeleteFleetOutput) { + op := &request.Operation{ + Name: opDeleteFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFleetInput{} + } + + output = &DeleteFleetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteFleet API operation for Amazon GameLift. +// +// Deletes everything related to a fleet. Before deleting a fleet, you must +// set the fleet's desired capacity to zero. See UpdateFleetCapacity. +// +// This action removes the fleet's resources and the fleet record. Once a fleet +// is deleted, you can no longer use that fleet. +// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon GameLift's +// API operation DeleteFleet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException" +// The requested operation would cause a conflict with the current state of +// a resource associated with the request and/or the fleet. Resolve the conflict +// before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteFleet +func (c *GameLift) DeleteFleet(input *DeleteFleetInput) (*DeleteFleetOutput, error) { + req, out := c.DeleteFleetRequest(input) + return out, req.Send() +} + +// DeleteFleetWithContext is the same as DeleteFleet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *GameLift) DeleteFleetWithContext(ctx aws.Context, input *DeleteFleetInput, opts ...request.Option) (*DeleteFleetOutput, error) {
+	req, out := c.DeleteFleetRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteGameSessionQueue = "DeleteGameSessionQueue"
+
+// DeleteGameSessionQueueRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteGameSessionQueue operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteGameSessionQueue for more information on using the DeleteGameSessionQueue
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteGameSessionQueueRequest method.
+//    req, resp := client.DeleteGameSessionQueueRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueue
+func (c *GameLift) DeleteGameSessionQueueRequest(input *DeleteGameSessionQueueInput) (req *request.Request, output *DeleteGameSessionQueueOutput) {
+	op := &request.Operation{
+		Name:       opDeleteGameSessionQueue,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteGameSessionQueueInput{}
+	}
+
+	output = &DeleteGameSessionQueueOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DeleteGameSessionQueue API operation for Amazon GameLift.
+//
+// Deletes a game session queue.
This action means that any StartGameSessionPlacement +// requests that reference this queue will fail. To delete a queue, specify +// the queue name. +// +// Queue-related operations include: +// +// * CreateGameSessionQueue +// +// * DescribeGameSessionQueues +// +// * UpdateGameSessionQueue +// +// * DeleteGameSessionQueue +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DeleteGameSessionQueue for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueue +func (c *GameLift) DeleteGameSessionQueue(input *DeleteGameSessionQueueInput) (*DeleteGameSessionQueueOutput, error) { + req, out := c.DeleteGameSessionQueueRequest(input) + return out, req.Send() +} + +// DeleteGameSessionQueueWithContext is the same as DeleteGameSessionQueue with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteGameSessionQueue for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DeleteGameSessionQueueWithContext(ctx aws.Context, input *DeleteGameSessionQueueInput, opts ...request.Option) (*DeleteGameSessionQueueOutput, error) { + req, out := c.DeleteGameSessionQueueRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteMatchmakingConfiguration = "DeleteMatchmakingConfiguration" + +// DeleteMatchmakingConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMatchmakingConfiguration operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteMatchmakingConfiguration for more information on using the DeleteMatchmakingConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteMatchmakingConfigurationRequest method. 
+// req, resp := client.DeleteMatchmakingConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteMatchmakingConfiguration +func (c *GameLift) DeleteMatchmakingConfigurationRequest(input *DeleteMatchmakingConfigurationInput) (req *request.Request, output *DeleteMatchmakingConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteMatchmakingConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMatchmakingConfigurationInput{} + } + + output = &DeleteMatchmakingConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteMatchmakingConfiguration API operation for Amazon GameLift. +// +// Permanently removes a FlexMatch matchmaking configuration. To delete, specify +// the configuration name. A matchmaking configuration cannot be deleted if +// it is being used in any active matchmaking tickets. +// +// Operations related to match configurations and rule sets include: +// +// * CreateMatchmakingConfiguration +// +// * DescribeMatchmakingConfigurations +// +// * UpdateMatchmakingConfiguration +// +// * DeleteMatchmakingConfiguration +// +// * CreateMatchmakingRuleSet +// +// * DescribeMatchmakingRuleSets +// +// * ValidateMatchmakingRuleSet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DeleteMatchmakingConfiguration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. 
+// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" +// The requested operation is not supported in the region specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteMatchmakingConfiguration +func (c *GameLift) DeleteMatchmakingConfiguration(input *DeleteMatchmakingConfigurationInput) (*DeleteMatchmakingConfigurationOutput, error) { + req, out := c.DeleteMatchmakingConfigurationRequest(input) + return out, req.Send() +} + +// DeleteMatchmakingConfigurationWithContext is the same as DeleteMatchmakingConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteMatchmakingConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DeleteMatchmakingConfigurationWithContext(ctx aws.Context, input *DeleteMatchmakingConfigurationInput, opts ...request.Option) (*DeleteMatchmakingConfigurationOutput, error) { + req, out := c.DeleteMatchmakingConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteScalingPolicy = "DeleteScalingPolicy" + +// DeleteScalingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteScalingPolicy operation. 
The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteScalingPolicy for more information on using the DeleteScalingPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteScalingPolicyRequest method. +// req, resp := client.DeleteScalingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteScalingPolicy +func (c *GameLift) DeleteScalingPolicyRequest(input *DeleteScalingPolicyInput) (req *request.Request, output *DeleteScalingPolicyOutput) { + op := &request.Operation{ + Name: opDeleteScalingPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteScalingPolicyInput{} + } + + output = &DeleteScalingPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteScalingPolicy API operation for Amazon GameLift. +// +// Deletes a fleet scaling policy. This action means that the policy is no longer +// in force and removes all record of it. To delete a scaling policy, specify +// both the scaling policy name and the fleet ID it is associated with. 
+// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DeleteScalingPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteScalingPolicy +func (c *GameLift) DeleteScalingPolicy(input *DeleteScalingPolicyInput) (*DeleteScalingPolicyOutput, error) { + req, out := c.DeleteScalingPolicyRequest(input) + return out, req.Send() +} + +// DeleteScalingPolicyWithContext is the same as DeleteScalingPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteScalingPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DeleteScalingPolicyWithContext(ctx aws.Context, input *DeleteScalingPolicyInput, opts ...request.Option) (*DeleteScalingPolicyOutput, error) { + req, out := c.DeleteScalingPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVpcPeeringAuthorization = "DeleteVpcPeeringAuthorization" + +// DeleteVpcPeeringAuthorizationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpcPeeringAuthorization operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVpcPeeringAuthorization for more information on using the DeleteVpcPeeringAuthorization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DeleteVpcPeeringAuthorizationRequest method. +// req, resp := client.DeleteVpcPeeringAuthorizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringAuthorization +func (c *GameLift) DeleteVpcPeeringAuthorizationRequest(input *DeleteVpcPeeringAuthorizationInput) (req *request.Request, output *DeleteVpcPeeringAuthorizationOutput) { + op := &request.Operation{ + Name: opDeleteVpcPeeringAuthorization, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcPeeringAuthorizationInput{} + } + + output = &DeleteVpcPeeringAuthorizationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteVpcPeeringAuthorization API operation for Amazon GameLift. +// +// Cancels a pending VPC peering authorization for the specified VPC. If the +// authorization has already been used to create a peering connection, call +// DeleteVpcPeeringConnection to remove the connection. +// +// VPC peering connection operations include: +// +// * CreateVpcPeeringAuthorization +// +// * DescribeVpcPeeringAuthorizations +// +// * DeleteVpcPeeringAuthorization +// +// * CreateVpcPeeringConnection +// +// * DescribeVpcPeeringConnections +// +// * DeleteVpcPeeringConnection +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DeleteVpcPeeringAuthorization for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. 
+// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringAuthorization +func (c *GameLift) DeleteVpcPeeringAuthorization(input *DeleteVpcPeeringAuthorizationInput) (*DeleteVpcPeeringAuthorizationOutput, error) { + req, out := c.DeleteVpcPeeringAuthorizationRequest(input) + return out, req.Send() +} + +// DeleteVpcPeeringAuthorizationWithContext is the same as DeleteVpcPeeringAuthorization with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpcPeeringAuthorization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DeleteVpcPeeringAuthorizationWithContext(ctx aws.Context, input *DeleteVpcPeeringAuthorizationInput, opts ...request.Option) (*DeleteVpcPeeringAuthorizationOutput, error) { + req, out := c.DeleteVpcPeeringAuthorizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection" + +// DeleteVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpcPeeringConnection operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVpcPeeringConnection for more information on using the DeleteVpcPeeringConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVpcPeeringConnectionRequest method. +// req, resp := client.DeleteVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringConnection +func (c *GameLift) DeleteVpcPeeringConnectionRequest(input *DeleteVpcPeeringConnectionInput) (req *request.Request, output *DeleteVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opDeleteVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcPeeringConnectionInput{} + } + + output = &DeleteVpcPeeringConnectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteVpcPeeringConnection API operation for Amazon GameLift. +// +// Removes a VPC peering connection. To delete the connection, you must have +// a valid authorization for the VPC peering connection that you want to delete. 
+// You can check for an authorization by calling DescribeVpcPeeringAuthorizations +// or request a new one using CreateVpcPeeringAuthorization. +// +// Once a valid authorization exists, call this operation from the AWS account +// that is used to manage the Amazon GameLift fleets. Identify the connection +// to delete by the connection ID and fleet ID. If successful, the connection +// is removed. +// +// VPC peering connection operations include: +// +// * CreateVpcPeeringAuthorization +// +// * DescribeVpcPeeringAuthorizations +// +// * DeleteVpcPeeringAuthorization +// +// * CreateVpcPeeringConnection +// +// * DescribeVpcPeeringConnections +// +// * DeleteVpcPeeringConnection +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DeleteVpcPeeringConnection for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringConnection +func (c *GameLift) DeleteVpcPeeringConnection(input *DeleteVpcPeeringConnectionInput) (*DeleteVpcPeeringConnectionOutput, error) { + req, out := c.DeleteVpcPeeringConnectionRequest(input) + return out, req.Send() +} + +// DeleteVpcPeeringConnectionWithContext is the same as DeleteVpcPeeringConnection with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpcPeeringConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DeleteVpcPeeringConnectionWithContext(ctx aws.Context, input *DeleteVpcPeeringConnectionInput, opts ...request.Option) (*DeleteVpcPeeringConnectionOutput, error) { + req, out := c.DeleteVpcPeeringConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAlias = "DescribeAlias" + +// DescribeAliasRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlias operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAlias for more information on using the DescribeAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAliasRequest method. 
+// req, resp := client.DescribeAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeAlias +func (c *GameLift) DescribeAliasRequest(input *DescribeAliasInput) (req *request.Request, output *DescribeAliasOutput) { + op := &request.Operation{ + Name: opDescribeAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAliasInput{} + } + + output = &DescribeAliasOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAlias API operation for Amazon GameLift. +// +// Retrieves properties for an alias. This operation returns all alias metadata +// and settings. To get an alias's target fleet ID only, use ResolveAlias. +// +// To get alias properties, specify the alias ID. If successful, the requested +// alias record is returned. +// +// Alias-related operations include: +// +// * CreateAlias +// +// * ListAliases +// +// * DescribeAlias +// +// * UpdateAlias +// +// * DeleteAlias +// +// * ResolveAlias +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeAlias for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. 
+// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeAlias +func (c *GameLift) DescribeAlias(input *DescribeAliasInput) (*DescribeAliasOutput, error) { + req, out := c.DescribeAliasRequest(input) + return out, req.Send() +} + +// DescribeAliasWithContext is the same as DescribeAlias with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeAliasWithContext(ctx aws.Context, input *DescribeAliasInput, opts ...request.Option) (*DescribeAliasOutput, error) { + req, out := c.DescribeAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeBuild = "DescribeBuild" + +// DescribeBuildRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBuild operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeBuild for more information on using the DescribeBuild +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeBuildRequest method. +// req, resp := client.DescribeBuildRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeBuild +func (c *GameLift) DescribeBuildRequest(input *DescribeBuildInput) (req *request.Request, output *DescribeBuildOutput) { + op := &request.Operation{ + Name: opDescribeBuild, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBuildInput{} + } + + output = &DescribeBuildOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeBuild API operation for Amazon GameLift. +// +// Retrieves properties for a build. To get a build record, specify a build +// ID. If successful, an object containing the build properties is returned. +// +// Build-related operations include: +// +// * CreateBuild +// +// * ListBuilds +// +// * DescribeBuild +// +// * UpdateBuild +// +// * DeleteBuild +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeBuild for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. 
+// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeBuild +func (c *GameLift) DescribeBuild(input *DescribeBuildInput) (*DescribeBuildOutput, error) { + req, out := c.DescribeBuildRequest(input) + return out, req.Send() +} + +// DescribeBuildWithContext is the same as DescribeBuild with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeBuild for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeBuildWithContext(ctx aws.Context, input *DescribeBuildInput, opts ...request.Option) (*DescribeBuildOutput, error) { + req, out := c.DescribeBuildRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeEC2InstanceLimits = "DescribeEC2InstanceLimits" + +// DescribeEC2InstanceLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEC2InstanceLimits operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEC2InstanceLimits for more information on using the DescribeEC2InstanceLimits +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEC2InstanceLimitsRequest method. +// req, resp := client.DescribeEC2InstanceLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeEC2InstanceLimits +func (c *GameLift) DescribeEC2InstanceLimitsRequest(input *DescribeEC2InstanceLimitsInput) (req *request.Request, output *DescribeEC2InstanceLimitsOutput) { + op := &request.Operation{ + Name: opDescribeEC2InstanceLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEC2InstanceLimitsInput{} + } + + output = &DescribeEC2InstanceLimitsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEC2InstanceLimits API operation for Amazon GameLift. +// +// Retrieves the following information for the specified EC2 instance type: +// +// * maximum number of instances allowed per AWS account (service limit) +// +// * current usage level for the AWS account +// +// Service limits vary depending on region. Available regions for Amazon GameLift +// can be found in the AWS Management Console for Amazon GameLift (see the drop-down +// list in the upper right corner). 
+// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeEC2InstanceLimits for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeEC2InstanceLimits +func (c *GameLift) DescribeEC2InstanceLimits(input *DescribeEC2InstanceLimitsInput) (*DescribeEC2InstanceLimitsOutput, error) { + req, out := c.DescribeEC2InstanceLimitsRequest(input) + return out, req.Send() +} + +// DescribeEC2InstanceLimitsWithContext is the same as DescribeEC2InstanceLimits with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEC2InstanceLimits for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeEC2InstanceLimitsWithContext(ctx aws.Context, input *DescribeEC2InstanceLimitsInput, opts ...request.Option) (*DescribeEC2InstanceLimitsOutput, error) { + req, out := c.DescribeEC2InstanceLimitsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFleetAttributes = "DescribeFleetAttributes" + +// DescribeFleetAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetAttributes operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleetAttributes for more information on using the DescribeFleetAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeFleetAttributesRequest method. +// req, resp := client.DescribeFleetAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetAttributes +func (c *GameLift) DescribeFleetAttributesRequest(input *DescribeFleetAttributesInput) (req *request.Request, output *DescribeFleetAttributesOutput) { + op := &request.Operation{ + Name: opDescribeFleetAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetAttributesInput{} + } + + output = &DescribeFleetAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleetAttributes API operation for Amazon GameLift. +// +// Retrieves fleet properties, including metadata, status, and configuration, +// for one or more fleets. You can request attributes for all fleets, or specify +// a list of one or more fleet IDs. When requesting multiple fleets, use the +// pagination parameters to retrieve results as a set of sequential pages. If +// successful, a FleetAttributes object is returned for each requested fleet +// ID. When specifying a list of fleet IDs, attribute objects are returned only +// for fleets that currently exist. +// +// Some API actions may limit the number of fleet IDs allowed in one request. +// If a request exceeds this limit, the request fails and the error message +// includes the maximum allowed. 
+// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeFleetAttributes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetAttributes +func (c *GameLift) DescribeFleetAttributes(input *DescribeFleetAttributesInput) (*DescribeFleetAttributesOutput, error) { + req, out := c.DescribeFleetAttributesRequest(input) + return out, req.Send() +} + +// DescribeFleetAttributesWithContext is the same as DescribeFleetAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleetAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeFleetAttributesWithContext(ctx aws.Context, input *DescribeFleetAttributesInput, opts ...request.Option) (*DescribeFleetAttributesOutput, error) { + req, out := c.DescribeFleetAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFleetCapacity = "DescribeFleetCapacity" + +// DescribeFleetCapacityRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetCapacity operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleetCapacity for more information on using the DescribeFleetCapacity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeFleetCapacityRequest method. +// req, resp := client.DescribeFleetCapacityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetCapacity +func (c *GameLift) DescribeFleetCapacityRequest(input *DescribeFleetCapacityInput) (req *request.Request, output *DescribeFleetCapacityOutput) { + op := &request.Operation{ + Name: opDescribeFleetCapacity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetCapacityInput{} + } + + output = &DescribeFleetCapacityOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleetCapacity API operation for Amazon GameLift. +// +// Retrieves the current status of fleet capacity for one or more fleets. This +// information includes the number of instances that have been requested for +// the fleet and the number currently active. You can request capacity for all +// fleets, or specify a list of one or more fleet IDs. When requesting multiple +// fleets, use the pagination parameters to retrieve results as a set of sequential +// pages. If successful, a FleetCapacity object is returned for each requested +// fleet ID. When specifying a list of fleet IDs, attribute objects are returned +// only for fleets that currently exist. +// +// Some API actions may limit the number of fleet IDs allowed in one request. +// If a request exceeds this limit, the request fails and the error message +// includes the maximum allowed. 
+// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeFleetCapacity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetCapacity +func (c *GameLift) DescribeFleetCapacity(input *DescribeFleetCapacityInput) (*DescribeFleetCapacityOutput, error) { + req, out := c.DescribeFleetCapacityRequest(input) + return out, req.Send() +} + +// DescribeFleetCapacityWithContext is the same as DescribeFleetCapacity with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleetCapacity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeFleetCapacityWithContext(ctx aws.Context, input *DescribeFleetCapacityInput, opts ...request.Option) (*DescribeFleetCapacityOutput, error) { + req, out := c.DescribeFleetCapacityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFleetEvents = "DescribeFleetEvents" + +// DescribeFleetEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetEvents operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleetEvents for more information on using the DescribeFleetEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFleetEventsRequest method. 
+// req, resp := client.DescribeFleetEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetEvents +func (c *GameLift) DescribeFleetEventsRequest(input *DescribeFleetEventsInput) (req *request.Request, output *DescribeFleetEventsOutput) { + op := &request.Operation{ + Name: opDescribeFleetEvents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetEventsInput{} + } + + output = &DescribeFleetEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleetEvents API operation for Amazon GameLift. +// +// Retrieves entries from the specified fleet's event log. You can specify a +// time range to limit the result set. Use the pagination parameters to retrieve +// results as a set of sequential pages. If successful, a collection of event +// log entries matching the request are returned. +// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeFleetEvents for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetEvents +func (c *GameLift) DescribeFleetEvents(input *DescribeFleetEventsInput) (*DescribeFleetEventsOutput, error) { + req, out := c.DescribeFleetEventsRequest(input) + return out, req.Send() +} + +// DescribeFleetEventsWithContext is the same as DescribeFleetEvents with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleetEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeFleetEventsWithContext(ctx aws.Context, input *DescribeFleetEventsInput, opts ...request.Option) (*DescribeFleetEventsOutput, error) { + req, out := c.DescribeFleetEventsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeFleetPortSettings = "DescribeFleetPortSettings" + +// DescribeFleetPortSettingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetPortSettings operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleetPortSettings for more information on using the DescribeFleetPortSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFleetPortSettingsRequest method. +// req, resp := client.DescribeFleetPortSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetPortSettings +func (c *GameLift) DescribeFleetPortSettingsRequest(input *DescribeFleetPortSettingsInput) (req *request.Request, output *DescribeFleetPortSettingsOutput) { + op := &request.Operation{ + Name: opDescribeFleetPortSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetPortSettingsInput{} + } + + output = &DescribeFleetPortSettingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleetPortSettings API operation for Amazon GameLift. +// +// Retrieves the inbound connection permissions for a fleet. Connection permissions +// include a range of IP addresses and port settings that incoming traffic can +// use to access server processes in the fleet. To get a fleet's inbound connection +// permissions, specify a fleet ID. 
If successful, a collection of IpPermission +// objects is returned for the requested fleet ID. If the requested fleet has +// been deleted, the result set is empty. +// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeFleetPortSettings for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetPortSettings +func (c *GameLift) DescribeFleetPortSettings(input *DescribeFleetPortSettingsInput) (*DescribeFleetPortSettingsOutput, error) { + req, out := c.DescribeFleetPortSettingsRequest(input) + return out, req.Send() +} + +// DescribeFleetPortSettingsWithContext is the same as DescribeFleetPortSettings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleetPortSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeFleetPortSettingsWithContext(ctx aws.Context, input *DescribeFleetPortSettingsInput, opts ...request.Option) (*DescribeFleetPortSettingsOutput, error) { + req, out := c.DescribeFleetPortSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFleetUtilization = "DescribeFleetUtilization" + +// DescribeFleetUtilizationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetUtilization operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleetUtilization for more information on using the DescribeFleetUtilization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeFleetUtilizationRequest method. +// req, resp := client.DescribeFleetUtilizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetUtilization +func (c *GameLift) DescribeFleetUtilizationRequest(input *DescribeFleetUtilizationInput) (req *request.Request, output *DescribeFleetUtilizationOutput) { + op := &request.Operation{ + Name: opDescribeFleetUtilization, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetUtilizationInput{} + } + + output = &DescribeFleetUtilizationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleetUtilization API operation for Amazon GameLift. +// +// Retrieves utilization statistics for one or more fleets. You can request +// utilization data for all fleets, or specify a list of one or more fleet IDs. +// When requesting multiple fleets, use the pagination parameters to retrieve +// results as a set of sequential pages. If successful, a FleetUtilization object +// is returned for each requested fleet ID. When specifying a list of fleet +// IDs, utilization objects are returned only for fleets that currently exist. +// +// Some API actions may limit the number of fleet IDs allowed in one request. +// If a request exceeds this limit, the request fails and the error message +// includes the maximum allowed. 
+// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeFleetUtilization for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetUtilization +func (c *GameLift) DescribeFleetUtilization(input *DescribeFleetUtilizationInput) (*DescribeFleetUtilizationOutput, error) { + req, out := c.DescribeFleetUtilizationRequest(input) + return out, req.Send() +} + +// DescribeFleetUtilizationWithContext is the same as DescribeFleetUtilization with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleetUtilization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeFleetUtilizationWithContext(ctx aws.Context, input *DescribeFleetUtilizationInput, opts ...request.Option) (*DescribeFleetUtilizationOutput, error) { + req, out := c.DescribeFleetUtilizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeGameSessionDetails = "DescribeGameSessionDetails" + +// DescribeGameSessionDetailsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameSessionDetails operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeGameSessionDetails for more information on using the DescribeGameSessionDetails +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeGameSessionDetailsRequest method. +// req, resp := client.DescribeGameSessionDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetails +func (c *GameLift) DescribeGameSessionDetailsRequest(input *DescribeGameSessionDetailsInput) (req *request.Request, output *DescribeGameSessionDetailsOutput) { + op := &request.Operation{ + Name: opDescribeGameSessionDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGameSessionDetailsInput{} + } + + output = &DescribeGameSessionDetailsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeGameSessionDetails API operation for Amazon GameLift. +// +// Retrieves properties, including the protection policy in force, for one or +// more game sessions. This action can be used in several ways: (1) provide +// a GameSessionId or GameSessionArn to request details for a specific game +// session; (2) provide either a FleetId or an AliasId to request properties +// for all game sessions running on a fleet. +// +// To get game session record(s), specify just one of the following: game session +// ID, fleet ID, or alias ID. You can filter this request by game session status. +// Use the pagination parameters to retrieve results as a set of sequential +// pages. If successful, a GameSessionDetail object is returned for each session +// matching the request. 
+// +// Game-session-related operations include: +// +// * CreateGameSession +// +// * DescribeGameSessions +// +// * DescribeGameSessionDetails +// +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeGameSessionDetails for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" +// The service is unable to resolve the routing for a particular alias because +// it has a terminal RoutingStrategy associated with it. The message returned +// in this exception is the message defined in the routing strategy itself. +// Such requests should only be retried if the routing strategy for the specified +// alias is modified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetails +func (c *GameLift) DescribeGameSessionDetails(input *DescribeGameSessionDetailsInput) (*DescribeGameSessionDetailsOutput, error) { + req, out := c.DescribeGameSessionDetailsRequest(input) + return out, req.Send() +} + +// DescribeGameSessionDetailsWithContext is the same as DescribeGameSessionDetails with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGameSessionDetails for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeGameSessionDetailsWithContext(ctx aws.Context, input *DescribeGameSessionDetailsInput, opts ...request.Option) (*DescribeGameSessionDetailsOutput, error) { + req, out := c.DescribeGameSessionDetailsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeGameSessionPlacement = "DescribeGameSessionPlacement" + +// DescribeGameSessionPlacementRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameSessionPlacement operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeGameSessionPlacement for more information on using the DescribeGameSessionPlacement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeGameSessionPlacementRequest method. +// req, resp := client.DescribeGameSessionPlacementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacement +func (c *GameLift) DescribeGameSessionPlacementRequest(input *DescribeGameSessionPlacementInput) (req *request.Request, output *DescribeGameSessionPlacementOutput) { + op := &request.Operation{ + Name: opDescribeGameSessionPlacement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGameSessionPlacementInput{} + } + + output = &DescribeGameSessionPlacementOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeGameSessionPlacement API operation for Amazon GameLift. +// +// Retrieves properties and current status of a game session placement request. +// To get game session placement details, specify the placement ID. If successful, +// a GameSessionPlacement object is returned. +// +// Game-session-related operations include: +// +// * CreateGameSession +// +// * DescribeGameSessions +// +// * DescribeGameSessionDetails +// +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeGameSessionPlacement for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. 
Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacement +func (c *GameLift) DescribeGameSessionPlacement(input *DescribeGameSessionPlacementInput) (*DescribeGameSessionPlacementOutput, error) { + req, out := c.DescribeGameSessionPlacementRequest(input) + return out, req.Send() +} + +// DescribeGameSessionPlacementWithContext is the same as DescribeGameSessionPlacement with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGameSessionPlacement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeGameSessionPlacementWithContext(ctx aws.Context, input *DescribeGameSessionPlacementInput, opts ...request.Option) (*DescribeGameSessionPlacementOutput, error) { + req, out := c.DescribeGameSessionPlacementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeGameSessionQueues = "DescribeGameSessionQueues" + +// DescribeGameSessionQueuesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameSessionQueues operation. 
The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeGameSessionQueues for more information on using the DescribeGameSessionQueues
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DescribeGameSessionQueuesRequest method.
+//    req, resp := client.DescribeGameSessionQueuesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueues
+func (c *GameLift) DescribeGameSessionQueuesRequest(input *DescribeGameSessionQueuesInput) (req *request.Request, output *DescribeGameSessionQueuesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeGameSessionQueues,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeGameSessionQueuesInput{}
+	}
+
+	output = &DescribeGameSessionQueuesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DescribeGameSessionQueues API operation for Amazon GameLift.
+//
+// Retrieves the properties for one or more game session queues. When requesting
+// multiple queues, use the pagination parameters to retrieve results as a set
+// of sequential pages. If successful, a GameSessionQueue object is returned
+// for each requested queue. When specifying a list of queues, objects are returned
+// only for queues that currently exist in the region.
+// +// Queue-related operations include: +// +// * CreateGameSessionQueue +// +// * DescribeGameSessionQueues +// +// * UpdateGameSessionQueue +// +// * DeleteGameSessionQueue +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeGameSessionQueues for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueues +func (c *GameLift) DescribeGameSessionQueues(input *DescribeGameSessionQueuesInput) (*DescribeGameSessionQueuesOutput, error) { + req, out := c.DescribeGameSessionQueuesRequest(input) + return out, req.Send() +} + +// DescribeGameSessionQueuesWithContext is the same as DescribeGameSessionQueues with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGameSessionQueues for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) DescribeGameSessionQueuesWithContext(ctx aws.Context, input *DescribeGameSessionQueuesInput, opts ...request.Option) (*DescribeGameSessionQueuesOutput, error) {
+	req, out := c.DescribeGameSessionQueuesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDescribeGameSessions = "DescribeGameSessions"
+
+// DescribeGameSessionsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeGameSessions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeGameSessions for more information on using the DescribeGameSessions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DescribeGameSessionsRequest method.
+// req, resp := client.DescribeGameSessionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessions +func (c *GameLift) DescribeGameSessionsRequest(input *DescribeGameSessionsInput) (req *request.Request, output *DescribeGameSessionsOutput) { + op := &request.Operation{ + Name: opDescribeGameSessions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGameSessionsInput{} + } + + output = &DescribeGameSessionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeGameSessions API operation for Amazon GameLift. +// +// Retrieves a set of one or more game sessions. Request a specific game session +// or request all game sessions on a fleet. Alternatively, use SearchGameSessions +// to request a set of active game sessions that are filtered by certain criteria. +// To retrieve protection policy settings for game sessions, use DescribeGameSessionDetails. +// +// To get game sessions, specify one of the following: game session ID, fleet +// ID, or alias ID. You can filter this request by game session status. Use +// the pagination parameters to retrieve results as a set of sequential pages. +// If successful, a GameSession object is returned for each game session matching +// the request. +// +// Available in Amazon GameLift Local. +// +// Game-session-related operations include: +// +// * CreateGameSession +// +// * DescribeGameSessions +// +// * DescribeGameSessionDetails +// +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeGameSessions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" +// The service is unable to resolve the routing for a particular alias because +// it has a terminal RoutingStrategy associated with it. The message returned +// in this exception is the message defined in the routing strategy itself. +// Such requests should only be retried if the routing strategy for the specified +// alias is modified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessions +func (c *GameLift) DescribeGameSessions(input *DescribeGameSessionsInput) (*DescribeGameSessionsOutput, error) { + req, out := c.DescribeGameSessionsRequest(input) + return out, req.Send() +} + +// DescribeGameSessionsWithContext is the same as DescribeGameSessions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGameSessions for details on how to use this API operation. 
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) DescribeGameSessionsWithContext(ctx aws.Context, input *DescribeGameSessionsInput, opts ...request.Option) (*DescribeGameSessionsOutput, error) {
+	req, out := c.DescribeGameSessionsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDescribeInstances = "DescribeInstances"
+
+// DescribeInstancesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeInstances operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeInstances for more information on using the DescribeInstances
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DescribeInstancesRequest method.
+// req, resp := client.DescribeInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstances +func (c *GameLift) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { + op := &request.Operation{ + Name: opDescribeInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstancesInput{} + } + + output = &DescribeInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstances API operation for Amazon GameLift. +// +// Retrieves information about a fleet's instances, including instance IDs. +// Use this action to get details on all instances in the fleet or get details +// on one specific instance. +// +// To get a specific instance, specify fleet ID and instance ID. To get all +// instances in a fleet, specify a fleet ID only. Use the pagination parameters +// to retrieve results as a set of sequential pages. If successful, an Instance +// object is returned for each result. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeInstances for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. 
Clients
+//   should not retry such requests.
+//
+//   * ErrCodeInternalServiceException "InternalServiceException"
+//   The service encountered an unrecoverable internal failure while processing
+//   the request. Clients can retry such requests immediately or after a waiting
+//   period.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstances
+func (c *GameLift) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) {
+	req, out := c.DescribeInstancesRequest(input)
+	return out, req.Send()
+}
+
+// DescribeInstancesWithContext is the same as DescribeInstances with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeInstances for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) DescribeInstancesWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.Option) (*DescribeInstancesOutput, error) {
+	req, out := c.DescribeInstancesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDescribeMatchmaking = "DescribeMatchmaking"
+
+// DescribeMatchmakingRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeMatchmaking operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeMatchmaking for more information on using the DescribeMatchmaking
+// API call, and error handling.
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMatchmakingRequest method. +// req, resp := client.DescribeMatchmakingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmaking +func (c *GameLift) DescribeMatchmakingRequest(input *DescribeMatchmakingInput) (req *request.Request, output *DescribeMatchmakingOutput) { + op := &request.Operation{ + Name: opDescribeMatchmaking, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeMatchmakingInput{} + } + + output = &DescribeMatchmakingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMatchmaking API operation for Amazon GameLift. +// +// Retrieves a set of one or more matchmaking tickets. Use this operation to +// retrieve ticket information, including status and--once a successful match +// is made--acquire connection information for the resulting new game session. +// +// You can use this operation to track the progress of matchmaking requests +// (through polling) as an alternative to using event notifications. See more +// details on tracking matchmaking requests through polling or notifications +// in StartMatchmaking. +// +// You can request data for a one or a list of ticket IDs. If the request is +// successful, a ticket object is returned for each requested ID. When specifying +// a list of ticket IDs, objects are returned only for tickets that currently +// exist. +// +// Matchmaking-related operations include: +// +// * StartMatchmaking +// +// * DescribeMatchmaking +// +// * StopMatchmaking +// +// * AcceptMatch +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeMatchmaking for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" +// The requested operation is not supported in the region specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmaking +func (c *GameLift) DescribeMatchmaking(input *DescribeMatchmakingInput) (*DescribeMatchmakingOutput, error) { + req, out := c.DescribeMatchmakingRequest(input) + return out, req.Send() +} + +// DescribeMatchmakingWithContext is the same as DescribeMatchmaking with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMatchmaking for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeMatchmakingWithContext(ctx aws.Context, input *DescribeMatchmakingInput, opts ...request.Option) (*DescribeMatchmakingOutput, error) { + req, out := c.DescribeMatchmakingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+	return out, req.Send()
+}
+
+const opDescribeMatchmakingConfigurations = "DescribeMatchmakingConfigurations"
+
+// DescribeMatchmakingConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeMatchmakingConfigurations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeMatchmakingConfigurations for more information on using the DescribeMatchmakingConfigurations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DescribeMatchmakingConfigurationsRequest method.
+//    req, resp := client.DescribeMatchmakingConfigurationsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingConfigurations
+func (c *GameLift) DescribeMatchmakingConfigurationsRequest(input *DescribeMatchmakingConfigurationsInput) (req *request.Request, output *DescribeMatchmakingConfigurationsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeMatchmakingConfigurations,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeMatchmakingConfigurationsInput{}
+	}
+
+	output = &DescribeMatchmakingConfigurationsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DescribeMatchmakingConfigurations API operation for Amazon GameLift.
+//
+// Retrieves the details of FlexMatch matchmaking configurations.
With this
+// operation, you have the following options: (1) retrieve all existing configurations,
+// (2) provide the names of one or more configurations to retrieve, or (3) retrieve
+// all configurations that use a specified rule set name. When requesting multiple
+// items, use the pagination parameters to retrieve results as a set of sequential
+// pages. If successful, a configuration is returned for each requested name.
+// When specifying a list of names, only configurations that currently exist
+// are returned.
+//
+// Operations related to match configurations and rule sets include:
+//
+//    * CreateMatchmakingConfiguration
+//
+//    * DescribeMatchmakingConfigurations
+//
+//    * UpdateMatchmakingConfiguration
+//
+//    * DeleteMatchmakingConfiguration
+//
+//    * CreateMatchmakingRuleSet
+//
+//    * DescribeMatchmakingRuleSets
+//
+//    * ValidateMatchmakingRuleSet
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon GameLift's
+// API operation DescribeMatchmakingConfigurations for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInvalidRequestException "InvalidRequestException"
+//   One or more parameter values in the request are invalid. Correct the invalid
+//   parameter values before retrying.
+//
+//   * ErrCodeInternalServiceException "InternalServiceException"
+//   The service encountered an unrecoverable internal failure while processing
+//   the request. Clients can retry such requests immediately or after a waiting
+//   period.
+//
+//   * ErrCodeUnsupportedRegionException "UnsupportedRegionException"
+//   The requested operation is not supported in the region specified.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingConfigurations
+func (c *GameLift) DescribeMatchmakingConfigurations(input *DescribeMatchmakingConfigurationsInput) (*DescribeMatchmakingConfigurationsOutput, error) {
+	req, out := c.DescribeMatchmakingConfigurationsRequest(input)
+	return out, req.Send()
+}
+
+// DescribeMatchmakingConfigurationsWithContext is the same as DescribeMatchmakingConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeMatchmakingConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) DescribeMatchmakingConfigurationsWithContext(ctx aws.Context, input *DescribeMatchmakingConfigurationsInput, opts ...request.Option) (*DescribeMatchmakingConfigurationsOutput, error) {
+	req, out := c.DescribeMatchmakingConfigurationsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDescribeMatchmakingRuleSets = "DescribeMatchmakingRuleSets"
+
+// DescribeMatchmakingRuleSetsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeMatchmakingRuleSets operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeMatchmakingRuleSets for more information on using the DescribeMatchmakingRuleSets
+// API call, and error handling.
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMatchmakingRuleSetsRequest method. +// req, resp := client.DescribeMatchmakingRuleSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingRuleSets +func (c *GameLift) DescribeMatchmakingRuleSetsRequest(input *DescribeMatchmakingRuleSetsInput) (req *request.Request, output *DescribeMatchmakingRuleSetsOutput) { + op := &request.Operation{ + Name: opDescribeMatchmakingRuleSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeMatchmakingRuleSetsInput{} + } + + output = &DescribeMatchmakingRuleSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMatchmakingRuleSets API operation for Amazon GameLift. +// +// Retrieves the details for FlexMatch matchmaking rule sets. You can request +// all existing rule sets for the region, or provide a list of one or more rule +// set names. When requesting multiple items, use the pagination parameters +// to retrieve results as a set of sequential pages. If successful, a rule set +// is returned for each requested name. +// +// Operations related to match configurations and rule sets include: +// +// * CreateMatchmakingConfiguration +// +// * DescribeMatchmakingConfigurations +// +// * UpdateMatchmakingConfiguration +// +// * DeleteMatchmakingConfiguration +// +// * CreateMatchmakingRuleSet +// +// * DescribeMatchmakingRuleSets +// +// * ValidateMatchmakingRuleSet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeMatchmakingRuleSets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" +// The requested operation is not supported in the region specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingRuleSets +func (c *GameLift) DescribeMatchmakingRuleSets(input *DescribeMatchmakingRuleSetsInput) (*DescribeMatchmakingRuleSetsOutput, error) { + req, out := c.DescribeMatchmakingRuleSetsRequest(input) + return out, req.Send() +} + +// DescribeMatchmakingRuleSetsWithContext is the same as DescribeMatchmakingRuleSets with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMatchmakingRuleSets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *GameLift) DescribeMatchmakingRuleSetsWithContext(ctx aws.Context, input *DescribeMatchmakingRuleSetsInput, opts ...request.Option) (*DescribeMatchmakingRuleSetsOutput, error) {
+	req, out := c.DescribeMatchmakingRuleSetsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDescribePlayerSessions = "DescribePlayerSessions"
+
+// DescribePlayerSessionsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribePlayerSessions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribePlayerSessions for more information on using the DescribePlayerSessions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DescribePlayerSessionsRequest method.
+//    req, resp := client.DescribePlayerSessionsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribePlayerSessions
+func (c *GameLift) DescribePlayerSessionsRequest(input *DescribePlayerSessionsInput) (req *request.Request, output *DescribePlayerSessionsOutput) {
+	op := &request.Operation{
+		Name:       opDescribePlayerSessions,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribePlayerSessionsInput{}
+	}
+
+	output = &DescribePlayerSessionsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DescribePlayerSessions API operation for Amazon GameLift.
+// +// Retrieves properties for one or more player sessions. This action can be +// used in several ways: (1) provide a PlayerSessionId to request properties +// for a specific player session; (2) provide a GameSessionId to request properties +// for all player sessions in the specified game session; (3) provide a PlayerId +// to request properties for all player sessions of a specified player. +// +// To get game session record(s), specify only one of the following: a player +// session ID, a game session ID, or a player ID. You can filter this request +// by player session status. Use the pagination parameters to retrieve results +// as a set of sequential pages. If successful, a PlayerSession object is returned +// for each session matching the request. +// +// Available in Amazon GameLift Local. +// +// Player-session-related operations include: +// +// * CreatePlayerSession +// +// * CreatePlayerSessions +// +// * DescribePlayerSessions +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribePlayerSessions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. 
Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribePlayerSessions +func (c *GameLift) DescribePlayerSessions(input *DescribePlayerSessionsInput) (*DescribePlayerSessionsOutput, error) { + req, out := c.DescribePlayerSessionsRequest(input) + return out, req.Send() +} + +// DescribePlayerSessionsWithContext is the same as DescribePlayerSessions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePlayerSessions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribePlayerSessionsWithContext(ctx aws.Context, input *DescribePlayerSessionsInput, opts ...request.Option) (*DescribePlayerSessionsOutput, error) { + req, out := c.DescribePlayerSessionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeRuntimeConfiguration = "DescribeRuntimeConfiguration" + +// DescribeRuntimeConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRuntimeConfiguration operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeRuntimeConfiguration for more information on using the DescribeRuntimeConfiguration +// API call, and error handling. 
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DescribeRuntimeConfigurationRequest method.
+//    req, resp := client.DescribeRuntimeConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeRuntimeConfiguration
+func (c *GameLift) DescribeRuntimeConfigurationRequest(input *DescribeRuntimeConfigurationInput) (req *request.Request, output *DescribeRuntimeConfigurationOutput) {
+	op := &request.Operation{
+		Name:       opDescribeRuntimeConfiguration,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeRuntimeConfigurationInput{}
+	}
+
+	output = &DescribeRuntimeConfigurationOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DescribeRuntimeConfiguration API operation for Amazon GameLift.
+//
+// Retrieves the current run-time configuration for the specified fleet. The
+// run-time configuration tells Amazon GameLift how to launch server processes
+// on instances in the fleet.
+//
+// Fleet-related operations include:
+//
+//    * CreateFleet
+//
+//    * ListFleets
+//
+//    * Describe fleets:
+//
+//    DescribeFleetAttributes
+//
+//    DescribeFleetPortSettings
+//
+//    DescribeFleetUtilization
+//
+//    DescribeRuntimeConfiguration
+//
+//    DescribeFleetEvents
+//
+//    * Update fleets:
+//
+//    UpdateFleetAttributes
+//
+//    UpdateFleetCapacity
+//
+//    UpdateFleetPortSettings
+//
+//    UpdateRuntimeConfiguration
+//
+//    * Manage fleet capacity:
+//
+//    DescribeFleetCapacity
+//
+//    UpdateFleetCapacity
+//
+//    PutScalingPolicy (automatic scaling)
+//
+//    DescribeScalingPolicies (automatic scaling)
+//
+//    DeleteScalingPolicy (automatic scaling)
+//
+//    DescribeEC2InstanceLimits
+//
+//    * DeleteFleet
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon GameLift's
+// API operation DescribeRuntimeConfiguration for usage and error information.
+//
+// Returned Error Codes:
+//    * ErrCodeUnauthorizedException "UnauthorizedException"
+//    The client failed authentication. Clients should not retry such requests.
+//
+//    * ErrCodeNotFoundException "NotFoundException"
+//    A service resource associated with the request could not be found. Clients
+//    should not retry such requests.
+//
+//    * ErrCodeInternalServiceException "InternalServiceException"
+//    The service encountered an unrecoverable internal failure while processing
+//    the request. Clients can retry such requests immediately or after a waiting
+//    period.
+//
+//    * ErrCodeInvalidRequestException "InvalidRequestException"
+//    One or more parameter values in the request are invalid. Correct the invalid
+//    parameter values before retrying.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeRuntimeConfiguration
+func (c *GameLift) DescribeRuntimeConfiguration(input *DescribeRuntimeConfigurationInput) (*DescribeRuntimeConfigurationOutput, error) {
+	req, out := c.DescribeRuntimeConfigurationRequest(input)
+	return out, req.Send()
+}
+
+// DescribeRuntimeConfigurationWithContext is the same as DescribeRuntimeConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeRuntimeConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) DescribeRuntimeConfigurationWithContext(ctx aws.Context, input *DescribeRuntimeConfigurationInput, opts ...request.Option) (*DescribeRuntimeConfigurationOutput, error) {
+	req, out := c.DescribeRuntimeConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDescribeScalingPolicies = "DescribeScalingPolicies"
+
+// DescribeScalingPoliciesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeScalingPolicies operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeScalingPolicies for more information on using the DescribeScalingPolicies
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DescribeScalingPoliciesRequest method.
+//    req, resp := client.DescribeScalingPoliciesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeScalingPolicies
+func (c *GameLift) DescribeScalingPoliciesRequest(input *DescribeScalingPoliciesInput) (req *request.Request, output *DescribeScalingPoliciesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeScalingPolicies,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeScalingPoliciesInput{}
+	}
+
+	output = &DescribeScalingPoliciesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DescribeScalingPolicies API operation for Amazon GameLift.
+//
+// Retrieves all scaling policies applied to a fleet.
+//
+// To get a fleet's scaling policies, specify the fleet ID. You can filter this
+// request by policy status, such as to retrieve only active scaling policies.
+// Use the pagination parameters to retrieve results as a set of sequential
+// pages. If successful, a set of ScalingPolicy objects is returned for the fleet.
+//
+// Fleet-related operations include:
+//
+//    * CreateFleet
+//
+//    * ListFleets
+//
+//    * Describe fleets:
+//
+//    DescribeFleetAttributes
+//
+//    DescribeFleetPortSettings
+//
+//    DescribeFleetUtilization
+//
+//    DescribeRuntimeConfiguration
+//
+//    DescribeFleetEvents
+//
+//    * Update fleets:
+//
+//    UpdateFleetAttributes
+//
+//    UpdateFleetCapacity
+//
+//    UpdateFleetPortSettings
+//
+//    UpdateRuntimeConfiguration
+//
+//    * Manage fleet capacity:
+//
+//    DescribeFleetCapacity
+//
+//    UpdateFleetCapacity
+//
+//    PutScalingPolicy (automatic scaling)
+//
+//    DescribeScalingPolicies (automatic scaling)
+//
+//    DeleteScalingPolicy (automatic scaling)
+//
+//    DescribeEC2InstanceLimits
+//
+//    * DeleteFleet
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon GameLift's
+// API operation DescribeScalingPolicies for usage and error information.
+//
+// Returned Error Codes:
+//    * ErrCodeInternalServiceException "InternalServiceException"
+//    The service encountered an unrecoverable internal failure while processing
+//    the request. Clients can retry such requests immediately or after a waiting
+//    period.
+//
+//    * ErrCodeInvalidRequestException "InvalidRequestException"
+//    One or more parameter values in the request are invalid. Correct the invalid
+//    parameter values before retrying.
+//
+//    * ErrCodeUnauthorizedException "UnauthorizedException"
+//    The client failed authentication. Clients should not retry such requests.
+//
+//    * ErrCodeNotFoundException "NotFoundException"
+//    A service resource associated with the request could not be found. Clients
+//    should not retry such requests.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeScalingPolicies
+func (c *GameLift) DescribeScalingPolicies(input *DescribeScalingPoliciesInput) (*DescribeScalingPoliciesOutput, error) {
+	req, out := c.DescribeScalingPoliciesRequest(input)
+	return out, req.Send()
+}
+
+// DescribeScalingPoliciesWithContext is the same as DescribeScalingPolicies with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeScalingPolicies for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) DescribeScalingPoliciesWithContext(ctx aws.Context, input *DescribeScalingPoliciesInput, opts ...request.Option) (*DescribeScalingPoliciesOutput, error) {
+	req, out := c.DescribeScalingPoliciesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDescribeVpcPeeringAuthorizations = "DescribeVpcPeeringAuthorizations"
+
+// DescribeVpcPeeringAuthorizationsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVpcPeeringAuthorizations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeVpcPeeringAuthorizations for more information on using the DescribeVpcPeeringAuthorizations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DescribeVpcPeeringAuthorizationsRequest method.
+//    req, resp := client.DescribeVpcPeeringAuthorizationsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringAuthorizations
+func (c *GameLift) DescribeVpcPeeringAuthorizationsRequest(input *DescribeVpcPeeringAuthorizationsInput) (req *request.Request, output *DescribeVpcPeeringAuthorizationsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeVpcPeeringAuthorizations,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeVpcPeeringAuthorizationsInput{}
+	}
+
+	output = &DescribeVpcPeeringAuthorizationsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DescribeVpcPeeringAuthorizations API operation for Amazon GameLift.
+//
+// Retrieves valid VPC peering authorizations that are pending for the AWS account.
+// This operation returns all VPC peering authorizations and requests for peering.
+// This includes those initiated and received by this account.
+//
+// VPC peering connection operations include:
+//
+//    * CreateVpcPeeringAuthorization
+//
+//    * DescribeVpcPeeringAuthorizations
+//
+//    * DeleteVpcPeeringAuthorization
+//
+//    * CreateVpcPeeringConnection
+//
+//    * DescribeVpcPeeringConnections
+//
+//    * DeleteVpcPeeringConnection
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon GameLift's
+// API operation DescribeVpcPeeringAuthorizations for usage and error information.
+//
+// Returned Error Codes:
+//    * ErrCodeUnauthorizedException "UnauthorizedException"
+//    The client failed authentication. Clients should not retry such requests.
+//
+//    * ErrCodeInvalidRequestException "InvalidRequestException"
+//    One or more parameter values in the request are invalid. Correct the invalid
+//    parameter values before retrying.
+//
+//    * ErrCodeInternalServiceException "InternalServiceException"
+//    The service encountered an unrecoverable internal failure while processing
+//    the request. Clients can retry such requests immediately or after a waiting
+//    period.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringAuthorizations
+func (c *GameLift) DescribeVpcPeeringAuthorizations(input *DescribeVpcPeeringAuthorizationsInput) (*DescribeVpcPeeringAuthorizationsOutput, error) {
+	req, out := c.DescribeVpcPeeringAuthorizationsRequest(input)
+	return out, req.Send()
+}
+
+// DescribeVpcPeeringAuthorizationsWithContext is the same as DescribeVpcPeeringAuthorizations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeVpcPeeringAuthorizations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) DescribeVpcPeeringAuthorizationsWithContext(ctx aws.Context, input *DescribeVpcPeeringAuthorizationsInput, opts ...request.Option) (*DescribeVpcPeeringAuthorizationsOutput, error) {
+	req, out := c.DescribeVpcPeeringAuthorizationsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections"
+
+// DescribeVpcPeeringConnectionsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVpcPeeringConnections operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeVpcPeeringConnections for more information on using the DescribeVpcPeeringConnections
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DescribeVpcPeeringConnectionsRequest method.
+//    req, resp := client.DescribeVpcPeeringConnectionsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringConnections
+func (c *GameLift) DescribeVpcPeeringConnectionsRequest(input *DescribeVpcPeeringConnectionsInput) (req *request.Request, output *DescribeVpcPeeringConnectionsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeVpcPeeringConnections,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeVpcPeeringConnectionsInput{}
+	}
+
+	output = &DescribeVpcPeeringConnectionsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DescribeVpcPeeringConnections API operation for Amazon GameLift.
+//
+// Retrieves information on VPC peering connections. Use this operation to get
+// peering information for all fleets or for one specific fleet ID.
+//
+// To retrieve connection information, call this operation from the AWS account
+// that is used to manage the Amazon GameLift fleets. Specify a fleet ID or
+// leave the parameter empty to retrieve all connection records. If successful,
+// the retrieved information includes both active and pending connections. Active
+// connections identify the IpV4 CIDR block that the VPC uses to connect.
+//
+// VPC peering connection operations include:
+//
+//    * CreateVpcPeeringAuthorization
+//
+//    * DescribeVpcPeeringAuthorizations
+//
+//    * DeleteVpcPeeringAuthorization
+//
+//    * CreateVpcPeeringConnection
+//
+//    * DescribeVpcPeeringConnections
+//
+//    * DeleteVpcPeeringConnection
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon GameLift's
+// API operation DescribeVpcPeeringConnections for usage and error information.
+//
+// Returned Error Codes:
+//    * ErrCodeUnauthorizedException "UnauthorizedException"
+//    The client failed authentication. Clients should not retry such requests.
+//
+//    * ErrCodeInvalidRequestException "InvalidRequestException"
+//    One or more parameter values in the request are invalid. Correct the invalid
+//    parameter values before retrying.
+//
+//    * ErrCodeNotFoundException "NotFoundException"
+//    A service resource associated with the request could not be found. Clients
+//    should not retry such requests.
+//
+//    * ErrCodeInternalServiceException "InternalServiceException"
+//    The service encountered an unrecoverable internal failure while processing
+//    the request. Clients can retry such requests immediately or after a waiting
+//    period.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringConnections
+func (c *GameLift) DescribeVpcPeeringConnections(input *DescribeVpcPeeringConnectionsInput) (*DescribeVpcPeeringConnectionsOutput, error) {
+	req, out := c.DescribeVpcPeeringConnectionsRequest(input)
+	return out, req.Send()
+}
+
+// DescribeVpcPeeringConnectionsWithContext is the same as DescribeVpcPeeringConnections with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeVpcPeeringConnections for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) DescribeVpcPeeringConnectionsWithContext(ctx aws.Context, input *DescribeVpcPeeringConnectionsInput, opts ...request.Option) (*DescribeVpcPeeringConnectionsOutput, error) {
+	req, out := c.DescribeVpcPeeringConnectionsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetGameSessionLogUrl = "GetGameSessionLogUrl"
+
+// GetGameSessionLogUrlRequest generates a "aws/request.Request" representing the
+// client's request for the GetGameSessionLogUrl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetGameSessionLogUrl for more information on using the GetGameSessionLogUrl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the GetGameSessionLogUrlRequest method.
+//    req, resp := client.GetGameSessionLogUrlRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrl
+func (c *GameLift) GetGameSessionLogUrlRequest(input *GetGameSessionLogUrlInput) (req *request.Request, output *GetGameSessionLogUrlOutput) {
+	op := &request.Operation{
+		Name:       opGetGameSessionLogUrl,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetGameSessionLogUrlInput{}
+	}
+
+	output = &GetGameSessionLogUrlOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetGameSessionLogUrl API operation for Amazon GameLift.
+//
+// Retrieves the location of stored game session logs for a specified game session.
+// When a game session is terminated, Amazon GameLift automatically stores the
+// logs in Amazon S3 and retains them for 14 days. Use this URL to download
+// the logs.
+//
+// See the AWS Service Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift)
+// page for maximum log file sizes. Log files that exceed this limit are not
+// saved.
+//
+// Game-session-related operations include:
+//
+//    * CreateGameSession
+//
+//    * DescribeGameSessions
+//
+//    * DescribeGameSessionDetails
+//
+//    * SearchGameSessions
+//
+//    * UpdateGameSession
+//
+//    * GetGameSessionLogUrl
+//
+//    * Game session placements
+//
+//    StartGameSessionPlacement
+//
+//    DescribeGameSessionPlacement
+//
+//    StopGameSessionPlacement
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon GameLift's
+// API operation GetGameSessionLogUrl for usage and error information.
+//
+// Returned Error Codes:
+//    * ErrCodeInternalServiceException "InternalServiceException"
+//    The service encountered an unrecoverable internal failure while processing
+//    the request. Clients can retry such requests immediately or after a waiting
+//    period.
+//
+//    * ErrCodeNotFoundException "NotFoundException"
+//    A service resource associated with the request could not be found. Clients
+//    should not retry such requests.
+//
+//    * ErrCodeUnauthorizedException "UnauthorizedException"
+//    The client failed authentication. Clients should not retry such requests.
+//
+//    * ErrCodeInvalidRequestException "InvalidRequestException"
+//    One or more parameter values in the request are invalid. Correct the invalid
+//    parameter values before retrying.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrl
+func (c *GameLift) GetGameSessionLogUrl(input *GetGameSessionLogUrlInput) (*GetGameSessionLogUrlOutput, error) {
+	req, out := c.GetGameSessionLogUrlRequest(input)
+	return out, req.Send()
+}
+
+// GetGameSessionLogUrlWithContext is the same as GetGameSessionLogUrl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetGameSessionLogUrl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) GetGameSessionLogUrlWithContext(ctx aws.Context, input *GetGameSessionLogUrlInput, opts ...request.Option) (*GetGameSessionLogUrlOutput, error) {
+	req, out := c.GetGameSessionLogUrlRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetInstanceAccess = "GetInstanceAccess"
+
+// GetInstanceAccessRequest generates a "aws/request.Request" representing the
+// client's request for the GetInstanceAccess operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInstanceAccess for more information on using the GetInstanceAccess
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the GetInstanceAccessRequest method.
+//    req, resp := client.GetInstanceAccessRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccess
+func (c *GameLift) GetInstanceAccessRequest(input *GetInstanceAccessInput) (req *request.Request, output *GetInstanceAccessOutput) {
+	op := &request.Operation{
+		Name:       opGetInstanceAccess,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetInstanceAccessInput{}
+	}
+
+	output = &GetInstanceAccessOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetInstanceAccess API operation for Amazon GameLift.
+//
+// Requests remote access to a fleet instance. Remote access is useful for debugging,
+// gathering benchmarking data, or watching activity in real time.
+//
+// Access requires credentials that match the operating system of the instance.
+// For a Windows instance, Amazon GameLift returns a user name and password
+// as strings for use with a Windows Remote Desktop client. For a Linux instance,
+// Amazon GameLift returns a user name and RSA private key, also as strings,
+// for use with an SSH client. The private key must be saved in the proper format
+// to a .pem file before using. If you're making this request using the AWS
+// CLI, saving the secret can be handled as part of the GetInstanceAccess request.
+// (See the example later in this topic). For more information on remote access,
+// see Remotely Accessing an Instance (http://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-remote-access.html).
+//
+// To request access to a specific instance, specify the IDs of the instance
+// and the fleet it belongs to. If successful, an InstanceAccess object is returned
+// containing the instance's IP address and a set of credentials.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon GameLift's
+// API operation GetInstanceAccess for usage and error information.
+//
+// Returned Error Codes:
+//    * ErrCodeUnauthorizedException "UnauthorizedException"
+//    The client failed authentication. Clients should not retry such requests.
+//
+//    * ErrCodeInvalidRequestException "InvalidRequestException"
+//    One or more parameter values in the request are invalid. Correct the invalid
+//    parameter values before retrying.
+//
+//    * ErrCodeNotFoundException "NotFoundException"
+//    A service resource associated with the request could not be found. Clients
+//    should not retry such requests.
+//
+//    * ErrCodeInternalServiceException "InternalServiceException"
+//    The service encountered an unrecoverable internal failure while processing
+//    the request. Clients can retry such requests immediately or after a waiting
+//    period.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccess
+func (c *GameLift) GetInstanceAccess(input *GetInstanceAccessInput) (*GetInstanceAccessOutput, error) {
+	req, out := c.GetInstanceAccessRequest(input)
+	return out, req.Send()
+}
+
+// GetInstanceAccessWithContext is the same as GetInstanceAccess with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInstanceAccess for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) GetInstanceAccessWithContext(ctx aws.Context, input *GetInstanceAccessInput, opts ...request.Option) (*GetInstanceAccessOutput, error) {
+	req, out := c.GetInstanceAccessRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opListAliases = "ListAliases"
+
+// ListAliasesRequest generates a "aws/request.Request" representing the
+// client's request for the ListAliases operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListAliases for more information on using the ListAliases
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the ListAliasesRequest method.
+//    req, resp := client.ListAliasesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliases
+func (c *GameLift) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) {
+	op := &request.Operation{
+		Name:       opListAliases,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ListAliasesInput{}
+	}
+
+	output = &ListAliasesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ListAliases API operation for Amazon GameLift.
+//
+// Retrieves all aliases for this AWS account. You can filter the result set
+// by alias name and/or routing strategy type. Use the pagination parameters
+// to retrieve results in sequential pages.
+//
+// Returned aliases are not listed in any particular order.
+//
+// Alias-related operations include:
+//
+//    * CreateAlias
+//
+//    * ListAliases
+//
+//    * DescribeAlias
+//
+//    * UpdateAlias
+//
+//    * DeleteAlias
+//
+//    * ResolveAlias
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon GameLift's
+// API operation ListAliases for usage and error information.
+//
+// Returned Error Codes:
+//    * ErrCodeUnauthorizedException "UnauthorizedException"
+//    The client failed authentication. Clients should not retry such requests.
+//
+//    * ErrCodeInvalidRequestException "InvalidRequestException"
+//    One or more parameter values in the request are invalid. Correct the invalid
+//    parameter values before retrying.
+//
+//    * ErrCodeInternalServiceException "InternalServiceException"
+//    The service encountered an unrecoverable internal failure while processing
+//    the request. Clients can retry such requests immediately or after a waiting
+//    period.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliases
+func (c *GameLift) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) {
+	req, out := c.ListAliasesRequest(input)
+	return out, req.Send()
+}
+
+// ListAliasesWithContext is the same as ListAliases with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListAliases for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *GameLift) ListAliasesWithContext(ctx aws.Context, input *ListAliasesInput, opts ...request.Option) (*ListAliasesOutput, error) {
+	req, out := c.ListAliasesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opListBuilds = "ListBuilds"
+
+// ListBuildsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBuilds operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListBuilds for more information on using the ListBuilds
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the ListBuildsRequest method.
+// req, resp := client.ListBuildsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuilds +func (c *GameLift) ListBuildsRequest(input *ListBuildsInput) (req *request.Request, output *ListBuildsOutput) { + op := &request.Operation{ + Name: opListBuilds, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListBuildsInput{} + } + + output = &ListBuildsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuilds API operation for Amazon GameLift. +// +// Retrieves build records for all builds associated with the AWS account in +// use. You can limit results to builds that are in a specific status by using +// the Status parameter. Use the pagination parameters to retrieve results in +// a set of sequential pages. +// +// Build records are not listed in any particular order. +// +// Build-related operations include: +// +// * CreateBuild +// +// * ListBuilds +// +// * DescribeBuild +// +// * UpdateBuild +// +// * DeleteBuild +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation ListBuilds for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. 
Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuilds +func (c *GameLift) ListBuilds(input *ListBuildsInput) (*ListBuildsOutput, error) { + req, out := c.ListBuildsRequest(input) + return out, req.Send() +} + +// ListBuildsWithContext is the same as ListBuilds with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuilds for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) ListBuildsWithContext(ctx aws.Context, input *ListBuildsInput, opts ...request.Option) (*ListBuildsOutput, error) { + req, out := c.ListBuildsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListFleets = "ListFleets" + +// ListFleetsRequest generates a "aws/request.Request" representing the +// client's request for the ListFleets operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListFleets for more information on using the ListFleets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListFleetsRequest method. 
+// req, resp := client.ListFleetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleets +func (c *GameLift) ListFleetsRequest(input *ListFleetsInput) (req *request.Request, output *ListFleetsOutput) { + op := &request.Operation{ + Name: opListFleets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListFleetsInput{} + } + + output = &ListFleetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListFleets API operation for Amazon GameLift. +// +// Retrieves a collection of fleet records for this AWS account. You can filter +// the result set by build ID. Use the pagination parameters to retrieve results +// in sequential pages. +// +// Fleet records are not listed in any particular order. +// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation ListFleets for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleets +func (c *GameLift) ListFleets(input *ListFleetsInput) (*ListFleetsOutput, error) { + req, out := c.ListFleetsRequest(input) + return out, req.Send() +} + +// ListFleetsWithContext is the same as ListFleets with the addition of +// the ability to pass a context and additional request options. +// +// See ListFleets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) ListFleetsWithContext(ctx aws.Context, input *ListFleetsInput, opts ...request.Option) (*ListFleetsOutput, error) { + req, out := c.ListFleetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutScalingPolicy = "PutScalingPolicy" + +// PutScalingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutScalingPolicy operation. 
The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutScalingPolicy for more information on using the PutScalingPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutScalingPolicyRequest method. +// req, resp := client.PutScalingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PutScalingPolicy +func (c *GameLift) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *request.Request, output *PutScalingPolicyOutput) { + op := &request.Operation{ + Name: opPutScalingPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutScalingPolicyInput{} + } + + output = &PutScalingPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutScalingPolicy API operation for Amazon GameLift. +// +// Creates or updates a scaling policy for a fleet. An active scaling policy +// prompts Amazon GameLift to track a certain metric for a fleet and automatically +// change the fleet's capacity in specific circumstances. Each scaling policy +// contains one rule statement. Fleets can have multiple scaling policies in +// force simultaneously. +// +// A scaling policy rule statement has the following structure: +// +// If [MetricName] is [ComparisonOperator][Threshold] for [EvaluationPeriods] +// minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment]. 
+// +// For example, this policy: "If the number of idle instances exceeds 20 for +// more than 15 minutes, then reduce the fleet capacity by 10 instances" could +// be implemented as the following rule statement: +// +// If [IdleInstances] is [GreaterThanOrEqualToThreshold] [20] for [15] minutes, +// then [ChangeInCapacity] by [-10]. +// +// To create or update a scaling policy, specify a unique combination of name +// and fleet ID, and set the rule values. All parameters for this action are +// required. If successful, the policy name is returned. Scaling policies cannot +// be suspended or made inactive. To stop enforcing a scaling policy, call DeleteScalingPolicy. +// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation PutScalingPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. 
+// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PutScalingPolicy +func (c *GameLift) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalingPolicyOutput, error) { + req, out := c.PutScalingPolicyRequest(input) + return out, req.Send() +} + +// PutScalingPolicyWithContext is the same as PutScalingPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutScalingPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) PutScalingPolicyWithContext(ctx aws.Context, input *PutScalingPolicyInput, opts ...request.Option) (*PutScalingPolicyOutput, error) { + req, out := c.PutScalingPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRequestUploadCredentials = "RequestUploadCredentials" + +// RequestUploadCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the RequestUploadCredentials operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See RequestUploadCredentials for more information on using the RequestUploadCredentials +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RequestUploadCredentialsRequest method. +// req, resp := client.RequestUploadCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RequestUploadCredentials +func (c *GameLift) RequestUploadCredentialsRequest(input *RequestUploadCredentialsInput) (req *request.Request, output *RequestUploadCredentialsOutput) { + op := &request.Operation{ + Name: opRequestUploadCredentials, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestUploadCredentialsInput{} + } + + output = &RequestUploadCredentialsOutput{} + req = c.newRequest(op, input, output) + return +} + +// RequestUploadCredentials API operation for Amazon GameLift. +// +// This API call is not currently in use. Retrieves a fresh set of upload credentials +// and the assigned Amazon S3 storage location for a specific build. Valid credentials +// are required to upload your game build files to Amazon S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation RequestUploadCredentials for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. 
+// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RequestUploadCredentials +func (c *GameLift) RequestUploadCredentials(input *RequestUploadCredentialsInput) (*RequestUploadCredentialsOutput, error) { + req, out := c.RequestUploadCredentialsRequest(input) + return out, req.Send() +} + +// RequestUploadCredentialsWithContext is the same as RequestUploadCredentials with the addition of +// the ability to pass a context and additional request options. +// +// See RequestUploadCredentials for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) RequestUploadCredentialsWithContext(ctx aws.Context, input *RequestUploadCredentialsInput, opts ...request.Option) (*RequestUploadCredentialsOutput, error) { + req, out := c.RequestUploadCredentialsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResolveAlias = "ResolveAlias" + +// ResolveAliasRequest generates a "aws/request.Request" representing the +// client's request for the ResolveAlias operation. 
The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResolveAlias for more information on using the ResolveAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResolveAliasRequest method. +// req, resp := client.ResolveAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResolveAlias +func (c *GameLift) ResolveAliasRequest(input *ResolveAliasInput) (req *request.Request, output *ResolveAliasOutput) { + op := &request.Operation{ + Name: opResolveAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResolveAliasInput{} + } + + output = &ResolveAliasOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResolveAlias API operation for Amazon GameLift. +// +// Retrieves the fleet ID that a specified alias is currently pointing to. +// +// Alias-related operations include: +// +// * CreateAlias +// +// * ListAliases +// +// * DescribeAlias +// +// * UpdateAlias +// +// * DeleteAlias +// +// * ResolveAlias +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation ResolveAlias for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. 
Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" +// The service is unable to resolve the routing for a particular alias because +// it has a terminal RoutingStrategy associated with it. The message returned +// in this exception is the message defined in the routing strategy itself. +// Such requests should only be retried if the routing strategy for the specified +// alias is modified. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResolveAlias +func (c *GameLift) ResolveAlias(input *ResolveAliasInput) (*ResolveAliasOutput, error) { + req, out := c.ResolveAliasRequest(input) + return out, req.Send() +} + +// ResolveAliasWithContext is the same as ResolveAlias with the addition of +// the ability to pass a context and additional request options. +// +// See ResolveAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *GameLift) ResolveAliasWithContext(ctx aws.Context, input *ResolveAliasInput, opts ...request.Option) (*ResolveAliasOutput, error) { + req, out := c.ResolveAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSearchGameSessions = "SearchGameSessions" + +// SearchGameSessionsRequest generates a "aws/request.Request" representing the +// client's request for the SearchGameSessions operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SearchGameSessions for more information on using the SearchGameSessions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SearchGameSessionsRequest method. +// req, resp := client.SearchGameSessionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/SearchGameSessions +func (c *GameLift) SearchGameSessionsRequest(input *SearchGameSessionsInput) (req *request.Request, output *SearchGameSessionsOutput) { + op := &request.Operation{ + Name: opSearchGameSessions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SearchGameSessionsInput{} + } + + output = &SearchGameSessionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// SearchGameSessions API operation for Amazon GameLift. +// +// Retrieves a set of game sessions that match a set of search criteria and +// sorts them in a specified order. A game session search is limited to a single +// fleet. 
Search results include only game sessions that are in ACTIVE status. +// If you need to retrieve game sessions with a status other than active, use +// DescribeGameSessions. If you need to retrieve the protection policy for each +// game session, use DescribeGameSessionDetails. +// +// You can search or sort by the following game session attributes: +// +// * gameSessionId -- Unique identifier for the game session. You can use +// either a GameSessionId or GameSessionArn value. +// +// * gameSessionName -- Name assigned to a game session. This value is set +// when requesting a new game session with CreateGameSession or updating +// with UpdateGameSession. Game session names do not need to be unique to +// a game session. +// +// * creationTimeMillis -- Value indicating when a game session was created. +// It is expressed in Unix time as milliseconds. +// +// * playerSessionCount -- Number of players currently connected to a game +// session. This value changes rapidly as players join the session or drop +// out. +// +// * maximumSessions -- Maximum number of player sessions allowed for a game +// session. This value is set when requesting a new game session with CreateGameSession +// or updating with UpdateGameSession. +// +// * hasAvailablePlayerSessions -- Boolean value indicating whether a game +// session has reached its maximum number of players. When searching with +// this attribute, the search value must be true or false. It is highly recommended +// that all search requests include this filter attribute to optimize search +// performance and return only sessions that players can join. +// +// To search or sort, specify either a fleet ID or an alias ID, and provide +// a search filter expression, a sort expression, or both. Use the pagination +// parameters to retrieve results as a set of sequential pages. If successful, +// a collection of GameSession objects matching the request is returned. 
+// +// Returned values for playerSessionCount and hasAvailablePlayerSessions change +// quickly as players join sessions and others drop out. Results should be considered +// a snapshot in time. Be sure to refresh search results often, and handle sessions +// that fill up before a player can join. +// +// Game-session-related operations include: +// +// * CreateGameSession +// +// * DescribeGameSessions +// +// * DescribeGameSessionDetails +// +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation SearchGameSessions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" +// The service is unable to resolve the routing for a particular alias because +// it has a terminal RoutingStrategy associated with it. 
The message returned +// in this exception is the message defined in the routing strategy itself. +// Such requests should only be retried if the routing strategy for the specified +// alias is modified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/SearchGameSessions +func (c *GameLift) SearchGameSessions(input *SearchGameSessionsInput) (*SearchGameSessionsOutput, error) { + req, out := c.SearchGameSessionsRequest(input) + return out, req.Send() +} + +// SearchGameSessionsWithContext is the same as SearchGameSessions with the addition of +// the ability to pass a context and additional request options. +// +// See SearchGameSessions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) SearchGameSessionsWithContext(ctx aws.Context, input *SearchGameSessionsInput, opts ...request.Option) (*SearchGameSessionsOutput, error) { + req, out := c.SearchGameSessionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartGameSessionPlacement = "StartGameSessionPlacement" + +// StartGameSessionPlacementRequest generates a "aws/request.Request" representing the +// client's request for the StartGameSessionPlacement operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartGameSessionPlacement for more information on using the StartGameSessionPlacement +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartGameSessionPlacementRequest method. +// req, resp := client.StartGameSessionPlacementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartGameSessionPlacement +func (c *GameLift) StartGameSessionPlacementRequest(input *StartGameSessionPlacementInput) (req *request.Request, output *StartGameSessionPlacementOutput) { + op := &request.Operation{ + Name: opStartGameSessionPlacement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartGameSessionPlacementInput{} + } + + output = &StartGameSessionPlacementOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartGameSessionPlacement API operation for Amazon GameLift. +// +// Places a request for a new game session in a queue (see CreateGameSessionQueue). +// When processing a placement request, Amazon GameLift searches for available +// resources on the queue's destinations, scanning each until it finds resources +// or the placement request times out. +// +// A game session placement request can also request player sessions. When a +// new game session is successfully created, Amazon GameLift creates a player +// session for each player included in the request. +// +// When placing a game session, by default Amazon GameLift tries each fleet +// in the order they are listed in the queue configuration. Ideally, a queue's +// destinations are listed in preference order. +// +// Alternatively, when requesting a game session with players, you can also +// provide latency data for each player in relevant regions. Latency data indicates +// the performance lag a player experiences when connected to a fleet in the +// region. 
Amazon GameLift uses latency data to reorder the list of destinations +// to place the game session in a region with minimal lag. If latency data is +// provided for multiple players, Amazon GameLift calculates each region's average +// lag for all players and reorders to get the best game play across all players. +// +// To place a new game session request, specify the following: +// +// * The queue name and a set of game session properties and settings +// +// * A unique ID (such as a UUID) for the placement. You use this ID to track +// the status of the placement request +// +// * (Optional) A set of IDs and player data for each player you want to +// join to the new game session +// +// * Latency data for all players (if you want to optimize game play for +// the players) +// +// If successful, a new game session placement is created. +// +// To track the status of a placement request, call DescribeGameSessionPlacement +// and check the request's status. If the status is FULFILLED, a new game session +// has been created and a game session ARN and region are referenced. If the +// placement request times out, you can resubmit the request or retry it with +// a different queue. +// +// Game-session-related operations include: +// +// * CreateGameSession +// +// * DescribeGameSessions +// +// * DescribeGameSessionDetails +// +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation StartGameSessionPlacement for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartGameSessionPlacement +func (c *GameLift) StartGameSessionPlacement(input *StartGameSessionPlacementInput) (*StartGameSessionPlacementOutput, error) { + req, out := c.StartGameSessionPlacementRequest(input) + return out, req.Send() +} + +// StartGameSessionPlacementWithContext is the same as StartGameSessionPlacement with the addition of +// the ability to pass a context and additional request options. +// +// See StartGameSessionPlacement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) StartGameSessionPlacementWithContext(ctx aws.Context, input *StartGameSessionPlacementInput, opts ...request.Option) (*StartGameSessionPlacementOutput, error) { + req, out := c.StartGameSessionPlacementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opStartMatchmaking = "StartMatchmaking" + +// StartMatchmakingRequest generates a "aws/request.Request" representing the +// client's request for the StartMatchmaking operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartMatchmaking for more information on using the StartMatchmaking +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartMatchmakingRequest method. +// req, resp := client.StartMatchmakingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartMatchmaking +func (c *GameLift) StartMatchmakingRequest(input *StartMatchmakingInput) (req *request.Request, output *StartMatchmakingOutput) { + op := &request.Operation{ + Name: opStartMatchmaking, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartMatchmakingInput{} + } + + output = &StartMatchmakingOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartMatchmaking API operation for Amazon GameLift. +// +// Uses FlexMatch to create a game match for a group of players based on custom +// matchmaking rules, and starts a new game for the matched players. Each matchmaking +// request specifies the type of match to build (team configuration, rules for +// an acceptable match, etc.). The request also specifies the players to find +// a match for and where to host the new game session for optimal performance. 
+// A matchmaking request might start with a single player or a group of players +// who want to play together. FlexMatch finds additional players as needed to +// fill the match. Match type, rules, and the queue used to place a new game +// session are defined in a MatchmakingConfiguration. For complete information +// on setting up and using FlexMatch, see the topic Adding FlexMatch to Your +// Game (http://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html). +// +// To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, +// and include the players to be matched. You must also include a set of player +// attributes relevant for the matchmaking configuration. If successful, a matchmaking +// ticket is returned with status set to QUEUED. Track the status of the ticket +// to respond as needed and acquire game session connection information for +// successfully completed matches. +// +// Tracking ticket status -- A couple of options are available for tracking +// the status of matchmaking requests: +// +// * Polling -- Call DescribeMatchmaking. This operation returns the full +// ticket object, including current status and (for completed tickets) game +// session connection info. We recommend polling no more than once every +// 10 seconds. +// +// * Notifications -- Get event notifications for changes in ticket status +// using Amazon Simple Notification Service (SNS). Notifications are easy +// to set up (see CreateMatchmakingConfiguration) and typically deliver match +// status changes faster and more efficiently than polling. We recommend +// that you use polling to back up to notifications (since delivery is not +// guaranteed) and call DescribeMatchmaking only when notifications are not +// received within 30 seconds. 
+// +// Processing a matchmaking request -- FlexMatch handles a matchmaking request +// as follows: +// +// Your client code submits a StartMatchmaking request for one or more players +// and tracks the status of the request ticket. +// +// FlexMatch uses this ticket and others in process to build an acceptable match. +// When a potential match is identified, all tickets in the proposed match are +// advanced to the next status. +// +// If the match requires player acceptance (set in the matchmaking configuration), +// the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your +// client code to solicit acceptance from all players in every ticket involved +// in the match, and then call AcceptMatch for each player. If any player rejects +// or fails to accept the match before a specified timeout, the proposed match +// is dropped (see AcceptMatch for more details). +// +// Once a match is proposed and accepted, the matchmaking tickets move into +// status PLACING. FlexMatch locates resources for a new game session using +// the game session queue (set in the matchmaking configuration) and creates +// the game session based on the match data. +// +// When the match is successfully placed, the matchmaking tickets move into +// COMPLETED status. Connection information (including game session endpoint +// and player session) is added to the matchmaking tickets. Matched players +// can use the connection information to join the game. +// +// Matchmaking-related operations include: +// +// * StartMatchmaking +// +// * DescribeMatchmaking +// +// * StopMatchmaking +// +// * AcceptMatch +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation StartMatchmaking for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" +// The requested operation is not supported in the region specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartMatchmaking +func (c *GameLift) StartMatchmaking(input *StartMatchmakingInput) (*StartMatchmakingOutput, error) { + req, out := c.StartMatchmakingRequest(input) + return out, req.Send() +} + +// StartMatchmakingWithContext is the same as StartMatchmaking with the addition of +// the ability to pass a context and additional request options. +// +// See StartMatchmaking for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) StartMatchmakingWithContext(ctx aws.Context, input *StartMatchmakingInput, opts ...request.Option) (*StartMatchmakingOutput, error) { + req, out := c.StartMatchmakingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opStopGameSessionPlacement = "StopGameSessionPlacement" + +// StopGameSessionPlacementRequest generates a "aws/request.Request" representing the +// client's request for the StopGameSessionPlacement operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopGameSessionPlacement for more information on using the StopGameSessionPlacement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopGameSessionPlacementRequest method. +// req, resp := client.StopGameSessionPlacementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopGameSessionPlacement +func (c *GameLift) StopGameSessionPlacementRequest(input *StopGameSessionPlacementInput) (req *request.Request, output *StopGameSessionPlacementOutput) { + op := &request.Operation{ + Name: opStopGameSessionPlacement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopGameSessionPlacementInput{} + } + + output = &StopGameSessionPlacementOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopGameSessionPlacement API operation for Amazon GameLift. +// +// Cancels a game session placement that is in PENDING status. To stop a placement, +// provide the placement ID values. If successful, the placement is moved to +// CANCELLED status. 
+// +// Game-session-related operations include: +// +// * CreateGameSession +// +// * DescribeGameSessions +// +// * DescribeGameSessionDetails +// +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation StopGameSessionPlacement for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopGameSessionPlacement +func (c *GameLift) StopGameSessionPlacement(input *StopGameSessionPlacementInput) (*StopGameSessionPlacementOutput, error) { + req, out := c.StopGameSessionPlacementRequest(input) + return out, req.Send() +} + +// StopGameSessionPlacementWithContext is the same as StopGameSessionPlacement with the addition of +// the ability to pass a context and additional request options. 
+// +// See StopGameSessionPlacement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) StopGameSessionPlacementWithContext(ctx aws.Context, input *StopGameSessionPlacementInput, opts ...request.Option) (*StopGameSessionPlacementOutput, error) { + req, out := c.StopGameSessionPlacementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopMatchmaking = "StopMatchmaking" + +// StopMatchmakingRequest generates a "aws/request.Request" representing the +// client's request for the StopMatchmaking operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopMatchmaking for more information on using the StopMatchmaking +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopMatchmakingRequest method. 
+// req, resp := client.StopMatchmakingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmaking +func (c *GameLift) StopMatchmakingRequest(input *StopMatchmakingInput) (req *request.Request, output *StopMatchmakingOutput) { + op := &request.Operation{ + Name: opStopMatchmaking, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopMatchmakingInput{} + } + + output = &StopMatchmakingOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopMatchmaking API operation for Amazon GameLift. +// +// Cancels a matchmaking ticket that is currently being processed. To stop the +// matchmaking operation, specify the ticket ID. If successful, work on the +// ticket is stopped, and the ticket status is changed to CANCELLED. +// +// Matchmaking-related operations include: +// +// * StartMatchmaking +// +// * DescribeMatchmaking +// +// * StopMatchmaking +// +// * AcceptMatch +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation StopMatchmaking for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. 
+// +// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" +// The requested operation is not supported in the region specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmaking +func (c *GameLift) StopMatchmaking(input *StopMatchmakingInput) (*StopMatchmakingOutput, error) { + req, out := c.StopMatchmakingRequest(input) + return out, req.Send() +} + +// StopMatchmakingWithContext is the same as StopMatchmaking with the addition of +// the ability to pass a context and additional request options. +// +// See StopMatchmaking for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) StopMatchmakingWithContext(ctx aws.Context, input *StopMatchmakingInput, opts ...request.Option) (*StopMatchmakingOutput, error) { + req, out := c.StopMatchmakingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAlias = "UpdateAlias" + +// UpdateAliasRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAlias operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAlias for more information on using the UpdateAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAliasRequest method. 
+// req, resp := client.UpdateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateAlias +func (c *GameLift) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *UpdateAliasOutput) { + op := &request.Operation{ + Name: opUpdateAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAliasInput{} + } + + output = &UpdateAliasOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateAlias API operation for Amazon GameLift. +// +// Updates properties for an alias. To update properties, specify the alias +// ID to be updated and provide the information to be changed. To reassign an +// alias to another fleet, provide an updated routing strategy. If successful, +// the updated alias record is returned. +// +// Alias-related operations include: +// +// * CreateAlias +// +// * ListAliases +// +// * DescribeAlias +// +// * UpdateAlias +// +// * DeleteAlias +// +// * ResolveAlias +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation UpdateAlias for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. 
+// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateAlias +func (c *GameLift) UpdateAlias(input *UpdateAliasInput) (*UpdateAliasOutput, error) { + req, out := c.UpdateAliasRequest(input) + return out, req.Send() +} + +// UpdateAliasWithContext is the same as UpdateAlias with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) UpdateAliasWithContext(ctx aws.Context, input *UpdateAliasInput, opts ...request.Option) (*UpdateAliasOutput, error) { + req, out := c.UpdateAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateBuild = "UpdateBuild" + +// UpdateBuildRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBuild operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateBuild for more information on using the UpdateBuild +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the UpdateBuildRequest method. +// req, resp := client.UpdateBuildRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateBuild +func (c *GameLift) UpdateBuildRequest(input *UpdateBuildInput) (req *request.Request, output *UpdateBuildOutput) { + op := &request.Operation{ + Name: opUpdateBuild, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateBuildInput{} + } + + output = &UpdateBuildOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateBuild API operation for Amazon GameLift. +// +// Updates metadata in a build record, including the build name and version. +// To update the metadata, specify the build ID to update and provide the new +// values. If successful, a build object containing the updated metadata is +// returned. +// +// Build-related operations include: +// +// * CreateBuild +// +// * ListBuilds +// +// * DescribeBuild +// +// * UpdateBuild +// +// * DeleteBuild +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation UpdateBuild for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// The client failed authentication. Clients should not retry such requests. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. 
+// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateBuild +func (c *GameLift) UpdateBuild(input *UpdateBuildInput) (*UpdateBuildOutput, error) { + req, out := c.UpdateBuildRequest(input) + return out, req.Send() +} + +// UpdateBuildWithContext is the same as UpdateBuild with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateBuild for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) UpdateBuildWithContext(ctx aws.Context, input *UpdateBuildInput, opts ...request.Option) (*UpdateBuildOutput, error) { + req, out := c.UpdateBuildRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateFleetAttributes = "UpdateFleetAttributes" + +// UpdateFleetAttributesRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFleetAttributes operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateFleetAttributes for more information on using the UpdateFleetAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
// Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateFleetAttributesRequest method.
//    req, resp := client.UpdateFleetAttributesRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetAttributes
func (c *GameLift) UpdateFleetAttributesRequest(input *UpdateFleetAttributesInput) (req *request.Request, output *UpdateFleetAttributesOutput) {
	op := &request.Operation{
		Name:       opUpdateFleetAttributes,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateFleetAttributesInput{}
	}

	output = &UpdateFleetAttributesOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateFleetAttributes API operation for Amazon GameLift.
//
// Updates fleet properties, including name and description, for a fleet. To
// update metadata, specify the fleet ID and the property values that you want
// to change. If successful, the fleet ID for the updated fleet is returned.
//
// Fleet-related operations include:
//
//    * CreateFleet
//
//    * ListFleets
//
//    * Describe fleets:
//
//    DescribeFleetAttributes
//
//    DescribeFleetPortSettings
//
//    DescribeFleetUtilization
//
//    DescribeRuntimeConfiguration
//
//    DescribeFleetEvents
//
//    * Update fleets:
//
//    UpdateFleetAttributes
//
//    UpdateFleetCapacity
//
//    UpdateFleetPortSettings
//
//    UpdateRuntimeConfiguration
//
//    * Manage fleet capacity:
//
//    DescribeFleetCapacity
//
//    UpdateFleetCapacity
//
//    PutScalingPolicy (automatic scaling)
//
//    DescribeScalingPolicies (automatic scaling)
//
//    DeleteScalingPolicy (automatic scaling)
//
//    DescribeEC2InstanceLimits
//
//    * DeleteFleet
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon GameLift's
// API operation UpdateFleetAttributes for usage and error information.
//
// Returned Error Codes:
//    * ErrCodeNotFoundException "NotFoundException"
//    A service resource associated with the request could not be found. Clients
//    should not retry such requests.
//
//    * ErrCodeConflictException "ConflictException"
//    The requested operation would cause a conflict with the current state of
//    a service resource associated with the request. Resolve the conflict before
//    retrying this request.
//
//    * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException"
//    The requested operation would cause a conflict with the current state of
//    a resource associated with the request and/or the fleet. Resolve the conflict
//    before retrying.
//
//    * ErrCodeLimitExceededException "LimitExceededException"
//    The requested operation would cause the resource to exceed the allowed service
//    limit. Resolve the issue before retrying.
//
//    * ErrCodeInternalServiceException "InternalServiceException"
//    The service encountered an unrecoverable internal failure while processing
//    the request. Clients can retry such requests immediately or after a waiting
//    period.
//
//    * ErrCodeInvalidRequestException "InvalidRequestException"
//    One or more parameter values in the request are invalid. Correct the invalid
//    parameter values before retrying.
//
//    * ErrCodeUnauthorizedException "UnauthorizedException"
//    The client failed authentication. Clients should not retry such requests.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetAttributes
func (c *GameLift) UpdateFleetAttributes(input *UpdateFleetAttributesInput) (*UpdateFleetAttributesOutput, error) {
	req, out := c.UpdateFleetAttributesRequest(input)
	return out, req.Send()
}

// UpdateFleetAttributesWithContext is the same as UpdateFleetAttributes with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateFleetAttributes for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *GameLift) UpdateFleetAttributesWithContext(ctx aws.Context, input *UpdateFleetAttributesInput, opts ...request.Option) (*UpdateFleetAttributesOutput, error) {
	req, out := c.UpdateFleetAttributesRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateFleetCapacity = "UpdateFleetCapacity"

// UpdateFleetCapacityRequest generates a "aws/request.Request" representing the
// client's request for the UpdateFleetCapacity operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See UpdateFleetCapacity for more information on using the UpdateFleetCapacity
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateFleetCapacityRequest method.
//    req, resp := client.UpdateFleetCapacityRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetCapacity
func (c *GameLift) UpdateFleetCapacityRequest(input *UpdateFleetCapacityInput) (req *request.Request, output *UpdateFleetCapacityOutput) {
	op := &request.Operation{
		Name:       opUpdateFleetCapacity,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateFleetCapacityInput{}
	}

	output = &UpdateFleetCapacityOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateFleetCapacity API operation for Amazon GameLift.
//
// Updates capacity settings for a fleet. Use this action to specify the number
// of EC2 instances (hosts) that you want this fleet to contain. Before calling
// this action, you may want to call DescribeEC2InstanceLimits to get the maximum
// capacity based on the fleet's EC2 instance type.
//
// If you're using autoscaling (see PutScalingPolicy), you may want to specify
// a minimum and/or maximum capacity. If you don't provide these, autoscaling
// can set capacity anywhere between zero and the service limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift).
//
// To update fleet capacity, specify the fleet ID and the number of instances
// you want the fleet to host. If successful, Amazon GameLift starts or terminates
// instances so that the fleet's active instance count matches the desired instance
// count. You can view a fleet's current capacity information by calling DescribeFleetCapacity.
// If the desired instance count is higher than the instance type's limit, the
// "Limit Exceeded" exception occurs.
//
// Fleet-related operations include:
//
//    * CreateFleet
//
//    * ListFleets
//
//    * Describe fleets:
//
//    DescribeFleetAttributes
//
//    DescribeFleetPortSettings
//
//    DescribeFleetUtilization
//
//    DescribeRuntimeConfiguration
//
//    DescribeFleetEvents
//
//    * Update fleets:
//
//    UpdateFleetAttributes
//
//    UpdateFleetCapacity
//
//    UpdateFleetPortSettings
//
//    UpdateRuntimeConfiguration
//
//    * Manage fleet capacity:
//
//    DescribeFleetCapacity
//
//    UpdateFleetCapacity
//
//    PutScalingPolicy (automatic scaling)
//
//    DescribeScalingPolicies (automatic scaling)
//
//    DeleteScalingPolicy (automatic scaling)
//
//    DescribeEC2InstanceLimits
//
//    * DeleteFleet
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon GameLift's
// API operation UpdateFleetCapacity for usage and error information.
//
// Returned Error Codes:
//    * ErrCodeNotFoundException "NotFoundException"
//    A service resource associated with the request could not be found. Clients
//    should not retry such requests.
//
//    * ErrCodeConflictException "ConflictException"
//    The requested operation would cause a conflict with the current state of
//    a service resource associated with the request. Resolve the conflict before
//    retrying this request.
//
//    * ErrCodeLimitExceededException "LimitExceededException"
//    The requested operation would cause the resource to exceed the allowed service
//    limit. Resolve the issue before retrying.
//
//    * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException"
//    The requested operation would cause a conflict with the current state of
//    a resource associated with the request and/or the fleet. Resolve the conflict
//    before retrying.
//
//    * ErrCodeInternalServiceException "InternalServiceException"
//    The service encountered an unrecoverable internal failure while processing
//    the request. Clients can retry such requests immediately or after a waiting
//    period.
//
//    * ErrCodeInvalidRequestException "InvalidRequestException"
//    One or more parameter values in the request are invalid. Correct the invalid
//    parameter values before retrying.
//
//    * ErrCodeUnauthorizedException "UnauthorizedException"
//    The client failed authentication. Clients should not retry such requests.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetCapacity
func (c *GameLift) UpdateFleetCapacity(input *UpdateFleetCapacityInput) (*UpdateFleetCapacityOutput, error) {
	req, out := c.UpdateFleetCapacityRequest(input)
	return out, req.Send()
}

// UpdateFleetCapacityWithContext is the same as UpdateFleetCapacity with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateFleetCapacity for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *GameLift) UpdateFleetCapacityWithContext(ctx aws.Context, input *UpdateFleetCapacityInput, opts ...request.Option) (*UpdateFleetCapacityOutput, error) {
	req, out := c.UpdateFleetCapacityRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateFleetPortSettings = "UpdateFleetPortSettings"

// UpdateFleetPortSettingsRequest generates a "aws/request.Request" representing the
// client's request for the UpdateFleetPortSettings operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See UpdateFleetPortSettings for more information on using the UpdateFleetPortSettings
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateFleetPortSettingsRequest method.
//    req, resp := client.UpdateFleetPortSettingsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetPortSettings
func (c *GameLift) UpdateFleetPortSettingsRequest(input *UpdateFleetPortSettingsInput) (req *request.Request, output *UpdateFleetPortSettingsOutput) {
	op := &request.Operation{
		Name:       opUpdateFleetPortSettings,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateFleetPortSettingsInput{}
	}

	output = &UpdateFleetPortSettingsOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateFleetPortSettings API operation for Amazon GameLift.
//
// Updates port settings for a fleet. To update settings, specify the fleet
// ID to be updated and list the permissions you want to update. List the permissions
// you want to add in InboundPermissionAuthorizations, and permissions you want
// to remove in InboundPermissionRevocations. Permissions to be removed must
// match existing fleet permissions. If successful, the fleet ID for the updated
// fleet is returned.
//
// Fleet-related operations include:
//
//    * CreateFleet
//
//    * ListFleets
//
//    * Describe fleets:
//
//    DescribeFleetAttributes
//
//    DescribeFleetPortSettings
//
//    DescribeFleetUtilization
//
//    DescribeRuntimeConfiguration
//
//    DescribeFleetEvents
//
//    * Update fleets:
//
//    UpdateFleetAttributes
//
//    UpdateFleetCapacity
//
//    UpdateFleetPortSettings
//
//    UpdateRuntimeConfiguration
//
//    * Manage fleet capacity:
//
//    DescribeFleetCapacity
//
//    UpdateFleetCapacity
//
//    PutScalingPolicy (automatic scaling)
//
//    DescribeScalingPolicies (automatic scaling)
//
//    DeleteScalingPolicy (automatic scaling)
//
//    DescribeEC2InstanceLimits
//
//    * DeleteFleet
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon GameLift's
// API operation UpdateFleetPortSettings for usage and error information.
//
// Returned Error Codes:
//    * ErrCodeNotFoundException "NotFoundException"
//    A service resource associated with the request could not be found. Clients
//    should not retry such requests.
//
//    * ErrCodeConflictException "ConflictException"
//    The requested operation would cause a conflict with the current state of
//    a service resource associated with the request. Resolve the conflict before
//    retrying this request.
//
//    * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException"
//    The requested operation would cause a conflict with the current state of
//    a resource associated with the request and/or the fleet. Resolve the conflict
//    before retrying.
//
//    * ErrCodeLimitExceededException "LimitExceededException"
//    The requested operation would cause the resource to exceed the allowed service
//    limit. Resolve the issue before retrying.
//
//    * ErrCodeInternalServiceException "InternalServiceException"
//    The service encountered an unrecoverable internal failure while processing
//    the request. Clients can retry such requests immediately or after a waiting
//    period.
//
//    * ErrCodeInvalidRequestException "InvalidRequestException"
//    One or more parameter values in the request are invalid. Correct the invalid
//    parameter values before retrying.
//
//    * ErrCodeUnauthorizedException "UnauthorizedException"
//    The client failed authentication. Clients should not retry such requests.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetPortSettings
func (c *GameLift) UpdateFleetPortSettings(input *UpdateFleetPortSettingsInput) (*UpdateFleetPortSettingsOutput, error) {
	req, out := c.UpdateFleetPortSettingsRequest(input)
	return out, req.Send()
}

// UpdateFleetPortSettingsWithContext is the same as UpdateFleetPortSettings with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateFleetPortSettings for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *GameLift) UpdateFleetPortSettingsWithContext(ctx aws.Context, input *UpdateFleetPortSettingsInput, opts ...request.Option) (*UpdateFleetPortSettingsOutput, error) {
	req, out := c.UpdateFleetPortSettingsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateGameSession = "UpdateGameSession"

// UpdateGameSessionRequest generates a "aws/request.Request" representing the
// client's request for the UpdateGameSession operation.
// The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See UpdateGameSession for more information on using the UpdateGameSession
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateGameSessionRequest method.
//    req, resp := client.UpdateGameSessionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSession
func (c *GameLift) UpdateGameSessionRequest(input *UpdateGameSessionInput) (req *request.Request, output *UpdateGameSessionOutput) {
	op := &request.Operation{
		Name:       opUpdateGameSession,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateGameSessionInput{}
	}

	output = &UpdateGameSessionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateGameSession API operation for Amazon GameLift.
//
// Updates game session properties. This includes the session name, maximum
// player count, protection policy, which controls whether or not an active
// game session can be terminated during a scale-down event, and the player
// session creation policy, which controls whether or not new players can join
// the session. To update a game session, specify the game session ID and the
// values you want to change. If successful, an updated GameSession object is
// returned.
//
// Game-session-related operations include:
//
//    * CreateGameSession
//
//    * DescribeGameSessions
//
//    * DescribeGameSessionDetails
//
//    * SearchGameSessions
//
//    * UpdateGameSession
//
//    * GetGameSessionLogUrl
//
//    * Game session placements
//
//    StartGameSessionPlacement
//
//    DescribeGameSessionPlacement
//
//    StopGameSessionPlacement
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon GameLift's
// API operation UpdateGameSession for usage and error information.
//
// Returned Error Codes:
//    * ErrCodeNotFoundException "NotFoundException"
//    A service resource associated with the request could not be found. Clients
//    should not retry such requests.
//
//    * ErrCodeConflictException "ConflictException"
//    The requested operation would cause a conflict with the current state of
//    a service resource associated with the request. Resolve the conflict before
//    retrying this request.
//
//    * ErrCodeInternalServiceException "InternalServiceException"
//    The service encountered an unrecoverable internal failure while processing
//    the request. Clients can retry such requests immediately or after a waiting
//    period.
//
//    * ErrCodeUnauthorizedException "UnauthorizedException"
//    The client failed authentication. Clients should not retry such requests.
//
//    * ErrCodeInvalidGameSessionStatusException "InvalidGameSessionStatusException"
//    The requested operation would cause a conflict with the current state of
//    a resource associated with the request and/or the game instance. Resolve
//    the conflict before retrying.
//
//    * ErrCodeInvalidRequestException "InvalidRequestException"
//    One or more parameter values in the request are invalid. Correct the invalid
//    parameter values before retrying.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSession
func (c *GameLift) UpdateGameSession(input *UpdateGameSessionInput) (*UpdateGameSessionOutput, error) {
	req, out := c.UpdateGameSessionRequest(input)
	return out, req.Send()
}

// UpdateGameSessionWithContext is the same as UpdateGameSession with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateGameSession for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *GameLift) UpdateGameSessionWithContext(ctx aws.Context, input *UpdateGameSessionInput, opts ...request.Option) (*UpdateGameSessionOutput, error) {
	req, out := c.UpdateGameSessionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateGameSessionQueue = "UpdateGameSessionQueue"

// UpdateGameSessionQueueRequest generates a "aws/request.Request" representing the
// client's request for the UpdateGameSessionQueue operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See UpdateGameSessionQueue for more information on using the UpdateGameSessionQueue
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateGameSessionQueueRequest method.
//    req, resp := client.UpdateGameSessionQueueRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionQueue
func (c *GameLift) UpdateGameSessionQueueRequest(input *UpdateGameSessionQueueInput) (req *request.Request, output *UpdateGameSessionQueueOutput) {
	op := &request.Operation{
		Name:       opUpdateGameSessionQueue,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateGameSessionQueueInput{}
	}

	output = &UpdateGameSessionQueueOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateGameSessionQueue API operation for Amazon GameLift.
//
// Updates settings for a game session queue, which determines how new game
// session requests in the queue are processed. To update settings, specify
// the queue name to be updated and provide the new settings. When updating
// destinations, provide a complete list of destinations.
//
// Queue-related operations include:
//
//    * CreateGameSessionQueue
//
//    * DescribeGameSessionQueues
//
//    * UpdateGameSessionQueue
//
//    * DeleteGameSessionQueue
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon GameLift's
// API operation UpdateGameSessionQueue for usage and error information.
//
// Returned Error Codes:
//    * ErrCodeInternalServiceException "InternalServiceException"
//    The service encountered an unrecoverable internal failure while processing
//    the request. Clients can retry such requests immediately or after a waiting
//    period.
//
//    * ErrCodeInvalidRequestException "InvalidRequestException"
//    One or more parameter values in the request are invalid. Correct the invalid
//    parameter values before retrying.
//
//    * ErrCodeNotFoundException "NotFoundException"
//    A service resource associated with the request could not be found. Clients
//    should not retry such requests.
//
//    * ErrCodeUnauthorizedException "UnauthorizedException"
//    The client failed authentication. Clients should not retry such requests.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionQueue
func (c *GameLift) UpdateGameSessionQueue(input *UpdateGameSessionQueueInput) (*UpdateGameSessionQueueOutput, error) {
	req, out := c.UpdateGameSessionQueueRequest(input)
	return out, req.Send()
}

// UpdateGameSessionQueueWithContext is the same as UpdateGameSessionQueue with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateGameSessionQueue for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *GameLift) UpdateGameSessionQueueWithContext(ctx aws.Context, input *UpdateGameSessionQueueInput, opts ...request.Option) (*UpdateGameSessionQueueOutput, error) {
	req, out := c.UpdateGameSessionQueueRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateMatchmakingConfiguration = "UpdateMatchmakingConfiguration"

// UpdateMatchmakingConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the UpdateMatchmakingConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See UpdateMatchmakingConfiguration for more information on using the UpdateMatchmakingConfiguration
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateMatchmakingConfigurationRequest method.
//    req, resp := client.UpdateMatchmakingConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateMatchmakingConfiguration
func (c *GameLift) UpdateMatchmakingConfigurationRequest(input *UpdateMatchmakingConfigurationInput) (req *request.Request, output *UpdateMatchmakingConfigurationOutput) {
	op := &request.Operation{
		Name:       opUpdateMatchmakingConfiguration,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateMatchmakingConfigurationInput{}
	}

	output = &UpdateMatchmakingConfigurationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateMatchmakingConfiguration API operation for Amazon GameLift.
//
// Updates settings for a FlexMatch matchmaking configuration. To update settings,
// specify the configuration name to be updated and provide the new settings.
//
// Operations related to match configurations and rule sets include:
//
//    * CreateMatchmakingConfiguration
//
//    * DescribeMatchmakingConfigurations
//
//    * UpdateMatchmakingConfiguration
//
//    * DeleteMatchmakingConfiguration
//
//    * CreateMatchmakingRuleSet
//
//    * DescribeMatchmakingRuleSets
//
//    * ValidateMatchmakingRuleSet
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon GameLift's
// API operation UpdateMatchmakingConfiguration for usage and error information.
//
// Returned Error Codes:
//    * ErrCodeInvalidRequestException "InvalidRequestException"
//    One or more parameter values in the request are invalid. Correct the invalid
//    parameter values before retrying.
//
//    * ErrCodeNotFoundException "NotFoundException"
//    A service resource associated with the request could not be found. Clients
//    should not retry such requests.
//
//    * ErrCodeInternalServiceException "InternalServiceException"
//    The service encountered an unrecoverable internal failure while processing
//    the request. Clients can retry such requests immediately or after a waiting
//    period.
//
//    * ErrCodeUnsupportedRegionException "UnsupportedRegionException"
//    The requested operation is not supported in the region specified.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateMatchmakingConfiguration
func (c *GameLift) UpdateMatchmakingConfiguration(input *UpdateMatchmakingConfigurationInput) (*UpdateMatchmakingConfigurationOutput, error) {
	req, out := c.UpdateMatchmakingConfigurationRequest(input)
	return out, req.Send()
}

// UpdateMatchmakingConfigurationWithContext is the same as UpdateMatchmakingConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateMatchmakingConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *GameLift) UpdateMatchmakingConfigurationWithContext(ctx aws.Context, input *UpdateMatchmakingConfigurationInput, opts ...request.Option) (*UpdateMatchmakingConfigurationOutput, error) {
	req, out := c.UpdateMatchmakingConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateRuntimeConfiguration = "UpdateRuntimeConfiguration"

// UpdateRuntimeConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the UpdateRuntimeConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See UpdateRuntimeConfiguration for more information on using the UpdateRuntimeConfiguration
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateRuntimeConfigurationRequest method.
//    req, resp := client.UpdateRuntimeConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateRuntimeConfiguration
func (c *GameLift) UpdateRuntimeConfigurationRequest(input *UpdateRuntimeConfigurationInput) (req *request.Request, output *UpdateRuntimeConfigurationOutput) {
	op := &request.Operation{
		Name:       opUpdateRuntimeConfiguration,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateRuntimeConfigurationInput{}
	}

	output = &UpdateRuntimeConfigurationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateRuntimeConfiguration API operation for Amazon GameLift.
//
// Updates the current run-time configuration for the specified fleet, which
// tells Amazon GameLift how to launch server processes on instances in the
// fleet. You can update a fleet's run-time configuration at any time after
// the fleet is created; it does not need to be in an ACTIVE status.
//
// To update run-time configuration, specify the fleet ID and provide a RuntimeConfiguration
// object with the updated collection of server process configurations.
//
// Each instance in an Amazon GameLift fleet checks regularly for an updated
// run-time configuration and changes how it launches server processes to comply
// with the latest version. Existing server processes are not affected by the
// update; they continue to run until they end, while Amazon GameLift simply
// adds new server processes to fit the current run-time configuration. As a
// result, the run-time configuration changes are applied gradually as existing
// processes shut down and new processes are launched in Amazon GameLift's normal
// process recycling activity.
//
// Fleet-related operations include:
//
//    * CreateFleet
//
//    * ListFleets
//
//    * Describe fleets:
//
//    DescribeFleetAttributes
//
//    DescribeFleetPortSettings
//
//    DescribeFleetUtilization
//
//    DescribeRuntimeConfiguration
//
//    DescribeFleetEvents
//
//    * Update fleets:
//
//    UpdateFleetAttributes
//
//    UpdateFleetCapacity
//
//    UpdateFleetPortSettings
//
//    UpdateRuntimeConfiguration
//
//    * Manage fleet capacity:
//
//    DescribeFleetCapacity
//
//    UpdateFleetCapacity
//
//    PutScalingPolicy (automatic scaling)
//
//    DescribeScalingPolicies (automatic scaling)
//
//    DeleteScalingPolicy (automatic scaling)
//
//    DescribeEC2InstanceLimits
//
//    * DeleteFleet
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon GameLift's
// API operation UpdateRuntimeConfiguration for usage and error information.
//
// Returned Error Codes:
//    * ErrCodeUnauthorizedException "UnauthorizedException"
//    The client failed authentication. Clients should not retry such requests.
//
//    * ErrCodeNotFoundException "NotFoundException"
//    A service resource associated with the request could not be found. Clients
//    should not retry such requests.
//
//    * ErrCodeInternalServiceException "InternalServiceException"
//    The service encountered an unrecoverable internal failure while processing
//    the request. Clients can retry such requests immediately or after a waiting
//    period.
//
//    * ErrCodeInvalidRequestException "InvalidRequestException"
//    One or more parameter values in the request are invalid. Correct the invalid
//    parameter values before retrying.
//
//    * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException"
//    The requested operation would cause a conflict with the current state of
//    a resource associated with the request and/or the fleet. Resolve the conflict
//    before retrying.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateRuntimeConfiguration
func (c *GameLift) UpdateRuntimeConfiguration(input *UpdateRuntimeConfigurationInput) (*UpdateRuntimeConfigurationOutput, error) {
	req, out := c.UpdateRuntimeConfigurationRequest(input)
	return out, req.Send()
}

// UpdateRuntimeConfigurationWithContext is the same as UpdateRuntimeConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateRuntimeConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *GameLift) UpdateRuntimeConfigurationWithContext(ctx aws.Context, input *UpdateRuntimeConfigurationInput, opts ...request.Option) (*UpdateRuntimeConfigurationOutput, error) {
	req, out := c.UpdateRuntimeConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opValidateMatchmakingRuleSet = "ValidateMatchmakingRuleSet"

// ValidateMatchmakingRuleSetRequest generates a "aws/request.Request" representing the
// client's request for the ValidateMatchmakingRuleSet operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
+// +// See ValidateMatchmakingRuleSet for more information on using the ValidateMatchmakingRuleSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ValidateMatchmakingRuleSetRequest method. +// req, resp := client.ValidateMatchmakingRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ValidateMatchmakingRuleSet +func (c *GameLift) ValidateMatchmakingRuleSetRequest(input *ValidateMatchmakingRuleSetInput) (req *request.Request, output *ValidateMatchmakingRuleSetOutput) { + op := &request.Operation{ + Name: opValidateMatchmakingRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ValidateMatchmakingRuleSetInput{} + } + + output = &ValidateMatchmakingRuleSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// ValidateMatchmakingRuleSet API operation for Amazon GameLift. +// +// Validates the syntax of a matchmaking rule or rule set. This operation checks +// that the rule set uses syntactically correct JSON and that it conforms to +// allowed property expressions. To validate syntax, provide a rule set string. +// +// Operations related to match configurations and rule sets include: +// +// * CreateMatchmakingConfiguration +// +// * DescribeMatchmakingConfigurations +// +// * UpdateMatchmakingConfiguration +// +// * DeleteMatchmakingConfiguration +// +// * CreateMatchmakingRuleSet +// +// * DescribeMatchmakingRuleSets +// +// * ValidateMatchmakingRuleSet +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon GameLift's +// API operation ValidateMatchmakingRuleSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" +// The requested operation is not supported in the region specified. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ValidateMatchmakingRuleSet +func (c *GameLift) ValidateMatchmakingRuleSet(input *ValidateMatchmakingRuleSetInput) (*ValidateMatchmakingRuleSetOutput, error) { + req, out := c.ValidateMatchmakingRuleSetRequest(input) + return out, req.Send() +} + +// ValidateMatchmakingRuleSetWithContext is the same as ValidateMatchmakingRuleSet with the addition of +// the ability to pass a context and additional request options. +// +// See ValidateMatchmakingRuleSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) ValidateMatchmakingRuleSetWithContext(ctx aws.Context, input *ValidateMatchmakingRuleSetInput, opts ...request.Option) (*ValidateMatchmakingRuleSetOutput, error) { + req, out := c.ValidateMatchmakingRuleSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AcceptMatchInput +type AcceptMatchInput struct { + _ struct{} `type:"structure"` + + // Player response to the proposed match. + // + // AcceptanceType is a required field + AcceptanceType *string `type:"string" required:"true" enum:"AcceptanceType"` + + // Unique identifier for a player delivering the response. This parameter can + // include one or multiple player IDs. + // + // PlayerIds is a required field + PlayerIds []*string `type:"list" required:"true"` + + // Unique identifier for a matchmaking ticket. The ticket must be in status + // REQUIRES_ACCEPTANCE; otherwise this request will fail. + // + // TicketId is a required field + TicketId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AcceptMatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptMatchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AcceptMatchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AcceptMatchInput"} + if s.AcceptanceType == nil { + invalidParams.Add(request.NewErrParamRequired("AcceptanceType")) + } + if s.PlayerIds == nil { + invalidParams.Add(request.NewErrParamRequired("PlayerIds")) + } + if s.TicketId == nil { + invalidParams.Add(request.NewErrParamRequired("TicketId")) + } + if s.TicketId != nil && len(*s.TicketId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TicketId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptanceType sets the AcceptanceType field's value. +func (s *AcceptMatchInput) SetAcceptanceType(v string) *AcceptMatchInput { + s.AcceptanceType = &v + return s +} + +// SetPlayerIds sets the PlayerIds field's value. 
+func (s *AcceptMatchInput) SetPlayerIds(v []*string) *AcceptMatchInput { + s.PlayerIds = v + return s +} + +// SetTicketId sets the TicketId field's value. +func (s *AcceptMatchInput) SetTicketId(v string) *AcceptMatchInput { + s.TicketId = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AcceptMatchOutput +type AcceptMatchOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AcceptMatchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptMatchOutput) GoString() string { + return s.String() +} + +// Properties describing a fleet alias. +// +// Alias-related operations include: +// +// * CreateAlias +// +// * ListAliases +// +// * DescribeAlias +// +// * UpdateAlias +// +// * DeleteAlias +// +// * ResolveAlias +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/Alias +type Alias struct { + _ struct{} `type:"structure"` + + // Unique identifier for an alias; alias ARNs are unique across all regions. + AliasArn *string `min:"1" type:"string"` + + // Unique identifier for an alias; alias IDs are unique within a region. + AliasId *string `type:"string"` + + // Time stamp indicating when this data object was created. Format is a number + // expressed in Unix time as milliseconds (for example "1469498468.057"). + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Human-readable description of an alias. + Description *string `type:"string"` + + // Time stamp indicating when this data object was last modified. Format is + // a number expressed in Unix time as milliseconds (for example "1469498468.057"). + LastUpdatedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Descriptive label that is associated with an alias. Alias names do not need + // to be unique. 
+ Name *string `min:"1" type:"string"` + + // Alias configuration for the alias, including routing type and settings. + RoutingStrategy *RoutingStrategy `type:"structure"` +} + +// String returns the string representation +func (s Alias) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Alias) GoString() string { + return s.String() +} + +// SetAliasArn sets the AliasArn field's value. +func (s *Alias) SetAliasArn(v string) *Alias { + s.AliasArn = &v + return s +} + +// SetAliasId sets the AliasId field's value. +func (s *Alias) SetAliasId(v string) *Alias { + s.AliasId = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Alias) SetCreationTime(v time.Time) *Alias { + s.CreationTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Alias) SetDescription(v string) *Alias { + s.Description = &v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *Alias) SetLastUpdatedTime(v time.Time) *Alias { + s.LastUpdatedTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Alias) SetName(v string) *Alias { + s.Name = &v + return s +} + +// SetRoutingStrategy sets the RoutingStrategy field's value. +func (s *Alias) SetRoutingStrategy(v *RoutingStrategy) *Alias { + s.RoutingStrategy = v + return s +} + +// Values for use in Player attribute type:value pairs. This object lets you +// specify an attribute value using any of the valid data types: string, number, +// string array or data map. Each AttributeValue object can use only one of +// the available properties. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AttributeValue +type AttributeValue struct { + _ struct{} `type:"structure"` + + // For number values, expressed as double. + N *float64 `type:"double"` + + // For single string values. Maximum string length is 100 characters. 
+ S *string `min:"1" type:"string"` + + // For a map of up to 10 type:value pairs. Maximum length for each string value + // is 100 characters. + SDM map[string]*float64 `type:"map"` + + // For a list of up to 10 strings. Maximum length for each string is 100 characters. + // Duplicate values are not recognized; all occurrences of the repeated value + // after the first of a repeated value are ignored. + SL []*string `type:"list"` +} + +// String returns the string representation +func (s AttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValue) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttributeValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttributeValue"} + if s.S != nil && len(*s.S) < 1 { + invalidParams.Add(request.NewErrParamMinLen("S", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetN sets the N field's value. +func (s *AttributeValue) SetN(v float64) *AttributeValue { + s.N = &v + return s +} + +// SetS sets the S field's value. +func (s *AttributeValue) SetS(v string) *AttributeValue { + s.S = &v + return s +} + +// SetSDM sets the SDM field's value. +func (s *AttributeValue) SetSDM(v map[string]*float64) *AttributeValue { + s.SDM = v + return s +} + +// SetSL sets the SL field's value. +func (s *AttributeValue) SetSL(v []*string) *AttributeValue { + s.SL = v + return s +} + +// Temporary access credentials used for uploading game build files to Amazon +// GameLift. They are valid for a limited time. If they expire before you upload +// your game build, get a new set by calling RequestUploadCredentials. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AwsCredentials +type AwsCredentials struct { + _ struct{} `type:"structure"` + + // Temporary key allowing access to the Amazon GameLift S3 account. + AccessKeyId *string `min:"1" type:"string"` + + // Temporary secret key allowing access to the Amazon GameLift S3 account. + SecretAccessKey *string `min:"1" type:"string"` + + // Token used to associate a specific build ID with the files uploaded using + // these credentials. + SessionToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AwsCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *AwsCredentials) SetAccessKeyId(v string) *AwsCredentials { + s.AccessKeyId = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *AwsCredentials) SetSecretAccessKey(v string) *AwsCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *AwsCredentials) SetSessionToken(v string) *AwsCredentials { + s.SessionToken = &v + return s +} + +// Properties describing a game build. +// +// Build-related operations include: +// +// * CreateBuild +// +// * ListBuilds +// +// * DescribeBuild +// +// * UpdateBuild +// +// * DeleteBuild +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/Build +type Build struct { + _ struct{} `type:"structure"` + + // Unique identifier for a build. + BuildId *string `type:"string"` + + // Time stamp indicating when this data object was created. Format is a number + // expressed in Unix time as milliseconds (for example "1469498468.057"). + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Descriptive label that is associated with a build. 
Build names do not need + // to be unique. It can be set using CreateBuild or UpdateBuild. + Name *string `type:"string"` + + // Operating system that the game server binaries are built to run on. This + // value determines the type of fleet resources that you can use for this build. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` + + // File size of the uploaded game build, expressed in bytes. When the build + // status is INITIALIZED, this value is 0. + SizeOnDisk *int64 `min:"1" type:"long"` + + // Current status of the build. + // + // Possible build statuses include the following: + // + // * INITIALIZED -- A new build has been defined, but no files have been + // uploaded. You cannot create fleets for builds that are in this status. + // When a build is successfully created, the build status is set to this + // value. + // + // * READY -- The game build has been successfully uploaded. You can now + // create new fleets for this build. + // + // * FAILED -- The game build upload failed. You cannot create new fleets + // for this build. + Status *string `type:"string" enum:"BuildStatus"` + + // Version that is associated with this build. Version strings do not need to + // be unique. This value can be set using CreateBuild or UpdateBuild. + Version *string `type:"string"` +} + +// String returns the string representation +func (s Build) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Build) GoString() string { + return s.String() +} + +// SetBuildId sets the BuildId field's value. +func (s *Build) SetBuildId(v string) *Build { + s.BuildId = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Build) SetCreationTime(v time.Time) *Build { + s.CreationTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Build) SetName(v string) *Build { + s.Name = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. 
+func (s *Build) SetOperatingSystem(v string) *Build { + s.OperatingSystem = &v + return s +} + +// SetSizeOnDisk sets the SizeOnDisk field's value. +func (s *Build) SetSizeOnDisk(v int64) *Build { + s.SizeOnDisk = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Build) SetStatus(v string) *Build { + s.Status = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *Build) SetVersion(v string) *Build { + s.Version = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateAliasInput +type CreateAliasInput struct { + _ struct{} `type:"structure"` + + // Human-readable description of an alias. + Description *string `min:"1" type:"string"` + + // Descriptive label that is associated with an alias. Alias names do not need + // to be unique. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Object that specifies the fleet and routing type to use for the alias. + // + // RoutingStrategy is a required field + RoutingStrategy *RoutingStrategy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAliasInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RoutingStrategy == nil { + invalidParams.Add(request.NewErrParamRequired("RoutingStrategy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateAliasInput) SetDescription(v string) *CreateAliasInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateAliasInput) SetName(v string) *CreateAliasInput { + s.Name = &v + return s +} + +// SetRoutingStrategy sets the RoutingStrategy field's value. +func (s *CreateAliasInput) SetRoutingStrategy(v *RoutingStrategy) *CreateAliasInput { + s.RoutingStrategy = v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateAliasOutput +type CreateAliasOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the newly created alias record. + Alias *Alias `type:"structure"` +} + +// String returns the string representation +func (s CreateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasOutput) GoString() string { + return s.String() +} + +// SetAlias sets the Alias field's value. +func (s *CreateAliasOutput) SetAlias(v *Alias) *CreateAliasOutput { + s.Alias = v + return s +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateBuildInput
+type CreateBuildInput struct {
+	_ struct{} `type:"structure"`
+
+	// Descriptive label that is associated with a build. Build names do not need
+	// to be unique. You can use UpdateBuild to change this value later.
+	Name *string `min:"1" type:"string"`
+
+	// Operating system that the game server binaries are built to run on. This
+	// value determines the type of fleet resources that you can use for this build.
+	// If your game build contains multiple executables, they all must run on the
+	// same operating system.
+	OperatingSystem *string `type:"string" enum:"OperatingSystem"`
+
+	// Amazon S3 location of the game build files to be uploaded. The S3 bucket
+	// must be owned by the same AWS account that you're using to manage Amazon
+	// GameLift. It also must be in the same region that you want to create a new
+	// build in. Before calling CreateBuild with this location, you must allow Amazon
+	// GameLift to access your Amazon S3 bucket (see Create a Build with Files in
+	// Amazon S3 (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build)).
+	StorageLocation *S3Location `type:"structure"`
+
+	// Version that is associated with this build. Version strings do not need to
+	// be unique. You can use UpdateBuild to change this value later.
+	Version *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBuildInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBuildInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBuildInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBuildInput"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + } + if s.StorageLocation != nil { + if err := s.StorageLocation.Validate(); err != nil { + invalidParams.AddNested("StorageLocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *CreateBuildInput) SetName(v string) *CreateBuildInput { + s.Name = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *CreateBuildInput) SetOperatingSystem(v string) *CreateBuildInput { + s.OperatingSystem = &v + return s +} + +// SetStorageLocation sets the StorageLocation field's value. +func (s *CreateBuildInput) SetStorageLocation(v *S3Location) *CreateBuildInput { + s.StorageLocation = v + return s +} + +// SetVersion sets the Version field's value. +func (s *CreateBuildInput) SetVersion(v string) *CreateBuildInput { + s.Version = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateBuildOutput +type CreateBuildOutput struct { + _ struct{} `type:"structure"` + + // The newly created build record, including a unique build ID and status. + Build *Build `type:"structure"` + + // Amazon S3 location specified in the request. + StorageLocation *S3Location `type:"structure"` + + // This element is not currently in use. 
+ UploadCredentials *AwsCredentials `type:"structure"` +} + +// String returns the string representation +func (s CreateBuildOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBuildOutput) GoString() string { + return s.String() +} + +// SetBuild sets the Build field's value. +func (s *CreateBuildOutput) SetBuild(v *Build) *CreateBuildOutput { + s.Build = v + return s +} + +// SetStorageLocation sets the StorageLocation field's value. +func (s *CreateBuildOutput) SetStorageLocation(v *S3Location) *CreateBuildOutput { + s.StorageLocation = v + return s +} + +// SetUploadCredentials sets the UploadCredentials field's value. +func (s *CreateBuildOutput) SetUploadCredentials(v *AwsCredentials) *CreateBuildOutput { + s.UploadCredentials = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateFleetInput +type CreateFleetInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a build to be deployed on the new fleet. The build + // must have been successfully uploaded to Amazon GameLift and be in a READY + // status. This fleet setting cannot be changed once the fleet is created. + // + // BuildId is a required field + BuildId *string `type:"string" required:"true"` + + // Human-readable description of a fleet. + Description *string `min:"1" type:"string"` + + // Range of IP addresses and port settings that permit inbound traffic to access + // server processes running on the fleet. If no inbound permissions are set, + // including both IP address range and port range, the server processes in the + // fleet cannot accept connections. You can specify one or more sets of permissions + // for a fleet. + EC2InboundPermissions []*IpPermission `type:"list"` + + // Name of an EC2 instance type that is supported in Amazon GameLift. 
A fleet + // instance type determines the computing resources of each instance in the + // fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift + // supports the following EC2 instance types. See Amazon EC2 Instance Types + // (http://aws.amazon.com/ec2/instance-types/) for detailed descriptions. + // + // EC2InstanceType is a required field + EC2InstanceType *string `type:"string" required:"true" enum:"EC2InstanceType"` + + // This parameter is no longer used. Instead, to specify where Amazon GameLift + // should store log files once a server process shuts down, use the Amazon GameLift + // server API ProcessReady() and specify one or more directory paths in logParameters. + // See more information in the Server API Reference (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api-ref.html#gamelift-sdk-server-api-ref-dataypes-process). + LogPaths []*string `type:"list"` + + // Names of metric groups to add this fleet to. Use an existing metric group + // name to add this fleet to the group. Or use a new name to create a new metric + // group. A fleet can only be included in one metric group at a time. + MetricGroups []*string `type:"list"` + + // Descriptive label that is associated with a fleet. Fleet names do not need + // to be unique. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Game session protection policy to apply to all instances in this fleet. If + // this parameter is not set, instances in this fleet default to no protection. + // You can change a fleet's protection policy using UpdateFleetAttributes, but + // this change will only affect sessions created after the policy change. You + // can also set protection for individual instances using UpdateGameSession. + // + // * NoProtection -- The game session can be terminated during a scale-down + // event. 
+ // + // * FullProtection -- If the game session is in an ACTIVE status, it cannot + // be terminated during a scale-down event. + NewGameSessionProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` + + // Unique identifier for the AWS account with the VPC that you want to peer + // your Amazon GameLift fleet with. You can find your Account ID in the AWS + // Management Console under account settings. + PeerVpcAwsAccountId *string `min:"1" type:"string"` + + // Unique identifier for a VPC with resources to be accessed by your Amazon + // GameLift fleet. The VPC must be in the same region where your fleet is deployed. + // To get VPC information, including IDs, use the Virtual Private Cloud service + // tools, including the VPC Dashboard in the AWS Management Console. + PeerVpcId *string `min:"1" type:"string"` + + // Policy that limits the number of game sessions an individual player can create + // over a span of time for this fleet. + ResourceCreationLimitPolicy *ResourceCreationLimitPolicy `type:"structure"` + + // Instructions for launching server processes on each instance in the fleet. + // The run-time configuration for a fleet has a collection of server process + // configurations, one for each type of server process to run on an instance. + // A server process configuration specifies the location of the server executable, + // launch parameters, and the number of concurrent processes with that configuration + // to maintain on each instance. A CreateFleet request must include a run-time + // configuration with at least one server process configuration; otherwise the + // request fails with an invalid request exception. (This parameter replaces + // the parameters ServerLaunchPath and ServerLaunchParameters; requests that + // contain values for these parameters instead of a run-time configuration will + // continue to work.) + RuntimeConfiguration *RuntimeConfiguration `type:"structure"` + + // This parameter is no longer used. 
Instead, specify server launch parameters + // in the RuntimeConfiguration parameter. (Requests that specify a server launch + // path and launch parameters instead of a run-time configuration will continue + // to work.) + ServerLaunchParameters *string `min:"1" type:"string"` + + // This parameter is no longer used. Instead, specify a server launch path using + // the RuntimeConfiguration parameter. (Requests that specify a server launch + // path and launch parameters instead of a run-time configuration will continue + // to work.) + ServerLaunchPath *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFleetInput"} + if s.BuildId == nil { + invalidParams.Add(request.NewErrParamRequired("BuildId")) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.EC2InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("EC2InstanceType")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.PeerVpcAwsAccountId != nil && len(*s.PeerVpcAwsAccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PeerVpcAwsAccountId", 1)) + } + if s.PeerVpcId != nil && len(*s.PeerVpcId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PeerVpcId", 1)) + } + if s.ServerLaunchParameters != nil && len(*s.ServerLaunchParameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerLaunchParameters", 1)) + } + if s.ServerLaunchPath != nil && 
len(*s.ServerLaunchPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerLaunchPath", 1)) + } + if s.EC2InboundPermissions != nil { + for i, v := range s.EC2InboundPermissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EC2InboundPermissions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RuntimeConfiguration != nil { + if err := s.RuntimeConfiguration.Validate(); err != nil { + invalidParams.AddNested("RuntimeConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBuildId sets the BuildId field's value. +func (s *CreateFleetInput) SetBuildId(v string) *CreateFleetInput { + s.BuildId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateFleetInput) SetDescription(v string) *CreateFleetInput { + s.Description = &v + return s +} + +// SetEC2InboundPermissions sets the EC2InboundPermissions field's value. +func (s *CreateFleetInput) SetEC2InboundPermissions(v []*IpPermission) *CreateFleetInput { + s.EC2InboundPermissions = v + return s +} + +// SetEC2InstanceType sets the EC2InstanceType field's value. +func (s *CreateFleetInput) SetEC2InstanceType(v string) *CreateFleetInput { + s.EC2InstanceType = &v + return s +} + +// SetLogPaths sets the LogPaths field's value. +func (s *CreateFleetInput) SetLogPaths(v []*string) *CreateFleetInput { + s.LogPaths = v + return s +} + +// SetMetricGroups sets the MetricGroups field's value. +func (s *CreateFleetInput) SetMetricGroups(v []*string) *CreateFleetInput { + s.MetricGroups = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateFleetInput) SetName(v string) *CreateFleetInput { + s.Name = &v + return s +} + +// SetNewGameSessionProtectionPolicy sets the NewGameSessionProtectionPolicy field's value. 
+func (s *CreateFleetInput) SetNewGameSessionProtectionPolicy(v string) *CreateFleetInput { + s.NewGameSessionProtectionPolicy = &v + return s +} + +// SetPeerVpcAwsAccountId sets the PeerVpcAwsAccountId field's value. +func (s *CreateFleetInput) SetPeerVpcAwsAccountId(v string) *CreateFleetInput { + s.PeerVpcAwsAccountId = &v + return s +} + +// SetPeerVpcId sets the PeerVpcId field's value. +func (s *CreateFleetInput) SetPeerVpcId(v string) *CreateFleetInput { + s.PeerVpcId = &v + return s +} + +// SetResourceCreationLimitPolicy sets the ResourceCreationLimitPolicy field's value. +func (s *CreateFleetInput) SetResourceCreationLimitPolicy(v *ResourceCreationLimitPolicy) *CreateFleetInput { + s.ResourceCreationLimitPolicy = v + return s +} + +// SetRuntimeConfiguration sets the RuntimeConfiguration field's value. +func (s *CreateFleetInput) SetRuntimeConfiguration(v *RuntimeConfiguration) *CreateFleetInput { + s.RuntimeConfiguration = v + return s +} + +// SetServerLaunchParameters sets the ServerLaunchParameters field's value. +func (s *CreateFleetInput) SetServerLaunchParameters(v string) *CreateFleetInput { + s.ServerLaunchParameters = &v + return s +} + +// SetServerLaunchPath sets the ServerLaunchPath field's value. +func (s *CreateFleetInput) SetServerLaunchPath(v string) *CreateFleetInput { + s.ServerLaunchPath = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateFleetOutput +type CreateFleetOutput struct { + _ struct{} `type:"structure"` + + // Properties for the newly created fleet. + FleetAttributes *FleetAttributes `type:"structure"` +} + +// String returns the string representation +func (s CreateFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFleetOutput) GoString() string { + return s.String() +} + +// SetFleetAttributes sets the FleetAttributes field's value. 
+func (s *CreateFleetOutput) SetFleetAttributes(v *FleetAttributes) *CreateFleetOutput { + s.FleetAttributes = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionInput +type CreateGameSessionInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for an alias associated with the fleet to create a game + // session in. Each request must reference either a fleet ID or alias ID, but + // not both. + AliasId *string `type:"string"` + + // Unique identifier for a player or entity creating the game session. This + // ID is used to enforce a resource protection policy (if one exists) that limits + // the number of concurrent active game sessions one player can have. + CreatorId *string `min:"1" type:"string"` + + // Unique identifier for a fleet to create a game session in. Each request must + // reference either a fleet ID or alias ID, but not both. + FleetId *string `type:"string"` + + // Set of developer-defined properties for a game session, formatted as a set + // of type:value pairs. These properties are included in the GameSession object, + // which is passed to the game server with a request to start a new game session + // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). + GameProperties []*GameProperty `type:"list"` + + // Set of developer-defined game session properties, formatted as a single string + // value. This data is included in the GameSession object, which is passed to + // the game server with a request to start a new game session (see Start a Game + // Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). + GameSessionData *string `min:"1" type:"string"` + + // This parameter is no longer preferred. Please use IdempotencyToken instead. 
+ // Custom string that uniquely identifies a request for a new game session. + // Maximum token length is 48 characters. If provided, this string is included + // in the new game session's ID. (A game session ARN has the following format: + // arn:aws:gamelift:::gamesession//.) + GameSessionId *string `min:"1" type:"string"` + + // Custom string that uniquely identifies a request for a new game session. + // Maximum token length is 48 characters. If provided, this string is included + // in the new game session's ID. (A game session ARN has the following format: + // arn:aws:gamelift:::gamesession//.) Idempotency tokens remain in use for 30 days after a game session + // has ended; game session objects are retained for this time period and then + // deleted. + IdempotencyToken *string `min:"1" type:"string"` + + // Maximum number of players that can be connected simultaneously to the game + // session. + // + // MaximumPlayerSessionCount is a required field + MaximumPlayerSessionCount *int64 `type:"integer" required:"true"` + + // Descriptive label that is associated with a game session. Session names do + // not need to be unique. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateGameSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGameSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateGameSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGameSessionInput"} + if s.CreatorId != nil && len(*s.CreatorId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CreatorId", 1)) + } + if s.GameSessionData != nil && len(*s.GameSessionData) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameSessionData", 1)) + } + if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) + } + if s.IdempotencyToken != nil && len(*s.IdempotencyToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdempotencyToken", 1)) + } + if s.MaximumPlayerSessionCount == nil { + invalidParams.Add(request.NewErrParamRequired("MaximumPlayerSessionCount")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.GameProperties != nil { + for i, v := range s.GameProperties { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GameProperties", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasId sets the AliasId field's value. +func (s *CreateGameSessionInput) SetAliasId(v string) *CreateGameSessionInput { + s.AliasId = &v + return s +} + +// SetCreatorId sets the CreatorId field's value. +func (s *CreateGameSessionInput) SetCreatorId(v string) *CreateGameSessionInput { + s.CreatorId = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *CreateGameSessionInput) SetFleetId(v string) *CreateGameSessionInput { + s.FleetId = &v + return s +} + +// SetGameProperties sets the GameProperties field's value. +func (s *CreateGameSessionInput) SetGameProperties(v []*GameProperty) *CreateGameSessionInput { + s.GameProperties = v + return s +} + +// SetGameSessionData sets the GameSessionData field's value. 
+func (s *CreateGameSessionInput) SetGameSessionData(v string) *CreateGameSessionInput { + s.GameSessionData = &v + return s +} + +// SetGameSessionId sets the GameSessionId field's value. +func (s *CreateGameSessionInput) SetGameSessionId(v string) *CreateGameSessionInput { + s.GameSessionId = &v + return s +} + +// SetIdempotencyToken sets the IdempotencyToken field's value. +func (s *CreateGameSessionInput) SetIdempotencyToken(v string) *CreateGameSessionInput { + s.IdempotencyToken = &v + return s +} + +// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. +func (s *CreateGameSessionInput) SetMaximumPlayerSessionCount(v int64) *CreateGameSessionInput { + s.MaximumPlayerSessionCount = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateGameSessionInput) SetName(v string) *CreateGameSessionInput { + s.Name = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionOutput +type CreateGameSessionOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the newly created game session record. + GameSession *GameSession `type:"structure"` +} + +// String returns the string representation +func (s CreateGameSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGameSessionOutput) GoString() string { + return s.String() +} + +// SetGameSession sets the GameSession field's value. +func (s *CreateGameSessionOutput) SetGameSession(v *GameSession) *CreateGameSessionOutput { + s.GameSession = v + return s +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionQueueInput +type CreateGameSessionQueueInput struct { + _ struct{} `type:"structure"` + + // List of fleets that can be used to fulfill game session placement requests + // in the queue. Fleets are identified by either a fleet ARN or a fleet alias + // ARN. Destinations are listed in default preference order. + Destinations []*GameSessionQueueDestination `type:"list"` + + // Descriptive label that is associated with game session queue. Queue names + // must be unique within each region. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Collection of latency policies to apply when processing game sessions placement + // requests with player latency information. Multiple policies are evaluated + // in order of the maximum latency value, starting with the lowest latency values. + // With just one policy, it is enforced at the start of the game session placement + // for the duration period. With multiple policies, each policy is enforced + // consecutively for its duration period. For example, a queue might enforce + // a 60-second policy followed by a 120-second policy, and then no policy for + // the remainder of the placement. A player latency policy must set a value + // for MaximumIndividualPlayerLatencyMilliseconds; if none is set, this API + // requests will fail. + PlayerLatencyPolicies []*PlayerLatencyPolicy `type:"list"` + + // Maximum time, in seconds, that a new game session placement request remains + // in the queue. When a request exceeds this time, the game session placement + // changes to a TIMED_OUT status. 
+ TimeoutInSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CreateGameSessionQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGameSessionQueueInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateGameSessionQueueInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGameSessionQueueInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Destinations != nil { + for i, v := range s.Destinations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Destinations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinations sets the Destinations field's value. +func (s *CreateGameSessionQueueInput) SetDestinations(v []*GameSessionQueueDestination) *CreateGameSessionQueueInput { + s.Destinations = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateGameSessionQueueInput) SetName(v string) *CreateGameSessionQueueInput { + s.Name = &v + return s +} + +// SetPlayerLatencyPolicies sets the PlayerLatencyPolicies field's value. +func (s *CreateGameSessionQueueInput) SetPlayerLatencyPolicies(v []*PlayerLatencyPolicy) *CreateGameSessionQueueInput { + s.PlayerLatencyPolicies = v + return s +} + +// SetTimeoutInSeconds sets the TimeoutInSeconds field's value. +func (s *CreateGameSessionQueueInput) SetTimeoutInSeconds(v int64) *CreateGameSessionQueueInput { + s.TimeoutInSeconds = &v + return s +} + +// Represents the returned data in response to a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionQueueOutput +type CreateGameSessionQueueOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the newly created game session queue. + GameSessionQueue *GameSessionQueue `type:"structure"` +} + +// String returns the string representation +func (s CreateGameSessionQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGameSessionQueueOutput) GoString() string { + return s.String() +} + +// SetGameSessionQueue sets the GameSessionQueue field's value. +func (s *CreateGameSessionQueueOutput) SetGameSessionQueue(v *GameSessionQueue) *CreateGameSessionQueueOutput { + s.GameSessionQueue = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingConfigurationInput +type CreateMatchmakingConfigurationInput struct { + _ struct{} `type:"structure"` + + // Flag that determines whether or not a match that was created with this configuration + // must be accepted by the matched players. To require acceptance, set to TRUE. + // + // AcceptanceRequired is a required field + AcceptanceRequired *bool `type:"boolean" required:"true"` + + // Length of time (in seconds) to wait for players to accept a proposed match. + // If any player rejects the match or fails to accept before the timeout, the + // ticket continues to look for an acceptable match. + AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"` + + // Number of player slots in a match to keep open for future players. For example, + // if the configuration's rule set specifies a match for a single 12-person + // team, and the additional player count is set to 2, only 10 players are selected + // for the match. + AdditionalPlayerCount *int64 `type:"integer"` + + // Information to attached to all events related to the matchmaking configuration. 
+ CustomEventData *string `type:"string"` + + // Meaningful description of the matchmaking configuration. + Description *string `min:"1" type:"string"` + + // Set of developer-defined properties for a game session, formatted as a set + // of type:value pairs. These properties are included in the GameSession object, + // which is passed to the game server with a request to start a new game session + // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). + // This information is added to the new GameSession object that is created for + // a successful match. + GameProperties []*GameProperty `type:"list"` + + // Set of developer-defined game session properties, formatted as a single string + // value. This data is included in the GameSession object, which is passed to + // the game server with a request to start a new game session (see Start a Game + // Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). + // This information is added to the new GameSession object that is created for + // a successful match. + GameSessionData *string `min:"1" type:"string"` + + // Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) + // that is assigned to a game session queue and uniquely identifies it. Format + // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. + // These queues are used when placing game sessions for matches that are created + // with this matchmaking configuration. Queues can be located in any region. + // + // GameSessionQueueArns is a required field + GameSessionQueueArns []*string `type:"list" required:"true"` + + // Unique identifier for a matchmaking configuration. This name is used to identify + // the configuration associated with a matchmaking request or ticket. 
+ // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // SNS topic ARN that is set up to receive matchmaking notifications. + NotificationTarget *string `type:"string"` + + // Maximum duration, in seconds, that a matchmaking ticket can remain in process + // before timing out. Requests that time out can be resubmitted as needed. + // + // RequestTimeoutSeconds is a required field + RequestTimeoutSeconds *int64 `min:"1" type:"integer" required:"true"` + + // Unique identifier for a matchmaking rule set to use with this configuration. + // A matchmaking configuration can only use rule sets that are defined in the + // same region. + // + // RuleSetName is a required field + RuleSetName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateMatchmakingConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMatchmakingConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateMatchmakingConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMatchmakingConfigurationInput"} + if s.AcceptanceRequired == nil { + invalidParams.Add(request.NewErrParamRequired("AcceptanceRequired")) + } + if s.AcceptanceTimeoutSeconds != nil && *s.AcceptanceTimeoutSeconds < 1 { + invalidParams.Add(request.NewErrParamMinValue("AcceptanceTimeoutSeconds", 1)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.GameSessionData != nil && len(*s.GameSessionData) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameSessionData", 1)) + } + if s.GameSessionQueueArns == nil { + invalidParams.Add(request.NewErrParamRequired("GameSessionQueueArns")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RequestTimeoutSeconds == nil { + invalidParams.Add(request.NewErrParamRequired("RequestTimeoutSeconds")) + } + if s.RequestTimeoutSeconds != nil && *s.RequestTimeoutSeconds < 1 { + invalidParams.Add(request.NewErrParamMinValue("RequestTimeoutSeconds", 1)) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + if s.RuleSetName != nil && len(*s.RuleSetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleSetName", 1)) + } + if s.GameProperties != nil { + for i, v := range s.GameProperties { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GameProperties", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptanceRequired sets the AcceptanceRequired field's value. 
+func (s *CreateMatchmakingConfigurationInput) SetAcceptanceRequired(v bool) *CreateMatchmakingConfigurationInput { + s.AcceptanceRequired = &v + return s +} + +// SetAcceptanceTimeoutSeconds sets the AcceptanceTimeoutSeconds field's value. +func (s *CreateMatchmakingConfigurationInput) SetAcceptanceTimeoutSeconds(v int64) *CreateMatchmakingConfigurationInput { + s.AcceptanceTimeoutSeconds = &v + return s +} + +// SetAdditionalPlayerCount sets the AdditionalPlayerCount field's value. +func (s *CreateMatchmakingConfigurationInput) SetAdditionalPlayerCount(v int64) *CreateMatchmakingConfigurationInput { + s.AdditionalPlayerCount = &v + return s +} + +// SetCustomEventData sets the CustomEventData field's value. +func (s *CreateMatchmakingConfigurationInput) SetCustomEventData(v string) *CreateMatchmakingConfigurationInput { + s.CustomEventData = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateMatchmakingConfigurationInput) SetDescription(v string) *CreateMatchmakingConfigurationInput { + s.Description = &v + return s +} + +// SetGameProperties sets the GameProperties field's value. +func (s *CreateMatchmakingConfigurationInput) SetGameProperties(v []*GameProperty) *CreateMatchmakingConfigurationInput { + s.GameProperties = v + return s +} + +// SetGameSessionData sets the GameSessionData field's value. +func (s *CreateMatchmakingConfigurationInput) SetGameSessionData(v string) *CreateMatchmakingConfigurationInput { + s.GameSessionData = &v + return s +} + +// SetGameSessionQueueArns sets the GameSessionQueueArns field's value. +func (s *CreateMatchmakingConfigurationInput) SetGameSessionQueueArns(v []*string) *CreateMatchmakingConfigurationInput { + s.GameSessionQueueArns = v + return s +} + +// SetName sets the Name field's value. 
+func (s *CreateMatchmakingConfigurationInput) SetName(v string) *CreateMatchmakingConfigurationInput { + s.Name = &v + return s +} + +// SetNotificationTarget sets the NotificationTarget field's value. +func (s *CreateMatchmakingConfigurationInput) SetNotificationTarget(v string) *CreateMatchmakingConfigurationInput { + s.NotificationTarget = &v + return s +} + +// SetRequestTimeoutSeconds sets the RequestTimeoutSeconds field's value. +func (s *CreateMatchmakingConfigurationInput) SetRequestTimeoutSeconds(v int64) *CreateMatchmakingConfigurationInput { + s.RequestTimeoutSeconds = &v + return s +} + +// SetRuleSetName sets the RuleSetName field's value. +func (s *CreateMatchmakingConfigurationInput) SetRuleSetName(v string) *CreateMatchmakingConfigurationInput { + s.RuleSetName = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingConfigurationOutput +type CreateMatchmakingConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the newly created matchmaking configuration. + Configuration *MatchmakingConfiguration `type:"structure"` +} + +// String returns the string representation +func (s CreateMatchmakingConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMatchmakingConfigurationOutput) GoString() string { + return s.String() +} + +// SetConfiguration sets the Configuration field's value. +func (s *CreateMatchmakingConfigurationOutput) SetConfiguration(v *MatchmakingConfiguration) *CreateMatchmakingConfigurationOutput { + s.Configuration = v + return s +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingRuleSetInput +type CreateMatchmakingRuleSetInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a matchmaking rule set. This name is used to identify + // the rule set associated with a matchmaking configuration. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Collection of matchmaking rules, formatted as a JSON string. (Note that comments + // are not allowed in JSON, but most elements support a description field.) + // + // RuleSetBody is a required field + RuleSetBody *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateMatchmakingRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMatchmakingRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMatchmakingRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMatchmakingRuleSetInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RuleSetBody == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetBody")) + } + if s.RuleSetBody != nil && len(*s.RuleSetBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleSetBody", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *CreateMatchmakingRuleSetInput) SetName(v string) *CreateMatchmakingRuleSetInput { + s.Name = &v + return s +} + +// SetRuleSetBody sets the RuleSetBody field's value. 
+func (s *CreateMatchmakingRuleSetInput) SetRuleSetBody(v string) *CreateMatchmakingRuleSetInput { + s.RuleSetBody = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingRuleSetOutput +type CreateMatchmakingRuleSetOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the newly created matchmaking rule set. + // + // RuleSet is a required field + RuleSet *MatchmakingRuleSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateMatchmakingRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMatchmakingRuleSetOutput) GoString() string { + return s.String() +} + +// SetRuleSet sets the RuleSet field's value. +func (s *CreateMatchmakingRuleSetOutput) SetRuleSet(v *MatchmakingRuleSet) *CreateMatchmakingRuleSetOutput { + s.RuleSet = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessionInput +type CreatePlayerSessionInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the game session to add a player to. + // + // GameSessionId is a required field + GameSessionId *string `min:"1" type:"string" required:"true"` + + // Developer-defined information related to a player. Amazon GameLift does not + // use this data, so it can be formatted as needed for use in the game. + PlayerData *string `min:"1" type:"string"` + + // Unique identifier for a player. Player IDs are developer-defined. 
+ // + // PlayerId is a required field + PlayerId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePlayerSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlayerSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePlayerSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePlayerSessionInput"} + if s.GameSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("GameSessionId")) + } + if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) + } + if s.PlayerData != nil && len(*s.PlayerData) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlayerData", 1)) + } + if s.PlayerId == nil { + invalidParams.Add(request.NewErrParamRequired("PlayerId")) + } + if s.PlayerId != nil && len(*s.PlayerId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameSessionId sets the GameSessionId field's value. +func (s *CreatePlayerSessionInput) SetGameSessionId(v string) *CreatePlayerSessionInput { + s.GameSessionId = &v + return s +} + +// SetPlayerData sets the PlayerData field's value. +func (s *CreatePlayerSessionInput) SetPlayerData(v string) *CreatePlayerSessionInput { + s.PlayerData = &v + return s +} + +// SetPlayerId sets the PlayerId field's value. +func (s *CreatePlayerSessionInput) SetPlayerId(v string) *CreatePlayerSessionInput { + s.PlayerId = &v + return s +} + +// Represents the returned data in response to a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessionOutput +type CreatePlayerSessionOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the newly created player session record. + PlayerSession *PlayerSession `type:"structure"` +} + +// String returns the string representation +func (s CreatePlayerSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlayerSessionOutput) GoString() string { + return s.String() +} + +// SetPlayerSession sets the PlayerSession field's value. +func (s *CreatePlayerSessionOutput) SetPlayerSession(v *PlayerSession) *CreatePlayerSessionOutput { + s.PlayerSession = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessionsInput +type CreatePlayerSessionsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the game session to add players to. + // + // GameSessionId is a required field + GameSessionId *string `min:"1" type:"string" required:"true"` + + // Map of string pairs, each specifying a player ID and a set of developer-defined + // information related to the player. Amazon GameLift does not use this data, + // so it can be formatted as needed for use in the game. Player data strings + // for player IDs not included in the PlayerIds parameter are ignored. + PlayerDataMap map[string]*string `type:"map"` + + // List of unique identifiers for the players to be added. 
+ // + // PlayerIds is a required field + PlayerIds []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreatePlayerSessionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlayerSessionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePlayerSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePlayerSessionsInput"} + if s.GameSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("GameSessionId")) + } + if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) + } + if s.PlayerIds == nil { + invalidParams.Add(request.NewErrParamRequired("PlayerIds")) + } + if s.PlayerIds != nil && len(s.PlayerIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlayerIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameSessionId sets the GameSessionId field's value. +func (s *CreatePlayerSessionsInput) SetGameSessionId(v string) *CreatePlayerSessionsInput { + s.GameSessionId = &v + return s +} + +// SetPlayerDataMap sets the PlayerDataMap field's value. +func (s *CreatePlayerSessionsInput) SetPlayerDataMap(v map[string]*string) *CreatePlayerSessionsInput { + s.PlayerDataMap = v + return s +} + +// SetPlayerIds sets the PlayerIds field's value. +func (s *CreatePlayerSessionsInput) SetPlayerIds(v []*string) *CreatePlayerSessionsInput { + s.PlayerIds = v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessionsOutput +type CreatePlayerSessionsOutput struct { + _ struct{} `type:"structure"` + + // Collection of player session objects created for the added players. 
+ PlayerSessions []*PlayerSession `type:"list"` +} + +// String returns the string representation +func (s CreatePlayerSessionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlayerSessionsOutput) GoString() string { + return s.String() +} + +// SetPlayerSessions sets the PlayerSessions field's value. +func (s *CreatePlayerSessionsOutput) SetPlayerSessions(v []*PlayerSession) *CreatePlayerSessionsOutput { + s.PlayerSessions = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringAuthorizationInput +type CreateVpcPeeringAuthorizationInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the AWS account that you use to manage your Amazon + // GameLift fleet. You can find your Account ID in the AWS Management Console + // under account settings. + // + // GameLiftAwsAccountId is a required field + GameLiftAwsAccountId *string `min:"1" type:"string" required:"true"` + + // Unique identifier for a VPC with resources to be accessed by your Amazon + // GameLift fleet. The VPC must be in the same region where your fleet is deployed. + // To get VPC information, including IDs, use the Virtual Private Cloud service + // tools, including the VPC Dashboard in the AWS Management Console. + // + // PeerVpcId is a required field + PeerVpcId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVpcPeeringAuthorizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcPeeringAuthorizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateVpcPeeringAuthorizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVpcPeeringAuthorizationInput"} + if s.GameLiftAwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("GameLiftAwsAccountId")) + } + if s.GameLiftAwsAccountId != nil && len(*s.GameLiftAwsAccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameLiftAwsAccountId", 1)) + } + if s.PeerVpcId == nil { + invalidParams.Add(request.NewErrParamRequired("PeerVpcId")) + } + if s.PeerVpcId != nil && len(*s.PeerVpcId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PeerVpcId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameLiftAwsAccountId sets the GameLiftAwsAccountId field's value. +func (s *CreateVpcPeeringAuthorizationInput) SetGameLiftAwsAccountId(v string) *CreateVpcPeeringAuthorizationInput { + s.GameLiftAwsAccountId = &v + return s +} + +// SetPeerVpcId sets the PeerVpcId field's value. +func (s *CreateVpcPeeringAuthorizationInput) SetPeerVpcId(v string) *CreateVpcPeeringAuthorizationInput { + s.PeerVpcId = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringAuthorizationOutput +type CreateVpcPeeringAuthorizationOutput struct { + _ struct{} `type:"structure"` + + // Details on the requested VPC peering authorization, including expiration. + VpcPeeringAuthorization *VpcPeeringAuthorization `type:"structure"` +} + +// String returns the string representation +func (s CreateVpcPeeringAuthorizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcPeeringAuthorizationOutput) GoString() string { + return s.String() +} + +// SetVpcPeeringAuthorization sets the VpcPeeringAuthorization field's value. 
+func (s *CreateVpcPeeringAuthorizationOutput) SetVpcPeeringAuthorization(v *VpcPeeringAuthorization) *CreateVpcPeeringAuthorizationOutput { + s.VpcPeeringAuthorization = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringConnectionInput +type CreateVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet. This tells Amazon GameLift which GameLift + // VPC to peer with. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // Unique identifier for the AWS account with the VPC that you want to peer + // your Amazon GameLift fleet with. You can find your Account ID in the AWS + // Management Console under account settings. + // + // PeerVpcAwsAccountId is a required field + PeerVpcAwsAccountId *string `min:"1" type:"string" required:"true"` + + // Unique identifier for a VPC with resources to be accessed by your Amazon + // GameLift fleet. The VPC must be in the same region where your fleet is deployed. + // To get VPC information, including IDs, use the Virtual Private Cloud service + // tools, including the VPC Dashboard in the AWS Management Console. + // + // PeerVpcId is a required field + PeerVpcId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateVpcPeeringConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVpcPeeringConnectionInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.PeerVpcAwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("PeerVpcAwsAccountId")) + } + if s.PeerVpcAwsAccountId != nil && len(*s.PeerVpcAwsAccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PeerVpcAwsAccountId", 1)) + } + if s.PeerVpcId == nil { + invalidParams.Add(request.NewErrParamRequired("PeerVpcId")) + } + if s.PeerVpcId != nil && len(*s.PeerVpcId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PeerVpcId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *CreateVpcPeeringConnectionInput) SetFleetId(v string) *CreateVpcPeeringConnectionInput { + s.FleetId = &v + return s +} + +// SetPeerVpcAwsAccountId sets the PeerVpcAwsAccountId field's value. +func (s *CreateVpcPeeringConnectionInput) SetPeerVpcAwsAccountId(v string) *CreateVpcPeeringConnectionInput { + s.PeerVpcAwsAccountId = &v + return s +} + +// SetPeerVpcId sets the PeerVpcId field's value. +func (s *CreateVpcPeeringConnectionInput) SetPeerVpcId(v string) *CreateVpcPeeringConnectionInput { + s.PeerVpcId = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringConnectionOutput +type CreateVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteAliasInput +type DeleteAliasInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet alias. Specify the alias you want to delete. + // + // AliasId is a required field + AliasId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAliasInput"} + if s.AliasId == nil { + invalidParams.Add(request.NewErrParamRequired("AliasId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasId sets the AliasId field's value. +func (s *DeleteAliasInput) SetAliasId(v string) *DeleteAliasInput { + s.AliasId = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteAliasOutput +type DeleteAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAliasOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteBuildInput +type DeleteBuildInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a build to delete. 
+ // + // BuildId is a required field + BuildId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBuildInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBuildInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBuildInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBuildInput"} + if s.BuildId == nil { + invalidParams.Add(request.NewErrParamRequired("BuildId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBuildId sets the BuildId field's value. +func (s *DeleteBuildInput) SetBuildId(v string) *DeleteBuildInput { + s.BuildId = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteBuildOutput +type DeleteBuildOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBuildOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBuildOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteFleetInput +type DeleteFleetInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet to be deleted. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFleetInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *DeleteFleetInput) SetFleetId(v string) *DeleteFleetInput { + s.FleetId = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteFleetOutput +type DeleteFleetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueueInput +type DeleteGameSessionQueueInput struct { + _ struct{} `type:"structure"` + + // Descriptive label that is associated with game session queue. Queue names + // must be unique within each region. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGameSessionQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGameSessionQueueInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteGameSessionQueueInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGameSessionQueueInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteGameSessionQueueInput) SetName(v string) *DeleteGameSessionQueueInput { + s.Name = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueueOutput +type DeleteGameSessionQueueOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteGameSessionQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGameSessionQueueOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteMatchmakingConfigurationInput +type DeleteMatchmakingConfigurationInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a matchmaking configuration + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMatchmakingConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMatchmakingConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteMatchmakingConfigurationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteMatchmakingConfigurationInput"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetName sets the Name field's value.
+func (s *DeleteMatchmakingConfigurationInput) SetName(v string) *DeleteMatchmakingConfigurationInput {
+	s.Name = &v
+	return s
+}
+
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteMatchmakingConfigurationOutput
+type DeleteMatchmakingConfigurationOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteMatchmakingConfigurationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMatchmakingConfigurationOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input for a request action.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteScalingPolicyInput
+type DeleteScalingPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// Unique identifier for the fleet that the scaling policy belongs to.
+	//
+	// FleetId is a required field
+	FleetId *string `type:"string" required:"true"`
+
+	// Descriptive label that is associated with a scaling policy. Policy names
+	// do not need to be unique.
+	//
+	// Name is a required field
+	Name *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteScalingPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteScalingPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteScalingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteScalingPolicyInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *DeleteScalingPolicyInput) SetFleetId(v string) *DeleteScalingPolicyInput { + s.FleetId = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeleteScalingPolicyInput) SetName(v string) *DeleteScalingPolicyInput { + s.Name = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteScalingPolicyOutput +type DeleteScalingPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScalingPolicyOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringAuthorizationInput +type DeleteVpcPeeringAuthorizationInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the AWS account that you use to manage your Amazon + // GameLift fleet. You can find your Account ID in the AWS Management Console + // under account settings. + // + // GameLiftAwsAccountId is a required field + GameLiftAwsAccountId *string `min:"1" type:"string" required:"true"` + + // Unique identifier for a VPC with resources to be accessed by your Amazon + // GameLift fleet. The VPC must be in the same region where your fleet is deployed. 
+ // To get VPC information, including IDs, use the Virtual Private Cloud service + // tools, including the VPC Dashboard in the AWS Management Console. + // + // PeerVpcId is a required field + PeerVpcId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcPeeringAuthorizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringAuthorizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpcPeeringAuthorizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpcPeeringAuthorizationInput"} + if s.GameLiftAwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("GameLiftAwsAccountId")) + } + if s.GameLiftAwsAccountId != nil && len(*s.GameLiftAwsAccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameLiftAwsAccountId", 1)) + } + if s.PeerVpcId == nil { + invalidParams.Add(request.NewErrParamRequired("PeerVpcId")) + } + if s.PeerVpcId != nil && len(*s.PeerVpcId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PeerVpcId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameLiftAwsAccountId sets the GameLiftAwsAccountId field's value. +func (s *DeleteVpcPeeringAuthorizationInput) SetGameLiftAwsAccountId(v string) *DeleteVpcPeeringAuthorizationInput { + s.GameLiftAwsAccountId = &v + return s +} + +// SetPeerVpcId sets the PeerVpcId field's value. 
+func (s *DeleteVpcPeeringAuthorizationInput) SetPeerVpcId(v string) *DeleteVpcPeeringAuthorizationInput { + s.PeerVpcId = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringAuthorizationOutput +type DeleteVpcPeeringAuthorizationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcPeeringAuthorizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringAuthorizationOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringConnectionInput +type DeleteVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet. This value must match the fleet ID referenced + // in the VPC peering connection record. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // Unique identifier for a VPC peering connection. This value is included in + // the VpcPeeringConnection object, which can be retrieved by calling DescribeVpcPeeringConnections. + // + // VpcPeeringConnectionId is a required field + VpcPeeringConnectionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteVpcPeeringConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpcPeeringConnectionInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.VpcPeeringConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId")) + } + if s.VpcPeeringConnectionId != nil && len(*s.VpcPeeringConnectionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VpcPeeringConnectionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *DeleteVpcPeeringConnectionInput) SetFleetId(v string) *DeleteVpcPeeringConnectionInput { + s.FleetId = &v + return s +} + +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. +func (s *DeleteVpcPeeringConnectionInput) SetVpcPeeringConnectionId(v string) *DeleteVpcPeeringConnectionInput { + s.VpcPeeringConnectionId = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringConnectionOutput +type DeleteVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeAliasInput +type DescribeAliasInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet alias. Specify the alias you want to retrieve. 
+ // + // AliasId is a required field + AliasId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAliasInput"} + if s.AliasId == nil { + invalidParams.Add(request.NewErrParamRequired("AliasId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasId sets the AliasId field's value. +func (s *DescribeAliasInput) SetAliasId(v string) *DescribeAliasInput { + s.AliasId = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeAliasOutput +type DescribeAliasOutput struct { + _ struct{} `type:"structure"` + + // Object that contains the requested alias. + Alias *Alias `type:"structure"` +} + +// String returns the string representation +func (s DescribeAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAliasOutput) GoString() string { + return s.String() +} + +// SetAlias sets the Alias field's value. +func (s *DescribeAliasOutput) SetAlias(v *Alias) *DescribeAliasOutput { + s.Alias = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeBuildInput +type DescribeBuildInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a build to retrieve properties for. 
+ // + // BuildId is a required field + BuildId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeBuildInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBuildInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeBuildInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBuildInput"} + if s.BuildId == nil { + invalidParams.Add(request.NewErrParamRequired("BuildId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBuildId sets the BuildId field's value. +func (s *DescribeBuildInput) SetBuildId(v string) *DescribeBuildInput { + s.BuildId = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeBuildOutput +type DescribeBuildOutput struct { + _ struct{} `type:"structure"` + + // Set of properties describing the requested build. + Build *Build `type:"structure"` +} + +// String returns the string representation +func (s DescribeBuildOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBuildOutput) GoString() string { + return s.String() +} + +// SetBuild sets the Build field's value. +func (s *DescribeBuildOutput) SetBuild(v *Build) *DescribeBuildOutput { + s.Build = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeEC2InstanceLimitsInput +type DescribeEC2InstanceLimitsInput struct { + _ struct{} `type:"structure"` + + // Name of an EC2 instance type that is supported in Amazon GameLift. 
A fleet + // instance type determines the computing resources of each instance in the + // fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift + // supports the following EC2 instance types. See Amazon EC2 Instance Types + // (http://aws.amazon.com/ec2/instance-types/) for detailed descriptions. Leave + // this parameter blank to retrieve limits for all types. + EC2InstanceType *string `type:"string" enum:"EC2InstanceType"` +} + +// String returns the string representation +func (s DescribeEC2InstanceLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEC2InstanceLimitsInput) GoString() string { + return s.String() +} + +// SetEC2InstanceType sets the EC2InstanceType field's value. +func (s *DescribeEC2InstanceLimitsInput) SetEC2InstanceType(v string) *DescribeEC2InstanceLimitsInput { + s.EC2InstanceType = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeEC2InstanceLimitsOutput +type DescribeEC2InstanceLimitsOutput struct { + _ struct{} `type:"structure"` + + // Object that contains the maximum number of instances for the specified instance + // type. + EC2InstanceLimits []*EC2InstanceLimit `type:"list"` +} + +// String returns the string representation +func (s DescribeEC2InstanceLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEC2InstanceLimitsOutput) GoString() string { + return s.String() +} + +// SetEC2InstanceLimits sets the EC2InstanceLimits field's value. +func (s *DescribeEC2InstanceLimitsOutput) SetEC2InstanceLimits(v []*EC2InstanceLimit) *DescribeEC2InstanceLimitsOutput { + s.EC2InstanceLimits = v + return s +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetAttributesInput +type DescribeFleetAttributesInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet(s) to retrieve attributes for. To request attributes + // for all fleets, leave this parameter empty. + FleetIds []*string `min:"1" type:"list"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. This parameter is ignored when + // the request specifies one or a list of fleet IDs. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. This parameter + // is ignored when the request specifies one or a list of fleet IDs. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetAttributesInput"} + if s.FleetIds != nil && len(s.FleetIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetIds", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetIds sets the FleetIds field's value. 
+func (s *DescribeFleetAttributesInput) SetFleetIds(v []*string) *DescribeFleetAttributesInput { + s.FleetIds = v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeFleetAttributesInput) SetLimit(v int64) *DescribeFleetAttributesInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetAttributesInput) SetNextToken(v string) *DescribeFleetAttributesInput { + s.NextToken = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetAttributesOutput +type DescribeFleetAttributesOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing attribute metadata for each requested fleet + // ID. + FleetAttributes []*FleetAttributes `type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetAttributesOutput) GoString() string { + return s.String() +} + +// SetFleetAttributes sets the FleetAttributes field's value. +func (s *DescribeFleetAttributesOutput) SetFleetAttributes(v []*FleetAttributes) *DescribeFleetAttributesOutput { + s.FleetAttributes = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetAttributesOutput) SetNextToken(v string) *DescribeFleetAttributesOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetCapacityInput +type DescribeFleetCapacityInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet(s) to retrieve capacity information for. To + // request capacity information for all fleets, leave this parameter empty. + FleetIds []*string `min:"1" type:"list"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. This parameter is ignored when + // the request specifies one or a list of fleet IDs. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. This parameter + // is ignored when the request specifies one or a list of fleet IDs. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetCapacityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetCapacityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetCapacityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetCapacityInput"} + if s.FleetIds != nil && len(s.FleetIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetIds", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetIds sets the FleetIds field's value. 
+func (s *DescribeFleetCapacityInput) SetFleetIds(v []*string) *DescribeFleetCapacityInput {
+	s.FleetIds = v
+	return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *DescribeFleetCapacityInput) SetLimit(v int64) *DescribeFleetCapacityInput {
+	s.Limit = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeFleetCapacityInput) SetNextToken(v string) *DescribeFleetCapacityInput {
+	s.NextToken = &v
+	return s
+}
+
+// Represents the returned data in response to a request action.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetCapacityOutput
+type DescribeFleetCapacityOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Collection of objects containing capacity information for each requested
+	// fleet ID. If the request specified no fleet IDs, this collection contains
+	// capacity information for every fleet.
+	FleetCapacity []*FleetCapacity `type:"list"`
+
+	// Token that indicates where to resume retrieving results on the next call
+	// to this action. If no token is returned, these results represent the end
+	// of the list.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeFleetCapacityOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeFleetCapacityOutput) GoString() string {
+	return s.String()
+}
+
+// SetFleetCapacity sets the FleetCapacity field's value.
+func (s *DescribeFleetCapacityOutput) SetFleetCapacity(v []*FleetCapacity) *DescribeFleetCapacityOutput {
+	s.FleetCapacity = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeFleetCapacityOutput) SetNextToken(v string) *DescribeFleetCapacityOutput {
+	s.NextToken = &v
+	return s
+}
+
+// Represents the input for a request action.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetEventsInput +type DescribeFleetEventsInput struct { + _ struct{} `type:"structure"` + + // Most recent date to retrieve event logs for. If no end time is specified, + // this call returns entries from the specified start time up to the present. + // Format is a number expressed in Unix time as milliseconds (ex: "1469498468.057"). + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Unique identifier for a fleet to get event logs for. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` + + // Earliest date to retrieve event logs for. If no start time is specified, + // this call returns entries starting from when the fleet was created to the + // specified end time. Format is a number expressed in Unix time as milliseconds + // (ex: "1469498468.057"). + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s DescribeFleetEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeFleetEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetEventsInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *DescribeFleetEventsInput) SetEndTime(v time.Time) *DescribeFleetEventsInput { + s.EndTime = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeFleetEventsInput) SetFleetId(v string) *DescribeFleetEventsInput { + s.FleetId = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeFleetEventsInput) SetLimit(v int64) *DescribeFleetEventsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetEventsInput) SetNextToken(v string) *DescribeFleetEventsInput { + s.NextToken = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *DescribeFleetEventsInput) SetStartTime(v time.Time) *DescribeFleetEventsInput { + s.StartTime = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetEventsOutput +type DescribeFleetEventsOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing event log entries for the specified fleet. + Events []*Event `type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. 
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetEventsOutput) GoString() string { + return s.String() +} + +// SetEvents sets the Events field's value. +func (s *DescribeFleetEventsOutput) SetEvents(v []*Event) *DescribeFleetEventsOutput { + s.Events = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetEventsOutput) SetNextToken(v string) *DescribeFleetEventsOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetPortSettingsInput +type DescribeFleetPortSettingsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet to retrieve port settings for. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeFleetPortSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetPortSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetPortSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetPortSettingsInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeFleetPortSettingsInput) SetFleetId(v string) *DescribeFleetPortSettingsInput { + s.FleetId = &v + return s +} + +// Represents the returned data in response to a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetPortSettingsOutput +type DescribeFleetPortSettingsOutput struct { + _ struct{} `type:"structure"` + + // Object that contains port settings for the requested fleet ID. + InboundPermissions []*IpPermission `type:"list"` +} + +// String returns the string representation +func (s DescribeFleetPortSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetPortSettingsOutput) GoString() string { + return s.String() +} + +// SetInboundPermissions sets the InboundPermissions field's value. +func (s *DescribeFleetPortSettingsOutput) SetInboundPermissions(v []*IpPermission) *DescribeFleetPortSettingsOutput { + s.InboundPermissions = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetUtilizationInput +type DescribeFleetUtilizationInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet(s) to retrieve utilization data for. To request + // utilization data for all fleets, leave this parameter empty. + FleetIds []*string `min:"1" type:"list"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. This parameter is ignored when + // the request specifies one or a list of fleet IDs. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. This parameter + // is ignored when the request specifies one or a list of fleet IDs. 
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetUtilizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetUtilizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetUtilizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetUtilizationInput"} + if s.FleetIds != nil && len(s.FleetIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetIds", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetIds sets the FleetIds field's value. +func (s *DescribeFleetUtilizationInput) SetFleetIds(v []*string) *DescribeFleetUtilizationInput { + s.FleetIds = v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeFleetUtilizationInput) SetLimit(v int64) *DescribeFleetUtilizationInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetUtilizationInput) SetNextToken(v string) *DescribeFleetUtilizationInput { + s.NextToken = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetUtilizationOutput +type DescribeFleetUtilizationOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing utilization information for each requested + // fleet ID. + FleetUtilization []*FleetUtilization `type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. 
If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetUtilizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetUtilizationOutput) GoString() string { + return s.String() +} + +// SetFleetUtilization sets the FleetUtilization field's value. +func (s *DescribeFleetUtilizationOutput) SetFleetUtilization(v []*FleetUtilization) *DescribeFleetUtilizationOutput { + s.FleetUtilization = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetUtilizationOutput) SetNextToken(v string) *DescribeFleetUtilizationOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetailsInput +type DescribeGameSessionDetailsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for an alias associated with the fleet to retrieve all + // game sessions for. + AliasId *string `type:"string"` + + // Unique identifier for a fleet to retrieve all game sessions active on the + // fleet. + FleetId *string `type:"string"` + + // Unique identifier for the game session to retrieve. + GameSessionId *string `min:"1" type:"string"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` + + // Game session status to filter results on. 
Possible game session statuses + // include ACTIVE, TERMINATED, ACTIVATING and TERMINATING (the last two are + // transitory). + StatusFilter *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGameSessionDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGameSessionDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGameSessionDetailsInput"} + if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.StatusFilter != nil && len(*s.StatusFilter) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StatusFilter", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasId sets the AliasId field's value. +func (s *DescribeGameSessionDetailsInput) SetAliasId(v string) *DescribeGameSessionDetailsInput { + s.AliasId = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeGameSessionDetailsInput) SetFleetId(v string) *DescribeGameSessionDetailsInput { + s.FleetId = &v + return s +} + +// SetGameSessionId sets the GameSessionId field's value. +func (s *DescribeGameSessionDetailsInput) SetGameSessionId(v string) *DescribeGameSessionDetailsInput { + s.GameSessionId = &v + return s +} + +// SetLimit sets the Limit field's value. 
+func (s *DescribeGameSessionDetailsInput) SetLimit(v int64) *DescribeGameSessionDetailsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeGameSessionDetailsInput) SetNextToken(v string) *DescribeGameSessionDetailsInput { + s.NextToken = &v + return s +} + +// SetStatusFilter sets the StatusFilter field's value. +func (s *DescribeGameSessionDetailsInput) SetStatusFilter(v string) *DescribeGameSessionDetailsInput { + s.StatusFilter = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetailsOutput +type DescribeGameSessionDetailsOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing game session properties and the protection + // policy currently in force for each session matching the request. + GameSessionDetails []*GameSessionDetail `type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGameSessionDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionDetailsOutput) GoString() string { + return s.String() +} + +// SetGameSessionDetails sets the GameSessionDetails field's value. +func (s *DescribeGameSessionDetailsOutput) SetGameSessionDetails(v []*GameSessionDetail) *DescribeGameSessionDetailsOutput { + s.GameSessionDetails = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeGameSessionDetailsOutput) SetNextToken(v string) *DescribeGameSessionDetailsOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacementInput +type DescribeGameSessionPlacementInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a game session placement to retrieve. + // + // PlacementId is a required field + PlacementId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeGameSessionPlacementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionPlacementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGameSessionPlacementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGameSessionPlacementInput"} + if s.PlacementId == nil { + invalidParams.Add(request.NewErrParamRequired("PlacementId")) + } + if s.PlacementId != nil && len(*s.PlacementId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlacementId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPlacementId sets the PlacementId field's value. +func (s *DescribeGameSessionPlacementInput) SetPlacementId(v string) *DescribeGameSessionPlacementInput { + s.PlacementId = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacementOutput +type DescribeGameSessionPlacementOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the requested game session placement. 
+ GameSessionPlacement *GameSessionPlacement `type:"structure"` +} + +// String returns the string representation +func (s DescribeGameSessionPlacementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionPlacementOutput) GoString() string { + return s.String() +} + +// SetGameSessionPlacement sets the GameSessionPlacement field's value. +func (s *DescribeGameSessionPlacementOutput) SetGameSessionPlacement(v *GameSessionPlacement) *DescribeGameSessionPlacementOutput { + s.GameSessionPlacement = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueuesInput +type DescribeGameSessionQueuesInput struct { + _ struct{} `type:"structure"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // List of queue names to retrieve information for. To request settings for + // all queues, leave this parameter empty. + Names []*string `type:"list"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGameSessionQueuesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionQueuesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeGameSessionQueuesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGameSessionQueuesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *DescribeGameSessionQueuesInput) SetLimit(v int64) *DescribeGameSessionQueuesInput { + s.Limit = &v + return s +} + +// SetNames sets the Names field's value. +func (s *DescribeGameSessionQueuesInput) SetNames(v []*string) *DescribeGameSessionQueuesInput { + s.Names = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeGameSessionQueuesInput) SetNextToken(v string) *DescribeGameSessionQueuesInput { + s.NextToken = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueuesOutput +type DescribeGameSessionQueuesOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects that describes the requested game session queues. + GameSessionQueues []*GameSessionQueue `type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGameSessionQueuesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionQueuesOutput) GoString() string { + return s.String() +} + +// SetGameSessionQueues sets the GameSessionQueues field's value. 
+func (s *DescribeGameSessionQueuesOutput) SetGameSessionQueues(v []*GameSessionQueue) *DescribeGameSessionQueuesOutput { + s.GameSessionQueues = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeGameSessionQueuesOutput) SetNextToken(v string) *DescribeGameSessionQueuesOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionsInput +type DescribeGameSessionsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for an alias associated with the fleet to retrieve all + // game sessions for. + AliasId *string `type:"string"` + + // Unique identifier for a fleet to retrieve all game sessions for. + FleetId *string `type:"string"` + + // Unique identifier for the game session to retrieve. You can use either a + // GameSessionId or GameSessionArn value. + GameSessionId *string `min:"1" type:"string"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` + + // Game session status to filter results on. Possible game session statuses + // include ACTIVE, TERMINATED, ACTIVATING, and TERMINATING (the last two are + // transitory). + StatusFilter *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGameSessionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeGameSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGameSessionsInput"} + if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.StatusFilter != nil && len(*s.StatusFilter) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StatusFilter", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasId sets the AliasId field's value. +func (s *DescribeGameSessionsInput) SetAliasId(v string) *DescribeGameSessionsInput { + s.AliasId = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeGameSessionsInput) SetFleetId(v string) *DescribeGameSessionsInput { + s.FleetId = &v + return s +} + +// SetGameSessionId sets the GameSessionId field's value. +func (s *DescribeGameSessionsInput) SetGameSessionId(v string) *DescribeGameSessionsInput { + s.GameSessionId = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeGameSessionsInput) SetLimit(v int64) *DescribeGameSessionsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeGameSessionsInput) SetNextToken(v string) *DescribeGameSessionsInput { + s.NextToken = &v + return s +} + +// SetStatusFilter sets the StatusFilter field's value. +func (s *DescribeGameSessionsInput) SetStatusFilter(v string) *DescribeGameSessionsInput { + s.StatusFilter = &v + return s +} + +// Represents the returned data in response to a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionsOutput +type DescribeGameSessionsOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing game session properties for each session + // matching the request. + GameSessions []*GameSession `type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGameSessionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionsOutput) GoString() string { + return s.String() +} + +// SetGameSessions sets the GameSessions field's value. +func (s *DescribeGameSessionsOutput) SetGameSessions(v []*GameSession) *DescribeGameSessionsOutput { + s.GameSessions = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeGameSessionsOutput) SetNextToken(v string) *DescribeGameSessionsOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstancesInput +type DescribeInstancesInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet to retrieve instance information for. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // Unique identifier for an instance to retrieve. Specify an instance ID or + // leave blank to retrieve all instances in the fleet. + InstanceId *string `type:"string"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. 
Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstancesInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeInstancesInput) SetFleetId(v string) *DescribeInstancesInput { + s.FleetId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *DescribeInstancesInput) SetInstanceId(v string) *DescribeInstancesInput { + s.InstanceId = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeInstancesInput) SetLimit(v int64) *DescribeInstancesInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstancesInput) SetNextToken(v string) *DescribeInstancesInput { + s.NextToken = &v + return s +} + +// Represents the returned data in response to a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstancesOutput +type DescribeInstancesOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing properties for each instance returned. + Instances []*Instance `type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesOutput) GoString() string { + return s.String() +} + +// SetInstances sets the Instances field's value. +func (s *DescribeInstancesOutput) SetInstances(v []*Instance) *DescribeInstancesOutput { + s.Instances = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstancesOutput) SetNextToken(v string) *DescribeInstancesOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingConfigurationsInput +type DescribeMatchmakingConfigurationsInput struct { + _ struct{} `type:"structure"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. This parameter is limited to 10. + Limit *int64 `min:"1" type:"integer"` + + // Unique identifier for a matchmaking configuration(s) to retrieve. To request + // all existing configurations, leave this parameter empty. + Names []*string `type:"list"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. 
+ NextToken *string `min:"1" type:"string"` + + // Unique identifier for a matchmaking rule set. Use this parameter to retrieve + // all matchmaking configurations that use this rule set. + RuleSetName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeMatchmakingConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMatchmakingConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeMatchmakingConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMatchmakingConfigurationsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.RuleSetName != nil && len(*s.RuleSetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleSetName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *DescribeMatchmakingConfigurationsInput) SetLimit(v int64) *DescribeMatchmakingConfigurationsInput { + s.Limit = &v + return s +} + +// SetNames sets the Names field's value. +func (s *DescribeMatchmakingConfigurationsInput) SetNames(v []*string) *DescribeMatchmakingConfigurationsInput { + s.Names = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMatchmakingConfigurationsInput) SetNextToken(v string) *DescribeMatchmakingConfigurationsInput { + s.NextToken = &v + return s +} + +// SetRuleSetName sets the RuleSetName field's value. 
+func (s *DescribeMatchmakingConfigurationsInput) SetRuleSetName(v string) *DescribeMatchmakingConfigurationsInput { + s.RuleSetName = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingConfigurationsOutput +type DescribeMatchmakingConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // Collection of requested matchmaking configuration objects. + Configurations []*MatchmakingConfiguration `type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeMatchmakingConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMatchmakingConfigurationsOutput) GoString() string { + return s.String() +} + +// SetConfigurations sets the Configurations field's value. +func (s *DescribeMatchmakingConfigurationsOutput) SetConfigurations(v []*MatchmakingConfiguration) *DescribeMatchmakingConfigurationsOutput { + s.Configurations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMatchmakingConfigurationsOutput) SetNextToken(v string) *DescribeMatchmakingConfigurationsOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingInput +type DescribeMatchmakingInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a matchmaking ticket. To request all existing tickets, + // leave this parameter empty. 
+ // + // TicketIds is a required field + TicketIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeMatchmakingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMatchmakingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeMatchmakingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMatchmakingInput"} + if s.TicketIds == nil { + invalidParams.Add(request.NewErrParamRequired("TicketIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTicketIds sets the TicketIds field's value. +func (s *DescribeMatchmakingInput) SetTicketIds(v []*string) *DescribeMatchmakingInput { + s.TicketIds = v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingOutput +type DescribeMatchmakingOutput struct { + _ struct{} `type:"structure"` + + // Collection of existing matchmaking ticket objects matching the request. + TicketList []*MatchmakingTicket `type:"list"` +} + +// String returns the string representation +func (s DescribeMatchmakingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMatchmakingOutput) GoString() string { + return s.String() +} + +// SetTicketList sets the TicketList field's value. +func (s *DescribeMatchmakingOutput) SetTicketList(v []*MatchmakingTicket) *DescribeMatchmakingOutput { + s.TicketList = v + return s +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingRuleSetsInput +type DescribeMatchmakingRuleSetsInput struct { + _ struct{} `type:"structure"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Unique identifier for a matchmaking rule set. This name is used to identify + // the rule set associated with a matchmaking configuration. + Names []*string `min:"1" type:"list"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeMatchmakingRuleSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMatchmakingRuleSetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeMatchmakingRuleSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMatchmakingRuleSetsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.Names != nil && len(s.Names) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Names", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *DescribeMatchmakingRuleSetsInput) SetLimit(v int64) *DescribeMatchmakingRuleSetsInput { + s.Limit = &v + return s +} + +// SetNames sets the Names field's value. 
+func (s *DescribeMatchmakingRuleSetsInput) SetNames(v []*string) *DescribeMatchmakingRuleSetsInput { + s.Names = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMatchmakingRuleSetsInput) SetNextToken(v string) *DescribeMatchmakingRuleSetsInput { + s.NextToken = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingRuleSetsOutput +type DescribeMatchmakingRuleSetsOutput struct { + _ struct{} `type:"structure"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` + + // Collection of requested matchmaking rule set objects. + // + // RuleSets is a required field + RuleSets []*MatchmakingRuleSet `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeMatchmakingRuleSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMatchmakingRuleSetsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMatchmakingRuleSetsOutput) SetNextToken(v string) *DescribeMatchmakingRuleSetsOutput { + s.NextToken = &v + return s +} + +// SetRuleSets sets the RuleSets field's value. +func (s *DescribeMatchmakingRuleSetsOutput) SetRuleSets(v []*MatchmakingRuleSet) *DescribeMatchmakingRuleSetsOutput { + s.RuleSets = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribePlayerSessionsInput +type DescribePlayerSessionsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the game session to retrieve player sessions for. 
+ GameSessionId *string `min:"1" type:"string"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. If a player session ID is specified, + // this parameter is ignored. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. If a player session + // ID is specified, this parameter is ignored. + NextToken *string `min:"1" type:"string"` + + // Unique identifier for a player to retrieve player sessions for. + PlayerId *string `min:"1" type:"string"` + + // Unique identifier for a player session to retrieve. + PlayerSessionId *string `type:"string"` + + // Player session status to filter results on. + // + // Possible player session statuses include the following: + // + // * RESERVED -- The player session request has been received, but the player + // has not yet connected to the server process and/or been validated. + // + // * ACTIVE -- The player has been validated by the server process and is + // currently connected. + // + // * COMPLETED -- The player connection has been dropped. + // + // * TIMEDOUT -- A player session request was received, but the player did + // not connect and/or was not validated within the timeout limit (60 seconds). + PlayerSessionStatusFilter *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribePlayerSessionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlayerSessionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribePlayerSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePlayerSessionsInput"} + if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.PlayerId != nil && len(*s.PlayerId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1)) + } + if s.PlayerSessionStatusFilter != nil && len(*s.PlayerSessionStatusFilter) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlayerSessionStatusFilter", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameSessionId sets the GameSessionId field's value. +func (s *DescribePlayerSessionsInput) SetGameSessionId(v string) *DescribePlayerSessionsInput { + s.GameSessionId = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribePlayerSessionsInput) SetLimit(v int64) *DescribePlayerSessionsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePlayerSessionsInput) SetNextToken(v string) *DescribePlayerSessionsInput { + s.NextToken = &v + return s +} + +// SetPlayerId sets the PlayerId field's value. +func (s *DescribePlayerSessionsInput) SetPlayerId(v string) *DescribePlayerSessionsInput { + s.PlayerId = &v + return s +} + +// SetPlayerSessionId sets the PlayerSessionId field's value. +func (s *DescribePlayerSessionsInput) SetPlayerSessionId(v string) *DescribePlayerSessionsInput { + s.PlayerSessionId = &v + return s +} + +// SetPlayerSessionStatusFilter sets the PlayerSessionStatusFilter field's value. 
+func (s *DescribePlayerSessionsInput) SetPlayerSessionStatusFilter(v string) *DescribePlayerSessionsInput { + s.PlayerSessionStatusFilter = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribePlayerSessionsOutput +type DescribePlayerSessionsOutput struct { + _ struct{} `type:"structure"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` + + // Collection of objects containing properties for each player session that + // matches the request. + PlayerSessions []*PlayerSession `type:"list"` +} + +// String returns the string representation +func (s DescribePlayerSessionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlayerSessionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePlayerSessionsOutput) SetNextToken(v string) *DescribePlayerSessionsOutput { + s.NextToken = &v + return s +} + +// SetPlayerSessions sets the PlayerSessions field's value. +func (s *DescribePlayerSessionsOutput) SetPlayerSessions(v []*PlayerSession) *DescribePlayerSessionsOutput { + s.PlayerSessions = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeRuntimeConfigurationInput +type DescribeRuntimeConfigurationInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet to get the run-time configuration for. 
+ // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeRuntimeConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRuntimeConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeRuntimeConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRuntimeConfigurationInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeRuntimeConfigurationInput) SetFleetId(v string) *DescribeRuntimeConfigurationInput { + s.FleetId = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeRuntimeConfigurationOutput +type DescribeRuntimeConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Instructions describing how server processes should be launched and maintained + // on each instance in the fleet. + RuntimeConfiguration *RuntimeConfiguration `type:"structure"` +} + +// String returns the string representation +func (s DescribeRuntimeConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRuntimeConfigurationOutput) GoString() string { + return s.String() +} + +// SetRuntimeConfiguration sets the RuntimeConfiguration field's value. +func (s *DescribeRuntimeConfigurationOutput) SetRuntimeConfiguration(v *RuntimeConfiguration) *DescribeRuntimeConfigurationOutput { + s.RuntimeConfiguration = v + return s +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeScalingPoliciesInput +type DescribeScalingPoliciesInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet to retrieve scaling policies for. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` + + // Scaling policy status to filter results on. A scaling policy is only in force + // when in an ACTIVE status. + // + // * ACTIVE -- The scaling policy is currently in force. + // + // * UPDATEREQUESTED -- A request to update the scaling policy has been received. + // + // * UPDATING -- A change is being made to the scaling policy. + // + // * DELETEREQUESTED -- A request to delete the scaling policy has been received. + // + // * DELETING -- The scaling policy is being deleted. + // + // * DELETED -- The scaling policy has been deleted. + // + // * ERROR -- An error occurred in creating the policy. It should be removed + // and recreated. + StatusFilter *string `type:"string" enum:"ScalingStatusType"` +} + +// String returns the string representation +func (s DescribeScalingPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeScalingPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScalingPoliciesInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeScalingPoliciesInput) SetFleetId(v string) *DescribeScalingPoliciesInput { + s.FleetId = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeScalingPoliciesInput) SetLimit(v int64) *DescribeScalingPoliciesInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeScalingPoliciesInput) SetNextToken(v string) *DescribeScalingPoliciesInput { + s.NextToken = &v + return s +} + +// SetStatusFilter sets the StatusFilter field's value. +func (s *DescribeScalingPoliciesInput) SetStatusFilter(v string) *DescribeScalingPoliciesInput { + s.StatusFilter = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeScalingPoliciesOutput +type DescribeScalingPoliciesOutput struct { + _ struct{} `type:"structure"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` + + // Collection of objects containing the scaling policies matching the request. 
+ ScalingPolicies []*ScalingPolicy `type:"list"` +} + +// String returns the string representation +func (s DescribeScalingPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingPoliciesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeScalingPoliciesOutput) SetNextToken(v string) *DescribeScalingPoliciesOutput { + s.NextToken = &v + return s +} + +// SetScalingPolicies sets the ScalingPolicies field's value. +func (s *DescribeScalingPoliciesOutput) SetScalingPolicies(v []*ScalingPolicy) *DescribeScalingPoliciesOutput { + s.ScalingPolicies = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringAuthorizationsInput +type DescribeVpcPeeringAuthorizationsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcPeeringAuthorizationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringAuthorizationsInput) GoString() string { + return s.String() +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringAuthorizationsOutput +type DescribeVpcPeeringAuthorizationsOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects that describe all valid VPC peering operations for + // the current AWS account. + VpcPeeringAuthorizations []*VpcPeeringAuthorization `type:"list"` +} + +// String returns the string representation +func (s DescribeVpcPeeringAuthorizationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringAuthorizationsOutput) GoString() string { + return s.String() +} + +// SetVpcPeeringAuthorizations sets the VpcPeeringAuthorizations field's value. 
+func (s *DescribeVpcPeeringAuthorizationsOutput) SetVpcPeeringAuthorizations(v []*VpcPeeringAuthorization) *DescribeVpcPeeringAuthorizationsOutput { + s.VpcPeeringAuthorizations = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringConnectionsInput +type DescribeVpcPeeringConnectionsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet. + FleetId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsInput) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeVpcPeeringConnectionsInput) SetFleetId(v string) *DescribeVpcPeeringConnectionsInput { + s.FleetId = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringConnectionsOutput +type DescribeVpcPeeringConnectionsOutput struct { + _ struct{} `type:"structure"` + + // Collection of VPC peering connection records that match the request. + VpcPeeringConnections []*VpcPeeringConnection `type:"list"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) GoString() string { + return s.String() +} + +// SetVpcPeeringConnections sets the VpcPeeringConnections field's value. 
+func (s *DescribeVpcPeeringConnectionsOutput) SetVpcPeeringConnections(v []*VpcPeeringConnection) *DescribeVpcPeeringConnectionsOutput { + s.VpcPeeringConnections = v + return s +} + +// Player information for use when creating player sessions using a game session +// placement request with StartGameSessionPlacement. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DesiredPlayerSession +type DesiredPlayerSession struct { + _ struct{} `type:"structure"` + + // Developer-defined information related to a player. Amazon GameLift does not + // use this data, so it can be formatted as needed for use in the game. + PlayerData *string `min:"1" type:"string"` + + // Unique identifier for a player to associate with the player session. + PlayerId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DesiredPlayerSession) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DesiredPlayerSession) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DesiredPlayerSession) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DesiredPlayerSession"} + if s.PlayerData != nil && len(*s.PlayerData) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlayerData", 1)) + } + if s.PlayerId != nil && len(*s.PlayerId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPlayerData sets the PlayerData field's value. +func (s *DesiredPlayerSession) SetPlayerData(v string) *DesiredPlayerSession { + s.PlayerData = &v + return s +} + +// SetPlayerId sets the PlayerId field's value. +func (s *DesiredPlayerSession) SetPlayerId(v string) *DesiredPlayerSession { + s.PlayerId = &v + return s +} + +// Current status of fleet capacity. 
The number of active instances should match +// or be in the process of matching the number of desired instances. Pending +// and terminating counts are non-zero only if fleet capacity is adjusting to +// an UpdateFleetCapacity request, or if access to resources is temporarily +// affected. +// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/EC2InstanceCounts +type EC2InstanceCounts struct { + _ struct{} `type:"structure"` + + // Actual number of active instances in the fleet. + ACTIVE *int64 `type:"integer"` + + // Ideal number of active instances in the fleet. + DESIRED *int64 `type:"integer"` + + // Number of active instances in the fleet that are not currently hosting a + // game session. + IDLE *int64 `type:"integer"` + + // Maximum value allowed for the fleet's instance count. + MAXIMUM *int64 `type:"integer"` + + // Minimum value allowed for the fleet's instance count. + MINIMUM *int64 `type:"integer"` + + // Number of instances in the fleet that are starting but not yet active. + PENDING *int64 `type:"integer"` + + // Number of instances in the fleet that are no longer active but haven't yet + // been terminated. 
+ TERMINATING *int64 `type:"integer"` +} + +// String returns the string representation +func (s EC2InstanceCounts) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2InstanceCounts) GoString() string { + return s.String() +} + +// SetACTIVE sets the ACTIVE field's value. +func (s *EC2InstanceCounts) SetACTIVE(v int64) *EC2InstanceCounts { + s.ACTIVE = &v + return s +} + +// SetDESIRED sets the DESIRED field's value. +func (s *EC2InstanceCounts) SetDESIRED(v int64) *EC2InstanceCounts { + s.DESIRED = &v + return s +} + +// SetIDLE sets the IDLE field's value. +func (s *EC2InstanceCounts) SetIDLE(v int64) *EC2InstanceCounts { + s.IDLE = &v + return s +} + +// SetMAXIMUM sets the MAXIMUM field's value. +func (s *EC2InstanceCounts) SetMAXIMUM(v int64) *EC2InstanceCounts { + s.MAXIMUM = &v + return s +} + +// SetMINIMUM sets the MINIMUM field's value. +func (s *EC2InstanceCounts) SetMINIMUM(v int64) *EC2InstanceCounts { + s.MINIMUM = &v + return s +} + +// SetPENDING sets the PENDING field's value. +func (s *EC2InstanceCounts) SetPENDING(v int64) *EC2InstanceCounts { + s.PENDING = &v + return s +} + +// SetTERMINATING sets the TERMINATING field's value. +func (s *EC2InstanceCounts) SetTERMINATING(v int64) *EC2InstanceCounts { + s.TERMINATING = &v + return s +} + +// Maximum number of instances allowed based on the Amazon Elastic Compute Cloud +// (Amazon EC2) instance type. Instance limits can be retrieved by calling DescribeEC2InstanceLimits. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/EC2InstanceLimit +type EC2InstanceLimit struct { + _ struct{} `type:"structure"` + + // Number of instances of the specified type that are currently in use by this + // AWS account. + CurrentInstances *int64 `type:"integer"` + + // Name of an EC2 instance type that is supported in Amazon GameLift. 
A fleet + // instance type determines the computing resources of each instance in the + // fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift + // supports the following EC2 instance types. See Amazon EC2 Instance Types + // (http://aws.amazon.com/ec2/instance-types/) for detailed descriptions. + EC2InstanceType *string `type:"string" enum:"EC2InstanceType"` + + // Number of instances allowed. + InstanceLimit *int64 `type:"integer"` +} + +// String returns the string representation +func (s EC2InstanceLimit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2InstanceLimit) GoString() string { + return s.String() +} + +// SetCurrentInstances sets the CurrentInstances field's value. +func (s *EC2InstanceLimit) SetCurrentInstances(v int64) *EC2InstanceLimit { + s.CurrentInstances = &v + return s +} + +// SetEC2InstanceType sets the EC2InstanceType field's value. +func (s *EC2InstanceLimit) SetEC2InstanceType(v string) *EC2InstanceLimit { + s.EC2InstanceType = &v + return s +} + +// SetInstanceLimit sets the InstanceLimit field's value. +func (s *EC2InstanceLimit) SetInstanceLimit(v int64) *EC2InstanceLimit { + s.InstanceLimit = &v + return s +} + +// Log entry describing an event that involves Amazon GameLift resources (such +// as a fleet). In addition to tracking activity, event codes and messages can +// provide additional information for troubleshooting and debugging problems. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/Event +type Event struct { + _ struct{} `type:"structure"` + + // Type of event being logged. The following events are currently in use: + // + // General events: + // + // * GENERIC_EVENT -- An unspecified event has occurred. + // + // Fleet creation events: + // + // * FLEET_CREATED -- A fleet record was successfully created with a status + // of NEW. Event messaging includes the fleet ID. 
+ // + // * FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. + // The compressed build has started downloading to a fleet instance for installation. + // + // * FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the + // fleet instance. + // + // * FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully + // downloaded to an instance, and the build files are now being extracted + // from the uploaded build and saved to an instance. Failure at this stage + // prevents a fleet from moving to ACTIVE status. Logs for this stage display + // a list of the files that are extracted and saved on the instance. Access + // the logs by using the URL in PreSignedLogUrl. + // + // * FLEET_CREATION_RUNNING_INSTALLER – The game server build files were + // successfully extracted, and the Amazon GameLift is now running the build's + // install script (if one is included). Failure in this stage prevents a + // fleet from moving to ACTIVE status. Logs for this stage list the installation + // steps and whether or not the install completed successfully. Access the + // logs by using the URL in PreSignedLogUrl. + // + // * FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, + // and the Amazon GameLift is now verifying that the game server launch paths, + // which are specified in the fleet's run-time configuration, exist. If any + // listed launch path exists, Amazon GameLift tries to launch a game server + // process and waits for the process to report ready. Failures in this stage + // prevent a fleet from moving to ACTIVE status. Logs for this stage list + // the launch paths in the run-time configuration and indicate whether each + // is found. Access the logs by using the URL in PreSignedLogUrl. + // + // * FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. 
+ // + // * FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the run-time + // configuration failed because the executable specified in a launch path + // does not exist on the instance. + // + // * FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING. + // + // * FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the run-time + // configuration failed because the executable specified in a launch path + // failed to run on the fleet instance. + // + // * FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. + // + // + // * FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete + // one of the steps in the fleet activation process. This event code indicates + // that the game build was successfully downloaded to a fleet instance, built, + // and validated, but was not able to start a server process. A possible + // reason for failure is that the game server is not reporting "process ready" + // to the Amazon GameLift service. + // + // * FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to + // ACTIVE. The fleet is now ready to host game sessions. + // + // VPC peering events: + // + // * FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established + // between the VPC for an Amazon GameLift fleet and a VPC in your AWS account. + // + // * FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. + // Event details and status information (see DescribeVpcPeeringConnections) + // provide additional detail. A common reason for peering failure is that + // the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve + // this, change the CIDR block for the VPC in your AWS account. 
For more + // information on VPC peering failures, see http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html + // (http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html) + // + // * FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully + // deleted. + // + // Other fleet events: + // + // * FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings + // (desired instances, minimum/maximum scaling limits). Event messaging includes + // the new capacity settings. + // + // * FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made + // to the fleet's game session protection policy setting. Event messaging + // includes both the old and new policy setting. + // + // * FLEET_DELETED -- A request to delete a fleet was initiated. + EventCode *string `type:"string" enum:"EventCode"` + + // Unique identifier for a fleet event. + EventId *string `min:"1" type:"string"` + + // Time stamp indicating when this event occurred. Format is a number expressed + // in Unix time as milliseconds (for example "1469498468.057"). + EventTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Additional information related to the event. + Message *string `min:"1" type:"string"` + + // Location of stored logs with additional detail that is related to the event. + // This is useful for debugging issues. The URL is valid for 15 minutes. You + // can also access fleet creation logs through the Amazon GameLift console. + PreSignedLogUrl *string `min:"1" type:"string"` + + // Unique identifier for an event resource, such as a fleet ID. + ResourceId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// SetEventCode sets the EventCode field's value. 
+func (s *Event) SetEventCode(v string) *Event { + s.EventCode = &v + return s +} + +// SetEventId sets the EventId field's value. +func (s *Event) SetEventId(v string) *Event { + s.EventId = &v + return s +} + +// SetEventTime sets the EventTime field's value. +func (s *Event) SetEventTime(v time.Time) *Event { + s.EventTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *Event) SetMessage(v string) *Event { + s.Message = &v + return s +} + +// SetPreSignedLogUrl sets the PreSignedLogUrl field's value. +func (s *Event) SetPreSignedLogUrl(v string) *Event { + s.PreSignedLogUrl = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *Event) SetResourceId(v string) *Event { + s.ResourceId = &v + return s +} + +// General properties describing a fleet. +// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/FleetAttributes +type FleetAttributes struct { + _ struct{} `type:"structure"` + + // Unique identifier for a build. + BuildId *string `type:"string"` + + // Time stamp indicating when this data object was created. Format is a number + // expressed in Unix time as milliseconds (for example "1469498468.057"). 
+ CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Human-readable description of the fleet. + Description *string `min:"1" type:"string"` + + // Identifier for a fleet that is unique across all regions. + FleetArn *string `min:"1" type:"string"` + + // Unique identifier for a fleet. + FleetId *string `type:"string"` + + // Location of default log files. When a server process is shut down, Amazon + // GameLift captures and stores any log files in this location. These logs are + // in addition to game session logs; see more on game session logs in the Amazon + // GameLift Developer Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-api-server-code). + // If no default log path for a fleet is specified, Amazon GameLift automatically + // uploads logs that are stored on each instance at C:\game\logs (for Windows) + // or /local/game/logs (for Linux). Use the Amazon GameLift console to access + // stored logs. + LogPaths []*string `type:"list"` + + // Names of metric groups that this fleet is included in. In Amazon CloudWatch, + // you can view metrics for an individual fleet or aggregated metrics for fleets + // that are in a fleet metric group. A fleet can be included in only one metric + // group at a time. + MetricGroups []*string `type:"list"` + + // Descriptive label that is associated with a fleet. Fleet names do not need + // to be unique. + Name *string `min:"1" type:"string"` + + // Type of game session protection to set for all new instances started in the + // fleet. + // + // * NoProtection -- The game session can be terminated during a scale-down + // event. + // + // * FullProtection -- If the game session is in an ACTIVE status, it cannot + // be terminated during a scale-down event. + NewGameSessionProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` + + // Operating system of the fleet's computing resources. 
A fleet's operating + // system depends on the OS specified for the build that is deployed on this + // fleet. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` + + // Fleet policy to limit the number of game sessions an individual player can + // create over a span of time. + ResourceCreationLimitPolicy *ResourceCreationLimitPolicy `type:"structure"` + + // Game server launch parameters specified for fleets created before 2016-08-04 + // (or AWS SDK v. 0.12.16). Server launch parameters for fleets created after + // this date are specified in the fleet's RuntimeConfiguration. + ServerLaunchParameters *string `min:"1" type:"string"` + + // Path to a game server executable in the fleet's build, specified for fleets + // created before 2016-08-04 (or AWS SDK v. 0.12.16). Server launch paths for + // fleets created after this date are specified in the fleet's RuntimeConfiguration. + ServerLaunchPath *string `min:"1" type:"string"` + + // Current status of the fleet. + // + // Possible fleet statuses include the following: + // + // * NEW -- A new fleet has been defined and desired instances is set to + // 1. + // + // * DOWNLOADING/VALIDATING/BUILDING/ACTIVATING -- Amazon GameLift is setting + // up the new fleet, creating new instances with the game build and starting + // server processes. + // + // * ACTIVE -- Hosts can now accept game sessions. + // + // * ERROR -- An error occurred when downloading, validating, building, or + // activating the fleet. + // + // * DELETING -- Hosts are responding to a delete fleet request. + // + // * TERMINATED -- The fleet no longer exists. + Status *string `type:"string" enum:"FleetStatus"` + + // Time stamp indicating when this data object was terminated. Format is a number + // expressed in Unix time as milliseconds (for example "1469498468.057"). 
+ TerminationTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s FleetAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetAttributes) GoString() string { + return s.String() +} + +// SetBuildId sets the BuildId field's value. +func (s *FleetAttributes) SetBuildId(v string) *FleetAttributes { + s.BuildId = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *FleetAttributes) SetCreationTime(v time.Time) *FleetAttributes { + s.CreationTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *FleetAttributes) SetDescription(v string) *FleetAttributes { + s.Description = &v + return s +} + +// SetFleetArn sets the FleetArn field's value. +func (s *FleetAttributes) SetFleetArn(v string) *FleetAttributes { + s.FleetArn = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *FleetAttributes) SetFleetId(v string) *FleetAttributes { + s.FleetId = &v + return s +} + +// SetLogPaths sets the LogPaths field's value. +func (s *FleetAttributes) SetLogPaths(v []*string) *FleetAttributes { + s.LogPaths = v + return s +} + +// SetMetricGroups sets the MetricGroups field's value. +func (s *FleetAttributes) SetMetricGroups(v []*string) *FleetAttributes { + s.MetricGroups = v + return s +} + +// SetName sets the Name field's value. +func (s *FleetAttributes) SetName(v string) *FleetAttributes { + s.Name = &v + return s +} + +// SetNewGameSessionProtectionPolicy sets the NewGameSessionProtectionPolicy field's value. +func (s *FleetAttributes) SetNewGameSessionProtectionPolicy(v string) *FleetAttributes { + s.NewGameSessionProtectionPolicy = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. 
+func (s *FleetAttributes) SetOperatingSystem(v string) *FleetAttributes { + s.OperatingSystem = &v + return s +} + +// SetResourceCreationLimitPolicy sets the ResourceCreationLimitPolicy field's value. +func (s *FleetAttributes) SetResourceCreationLimitPolicy(v *ResourceCreationLimitPolicy) *FleetAttributes { + s.ResourceCreationLimitPolicy = v + return s +} + +// SetServerLaunchParameters sets the ServerLaunchParameters field's value. +func (s *FleetAttributes) SetServerLaunchParameters(v string) *FleetAttributes { + s.ServerLaunchParameters = &v + return s +} + +// SetServerLaunchPath sets the ServerLaunchPath field's value. +func (s *FleetAttributes) SetServerLaunchPath(v string) *FleetAttributes { + s.ServerLaunchPath = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *FleetAttributes) SetStatus(v string) *FleetAttributes { + s.Status = &v + return s +} + +// SetTerminationTime sets the TerminationTime field's value. +func (s *FleetAttributes) SetTerminationTime(v time.Time) *FleetAttributes { + s.TerminationTime = &v + return s +} + +// Information about the fleet's capacity. Fleet capacity is measured in EC2 +// instances. By default, new fleets have a capacity of one instance, but can +// be updated as needed. The maximum number of instances for a fleet is determined +// by the fleet's instance type. 
+// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/FleetCapacity +type FleetCapacity struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet. + FleetId *string `type:"string"` + + // Current status of fleet capacity. + InstanceCounts *EC2InstanceCounts `type:"structure"` + + // Name of an EC2 instance type that is supported in Amazon GameLift. A fleet + // instance type determines the computing resources of each instance in the + // fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift + // supports the following EC2 instance types. See Amazon EC2 Instance Types + // (http://aws.amazon.com/ec2/instance-types/) for detailed descriptions. + InstanceType *string `type:"string" enum:"EC2InstanceType"` +} + +// String returns the string representation +func (s FleetCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetCapacity) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *FleetCapacity) SetFleetId(v string) *FleetCapacity { + s.FleetId = &v + return s +} + +// SetInstanceCounts sets the InstanceCounts field's value. 
+func (s *FleetCapacity) SetInstanceCounts(v *EC2InstanceCounts) *FleetCapacity { + s.InstanceCounts = v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *FleetCapacity) SetInstanceType(v string) *FleetCapacity { + s.InstanceType = &v + return s +} + +// Current status of fleet utilization, including the number of game and player +// sessions being hosted. +// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/FleetUtilization +type FleetUtilization struct { + _ struct{} `type:"structure"` + + // Number of active game sessions currently being hosted on all instances in + // the fleet. + ActiveGameSessionCount *int64 `type:"integer"` + + // Number of server processes in an ACTIVE status currently running across all + // instances in the fleet + ActiveServerProcessCount *int64 `type:"integer"` + + // Number of active player sessions currently being hosted on all instances + // in the fleet. + CurrentPlayerSessionCount *int64 `type:"integer"` + + // Unique identifier for a fleet. + FleetId *string `type:"string"` + + // Maximum players allowed across all game sessions currently being hosted on + // all instances in the fleet. 
+ MaximumPlayerSessionCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s FleetUtilization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetUtilization) GoString() string { + return s.String() +} + +// SetActiveGameSessionCount sets the ActiveGameSessionCount field's value. +func (s *FleetUtilization) SetActiveGameSessionCount(v int64) *FleetUtilization { + s.ActiveGameSessionCount = &v + return s +} + +// SetActiveServerProcessCount sets the ActiveServerProcessCount field's value. +func (s *FleetUtilization) SetActiveServerProcessCount(v int64) *FleetUtilization { + s.ActiveServerProcessCount = &v + return s +} + +// SetCurrentPlayerSessionCount sets the CurrentPlayerSessionCount field's value. +func (s *FleetUtilization) SetCurrentPlayerSessionCount(v int64) *FleetUtilization { + s.CurrentPlayerSessionCount = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *FleetUtilization) SetFleetId(v string) *FleetUtilization { + s.FleetId = &v + return s +} + +// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. +func (s *FleetUtilization) SetMaximumPlayerSessionCount(v int64) *FleetUtilization { + s.MaximumPlayerSessionCount = &v + return s +} + +// Set of key-value pairs that contain information about a game session. When +// included in a game session request, these properties communicate details +// to be used when setting up the new game session, such as to specify a game +// mode, level, or map. Game properties are passed to the game server process +// when initiating a new game session; the server process uses the properties +// as appropriate. For more information, see the Amazon GameLift Developer +// Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-client-api.html#gamelift-sdk-client-api-create). 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameProperty +type GameProperty struct { + _ struct{} `type:"structure"` + + // Game property identifier. + // + // Key is a required field + Key *string `type:"string" required:"true"` + + // Game property value. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GameProperty) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GameProperty) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GameProperty) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GameProperty"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *GameProperty) SetKey(v string) *GameProperty { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *GameProperty) SetValue(v string) *GameProperty { + s.Value = &v + return s +} + +// Properties describing a game session. +// +// A game session in ACTIVE status can host players. When a game session ends, +// its status is set to TERMINATED. +// +// Once the session ends, the game session object is retained for 30 days. This +// means you can reuse idempotency token values after this time. Game session +// logs are retained for 14 days. 
+// +// Game-session-related operations include: +// +// * CreateGameSession +// +// * DescribeGameSessions +// +// * DescribeGameSessionDetails +// +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSession +type GameSession struct { + _ struct{} `type:"structure"` + + // Time stamp indicating when this data object was created. Format is a number + // expressed in Unix time as milliseconds (for example "1469498468.057"). + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Unique identifier for a player. This ID is used to enforce a resource protection + // policy (if one exists), that limits the number of game sessions a player + // can create. + CreatorId *string `min:"1" type:"string"` + + // Number of players currently in the game session. + CurrentPlayerSessionCount *int64 `type:"integer"` + + // Unique identifier for a fleet that the game session is running on. + FleetId *string `type:"string"` + + // Set of developer-defined properties for a game session, formatted as a set + // of type:value pairs. These properties are included in the GameSession object, + // which is passed to the game server with a request to start a new game session + // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). + GameProperties []*GameProperty `type:"list"` + + // Set of developer-defined game session properties, formatted as a single string + // value. 
This data is included in the GameSession object, which is passed to
+	// the game server with a request to start a new game session (see Start a Game
+	// Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)).
+	GameSessionData *string `min:"1" type:"string"`
+
+	// Unique identifier for the game session. A game session ARN has the following
+	// format: arn:aws:gamelift:::gamesession//.
+	GameSessionId *string `min:"1" type:"string"`
+
+	// IP address of the game session. To connect to an Amazon GameLift game server,
+	// an app needs both the IP address and port number.
+	IpAddress *string `type:"string"`
+
+	// Maximum number of players that can be connected simultaneously to the game
+	// session.
+	MaximumPlayerSessionCount *int64 `type:"integer"`
+
+	// Descriptive label that is associated with a game session. Session names do
+	// not need to be unique.
+	Name *string `min:"1" type:"string"`
+
+	// Indicates whether or not the game session is accepting new players.
+	PlayerSessionCreationPolicy *string `type:"string" enum:"PlayerSessionCreationPolicy"`
+
+	// Port number for the game session. To connect to an Amazon GameLift game server,
+	// an app needs both the IP address and port number.
+	Port *int64 `min:"1" type:"integer"`
+
+	// Current status of the game session. A game session must have an ACTIVE status
+	// to have player sessions.
+	Status *string `type:"string" enum:"GameSessionStatus"`
+
+	// Time stamp indicating when this data object was terminated. Format is a number
+	// expressed in Unix time as milliseconds (for example "1469498468.057").
+ TerminationTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s GameSession) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GameSession) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *GameSession) SetCreationTime(v time.Time) *GameSession { + s.CreationTime = &v + return s +} + +// SetCreatorId sets the CreatorId field's value. +func (s *GameSession) SetCreatorId(v string) *GameSession { + s.CreatorId = &v + return s +} + +// SetCurrentPlayerSessionCount sets the CurrentPlayerSessionCount field's value. +func (s *GameSession) SetCurrentPlayerSessionCount(v int64) *GameSession { + s.CurrentPlayerSessionCount = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *GameSession) SetFleetId(v string) *GameSession { + s.FleetId = &v + return s +} + +// SetGameProperties sets the GameProperties field's value. +func (s *GameSession) SetGameProperties(v []*GameProperty) *GameSession { + s.GameProperties = v + return s +} + +// SetGameSessionData sets the GameSessionData field's value. +func (s *GameSession) SetGameSessionData(v string) *GameSession { + s.GameSessionData = &v + return s +} + +// SetGameSessionId sets the GameSessionId field's value. +func (s *GameSession) SetGameSessionId(v string) *GameSession { + s.GameSessionId = &v + return s +} + +// SetIpAddress sets the IpAddress field's value. +func (s *GameSession) SetIpAddress(v string) *GameSession { + s.IpAddress = &v + return s +} + +// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. +func (s *GameSession) SetMaximumPlayerSessionCount(v int64) *GameSession { + s.MaximumPlayerSessionCount = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *GameSession) SetName(v string) *GameSession { + s.Name = &v + return s +} + +// SetPlayerSessionCreationPolicy sets the PlayerSessionCreationPolicy field's value. +func (s *GameSession) SetPlayerSessionCreationPolicy(v string) *GameSession { + s.PlayerSessionCreationPolicy = &v + return s +} + +// SetPort sets the Port field's value. +func (s *GameSession) SetPort(v int64) *GameSession { + s.Port = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GameSession) SetStatus(v string) *GameSession { + s.Status = &v + return s +} + +// SetTerminationTime sets the TerminationTime field's value. +func (s *GameSession) SetTerminationTime(v time.Time) *GameSession { + s.TerminationTime = &v + return s +} + +// Connection information for the new game session that is created with matchmaking. +// (with StartMatchmaking). Once a match is set, the FlexMatch engine places +// the match and creates a new game session for it. This information, including +// the game session endpoint and player sessions for each player in the original +// matchmaking request, is added to the MatchmakingTicket, which can be retrieved +// by calling DescribeMatchmaking. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSessionConnectionInfo +type GameSessionConnectionInfo struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) + // that is assigned to a game session and uniquely identifies it. + GameSessionArn *string `min:"1" type:"string"` + + // IP address of the game session. To connect to a Amazon GameLift game server, + // an app needs both the IP address and port number. + IpAddress *string `type:"string"` + + // Collection of player session IDs, one for each player ID that was included + // in the original matchmaking request. + MatchedPlayerSessions []*MatchedPlayerSession `type:"list"` + + // Port number for the game session. 
To connect to a Amazon GameLift game server, + // an app needs both the IP address and port number. + Port *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GameSessionConnectionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GameSessionConnectionInfo) GoString() string { + return s.String() +} + +// SetGameSessionArn sets the GameSessionArn field's value. +func (s *GameSessionConnectionInfo) SetGameSessionArn(v string) *GameSessionConnectionInfo { + s.GameSessionArn = &v + return s +} + +// SetIpAddress sets the IpAddress field's value. +func (s *GameSessionConnectionInfo) SetIpAddress(v string) *GameSessionConnectionInfo { + s.IpAddress = &v + return s +} + +// SetMatchedPlayerSessions sets the MatchedPlayerSessions field's value. +func (s *GameSessionConnectionInfo) SetMatchedPlayerSessions(v []*MatchedPlayerSession) *GameSessionConnectionInfo { + s.MatchedPlayerSessions = v + return s +} + +// SetPort sets the Port field's value. +func (s *GameSessionConnectionInfo) SetPort(v int64) *GameSessionConnectionInfo { + s.Port = &v + return s +} + +// A game session's properties plus the protection policy currently in force. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSessionDetail +type GameSessionDetail struct { + _ struct{} `type:"structure"` + + // Object that describes a game session. + GameSession *GameSession `type:"structure"` + + // Current status of protection for the game session. + // + // * NoProtection -- The game session can be terminated during a scale-down + // event. + // + // * FullProtection -- If the game session is in an ACTIVE status, it cannot + // be terminated during a scale-down event. 
+ ProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` +} + +// String returns the string representation +func (s GameSessionDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GameSessionDetail) GoString() string { + return s.String() +} + +// SetGameSession sets the GameSession field's value. +func (s *GameSessionDetail) SetGameSession(v *GameSession) *GameSessionDetail { + s.GameSession = v + return s +} + +// SetProtectionPolicy sets the ProtectionPolicy field's value. +func (s *GameSessionDetail) SetProtectionPolicy(v string) *GameSessionDetail { + s.ProtectionPolicy = &v + return s +} + +// Object that describes a StartGameSessionPlacement request. This object includes +// the full details of the original request plus the current status and start/end +// time stamps. +// +// Game session placement-related operations include: +// +// * StartGameSessionPlacement +// +// * DescribeGameSessionPlacement +// +// * StopGameSessionPlacement +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSessionPlacement +type GameSessionPlacement struct { + _ struct{} `type:"structure"` + + // Time stamp indicating when this request was completed, canceled, or timed + // out. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Set of developer-defined properties for a game session, formatted as a set + // of type:value pairs. These properties are included in the GameSession object, + // which is passed to the game server with a request to start a new game session + // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). + GameProperties []*GameProperty `type:"list"` + + // Identifier for the game session created by this placement request. This value + // is set once the new game session is placed (placement status is FULFILLED). 
+ // This identifier is unique across all regions. You can use this value as a + // GameSessionId value as needed. + GameSessionArn *string `min:"1" type:"string"` + + // Set of developer-defined game session properties, formatted as a single string + // value. This data is included in the GameSession object, which is passed to + // the game server with a request to start a new game session (see Start a Game + // Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). + GameSessionData *string `min:"1" type:"string"` + + // Unique identifier for the game session. This value is set once the new game + // session is placed (placement status is FULFILLED). + GameSessionId *string `min:"1" type:"string"` + + // Descriptive label that is associated with a game session. Session names do + // not need to be unique. + GameSessionName *string `min:"1" type:"string"` + + // Descriptive label that is associated with game session queue. Queue names + // must be unique within each region. + GameSessionQueueName *string `min:"1" type:"string"` + + // Name of the region where the game session created by this placement request + // is running. This value is set once the new game session is placed (placement + // status is FULFILLED). + GameSessionRegion *string `min:"1" type:"string"` + + // IP address of the game session. To connect to a Amazon GameLift game server, + // an app needs both the IP address and port number. This value is set once + // the new game session is placed (placement status is FULFILLED). + IpAddress *string `type:"string"` + + // Maximum number of players that can be connected simultaneously to the game + // session. + MaximumPlayerSessionCount *int64 `type:"integer"` + + // Collection of information on player sessions created in response to the game + // session placement request. 
These player sessions are created only once a + // new game session is successfully placed (placement status is FULFILLED). + // This information includes the player ID (as provided in the placement request) + // and the corresponding player session ID. Retrieve full player sessions by + // calling DescribePlayerSessions with the player session ID. + PlacedPlayerSessions []*PlacedPlayerSession `type:"list"` + + // Unique identifier for a game session placement. + PlacementId *string `min:"1" type:"string"` + + // Set of values, expressed in milliseconds, indicating the amount of latency + // that a player experiences when connected to AWS regions. + PlayerLatencies []*PlayerLatency `type:"list"` + + // Port number for the game session. To connect to a Amazon GameLift game server, + // an app needs both the IP address and port number. This value is set once + // the new game session is placed (placement status is FULFILLED). + Port *int64 `min:"1" type:"integer"` + + // Time stamp indicating when this request was placed in the queue. Format is + // a number expressed in Unix time as milliseconds (for example "1469498468.057"). + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Current status of the game session placement request. + // + // * PENDING -- The placement request is currently in the queue waiting to + // be processed. + // + // * FULFILLED -- A new game session and player sessions (if requested) have + // been successfully created. Values for GameSessionArn and GameSessionRegion + // are available. + // + // * CANCELLED -- The placement request was canceled with a call to StopGameSessionPlacement. + // + // * TIMED_OUT -- A new game session was not successfully created before + // the time limit expired. You can resubmit the placement request as needed. 
+ Status *string `type:"string" enum:"GameSessionPlacementState"` +} + +// String returns the string representation +func (s GameSessionPlacement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GameSessionPlacement) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *GameSessionPlacement) SetEndTime(v time.Time) *GameSessionPlacement { + s.EndTime = &v + return s +} + +// SetGameProperties sets the GameProperties field's value. +func (s *GameSessionPlacement) SetGameProperties(v []*GameProperty) *GameSessionPlacement { + s.GameProperties = v + return s +} + +// SetGameSessionArn sets the GameSessionArn field's value. +func (s *GameSessionPlacement) SetGameSessionArn(v string) *GameSessionPlacement { + s.GameSessionArn = &v + return s +} + +// SetGameSessionData sets the GameSessionData field's value. +func (s *GameSessionPlacement) SetGameSessionData(v string) *GameSessionPlacement { + s.GameSessionData = &v + return s +} + +// SetGameSessionId sets the GameSessionId field's value. +func (s *GameSessionPlacement) SetGameSessionId(v string) *GameSessionPlacement { + s.GameSessionId = &v + return s +} + +// SetGameSessionName sets the GameSessionName field's value. +func (s *GameSessionPlacement) SetGameSessionName(v string) *GameSessionPlacement { + s.GameSessionName = &v + return s +} + +// SetGameSessionQueueName sets the GameSessionQueueName field's value. +func (s *GameSessionPlacement) SetGameSessionQueueName(v string) *GameSessionPlacement { + s.GameSessionQueueName = &v + return s +} + +// SetGameSessionRegion sets the GameSessionRegion field's value. +func (s *GameSessionPlacement) SetGameSessionRegion(v string) *GameSessionPlacement { + s.GameSessionRegion = &v + return s +} + +// SetIpAddress sets the IpAddress field's value. 
+func (s *GameSessionPlacement) SetIpAddress(v string) *GameSessionPlacement { + s.IpAddress = &v + return s +} + +// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. +func (s *GameSessionPlacement) SetMaximumPlayerSessionCount(v int64) *GameSessionPlacement { + s.MaximumPlayerSessionCount = &v + return s +} + +// SetPlacedPlayerSessions sets the PlacedPlayerSessions field's value. +func (s *GameSessionPlacement) SetPlacedPlayerSessions(v []*PlacedPlayerSession) *GameSessionPlacement { + s.PlacedPlayerSessions = v + return s +} + +// SetPlacementId sets the PlacementId field's value. +func (s *GameSessionPlacement) SetPlacementId(v string) *GameSessionPlacement { + s.PlacementId = &v + return s +} + +// SetPlayerLatencies sets the PlayerLatencies field's value. +func (s *GameSessionPlacement) SetPlayerLatencies(v []*PlayerLatency) *GameSessionPlacement { + s.PlayerLatencies = v + return s +} + +// SetPort sets the Port field's value. +func (s *GameSessionPlacement) SetPort(v int64) *GameSessionPlacement { + s.Port = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GameSessionPlacement) SetStartTime(v time.Time) *GameSessionPlacement { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GameSessionPlacement) SetStatus(v string) *GameSessionPlacement { + s.Status = &v + return s +} + +// Configuration of a queue that is used to process game session placement requests. +// The queue configuration identifies several game features: +// +// * The destinations where a new game session can potentially be hosted. +// Amazon GameLift tries these destinations in an order based on either the +// queue's default order or player latency information, if provided in a +// placement request. With latency information, Amazon GameLift can place +// game sessions where the majority of players are reporting the lowest possible +// latency. 
+// +// * The length of time that placement requests can wait in the queue before +// timing out. +// +// * A set of optional latency policies that protect individual players from +// high latencies, preventing game sessions from being placed where any individual +// player is reporting latency higher than a policy's maximum. +// +// Queue-related operations include: +// +// * CreateGameSessionQueue +// +// * DescribeGameSessionQueues +// +// * UpdateGameSessionQueue +// +// * DeleteGameSessionQueue +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSessionQueue +type GameSessionQueue struct { + _ struct{} `type:"structure"` + + // List of fleets that can be used to fulfill game session placement requests + // in the queue. Fleets are identified by either a fleet ARN or a fleet alias + // ARN. Destinations are listed in default preference order. + Destinations []*GameSessionQueueDestination `type:"list"` + + // Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) + // that is assigned to a game session queue and uniquely identifies it. Format + // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. + GameSessionQueueArn *string `min:"1" type:"string"` + + // Descriptive label that is associated with game session queue. Queue names + // must be unique within each region. + Name *string `min:"1" type:"string"` + + // Collection of latency policies to apply when processing game sessions placement + // requests with player latency information. Multiple policies are evaluated + // in order of the maximum latency value, starting with the lowest latency values. + // With just one policy, it is enforced at the start of the game session placement + // for the duration period. With multiple policies, each policy is enforced + // consecutively for its duration period. 
For example, a queue might enforce + // a 60-second policy followed by a 120-second policy, and then no policy for + // the remainder of the placement. + PlayerLatencyPolicies []*PlayerLatencyPolicy `type:"list"` + + // Maximum time, in seconds, that a new game session placement request remains + // in the queue. When a request exceeds this time, the game session placement + // changes to a TIMED_OUT status. + TimeoutInSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s GameSessionQueue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GameSessionQueue) GoString() string { + return s.String() +} + +// SetDestinations sets the Destinations field's value. +func (s *GameSessionQueue) SetDestinations(v []*GameSessionQueueDestination) *GameSessionQueue { + s.Destinations = v + return s +} + +// SetGameSessionQueueArn sets the GameSessionQueueArn field's value. +func (s *GameSessionQueue) SetGameSessionQueueArn(v string) *GameSessionQueue { + s.GameSessionQueueArn = &v + return s +} + +// SetName sets the Name field's value. +func (s *GameSessionQueue) SetName(v string) *GameSessionQueue { + s.Name = &v + return s +} + +// SetPlayerLatencyPolicies sets the PlayerLatencyPolicies field's value. +func (s *GameSessionQueue) SetPlayerLatencyPolicies(v []*PlayerLatencyPolicy) *GameSessionQueue { + s.PlayerLatencyPolicies = v + return s +} + +// SetTimeoutInSeconds sets the TimeoutInSeconds field's value. +func (s *GameSessionQueue) SetTimeoutInSeconds(v int64) *GameSessionQueue { + s.TimeoutInSeconds = &v + return s +} + +// Fleet designated in a game session queue. Requests for new game sessions +// in the queue are fulfilled by starting a new game session on any destination +// configured for a queue. 
+// +// Queue-related operations include: +// +// * CreateGameSessionQueue +// +// * DescribeGameSessionQueues +// +// * UpdateGameSessionQueue +// +// * DeleteGameSessionQueue +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSessionQueueDestination +type GameSessionQueueDestination struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) assigned to fleet or fleet alias. ARNs, which + // include a fleet ID or alias ID and a region name, provide a unique identifier + // across all regions. + DestinationArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GameSessionQueueDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GameSessionQueueDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GameSessionQueueDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GameSessionQueueDestination"} + if s.DestinationArn != nil && len(*s.DestinationArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationArn sets the DestinationArn field's value. +func (s *GameSessionQueueDestination) SetDestinationArn(v string) *GameSessionQueueDestination { + s.DestinationArn = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrlInput +type GetGameSessionLogUrlInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the game session to get logs for. 
+ // + // GameSessionId is a required field + GameSessionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetGameSessionLogUrlInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGameSessionLogUrlInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGameSessionLogUrlInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGameSessionLogUrlInput"} + if s.GameSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("GameSessionId")) + } + if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameSessionId sets the GameSessionId field's value. +func (s *GetGameSessionLogUrlInput) SetGameSessionId(v string) *GetGameSessionLogUrlInput { + s.GameSessionId = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrlOutput +type GetGameSessionLogUrlOutput struct { + _ struct{} `type:"structure"` + + // Location of the requested game session logs, available for download. + PreSignedUrl *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetGameSessionLogUrlOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGameSessionLogUrlOutput) GoString() string { + return s.String() +} + +// SetPreSignedUrl sets the PreSignedUrl field's value. +func (s *GetGameSessionLogUrlOutput) SetPreSignedUrl(v string) *GetGameSessionLogUrlOutput { + s.PreSignedUrl = &v + return s +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccessInput +type GetInstanceAccessInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet that contains the instance you want access + // to. The fleet can be in any of the following statuses: ACTIVATING, ACTIVE, + // or ERROR. Fleets with an ERROR status may be accessible for a short time + // before they are deleted. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // Unique identifier for an instance you want to get access to. You can access + // an instance in any status. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetInstanceAccessInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInstanceAccessInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetInstanceAccessInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetInstanceAccessInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *GetInstanceAccessInput) SetFleetId(v string) *GetInstanceAccessInput { + s.FleetId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetInstanceAccessInput) SetInstanceId(v string) *GetInstanceAccessInput { + s.InstanceId = &v + return s +} + +// Represents the returned data in response to a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccessOutput +type GetInstanceAccessOutput struct { + _ struct{} `type:"structure"` + + // Object that contains connection information for a fleet instance, including + // IP address and access credentials. + InstanceAccess *InstanceAccess `type:"structure"` +} + +// String returns the string representation +func (s GetInstanceAccessOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInstanceAccessOutput) GoString() string { + return s.String() +} + +// SetInstanceAccess sets the InstanceAccess field's value. +func (s *GetInstanceAccessOutput) SetInstanceAccess(v *InstanceAccess) *GetInstanceAccessOutput { + s.InstanceAccess = v + return s +} + +// Properties that describe an instance of a virtual computing resource that +// hosts one or more game servers. A fleet may contain zero or more instances. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/Instance +type Instance struct { + _ struct{} `type:"structure"` + + // Time stamp indicating when this data object was created. Format is a number + // expressed in Unix time as milliseconds (for example "1469498468.057"). + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Unique identifier for a fleet that the instance is in. + FleetId *string `type:"string"` + + // Unique identifier for an instance. + InstanceId *string `type:"string"` + + // IP address assigned to the instance. + IpAddress *string `type:"string"` + + // Operating system that is running on this instance. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` + + // Current status of the instance. Possible statuses include the following: + // + // * PENDING -- The instance is in the process of being created and launching + // server processes as defined in the fleet's run-time configuration. 
+ // + // * ACTIVE -- The instance has been successfully created and at least one + // server process has successfully launched and reported back to Amazon GameLift + // that it is ready to host a game session. The instance is now considered + // ready to host game sessions. + // + // * TERMINATING -- The instance is in the process of shutting down. This + // may happen to reduce capacity during a scaling down event or to recycle + // resources in the event of a problem. + Status *string `type:"string" enum:"InstanceStatus"` + + // EC2 instance type that defines the computing resources of this instance. + Type *string `type:"string" enum:"EC2InstanceType"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Instance) SetCreationTime(v time.Time) *Instance { + s.CreationTime = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *Instance) SetFleetId(v string) *Instance { + s.FleetId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *Instance) SetInstanceId(v string) *Instance { + s.InstanceId = &v + return s +} + +// SetIpAddress sets the IpAddress field's value. +func (s *Instance) SetIpAddress(v string) *Instance { + s.IpAddress = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *Instance) SetOperatingSystem(v string) *Instance { + s.OperatingSystem = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Instance) SetStatus(v string) *Instance { + s.Status = &v + return s +} + +// SetType sets the Type field's value. +func (s *Instance) SetType(v string) *Instance { + s.Type = &v + return s +} + +// Information required to remotely connect to a fleet instance. 
Access is requested +// by calling GetInstanceAccess. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/InstanceAccess +type InstanceAccess struct { + _ struct{} `type:"structure"` + + // Credentials required to access the instance. + Credentials *InstanceCredentials `type:"structure"` + + // Unique identifier for a fleet containing the instance being accessed. + FleetId *string `type:"string"` + + // Unique identifier for an instance being accessed. + InstanceId *string `type:"string"` + + // IP address assigned to the instance. + IpAddress *string `type:"string"` + + // Operating system that is running on the instance. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` +} + +// String returns the string representation +func (s InstanceAccess) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceAccess) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *InstanceAccess) SetCredentials(v *InstanceCredentials) *InstanceAccess { + s.Credentials = v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *InstanceAccess) SetFleetId(v string) *InstanceAccess { + s.FleetId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *InstanceAccess) SetInstanceId(v string) *InstanceAccess { + s.InstanceId = &v + return s +} + +// SetIpAddress sets the IpAddress field's value. +func (s *InstanceAccess) SetIpAddress(v string) *InstanceAccess { + s.IpAddress = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *InstanceAccess) SetOperatingSystem(v string) *InstanceAccess { + s.OperatingSystem = &v + return s +} + +// Set of credentials required to remotely access a fleet instance. Access credentials +// are requested by calling GetInstanceAccess and returned in an InstanceAccess +// object. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/InstanceCredentials +type InstanceCredentials struct { + _ struct{} `type:"structure"` + + // Secret string. For Windows instances, the secret is a password for use with + // Windows Remote Desktop. For Linux instances, it is a private key (which must + // be saved as a .pem file) for use with SSH. + Secret *string `min:"1" type:"string"` + + // User login string. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s InstanceCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceCredentials) GoString() string { + return s.String() +} + +// SetSecret sets the Secret field's value. +func (s *InstanceCredentials) SetSecret(v string) *InstanceCredentials { + s.Secret = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *InstanceCredentials) SetUserName(v string) *InstanceCredentials { + s.UserName = &v + return s +} + +// A range of IP addresses and port settings that allow inbound traffic to connect +// to server processes on Amazon GameLift. Each game session hosted on a fleet +// is assigned a unique combination of IP address and port number, which must +// fall into the fleet's allowed ranges. This combination is included in the +// GameSession object. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/IpPermission +type IpPermission struct { + _ struct{} `type:"structure"` + + // Starting value for a range of allowed port numbers. + // + // FromPort is a required field + FromPort *int64 `min:"1" type:"integer" required:"true"` + + // Range of allowed IP addresses. This value must be expressed in CIDR notation. + // Example: "000.000.000.000/[subnet mask]" or optionally the shortened version + // "0.0.0.0/[subnet mask]". 
+ // + // IpRange is a required field + IpRange *string `type:"string" required:"true"` + + // Network communication protocol used by the fleet. + // + // Protocol is a required field + Protocol *string `type:"string" required:"true" enum:"IpProtocol"` + + // Ending value for a range of allowed port numbers. Port numbers are end-inclusive. + // This value must be higher than FromPort. + // + // ToPort is a required field + ToPort *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s IpPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpPermission) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IpPermission) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IpPermission"} + if s.FromPort == nil { + invalidParams.Add(request.NewErrParamRequired("FromPort")) + } + if s.FromPort != nil && *s.FromPort < 1 { + invalidParams.Add(request.NewErrParamMinValue("FromPort", 1)) + } + if s.IpRange == nil { + invalidParams.Add(request.NewErrParamRequired("IpRange")) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.ToPort == nil { + invalidParams.Add(request.NewErrParamRequired("ToPort")) + } + if s.ToPort != nil && *s.ToPort < 1 { + invalidParams.Add(request.NewErrParamMinValue("ToPort", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFromPort sets the FromPort field's value. +func (s *IpPermission) SetFromPort(v int64) *IpPermission { + s.FromPort = &v + return s +} + +// SetIpRange sets the IpRange field's value. +func (s *IpPermission) SetIpRange(v string) *IpPermission { + s.IpRange = &v + return s +} + +// SetProtocol sets the Protocol field's value. 
+func (s *IpPermission) SetProtocol(v string) *IpPermission { + s.Protocol = &v + return s +} + +// SetToPort sets the ToPort field's value. +func (s *IpPermission) SetToPort(v int64) *IpPermission { + s.ToPort = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliasesInput +type ListAliasesInput struct { + _ struct{} `type:"structure"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Descriptive label that is associated with an alias. Alias names do not need + // to be unique. + Name *string `min:"1" type:"string"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` + + // Type of routing to filter results on. Use this parameter to retrieve only + // aliases of a certain type. To retrieve all aliases, leave this parameter + // empty. + // + // Possible routing types include the following: + // + // * SIMPLE -- The alias resolves to one specific fleet. Use this type when + // routing to active fleets. + // + // * TERMINAL -- The alias does not resolve to a fleet but instead can be + // used to display a message to the user. A terminal alias throws a TerminalRoutingStrategyException + // with the RoutingStrategy message embedded. + RoutingStrategyType *string `type:"string" enum:"RoutingStrategyType"` +} + +// String returns the string representation +func (s ListAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAliasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAliasesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *ListAliasesInput) SetLimit(v int64) *ListAliasesInput { + s.Limit = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListAliasesInput) SetName(v string) *ListAliasesInput { + s.Name = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAliasesInput) SetNextToken(v string) *ListAliasesInput { + s.NextToken = &v + return s +} + +// SetRoutingStrategyType sets the RoutingStrategyType field's value. +func (s *ListAliasesInput) SetRoutingStrategyType(v string) *ListAliasesInput { + s.RoutingStrategyType = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliasesOutput +type ListAliasesOutput struct { + _ struct{} `type:"structure"` + + // Collection of alias records that match the list request. + Aliases []*Alias `type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAliasesOutput) GoString() string { + return s.String() +} + +// SetAliases sets the Aliases field's value. 
+func (s *ListAliasesOutput) SetAliases(v []*Alias) *ListAliasesOutput { + s.Aliases = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAliasesOutput) SetNextToken(v string) *ListAliasesOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuildsInput +type ListBuildsInput struct { + _ struct{} `type:"structure"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` + + // Build status to filter results by. To retrieve all builds, leave this parameter + // empty. + // + // Possible build statuses include the following: + // + // * INITIALIZED -- A new build has been defined, but no files have been + // uploaded. You cannot create fleets for builds that are in this status. + // When a build is successfully created, the build status is set to this + // value. + // + // * READY -- The game build has been successfully uploaded. You can now + // create new fleets for this build. + // + // * FAILED -- The game build upload failed. You cannot create new fleets + // for this build. + Status *string `type:"string" enum:"BuildStatus"` +} + +// String returns the string representation +func (s ListBuildsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBuildsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListBuildsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBuildsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *ListBuildsInput) SetLimit(v int64) *ListBuildsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListBuildsInput) SetNextToken(v string) *ListBuildsInput { + s.NextToken = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListBuildsInput) SetStatus(v string) *ListBuildsInput { + s.Status = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuildsOutput +type ListBuildsOutput struct { + _ struct{} `type:"structure"` + + // Collection of build records that match the request. + Builds []*Build `type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListBuildsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBuildsOutput) GoString() string { + return s.String() +} + +// SetBuilds sets the Builds field's value. +func (s *ListBuildsOutput) SetBuilds(v []*Build) *ListBuildsOutput { + s.Builds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListBuildsOutput) SetNextToken(v string) *ListBuildsOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleetsInput +type ListFleetsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a build to return fleets for. Use this parameter to + // return only fleets using the specified build. To retrieve all fleets, leave + // this parameter empty. + BuildId *string `type:"string"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this action. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListFleetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFleetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListFleetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListFleetsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBuildId sets the BuildId field's value. +func (s *ListFleetsInput) SetBuildId(v string) *ListFleetsInput { + s.BuildId = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ListFleetsInput) SetLimit(v int64) *ListFleetsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListFleetsInput) SetNextToken(v string) *ListFleetsInput { + s.NextToken = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleetsOutput +type ListFleetsOutput struct { + _ struct{} `type:"structure"` + + // Set of fleet IDs matching the list request. You can retrieve additional information + // about all returned fleets by passing this result set to a call to DescribeFleetAttributes, + // DescribeFleetCapacity, or DescribeFleetUtilization. + FleetIds []*string `min:"1" type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this action. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListFleetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFleetsOutput) GoString() string { + return s.String() +} + +// SetFleetIds sets the FleetIds field's value. +func (s *ListFleetsOutput) SetFleetIds(v []*string) *ListFleetsOutput { + s.FleetIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListFleetsOutput) SetNextToken(v string) *ListFleetsOutput { + s.NextToken = &v + return s +} + +// Represents a new player session that is created as a result of a successful +// FlexMatch match. A successful match automatically creates new player sessions +// for every player ID in the original matchmaking request. +// +// When players connect to the match's game session, they must include both +// player ID and player session ID in order to claim their assigned player slot. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/MatchedPlayerSession +type MatchedPlayerSession struct { + _ struct{} `type:"structure"` + + // Unique identifier for a player + PlayerId *string `min:"1" type:"string"` + + // Unique identifier for a player session + PlayerSessionId *string `type:"string"` +} + +// String returns the string representation +func (s MatchedPlayerSession) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MatchedPlayerSession) GoString() string { + return s.String() +} + +// SetPlayerId sets the PlayerId field's value. +func (s *MatchedPlayerSession) SetPlayerId(v string) *MatchedPlayerSession { + s.PlayerId = &v + return s +} + +// SetPlayerSessionId sets the PlayerSessionId field's value. +func (s *MatchedPlayerSession) SetPlayerSessionId(v string) *MatchedPlayerSession { + s.PlayerSessionId = &v + return s +} + +// Guidelines for use with FlexMatch to match players into games. All matchmaking +// requests must specify a matchmaking configuration. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/MatchmakingConfiguration +type MatchmakingConfiguration struct { + _ struct{} `type:"structure"` + + // Flag that determines whether or not a match that was created with this configuration + // must be accepted by the matched players. To require acceptance, set to TRUE. + AcceptanceRequired *bool `type:"boolean"` + + // Length of time (in seconds) to wait for players to accept a proposed match. + // If any player rejects the match or fails to accept before the timeout, the + // ticket continues to look for an acceptable match. + AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"` + + // Number of player slots in a match to keep open for future players. 
For example,
+	// if the configuration's rule set specifies a match for a single 12-person
+	// team, and the additional player count is set to 2, only 10 players are selected
+	// for the match.
+	AdditionalPlayerCount *int64 `type:"integer"`
+
+	// Time stamp indicating when this data object was created. Format is a number
+	// expressed in Unix time as milliseconds (for example "1469498468.057").
+	CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// Information to attach to all events related to the matchmaking configuration.
+	CustomEventData *string `type:"string"`
+
+	// Descriptive label that is associated with matchmaking configuration.
+	Description *string `min:"1" type:"string"`
+
+	// Set of developer-defined properties for a game session, formatted as a set
+	// of type:value pairs. These properties are included in the GameSession object,
+	// which is passed to the game server with a request to start a new game session
+	// (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)).
+	// This information is added to the new GameSession object that is created for
+	// a successful match.
+	GameProperties []*GameProperty `type:"list"`
+
+	// Set of developer-defined game session properties, formatted as a single string
+	// value. This data is included in the GameSession object, which is passed to
+	// the game server with a request to start a new game session (see Start a Game
+	// Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)).
+	// This information is added to the new GameSession object that is created for
+	// a successful match.
+	GameSessionData *string `min:"1" type:"string"`
+
+	// Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html))
+	// that is assigned to a game session queue and uniquely identifies it. 
Format + // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. + // These queues are used when placing game sessions for matches that are created + // with this matchmaking configuration. Queues can be located in any region. + GameSessionQueueArns []*string `type:"list"` + + // Unique identifier for a matchmaking configuration. This name is used to identify + // the configuration associated with a matchmaking request or ticket. + Name *string `min:"1" type:"string"` + + // SNS topic ARN that is set up to receive matchmaking notifications. + NotificationTarget *string `type:"string"` + + // Maximum duration, in seconds, that a matchmaking ticket can remain in process + // before timing out. Requests that time out can be resubmitted as needed. + RequestTimeoutSeconds *int64 `min:"1" type:"integer"` + + // Unique identifier for a matchmaking rule set to use with this configuration. + // A matchmaking configuration can only use rule sets that are defined in the + // same region. + RuleSetName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MatchmakingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MatchmakingConfiguration) GoString() string { + return s.String() +} + +// SetAcceptanceRequired sets the AcceptanceRequired field's value. +func (s *MatchmakingConfiguration) SetAcceptanceRequired(v bool) *MatchmakingConfiguration { + s.AcceptanceRequired = &v + return s +} + +// SetAcceptanceTimeoutSeconds sets the AcceptanceTimeoutSeconds field's value. +func (s *MatchmakingConfiguration) SetAcceptanceTimeoutSeconds(v int64) *MatchmakingConfiguration { + s.AcceptanceTimeoutSeconds = &v + return s +} + +// SetAdditionalPlayerCount sets the AdditionalPlayerCount field's value. 
+func (s *MatchmakingConfiguration) SetAdditionalPlayerCount(v int64) *MatchmakingConfiguration { + s.AdditionalPlayerCount = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *MatchmakingConfiguration) SetCreationTime(v time.Time) *MatchmakingConfiguration { + s.CreationTime = &v + return s +} + +// SetCustomEventData sets the CustomEventData field's value. +func (s *MatchmakingConfiguration) SetCustomEventData(v string) *MatchmakingConfiguration { + s.CustomEventData = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *MatchmakingConfiguration) SetDescription(v string) *MatchmakingConfiguration { + s.Description = &v + return s +} + +// SetGameProperties sets the GameProperties field's value. +func (s *MatchmakingConfiguration) SetGameProperties(v []*GameProperty) *MatchmakingConfiguration { + s.GameProperties = v + return s +} + +// SetGameSessionData sets the GameSessionData field's value. +func (s *MatchmakingConfiguration) SetGameSessionData(v string) *MatchmakingConfiguration { + s.GameSessionData = &v + return s +} + +// SetGameSessionQueueArns sets the GameSessionQueueArns field's value. +func (s *MatchmakingConfiguration) SetGameSessionQueueArns(v []*string) *MatchmakingConfiguration { + s.GameSessionQueueArns = v + return s +} + +// SetName sets the Name field's value. +func (s *MatchmakingConfiguration) SetName(v string) *MatchmakingConfiguration { + s.Name = &v + return s +} + +// SetNotificationTarget sets the NotificationTarget field's value. +func (s *MatchmakingConfiguration) SetNotificationTarget(v string) *MatchmakingConfiguration { + s.NotificationTarget = &v + return s +} + +// SetRequestTimeoutSeconds sets the RequestTimeoutSeconds field's value. +func (s *MatchmakingConfiguration) SetRequestTimeoutSeconds(v int64) *MatchmakingConfiguration { + s.RequestTimeoutSeconds = &v + return s +} + +// SetRuleSetName sets the RuleSetName field's value. 
+func (s *MatchmakingConfiguration) SetRuleSetName(v string) *MatchmakingConfiguration { + s.RuleSetName = &v + return s +} + +// Set of rule statements, used with FlexMatch, that determine how to build +// a certain kind of player match. Each rule set describes a type of group to +// be created and defines the parameters for acceptable player matches. Rule +// sets are used in MatchmakingConfiguration objects. +// +// A rule set may define the following elements for a match. For detailed information +// and examples showing how to construct a rule set, see Create Matchmaking +// Rules for Your Game (http://docs.aws.amazon.com/gamelift/latest/developerguide/match-rules.html). +// +// * Teams -- Required. A rule set must define one or multiple teams for +// the match and set minimum and maximum team sizes. For example, a rule +// set might describe a 4x4 match that requires all eight slots to be filled. +// +// +// * Player attributes -- Optional. These attributes specify a set of player +// characteristics to evaluate when looking for a match. Matchmaking requests +// that use a rule set with player attributes must provide the corresponding +// attribute values. For example, an attribute might specify a player's skill +// or level. +// +// * Rules -- Optional. Rules define how to evaluate potential players for +// a match based on player attributes. A rule might specify minimum requirements +// for individual players--such as each player must meet a certain skill +// level, or may describe an entire group--such as all teams must be evenly +// matched or have at least one player in a certain role. +// +// * Expansions -- Optional. Expansions allow you to relax the rules after +// a period of time if no acceptable matches are found. This feature lets +// you balance getting players into games in a reasonable amount of time +// instead of making them wait indefinitely for the best possible match. 
+// For example, you might use an expansion to increase the maximum skill
+// variance between players after 30 seconds.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/MatchmakingRuleSet
+type MatchmakingRuleSet struct {
+	_ struct{} `type:"structure"`
+
+	// Time stamp indicating when this data object was created. Format is a number
+	// expressed in Unix time as milliseconds (for example "1469498468.057").
+	CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// Collection of matchmaking rules, formatted as a JSON string. (Note that comments
+	// are not allowed in JSON, but most elements support a description field.)
+	//
+	// RuleSetBody is a required field
+	RuleSetBody *string `min:"1" type:"string" required:"true"`
+
+	// Unique identifier for a matchmaking rule set
+	RuleSetName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s MatchmakingRuleSet) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MatchmakingRuleSet) GoString() string {
+	return s.String()
+}
+
+// SetCreationTime sets the CreationTime field's value.
+func (s *MatchmakingRuleSet) SetCreationTime(v time.Time) *MatchmakingRuleSet {
+	s.CreationTime = &v
+	return s
+}
+
+// SetRuleSetBody sets the RuleSetBody field's value.
+func (s *MatchmakingRuleSet) SetRuleSetBody(v string) *MatchmakingRuleSet {
+	s.RuleSetBody = &v
+	return s
+}
+
+// SetRuleSetName sets the RuleSetName field's value.
+func (s *MatchmakingRuleSet) SetRuleSetName(v string) *MatchmakingRuleSet {
+	s.RuleSetName = &v
+	return s
+}
+
+// Ticket generated to track the progress of a matchmaking request. Each ticket
+// is uniquely identified by a ticket ID, supplied by the requester, when creating
+// a matchmaking request with StartMatchmaking. Tickets can be retrieved by
+// calling DescribeMatchmaking with the ticket ID.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/MatchmakingTicket +type MatchmakingTicket struct { + _ struct{} `type:"structure"` + + // Name of the MatchmakingConfiguration that is used with this ticket. Matchmaking + // configurations determine how players are grouped into a match and how a new + // game session is created for the match. + ConfigurationName *string `min:"1" type:"string"` + + // Time stamp indicating when the matchmaking request stopped being processed + // due to successful completion, timeout, or cancellation. Format is a number + // expressed in Unix time as milliseconds (for example "1469498468.057"). + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Average amount of time (in seconds) that players are currently waiting for + // a match. If there is not enough recent data, this property may be empty. + EstimatedWaitTime *int64 `type:"integer"` + + // Identifier and connection information of the game session created for the + // match. This information is added to the ticket only after the matchmaking + // request has been successfully completed. + GameSessionConnectionInfo *GameSessionConnectionInfo `type:"structure"` + + // A set of Player objects, each representing a player to find matches for. + // Players are identified by a unique player ID and may include latency data + // for use during matchmaking. If the ticket is in status COMPLETED, the Player + // objects include the team the players were assigned to in the resulting match. + Players []*Player `type:"list"` + + // Time stamp indicating when this matchmaking request was received. Format + // is a number expressed in Unix time as milliseconds (for example "1469498468.057"). + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Current status of the matchmaking request. + // + // * QUEUED -- The matchmaking request has been received and is currently + // waiting to be processed. 
+ // + // * SEARCHING -- The matchmaking request is currently being processed. + // + // * REQUIRES_ACCEPTANCE -- A match has been proposed and the players must + // accept the match (see AcceptMatch). This status is used only with requests + // that use a matchmaking configuration with a player acceptance requirement. + // + // * PLACING -- The FlexMatch engine has matched players and is in the process + // of placing a new game session for the match. + // + // * COMPLETED -- Players have been matched and a game session is ready to + // host the players. A ticket in this state contains the necessary connection + // information for players. + // + // * FAILED -- The matchmaking request was not completed. Tickets with players + // who fail to accept a proposed match are placed in FAILED status; new matchmaking + // requests can be submitted for these players. + // + // * CANCELLED -- The matchmaking request was canceled with a call to StopMatchmaking. + // + // * TIMED_OUT -- The matchmaking request was not completed within the duration + // specified in the matchmaking configuration. Matchmaking requests that + // time out can be resubmitted. + Status *string `type:"string" enum:"MatchmakingConfigurationStatus"` + + // Additional information about the current status. + StatusMessage *string `type:"string"` + + // Code to explain the current status. For example, a status reason may indicate + // when a ticket has returned to SEARCHING status after a proposed match fails + // to receive player acceptances. + StatusReason *string `type:"string"` + + // Unique identifier for a matchmaking ticket. + TicketId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MatchmakingTicket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MatchmakingTicket) GoString() string { + return s.String() +} + +// SetConfigurationName sets the ConfigurationName field's value. 
+func (s *MatchmakingTicket) SetConfigurationName(v string) *MatchmakingTicket { + s.ConfigurationName = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *MatchmakingTicket) SetEndTime(v time.Time) *MatchmakingTicket { + s.EndTime = &v + return s +} + +// SetEstimatedWaitTime sets the EstimatedWaitTime field's value. +func (s *MatchmakingTicket) SetEstimatedWaitTime(v int64) *MatchmakingTicket { + s.EstimatedWaitTime = &v + return s +} + +// SetGameSessionConnectionInfo sets the GameSessionConnectionInfo field's value. +func (s *MatchmakingTicket) SetGameSessionConnectionInfo(v *GameSessionConnectionInfo) *MatchmakingTicket { + s.GameSessionConnectionInfo = v + return s +} + +// SetPlayers sets the Players field's value. +func (s *MatchmakingTicket) SetPlayers(v []*Player) *MatchmakingTicket { + s.Players = v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *MatchmakingTicket) SetStartTime(v time.Time) *MatchmakingTicket { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *MatchmakingTicket) SetStatus(v string) *MatchmakingTicket { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *MatchmakingTicket) SetStatusMessage(v string) *MatchmakingTicket { + s.StatusMessage = &v + return s +} + +// SetStatusReason sets the StatusReason field's value. +func (s *MatchmakingTicket) SetStatusReason(v string) *MatchmakingTicket { + s.StatusReason = &v + return s +} + +// SetTicketId sets the TicketId field's value. +func (s *MatchmakingTicket) SetTicketId(v string) *MatchmakingTicket { + s.TicketId = &v + return s +} + +// Information about a player session that was created as part of a StartGameSessionPlacement +// request. This object contains only the player ID and player session ID. To +// retrieve full details on a player session, call DescribePlayerSessions with +// the player session ID. 
+// +// Player-session-related operations include: +// +// * CreatePlayerSession +// +// * CreatePlayerSessions +// +// * DescribePlayerSessions +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PlacedPlayerSession +type PlacedPlayerSession struct { + _ struct{} `type:"structure"` + + // Unique identifier for a player that is associated with this player session. + PlayerId *string `min:"1" type:"string"` + + // Unique identifier for a player session. + PlayerSessionId *string `type:"string"` +} + +// String returns the string representation +func (s PlacedPlayerSession) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlacedPlayerSession) GoString() string { + return s.String() +} + +// SetPlayerId sets the PlayerId field's value. +func (s *PlacedPlayerSession) SetPlayerId(v string) *PlacedPlayerSession { + s.PlayerId = &v + return s +} + +// SetPlayerSessionId sets the PlayerSessionId field's value. +func (s *PlacedPlayerSession) SetPlayerSessionId(v string) *PlacedPlayerSession { + s.PlayerSessionId = &v + return s +} + +// Represents a player in matchmaking. When starting a matchmaking request, +// a player has a player ID, attributes, and may have latency data. Team information +// is added after a match has been successfully completed. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/Player +type Player struct { + _ struct{} `type:"structure"` + + // Set of values, expressed in milliseconds, indicating the amount of latency + // that a player experiences when connected to AWS regions. If this property + // is present, FlexMatch considers placing the match only in regions for which + // latency is reported. 
+ // + // If a matchmaker has a rule that evaluates player latency, players must report + // latency in order to be matched. If no latency is reported in this scenario, + // FlexMatch assumes that no regions are available to the player and the ticket + // is not matchable. + LatencyInMs map[string]*int64 `type:"map"` + + // Collection of name:value pairs containing player information for use in matchmaking. + // Player attribute names need to match playerAttributes names in the rule set + // being used. Example: "PlayerAttributes": {"skill": {"N": "23"}, "gameMode": + // {"S": "deathmatch"}}. + PlayerAttributes map[string]*AttributeValue `type:"map"` + + // Unique identifier for a player + PlayerId *string `min:"1" type:"string"` + + // Name of the team that the player is assigned to in a match. Team names are + // defined in a matchmaking rule set. + Team *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Player) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Player) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Player) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Player"} + if s.PlayerId != nil && len(*s.PlayerId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1)) + } + if s.Team != nil && len(*s.Team) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Team", 1)) + } + if s.PlayerAttributes != nil { + for i, v := range s.PlayerAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PlayerAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLatencyInMs sets the LatencyInMs field's value. 
+func (s *Player) SetLatencyInMs(v map[string]*int64) *Player { + s.LatencyInMs = v + return s +} + +// SetPlayerAttributes sets the PlayerAttributes field's value. +func (s *Player) SetPlayerAttributes(v map[string]*AttributeValue) *Player { + s.PlayerAttributes = v + return s +} + +// SetPlayerId sets the PlayerId field's value. +func (s *Player) SetPlayerId(v string) *Player { + s.PlayerId = &v + return s +} + +// SetTeam sets the Team field's value. +func (s *Player) SetTeam(v string) *Player { + s.Team = &v + return s +} + +// Regional latency information for a player, used when requesting a new game +// session with StartGameSessionPlacement. This value indicates the amount of +// time lag that exists when the player is connected to a fleet in the specified +// region. The relative difference between a player's latency values for multiple +// regions are used to determine which fleets are best suited to place a new +// game session for the player. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PlayerLatency +type PlayerLatency struct { + _ struct{} `type:"structure"` + + // Amount of time that represents the time lag experienced by the player when + // connected to the specified region. + LatencyInMilliseconds *float64 `type:"float"` + + // Unique identifier for a player associated with the latency data. + PlayerId *string `min:"1" type:"string"` + + // Name of the region that is associated with the latency value. + RegionIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PlayerLatency) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlayerLatency) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PlayerLatency) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PlayerLatency"} + if s.PlayerId != nil && len(*s.PlayerId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1)) + } + if s.RegionIdentifier != nil && len(*s.RegionIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegionIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLatencyInMilliseconds sets the LatencyInMilliseconds field's value. +func (s *PlayerLatency) SetLatencyInMilliseconds(v float64) *PlayerLatency { + s.LatencyInMilliseconds = &v + return s +} + +// SetPlayerId sets the PlayerId field's value. +func (s *PlayerLatency) SetPlayerId(v string) *PlayerLatency { + s.PlayerId = &v + return s +} + +// SetRegionIdentifier sets the RegionIdentifier field's value. +func (s *PlayerLatency) SetRegionIdentifier(v string) *PlayerLatency { + s.RegionIdentifier = &v + return s +} + +// Queue setting that determines the highest latency allowed for individual +// players when placing a game session. When a latency policy is in force, a +// game session cannot be placed at any destination in a region where a player +// is reporting latency higher than the cap. Latency policies are only enforced +// when the placement request contains player latency information. +// +// Queue-related operations include: +// +// * CreateGameSessionQueue +// +// * DescribeGameSessionQueues +// +// * UpdateGameSessionQueue +// +// * DeleteGameSessionQueue +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PlayerLatencyPolicy +type PlayerLatencyPolicy struct { + _ struct{} `type:"structure"` + + // The maximum latency value that is allowed for any player, in milliseconds. + // All policies must have a value set for this property. 
+ MaximumIndividualPlayerLatencyMilliseconds *int64 `type:"integer"` + + // The length of time, in seconds, that the policy is enforced while placing + // a new game session. A null value for this property means that the policy + // is enforced until the queue times out. + PolicyDurationSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s PlayerLatencyPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlayerLatencyPolicy) GoString() string { + return s.String() +} + +// SetMaximumIndividualPlayerLatencyMilliseconds sets the MaximumIndividualPlayerLatencyMilliseconds field's value. +func (s *PlayerLatencyPolicy) SetMaximumIndividualPlayerLatencyMilliseconds(v int64) *PlayerLatencyPolicy { + s.MaximumIndividualPlayerLatencyMilliseconds = &v + return s +} + +// SetPolicyDurationSeconds sets the PolicyDurationSeconds field's value. +func (s *PlayerLatencyPolicy) SetPolicyDurationSeconds(v int64) *PlayerLatencyPolicy { + s.PolicyDurationSeconds = &v + return s +} + +// Properties describing a player session. Player session objects are created +// either by creating a player session for a specific game session, or as part +// of a game session placement. A player session represents either a player +// reservation for a game session (status RESERVED) or actual player activity +// in a game session (status ACTIVE). A player session object (including player +// data) is automatically passed to a game session when the player connects +// to the game session and is validated. +// +// When a player disconnects, the player session status changes to COMPLETED. +// Once the session ends, the player session object is retained for 30 days +// and then removed. 
+// +// Player-session-related operations include: +// +// * CreatePlayerSession +// +// * CreatePlayerSessions +// +// * DescribePlayerSessions +// +// * Game session placements +// +// StartGameSessionPlacement +// +// DescribeGameSessionPlacement +// +// StopGameSessionPlacement +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PlayerSession +type PlayerSession struct { + _ struct{} `type:"structure"` + + // Time stamp indicating when this data object was created. Format is a number + // expressed in Unix time as milliseconds (for example "1469498468.057"). + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Unique identifier for a fleet that the player's game session is running on. + FleetId *string `type:"string"` + + // Unique identifier for the game session that the player session is connected + // to. + GameSessionId *string `min:"1" type:"string"` + + // IP address of the game session. To connect to a Amazon GameLift game server, + // an app needs both the IP address and port number. + IpAddress *string `type:"string"` + + // Developer-defined information related to a player. Amazon GameLift does not + // use this data, so it can be formatted as needed for use in the game. + PlayerData *string `min:"1" type:"string"` + + // Unique identifier for a player that is associated with this player session. + PlayerId *string `min:"1" type:"string"` + + // Unique identifier for a player session. + PlayerSessionId *string `type:"string"` + + // Port number for the game session. To connect to a Amazon GameLift server + // process, an app needs both the IP address and port number. + Port *int64 `min:"1" type:"integer"` + + // Current status of the player session. + // + // Possible player session statuses include the following: + // + // * RESERVED -- The player session request has been received, but the player + // has not yet connected to the server process and/or been validated. 
+ // + // * ACTIVE -- The player has been validated by the server process and is + // currently connected. + // + // * COMPLETED -- The player connection has been dropped. + // + // * TIMEDOUT -- A player session request was received, but the player did + // not connect and/or was not validated within the timeout limit (60 seconds). + Status *string `type:"string" enum:"PlayerSessionStatus"` + + // Time stamp indicating when this data object was terminated. Format is a number + // expressed in Unix time as milliseconds (for example "1469498468.057"). + TerminationTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s PlayerSession) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlayerSession) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *PlayerSession) SetCreationTime(v time.Time) *PlayerSession { + s.CreationTime = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *PlayerSession) SetFleetId(v string) *PlayerSession { + s.FleetId = &v + return s +} + +// SetGameSessionId sets the GameSessionId field's value. +func (s *PlayerSession) SetGameSessionId(v string) *PlayerSession { + s.GameSessionId = &v + return s +} + +// SetIpAddress sets the IpAddress field's value. +func (s *PlayerSession) SetIpAddress(v string) *PlayerSession { + s.IpAddress = &v + return s +} + +// SetPlayerData sets the PlayerData field's value. +func (s *PlayerSession) SetPlayerData(v string) *PlayerSession { + s.PlayerData = &v + return s +} + +// SetPlayerId sets the PlayerId field's value. +func (s *PlayerSession) SetPlayerId(v string) *PlayerSession { + s.PlayerId = &v + return s +} + +// SetPlayerSessionId sets the PlayerSessionId field's value. 
+func (s *PlayerSession) SetPlayerSessionId(v string) *PlayerSession { + s.PlayerSessionId = &v + return s +} + +// SetPort sets the Port field's value. +func (s *PlayerSession) SetPort(v int64) *PlayerSession { + s.Port = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *PlayerSession) SetStatus(v string) *PlayerSession { + s.Status = &v + return s +} + +// SetTerminationTime sets the TerminationTime field's value. +func (s *PlayerSession) SetTerminationTime(v time.Time) *PlayerSession { + s.TerminationTime = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PutScalingPolicyInput +type PutScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // Comparison operator to use when measuring the metric against the threshold + // value. + // + // ComparisonOperator is a required field + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperatorType"` + + // Length of time (in minutes) the metric must be at or beyond the threshold + // before a scaling event is triggered. + // + // EvaluationPeriods is a required field + EvaluationPeriods *int64 `min:"1" type:"integer" required:"true"` + + // Unique identifier for a fleet to apply this policy to. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // Name of the Amazon GameLift-defined metric that is used to trigger an adjustment. + // + // * ActivatingGameSessions -- number of game sessions in the process of + // being created (game session status = ACTIVATING). + // + // * ActiveGameSessions -- number of game sessions currently running (game + // session status = ACTIVE). + // + // * CurrentPlayerSessions -- number of active or reserved player sessions + // (player session status = ACTIVE or RESERVED). 
+ // + // * AvailablePlayerSessions -- number of player session slots currently + // available in active game sessions across the fleet, calculated by subtracting + // a game session's current player session count from its maximum player + // session count. This number includes game sessions that are not currently + // accepting players (game session PlayerSessionCreationPolicy = DENY_ALL). + // + // * ActiveInstances -- number of instances currently running a game session. + // + // * IdleInstances -- number of instances not currently running a game session. + // + // MetricName is a required field + MetricName *string `type:"string" required:"true" enum:"MetricName"` + + // Descriptive label that is associated with a scaling policy. Policy names + // do not need to be unique. A fleet can have only one scaling policy with the + // same name. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Amount of adjustment to make, based on the scaling adjustment type. + // + // ScalingAdjustment is a required field + ScalingAdjustment *int64 `type:"integer" required:"true"` + + // Type of adjustment to make to a fleet's instance count (see FleetCapacity): + // + // * ChangeInCapacity -- add (or subtract) the scaling adjustment value from + // the current instance count. Positive values scale up while negative values + // scale down. + // + // * ExactCapacity -- set the instance count to the scaling adjustment value. + // + // * PercentChangeInCapacity -- increase or reduce the current instance count + // by the scaling adjustment, read as a percentage. Positive values scale + // up while negative values scale down; for example, a value of "-10" scales + // the fleet down by 10%. + // + // ScalingAdjustmentType is a required field + ScalingAdjustmentType *string `type:"string" required:"true" enum:"ScalingAdjustmentType"` + + // Metric value used to trigger a scaling event. 
+ // + // Threshold is a required field + Threshold *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s PutScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutScalingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutScalingPolicyInput"} + if s.ComparisonOperator == nil { + invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) + } + if s.EvaluationPeriods == nil { + invalidParams.Add(request.NewErrParamRequired("EvaluationPeriods")) + } + if s.EvaluationPeriods != nil && *s.EvaluationPeriods < 1 { + invalidParams.Add(request.NewErrParamMinValue("EvaluationPeriods", 1)) + } + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.ScalingAdjustment == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingAdjustment")) + } + if s.ScalingAdjustmentType == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingAdjustmentType")) + } + if s.Threshold == nil { + invalidParams.Add(request.NewErrParamRequired("Threshold")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *PutScalingPolicyInput) SetComparisonOperator(v string) *PutScalingPolicyInput { + s.ComparisonOperator = &v + return s +} + +// SetEvaluationPeriods sets the EvaluationPeriods field's value. 
+func (s *PutScalingPolicyInput) SetEvaluationPeriods(v int64) *PutScalingPolicyInput { + s.EvaluationPeriods = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *PutScalingPolicyInput) SetFleetId(v string) *PutScalingPolicyInput { + s.FleetId = &v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *PutScalingPolicyInput) SetMetricName(v string) *PutScalingPolicyInput { + s.MetricName = &v + return s +} + +// SetName sets the Name field's value. +func (s *PutScalingPolicyInput) SetName(v string) *PutScalingPolicyInput { + s.Name = &v + return s +} + +// SetScalingAdjustment sets the ScalingAdjustment field's value. +func (s *PutScalingPolicyInput) SetScalingAdjustment(v int64) *PutScalingPolicyInput { + s.ScalingAdjustment = &v + return s +} + +// SetScalingAdjustmentType sets the ScalingAdjustmentType field's value. +func (s *PutScalingPolicyInput) SetScalingAdjustmentType(v string) *PutScalingPolicyInput { + s.ScalingAdjustmentType = &v + return s +} + +// SetThreshold sets the Threshold field's value. +func (s *PutScalingPolicyInput) SetThreshold(v float64) *PutScalingPolicyInput { + s.Threshold = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PutScalingPolicyOutput +type PutScalingPolicyOutput struct { + _ struct{} `type:"structure"` + + // Descriptive label that is associated with a scaling policy. Policy names + // do not need to be unique. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyOutput) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. 
+func (s *PutScalingPolicyOutput) SetName(v string) *PutScalingPolicyOutput { + s.Name = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RequestUploadCredentialsInput +type RequestUploadCredentialsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a build to get credentials for. + // + // BuildId is a required field + BuildId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestUploadCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestUploadCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestUploadCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestUploadCredentialsInput"} + if s.BuildId == nil { + invalidParams.Add(request.NewErrParamRequired("BuildId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBuildId sets the BuildId field's value. +func (s *RequestUploadCredentialsInput) SetBuildId(v string) *RequestUploadCredentialsInput { + s.BuildId = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RequestUploadCredentialsOutput +type RequestUploadCredentialsOutput struct { + _ struct{} `type:"structure"` + + // Amazon S3 path and key, identifying where the game build files are stored. + StorageLocation *S3Location `type:"structure"` + + // AWS credentials required when uploading a game build to the storage location. + // These credentials have a limited lifespan and are valid only for the build + // they were issued for. 
+ UploadCredentials *AwsCredentials `type:"structure"` +} + +// String returns the string representation +func (s RequestUploadCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestUploadCredentialsOutput) GoString() string { + return s.String() +} + +// SetStorageLocation sets the StorageLocation field's value. +func (s *RequestUploadCredentialsOutput) SetStorageLocation(v *S3Location) *RequestUploadCredentialsOutput { + s.StorageLocation = v + return s +} + +// SetUploadCredentials sets the UploadCredentials field's value. +func (s *RequestUploadCredentialsOutput) SetUploadCredentials(v *AwsCredentials) *RequestUploadCredentialsOutput { + s.UploadCredentials = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResolveAliasInput +type ResolveAliasInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the alias you want to resolve. + // + // AliasId is a required field + AliasId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResolveAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResolveAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResolveAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResolveAliasInput"} + if s.AliasId == nil { + invalidParams.Add(request.NewErrParamRequired("AliasId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasId sets the AliasId field's value. +func (s *ResolveAliasInput) SetAliasId(v string) *ResolveAliasInput { + s.AliasId = &v + return s +} + +// Represents the returned data in response to a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResolveAliasOutput +type ResolveAliasOutput struct { + _ struct{} `type:"structure"` + + // Fleet identifier that is associated with the requested alias. + FleetId *string `type:"string"` +} + +// String returns the string representation +func (s ResolveAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResolveAliasOutput) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *ResolveAliasOutput) SetFleetId(v string) *ResolveAliasOutput { + s.FleetId = &v + return s +} + +// Policy that limits the number of game sessions a player can create on the +// same fleet. This optional policy gives game owners control over how players +// can consume available game server resources. A resource creation policy makes +// the following statement: "An individual player can create a maximum number +// of new game sessions within a specified time period". +// +// The policy is evaluated when a player tries to create a new game session. +// For example, with a policy of 10 new game sessions and a time period of 60 +// minutes, on receiving a CreateGameSession request, Amazon GameLift checks +// that the player (identified by CreatorId) has created fewer than 10 game +// sessions in the past 60 minutes. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResourceCreationLimitPolicy +type ResourceCreationLimitPolicy struct { + _ struct{} `type:"structure"` + + // Maximum number of game sessions that an individual can create during the + // policy period. + NewGameSessionsPerCreator *int64 `type:"integer"` + + // Time span used in evaluating the resource creation limit policy. 
+ PolicyPeriodInMinutes *int64 `type:"integer"` +} + +// String returns the string representation +func (s ResourceCreationLimitPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceCreationLimitPolicy) GoString() string { + return s.String() +} + +// SetNewGameSessionsPerCreator sets the NewGameSessionsPerCreator field's value. +func (s *ResourceCreationLimitPolicy) SetNewGameSessionsPerCreator(v int64) *ResourceCreationLimitPolicy { + s.NewGameSessionsPerCreator = &v + return s +} + +// SetPolicyPeriodInMinutes sets the PolicyPeriodInMinutes field's value. +func (s *ResourceCreationLimitPolicy) SetPolicyPeriodInMinutes(v int64) *ResourceCreationLimitPolicy { + s.PolicyPeriodInMinutes = &v + return s +} + +// Routing configuration for a fleet alias. +// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RoutingStrategy +type RoutingStrategy struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet that the alias points to. + FleetId *string `type:"string"` + + // Message text to be used with a terminal routing strategy. + Message *string `type:"string"` + + // Type of routing strategy. 
+ // + // Possible routing types include the following: + // + // * SIMPLE -- The alias resolves to one specific fleet. Use this type when + // routing to active fleets. + // + // * TERMINAL -- The alias does not resolve to a fleet but instead can be + // used to display a message to the user. A terminal alias throws a TerminalRoutingStrategyException + // with the RoutingStrategy message embedded. + Type *string `type:"string" enum:"RoutingStrategyType"` +} + +// String returns the string representation +func (s RoutingStrategy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoutingStrategy) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *RoutingStrategy) SetFleetId(v string) *RoutingStrategy { + s.FleetId = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *RoutingStrategy) SetMessage(v string) *RoutingStrategy { + s.Message = &v + return s +} + +// SetType sets the Type field's value. +func (s *RoutingStrategy) SetType(v string) *RoutingStrategy { + s.Type = &v + return s +} + +// A collection of server process configurations that describe what processes +// to run on each instance in a fleet. All fleets must have a run-time configuration. +// Each instance in the fleet launches the server processes specified in the +// run-time configuration and launches new ones as existing processes end. Each +// instance regularly checks for an updated run-time configuration and follows +// the new instructions. +// +// The run-time configuration enables the instances in a fleet to run multiple +// processes simultaneously. Potential scenarios are as follows: (1) Run multiple +// processes of a single game server executable to maximize usage of your hosting +// resources. 
(2) Run one or more processes of different build executables, +// such as your game server executable and a related program, or two or more +// different versions of a game server. (3) Run multiple processes of a single +// game server but with different launch parameters, for example to run one +// process on each instance in debug mode. +// +// A Amazon GameLift instance is limited to 50 processes running simultaneously. +// A run-time configuration must specify fewer than this limit. To calculate +// the total number of processes specified in a run-time configuration, add +// the values of the ConcurrentExecutions parameter for each ServerProcess object +// in the run-time configuration. +// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RuntimeConfiguration +type RuntimeConfiguration struct { + _ struct{} `type:"structure"` + + // Maximum amount of time (in seconds) that a game session can remain in status + // ACTIVATING. If the game session is not active before the timeout, activation + // is terminated and the game session status is changed to TERMINATED. + GameSessionActivationTimeoutSeconds *int64 `min:"1" type:"integer"` + + // Maximum number of game sessions with status ACTIVATING to allow on an instance + // simultaneously. 
This setting limits the amount of instance resources that + // can be used for new game activations at any one time. + MaxConcurrentGameSessionActivations *int64 `min:"1" type:"integer"` + + // Collection of server process configurations that describe which server processes + // to run on each instance in a fleet. + ServerProcesses []*ServerProcess `min:"1" type:"list"` +} + +// String returns the string representation +func (s RuntimeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RuntimeConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RuntimeConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RuntimeConfiguration"} + if s.GameSessionActivationTimeoutSeconds != nil && *s.GameSessionActivationTimeoutSeconds < 1 { + invalidParams.Add(request.NewErrParamMinValue("GameSessionActivationTimeoutSeconds", 1)) + } + if s.MaxConcurrentGameSessionActivations != nil && *s.MaxConcurrentGameSessionActivations < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxConcurrentGameSessionActivations", 1)) + } + if s.ServerProcesses != nil && len(s.ServerProcesses) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerProcesses", 1)) + } + if s.ServerProcesses != nil { + for i, v := range s.ServerProcesses { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ServerProcesses", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameSessionActivationTimeoutSeconds sets the GameSessionActivationTimeoutSeconds field's value. 
+func (s *RuntimeConfiguration) SetGameSessionActivationTimeoutSeconds(v int64) *RuntimeConfiguration { + s.GameSessionActivationTimeoutSeconds = &v + return s +} + +// SetMaxConcurrentGameSessionActivations sets the MaxConcurrentGameSessionActivations field's value. +func (s *RuntimeConfiguration) SetMaxConcurrentGameSessionActivations(v int64) *RuntimeConfiguration { + s.MaxConcurrentGameSessionActivations = &v + return s +} + +// SetServerProcesses sets the ServerProcesses field's value. +func (s *RuntimeConfiguration) SetServerProcesses(v []*ServerProcess) *RuntimeConfiguration { + s.ServerProcesses = v + return s +} + +// Location in Amazon Simple Storage Service (Amazon S3) where build files can +// be stored for access by Amazon GameLift. This location is specified in a +// CreateBuild request. For more details, see the Create a Build with Files +// in Amazon S3 (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build). +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/S3Location +type S3Location struct { + _ struct{} `type:"structure"` + + // Amazon S3 bucket identifier. This is the name of your S3 bucket. + Bucket *string `min:"1" type:"string"` + + // Name of the zip file containing your build files. + Key *string `min:"1" type:"string"` + + // Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) + // for the access role that allows Amazon GameLift to access your S3 bucket. + RoleArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s S3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Location) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *S3Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3Location"} + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *S3Location) SetBucket(v string) *S3Location { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *S3Location) SetKey(v string) *S3Location { + s.Key = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *S3Location) SetRoleArn(v string) *S3Location { + s.RoleArn = &v + return s +} + +// Rule that controls how a fleet is scaled. Scaling policies are uniquely identified +// by the combination of name and fleet ID. +// +// Fleet-related operations include: +// +// * CreateFleet +// +// * ListFleets +// +// * Describe fleets: +// +// DescribeFleetAttributes +// +// DescribeFleetPortSettings +// +// DescribeFleetUtilization +// +// DescribeRuntimeConfiguration +// +// DescribeFleetEvents +// +// * Update fleets: +// +// UpdateFleetAttributes +// +// UpdateFleetCapacity +// +// UpdateFleetPortSettings +// +// UpdateRuntimeConfiguration +// +// * Manage fleet capacity: +// +// DescribeFleetCapacity +// +// UpdateFleetCapacity +// +// PutScalingPolicy (automatic scaling) +// +// DescribeScalingPolicies (automatic scaling) +// +// DeleteScalingPolicy (automatic scaling) +// +// DescribeEC2InstanceLimits +// +// * DeleteFleet +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ScalingPolicy +type ScalingPolicy struct { + _ struct{} `type:"structure"` + + // Comparison operator to use when measuring a metric against the threshold + // value. 
+ ComparisonOperator *string `type:"string" enum:"ComparisonOperatorType"` + + // Length of time (in minutes) the metric must be at or beyond the threshold + // before a scaling event is triggered. + EvaluationPeriods *int64 `min:"1" type:"integer"` + + // Unique identifier for a fleet that is associated with this scaling policy. + FleetId *string `type:"string"` + + // Name of the Amazon GameLift-defined metric that is used to trigger an adjustment. + // + // * ActivatingGameSessions -- number of game sessions in the process of + // being created (game session status = ACTIVATING). + // + // * ActiveGameSessions -- number of game sessions currently running (game + // session status = ACTIVE). + // + // * CurrentPlayerSessions -- number of active or reserved player sessions + // (player session status = ACTIVE or RESERVED). + // + // * AvailablePlayerSessions -- number of player session slots currently + // available in active game sessions across the fleet, calculated by subtracting + // a game session's current player session count from its maximum player + // session count. This number does include game sessions that are not currently + // accepting players (game session PlayerSessionCreationPolicy = DENY_ALL). + // + // * ActiveInstances -- number of instances currently running a game session. + // + // * IdleInstances -- number of instances not currently running a game session. + MetricName *string `type:"string" enum:"MetricName"` + + // Descriptive label that is associated with a scaling policy. Policy names + // do not need to be unique. + Name *string `min:"1" type:"string"` + + // Amount of adjustment to make, based on the scaling adjustment type. + ScalingAdjustment *int64 `type:"integer"` + + // Type of adjustment to make to a fleet's instance count (see FleetCapacity): + // + // * ChangeInCapacity -- add (or subtract) the scaling adjustment value from + // the current instance count. Positive values scale up while negative values + // scale down. 
+ // + // * ExactCapacity -- set the instance count to the scaling adjustment value. + // + // * PercentChangeInCapacity -- increase or reduce the current instance count + // by the scaling adjustment, read as a percentage. Positive values scale + // up while negative values scale down. + ScalingAdjustmentType *string `type:"string" enum:"ScalingAdjustmentType"` + + // Current status of the scaling policy. The scaling policy is only in force + // when in an ACTIVE status. + // + // * ACTIVE -- The scaling policy is currently in force. + // + // * UPDATE_REQUESTED -- A request to update the scaling policy has been + // received. + // + // * UPDATING -- A change is being made to the scaling policy. + // + // * DELETE_REQUESTED -- A request to delete the scaling policy has been + // received. + // + // * DELETING -- The scaling policy is being deleted. + // + // * DELETED -- The scaling policy has been deleted. + // + // * ERROR -- An error occurred in creating the policy. It should be removed + // and recreated. + Status *string `type:"string" enum:"ScalingStatusType"` + + // Metric value used to trigger a scaling event. + Threshold *float64 `type:"double"` +} + +// String returns the string representation +func (s ScalingPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingPolicy) GoString() string { + return s.String() +} + +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *ScalingPolicy) SetComparisonOperator(v string) *ScalingPolicy { + s.ComparisonOperator = &v + return s +} + +// SetEvaluationPeriods sets the EvaluationPeriods field's value. +func (s *ScalingPolicy) SetEvaluationPeriods(v int64) *ScalingPolicy { + s.EvaluationPeriods = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *ScalingPolicy) SetFleetId(v string) *ScalingPolicy { + s.FleetId = &v + return s +} + +// SetMetricName sets the MetricName field's value. 
func (s *ScalingPolicy) SetMetricName(v string) *ScalingPolicy {
    s.MetricName = &v
    return s
}

// SetName sets the Name field's value.
// Setters take the value (not a pointer) and return the receiver so calls
// can be chained; this is the generated-SDK convention used throughout.
func (s *ScalingPolicy) SetName(v string) *ScalingPolicy {
    s.Name = &v
    return s
}

// SetScalingAdjustment sets the ScalingAdjustment field's value.
func (s *ScalingPolicy) SetScalingAdjustment(v int64) *ScalingPolicy {
    s.ScalingAdjustment = &v
    return s
}

// SetScalingAdjustmentType sets the ScalingAdjustmentType field's value.
func (s *ScalingPolicy) SetScalingAdjustmentType(v string) *ScalingPolicy {
    s.ScalingAdjustmentType = &v
    return s
}

// SetStatus sets the Status field's value.
func (s *ScalingPolicy) SetStatus(v string) *ScalingPolicy {
    s.Status = &v
    return s
}

// SetThreshold sets the Threshold field's value.
func (s *ScalingPolicy) SetThreshold(v float64) *ScalingPolicy {
    s.Threshold = &v
    return s
}

// Represents the input for a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/SearchGameSessionsInput
//
// NOTE(review): this looks like machine-generated AWS SDK code (awsutil /
// request helpers, wire-format struct tags); presumably it should be changed
// by regenerating from the API model, not by hand — confirm before editing.
type SearchGameSessionsInput struct {
    // Anonymous marker field; its tag carries the wire shape for the SDK
    // marshaler (presumed — standard generated-SDK pattern).
    _ struct{} `type:"structure"`

    // Unique identifier for an alias associated with the fleet to search for active
    // game sessions. Each request must reference either a fleet ID or alias ID,
    // but not both.
    AliasId *string `type:"string"`

    // String containing the search criteria for the session search. If no filter
    // expression is included, the request returns results for all game sessions
    // in the fleet that are in ACTIVE status.
    //
    // A filter expression can contain one or multiple conditions. Each condition
    // consists of the following:
    //
    //    * Operand -- Name of a game session attribute. Valid values are gameSessionName,
    //    gameSessionId, creationTimeMillis, playerSessionCount, maximumSessions,
    //    hasAvailablePlayerSessions.
    //
    //    * Comparator -- Valid comparators are: =, <>, <, >, <=, >=.
    //
    //    * Value -- Value to be searched for. Values can be numbers, boolean values
    //    (true/false) or strings. String values are case sensitive, enclosed in
    //    single quotes. Special characters must be escaped. Boolean and string
    //    values can only be used with the comparators = and <>. For example, the
    //    following filter expression searches on gameSessionName: "FilterExpression":
    //    "gameSessionName = 'Matt\\'s Awesome Game 1'".
    //
    // To chain multiple conditions in a single expression, use the logical keywords
    // AND, OR, and NOT and parentheses as needed. For example: x AND y AND NOT
    // z, NOT (x OR y).
    //
    // Session search evaluates conditions from left to right using the following
    // precedence rules:
    //
    // =, <>, <, >, <=, >=
    //
    // Parentheses
    //
    // NOT
    //
    // AND
    //
    // OR
    //
    // For example, this filter expression retrieves game sessions hosting at least
    // ten players that have an open player slot: "maximumSessions>=10 AND hasAvailablePlayerSessions=true".
    FilterExpression *string `min:"1" type:"string"`

    // Unique identifier for a fleet to search for active game sessions. Each request
    // must reference either a fleet ID or alias ID, but not both.
    FleetId *string `type:"string"`

    // Maximum number of results to return. Use this parameter with NextToken to
    // get results as a set of sequential pages. The maximum number of results returned
    // is 20, even if this value is not set or is set higher than 20.
    Limit *int64 `min:"1" type:"integer"`

    // Token that indicates the start of the next sequential page of results. Use
    // the token that is returned with a previous call to this action. To start
    // at the beginning of the result set, do not specify a value.
    NextToken *string `min:"1" type:"string"`

    // Instructions on how to sort the search results. If no sort expression is
    // included, the request returns results in random order. A sort expression
    // consists of the following elements:
    //
    //    * Operand -- Name of a game session attribute. Valid values are gameSessionName,
    //    gameSessionId, creationTimeMillis, playerSessionCount, maximumSessions,
    //    hasAvailablePlayerSessions.
    //
    //    * Order -- Valid sort orders are ASC (ascending) and DESC (descending).
    //
    // For example, this sort expression returns the oldest active sessions first:
    // "SortExpression": "creationTimeMillis ASC". Results with a null value for
    // the sort operand are returned at the end of the list.
    SortExpression *string `min:"1" type:"string"`
}

// String returns the string representation
func (s SearchGameSessionsInput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SearchGameSessionsInput) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// A nil return means every client-side constraint (min lengths / min values)
// was satisfied; note that AliasId and FleetId carry no constraints here.
func (s *SearchGameSessionsInput) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "SearchGameSessionsInput"}
    if s.FilterExpression != nil && len(*s.FilterExpression) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("FilterExpression", 1))
    }
    if s.Limit != nil && *s.Limit < 1 {
        invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
    }
    if s.NextToken != nil && len(*s.NextToken) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
    }
    if s.SortExpression != nil && len(*s.SortExpression) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("SortExpression", 1))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetAliasId sets the AliasId field's value.
func (s *SearchGameSessionsInput) SetAliasId(v string) *SearchGameSessionsInput {
    s.AliasId = &v
    return s
}

// SetFilterExpression sets the FilterExpression field's value.
func (s *SearchGameSessionsInput) SetFilterExpression(v string) *SearchGameSessionsInput {
    s.FilterExpression = &v
    return s
}

// SetFleetId sets the FleetId field's value.
// Generated chaining setter: copies v and stores its address.
func (s *SearchGameSessionsInput) SetFleetId(v string) *SearchGameSessionsInput {
    s.FleetId = &v
    return s
}

// SetLimit sets the Limit field's value.
func (s *SearchGameSessionsInput) SetLimit(v int64) *SearchGameSessionsInput {
    s.Limit = &v
    return s
}

// SetNextToken sets the NextToken field's value.
func (s *SearchGameSessionsInput) SetNextToken(v string) *SearchGameSessionsInput {
    s.NextToken = &v
    return s
}

// SetSortExpression sets the SortExpression field's value.
func (s *SearchGameSessionsInput) SetSortExpression(v string) *SearchGameSessionsInput {
    s.SortExpression = &v
    return s
}

// Represents the returned data in response to a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/SearchGameSessionsOutput
type SearchGameSessionsOutput struct {
    _ struct{} `type:"structure"`

    // Collection of objects containing game session properties for each session
    // matching the request.
    GameSessions []*GameSession `type:"list"`

    // Token that indicates where to resume retrieving results on the next call
    // to this action. If no token is returned, these results represent the end
    // of the list.
    NextToken *string `min:"1" type:"string"`
}

// String returns the string representation
func (s SearchGameSessionsOutput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SearchGameSessionsOutput) GoString() string {
    return s.String()
}

// SetGameSessions sets the GameSessions field's value.
// Slice-typed setters store the slice directly (no copy), unlike the
// scalar setters which take the address of their argument.
func (s *SearchGameSessionsOutput) SetGameSessions(v []*GameSession) *SearchGameSessionsOutput {
    s.GameSessions = v
    return s
}

// SetNextToken sets the NextToken field's value.
func (s *SearchGameSessionsOutput) SetNextToken(v string) *SearchGameSessionsOutput {
    s.NextToken = &v
    return s
}

// A set of instructions for launching server processes on each instance in
// a fleet. Each instruction set identifies the location of the server executable,
// optional launch parameters, and the number of server processes with this
// configuration to maintain concurrently on the instance. Server process configurations
// make up a fleet's RuntimeConfiguration.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ServerProcess
type ServerProcess struct {
    // Anonymous marker field for the SDK marshaler (presumed generated pattern).
    _ struct{} `type:"structure"`

    // Number of server processes using this configuration to run concurrently on
    // an instance.
    //
    // ConcurrentExecutions is a required field
    ConcurrentExecutions *int64 `min:"1" type:"integer" required:"true"`

    // Location of the server executable in a game build. All game builds are installed
    // on instances at the root: for Windows instances C:\game, and for Linux instances
    // /local/game. A Windows game build with an executable file located at MyGame\latest\server.exe
    // must have a launch path of "C:\game\MyGame\latest\server.exe". A Linux game
    // build with an executable file located at MyGame/latest/server.exe must have
    // a launch path of "/local/game/MyGame/latest/server.exe".
    //
    // LaunchPath is a required field
    LaunchPath *string `min:"1" type:"string" required:"true"`

    // Optional list of parameters to pass to the server executable on launch.
    Parameters *string `min:"1" type:"string"`
}

// String returns the string representation
func (s ServerProcess) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ServerProcess) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ServerProcess) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "ServerProcess"}
    if s.ConcurrentExecutions == nil {
        invalidParams.Add(request.NewErrParamRequired("ConcurrentExecutions"))
    }
    if s.ConcurrentExecutions != nil && *s.ConcurrentExecutions < 1 {
        invalidParams.Add(request.NewErrParamMinValue("ConcurrentExecutions", 1))
    }
    if s.LaunchPath == nil {
        invalidParams.Add(request.NewErrParamRequired("LaunchPath"))
    }
    if s.LaunchPath != nil && len(*s.LaunchPath) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("LaunchPath", 1))
    }
    if s.Parameters != nil && len(*s.Parameters) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("Parameters", 1))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetConcurrentExecutions sets the ConcurrentExecutions field's value.
func (s *ServerProcess) SetConcurrentExecutions(v int64) *ServerProcess {
    s.ConcurrentExecutions = &v
    return s
}

// SetLaunchPath sets the LaunchPath field's value.
func (s *ServerProcess) SetLaunchPath(v string) *ServerProcess {
    s.LaunchPath = &v
    return s
}

// SetParameters sets the Parameters field's value.
func (s *ServerProcess) SetParameters(v string) *ServerProcess {
    s.Parameters = &v
    return s
}

// Represents the input for a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartGameSessionPlacementInput
type StartGameSessionPlacementInput struct {
    _ struct{} `type:"structure"`

    // Set of information on each player to create a player session for.
    DesiredPlayerSessions []*DesiredPlayerSession `type:"list"`

    // Set of developer-defined properties for a game session, formatted as a set
    // of type:value pairs. These properties are included in the GameSession object,
    // which is passed to the game server with a request to start a new game session
    // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)).
    GameProperties []*GameProperty `type:"list"`

    // Set of developer-defined game session properties, formatted as a single string
    // value. This data is included in the GameSession object, which is passed to
    // the game server with a request to start a new game session (see Start a Game
    // Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)).
    GameSessionData *string `min:"1" type:"string"`

    // Descriptive label that is associated with a game session. Session names do
    // not need to be unique.
    GameSessionName *string `min:"1" type:"string"`

    // Name of the queue to use to place the new game session.
    //
    // GameSessionQueueName is a required field
    GameSessionQueueName *string `min:"1" type:"string" required:"true"`

    // Maximum number of players that can be connected simultaneously to the game
    // session.
    //
    // MaximumPlayerSessionCount is a required field
    MaximumPlayerSessionCount *int64 `type:"integer" required:"true"`

    // Unique identifier to assign to the new game session placement. This value
    // is developer-defined. The value must be unique across all regions and cannot
    // be reused unless you are resubmitting a canceled or timed-out placement request.
    //
    // PlacementId is a required field
    PlacementId *string `min:"1" type:"string" required:"true"`

    // Set of values, expressed in milliseconds, indicating the amount of latency
    // that a player experiences when connected to AWS regions. This information
    // is used to try to place the new game session where it can offer the best
    // possible gameplay experience for the players.
    PlayerLatencies []*PlayerLatency `type:"list"`
}

// String returns the string representation
func (s StartGameSessionPlacementInput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartGameSessionPlacementInput) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Scalar constraints are checked first, then each element of the three list
// fields is recursively validated, with failures nested under "Field[i]".
func (s *StartGameSessionPlacementInput) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "StartGameSessionPlacementInput"}
    if s.GameSessionData != nil && len(*s.GameSessionData) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("GameSessionData", 1))
    }
    if s.GameSessionName != nil && len(*s.GameSessionName) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("GameSessionName", 1))
    }
    if s.GameSessionQueueName == nil {
        invalidParams.Add(request.NewErrParamRequired("GameSessionQueueName"))
    }
    if s.GameSessionQueueName != nil && len(*s.GameSessionQueueName) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("GameSessionQueueName", 1))
    }
    if s.MaximumPlayerSessionCount == nil {
        invalidParams.Add(request.NewErrParamRequired("MaximumPlayerSessionCount"))
    }
    if s.PlacementId == nil {
        invalidParams.Add(request.NewErrParamRequired("PlacementId"))
    }
    if s.PlacementId != nil && len(*s.PlacementId) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("PlacementId", 1))
    }
    if s.DesiredPlayerSessions != nil {
        for i, v := range s.DesiredPlayerSessions {
            if v == nil {
                continue
            }
            if err := v.Validate(); err != nil {
                invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DesiredPlayerSessions", i), err.(request.ErrInvalidParams))
            }
        }
    }
    if s.GameProperties != nil {
        for i, v := range s.GameProperties {
            if v == nil {
                continue
            }
            if err := v.Validate(); err != nil {
                invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GameProperties", i), err.(request.ErrInvalidParams))
            }
        }
    }
    if s.PlayerLatencies != nil {
        for i, v := range s.PlayerLatencies {
            if v == nil {
                continue
            }
            if err := v.Validate(); err != nil {
                invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PlayerLatencies", i), err.(request.ErrInvalidParams))
            }
        }
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetDesiredPlayerSessions sets the DesiredPlayerSessions field's value.
func (s *StartGameSessionPlacementInput) SetDesiredPlayerSessions(v []*DesiredPlayerSession) *StartGameSessionPlacementInput {
    s.DesiredPlayerSessions = v
    return s
}

// SetGameProperties sets the GameProperties field's value.
func (s *StartGameSessionPlacementInput) SetGameProperties(v []*GameProperty) *StartGameSessionPlacementInput {
    s.GameProperties = v
    return s
}

// SetGameSessionData sets the GameSessionData field's value.
func (s *StartGameSessionPlacementInput) SetGameSessionData(v string) *StartGameSessionPlacementInput {
    s.GameSessionData = &v
    return s
}

// SetGameSessionName sets the GameSessionName field's value.
func (s *StartGameSessionPlacementInput) SetGameSessionName(v string) *StartGameSessionPlacementInput {
    s.GameSessionName = &v
    return s
}

// SetGameSessionQueueName sets the GameSessionQueueName field's value.
func (s *StartGameSessionPlacementInput) SetGameSessionQueueName(v string) *StartGameSessionPlacementInput {
    s.GameSessionQueueName = &v
    return s
}

// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value.
func (s *StartGameSessionPlacementInput) SetMaximumPlayerSessionCount(v int64) *StartGameSessionPlacementInput {
    s.MaximumPlayerSessionCount = &v
    return s
}

// SetPlacementId sets the PlacementId field's value.
func (s *StartGameSessionPlacementInput) SetPlacementId(v string) *StartGameSessionPlacementInput {
    s.PlacementId = &v
    return s
}

// SetPlayerLatencies sets the PlayerLatencies field's value.
func (s *StartGameSessionPlacementInput) SetPlayerLatencies(v []*PlayerLatency) *StartGameSessionPlacementInput {
    s.PlayerLatencies = v
    return s
}

// Represents the returned data in response to a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartGameSessionPlacementOutput
type StartGameSessionPlacementOutput struct {
    _ struct{} `type:"structure"`

    // Object that describes the newly created game session placement. This object
    // includes all the information provided in the request, as well as start/end
    // time stamps and placement status.
    GameSessionPlacement *GameSessionPlacement `type:"structure"`
}

// String returns the string representation
func (s StartGameSessionPlacementOutput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartGameSessionPlacementOutput) GoString() string {
    return s.String()
}

// SetGameSessionPlacement sets the GameSessionPlacement field's value.
func (s *StartGameSessionPlacementOutput) SetGameSessionPlacement(v *GameSessionPlacement) *StartGameSessionPlacementOutput {
    s.GameSessionPlacement = v
    return s
}

// Represents the input for a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartMatchmakingInput
type StartMatchmakingInput struct {
    _ struct{} `type:"structure"`

    // Name of the matchmaking configuration to use for this request. Matchmaking
    // configurations must exist in the same region as this request.
    //
    // ConfigurationName is a required field
    ConfigurationName *string `min:"1" type:"string" required:"true"`

    // Information on each player to be matched. This information must include a
    // player ID, and may contain player attributes and latency data to be used
    // in the matchmaking process. After a successful match, Player objects contain
    // the name of the team the player is assigned to.
    //
    // Players is a required field
    Players []*Player `type:"list" required:"true"`

    // Unique identifier for a matchmaking ticket. Use this identifier to track
    // the matchmaking ticket status and retrieve match results.
    TicketId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s StartMatchmakingInput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartMatchmakingInput) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Note that Players is only checked for presence here; each non-nil element
// is then validated recursively, with failures nested under "Players[i]".
func (s *StartMatchmakingInput) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "StartMatchmakingInput"}
    if s.ConfigurationName == nil {
        invalidParams.Add(request.NewErrParamRequired("ConfigurationName"))
    }
    if s.ConfigurationName != nil && len(*s.ConfigurationName) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("ConfigurationName", 1))
    }
    if s.Players == nil {
        invalidParams.Add(request.NewErrParamRequired("Players"))
    }
    if s.TicketId != nil && len(*s.TicketId) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("TicketId", 1))
    }
    if s.Players != nil {
        for i, v := range s.Players {
            if v == nil {
                continue
            }
            if err := v.Validate(); err != nil {
                invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Players", i), err.(request.ErrInvalidParams))
            }
        }
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetConfigurationName sets the ConfigurationName field's value.
func (s *StartMatchmakingInput) SetConfigurationName(v string) *StartMatchmakingInput {
    s.ConfigurationName = &v
    return s
}

// SetPlayers sets the Players field's value.
func (s *StartMatchmakingInput) SetPlayers(v []*Player) *StartMatchmakingInput {
    s.Players = v
    return s
}

// SetTicketId sets the TicketId field's value.
func (s *StartMatchmakingInput) SetTicketId(v string) *StartMatchmakingInput {
    s.TicketId = &v
    return s
}

// Represents the returned data in response to a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartMatchmakingOutput
type StartMatchmakingOutput struct {
    _ struct{} `type:"structure"`

    // Ticket representing the matchmaking request. This object includes the information
    // included in the request, ticket status, and match results as generated during
    // the matchmaking process.
    MatchmakingTicket *MatchmakingTicket `type:"structure"`
}

// String returns the string representation
func (s StartMatchmakingOutput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartMatchmakingOutput) GoString() string {
    return s.String()
}

// SetMatchmakingTicket sets the MatchmakingTicket field's value.
func (s *StartMatchmakingOutput) SetMatchmakingTicket(v *MatchmakingTicket) *StartMatchmakingOutput {
    s.MatchmakingTicket = v
    return s
}

// Represents the input for a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopGameSessionPlacementInput
type StopGameSessionPlacementInput struct {
    _ struct{} `type:"structure"`

    // Unique identifier for a game session placement to cancel.
    //
    // PlacementId is a required field
    PlacementId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s StopGameSessionPlacementInput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StopGameSessionPlacementInput) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StopGameSessionPlacementInput) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "StopGameSessionPlacementInput"}
    if s.PlacementId == nil {
        invalidParams.Add(request.NewErrParamRequired("PlacementId"))
    }
    if s.PlacementId != nil && len(*s.PlacementId) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("PlacementId", 1))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetPlacementId sets the PlacementId field's value.
func (s *StopGameSessionPlacementInput) SetPlacementId(v string) *StopGameSessionPlacementInput {
    s.PlacementId = &v
    return s
}

// Represents the returned data in response to a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopGameSessionPlacementOutput
type StopGameSessionPlacementOutput struct {
    _ struct{} `type:"structure"`

    // Object that describes the canceled game session placement, with CANCELLED
    // status and an end time stamp.
    GameSessionPlacement *GameSessionPlacement `type:"structure"`
}

// String returns the string representation
func (s StopGameSessionPlacementOutput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StopGameSessionPlacementOutput) GoString() string {
    return s.String()
}

// SetGameSessionPlacement sets the GameSessionPlacement field's value.
func (s *StopGameSessionPlacementOutput) SetGameSessionPlacement(v *GameSessionPlacement) *StopGameSessionPlacementOutput {
    s.GameSessionPlacement = v
    return s
}

// Represents the input for a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmakingInput
type StopMatchmakingInput struct {
    _ struct{} `type:"structure"`

    // Unique identifier for a matchmaking ticket.
    //
    // TicketId is a required field
    TicketId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s StopMatchmakingInput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StopMatchmakingInput) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StopMatchmakingInput) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "StopMatchmakingInput"}
    if s.TicketId == nil {
        invalidParams.Add(request.NewErrParamRequired("TicketId"))
    }
    if s.TicketId != nil && len(*s.TicketId) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("TicketId", 1))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetTicketId sets the TicketId field's value.
func (s *StopMatchmakingInput) SetTicketId(v string) *StopMatchmakingInput {
    s.TicketId = &v
    return s
}

// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmakingOutput
//
// The response carries no data beyond success/failure, hence the empty struct.
type StopMatchmakingOutput struct {
    _ struct{} `type:"structure"`
}

// String returns the string representation
func (s StopMatchmakingOutput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StopMatchmakingOutput) GoString() string {
    return s.String()
}

// Represents the input for a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateAliasInput
type UpdateAliasInput struct {
    _ struct{} `type:"structure"`

    // Unique identifier for a fleet alias. Specify the alias you want to update.
    //
    // AliasId is a required field
    AliasId *string `type:"string" required:"true"`

    // Human-readable description of an alias.
    Description *string `min:"1" type:"string"`

    // Descriptive label that is associated with an alias. Alias names do not need
    // to be unique.
    Name *string `min:"1" type:"string"`

    // Object that specifies the fleet and routing type to use for the alias.
    RoutingStrategy *RoutingStrategy `type:"structure"`
}

// String returns the string representation
func (s UpdateAliasInput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateAliasInput) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateAliasInput) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "UpdateAliasInput"}
    if s.AliasId == nil {
        invalidParams.Add(request.NewErrParamRequired("AliasId"))
    }
    if s.Description != nil && len(*s.Description) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("Description", 1))
    }
    if s.Name != nil && len(*s.Name) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("Name", 1))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetAliasId sets the AliasId field's value.
func (s *UpdateAliasInput) SetAliasId(v string) *UpdateAliasInput {
    s.AliasId = &v
    return s
}

// SetDescription sets the Description field's value.
func (s *UpdateAliasInput) SetDescription(v string) *UpdateAliasInput {
    s.Description = &v
    return s
}

// SetName sets the Name field's value.
func (s *UpdateAliasInput) SetName(v string) *UpdateAliasInput {
    s.Name = &v
    return s
}

// SetRoutingStrategy sets the RoutingStrategy field's value.
func (s *UpdateAliasInput) SetRoutingStrategy(v *RoutingStrategy) *UpdateAliasInput {
    s.RoutingStrategy = v
    return s
}

// Represents the returned data in response to a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateAliasOutput
type UpdateAliasOutput struct {
    _ struct{} `type:"structure"`

    // Object that contains the updated alias configuration.
    Alias *Alias `type:"structure"`
}

// String returns the string representation
func (s UpdateAliasOutput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateAliasOutput) GoString() string {
    return s.String()
}

// SetAlias sets the Alias field's value.
func (s *UpdateAliasOutput) SetAlias(v *Alias) *UpdateAliasOutput {
    s.Alias = v
    return s
}

// Represents the input for a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateBuildInput
type UpdateBuildInput struct {
    _ struct{} `type:"structure"`

    // Unique identifier for a build to update.
    //
    // BuildId is a required field
    BuildId *string `type:"string" required:"true"`

    // Descriptive label that is associated with a build. Build names do not need
    // to be unique.
    Name *string `min:"1" type:"string"`

    // Version that is associated with this build. Version strings do not need to
    // be unique.
    Version *string `min:"1" type:"string"`
}

// String returns the string representation
func (s UpdateBuildInput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateBuildInput) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateBuildInput) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "UpdateBuildInput"}
    if s.BuildId == nil {
        invalidParams.Add(request.NewErrParamRequired("BuildId"))
    }
    if s.Name != nil && len(*s.Name) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("Name", 1))
    }
    if s.Version != nil && len(*s.Version) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("Version", 1))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetBuildId sets the BuildId field's value.
func (s *UpdateBuildInput) SetBuildId(v string) *UpdateBuildInput {
    s.BuildId = &v
    return s
}

// SetName sets the Name field's value.
func (s *UpdateBuildInput) SetName(v string) *UpdateBuildInput {
    s.Name = &v
    return s
}

// SetVersion sets the Version field's value.
func (s *UpdateBuildInput) SetVersion(v string) *UpdateBuildInput {
    s.Version = &v
    return s
}

// Represents the returned data in response to a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateBuildOutput
type UpdateBuildOutput struct {
    _ struct{} `type:"structure"`

    // Object that contains the updated build record.
    Build *Build `type:"structure"`
}

// String returns the string representation
func (s UpdateBuildOutput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateBuildOutput) GoString() string {
    return s.String()
}

// SetBuild sets the Build field's value.
func (s *UpdateBuildOutput) SetBuild(v *Build) *UpdateBuildOutput {
    s.Build = v
    return s
}

// Represents the input for a request action.
// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetAttributesInput
type UpdateFleetAttributesInput struct {
    _ struct{} `type:"structure"`

    // Human-readable description of a fleet.
    Description *string `min:"1" type:"string"`

    // Unique identifier for a fleet to update attribute metadata for.
    //
    // FleetId is a required field
    FleetId *string `type:"string" required:"true"`

    // Names of metric groups to include this fleet in. Amazon CloudWatch uses a
    // fleet metric group to aggregate metrics from multiple fleets. Use an existing
    // metric group name to add this fleet to the group. Or use a new name to create
    // a new metric group. A fleet can only be included in one metric group at a
    // time.
    MetricGroups []*string `type:"list"`

    // Descriptive label that is associated with a fleet. Fleet names do not need
    // to be unique.
    Name *string `min:"1" type:"string"`

    // Game session protection policy to apply to all new instances created in this
    // fleet. Instances that already exist are not affected. You can set protection
    // for individual instances using UpdateGameSession.
    //
    //    * NoProtection -- The game session can be terminated during a scale-down
    //    event.
    //
    //    * FullProtection -- If the game session is in an ACTIVE status, it cannot
    //    be terminated during a scale-down event.
    NewGameSessionProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"`

    // Policy that limits the number of game sessions an individual player can create
    // over a span of time.
    ResourceCreationLimitPolicy *ResourceCreationLimitPolicy `type:"structure"`
}

// String returns the string representation
func (s UpdateFleetAttributesInput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateFleetAttributesInput) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateFleetAttributesInput) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "UpdateFleetAttributesInput"}
    if s.Description != nil && len(*s.Description) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("Description", 1))
    }
    if s.FleetId == nil {
        invalidParams.Add(request.NewErrParamRequired("FleetId"))
    }
    if s.Name != nil && len(*s.Name) < 1 {
        invalidParams.Add(request.NewErrParamMinLen("Name", 1))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetDescription sets the Description field's value.
+func (s *UpdateFleetAttributesInput) SetDescription(v string) *UpdateFleetAttributesInput { + s.Description = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *UpdateFleetAttributesInput) SetFleetId(v string) *UpdateFleetAttributesInput { + s.FleetId = &v + return s +} + +// SetMetricGroups sets the MetricGroups field's value. +func (s *UpdateFleetAttributesInput) SetMetricGroups(v []*string) *UpdateFleetAttributesInput { + s.MetricGroups = v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateFleetAttributesInput) SetName(v string) *UpdateFleetAttributesInput { + s.Name = &v + return s +} + +// SetNewGameSessionProtectionPolicy sets the NewGameSessionProtectionPolicy field's value. +func (s *UpdateFleetAttributesInput) SetNewGameSessionProtectionPolicy(v string) *UpdateFleetAttributesInput { + s.NewGameSessionProtectionPolicy = &v + return s +} + +// SetResourceCreationLimitPolicy sets the ResourceCreationLimitPolicy field's value. +func (s *UpdateFleetAttributesInput) SetResourceCreationLimitPolicy(v *ResourceCreationLimitPolicy) *UpdateFleetAttributesInput { + s.ResourceCreationLimitPolicy = v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetAttributesOutput +type UpdateFleetAttributesOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet that was updated. + FleetId *string `type:"string"` +} + +// String returns the string representation +func (s UpdateFleetAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetAttributesOutput) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *UpdateFleetAttributesOutput) SetFleetId(v string) *UpdateFleetAttributesOutput { + s.FleetId = &v + return s +} + +// Represents the input for a request action. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetCapacityInput +type UpdateFleetCapacityInput struct { + _ struct{} `type:"structure"` + + // Number of EC2 instances you want this fleet to host. + DesiredInstances *int64 `type:"integer"` + + // Unique identifier for a fleet to update capacity for. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // Maximum value allowed for the fleet's instance count. Default if not set + // is 1. + MaxSize *int64 `type:"integer"` + + // Minimum value allowed for the fleet's instance count. Default if not set + // is 0. + MinSize *int64 `type:"integer"` +} + +// String returns the string representation +func (s UpdateFleetCapacityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetCapacityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateFleetCapacityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateFleetCapacityInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDesiredInstances sets the DesiredInstances field's value. +func (s *UpdateFleetCapacityInput) SetDesiredInstances(v int64) *UpdateFleetCapacityInput { + s.DesiredInstances = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *UpdateFleetCapacityInput) SetFleetId(v string) *UpdateFleetCapacityInput { + s.FleetId = &v + return s +} + +// SetMaxSize sets the MaxSize field's value. +func (s *UpdateFleetCapacityInput) SetMaxSize(v int64) *UpdateFleetCapacityInput { + s.MaxSize = &v + return s +} + +// SetMinSize sets the MinSize field's value. 
+func (s *UpdateFleetCapacityInput) SetMinSize(v int64) *UpdateFleetCapacityInput { + s.MinSize = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetCapacityOutput +type UpdateFleetCapacityOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet that was updated. + FleetId *string `type:"string"` +} + +// String returns the string representation +func (s UpdateFleetCapacityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetCapacityOutput) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *UpdateFleetCapacityOutput) SetFleetId(v string) *UpdateFleetCapacityOutput { + s.FleetId = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetPortSettingsInput +type UpdateFleetPortSettingsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet to update port settings for. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // Collection of port settings to be added to the fleet record. + InboundPermissionAuthorizations []*IpPermission `type:"list"` + + // Collection of port settings to be removed from the fleet record. + InboundPermissionRevocations []*IpPermission `type:"list"` +} + +// String returns the string representation +func (s UpdateFleetPortSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetPortSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateFleetPortSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateFleetPortSettingsInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.InboundPermissionAuthorizations != nil { + for i, v := range s.InboundPermissionAuthorizations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InboundPermissionAuthorizations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.InboundPermissionRevocations != nil { + for i, v := range s.InboundPermissionRevocations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InboundPermissionRevocations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *UpdateFleetPortSettingsInput) SetFleetId(v string) *UpdateFleetPortSettingsInput { + s.FleetId = &v + return s +} + +// SetInboundPermissionAuthorizations sets the InboundPermissionAuthorizations field's value. +func (s *UpdateFleetPortSettingsInput) SetInboundPermissionAuthorizations(v []*IpPermission) *UpdateFleetPortSettingsInput { + s.InboundPermissionAuthorizations = v + return s +} + +// SetInboundPermissionRevocations sets the InboundPermissionRevocations field's value. +func (s *UpdateFleetPortSettingsInput) SetInboundPermissionRevocations(v []*IpPermission) *UpdateFleetPortSettingsInput { + s.InboundPermissionRevocations = v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetPortSettingsOutput +type UpdateFleetPortSettingsOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet that was updated. 
+ FleetId *string `type:"string"` +} + +// String returns the string representation +func (s UpdateFleetPortSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetPortSettingsOutput) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *UpdateFleetPortSettingsOutput) SetFleetId(v string) *UpdateFleetPortSettingsOutput { + s.FleetId = &v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionInput +type UpdateGameSessionInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the game session to update. + // + // GameSessionId is a required field + GameSessionId *string `min:"1" type:"string" required:"true"` + + // Maximum number of players that can be connected simultaneously to the game + // session. + MaximumPlayerSessionCount *int64 `type:"integer"` + + // Descriptive label that is associated with a game session. Session names do + // not need to be unique. + Name *string `min:"1" type:"string"` + + // Policy determining whether or not the game session accepts new players. + PlayerSessionCreationPolicy *string `type:"string" enum:"PlayerSessionCreationPolicy"` + + // Game session protection policy to apply to this game session only. + // + // * NoProtection -- The game session can be terminated during a scale-down + // event. + // + // * FullProtection -- If the game session is in an ACTIVE status, it cannot + // be terminated during a scale-down event. 
+ ProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` +} + +// String returns the string representation +func (s UpdateGameSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGameSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGameSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGameSessionInput"} + if s.GameSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("GameSessionId")) + } + if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameSessionId sets the GameSessionId field's value. +func (s *UpdateGameSessionInput) SetGameSessionId(v string) *UpdateGameSessionInput { + s.GameSessionId = &v + return s +} + +// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. +func (s *UpdateGameSessionInput) SetMaximumPlayerSessionCount(v int64) *UpdateGameSessionInput { + s.MaximumPlayerSessionCount = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateGameSessionInput) SetName(v string) *UpdateGameSessionInput { + s.Name = &v + return s +} + +// SetPlayerSessionCreationPolicy sets the PlayerSessionCreationPolicy field's value. +func (s *UpdateGameSessionInput) SetPlayerSessionCreationPolicy(v string) *UpdateGameSessionInput { + s.PlayerSessionCreationPolicy = &v + return s +} + +// SetProtectionPolicy sets the ProtectionPolicy field's value. 
+func (s *UpdateGameSessionInput) SetProtectionPolicy(v string) *UpdateGameSessionInput { + s.ProtectionPolicy = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionOutput +type UpdateGameSessionOutput struct { + _ struct{} `type:"structure"` + + // Object that contains the updated game session metadata. + GameSession *GameSession `type:"structure"` +} + +// String returns the string representation +func (s UpdateGameSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGameSessionOutput) GoString() string { + return s.String() +} + +// SetGameSession sets the GameSession field's value. +func (s *UpdateGameSessionOutput) SetGameSession(v *GameSession) *UpdateGameSessionOutput { + s.GameSession = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionQueueInput +type UpdateGameSessionQueueInput struct { + _ struct{} `type:"structure"` + + // List of fleets that can be used to fulfill game session placement requests + // in the queue. Fleets are identified by either a fleet ARN or a fleet alias + // ARN. Destinations are listed in default preference order. When updating this + // list, provide a complete list of destinations. + Destinations []*GameSessionQueueDestination `type:"list"` + + // Descriptive label that is associated with game session queue. Queue names + // must be unique within each region. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Collection of latency policies to apply when processing game sessions placement + // requests with player latency information. Multiple policies are evaluated + // in order of the maximum latency value, starting with the lowest latency values. 
+ // With just one policy, it is enforced at the start of the game session placement + // for the duration period. With multiple policies, each policy is enforced + // consecutively for its duration period. For example, a queue might enforce + // a 60-second policy followed by a 120-second policy, and then no policy for + // the remainder of the placement. When updating policies, provide a complete + // collection of policies. + PlayerLatencyPolicies []*PlayerLatencyPolicy `type:"list"` + + // Maximum time, in seconds, that a new game session placement request remains + // in the queue. When a request exceeds this time, the game session placement + // changes to a TIMED_OUT status. + TimeoutInSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s UpdateGameSessionQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGameSessionQueueInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGameSessionQueueInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGameSessionQueueInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Destinations != nil { + for i, v := range s.Destinations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Destinations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinations sets the Destinations field's value. +func (s *UpdateGameSessionQueueInput) SetDestinations(v []*GameSessionQueueDestination) *UpdateGameSessionQueueInput { + s.Destinations = v + return s +} + +// SetName sets the Name field's value. 
+func (s *UpdateGameSessionQueueInput) SetName(v string) *UpdateGameSessionQueueInput { + s.Name = &v + return s +} + +// SetPlayerLatencyPolicies sets the PlayerLatencyPolicies field's value. +func (s *UpdateGameSessionQueueInput) SetPlayerLatencyPolicies(v []*PlayerLatencyPolicy) *UpdateGameSessionQueueInput { + s.PlayerLatencyPolicies = v + return s +} + +// SetTimeoutInSeconds sets the TimeoutInSeconds field's value. +func (s *UpdateGameSessionQueueInput) SetTimeoutInSeconds(v int64) *UpdateGameSessionQueueInput { + s.TimeoutInSeconds = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionQueueOutput +type UpdateGameSessionQueueOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the newly updated game session queue. + GameSessionQueue *GameSessionQueue `type:"structure"` +} + +// String returns the string representation +func (s UpdateGameSessionQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGameSessionQueueOutput) GoString() string { + return s.String() +} + +// SetGameSessionQueue sets the GameSessionQueue field's value. +func (s *UpdateGameSessionQueueOutput) SetGameSessionQueue(v *GameSessionQueue) *UpdateGameSessionQueueOutput { + s.GameSessionQueue = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateMatchmakingConfigurationInput +type UpdateMatchmakingConfigurationInput struct { + _ struct{} `type:"structure"` + + // Flag that determines whether or not a match that was created with this configuration + // must be accepted by the matched players. To require acceptance, set to TRUE. + AcceptanceRequired *bool `type:"boolean"` + + // Length of time (in seconds) to wait for players to accept a proposed match. 
+	// If any player rejects the match or fails to accept before the timeout, the
+	// ticket continues to look for an acceptable match.
+	AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"`
+
+	// Number of player slots in a match to keep open for future players. For example,
+	// if the configuration's rule set specifies a match for a single 12-person
+	// team, and the additional player count is set to 2, only 10 players are selected
+	// for the match.
+	AdditionalPlayerCount *int64 `type:"integer"`
+
+	// Information to attach to all events related to the matchmaking configuration.
+	CustomEventData *string `type:"string"`
+
+	// Descriptive label that is associated with a matchmaking configuration.
+	Description *string `min:"1" type:"string"`
+
+	// Set of developer-defined properties for a game session, formatted as a set
+	// of type:value pairs. These properties are included in the GameSession object,
+	// which is passed to the game server with a request to start a new game session
+	// (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)).
+	// This information is added to the new GameSession object that is created for
+	// a successful match.
+	GameProperties []*GameProperty `type:"list"`
+
+	// Set of developer-defined game session properties, formatted as a single string
+	// value. This data is included in the GameSession object, which is passed to
+	// the game server with a request to start a new game session (see Start a Game
+	// Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)).
+	// This information is added to the new GameSession object that is created for
+	// a successful match.
+ GameSessionData *string `min:"1" type:"string"` + + // Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) + // that is assigned to a game session queue and uniquely identifies it. Format + // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. + // These queues are used when placing game sessions for matches that are created + // with this matchmaking configuration. Queues can be located in any region. + GameSessionQueueArns []*string `type:"list"` + + // Unique identifier for a matchmaking configuration to update. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // SNS topic ARN that is set up to receive matchmaking notifications. See Setting + // up Notifications for Matchmaking (http://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) + // for more information. + NotificationTarget *string `type:"string"` + + // Maximum duration, in seconds, that a matchmaking ticket can remain in process + // before timing out. Requests that time out can be resubmitted as needed. + RequestTimeoutSeconds *int64 `min:"1" type:"integer"` + + // Unique identifier for a matchmaking rule set to use with this configuration. + // A matchmaking configuration can only use rule sets that are defined in the + // same region. + RuleSetName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateMatchmakingConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMatchmakingConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateMatchmakingConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateMatchmakingConfigurationInput"} + if s.AcceptanceTimeoutSeconds != nil && *s.AcceptanceTimeoutSeconds < 1 { + invalidParams.Add(request.NewErrParamMinValue("AcceptanceTimeoutSeconds", 1)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.GameSessionData != nil && len(*s.GameSessionData) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameSessionData", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RequestTimeoutSeconds != nil && *s.RequestTimeoutSeconds < 1 { + invalidParams.Add(request.NewErrParamMinValue("RequestTimeoutSeconds", 1)) + } + if s.RuleSetName != nil && len(*s.RuleSetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleSetName", 1)) + } + if s.GameProperties != nil { + for i, v := range s.GameProperties { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GameProperties", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptanceRequired sets the AcceptanceRequired field's value. +func (s *UpdateMatchmakingConfigurationInput) SetAcceptanceRequired(v bool) *UpdateMatchmakingConfigurationInput { + s.AcceptanceRequired = &v + return s +} + +// SetAcceptanceTimeoutSeconds sets the AcceptanceTimeoutSeconds field's value. +func (s *UpdateMatchmakingConfigurationInput) SetAcceptanceTimeoutSeconds(v int64) *UpdateMatchmakingConfigurationInput { + s.AcceptanceTimeoutSeconds = &v + return s +} + +// SetAdditionalPlayerCount sets the AdditionalPlayerCount field's value. 
+func (s *UpdateMatchmakingConfigurationInput) SetAdditionalPlayerCount(v int64) *UpdateMatchmakingConfigurationInput { + s.AdditionalPlayerCount = &v + return s +} + +// SetCustomEventData sets the CustomEventData field's value. +func (s *UpdateMatchmakingConfigurationInput) SetCustomEventData(v string) *UpdateMatchmakingConfigurationInput { + s.CustomEventData = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateMatchmakingConfigurationInput) SetDescription(v string) *UpdateMatchmakingConfigurationInput { + s.Description = &v + return s +} + +// SetGameProperties sets the GameProperties field's value. +func (s *UpdateMatchmakingConfigurationInput) SetGameProperties(v []*GameProperty) *UpdateMatchmakingConfigurationInput { + s.GameProperties = v + return s +} + +// SetGameSessionData sets the GameSessionData field's value. +func (s *UpdateMatchmakingConfigurationInput) SetGameSessionData(v string) *UpdateMatchmakingConfigurationInput { + s.GameSessionData = &v + return s +} + +// SetGameSessionQueueArns sets the GameSessionQueueArns field's value. +func (s *UpdateMatchmakingConfigurationInput) SetGameSessionQueueArns(v []*string) *UpdateMatchmakingConfigurationInput { + s.GameSessionQueueArns = v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateMatchmakingConfigurationInput) SetName(v string) *UpdateMatchmakingConfigurationInput { + s.Name = &v + return s +} + +// SetNotificationTarget sets the NotificationTarget field's value. +func (s *UpdateMatchmakingConfigurationInput) SetNotificationTarget(v string) *UpdateMatchmakingConfigurationInput { + s.NotificationTarget = &v + return s +} + +// SetRequestTimeoutSeconds sets the RequestTimeoutSeconds field's value. +func (s *UpdateMatchmakingConfigurationInput) SetRequestTimeoutSeconds(v int64) *UpdateMatchmakingConfigurationInput { + s.RequestTimeoutSeconds = &v + return s +} + +// SetRuleSetName sets the RuleSetName field's value. 
+func (s *UpdateMatchmakingConfigurationInput) SetRuleSetName(v string) *UpdateMatchmakingConfigurationInput { + s.RuleSetName = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateMatchmakingConfigurationOutput +type UpdateMatchmakingConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the updated matchmaking configuration. + Configuration *MatchmakingConfiguration `type:"structure"` +} + +// String returns the string representation +func (s UpdateMatchmakingConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMatchmakingConfigurationOutput) GoString() string { + return s.String() +} + +// SetConfiguration sets the Configuration field's value. +func (s *UpdateMatchmakingConfigurationOutput) SetConfiguration(v *MatchmakingConfiguration) *UpdateMatchmakingConfigurationOutput { + s.Configuration = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateRuntimeConfigurationInput +type UpdateRuntimeConfigurationInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet to update run-time configuration for. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // Instructions for launching server processes on each instance in the fleet. + // The run-time configuration for a fleet has a collection of server process + // configurations, one for each type of server process to run on an instance. + // A server process configuration specifies the location of the server executable, + // launch parameters, and the number of concurrent processes with that configuration + // to maintain on each instance. 
+ // + // RuntimeConfiguration is a required field + RuntimeConfiguration *RuntimeConfiguration `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateRuntimeConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRuntimeConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRuntimeConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRuntimeConfigurationInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.RuntimeConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RuntimeConfiguration")) + } + if s.RuntimeConfiguration != nil { + if err := s.RuntimeConfiguration.Validate(); err != nil { + invalidParams.AddNested("RuntimeConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *UpdateRuntimeConfigurationInput) SetFleetId(v string) *UpdateRuntimeConfigurationInput { + s.FleetId = &v + return s +} + +// SetRuntimeConfiguration sets the RuntimeConfiguration field's value. +func (s *UpdateRuntimeConfigurationInput) SetRuntimeConfiguration(v *RuntimeConfiguration) *UpdateRuntimeConfigurationInput { + s.RuntimeConfiguration = v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateRuntimeConfigurationOutput +type UpdateRuntimeConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The run-time configuration currently in force. If the update was successful, + // this object matches the one in the request. 
+ RuntimeConfiguration *RuntimeConfiguration `type:"structure"` +} + +// String returns the string representation +func (s UpdateRuntimeConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRuntimeConfigurationOutput) GoString() string { + return s.String() +} + +// SetRuntimeConfiguration sets the RuntimeConfiguration field's value. +func (s *UpdateRuntimeConfigurationOutput) SetRuntimeConfiguration(v *RuntimeConfiguration) *UpdateRuntimeConfigurationOutput { + s.RuntimeConfiguration = v + return s +} + +// Represents the input for a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ValidateMatchmakingRuleSetInput +type ValidateMatchmakingRuleSetInput struct { + _ struct{} `type:"structure"` + + // Collection of matchmaking rules to validate, formatted as a JSON string. + // + // RuleSetBody is a required field + RuleSetBody *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ValidateMatchmakingRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidateMatchmakingRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ValidateMatchmakingRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ValidateMatchmakingRuleSetInput"} + if s.RuleSetBody == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetBody")) + } + if s.RuleSetBody != nil && len(*s.RuleSetBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleSetBody", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleSetBody sets the RuleSetBody field's value. 
+func (s *ValidateMatchmakingRuleSetInput) SetRuleSetBody(v string) *ValidateMatchmakingRuleSetInput { + s.RuleSetBody = &v + return s +} + +// Represents the returned data in response to a request action. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ValidateMatchmakingRuleSetOutput +type ValidateMatchmakingRuleSetOutput struct { + _ struct{} `type:"structure"` + + // Response indicating whether or not the rule set is valid. + Valid *bool `type:"boolean"` +} + +// String returns the string representation +func (s ValidateMatchmakingRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidateMatchmakingRuleSetOutput) GoString() string { + return s.String() +} + +// SetValid sets the Valid field's value. +func (s *ValidateMatchmakingRuleSetOutput) SetValid(v bool) *ValidateMatchmakingRuleSetOutput { + s.Valid = &v + return s +} + +// Represents an authorization for a VPC peering connection between the VPC +// for an Amazon GameLift fleet and another VPC on an account you have access +// to. This authorization must exist and be valid for the peering connection +// to be established. Authorizations are valid for 24 hours after they are issued. +// +// VPC peering connection operations include: +// +// * CreateVpcPeeringAuthorization +// +// * DescribeVpcPeeringAuthorizations +// +// * DeleteVpcPeeringAuthorization +// +// * CreateVpcPeeringConnection +// +// * DescribeVpcPeeringConnections +// +// * DeleteVpcPeeringConnection +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/VpcPeeringAuthorization +type VpcPeeringAuthorization struct { + _ struct{} `type:"structure"` + + // Time stamp indicating when this authorization was issued. Format is a number + // expressed in Unix time as milliseconds (for example "1469498468.057"). 
+ CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Time stamp indicating when this authorization expires (24 hours after issuance). + // Format is a number expressed in Unix time as milliseconds (for example "1469498468.057"). + ExpirationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Unique identifier for the AWS account that you use to manage your Amazon + // GameLift fleet. You can find your Account ID in the AWS Management Console + // under account settings. + GameLiftAwsAccountId *string `min:"1" type:"string"` + + PeerVpcAwsAccountId *string `min:"1" type:"string"` + + // Unique identifier for a VPC with resources to be accessed by your Amazon + // GameLift fleet. The VPC must be in the same region where your fleet is deployed. + // To get VPC information, including IDs, use the Virtual Private Cloud service + // tools, including the VPC Dashboard in the AWS Management Console. + PeerVpcId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringAuthorization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringAuthorization) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *VpcPeeringAuthorization) SetCreationTime(v time.Time) *VpcPeeringAuthorization { + s.CreationTime = &v + return s +} + +// SetExpirationTime sets the ExpirationTime field's value. +func (s *VpcPeeringAuthorization) SetExpirationTime(v time.Time) *VpcPeeringAuthorization { + s.ExpirationTime = &v + return s +} + +// SetGameLiftAwsAccountId sets the GameLiftAwsAccountId field's value. +func (s *VpcPeeringAuthorization) SetGameLiftAwsAccountId(v string) *VpcPeeringAuthorization { + s.GameLiftAwsAccountId = &v + return s +} + +// SetPeerVpcAwsAccountId sets the PeerVpcAwsAccountId field's value. 
+func (s *VpcPeeringAuthorization) SetPeerVpcAwsAccountId(v string) *VpcPeeringAuthorization { + s.PeerVpcAwsAccountId = &v + return s +} + +// SetPeerVpcId sets the PeerVpcId field's value. +func (s *VpcPeeringAuthorization) SetPeerVpcId(v string) *VpcPeeringAuthorization { + s.PeerVpcId = &v + return s +} + +// Represents a peering connection between a VPC on one of your AWS accounts +// and the VPC for your Amazon GameLift fleets. This record may be for an active +// peering connection or a pending connection that has not yet been established. +// +// VPC peering connection operations include: +// +// * CreateVpcPeeringAuthorization +// +// * DescribeVpcPeeringAuthorizations +// +// * DeleteVpcPeeringAuthorization +// +// * CreateVpcPeeringConnection +// +// * DescribeVpcPeeringConnections +// +// * DeleteVpcPeeringConnection +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/VpcPeeringConnection +type VpcPeeringConnection struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet. This ID determines the ID of the Amazon GameLift + // VPC for your fleet. + FleetId *string `type:"string"` + + // Unique identifier for the VPC that contains the Amazon GameLift fleet for + // this connection. This VPC is managed by Amazon GameLift and does not appear + // in your AWS account. + GameLiftVpcId *string `min:"1" type:"string"` + + // CIDR block of IPv4 addresses assigned to the VPC peering connection for the + // GameLift VPC. The peered VPC also has an IPv4 CIDR block associated with + // it; these blocks cannot overlap or the peering connection cannot be created. + IpV4CidrBlock *string `min:"1" type:"string"` + + // Unique identifier for a VPC with resources to be accessed by your Amazon + // GameLift fleet. The VPC must be in the same region where your fleet is deployed. + // To get VPC information, including IDs, use the Virtual Private Cloud service + // tools, including the VPC Dashboard in the AWS Management Console. 
+ PeerVpcId *string `min:"1" type:"string"` + + // Object that contains status information about the connection. Status indicates + // if a connection is pending, successful, or failed. + Status *VpcPeeringConnectionStatus `type:"structure"` + + // Unique identifier that is automatically assigned to the connection record. + // This ID is referenced in VPC peering connection events, and is used when + // deleting a connection with DeleteVpcPeeringConnection. + VpcPeeringConnectionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnection) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *VpcPeeringConnection) SetFleetId(v string) *VpcPeeringConnection { + s.FleetId = &v + return s +} + +// SetGameLiftVpcId sets the GameLiftVpcId field's value. +func (s *VpcPeeringConnection) SetGameLiftVpcId(v string) *VpcPeeringConnection { + s.GameLiftVpcId = &v + return s +} + +// SetIpV4CidrBlock sets the IpV4CidrBlock field's value. +func (s *VpcPeeringConnection) SetIpV4CidrBlock(v string) *VpcPeeringConnection { + s.IpV4CidrBlock = &v + return s +} + +// SetPeerVpcId sets the PeerVpcId field's value. +func (s *VpcPeeringConnection) SetPeerVpcId(v string) *VpcPeeringConnection { + s.PeerVpcId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VpcPeeringConnection) SetStatus(v *VpcPeeringConnectionStatus) *VpcPeeringConnection { + s.Status = v + return s +} + +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. +func (s *VpcPeeringConnection) SetVpcPeeringConnectionId(v string) *VpcPeeringConnection { + s.VpcPeeringConnectionId = &v + return s +} + +// Represents status information for a VPC peering connection. Status is associated +// with a VpcPeeringConnection object. 
Status codes and messages are provided +// from EC2 (). (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VpcPeeringConnectionStateReason.html) +// Connection status information is also communicated as a fleet Event. +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/VpcPeeringConnectionStatus +type VpcPeeringConnectionStatus struct { + _ struct{} `type:"structure"` + + // Code indicating the status of a VPC peering connection. + Code *string `min:"1" type:"string"` + + // Additional messaging associated with the connection status. + Message *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringConnectionStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnectionStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *VpcPeeringConnectionStatus) SetCode(v string) *VpcPeeringConnectionStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. 
+func (s *VpcPeeringConnectionStatus) SetMessage(v string) *VpcPeeringConnectionStatus { + s.Message = &v + return s +} + +const ( + // AcceptanceTypeAccept is a AcceptanceType enum value + AcceptanceTypeAccept = "ACCEPT" + + // AcceptanceTypeReject is a AcceptanceType enum value + AcceptanceTypeReject = "REJECT" +) + +const ( + // BuildStatusInitialized is a BuildStatus enum value + BuildStatusInitialized = "INITIALIZED" + + // BuildStatusReady is a BuildStatus enum value + BuildStatusReady = "READY" + + // BuildStatusFailed is a BuildStatus enum value + BuildStatusFailed = "FAILED" +) + +const ( + // ComparisonOperatorTypeGreaterThanOrEqualToThreshold is a ComparisonOperatorType enum value + ComparisonOperatorTypeGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" + + // ComparisonOperatorTypeGreaterThanThreshold is a ComparisonOperatorType enum value + ComparisonOperatorTypeGreaterThanThreshold = "GreaterThanThreshold" + + // ComparisonOperatorTypeLessThanThreshold is a ComparisonOperatorType enum value + ComparisonOperatorTypeLessThanThreshold = "LessThanThreshold" + + // ComparisonOperatorTypeLessThanOrEqualToThreshold is a ComparisonOperatorType enum value + ComparisonOperatorTypeLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" +) + +const ( + // EC2InstanceTypeT2Micro is a EC2InstanceType enum value + EC2InstanceTypeT2Micro = "t2.micro" + + // EC2InstanceTypeT2Small is a EC2InstanceType enum value + EC2InstanceTypeT2Small = "t2.small" + + // EC2InstanceTypeT2Medium is a EC2InstanceType enum value + EC2InstanceTypeT2Medium = "t2.medium" + + // EC2InstanceTypeT2Large is a EC2InstanceType enum value + EC2InstanceTypeT2Large = "t2.large" + + // EC2InstanceTypeC3Large is a EC2InstanceType enum value + EC2InstanceTypeC3Large = "c3.large" + + // EC2InstanceTypeC3Xlarge is a EC2InstanceType enum value + EC2InstanceTypeC3Xlarge = "c3.xlarge" + + // EC2InstanceTypeC32xlarge is a EC2InstanceType enum value + EC2InstanceTypeC32xlarge = "c3.2xlarge" 
+ + // EC2InstanceTypeC34xlarge is a EC2InstanceType enum value + EC2InstanceTypeC34xlarge = "c3.4xlarge" + + // EC2InstanceTypeC38xlarge is a EC2InstanceType enum value + EC2InstanceTypeC38xlarge = "c3.8xlarge" + + // EC2InstanceTypeC4Large is a EC2InstanceType enum value + EC2InstanceTypeC4Large = "c4.large" + + // EC2InstanceTypeC4Xlarge is a EC2InstanceType enum value + EC2InstanceTypeC4Xlarge = "c4.xlarge" + + // EC2InstanceTypeC42xlarge is a EC2InstanceType enum value + EC2InstanceTypeC42xlarge = "c4.2xlarge" + + // EC2InstanceTypeC44xlarge is a EC2InstanceType enum value + EC2InstanceTypeC44xlarge = "c4.4xlarge" + + // EC2InstanceTypeC48xlarge is a EC2InstanceType enum value + EC2InstanceTypeC48xlarge = "c4.8xlarge" + + // EC2InstanceTypeR3Large is a EC2InstanceType enum value + EC2InstanceTypeR3Large = "r3.large" + + // EC2InstanceTypeR3Xlarge is a EC2InstanceType enum value + EC2InstanceTypeR3Xlarge = "r3.xlarge" + + // EC2InstanceTypeR32xlarge is a EC2InstanceType enum value + EC2InstanceTypeR32xlarge = "r3.2xlarge" + + // EC2InstanceTypeR34xlarge is a EC2InstanceType enum value + EC2InstanceTypeR34xlarge = "r3.4xlarge" + + // EC2InstanceTypeR38xlarge is a EC2InstanceType enum value + EC2InstanceTypeR38xlarge = "r3.8xlarge" + + // EC2InstanceTypeR4Large is a EC2InstanceType enum value + EC2InstanceTypeR4Large = "r4.large" + + // EC2InstanceTypeR4Xlarge is a EC2InstanceType enum value + EC2InstanceTypeR4Xlarge = "r4.xlarge" + + // EC2InstanceTypeR42xlarge is a EC2InstanceType enum value + EC2InstanceTypeR42xlarge = "r4.2xlarge" + + // EC2InstanceTypeR44xlarge is a EC2InstanceType enum value + EC2InstanceTypeR44xlarge = "r4.4xlarge" + + // EC2InstanceTypeR48xlarge is a EC2InstanceType enum value + EC2InstanceTypeR48xlarge = "r4.8xlarge" + + // EC2InstanceTypeR416xlarge is a EC2InstanceType enum value + EC2InstanceTypeR416xlarge = "r4.16xlarge" + + // EC2InstanceTypeM3Medium is a EC2InstanceType enum value + EC2InstanceTypeM3Medium = "m3.medium" + + // 
EC2InstanceTypeM3Large is a EC2InstanceType enum value + EC2InstanceTypeM3Large = "m3.large" + + // EC2InstanceTypeM3Xlarge is a EC2InstanceType enum value + EC2InstanceTypeM3Xlarge = "m3.xlarge" + + // EC2InstanceTypeM32xlarge is a EC2InstanceType enum value + EC2InstanceTypeM32xlarge = "m3.2xlarge" + + // EC2InstanceTypeM4Large is a EC2InstanceType enum value + EC2InstanceTypeM4Large = "m4.large" + + // EC2InstanceTypeM4Xlarge is a EC2InstanceType enum value + EC2InstanceTypeM4Xlarge = "m4.xlarge" + + // EC2InstanceTypeM42xlarge is a EC2InstanceType enum value + EC2InstanceTypeM42xlarge = "m4.2xlarge" + + // EC2InstanceTypeM44xlarge is a EC2InstanceType enum value + EC2InstanceTypeM44xlarge = "m4.4xlarge" + + // EC2InstanceTypeM410xlarge is a EC2InstanceType enum value + EC2InstanceTypeM410xlarge = "m4.10xlarge" +) + +const ( + // EventCodeGenericEvent is a EventCode enum value + EventCodeGenericEvent = "GENERIC_EVENT" + + // EventCodeFleetCreated is a EventCode enum value + EventCodeFleetCreated = "FLEET_CREATED" + + // EventCodeFleetDeleted is a EventCode enum value + EventCodeFleetDeleted = "FLEET_DELETED" + + // EventCodeFleetScalingEvent is a EventCode enum value + EventCodeFleetScalingEvent = "FLEET_SCALING_EVENT" + + // EventCodeFleetStateDownloading is a EventCode enum value + EventCodeFleetStateDownloading = "FLEET_STATE_DOWNLOADING" + + // EventCodeFleetStateValidating is a EventCode enum value + EventCodeFleetStateValidating = "FLEET_STATE_VALIDATING" + + // EventCodeFleetStateBuilding is a EventCode enum value + EventCodeFleetStateBuilding = "FLEET_STATE_BUILDING" + + // EventCodeFleetStateActivating is a EventCode enum value + EventCodeFleetStateActivating = "FLEET_STATE_ACTIVATING" + + // EventCodeFleetStateActive is a EventCode enum value + EventCodeFleetStateActive = "FLEET_STATE_ACTIVE" + + // EventCodeFleetStateError is a EventCode enum value + EventCodeFleetStateError = "FLEET_STATE_ERROR" + + // EventCodeFleetInitializationFailed is a 
EventCode enum value + EventCodeFleetInitializationFailed = "FLEET_INITIALIZATION_FAILED" + + // EventCodeFleetBinaryDownloadFailed is a EventCode enum value + EventCodeFleetBinaryDownloadFailed = "FLEET_BINARY_DOWNLOAD_FAILED" + + // EventCodeFleetValidationLaunchPathNotFound is a EventCode enum value + EventCodeFleetValidationLaunchPathNotFound = "FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND" + + // EventCodeFleetValidationExecutableRuntimeFailure is a EventCode enum value + EventCodeFleetValidationExecutableRuntimeFailure = "FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE" + + // EventCodeFleetValidationTimedOut is a EventCode enum value + EventCodeFleetValidationTimedOut = "FLEET_VALIDATION_TIMED_OUT" + + // EventCodeFleetActivationFailed is a EventCode enum value + EventCodeFleetActivationFailed = "FLEET_ACTIVATION_FAILED" + + // EventCodeFleetActivationFailedNoInstances is a EventCode enum value + EventCodeFleetActivationFailedNoInstances = "FLEET_ACTIVATION_FAILED_NO_INSTANCES" + + // EventCodeFleetNewGameSessionProtectionPolicyUpdated is a EventCode enum value + EventCodeFleetNewGameSessionProtectionPolicyUpdated = "FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED" + + // EventCodeServerProcessInvalidPath is a EventCode enum value + EventCodeServerProcessInvalidPath = "SERVER_PROCESS_INVALID_PATH" + + // EventCodeServerProcessSdkInitializationTimeout is a EventCode enum value + EventCodeServerProcessSdkInitializationTimeout = "SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT" + + // EventCodeServerProcessProcessReadyTimeout is a EventCode enum value + EventCodeServerProcessProcessReadyTimeout = "SERVER_PROCESS_PROCESS_READY_TIMEOUT" + + // EventCodeServerProcessCrashed is a EventCode enum value + EventCodeServerProcessCrashed = "SERVER_PROCESS_CRASHED" + + // EventCodeServerProcessTerminatedUnhealthy is a EventCode enum value + EventCodeServerProcessTerminatedUnhealthy = "SERVER_PROCESS_TERMINATED_UNHEALTHY" + + // EventCodeServerProcessForceTerminated is a EventCode enum 
value + EventCodeServerProcessForceTerminated = "SERVER_PROCESS_FORCE_TERMINATED" + + // EventCodeServerProcessProcessExitTimeout is a EventCode enum value + EventCodeServerProcessProcessExitTimeout = "SERVER_PROCESS_PROCESS_EXIT_TIMEOUT" + + // EventCodeGameSessionActivationTimeout is a EventCode enum value + EventCodeGameSessionActivationTimeout = "GAME_SESSION_ACTIVATION_TIMEOUT" + + // EventCodeFleetCreationExtractingBuild is a EventCode enum value + EventCodeFleetCreationExtractingBuild = "FLEET_CREATION_EXTRACTING_BUILD" + + // EventCodeFleetCreationRunningInstaller is a EventCode enum value + EventCodeFleetCreationRunningInstaller = "FLEET_CREATION_RUNNING_INSTALLER" + + // EventCodeFleetCreationValidatingRuntimeConfig is a EventCode enum value + EventCodeFleetCreationValidatingRuntimeConfig = "FLEET_CREATION_VALIDATING_RUNTIME_CONFIG" + + // EventCodeFleetVpcPeeringSucceeded is a EventCode enum value + EventCodeFleetVpcPeeringSucceeded = "FLEET_VPC_PEERING_SUCCEEDED" + + // EventCodeFleetVpcPeeringFailed is a EventCode enum value + EventCodeFleetVpcPeeringFailed = "FLEET_VPC_PEERING_FAILED" + + // EventCodeFleetVpcPeeringDeleted is a EventCode enum value + EventCodeFleetVpcPeeringDeleted = "FLEET_VPC_PEERING_DELETED" +) + +const ( + // FleetStatusNew is a FleetStatus enum value + FleetStatusNew = "NEW" + + // FleetStatusDownloading is a FleetStatus enum value + FleetStatusDownloading = "DOWNLOADING" + + // FleetStatusValidating is a FleetStatus enum value + FleetStatusValidating = "VALIDATING" + + // FleetStatusBuilding is a FleetStatus enum value + FleetStatusBuilding = "BUILDING" + + // FleetStatusActivating is a FleetStatus enum value + FleetStatusActivating = "ACTIVATING" + + // FleetStatusActive is a FleetStatus enum value + FleetStatusActive = "ACTIVE" + + // FleetStatusDeleting is a FleetStatus enum value + FleetStatusDeleting = "DELETING" + + // FleetStatusError is a FleetStatus enum value + FleetStatusError = "ERROR" + + // FleetStatusTerminated is 
a FleetStatus enum value + FleetStatusTerminated = "TERMINATED" +) + +const ( + // GameSessionPlacementStatePending is a GameSessionPlacementState enum value + GameSessionPlacementStatePending = "PENDING" + + // GameSessionPlacementStateFulfilled is a GameSessionPlacementState enum value + GameSessionPlacementStateFulfilled = "FULFILLED" + + // GameSessionPlacementStateCancelled is a GameSessionPlacementState enum value + GameSessionPlacementStateCancelled = "CANCELLED" + + // GameSessionPlacementStateTimedOut is a GameSessionPlacementState enum value + GameSessionPlacementStateTimedOut = "TIMED_OUT" +) + +const ( + // GameSessionStatusActive is a GameSessionStatus enum value + GameSessionStatusActive = "ACTIVE" + + // GameSessionStatusActivating is a GameSessionStatus enum value + GameSessionStatusActivating = "ACTIVATING" + + // GameSessionStatusTerminated is a GameSessionStatus enum value + GameSessionStatusTerminated = "TERMINATED" + + // GameSessionStatusTerminating is a GameSessionStatus enum value + GameSessionStatusTerminating = "TERMINATING" + + // GameSessionStatusError is a GameSessionStatus enum value + GameSessionStatusError = "ERROR" +) + +const ( + // InstanceStatusPending is a InstanceStatus enum value + InstanceStatusPending = "PENDING" + + // InstanceStatusActive is a InstanceStatus enum value + InstanceStatusActive = "ACTIVE" + + // InstanceStatusTerminating is a InstanceStatus enum value + InstanceStatusTerminating = "TERMINATING" +) + +const ( + // IpProtocolTcp is a IpProtocol enum value + IpProtocolTcp = "TCP" + + // IpProtocolUdp is a IpProtocol enum value + IpProtocolUdp = "UDP" +) + +const ( + // MatchmakingConfigurationStatusCancelled is a MatchmakingConfigurationStatus enum value + MatchmakingConfigurationStatusCancelled = "CANCELLED" + + // MatchmakingConfigurationStatusCompleted is a MatchmakingConfigurationStatus enum value + MatchmakingConfigurationStatusCompleted = "COMPLETED" + + // MatchmakingConfigurationStatusFailed is a 
MatchmakingConfigurationStatus enum value + MatchmakingConfigurationStatusFailed = "FAILED" + + // MatchmakingConfigurationStatusPlacing is a MatchmakingConfigurationStatus enum value + MatchmakingConfigurationStatusPlacing = "PLACING" + + // MatchmakingConfigurationStatusQueued is a MatchmakingConfigurationStatus enum value + MatchmakingConfigurationStatusQueued = "QUEUED" + + // MatchmakingConfigurationStatusRequiresAcceptance is a MatchmakingConfigurationStatus enum value + MatchmakingConfigurationStatusRequiresAcceptance = "REQUIRES_ACCEPTANCE" + + // MatchmakingConfigurationStatusSearching is a MatchmakingConfigurationStatus enum value + MatchmakingConfigurationStatusSearching = "SEARCHING" + + // MatchmakingConfigurationStatusTimedOut is a MatchmakingConfigurationStatus enum value + MatchmakingConfigurationStatusTimedOut = "TIMED_OUT" +) + +const ( + // MetricNameActivatingGameSessions is a MetricName enum value + MetricNameActivatingGameSessions = "ActivatingGameSessions" + + // MetricNameActiveGameSessions is a MetricName enum value + MetricNameActiveGameSessions = "ActiveGameSessions" + + // MetricNameActiveInstances is a MetricName enum value + MetricNameActiveInstances = "ActiveInstances" + + // MetricNameAvailableGameSessions is a MetricName enum value + MetricNameAvailableGameSessions = "AvailableGameSessions" + + // MetricNameAvailablePlayerSessions is a MetricName enum value + MetricNameAvailablePlayerSessions = "AvailablePlayerSessions" + + // MetricNameCurrentPlayerSessions is a MetricName enum value + MetricNameCurrentPlayerSessions = "CurrentPlayerSessions" + + // MetricNameIdleInstances is a MetricName enum value + MetricNameIdleInstances = "IdleInstances" + + // MetricNamePercentAvailableGameSessions is a MetricName enum value + MetricNamePercentAvailableGameSessions = "PercentAvailableGameSessions" + + // MetricNamePercentIdleInstances is a MetricName enum value + MetricNamePercentIdleInstances = "PercentIdleInstances" + + // 
MetricNameQueueDepth is a MetricName enum value + MetricNameQueueDepth = "QueueDepth" + + // MetricNameWaitTime is a MetricName enum value + MetricNameWaitTime = "WaitTime" +) + +const ( + // OperatingSystemWindows2012 is a OperatingSystem enum value + OperatingSystemWindows2012 = "WINDOWS_2012" + + // OperatingSystemAmazonLinux is a OperatingSystem enum value + OperatingSystemAmazonLinux = "AMAZON_LINUX" +) + +const ( + // PlayerSessionCreationPolicyAcceptAll is a PlayerSessionCreationPolicy enum value + PlayerSessionCreationPolicyAcceptAll = "ACCEPT_ALL" + + // PlayerSessionCreationPolicyDenyAll is a PlayerSessionCreationPolicy enum value + PlayerSessionCreationPolicyDenyAll = "DENY_ALL" +) + +const ( + // PlayerSessionStatusReserved is a PlayerSessionStatus enum value + PlayerSessionStatusReserved = "RESERVED" + + // PlayerSessionStatusActive is a PlayerSessionStatus enum value + PlayerSessionStatusActive = "ACTIVE" + + // PlayerSessionStatusCompleted is a PlayerSessionStatus enum value + PlayerSessionStatusCompleted = "COMPLETED" + + // PlayerSessionStatusTimedout is a PlayerSessionStatus enum value + PlayerSessionStatusTimedout = "TIMEDOUT" +) + +const ( + // ProtectionPolicyNoProtection is a ProtectionPolicy enum value + ProtectionPolicyNoProtection = "NoProtection" + + // ProtectionPolicyFullProtection is a ProtectionPolicy enum value + ProtectionPolicyFullProtection = "FullProtection" +) + +const ( + // RoutingStrategyTypeSimple is a RoutingStrategyType enum value + RoutingStrategyTypeSimple = "SIMPLE" + + // RoutingStrategyTypeTerminal is a RoutingStrategyType enum value + RoutingStrategyTypeTerminal = "TERMINAL" +) + +const ( + // ScalingAdjustmentTypeChangeInCapacity is a ScalingAdjustmentType enum value + ScalingAdjustmentTypeChangeInCapacity = "ChangeInCapacity" + + // ScalingAdjustmentTypeExactCapacity is a ScalingAdjustmentType enum value + ScalingAdjustmentTypeExactCapacity = "ExactCapacity" + + // ScalingAdjustmentTypePercentChangeInCapacity is a 
ScalingAdjustmentType enum value + ScalingAdjustmentTypePercentChangeInCapacity = "PercentChangeInCapacity" +) + +const ( + // ScalingStatusTypeActive is a ScalingStatusType enum value + ScalingStatusTypeActive = "ACTIVE" + + // ScalingStatusTypeUpdateRequested is a ScalingStatusType enum value + ScalingStatusTypeUpdateRequested = "UPDATE_REQUESTED" + + // ScalingStatusTypeUpdating is a ScalingStatusType enum value + ScalingStatusTypeUpdating = "UPDATING" + + // ScalingStatusTypeDeleteRequested is a ScalingStatusType enum value + ScalingStatusTypeDeleteRequested = "DELETE_REQUESTED" + + // ScalingStatusTypeDeleting is a ScalingStatusType enum value + ScalingStatusTypeDeleting = "DELETING" + + // ScalingStatusTypeDeleted is a ScalingStatusType enum value + ScalingStatusTypeDeleted = "DELETED" + + // ScalingStatusTypeError is a ScalingStatusType enum value + ScalingStatusTypeError = "ERROR" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go new file mode 100644 index 00000000000..d73c39d65ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go @@ -0,0 +1,304 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package gamelift provides the client and types for making API +// requests to Amazon GameLift. +// +// Amazon GameLift is a managed service for developers who need a scalable, +// dedicated server solution for their multiplayer games. Amazon GameLift provides +// tools for the following tasks: (1) acquire computing resources and deploy +// game servers, (2) scale game server capacity to meet player demand, (3) host +// game sessions and manage player access, and (4) track in-depth metrics on +// player usage and server performance. 
+// +// The Amazon GameLift service API includes two important function sets: +// +// * Manage game sessions and player access -- Retrieve information on available +// game sessions; create new game sessions; send player requests to join +// a game session. +// +// * Configure and manage game server resources -- Manage builds, fleets, +// queues, and aliases; set autoscaling policies; retrieve logs and metrics. +// +// This reference guide describes the low-level service API for Amazon GameLift. +// You can use the API functionality with these tools: +// +// * The Amazon Web Services software development kit (AWS SDK (http://aws.amazon.com/tools/#sdk)) +// is available in multiple languages (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-supported.html#gamelift-supported-clients) +// including C++ and C#. Use the SDK to access the API programmatically from +// an application, such as a game client. +// +// * The AWS command-line interface (http://aws.amazon.com/cli/) (CLI) tool +// is primarily useful for handling administrative actions, such as setting +// up and managing Amazon GameLift settings and resources. You can use the +// AWS CLI to manage all of your AWS services. +// +// * The AWS Management Console (https://console.aws.amazon.com/gamelift/home) +// for Amazon GameLift provides a web interface to manage your Amazon GameLift +// settings and resources. The console includes a dashboard for tracking +// key resources, including builds and fleets, and displays usage and performance +// metrics for your games as customizable graphs. +// +// * Amazon GameLift Local is a tool for testing your game's integration +// with Amazon GameLift before deploying it on the service. This tools supports +// a subset of key API actions, which can be called from either the AWS CLI +// or programmatically. See Testing an Integration (http://docs.aws.amazon.com/gamelift/latest/developerguide/integration-testing-local.html). 
+// +// MORE RESOURCES +// +// * Amazon GameLift Developer Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/) +// -- Learn more about Amazon GameLift features and how to use them. +// +// * Lumberyard and Amazon GameLift Tutorials (https://gamedev.amazon.com/forums/tutorials) +// -- Get started fast with walkthroughs and sample projects. +// +// * GameDev Blog (http://aws.amazon.com/blogs/gamedev/) -- Stay up to date +// with new features and techniques. +// +// * GameDev Forums (https://gamedev.amazon.com/forums/spaces/123/gamelift-discussion.html) +// -- Connect with the GameDev community. +// +// * Amazon GameLift Document History (http://docs.aws.amazon.com/gamelift/latest/developerguide/doc-history.html) +// -- See changes to the Amazon GameLift service, SDKs, and documentation, +// as well as links to release notes. +// +// API SUMMARY +// +// This list offers a functional overview of the Amazon GameLift service API. +// +// Managing Games and Players +// +// Use these actions to start new game sessions, find existing game sessions, +// track game session status and other information, and enable player access +// to game sessions. +// +// * Discover existing game sessions +// +// SearchGameSessions -- Retrieve all available game sessions or search for +// game sessions that match a set of criteria. +// +// * Start new game sessions +// +// Start new games with Queues to find the best available hosting resources +// across multiple regions, minimize player latency, and balance game session +// activity for efficiency and cost effectiveness. +// +// StartGameSessionPlacement -- Request a new game session placement and add +// one or more players to it. +// +// DescribeGameSessionPlacement -- Get details on a placement request, including +// status. +// +// StopGameSessionPlacement -- Cancel a placement request. +// +// CreateGameSession -- Start a new game session on a specific fleet. Available +// in Amazon GameLift Local. 
+// +// * Start new game sessions with FlexMatch matchmaking +// +// StartMatchmaking -- Request matchmaking for one players or a group who want +// to play together. +// +// DescribeMatchmaking -- Get details on a matchmaking request, including status. +// +// AcceptMatch -- Register that a player accepts a proposed match, for matches +// that require player acceptance. +// +// StopMatchmaking -- Cancel a matchmaking request. +// +// * Manage game session data +// +// DescribeGameSessions -- Retrieve metadata for one or more game sessions, +// including length of time active and current player count. Available in +// Amazon GameLift Local. +// +// DescribeGameSessionDetails -- Retrieve metadata and the game session protection +// setting for one or more game sessions. +// +// UpdateGameSession -- Change game session settings, such as maximum player +// count and join policy. +// +// GetGameSessionLogUrl -- Get the location of saved logs for a game session. +// +// * Manage player sessions +// +// CreatePlayerSession -- Send a request for a player to join a game session. +// Available in Amazon GameLift Local. +// +// CreatePlayerSessions -- Send a request for multiple players to join a game +// session. Available in Amazon GameLift Local. +// +// DescribePlayerSessions -- Get details on player activity, including status, +// playing time, and player data. Available in Amazon GameLift Local. +// +// Setting Up and Managing Game Servers +// +// When setting up Amazon GameLift resources for your game, you first create +// a game build (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) +// and upload it to Amazon GameLift. You can then use these actions to configure +// and manage a fleet of resources to run your game servers, scale capacity +// to meet player demand, access performance and utilization metrics, and more. +// +// * Manage game builds +// +// CreateBuild -- Create a new build using files stored in an Amazon S3 bucket. 
+// (Update uploading permissions with RequestUploadCredentials.) To create +// a build and upload files from a local path, use the AWS CLI command upload-build. +// +// ListBuilds -- Get a list of all builds uploaded to a Amazon GameLift region. +// +// DescribeBuild -- Retrieve information associated with a build. +// +// UpdateBuild -- Change build metadata, including build name and version. +// +// DeleteBuild -- Remove a build from Amazon GameLift. +// +// * Manage fleets +// +// CreateFleet -- Configure and activate a new fleet to run a build's game servers. +// +// ListFleets -- Get a list of all fleet IDs in a Amazon GameLift region (all +// statuses). +// +// DeleteFleet -- Terminate a fleet that is no longer running game servers or +// hosting players. +// +// View / update fleet configurations. +// +// DescribeFleetAttributes / UpdateFleetAttributes -- View or change a fleet's +// metadata and settings for game session protection and resource creation +// limits. +// +// DescribeFleetPortSettings / UpdateFleetPortSettings -- View or change the +// inbound permissions (IP address and port setting ranges) allowed for a +// fleet. +// +// DescribeRuntimeConfiguration / UpdateRuntimeConfiguration -- View or change +// what server processes (and how many) to run on each instance in a fleet. +// +// * Control fleet capacity +// +// DescribeEC2InstanceLimits -- Retrieve maximum number of instances allowed +// for the current AWS account and the current usage level. +// +// DescribeFleetCapacity / UpdateFleetCapacity -- Retrieve the capacity settings +// and the current number of instances in a fleet; adjust fleet capacity +// settings to scale up or down. +// +// Autoscale -- Manage autoscaling rules and apply them to a fleet. +// +// PutScalingPolicy -- Create a new autoscaling policy, or update an existing +// one. +// +// DescribeScalingPolicies -- Retrieve an existing autoscaling policy. 
+// +// DeleteScalingPolicy -- Delete an autoscaling policy and stop it from affecting +// a fleet's capacity. +// +// * Manage VPC peering connections for fleets +// +// CreateVpcPeeringAuthorization -- Authorize a peering connection to one of +// your VPCs. +// +// DescribeVpcPeeringAuthorizations -- Retrieve valid peering connection authorizations. +// +// +// DeleteVpcPeeringAuthorization -- Delete a peering connection authorization. +// +// CreateVpcPeeringConnection -- Establish a peering connection between the +// VPC for a Amazon GameLift fleet and one of your VPCs. +// +// DescribeVpcPeeringConnections -- Retrieve information on active or pending +// VPC peering connections with a Amazon GameLift fleet. +// +// DeleteVpcPeeringConnection -- Delete a VPC peering connection with a Amazon +// GameLift fleet. +// +// * Access fleet activity statistics +// +// DescribeFleetUtilization -- Get current data on the number of server processes, +// game sessions, and players currently active on a fleet. +// +// DescribeFleetEvents -- Get a fleet's logged events for a specified time span. +// +// DescribeGameSessions -- Retrieve metadata associated with one or more game +// sessions, including length of time active and current player count. +// +// * Remotely access an instance +// +// DescribeInstances -- Get information on each instance in a fleet, including +// instance ID, IP address, and status. +// +// GetInstanceAccess -- Request access credentials needed to remotely connect +// to a specified instance in a fleet. +// +// * Manage fleet aliases +// +// CreateAlias -- Define a new alias and optionally assign it to a fleet. +// +// ListAliases -- Get all fleet aliases defined in a Amazon GameLift region. +// +// DescribeAlias -- Retrieve information on an existing alias. +// +// UpdateAlias -- Change settings for a alias, such as redirecting it from one +// fleet to another. +// +// DeleteAlias -- Remove an alias from the region. 
+// +// ResolveAlias -- Get the fleet ID that a specified alias points to. +// +// * Manage game session queues +// +// CreateGameSessionQueue -- Create a queue for processing requests for new +// game sessions. +// +// DescribeGameSessionQueues -- Retrieve game session queues defined in a Amazon +// GameLift region. +// +// UpdateGameSessionQueue -- Change the configuration of a game session queue. +// +// DeleteGameSessionQueue -- Remove a game session queue from the region. +// +// * Manage FlexMatch resources +// +// CreateMatchmakingConfiguration -- Create a matchmaking configuration with +// instructions for building a player group and placing in a new game session. +// +// +// DescribeMatchmakingConfigurations -- Retrieve matchmaking configurations +// defined a Amazon GameLift region. +// +// UpdateMatchmakingConfiguration -- Change settings for matchmaking configuration. +// queue. +// +// DeleteMatchmakingConfiguration -- Remove a matchmaking configuration from +// the region. +// +// CreateMatchmakingRuleSet -- Create a set of rules to use when searching for +// player matches. +// +// DescribeMatchmakingRuleSets -- Retrieve matchmaking rule sets defined in +// a Amazon GameLift region. +// +// ValidateMatchmakingRuleSet -- Verify syntax for a set of matchmaking rules. +// +// See https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01 for more information on this service. +// +// See gamelift package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/gamelift/ +// +// Using the Client +// +// To contact Amazon GameLift with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. 
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon GameLift client GameLift for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/gamelift/#New +package gamelift diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go new file mode 100644 index 00000000000..d04e78d0d2c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go @@ -0,0 +1,102 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package gamelift + +const ( + + // ErrCodeConflictException for service response error code + // "ConflictException". + // + // The requested operation would cause a conflict with the current state of + // a service resource associated with the request. Resolve the conflict before + // retrying this request. + ErrCodeConflictException = "ConflictException" + + // ErrCodeFleetCapacityExceededException for service response error code + // "FleetCapacityExceededException". + // + // The specified fleet has no available instances to fulfill a CreateGameSession + // request. Clients can retry such requests immediately or after a waiting period. + ErrCodeFleetCapacityExceededException = "FleetCapacityExceededException" + + // ErrCodeGameSessionFullException for service response error code + // "GameSessionFullException". + // + // The game instance is currently full and cannot allow the requested player(s) + // to join. Clients can retry such requests immediately or after a waiting period. + ErrCodeGameSessionFullException = "GameSessionFullException" + + // ErrCodeIdempotentParameterMismatchException for service response error code + // "IdempotentParameterMismatchException". + // + // A game session with this custom ID string already exists in this fleet. Resolve + // this conflict before retrying this request. 
+ ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException" + + // ErrCodeInternalServiceException for service response error code + // "InternalServiceException". + // + // The service encountered an unrecoverable internal failure while processing + // the request. Clients can retry such requests immediately or after a waiting + // period. + ErrCodeInternalServiceException = "InternalServiceException" + + // ErrCodeInvalidFleetStatusException for service response error code + // "InvalidFleetStatusException". + // + // The requested operation would cause a conflict with the current state of + // a resource associated with the request and/or the fleet. Resolve the conflict + // before retrying. + ErrCodeInvalidFleetStatusException = "InvalidFleetStatusException" + + // ErrCodeInvalidGameSessionStatusException for service response error code + // "InvalidGameSessionStatusException". + // + // The requested operation would cause a conflict with the current state of + // a resource associated with the request and/or the game instance. Resolve + // the conflict before retrying. + ErrCodeInvalidGameSessionStatusException = "InvalidGameSessionStatusException" + + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // One or more parameter values in the request are invalid. Correct the invalid + // parameter values before retrying. + ErrCodeInvalidRequestException = "InvalidRequestException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // The requested operation would cause the resource to exceed the allowed service + // limit. Resolve the issue before retrying. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeNotFoundException for service response error code + // "NotFoundException". + // + // A service resource associated with the request could not be found. Clients + // should not retry such requests. 
+ ErrCodeNotFoundException = "NotFoundException" + + // ErrCodeTerminalRoutingStrategyException for service response error code + // "TerminalRoutingStrategyException". + // + // The service is unable to resolve the routing for a particular alias because + // it has a terminal RoutingStrategy associated with it. The message returned + // in this exception is the message defined in the routing strategy itself. + // Such requests should only be retried if the routing strategy for the specified + // alias is modified. + ErrCodeTerminalRoutingStrategyException = "TerminalRoutingStrategyException" + + // ErrCodeUnauthorizedException for service response error code + // "UnauthorizedException". + // + // The client failed authentication. Clients should not retry such requests. + ErrCodeUnauthorizedException = "UnauthorizedException" + + // ErrCodeUnsupportedRegionException for service response error code + // "UnsupportedRegionException". + // + // The requested operation is not supported in the region specified. + ErrCodeUnsupportedRegionException = "UnsupportedRegionException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go new file mode 100644 index 00000000000..b79ac20478d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go @@ -0,0 +1,95 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package gamelift + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// GameLift provides the API operation methods for making requests to +// Amazon GameLift. See this package's package overview docs +// for details on the service. +// +// GameLift methods are safe to use concurrently. 
It is not safe to +// modify mutate any of the struct's properties though. +type GameLift struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "gamelift" // Service endpoint prefix API calls made to. + EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. +) + +// New creates a new instance of the GameLift client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a GameLift client from just a session. +// svc := gamelift.New(mySession) +// +// // Create a GameLift client with additional configuration +// svc := gamelift.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *GameLift { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GameLift { + svc := &GameLift{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-10-01", + JSONVersion: "1.1", + TargetPrefix: "GameLift", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a GameLift operation and runs any +// custom request initialization. +func (c *GameLift) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 5b75513c61c..5b257d0e4bc 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -676,6 +676,14 @@ "version": "v1.12.53", "versionExact": "v1.12.53" }, + { + "checksumSHA1": "Rodm1XwZ9Ncah1NLHep0behQpXg=", + "path": "github.com/aws/aws-sdk-go/service/gamelift", + "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", + "revisionTime": "2017-12-22T21:05:23Z", + "version": "v1.12.53", + "versionExact": "v1.12.53" + }, { "checksumSHA1": "oDoGvSfmO2Z099ixV2HXn+SDeHE=", "path": "github.com/aws/aws-sdk-go/service/glacier", From 4e8f6e5ce2118918be8bace8570aa58a239d34ac Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 3 Jan 2018 03:45:00 -0500 Subject: [PATCH 090/350] 
r/aws_cloudwatch_metric_alarm: Support optional datapoints_to_alarm configuration (#2609) * r/aws_cloudwatch_metric_alarm: Support optional datapoints_to_alarm configuration * r/aws_cloudwatch_metric_alarm: Fix testing go vet error with fmt.Sprintf typing * r/aws_cloudwatch_metric_alarm: #2609 PR feedback * Use rInt and fix indentation in new datapoints_to_alarm test for consistency * Remove extraneous period in datapoints_to_alarm documentation --- aws/resource_aws_cloudwatch_metric_alarm.go | 9 +++++ ...source_aws_cloudwatch_metric_alarm_test.go | 40 +++++++++++++++++++ .../r/cloudwatch_metric_alarm.html.markdown | 1 + 3 files changed, 50 insertions(+) diff --git a/aws/resource_aws_cloudwatch_metric_alarm.go b/aws/resource_aws_cloudwatch_metric_alarm.go index 8eef4ebeeda..9c21dac34d0 100644 --- a/aws/resource_aws_cloudwatch_metric_alarm.go +++ b/aws/resource_aws_cloudwatch_metric_alarm.go @@ -74,6 +74,10 @@ func resourceAwsCloudWatchMetricAlarm() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "datapoints_to_alarm": { + Type: schema.TypeInt, + Optional: true, + }, "dimensions": { Type: schema.TypeMap, Optional: true, @@ -158,6 +162,7 @@ func resourceAwsCloudWatchMetricAlarmRead(d *schema.ResourceData, meta interface d.Set("alarm_description", a.AlarmDescription) d.Set("alarm_name", a.AlarmName) d.Set("comparison_operator", a.ComparisonOperator) + d.Set("datapoints_to_alarm", a.DatapointsToAlarm) if err := d.Set("dimensions", flattenDimensions(a.Dimensions)); err != nil { return err } @@ -243,6 +248,10 @@ func getAwsCloudWatchPutMetricAlarmInput(d *schema.ResourceData) cloudwatch.PutM params.AlarmDescription = aws.String(v.(string)) } + if v, ok := d.GetOk("datapoints_to_alarm"); ok { + params.DatapointsToAlarm = aws.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("unit"); ok { params.Unit = aws.String(v.(string)) } diff --git a/aws/resource_aws_cloudwatch_metric_alarm_test.go b/aws/resource_aws_cloudwatch_metric_alarm_test.go index 
6e266c03b82..ca865ec25fa 100644 --- a/aws/resource_aws_cloudwatch_metric_alarm_test.go +++ b/aws/resource_aws_cloudwatch_metric_alarm_test.go @@ -34,6 +34,26 @@ func TestAccAWSCloudWatchMetricAlarm_basic(t *testing.T) { }) } +func TestAccAWSCloudWatchMetricAlarm_datapointsToAlarm(t *testing.T) { + var alarm cloudwatch.MetricAlarm + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCloudWatchMetricAlarmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudWatchMetricAlarmConfigDatapointsToAlarm(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudWatchMetricAlarmExists("aws_cloudwatch_metric_alarm.foobar", &alarm), + resource.TestCheckResourceAttr("aws_cloudwatch_metric_alarm.foobar", "datapoints_to_alarm", "2"), + ), + }, + }, + }) +} + func TestAccAWSCloudWatchMetricAlarm_treatMissingData(t *testing.T) { var alarm cloudwatch.MetricAlarm rInt := acctest.RandInt() @@ -209,6 +229,26 @@ resource "aws_cloudwatch_metric_alarm" "foobar" { }`, rInt) } +func testAccAWSCloudWatchMetricAlarmConfigDatapointsToAlarm(rInt int) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_metric_alarm" "foobar" { + alarm_name = "terraform-test-foobar%d" + comparison_operator = "GreaterThanOrEqualToThreshold" + datapoints_to_alarm = "2" + evaluation_periods = "2" + metric_name = "CPUUtilization" + namespace = "AWS/EC2" + period = "120" + statistic = "Average" + threshold = "80" + alarm_description = "This metric monitors ec2 cpu utilization" + insufficient_data_actions = [] + dimensions { + InstanceId = "i-abc123" + } +}`, rInt) +} + func testAccAWSCloudWatchMetricAlarmConfigTreatMissingData(rInt int) string { return fmt.Sprintf(` resource "aws_cloudwatch_metric_alarm" "foobar" { diff --git a/website/docs/r/cloudwatch_metric_alarm.html.markdown b/website/docs/r/cloudwatch_metric_alarm.html.markdown index b8bcb26ba44..5fac7f2314f 100644 
--- a/website/docs/r/cloudwatch_metric_alarm.html.markdown +++ b/website/docs/r/cloudwatch_metric_alarm.html.markdown @@ -81,6 +81,7 @@ The following arguments are supported: * `actions_enabled` - (Optional) Indicates whether or not actions should be executed during any changes to the alarm's state. Defaults to `true`. * `alarm_actions` - (Optional) The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Number (ARN). * `alarm_description` - (Optional) The description for the alarm. +* `datapoints_to_alarm` - (Optional) The number of datapoints that must be breaching to trigger the alarm. * `dimensions` - (Optional) The dimensions for the alarm's associated metric. For the list of available dimensions see the AWS documentation [here](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html). * `insufficient_data_actions` - (Optional) The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Number (ARN). * `ok_actions` - (Optional) The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Number (ARN). 
From a5aa85f8ba3cdf70b86463d7439ada5899c1c560 Mon Sep 17 00:00:00 2001 From: Gauthier Wallet Date: Wed, 3 Jan 2018 09:45:45 +0100 Subject: [PATCH 091/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ddcf45c1fa6..f948ecc7738 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ ENHANCEMENTS: * resource/aws_codepipeline: ARN is now exposed as an attribute [GH-2773] * resource/aws_appautoscaling_scheduled_action: min_capcity argument is now honoured [GH-2794] * resource/aws_rds_cluster: Clusters in the `resetting-master-credentials` state no longer cause an error [GH-2791] +* resource/aws_cloudwatch_metric_alarm: Support optional datapoints_to_alarm configuration [GH-2609] BUG FIXES: From b2e7a7360a5c9c60dc6b3c4021f729d3e3607ade Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 3 Jan 2018 09:53:44 +0100 Subject: [PATCH 092/350] test: Randomize names and cleanup --- ...resource_aws_ses_event_destination_test.go | 73 +++++++++++-------- 1 file changed, 44 insertions(+), 29 deletions(-) diff --git a/aws/resource_aws_ses_event_destination_test.go b/aws/resource_aws_ses_event_destination_test.go index 7e357955454..d781dfd2cf5 100644 --- a/aws/resource_aws_ses_event_destination_test.go +++ b/aws/resource_aws_ses_event_destination_test.go @@ -11,6 +11,18 @@ import ( ) func TestAccAWSSESEventDestination_basic(t *testing.T) { + rString := acctest.RandString(8) + + bucketName := fmt.Sprintf("tf-acc-bucket-ses-event-dst-%s", rString) + roleName := fmt.Sprintf("tf_acc_role_ses_event_dst_%s", rString) + streamName := fmt.Sprintf("tf_acc_stream_ses_event_dst_%s", rString) + policyName := fmt.Sprintf("tf_acc_policy_ses_event_dst_%s", rString) + topicName := fmt.Sprintf("tf_acc_topic_ses_event_dst_%s", rString) + sesCfgSetName := fmt.Sprintf("tf_acc_cfg_ses_event_dst_%s", rString) + sesEventDstNameKinesis := fmt.Sprintf("tf_acc_event_dst_kinesis_%s", rString) + sesEventDstNameCw := 
fmt.Sprintf("tf_acc_event_dst_cloudwatch_%s", rString) + sesEventDstNameSns := fmt.Sprintf("tf_acc_event_dst_sns_%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) @@ -19,15 +31,16 @@ func TestAccAWSSESEventDestination_basic(t *testing.T) { CheckDestroy: testAccCheckSESEventDestinationDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSSESEventDestinationConfig, + Config: testAccAWSSESEventDestinationConfig(bucketName, roleName, streamName, policyName, topicName, + sesCfgSetName, sesEventDstNameKinesis, sesEventDstNameCw, sesEventDstNameSns), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESEventDestinationExists("aws_ses_configuration_set.test"), resource.TestCheckResourceAttr( - "aws_ses_event_destination.kinesis", "name", "event-destination-kinesis"), + "aws_ses_event_destination.kinesis", "name", sesEventDstNameKinesis), resource.TestCheckResourceAttr( - "aws_ses_event_destination.cloudwatch", "name", "event-destination-cloudwatch"), + "aws_ses_event_destination.cloudwatch", "name", sesEventDstNameCw), resource.TestCheckResourceAttr( - "aws_ses_event_destination.sns", "name", "event-destination-sns"), + "aws_ses_event_destination.sns", "name", sesEventDstNameSns), ), }, }, @@ -49,7 +62,7 @@ func testAccCheckSESEventDestinationDestroy(s *terraform.State) error { found := false for _, element := range response.ConfigurationSets { - if *element.Name == fmt.Sprintf("some-configuration-set-%d", edRandomInteger) { + if *element.Name == rs.Primary.ID { found = true } } @@ -84,7 +97,7 @@ func testAccCheckAwsSESEventDestinationExists(n string) resource.TestCheckFunc { found := false for _, element := range response.ConfigurationSets { - if *element.Name == fmt.Sprintf("some-configuration-set-%d", edRandomInteger) { + if *element.Name == rs.Primary.ID { found = true } } @@ -97,15 +110,16 @@ func testAccCheckAwsSESEventDestinationExists(n string) resource.TestCheckFunc { } } -var edRandomInteger = 
acctest.RandInt() -var testAccAWSSESEventDestinationConfig = fmt.Sprintf(` +func testAccAWSSESEventDestinationConfig(bucketName, roleName, streamName, policyName, topicName, + sesCfgSetName, sesEventDstNameKinesis, sesEventDstNameCw, sesEventDstNameSns string) string { + return fmt.Sprintf(` resource "aws_s3_bucket" "bucket" { - bucket = "tf-test-bucket-format" + bucket = "%s" acl = "private" } resource "aws_iam_role" "firehose_role" { - name = "firehose_test_role_test" + name = "%s" assume_role_policy = < Date: Wed, 3 Jan 2018 10:00:07 +0100 Subject: [PATCH 093/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f948ecc7738..6649793a1a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ ENHANCEMENTS: * resource/aws_appautoscaling_scheduled_action: min_capcity argument is now honoured [GH-2794] * resource/aws_rds_cluster: Clusters in the `resetting-master-credentials` state no longer cause an error [GH-2791] * resource/aws_cloudwatch_metric_alarm: Support optional datapoints_to_alarm configuration [GH-2609] +* resource/aws_ses_event_destination: Add support for SNS destinations [GH-1737] BUG FIXES: From 1e04bfc18605940d7149f03e868ace5d6a0cd8e7 Mon Sep 17 00:00:00 2001 From: Nguyen Kien Trung Date: Wed, 3 Jan 2018 06:03:32 -0500 Subject: [PATCH 094/350] d/aws_iam_server_certificate: add support for retrieving public key (#2749) * #2742: add support for retrieving public key * #2742: re-organized imports and added additional assertions * #2742: re-organized imports and added additional assertions * #2742: certificate_chain assertion --- aws/data_source_aws_iam_server_certificate.go | 29 ++++++++++++++++++- ..._source_aws_iam_server_certificate_test.go | 3 ++ .../d/iam_server_certificate.html.markdown | 9 ++++-- 3 files changed, 37 insertions(+), 4 deletions(-) diff --git a/aws/data_source_aws_iam_server_certificate.go b/aws/data_source_aws_iam_server_certificate.go index 
e39a7843f0e..1ec63c52aad 100644 --- a/aws/data_source_aws_iam_server_certificate.go +++ b/aws/data_source_aws_iam_server_certificate.go @@ -5,6 +5,7 @@ import ( "log" "sort" "strings" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" @@ -68,6 +69,21 @@ func dataSourceAwsIAMServerCertificate() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "upload_date": { + Type: schema.TypeString, + Computed: true, + }, + + "certificate_body": { + Type: schema.TypeString, + Computed: true, + }, + + "certificate_chain": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -129,8 +145,19 @@ func dataSourceAwsIAMServerCertificateRead(d *schema.ResourceData, meta interfac d.Set("path", *metadata.Path) d.Set("name", *metadata.ServerCertificateName) if metadata.Expiration != nil { - d.Set("expiration_date", metadata.Expiration.Format("2006-01-02T15:04:05")) + d.Set("expiration_date", metadata.Expiration.Format(time.RFC3339)) + } + + log.Printf("[DEBUG] Get Public Key Certificate for %s", *metadata.ServerCertificateName) + serverCertificateResp, err := iamconn.GetServerCertificate(&iam.GetServerCertificateInput{ + ServerCertificateName: metadata.ServerCertificateName, + }) + if err != nil { + return err } + d.Set("upload_date", serverCertificateResp.ServerCertificate.ServerCertificateMetadata.UploadDate.Format(time.RFC3339)) + d.Set("certificate_body", aws.StringValue(serverCertificateResp.ServerCertificate.CertificateBody)) + d.Set("certificate_chain", aws.StringValue(serverCertificateResp.ServerCertificate.CertificateChain)) return nil } diff --git a/aws/data_source_aws_iam_server_certificate_test.go b/aws/data_source_aws_iam_server_certificate_test.go index 55c156dedfb..07712b0ebf5 100644 --- a/aws/data_source_aws_iam_server_certificate_test.go +++ b/aws/data_source_aws_iam_server_certificate_test.go @@ -57,6 +57,9 @@ func TestAccAWSDataSourceIAMServerCertificate_basic(t *testing.T) { 
resource.TestCheckResourceAttrSet("data.aws_iam_server_certificate.test", "id"), resource.TestCheckResourceAttrSet("data.aws_iam_server_certificate.test", "name"), resource.TestCheckResourceAttrSet("data.aws_iam_server_certificate.test", "path"), + resource.TestCheckResourceAttrSet("data.aws_iam_server_certificate.test", "upload_date"), + resource.TestCheckResourceAttr("data.aws_iam_server_certificate.test", "certificate_chain", ""), + resource.TestMatchResourceAttr("data.aws_iam_server_certificate.test", "certificate_body", regexp.MustCompile("^-----BEGIN CERTIFICATE-----")), ), }, }, diff --git a/website/docs/d/iam_server_certificate.html.markdown b/website/docs/d/iam_server_certificate.html.markdown index 8046295d74f..f34344c18df 100644 --- a/website/docs/d/iam_server_certificate.html.markdown +++ b/website/docs/d/iam_server_certificate.html.markdown @@ -39,9 +39,12 @@ resource "aws_elb" "elb" { ## Attributes Reference -`arn` is set to the ARN of the IAM Server Certificate -`path` is set to the path of the IAM Server Certificate -`expiration_date` is set to the expiration date of the IAM Server Certificate +* `arn` is set to the ARN of the IAM Server Certificate +* `path` is set to the path of the IAM Server Certificate +* `expiration_date` is set to the expiration date of the IAM Server Certificate +* `upload_date` is the date when the server certificate was uploaded +* `certificate_body` is the public key certificate (PEM-encoded). 
This is useful when [configuring back-end instance authentication](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html) policy for load balancer +* `certificate_chain` is the public key certificate chain (PEM-encoded) if exists, empty otherwise ## Import From 076331c1c6415e551b51cea63e0d1e92ca533407 Mon Sep 17 00:00:00 2001 From: Gauthier Wallet Date: Wed, 3 Jan 2018 12:05:12 +0100 Subject: [PATCH 095/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6649793a1a9..b908a64b493 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ ENHANCEMENTS: * resource/aws_rds_cluster: Clusters in the `resetting-master-credentials` state no longer cause an error [GH-2791] * resource/aws_cloudwatch_metric_alarm: Support optional datapoints_to_alarm configuration [GH-2609] * resource/aws_ses_event_destination: Add support for SNS destinations [GH-1737] +* data-source/aws_iam_server_certificate: add support for retrieving public key [GH-2749] BUG FIXES: From 8d6e751de55c56aa9f34651f6c7ee37a92f3b161 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 23 Oct 2017 16:47:59 +0100 Subject: [PATCH 096/350] r/cognito_identity_pool_domain: Add resource --- aws/provider.go | 1 + aws/resource_aws_cognito_user_pool_domain.go | 171 ++++++++++++++++++ ...ource_aws_cognito_user_pool_domain_test.go | 120 ++++++++++++ aws/validators.go | 9 + aws/validators_test.go | 27 +++ website/aws.erb | 3 + .../docs/r/cognito_user_pool_domain.markdown | 40 ++++ 7 files changed, 371 insertions(+) create mode 100644 aws/resource_aws_cognito_user_pool_domain.go create mode 100644 aws/resource_aws_cognito_user_pool_domain_test.go create mode 100644 website/docs/r/cognito_user_pool_domain.markdown diff --git a/aws/provider.go b/aws/provider.go index e4c80bfcceb..b3887547b51 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -292,6 +292,7 @@ func Provider() 
terraform.ResourceProvider { "aws_cognito_identity_pool": resourceAwsCognitoIdentityPool(), "aws_cognito_identity_pool_roles_attachment": resourceAwsCognitoIdentityPoolRolesAttachment(), "aws_cognito_user_pool": resourceAwsCognitoUserPool(), + "aws_cognito_user_pool_domain": resourceAwsCognitoUserPoolDomain(), "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), "aws_cloudwatch_dashboard": resourceAwsCloudWatchDashboard(), diff --git a/aws/resource_aws_cognito_user_pool_domain.go b/aws/resource_aws_cognito_user_pool_domain.go new file mode 100644 index 00000000000..08c70a478fd --- /dev/null +++ b/aws/resource_aws_cognito_user_pool_domain.go @@ -0,0 +1,171 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCognitoUserPoolDomain() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCognitoUserPoolDomainCreate, + Read: resourceAwsCognitoUserPoolDomainRead, + Delete: resourceAwsCognitoUserPoolDomainDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateCognitoUserPoolDomain, + }, + "user_pool_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "aws_account_id": { + Type: schema.TypeString, + Computed: true, + }, + "cloudfront_distribution_arn": { + Type: schema.TypeString, + Computed: true, + }, + "s3_bucket": { + Type: schema.TypeString, + Computed: true, + }, + "version": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsCognitoUserPoolDomainCreate(d *schema.ResourceData, meta interface{}) error { + conn := 
meta.(*AWSClient).cognitoidpconn + + domain := d.Get("domain").(string) + + params := &cognitoidentityprovider.CreateUserPoolDomainInput{ + Domain: aws.String(domain), + UserPoolId: aws.String(d.Get("user_pool_id").(string)), + } + log.Printf("[DEBUG] Creating Cognito User Pool Domain: %s", params) + + _, err := conn.CreateUserPoolDomain(params) + if err != nil { + return fmt.Errorf("Error creating Cognito User Pool Domain: %s", err) + } + + d.SetId(domain) + + stateConf := resource.StateChangeConf{ + Pending: []string{ + cognitoidentityprovider.DomainStatusTypeCreating, + cognitoidentityprovider.DomainStatusTypeUpdating, + }, + Target: []string{ + cognitoidentityprovider.DomainStatusTypeActive, + }, + Timeout: 1 * time.Minute, + Refresh: func() (interface{}, string, error) { + domain, err := conn.DescribeUserPoolDomain(&cognitoidentityprovider.DescribeUserPoolDomainInput{ + Domain: aws.String(d.Get("domain").(string)), + }) + if err != nil { + return 42, "", err + } + + desc := domain.DomainDescription + + return domain, *desc.Status, nil + }, + } + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsCognitoUserPoolDomainRead(d, meta) +} + +func resourceAwsCognitoUserPoolDomainRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cognitoidpconn + log.Printf("[DEBUG] Reading Cognito User Pool Domain: %s", d.Id()) + + domain, err := conn.DescribeUserPoolDomain(&cognitoidentityprovider.DescribeUserPoolDomainInput{ + Domain: aws.String(d.Id()), + }) + if err != nil { + if isAWSErr(err, "ResourceNotFoundException", "") { + log.Printf("[WARN] Cognito User Pool Domain %q not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return err + } + + desc := domain.DomainDescription + + d.Set("domain", d.Id()) + d.Set("aws_account_id", desc.AWSAccountId) + d.Set("cloudfront_distribution_arn", desc.CloudFrontDistribution) + d.Set("s3_bucket", desc.S3Bucket) + d.Set("user_pool_id", 
desc.UserPoolId) + d.Set("version", desc.Version) + + return nil +} + +func resourceAwsCognitoUserPoolDomainDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cognitoidpconn + log.Printf("[DEBUG] Deleting Cognito User Pool Domain: %s", d.Id()) + + _, err := conn.DeleteUserPoolDomain(&cognitoidentityprovider.DeleteUserPoolDomainInput{ + Domain: aws.String(d.Id()), + UserPoolId: aws.String(d.Get("user_pool_id").(string)), + }) + if err != nil { + return err + } + + stateConf := resource.StateChangeConf{ + Pending: []string{ + cognitoidentityprovider.DomainStatusTypeUpdating, + cognitoidentityprovider.DomainStatusTypeDeleting, + }, + Target: []string{""}, + Timeout: 1 * time.Minute, + Refresh: func() (interface{}, string, error) { + domain, err := conn.DescribeUserPoolDomain(&cognitoidentityprovider.DescribeUserPoolDomainInput{ + Domain: aws.String(d.Id()), + }) + if err != nil { + if isAWSErr(err, "ResourceNotFoundException", "") { + return 42, "", nil + } + return 42, "", err + } + + desc := domain.DomainDescription + if desc.Status == nil { + return 42, "", nil + } + + return domain, *desc.Status, nil + }, + } + _, err = stateConf.WaitForState() + return err +} diff --git a/aws/resource_aws_cognito_user_pool_domain_test.go b/aws/resource_aws_cognito_user_pool_domain_test.go new file mode 100644 index 00000000000..eef019df2f7 --- /dev/null +++ b/aws/resource_aws_cognito_user_pool_domain_test.go @@ -0,0 +1,120 @@ +package aws + +import ( + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSCognitoUserPoolDomain_basic(t *testing.T) { + domainName := fmt.Sprintf("tf-acc-test-domain-%d", acctest.RandInt()) + poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandStringFromCharSet(10, 
acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCognitoUserPoolDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCognitoUserPoolDomainConfig_basic(domainName, poolName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCognitoUserPoolDomainExists("aws_cognito_user_pool_domain.main"), + resource.TestCheckResourceAttr("aws_cognito_user_pool_domain.main", "domain", domainName), + resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "name", poolName), + resource.TestCheckResourceAttrSet("aws_cognito_user_pool_domain.main", "aws_account_id"), + resource.TestCheckResourceAttrSet("aws_cognito_user_pool_domain.main", "cloudfront_distribution_arn"), + resource.TestCheckResourceAttrSet("aws_cognito_user_pool_domain.main", "s3_bucket"), + resource.TestCheckResourceAttrSet("aws_cognito_user_pool_domain.main", "version"), + ), + }, + }, + }) +} + +func TestAccAWSCognitoUserPoolDomain_import(t *testing.T) { + domainName := fmt.Sprintf("tf-acc-test-domain-%d", acctest.RandInt()) + poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCognitoUserPoolDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCognitoUserPoolDomainConfig_basic(domainName, poolName), + }, + { + ResourceName: "aws_cognito_user_pool_domain.main", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAWSCognitoUserPoolDomainExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Cognito User Pool Domain ID is set") + } + + conn := 
testAccProvider.Meta().(*AWSClient).cognitoidpconn + + _, err := conn.DescribeUserPoolDomain(&cognitoidentityprovider.DescribeUserPoolDomainInput{ + Domain: aws.String(rs.Primary.ID), + }) + + if err != nil { + return err + } + + return nil + } +} + +func testAccCheckAWSCognitoUserPoolDomainDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).cognitoidpconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_cognito_user_pool_domain" { + continue + } + + _, err := conn.DescribeUserPoolDomain(&cognitoidentityprovider.DescribeUserPoolDomainInput{ + Domain: aws.String(rs.Primary.ID), + }) + + if err != nil { + if isAWSErr(err, "ResourceNotFoundException", "") { + return nil + } + return err + } + } + + return nil +} + +func testAccAWSCognitoUserPoolDomainConfig_basic(domainName, poolName string) string { + return fmt.Sprintf(` +resource "aws_cognito_user_pool_domain" "main" { + domain = "%s" + user_pool_id = "${aws_cognito_user_pool.main.id}" +} + +resource "aws_cognito_user_pool" "main" { + name = "%s" +} +`, domainName, poolName) +} diff --git a/aws/validators.go b/aws/validators.go index c502925e31d..380db338bf7 100644 --- a/aws/validators.go +++ b/aws/validators.go @@ -1986,6 +1986,15 @@ func validateCognitoRoles(v map[string]interface{}, k string) (errors []error) { return } +func validateCognitoUserPoolDomain(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[a-z0-9](?:[a-z0-9\-]{0,61}[a-z0-9])?$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters and hyphens (max length 63 chars) allowed in %q", k)) + } + return +} + func validateDxConnectionBandWidth(v interface{}, k string) (ws []string, errors []error) { val, ok := v.(string) if !ok { diff --git a/aws/validators_test.go b/aws/validators_test.go index 9337faacb44..1a63f7ab8ad 100644 --- a/aws/validators_test.go +++ b/aws/validators_test.go @@ -2863,3 
+2863,30 @@ func TestResourceAWSElastiCacheReplicationGroupAuthTokenValidation(t *testing.T) } } } + +func TestValidateCognitoUserPoolDomain(t *testing.T) { + validTypes := []string{ + "valid-domain", + "validdomain", + "val1d-d0main", + } + for _, v := range validTypes { + _, errors := validateCognitoUserPoolDomain(v, "name") + if len(errors) != 0 { + t.Fatalf("%q should be a valid Cognito User Pool Domain: %q", v, errors) + } + } + + invalidTypes := []string{ + "UpperCase", + "-invalid", + "invalid-", + strings.Repeat("i", 64), // > 63 + } + for _, v := range invalidTypes { + _, errors := validateCognitoUserPoolDomain(v, "name") + if len(errors) == 0 { + t.Fatalf("%q should be an invalid Cognito User Pool Domain", v) + } + } +} diff --git a/website/aws.erb b/website/aws.erb index 5bf9ba7af53..81fba1358e4 100644 --- a/website/aws.erb +++ b/website/aws.erb @@ -487,6 +487,9 @@ > aws_cognito_user_pool + > + aws_cognito_user_pool_domain + diff --git a/website/docs/r/cognito_user_pool_domain.markdown b/website/docs/r/cognito_user_pool_domain.markdown new file mode 100644 index 00000000000..a5e995002a5 --- /dev/null +++ b/website/docs/r/cognito_user_pool_domain.markdown @@ -0,0 +1,40 @@ +--- +layout: "aws" +page_title: "AWS: aws_cognito_user_pool_domain" +sidebar_current: "docs-aws-resource-cognito-user-pool-domain" +description: |- + Provides a Cognito User Pool Domain resource. +--- + +# aws_cognito_user_pool_domain + +Provides a Cognito User Pool Domain resource. + +## Example Usage + +```hcl +resource "aws_cognito_user_pool_domain" "main" { + domain = "example-domain" + user_pool_id = "${aws_cognito_user_pool.example.id}" +} + +resource "aws_cognito_user_pool" "example" { + name = "example-pool" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `domain` - (Required) The domain string. +* `user_pool_id` - (Required) The user pool ID. 
+ +## Attribute Reference + +The following attributes are exported: + +* `aws_account_id` - The AWS account ID for the user pool owner. +* `cloudfront_distribution_arn` - The ARN of the CloudFront distribution. +* `s3_bucket` - The S3 bucket where the static files for this domain are stored. +* `version` - The app version. From 85f2a280a49b6e31f11ca386230215da6ea9d213 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 3 Jan 2018 12:42:54 +0100 Subject: [PATCH 097/350] Update CHANGELOG.md --- CHANGELOG.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b908a64b493..ab114cd92a3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 1.6.1 (Unreleased) +FEATURES: + +* **New Resource:** `aws_cognito_user_pool_domain` [GH-2325] + ENHANCEMENTS: * provider: `eu-west-3` is now supported [GH-2707] @@ -7,16 +11,16 @@ ENHANCEMENTS: * provider: Endpoints can now be specified for API Gateway and Lambda [GH-2641] * resource/aws_kinesis_firehose_delivery_stream: Import is now supported [GH-2707] * resource/aws_cognito_user_pool: The ARN for the pool is now computed and exposed as an attribute [GH-2723] -* resource/aws_directory_service_directory: Add security_group_id field [GH-2688] +* resource/aws_directory_service_directory: Add `security_group_id` field [GH-2688] * resource/aws_rds_cluster_instance: Support Performance Insights [GH-2331] -* resource/aws_rds_cluster_instance: Set db_subnet_group_name in state on read if available [GH-2606] +* resource/aws_rds_cluster_instance: Set `db_subnet_group_name` in state on read if available [GH-2606] * resource/aws_eip: Tagging is now supported [GH-2768] * resource/aws_codepipeline: ARN is now exposed as an attribute [GH-2773] -* resource/aws_appautoscaling_scheduled_action: min_capcity argument is now honoured [GH-2794] +* resource/aws_appautoscaling_scheduled_action: `min_capacity` argument is now honoured [GH-2794] * resource/aws_rds_cluster: Clusters in 
the `resetting-master-credentials` state no longer cause an error [GH-2791] * resource/aws_cloudwatch_metric_alarm: Support optional datapoints_to_alarm configuration [GH-2609] * resource/aws_ses_event_destination: Add support for SNS destinations [GH-1737] -* data-source/aws_iam_server_certificate: add support for retrieving public key [GH-2749] +* data-source/aws_iam_server_certificate: Add support for retrieving public key [GH-2749] BUG FIXES: From d14d916acac693807388fbda1ab911897cdecb0f Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 3 Jan 2018 12:43:09 +0100 Subject: [PATCH 098/350] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab114cd92a3..370390ade33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 1.6.1 (Unreleased) +## 1.7.0 (Unreleased) FEATURES: From 0dedb8dca122409d110eb47731d055ab57a8aa60 Mon Sep 17 00:00:00 2001 From: atsushi-ishibashi Date: Wed, 3 Jan 2018 21:44:28 +0900 Subject: [PATCH 099/350] r/iam_role: Delete inline policies when force_detach_policies = true (#2388) * WIP * Delete inline policy when force_detach_policies=true * Add TestAccAWSIAMRole_force_detach_policies for testacc * Use paging, modify test --- aws/resource_aws_iam_role.go | 25 +++++++ aws/resource_aws_iam_role_test.go | 119 ++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+) diff --git a/aws/resource_aws_iam_role.go b/aws/resource_aws_iam_role.go index 518a12dc0e2..439868062d9 100644 --- a/aws/resource_aws_iam_role.go +++ b/aws/resource_aws_iam_role.go @@ -285,6 +285,31 @@ func resourceAwsIamRoleDelete(d *schema.ResourceData, meta interface{}) error { } } } + + // For inline policies + rolePolicyNames := make([]*string, 0) + err = iamconn.ListRolePoliciesPages(&iam.ListRolePoliciesInput{ + RoleName: aws.String(d.Id()), + }, func(page *iam.ListRolePoliciesOutput, lastPage bool) bool { + for _, v := range page.PolicyNames { + rolePolicyNames = 
append(rolePolicyNames, v) + } + return len(page.PolicyNames) > 0 + }) + if err != nil { + return fmt.Errorf("Error listing inline Policies for IAM Role (%s) when trying to delete: %s", d.Id(), err) + } + if len(rolePolicyNames) > 0 { + for _, pname := range rolePolicyNames { + _, err := iamconn.DeleteRolePolicy(&iam.DeleteRolePolicyInput{ + PolicyName: pname, + RoleName: aws.String(d.Id()), + }) + if err != nil { + return fmt.Errorf("Error deleting inline policy of IAM Role %s: %s", d.Id(), err) + } + } + } } request := &iam.DeleteRoleInput{ diff --git a/aws/resource_aws_iam_role_test.go b/aws/resource_aws_iam_role_test.go index 52d22c7d913..5de50c37a01 100644 --- a/aws/resource_aws_iam_role_test.go +++ b/aws/resource_aws_iam_role_test.go @@ -138,6 +138,26 @@ func TestAccAWSIAMRole_badJSON(t *testing.T) { }) } +func TestAccAWSIAMRole_force_detach_policies(t *testing.T) { + var conf iam.GetRoleOutput + rName := acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRoleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSIAMRoleConfig_force_detach_policies(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSRoleExists("aws_iam_role.test", &conf), + testAccAddAwsIAMRolePolicy("aws_iam_role.test"), + ), + }, + }, + }) +} + func testAccCheckAWSRoleDestroy(s *terraform.State) error { iamconn := testAccProvider.Meta().(*AWSClient).iamconn @@ -210,6 +230,37 @@ func testAccCheckAWSRoleGeneratedNamePrefix(resource, prefix string) resource.Te } } +// Attach inline policy outside of terraform CRUD. 
+func testAccAddAwsIAMRolePolicy(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found") + } + if rs.Primary.ID == "" { + return fmt.Errorf("No Role name is set") + } + + iamconn := testAccProvider.Meta().(*AWSClient).iamconn + + input := &iam.PutRolePolicyInput{ + RoleName: aws.String(rs.Primary.ID), + PolicyDocument: aws.String(`{ + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "*", + "Resource": "*" + } + }`), + PolicyName: aws.String(resource.UniqueId()), + } + + _, err := iamconn.PutRolePolicy(input) + return err + } +} + func testAccAWSIAMRoleConfig(rName string) string { return fmt.Sprintf(` resource "aws_iam_role" "role" { @@ -375,3 +426,71 @@ POLICY } `, rName) } + +func testAccAWSIAMRoleConfig_force_detach_policies(rName string) string { + return fmt.Sprintf(` +resource "aws_iam_role_policy" "test" { + name = "tf-iam-role-policy-%s" + role = "${aws_iam_role.test.id}" + + policy = < Date: Wed, 3 Jan 2018 13:45:48 +0100 Subject: [PATCH 100/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 370390ade33..7986af3521b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ ENHANCEMENTS: * resource/aws_rds_cluster: Clusters in the `resetting-master-credentials` state no longer cause an error [GH-2791] * resource/aws_cloudwatch_metric_alarm: Support optional datapoints_to_alarm configuration [GH-2609] * resource/aws_ses_event_destination: Add support for SNS destinations [GH-1737] +* resource/aws_iam_role: Delete inline policies when `force_detach_policies = true` [GH-2388] * data-source/aws_iam_server_certificate: Add support for retrieving public key [GH-2749] BUG FIXES: From 4b56a7315336596dfafcb1dfd8a32772bbdafd3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=BCven?= Date: Wed, 3 Jan 2018 18:24:22 +0100 Subject: [PATCH 101/350] fix 
lb_target_group health check inconsistencies (#2580) * path defaults to "/" and validate function checks for "/" prefix * timeout doc reflects the actual default in code --- aws/resource_aws_lb_target_group.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/aws/resource_aws_lb_target_group.go b/aws/resource_aws_lb_target_group.go index 21826891d42..0e4a6622bc9 100644 --- a/aws/resource_aws_lb_target_group.go +++ b/aws/resource_aws_lb_target_group.go @@ -395,6 +395,10 @@ func validateAwsLbTargetGroupHealthCheckPath(v interface{}, k string) (ws []stri errors = append(errors, fmt.Errorf( "%q cannot be longer than 1024 characters: %q", k, value)) } + if !strings.HasPrefix(value, "/") { + errors = append(errors, fmt.Errorf( + "%q must begin with a '/' character: %q", k, value)) + } return } From e3d0215fafe859134ca90f2fd9be8235f51f7d2b Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 3 Jan 2018 18:25:32 +0100 Subject: [PATCH 102/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7986af3521b..aafbb56e0d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ ENHANCEMENTS: * resource/aws_cloudwatch_metric_alarm: Support optional datapoints_to_alarm configuration [GH-2609] * resource/aws_ses_event_destination: Add support for SNS destinations [GH-1737] * resource/aws_iam_role: Delete inline policies when `force_detach_policies = true` [GH-2388] +* resource/aws_lb_target_group: Improve `health_check` validation [GH-2580] * data-source/aws_iam_server_certificate: Add support for retrieving public key [GH-2749] BUG FIXES: From 707417e9796e5113e4dfff594b97d2ac000e4bf1 Mon Sep 17 00:00:00 2001 From: russelldear Date: Thu, 4 Jan 2018 08:39:21 +1300 Subject: [PATCH 103/350] Adds REQUEST authorizer type and additional identity_source values --- website/docs/r/api_gateway_authorizer.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/website/docs/r/api_gateway_authorizer.html.markdown b/website/docs/r/api_gateway_authorizer.html.markdown index ab3c54b8198..ac4dd7241e0 100644 --- a/website/docs/r/api_gateway_authorizer.html.markdown +++ b/website/docs/r/api_gateway_authorizer.html.markdown @@ -102,8 +102,8 @@ The following arguments are supported: * `name` - (Required) The name of the authorizer * `rest_api_id` - (Required) The ID of the associated REST API * `identity_source` - (Optional) The source of the identity in an incoming request. - Defaults to `method.request.header.Authorization`. -* `type` - (Optional) The type of the authorizer. `TOKEN` is currently the only allowed value. + Defaults to `method.request.header.Authorization`. For `REQUEST` type, this may be a comma-separated list of values, including headers, query string parameters and stage variables - e.g. `"method.request.header.SomeHeaderName,method.request.querystring.SomeQueryStringName,stageVariables.SomeStageVariableName"` +* `type` - (Optional) The type of the authorizer. Possible values are `TOKEN` and `REQUEST`. Defaults to `TOKEN`. * `authorizer_credentials` - (Optional) The credentials required for the authorizer. To specify an IAM Role for API Gateway to assume, use the IAM Role ARN. 
From e3e5b370e81df1bde434f1189845231b9d8e3f45 Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 3 Jan 2018 15:31:02 -0600 Subject: [PATCH 104/350] Adding "Data Source" header to data source resource (continuing #2422) (#2713) * Add "Data Source:" to main heading of data source resources (issue #18) --- .gitignore | 1 + website/docs/d/acm_certificate.html.markdown | 2 +- website/docs/d/ami.html.markdown | 2 +- website/docs/d/ami_ids.html.markdown | 2 +- website/docs/d/autoscaling_groups.html.markdown | 2 +- website/docs/d/availability_zone.html.markdown | 2 +- website/docs/d/availability_zones.html.markdown | 2 +- website/docs/d/billing_service_account.html.markdown | 2 +- website/docs/d/caller_identity.html.markdown | 2 +- website/docs/d/canonical_user_id.html.markdown | 2 +- website/docs/d/cloudformation_stack.html.markdown | 2 +- website/docs/d/cloudtrail_service_account.html.markdown | 2 +- website/docs/d/db_instance.html.markdown | 2 +- website/docs/d/db_snapshot.html.markdown | 2 +- website/docs/d/dynamodb_table.html.markdown | 2 +- website/docs/d/ebs_snapshot.html.markdown | 2 +- website/docs/d/ebs_snapshot_ids.html.markdown | 2 +- website/docs/d/ebs_volume.html.markdown | 2 +- website/docs/d/ecr_repository.html.markdown | 2 +- website/docs/d/ecs_cluster.html.markdown | 2 +- website/docs/d/ecs_container_definition.html.markdown | 2 +- website/docs/d/ecs_task_definition.html.markdown | 2 +- website/docs/d/efs_file_system.html.markdown | 2 +- website/docs/d/efs_mount_target.html.markdown | 2 +- website/docs/d/eip.html.markdown | 2 +- website/docs/d/elastic_beanstalk_solution_stack.html.markdown | 4 ++-- website/docs/d/elasticache_cluster.html.markdown | 2 +- website/docs/d/elasticache_replication_group.html.markdown | 2 +- website/docs/d/elb_hosted_zone_id.html.markdown | 2 +- website/docs/d/elb_service_account.html.markdown | 2 +- website/docs/d/iam_account_alias.html.markdown | 2 +- website/docs/d/iam_group.html.markdown | 2 +- 
website/docs/d/iam_instance_profile.html.markdown | 2 +- website/docs/d/iam_policy_document.html.markdown | 2 +- website/docs/d/iam_role.html.markdown | 2 +- website/docs/d/iam_server_certificate.html.markdown | 2 +- website/docs/d/iam_user.html.markdown | 2 +- website/docs/d/instance.html.markdown | 2 +- website/docs/d/instances.html.markdown | 2 +- website/docs/d/internet_gateway.html.markdown | 2 +- website/docs/d/ip_ranges.html.markdown | 2 +- website/docs/d/kinesis_stream.html.markdown | 4 ++-- website/docs/d/kms_alias.html.markdown | 2 +- website/docs/d/kms_ciphertext.html.markdown | 2 +- website/docs/d/kms_secret.html.markdown | 2 +- website/docs/d/lb.html.markdown | 2 +- website/docs/d/lb_listener.html.markdown | 2 +- website/docs/d/lb_target_group.html.markdown | 2 +- website/docs/d/nat_gateway.html.markdown | 2 +- website/docs/d/partition.html.markdown | 2 +- website/docs/d/prefix_list.html.markdown | 2 +- website/docs/d/rds_cluster.html.markdown | 2 +- website/docs/d/redshift_service_account.html.markdown | 2 +- website/docs/d/region.html.markdown | 2 +- website/docs/d/route53_zone.html.markdown | 2 +- website/docs/d/route_table.html.markdown | 2 +- website/docs/d/s3_bucket.html.markdown | 4 ++-- website/docs/d/s3_bucket_object.html.markdown | 2 +- website/docs/d/security_group.html.markdown | 2 +- website/docs/d/sns_topic.html.markdown | 2 +- website/docs/d/ssm_parameter.html.markdown | 2 +- website/docs/d/subnet.html.markdown | 2 +- website/docs/d/subnet_ids.html.markdown | 2 +- website/docs/d/vpc.html.markdown | 2 +- website/docs/d/vpc_endpoint.html.markdown | 2 +- website/docs/d/vpc_endpoint_service.html.markdown | 2 +- website/docs/d/vpc_peering_connection.html.markdown | 2 +- website/docs/d/vpn_gateway.html.markdown | 2 +- 68 files changed, 71 insertions(+), 70 deletions(-) diff --git a/.gitignore b/.gitignore index c12dda081bc..5aaec9d3378 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,4 @@ website/vendor # Keep windows files with windows 
line endings *.winfile eol=crlf +/.vs \ No newline at end of file diff --git a/website/docs/d/acm_certificate.html.markdown b/website/docs/d/acm_certificate.html.markdown index 2fc2d337560..992067e3062 100644 --- a/website/docs/d/acm_certificate.html.markdown +++ b/website/docs/d/acm_certificate.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on a Amazon Certificate Manager (ACM) Certificate --- -# aws_acm_certificate +# Data Source: aws_acm_certificate Use this data source to get the ARN of a certificate in AWS Certificate Manager (ACM). The process of requesting and verifying a certificate in ACM diff --git a/website/docs/d/ami.html.markdown b/website/docs/d/ami.html.markdown index 520910d3dc4..716d5854817 100644 --- a/website/docs/d/ami.html.markdown +++ b/website/docs/d/ami.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on a Amazon Machine Image (AMI). --- -# aws_ami +# Data Source: aws_ami Use this data source to get the ID of a registered AMI for use in other resources. diff --git a/website/docs/d/ami_ids.html.markdown b/website/docs/d/ami_ids.html.markdown index 25f2073c7d9..bc691f5faec 100644 --- a/website/docs/d/ami_ids.html.markdown +++ b/website/docs/d/ami_ids.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a list of AMI IDs. --- -# aws_ami_ids +# Data Source: aws_ami_ids Use this data source to get a list of AMI IDs matching the specified criteria. diff --git a/website/docs/d/autoscaling_groups.html.markdown b/website/docs/d/autoscaling_groups.html.markdown index ae4e1b29883..470f05e3152 100644 --- a/website/docs/d/autoscaling_groups.html.markdown +++ b/website/docs/d/autoscaling_groups.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a list of Autoscaling Groups within a specific region. --- -# aws_autoscaling_groups +# Data Source: aws_autoscaling_groups The Autoscaling Groups data source allows access to the list of AWS ASGs within a specific region. 
This will allow you to pass a list of AutoScaling Groups to other resources. diff --git a/website/docs/d/availability_zone.html.markdown b/website/docs/d/availability_zone.html.markdown index bcf6b99ff75..7cc945be36e 100644 --- a/website/docs/d/availability_zone.html.markdown +++ b/website/docs/d/availability_zone.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific availability zone --- -# aws_availability_zone +# Data Source: aws_availability_zone `aws_availability_zone` provides details about a specific availability zone (AZ) in the current region. diff --git a/website/docs/d/availability_zones.html.markdown b/website/docs/d/availability_zones.html.markdown index 333f9fe453c..3000a2f8b16 100644 --- a/website/docs/d/availability_zones.html.markdown +++ b/website/docs/d/availability_zones.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a list of Availability Zones which can be used by an AWS account. --- -# aws_availability_zones +# Data Source: aws_availability_zones The Availability Zones data source allows access to the list of AWS Availability Zones which can be accessed by an AWS account within the region diff --git a/website/docs/d/billing_service_account.html.markdown b/website/docs/d/billing_service_account.html.markdown index 01608c96b7d..5545d230349 100644 --- a/website/docs/d/billing_service_account.html.markdown +++ b/website/docs/d/billing_service_account.html.markdown @@ -6,7 +6,7 @@ description: |- Get AWS Billing Service Account --- -# aws_billing_service_account +# Data Source: aws_billing_service_account Use this data source to get the Account ID of the [AWS Billing and Cost Management Service Account](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-getting-started.html#step-2) for the purpose of whitelisting in S3 bucket policy. 
diff --git a/website/docs/d/caller_identity.html.markdown b/website/docs/d/caller_identity.html.markdown index 11aac53c374..6fdcf2657f2 100644 --- a/website/docs/d/caller_identity.html.markdown +++ b/website/docs/d/caller_identity.html.markdown @@ -7,7 +7,7 @@ description: |- connection to AWS. --- -# aws_caller_identity +# Data Source: aws_caller_identity Use this data source to get the access to the effective Account ID, User ID, and ARN in which Terraform is authorized. diff --git a/website/docs/d/canonical_user_id.html.markdown b/website/docs/d/canonical_user_id.html.markdown index b6d608145b5..e9b19657f03 100644 --- a/website/docs/d/canonical_user_id.html.markdown +++ b/website/docs/d/canonical_user_id.html.markdown @@ -7,7 +7,7 @@ description: |- connection to AWS. --- -# aws_canonical_user_id +# Data Source: aws_canonical_user_id The Canonical User ID data source allows access to the [canonical user ID](http://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html) for the effective account in which Terraform is working. diff --git a/website/docs/d/cloudformation_stack.html.markdown b/website/docs/d/cloudformation_stack.html.markdown index 5e4f0a604c2..815b8813180 100644 --- a/website/docs/d/cloudformation_stack.html.markdown +++ b/website/docs/d/cloudformation_stack.html.markdown @@ -6,7 +6,7 @@ description: |- Provides metadata of a CloudFormation stack (e.g. outputs) --- -# aws_cloudformation_stack +# Data Source: aws_cloudformation_stack The CloudFormation Stack data source allows access to stack outputs and other useful data including the template body. diff --git a/website/docs/d/cloudtrail_service_account.html.markdown b/website/docs/d/cloudtrail_service_account.html.markdown index 8825de8bcd0..a1819edcab2 100644 --- a/website/docs/d/cloudtrail_service_account.html.markdown +++ b/website/docs/d/cloudtrail_service_account.html.markdown @@ -6,7 +6,7 @@ description: |- Get AWS CloudTrail Service Account ID for storing trail data in S3. 
--- -# aws_cloudtrail_service_account +# Data Source: aws_cloudtrail_service_account Use this data source to get the Account ID of the [AWS CloudTrail Service Account](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-regions.html) in a given region for the purpose of allowing CloudTrail to store trail data in S3. diff --git a/website/docs/d/db_instance.html.markdown b/website/docs/d/db_instance.html.markdown index 4f0d31ec01e..04146fdd710 100644 --- a/website/docs/d/db_instance.html.markdown +++ b/website/docs/d/db_instance.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on an RDS Database Instance. --- -# aws_db_instance +# Data Source: aws_db_instance Use this data source to get information about an RDS instance diff --git a/website/docs/d/db_snapshot.html.markdown b/website/docs/d/db_snapshot.html.markdown index 43c07112b47..8d1354c5957 100644 --- a/website/docs/d/db_snapshot.html.markdown +++ b/website/docs/d/db_snapshot.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on a DB Snapshot. --- -# aws_db_snapshot +# Data Source: aws_db_snapshot Use this data source to get information about a DB Snapshot for use when provisioning DB instances diff --git a/website/docs/d/dynamodb_table.html.markdown b/website/docs/d/dynamodb_table.html.markdown index 6aab4f26577..1b55f23193d 100644 --- a/website/docs/d/dynamodb_table.html.markdown +++ b/website/docs/d/dynamodb_table.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a DynamoDB table data source. --- -# aws_dynamodb_table +# Data Source: aws_dynamodb_table Provides information about a DynamoDB table. diff --git a/website/docs/d/ebs_snapshot.html.markdown b/website/docs/d/ebs_snapshot.html.markdown index 6001151f3df..57f4e9f72b2 100644 --- a/website/docs/d/ebs_snapshot.html.markdown +++ b/website/docs/d/ebs_snapshot.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on an EBS Snapshot. 
--- -# aws_ebs_snapshot +# Data Source: aws_ebs_snapshot Use this data source to get information about an EBS Snapshot for use when provisioning EBS Volumes diff --git a/website/docs/d/ebs_snapshot_ids.html.markdown b/website/docs/d/ebs_snapshot_ids.html.markdown index eb3eed17fc5..6113ef7a651 100644 --- a/website/docs/d/ebs_snapshot_ids.html.markdown +++ b/website/docs/d/ebs_snapshot_ids.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a list of EBS snapshot IDs. --- -# aws_ebs_snapshot_ids +# Data Source: aws_ebs_snapshot_ids Use this data source to get a list of EBS Snapshot IDs matching the specified criteria. diff --git a/website/docs/d/ebs_volume.html.markdown b/website/docs/d/ebs_volume.html.markdown index 37dad32a362..c46b089a53d 100644 --- a/website/docs/d/ebs_volume.html.markdown +++ b/website/docs/d/ebs_volume.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on an EBS volume. --- -# aws_ebs_volume +# Data Source: aws_ebs_volume Use this data source to get information about an EBS volume for use in other resources. diff --git a/website/docs/d/ecr_repository.html.markdown b/website/docs/d/ecr_repository.html.markdown index 92e5e19acdb..91ee3730851 100644 --- a/website/docs/d/ecr_repository.html.markdown +++ b/website/docs/d/ecr_repository.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about an ECR Repository --- -# aws_ecr_repository +# Data Source: aws_ecr_repository The ECR Repository data source allows the ARN, Repository URI and Registry ID to be retrieved for an ECR repository. 
diff --git a/website/docs/d/ecs_cluster.html.markdown b/website/docs/d/ecs_cluster.html.markdown index 4fcd931b7d1..309ba59dd96 100644 --- a/website/docs/d/ecs_cluster.html.markdown +++ b/website/docs/d/ecs_cluster.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about an ecs cluster --- -# aws_ecs_cluster +# Data Source: aws_ecs_cluster The ECS Cluster data source allows access to details of a specific cluster within an AWS ECS service. diff --git a/website/docs/d/ecs_container_definition.html.markdown b/website/docs/d/ecs_container_definition.html.markdown index f18c0bce11f..f6c8f2587ca 100644 --- a/website/docs/d/ecs_container_definition.html.markdown +++ b/website/docs/d/ecs_container_definition.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a single container within an ecs task definition --- -# aws_ecs_container_definition +# Data Source: aws_ecs_container_definition The ECS container definition data source allows access to details of a specific container within an AWS ECS service. diff --git a/website/docs/d/ecs_task_definition.html.markdown b/website/docs/d/ecs_task_definition.html.markdown index a13b59f53dd..8e680543d5a 100644 --- a/website/docs/d/ecs_task_definition.html.markdown +++ b/website/docs/d/ecs_task_definition.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about an ecs task definition --- -# aws_ecs_task_definition +# Data Source: aws_ecs_task_definition The ECS task definition data source allows access to details of a specific AWS ECS task definition. diff --git a/website/docs/d/efs_file_system.html.markdown b/website/docs/d/efs_file_system.html.markdown index 240634e7d18..82fc30e4d9a 100644 --- a/website/docs/d/efs_file_system.html.markdown +++ b/website/docs/d/efs_file_system.html.markdown @@ -6,7 +6,7 @@ description: |- Provides an Elastic File System (EFS) data source. --- -# aws_efs_file_system +# Data Source: aws_efs_file_system Provides information about an Elastic File System (EFS). 
diff --git a/website/docs/d/efs_mount_target.html.markdown b/website/docs/d/efs_mount_target.html.markdown index 7a55b9a1fdd..0cb47413d84 100644 --- a/website/docs/d/efs_mount_target.html.markdown +++ b/website/docs/d/efs_mount_target.html.markdown @@ -6,7 +6,7 @@ description: |- Provides an Elastic File System Mount Target (EFS) data source. --- -# aws_efs_mount_target +# Data Source: aws_efs_mount_target Provides information about an Elastic File System Mount Target (EFS). diff --git a/website/docs/d/eip.html.markdown b/website/docs/d/eip.html.markdown index 3ee0d39382c..9a6535eedb4 100644 --- a/website/docs/d/eip.html.markdown +++ b/website/docs/d/eip.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific Elastic IP --- -# aws_eip +# Data Source: aws_eip `aws_eip` provides details about a specific Elastic IP. diff --git a/website/docs/d/elastic_beanstalk_solution_stack.html.markdown b/website/docs/d/elastic_beanstalk_solution_stack.html.markdown index e2b3c2b1100..31e1b6983e0 100644 --- a/website/docs/d/elastic_beanstalk_solution_stack.html.markdown +++ b/website/docs/d/elastic_beanstalk_solution_stack.html.markdown @@ -6,7 +6,7 @@ description: |- Get an elastic beanstalk solution stack. --- -# aws_elastic_beanstalk_solution_stack +# Data Source: aws_elastic_beanstalk_solution_stack Use this data source to get the name of a elastic beanstalk solution stack. @@ -37,4 +37,4 @@ a single solution stack, or use `most_recent` to choose the most recent one. * `name` - The name of the solution stack. 
-[beanstalk-platforms]: http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/concepts.platforms.html "AWS Elastic Beanstalk Supported Platforms documentation" \ No newline at end of file +[beanstalk-platforms]: http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/concepts.platforms.html "AWS Elastic Beanstalk Supported Platforms documentation" diff --git a/website/docs/d/elasticache_cluster.html.markdown b/website/docs/d/elasticache_cluster.html.markdown index ae2cb4cbd53..6764941c0b6 100644 --- a/website/docs/d/elasticache_cluster.html.markdown +++ b/website/docs/d/elasticache_cluster.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on an ElastiCache Cluster resource. --- -# aws_elasticache_cluster +# Data Source: aws_elasticache_cluster Use this data source to get information about an Elasticache Cluster diff --git a/website/docs/d/elasticache_replication_group.html.markdown b/website/docs/d/elasticache_replication_group.html.markdown index 53b41abfbd4..fbb02603aaf 100644 --- a/website/docs/d/elasticache_replication_group.html.markdown +++ b/website/docs/d/elasticache_replication_group.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on an ElastiCache Replication Group resource. --- -# aws_elasticache_replication_group +# Data Source: aws_elasticache_replication_group Use this data source to get information about an Elasticache Replication Group. diff --git a/website/docs/d/elb_hosted_zone_id.html.markdown b/website/docs/d/elb_hosted_zone_id.html.markdown index ab7589b0f59..e92c9307188 100644 --- a/website/docs/d/elb_hosted_zone_id.html.markdown +++ b/website/docs/d/elb_hosted_zone_id.html.markdown @@ -6,7 +6,7 @@ description: |- Get AWS Elastic Load Balancing Hosted Zone Id --- -# aws_elb_hosted_zone_id +# Data Source: aws_elb_hosted_zone_id Use this data source to get the HostedZoneId of the AWS Elastic Load Balancing HostedZoneId in a given region for the purpose of using in an AWS Route53 Alias. 
diff --git a/website/docs/d/elb_service_account.html.markdown b/website/docs/d/elb_service_account.html.markdown index b61d5f8d193..7433f369ebe 100644 --- a/website/docs/d/elb_service_account.html.markdown +++ b/website/docs/d/elb_service_account.html.markdown @@ -6,7 +6,7 @@ description: |- Get AWS Elastic Load Balancing Service Account --- -# aws_elb_service_account +# Data Source: aws_elb_service_account Use this data source to get the Account ID of the [AWS Elastic Load Balancing Service Account](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy) in a given region for the purpose of whitelisting in S3 bucket policy. diff --git a/website/docs/d/iam_account_alias.html.markdown b/website/docs/d/iam_account_alias.html.markdown index 0e44b23473e..07fbebc490e 100644 --- a/website/docs/d/iam_account_alias.html.markdown +++ b/website/docs/d/iam_account_alias.html.markdown @@ -7,7 +7,7 @@ description: |- connection to AWS. --- -# aws_iam_account_alias +# Data Source: aws_iam_account_alias The IAM Account Alias data source allows access to the account alias for the effective account in which Terraform is working. diff --git a/website/docs/d/iam_group.html.markdown b/website/docs/d/iam_group.html.markdown index fba2bc0b7a2..fc0941fed63 100644 --- a/website/docs/d/iam_group.html.markdown +++ b/website/docs/d/iam_group.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on a Amazon IAM group --- -# aws_iam_group +# Data Source: aws_iam_group This data source can be used to fetch information about a specific IAM group. 
By using this data source, you can reference IAM group diff --git a/website/docs/d/iam_instance_profile.html.markdown b/website/docs/d/iam_instance_profile.html.markdown index d393d708553..4eb866a0799 100644 --- a/website/docs/d/iam_instance_profile.html.markdown +++ b/website/docs/d/iam_instance_profile.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on a Amazon IAM Instance Profile --- -# aws_iam_instance_profile +# Data Source: aws_iam_instance_profile This data source can be used to fetch information about a specific IAM instance profile. By using this data source, you can reference IAM diff --git a/website/docs/d/iam_policy_document.html.markdown b/website/docs/d/iam_policy_document.html.markdown index e755b42f4ee..bb12823fead 100644 --- a/website/docs/d/iam_policy_document.html.markdown +++ b/website/docs/d/iam_policy_document.html.markdown @@ -6,7 +6,7 @@ description: |- Generates an IAM policy document in JSON format --- -# aws_iam_policy_document +# Data Source: aws_iam_policy_document Generates an IAM policy document in JSON format. diff --git a/website/docs/d/iam_role.html.markdown b/website/docs/d/iam_role.html.markdown index 75f3a18126a..b2e981794e3 100644 --- a/website/docs/d/iam_role.html.markdown +++ b/website/docs/d/iam_role.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on a Amazon IAM role --- -# aws_iam_role +# Data Source: aws_iam_role This data source can be used to fetch information about a specific IAM role. 
By using this data source, you can reference IAM role diff --git a/website/docs/d/iam_server_certificate.html.markdown b/website/docs/d/iam_server_certificate.html.markdown index f34344c18df..cc51baa37f1 100644 --- a/website/docs/d/iam_server_certificate.html.markdown +++ b/website/docs/d/iam_server_certificate.html.markdown @@ -6,7 +6,7 @@ description: |- Get information about a server certificate --- -# aws_iam_server_certificate +# Data Source: aws_iam_server_certificate Use this data source to lookup information about IAM Server Certificates. diff --git a/website/docs/d/iam_user.html.markdown b/website/docs/d/iam_user.html.markdown index 0446f436880..09835aad46f 100644 --- a/website/docs/d/iam_user.html.markdown +++ b/website/docs/d/iam_user.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on a Amazon IAM user --- -# aws_iam_user +# Data Source: aws_iam_user This data source can be used to fetch information about a specific IAM user. By using this data source, you can reference IAM user diff --git a/website/docs/d/instance.html.markdown b/website/docs/d/instance.html.markdown index 27c425b8840..530db418e22 100644 --- a/website/docs/d/instance.html.markdown +++ b/website/docs/d/instance.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on an Amazon EC2 Instance. --- -# aws_instance +# Data Source: aws_instance Use this data source to get the ID of an Amazon EC2 Instance for use in other resources. diff --git a/website/docs/d/instances.html.markdown b/website/docs/d/instances.html.markdown index a3ccb0b9f58..22713c0be77 100644 --- a/website/docs/d/instances.html.markdown +++ b/website/docs/d/instances.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on an Amazon EC2 instances. --- -# aws_instances +# Data Source: aws_instances Use this data source to get IDs or IPs of Amazon EC2 instances to be referenced elsewhere, e.g. 
to allow easier migration from another management solution diff --git a/website/docs/d/internet_gateway.html.markdown b/website/docs/d/internet_gateway.html.markdown index 1773b725eac..288631578b8 100644 --- a/website/docs/d/internet_gateway.html.markdown +++ b/website/docs/d/internet_gateway.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific Internet Gateway --- -# aws_internet_gateway +# Data Source: aws_internet_gateway `aws_internet_gateway` provides details about a specific Internet Gateway. diff --git a/website/docs/d/ip_ranges.html.markdown b/website/docs/d/ip_ranges.html.markdown index 7c6a27acab9..c5490f7f075 100644 --- a/website/docs/d/ip_ranges.html.markdown +++ b/website/docs/d/ip_ranges.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on AWS IP ranges. --- -# aws_ip_ranges +# Data Source: aws_ip_ranges Use this data source to get the [IP ranges][1] of various AWS products and services. diff --git a/website/docs/d/kinesis_stream.html.markdown b/website/docs/d/kinesis_stream.html.markdown index a764073db95..c8630fbe061 100644 --- a/website/docs/d/kinesis_stream.html.markdown +++ b/website/docs/d/kinesis_stream.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a Kinesis Stream data source. --- -# aws_kinesis_stream +# Data Source: aws_kinesis_stream Use this data source to get information about a Kinesis Stream for use in other resources. 
@@ -42,4 +42,4 @@ are exported: [1]: https://aws.amazon.com/documentation/kinesis/ [2]: https://docs.aws.amazon.com/streams/latest/dev/kinesis-using-sdk-java-after-resharding.html#kinesis-using-sdk-java-resharding-data-routing -[3]: https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html \ No newline at end of file +[3]: https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html diff --git a/website/docs/d/kms_alias.html.markdown b/website/docs/d/kms_alias.html.markdown index 3d8e0c715d7..354685732b3 100644 --- a/website/docs/d/kms_alias.html.markdown +++ b/website/docs/d/kms_alias.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on a AWS Key Management Service (KMS) Alias --- -# aws_kms_alias +# Data Source: aws_kms_alias Use this data source to get the ARN of a KMS key alias. By using this data source, you can reference key alias diff --git a/website/docs/d/kms_ciphertext.html.markdown b/website/docs/d/kms_ciphertext.html.markdown index 9eb491aba4e..503940dc5fd 100644 --- a/website/docs/d/kms_ciphertext.html.markdown +++ b/website/docs/d/kms_ciphertext.html.markdown @@ -6,7 +6,7 @@ description: |- Provides ciphertext encrypted using a KMS key --- -# aws_kms_ciphertext +# Data Source: aws_kms_ciphertext The KMS ciphertext data source allows you to encrypt plaintext into ciphertext by using an AWS KMS customer master key. diff --git a/website/docs/d/kms_secret.html.markdown b/website/docs/d/kms_secret.html.markdown index 0423e2c66f6..113ad814a02 100644 --- a/website/docs/d/kms_secret.html.markdown +++ b/website/docs/d/kms_secret.html.markdown @@ -6,7 +6,7 @@ description: |- Provides secret data encrypted with the KMS service --- -# aws_kms_secret +# Data Source: aws_kms_secret The KMS secret data source allows you to use data encrypted with the AWS KMS service within your resource definitions. 
diff --git a/website/docs/d/lb.html.markdown b/website/docs/d/lb.html.markdown index d50991d97d9..d7a2066288d 100644 --- a/website/docs/d/lb.html.markdown +++ b/website/docs/d/lb.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a Load Balancer data source. --- -# aws_lb +# Data Source: aws_lb ~> **Note:** `aws_alb` is known as `aws_lb`. The functionality is identical. diff --git a/website/docs/d/lb_listener.html.markdown b/website/docs/d/lb_listener.html.markdown index b5ab57051c5..ee75a430cf2 100644 --- a/website/docs/d/lb_listener.html.markdown +++ b/website/docs/d/lb_listener.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a Load Balancer Listener data source. --- -# aws_lb_listener +# Data Source: aws_lb_listener ~> **Note:** `aws_alb_listener` is known as `aws_lb_listener`. The functionality is identical. diff --git a/website/docs/d/lb_target_group.html.markdown b/website/docs/d/lb_target_group.html.markdown index c78a0e3e577..210d69e50fe 100644 --- a/website/docs/d/lb_target_group.html.markdown +++ b/website/docs/d/lb_target_group.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a Load Balancer Target Group data source. --- -# aws_lb_target_group +# Data Source: aws_lb_target_group ~> **Note:** `aws_alb_target_group` is known as `aws_lb_target_group`. The functionality is identical. diff --git a/website/docs/d/nat_gateway.html.markdown b/website/docs/d/nat_gateway.html.markdown index 5ca998a998e..f8fd3a8a48d 100644 --- a/website/docs/d/nat_gateway.html.markdown +++ b/website/docs/d/nat_gateway.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific Nat Gateway --- -# aws_nat_gateway +# Data Source: aws_nat_gateway Provides details about a specific Nat Gateway. 
diff --git a/website/docs/d/partition.html.markdown b/website/docs/d/partition.html.markdown index 728ab20f110..24ce6935dc0 100644 --- a/website/docs/d/partition.html.markdown +++ b/website/docs/d/partition.html.markdown @@ -6,7 +6,7 @@ description: |- Get AWS partition identifier --- -# aws_partition +# Data Source: aws_partition Use this data source to lookup current AWS partition in which Terraform is working diff --git a/website/docs/d/prefix_list.html.markdown b/website/docs/d/prefix_list.html.markdown index 27c0a26889b..22d655c41db 100644 --- a/website/docs/d/prefix_list.html.markdown +++ b/website/docs/d/prefix_list.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific prefix list --- -# aws_prefix_list +# Data Source: aws_prefix_list `aws_prefix_list` provides details about a specific prefix list (PL) in the current region. diff --git a/website/docs/d/rds_cluster.html.markdown b/website/docs/d/rds_cluster.html.markdown index 2a65039f24e..5c863c5a137 100644 --- a/website/docs/d/rds_cluster.html.markdown +++ b/website/docs/d/rds_cluster.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a RDS cluster data source. --- -# aws_rds_cluster +# Data Source: aws_rds_cluster Provides information about a RDS cluster. diff --git a/website/docs/d/redshift_service_account.html.markdown b/website/docs/d/redshift_service_account.html.markdown index dbaf12faa8b..460c8ab15e7 100644 --- a/website/docs/d/redshift_service_account.html.markdown +++ b/website/docs/d/redshift_service_account.html.markdown @@ -6,7 +6,7 @@ description: |- Get AWS Redshift Service Account for storing audit data in S3. --- -# aws_redshift_service_account +# Data Source: aws_redshift_service_account Use this data source to get the Account ID of the [AWS Redshift Service Account](http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging) in a given region for the purpose of allowing Redshift to store audit data in S3. 
diff --git a/website/docs/d/region.html.markdown b/website/docs/d/region.html.markdown index 77d1cfdb8ea..9728b856013 100644 --- a/website/docs/d/region.html.markdown +++ b/website/docs/d/region.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific service region --- -# aws_region +# Data Source: aws_region `aws_region` provides details about a specific AWS region. diff --git a/website/docs/d/route53_zone.html.markdown b/website/docs/d/route53_zone.html.markdown index ef53956eb08..1b6c0b09734 100644 --- a/website/docs/d/route53_zone.html.markdown +++ b/website/docs/d/route53_zone.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific Route 53 Hosted Zone --- -# aws_route53_zone +# Data Source: aws_route53_zone `aws_route53_zone` provides details about a specific Route 53 Hosted Zone. diff --git a/website/docs/d/route_table.html.markdown b/website/docs/d/route_table.html.markdown index edcb1199a1f..6286932d27a 100644 --- a/website/docs/d/route_table.html.markdown +++ b/website/docs/d/route_table.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific Route Table --- -# aws_route_table +# Data Source: aws_route_table `aws_route_table` provides details about a specific Route Table. diff --git a/website/docs/d/s3_bucket.html.markdown b/website/docs/d/s3_bucket.html.markdown index 1d435185e90..1d5301c0b7a 100644 --- a/website/docs/d/s3_bucket.html.markdown +++ b/website/docs/d/s3_bucket.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific S3 bucket --- -# aws_s3_bucket +# Data Source: aws_s3_bucket Provides details about a specific S3 bucket. @@ -69,4 +69,4 @@ The following attributes are exported: * `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. * `region` - The AWS region this bucket resides in. 
* `website_endpoint` - The website endpoint, if the bucket is configured with a website. If not, this will be an empty string. -* `website_domain` - The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. \ No newline at end of file +* `website_domain` - The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. diff --git a/website/docs/d/s3_bucket_object.html.markdown b/website/docs/d/s3_bucket_object.html.markdown index 6b2936c3519..93e96ce1b55 100644 --- a/website/docs/d/s3_bucket_object.html.markdown +++ b/website/docs/d/s3_bucket_object.html.markdown @@ -6,7 +6,7 @@ description: |- Provides metadata and optionally content of an S3 object --- -# aws_s3_bucket_object +# Data Source: aws_s3_bucket_object The S3 object data source allows access to the metadata and _optionally_ (see below) content of an object stored inside S3 bucket. diff --git a/website/docs/d/security_group.html.markdown b/website/docs/d/security_group.html.markdown index fcd4572d262..65175790033 100644 --- a/website/docs/d/security_group.html.markdown +++ b/website/docs/d/security_group.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific Security Group --- -# aws_security_group +# Data Source: aws_security_group `aws_security_group` provides details about a specific Security Group. diff --git a/website/docs/d/sns_topic.html.markdown b/website/docs/d/sns_topic.html.markdown index 94eb78b05cd..82726fd417f 100644 --- a/website/docs/d/sns_topic.html.markdown +++ b/website/docs/d/sns_topic.html.markdown @@ -6,7 +6,7 @@ description: |- Get information on a Amazon Simple Notification Service (SNS) Topic --- -# aws_sns_topic +# Data Source: aws_sns_topic Use this data source to get the ARN of a topic in AWS Simple Notification Service (SNS). 
By using this data source, you can reference SNS topics diff --git a/website/docs/d/ssm_parameter.html.markdown b/website/docs/d/ssm_parameter.html.markdown index 4bda67dfa8c..a7dbcd1370d 100644 --- a/website/docs/d/ssm_parameter.html.markdown +++ b/website/docs/d/ssm_parameter.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a SSM Parameter datasource --- -# aws_ssm_parameter +# Data Source: aws_ssm_parameter Provides an SSM Parameter data source. diff --git a/website/docs/d/subnet.html.markdown b/website/docs/d/subnet.html.markdown index 4c0389b29b1..440f5b20fb6 100644 --- a/website/docs/d/subnet.html.markdown +++ b/website/docs/d/subnet.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific VPC subnet --- -# aws_subnet +# Data Source: aws_subnet `aws_subnet` provides details about a specific VPC subnet. diff --git a/website/docs/d/subnet_ids.html.markdown b/website/docs/d/subnet_ids.html.markdown index 5fcadbe4578..bab6870f156 100644 --- a/website/docs/d/subnet_ids.html.markdown +++ b/website/docs/d/subnet_ids.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a list of subnet Ids for a VPC --- -# aws_subnet_ids +# Data Source: aws_subnet_ids `aws_subnet_ids` provides a list of ids for a vpc_id diff --git a/website/docs/d/vpc.html.markdown b/website/docs/d/vpc.html.markdown index 3d1779ac907..a003c0311d4 100644 --- a/website/docs/d/vpc.html.markdown +++ b/website/docs/d/vpc.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific VPC --- -# aws_vpc +# Data Source: aws_vpc `aws_vpc` provides details about a specific VPC. diff --git a/website/docs/d/vpc_endpoint.html.markdown b/website/docs/d/vpc_endpoint.html.markdown index c42e6dbc28f..0f29249e951 100644 --- a/website/docs/d/vpc_endpoint.html.markdown +++ b/website/docs/d/vpc_endpoint.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific VPC endpoint. 
--- -# aws_vpc_endpoint +# Data Source: aws_vpc_endpoint The VPC Endpoint data source provides details about a specific VPC endpoint. diff --git a/website/docs/d/vpc_endpoint_service.html.markdown b/website/docs/d/vpc_endpoint_service.html.markdown index fae9778a9de..887e3d7e461 100644 --- a/website/docs/d/vpc_endpoint_service.html.markdown +++ b/website/docs/d/vpc_endpoint_service.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific AWS service that can be specified when creating a VPC endpoint. --- -# aws_vpc_endpoint_service +# Data Source: aws_vpc_endpoint_service The VPC Endpoint Service data source allows access to a specific AWS service that can be specified when creating a VPC endpoint within the region diff --git a/website/docs/d/vpc_peering_connection.html.markdown b/website/docs/d/vpc_peering_connection.html.markdown index 490afbadf27..817f6635c85 100644 --- a/website/docs/d/vpc_peering_connection.html.markdown +++ b/website/docs/d/vpc_peering_connection.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific VPC peering connection. --- -# aws_vpc_peering_connection +# Data Source: aws_vpc_peering_connection The VPC Peering Connection data source provides details about a specific VPC peering connection. diff --git a/website/docs/d/vpn_gateway.html.markdown b/website/docs/d/vpn_gateway.html.markdown index 4fa2ac55299..c117b0b0aec 100644 --- a/website/docs/d/vpn_gateway.html.markdown +++ b/website/docs/d/vpn_gateway.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific VPN gateway. --- -# aws_vpn_gateway +# Data Source: aws_vpn_gateway The VPN Gateway data source provides details about a specific VPN gateway. 
From 09a59a8b15aed3f4b27e56c8c932a01da34f809b Mon Sep 17 00:00:00 2001 From: Atsushi Ishibashi Date: Thu, 4 Jan 2018 16:36:17 +0900 Subject: [PATCH 105/350] Use ListAttachedRolePoliciesPages --- aws/resource_aws_iam_role.go | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_iam_role.go b/aws/resource_aws_iam_role.go index 439868062d9..43526174c0f 100644 --- a/aws/resource_aws_iam_role.go +++ b/aws/resource_aws_iam_role.go @@ -267,17 +267,23 @@ func resourceAwsIamRoleDelete(d *schema.ResourceData, meta interface{}) error { } if d.Get("force_detach_policies").(bool) { - policiesResp, err := iamconn.ListAttachedRolePolicies(&iam.ListAttachedRolePoliciesInput{ + // For managed policies + managedPolicies := make([]*string, 0) + err = iamconn.ListAttachedRolePoliciesPages(&iam.ListAttachedRolePoliciesInput{ RoleName: aws.String(d.Id()), + }, func(page *iam.ListAttachedRolePoliciesOutput, lastPage bool) bool { + for _, v := range page.AttachedPolicies { + managedPolicies = append(managedPolicies, v.PolicyArn) + } + return len(page.AttachedPolicies) > 0 }) if err != nil { return fmt.Errorf("Error listing Policies for IAM Role (%s) when trying to delete: %s", d.Id(), err) } - // Loop and remove the Policies from the Role - if len(policiesResp.AttachedPolicies) > 0 { - for _, i := range policiesResp.AttachedPolicies { - _, err := iamconn.DetachRolePolicy(&iam.DetachRolePolicyInput{ - PolicyArn: i.PolicyArn, + if len(managedPolicies) > 0 { + for _, parn := range managedPolicies { + _, err = iamconn.DetachRolePolicy(&iam.DetachRolePolicyInput{ + PolicyArn: parn, RoleName: aws.String(d.Id()), }) if err != nil { @@ -287,20 +293,20 @@ func resourceAwsIamRoleDelete(d *schema.ResourceData, meta interface{}) error { } // For inline policies - rolePolicyNames := make([]*string, 0) + inlinePolicies := make([]*string, 0) err = iamconn.ListRolePoliciesPages(&iam.ListRolePoliciesInput{ RoleName: aws.String(d.Id()), }, func(page 
*iam.ListRolePoliciesOutput, lastPage bool) bool { for _, v := range page.PolicyNames { - rolePolicyNames = append(rolePolicyNames, v) + inlinePolicies = append(inlinePolicies, v) } return len(page.PolicyNames) > 0 }) if err != nil { return fmt.Errorf("Error listing inline Policies for IAM Role (%s) when trying to delete: %s", d.Id(), err) } - if len(rolePolicyNames) > 0 { - for _, pname := range rolePolicyNames { + if len(inlinePolicies) > 0 { + for _, pname := range inlinePolicies { _, err := iamconn.DeleteRolePolicy(&iam.DeleteRolePolicyInput{ PolicyName: pname, RoleName: aws.String(d.Id()), From 10772d9379e454b7872bf2c459aa9b6f52b6c0d7 Mon Sep 17 00:00:00 2001 From: VEBER Arnaud Date: Thu, 4 Jan 2018 14:40:53 +0100 Subject: [PATCH 106/350] chore(vendor): bump aws-sdk-go to v1.12.55 --- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/rds/api.go | 193 ++-- .../aws/aws-sdk-go/service/workspaces/api.go | 500 ++++++----- .../aws/aws-sdk-go/service/workspaces/doc.go | 4 +- vendor/vendor.json | 840 +++++++++--------- 6 files changed, 780 insertions(+), 760 deletions(-) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 4fc11726d89..56f08e38629 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -1654,6 +1654,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index f71be27c6f6..a92ed43b1c7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = 
"aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.12.53" +const SDKVersion = "1.12.55" diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go index 41fc4059f1e..e658c1cdfa2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -7941,23 +7941,16 @@ func (c *RDS) RebootDBInstanceRequest(input *RebootDBInstanceInput) (req *reques // RebootDBInstance API operation for Amazon Relational Database Service. // -// Rebooting a DB instance restarts the database engine service. A reboot also -// applies to the DB instance any modifications to the associated DB parameter -// group that were pending. Rebooting a DB instance results in a momentary outage -// of the instance, during which the DB instance status is set to rebooting. -// If the RDS instance is configured for MultiAZ, it is possible that the reboot -// is conducted through a failover. An Amazon RDS event is created when the -// reboot is completed. -// -// If your DB instance is deployed in multiple Availability Zones, you can force -// a failover from one AZ to the other during the reboot. You might force a -// failover to test the availability of your DB instance deployment or to restore -// operations to the original AZ after a failover occurs. -// -// The time required to reboot is a function of the specific database engine's -// crash recovery process. To improve the reboot time, we recommend that you -// reduce database activities as much as possible during the reboot process -// to reduce rollback activity for in-transit transactions. +// You might need to reboot your DB instance, usually for maintenance reasons. +// For example, if you make certain modifications, or if you change the DB parameter +// group associated with the DB instance, you must reboot the instance for the +// changes to take effect. 
+// +// Rebooting a DB instance restarts the database engine service. Rebooting a +// DB instance results in a momentary outage, during which the DB instance status +// is set to rebooting. +// +// For more information about rebooting, see Rebooting a DB Instance (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_RebootInstance.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11202,7 +11195,7 @@ type CreateDBClusterInput struct { // The port number on which the instances in the DB cluster accept connections. // - // Default: 3306 + // Default: 3306 if engine is set as aurora or 5432 if set to aurora-postgresql. Port *int64 `type:"integer"` // A URL that contains a Signature Version 4 signed request for the CreateDBCluster @@ -11709,8 +11702,7 @@ func (s *CreateDBClusterSnapshotOutput) SetDBClusterSnapshot(v *DBClusterSnapsho type CreateDBInstanceInput struct { _ struct{} `type:"structure"` - // The amount of storage (in gigabytes) to be initially allocated for the DB - // instance. + // The amount of storage (in gibibytes) to allocate for the DB instance. // // Type: Integer // @@ -11724,9 +11716,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 5 to 6144. + // * General Purpose (SSD) storage (gp2): Must be an integer from 5 to 16384. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 6144. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 16384. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -11734,9 +11726,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 5 to 6144. 
+ // * General Purpose (SSD) storage (gp2): Must be an integer from 5 to 16384. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 6144. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 16384. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -11744,9 +11736,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 5 to 6144. + // * General Purpose (SSD) storage (gp2): Must be an integer from 5 to 16384. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 6144. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 16384. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -11754,9 +11746,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 10 to 6144. + // * General Purpose (SSD) storage (gp2): Must be an integer from 10 to 16384. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 6144. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 16384. // // * Magnetic storage (standard): Must be an integer from 10 to 3072. 
// @@ -12036,13 +12028,17 @@ type CreateDBInstanceInput struct { // // MariaDB // + // * 10.1.26 (supported in all AWS Regions) + // // * 10.1.23 (supported in all AWS Regions) // // * 10.1.19 (supported in all AWS Regions) // // * 10.1.14 (supported in all AWS Regions except us-east-2) // - // 10.0.31 (supported in all AWS Regions) + // 10.0.32 (supported in all AWS Regions) + // + // * 10.0.31 (supported in all AWS Regions) // // * 10.0.28 (supported in all AWS Regions) // @@ -12051,9 +12047,15 @@ type CreateDBInstanceInput struct { // * 10.0.17 (supported in all AWS Regions except us-east-2, ca-central-1, // eu-west-2) // + // Microsoft SQL Server 2017 + // + // 14.00.1000.169.v1 (supported for all editions, and all AWS Regions) + // // Microsoft SQL Server 2016 // - // 13.00.4422.0.v1 (supported for all editions, and all AWS Regions) + // 13.00.4451.0.v1 (supported for all editions, and all AWS Regions) + // + // * 13.00.4422.0.v1 (supported for all editions, and all AWS Regions) // // * 13.00.2164.0.v1 (supported for all editions, and all AWS Regions) // @@ -12178,9 +12180,9 @@ type CreateDBInstanceInput struct { // values, see see Amazon RDS Provisioned IOPS Storage to Improve Performance // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS). // - // Constraints: Must be a multiple between 3 and 10 of the storage amount for + // Constraints: Must be a multiple between 1 and 50 of the storage amount for // the DB instance. Must also be an integer multiple of 1000. For example, if - // the size of your DB instance is 500 GB, then your Iops value can be 2000, + // the size of your DB instance is 500 GiB, then your Iops value can be 2000, // 3000, 4000, or 5000. Iops *int64 `type:"integer"` @@ -13945,7 +13947,7 @@ type DBCluster struct { _ struct{} `type:"structure"` // For all database engines except Amazon Aurora, AllocatedStorage specifies - // the allocated storage size in gigabytes (GB). 
For Aurora, AllocatedStorage + // the allocated storage size in gibibytes (GiB). For Aurora, AllocatedStorage // always returns 1, because Aurora DB cluster storage size is not fixed, but // instead automatically adjusts as needed. AllocatedStorage *int64 `type:"integer"` @@ -14538,7 +14540,7 @@ func (s *DBClusterRole) SetStatus(v string) *DBClusterRole { type DBClusterSnapshot struct { _ struct{} `type:"structure"` - // Specifies the allocated storage size in gigabytes (GB). + // Specifies the allocated storage size in gibibytes (GiB). AllocatedStorage *int64 `type:"integer"` // Provides the list of EC2 Availability Zones that instances in the DB cluster @@ -14934,7 +14936,7 @@ func (s *DBEngineVersion) SetValidUpgradeTarget(v []*UpgradeTarget) *DBEngineVer type DBInstance struct { _ struct{} `type:"structure"` - // Specifies the allocated storage size specified in gigabytes. + // Specifies the allocated storage size specified in gibibytes. AllocatedStorage *int64 `type:"integer"` // Indicates that minor version patches are applied automatically. @@ -15792,7 +15794,7 @@ func (s *DBSecurityGroupMembership) SetStatus(v string) *DBSecurityGroupMembersh type DBSnapshot struct { _ struct{} `type:"structure"` - // Specifies the allocated storage size in gigabytes (GB). + // Specifies the allocated storage size in gibibytes (GiB). AllocatedStorage *int64 `type:"integer"` // Specifies the name of the Availability Zone the DB instance was located in @@ -21969,72 +21971,14 @@ func (s *ModifyDBClusterSnapshotAttributeOutput) SetDBClusterSnapshotAttributesR type ModifyDBInstanceInput struct { _ struct{} `type:"structure"` - // The new storage capacity of the RDS instance. Changing this setting does - // not result in an outage and the change is applied during the next maintenance - // window unless ApplyImmediately is set to true for this request. 
- // - // MySQL - // - // Default: Uses existing setting - // - // Valid Values: 5-6144 - // - // Constraints: Value supplied must be at least 10% greater than the current - // value. Values that are not at least 10% greater than the existing value are - // rounded up so that they are 10% greater than the current value. - // - // Type: Integer - // - // MariaDB - // - // Default: Uses existing setting - // - // Valid Values: 5-6144 + // The new amount of storage (in gibibytes) to allocate for the DB instance. // - // Constraints: Value supplied must be at least 10% greater than the current - // value. Values that are not at least 10% greater than the existing value are - // rounded up so that they are 10% greater than the current value. + // For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at + // least 10% greater than the current value. Values that are not at least 10% + // greater than the existing value are rounded up so that they are 10% greater + // than the current value. // - // Type: Integer - // - // PostgreSQL - // - // Default: Uses existing setting - // - // Valid Values: 5-6144 - // - // Constraints: Value supplied must be at least 10% greater than the current - // value. Values that are not at least 10% greater than the existing value are - // rounded up so that they are 10% greater than the current value. - // - // Type: Integer - // - // Oracle - // - // Default: Uses existing setting - // - // Valid Values: 10-6144 - // - // Constraints: Value supplied must be at least 10% greater than the current - // value. Values that are not at least 10% greater than the existing value are - // rounded up so that they are 10% greater than the current value. - // - // SQL Server - // - // Cannot be modified. - // - // If you choose to migrate your DB instance from using standard storage to - // using Provisioned IOPS, or from using Provisioned IOPS to using standard - // storage, the process can take time. 
The duration of the migration depends - // on several factors such as database load, storage size, storage type (standard - // or Provisioned IOPS), amount of IOPS provisioned (if any), and the number - // of prior scale storage operations. Typical migration times are under 24 hours, - // but the process can take up to several days in some cases. During the migration, - // the DB instance is available for use, but might experience performance degradation. - // While the migration takes place, nightly backups for the instance are suspended. - // No other Amazon RDS operations can take place for the instance, including - // modifying the instance, rebooting the instance, deleting the instance, creating - // a Read Replica for the instance, and creating a DB snapshot of the instance. + // For the valid values for allocated storage for each engine, see CreateDBInstance. AllocatedStorage *int64 `type:"integer"` // Indicates that major version upgrades are allowed. Changing this parameter @@ -22257,24 +22201,12 @@ type ModifyDBInstanceInput struct { EngineVersion *string `type:"string"` // The new Provisioned IOPS (I/O operations per second) value for the RDS instance. + // // Changing this setting does not result in an outage and the change is applied // during the next maintenance window unless the ApplyImmediately parameter - // is set to true for this request. - // - // Default: Uses existing setting - // - // Constraints: Value supplied must be at least 10% greater than the current - // value. Values that are not at least 10% greater than the existing value are - // rounded up so that they are 10% greater than the current value. If you are - // migrating from Provisioned IOPS to standard storage, set this value to 0. - // The DB instance will require a reboot for the change in storage type to take - // effect. - // - // SQL Server - // - // Setting the IOPS value for the SQL Server database engine is not supported. 
- // - // Type: Integer + // is set to true for this request. If you are migrating from Provisioned IOPS + // to standard storage, set this value to 0. The DB instance will require a + // reboot for the change in storage type to take effect. // // If you choose to migrate your DB instance from using standard storage to // using Provisioned IOPS, or from using Provisioned IOPS to using standard @@ -22288,6 +22220,13 @@ type ModifyDBInstanceInput struct { // No other Amazon RDS operations can take place for the instance, including // modifying the instance, rebooting the instance, deleting the instance, creating // a Read Replica for the instance, and creating a DB snapshot of the instance. + // + // Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied + // must be at least 10% greater than the current value. Values that are not + // at least 10% greater than the existing value are rounded up so that they + // are 10% greater than the current value. + // + // Default: Uses existing setting Iops *int64 `type:"integer"` // The license model for the DB instance. @@ -22462,9 +22401,23 @@ type ModifyDBInstanceInput struct { // Specifies the storage type to be associated with the DB instance. // - // Valid values: standard | gp2 | io1 + // If you specify Provisioned IOPS (io1), you must also include a value for + // the Iops parameter. // - // If you specify io1, you must also include a value for the Iops parameter. + // If you choose to migrate your DB instance from using standard storage to + // using Provisioned IOPS, or from using Provisioned IOPS to using standard + // storage, the process can take time. The duration of the migration depends + // on several factors such as database load, storage size, storage type (standard + // or Provisioned IOPS), amount of IOPS provisioned (if any), and the number + // of prior scale storage operations. 
Typical migration times are under 24 hours, + // but the process can take up to several days in some cases. During the migration, + // the DB instance is available for use, but might experience performance degradation. + // While the migration takes place, nightly backups for the instance are suspended. + // No other Amazon RDS operations can take place for the instance, including + // modifying the instance, rebooting the instance, deleting the instance, creating + // a Read Replica for the instance, and creating a DB snapshot of the instance. + // + // Valid values: standard | gp2 | io1 // // Default: io1 if the Iops parameter is specified, otherwise standard StorageType *string `type:"string"` @@ -28455,7 +28408,7 @@ func (s *ValidDBInstanceModificationsMessage) SetStorage(v []*ValidStorageOption type ValidStorageOptions struct { _ struct{} `type:"structure"` - // The valid range of Provisioned IOPS to gigabytes of storage multiplier. For + // The valid range of Provisioned IOPS to gibibytes of storage multiplier. For // example, 3-10, which means that provisioned IOPS can be between 3 and 10 // times storage. IopsToStorageRatio []*DoubleRange `locationNameList:"DoubleRange" type:"list"` @@ -28463,7 +28416,7 @@ type ValidStorageOptions struct { // The valid range of provisioned IOPS. For example, 1000-20000. ProvisionedIops []*Range `locationNameList:"Range" type:"list"` - // The valid range of storage in gigabytes. For example, 100 to 6144. + // The valid range of storage in gibibytes. For example, 100 to 16384. StorageSize []*Range `locationNameList:"Range" type:"list"` // The valid storage types for your DB instance. For example, gp2, io1. 
diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go index 3e89a13a55e..1eae0467869 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go @@ -55,7 +55,7 @@ func (c *WorkSpaces) CreateTagsRequest(input *CreateTagsInput) (req *request.Req // CreateTags API operation for Amazon WorkSpaces. // -// Creates tags for a WorkSpace. +// Creates tags for the specified WorkSpace. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -224,7 +224,7 @@ func (c *WorkSpaces) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Req // DeleteTags API operation for Amazon WorkSpaces. // -// Deletes tags from a WorkSpace. +// Deletes the specified tags from a WorkSpace. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -306,7 +306,7 @@ func (c *WorkSpaces) DescribeTagsRequest(input *DescribeTagsInput) (req *request // DescribeTags API operation for Amazon WorkSpaces. // -// Describes tags for a WorkSpace. +// Describes the tags for the specified WorkSpace. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -391,16 +391,9 @@ func (c *WorkSpaces) DescribeWorkspaceBundlesRequest(input *DescribeWorkspaceBun // DescribeWorkspaceBundles API operation for Amazon WorkSpaces. // -// Obtains information about the WorkSpace bundles that are available to your -// account in the specified region. +// Describes the available WorkSpace bundles. // -// You can filter the results with either the BundleIds parameter, or the Owner -// parameter, but not both. 
-// -// This operation supports pagination with the use of the NextToken request -// and response parameters. If more results are available, the NextToken response -// member contains a token that you pass in the next call to this operation -// to retrieve the next set of items. +// You can filter the results using either bundle ID or owner, but not both. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -535,14 +528,8 @@ func (c *WorkSpaces) DescribeWorkspaceDirectoriesRequest(input *DescribeWorkspac // DescribeWorkspaceDirectories API operation for Amazon WorkSpaces. // -// Retrieves information about the AWS Directory Service directories in the -// region that are registered with Amazon WorkSpaces and are available to your -// account. -// -// This operation supports pagination with the use of the NextToken request -// and response parameters. If more results are available, the NextToken response -// member contains a token that you pass in the next call to this operation -// to retrieve the next set of items. +// Describes the available AWS Directory Service directories that are registered +// with Amazon WorkSpaces. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -677,15 +664,10 @@ func (c *WorkSpaces) DescribeWorkspacesRequest(input *DescribeWorkspacesInput) ( // DescribeWorkspaces API operation for Amazon WorkSpaces. // -// Obtains information about the specified WorkSpaces. -// -// Only one of the filter parameters, such as BundleId, DirectoryId, or WorkspaceIds, -// can be specified at a time. +// Describes the specified WorkSpaces. // -// This operation supports pagination with the use of the NextToken request -// and response parameters. 
If more results are available, the NextToken response -// member contains a token that you pass in the next call to this operation -// to retrieve the next set of items. +// You can filter the results using bundle ID, directory ID, or owner, but you +// can specify only one filter at a time. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -817,7 +799,7 @@ func (c *WorkSpaces) DescribeWorkspacesConnectionStatusRequest(input *DescribeWo // DescribeWorkspacesConnectionStatus API operation for Amazon WorkSpaces. // -// Describes the connection status of a specified WorkSpace. +// Describes the connection status of the specified WorkSpaces. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -896,8 +878,7 @@ func (c *WorkSpaces) ModifyWorkspacePropertiesRequest(input *ModifyWorkspaceProp // ModifyWorkspaceProperties API operation for Amazon WorkSpaces. // -// Modifies the WorkSpace properties, including the running mode and AutoStop -// time. +// Modifies the specified WorkSpace properties. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -998,8 +979,8 @@ func (c *WorkSpaces) RebootWorkspacesRequest(input *RebootWorkspacesInput) (req // // Reboots the specified WorkSpaces. // -// To be able to reboot a WorkSpace, the WorkSpace must have a State of AVAILABLE, -// IMPAIRED, or INOPERABLE. +// You cannot reboot a WorkSpace unless its state is AVAILABLE, IMPAIRED, or +// INOPERABLE. // // This operation is asynchronous and returns before the WorkSpaces have rebooted. // @@ -1077,20 +1058,10 @@ func (c *WorkSpaces) RebuildWorkspacesRequest(input *RebuildWorkspacesInput) (re // // Rebuilds the specified WorkSpaces. 
// -// Rebuilding a WorkSpace is a potentially destructive action that can result -// in the loss of data. Rebuilding a WorkSpace causes the following to occur: -// -// * The system is restored to the image of the bundle that the WorkSpace -// is created from. Any applications that have been installed, or system -// settings that have been made since the WorkSpace was created will be lost. -// -// * The data drive (D drive) is re-created from the last automatic snapshot -// taken of the data drive. The current contents of the data drive are overwritten. -// Automatic snapshots of the data drive are taken every 12 hours, so the -// snapshot can be as much as 12 hours old. +// You cannot rebuild a WorkSpace unless its state is AVAILABLE or ERROR. // -// To be able to rebuild a WorkSpace, the WorkSpace must have a State of AVAILABLE -// or ERROR. +// Rebuilding a WorkSpace is a potentially destructive action that can result +// in the loss of data. For more information, see Rebuild a WorkSpace (http://docs.aws.amazon.com/workspaces/latest/adminguide/reset-workspace.html). // // This operation is asynchronous and returns before the WorkSpaces have been // completely rebuilt. @@ -1167,8 +1138,10 @@ func (c *WorkSpaces) StartWorkspacesRequest(input *StartWorkspacesInput) (req *r // StartWorkspaces API operation for Amazon WorkSpaces. // -// Starts the specified WorkSpaces. The WorkSpaces must have a running mode -// of AutoStop and a state of STOPPED. +// Starts the specified WorkSpaces. +// +// You cannot start a WorkSpace unless it has a running mode of AutoStop and +// a state of STOPPED. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1242,8 +1215,10 @@ func (c *WorkSpaces) StopWorkspacesRequest(input *StopWorkspacesInput) (req *req // StopWorkspaces API operation for Amazon WorkSpaces. // -// Stops the specified WorkSpaces. 
The WorkSpaces must have a running mode of -// AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR. +// Stops the specified WorkSpaces. +// +// You cannot stop a WorkSpace unless it has a running mode of AutoStop and +// a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1320,8 +1295,8 @@ func (c *WorkSpaces) TerminateWorkspacesRequest(input *TerminateWorkspacesInput) // Terminates the specified WorkSpaces. // // Terminating a WorkSpace is a permanent action and cannot be undone. The user's -// data is not maintained and will be destroyed. If you need to archive any -// user data, contact Amazon Web Services before terminating the WorkSpace. +// data is destroyed. If you need to archive any user data, contact Amazon Web +// Services before terminating the WorkSpace. // // You can terminate a WorkSpace that is in any state except SUSPENDED. // @@ -1356,12 +1331,12 @@ func (c *WorkSpaces) TerminateWorkspacesWithContext(ctx aws.Context, input *Term return out, req.Send() } -// Contains information about the compute type of a WorkSpace bundle. +// Information about the compute type. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ComputeType type ComputeType struct { _ struct{} `type:"structure"` - // The name of the compute type for the bundle. + // The compute type. Name *string `type:"string" enum:"Compute"` } @@ -1381,17 +1356,16 @@ func (s *ComputeType) SetName(v string) *ComputeType { return s } -// The request of the CreateTags operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTagsRequest type CreateTagsInput struct { _ struct{} `type:"structure"` - // The resource ID of the request. + // The ID of the resource. 
// // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` - // The tags of the request. + // The tags. Each resource can have a maximum of 50 tags. // // Tags is a required field Tags []*Tag `type:"list" required:"true"` @@ -1448,7 +1422,6 @@ func (s *CreateTagsInput) SetTags(v []*Tag) *CreateTagsInput { return s } -// The result of the CreateTags operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTagsResult type CreateTagsOutput struct { _ struct{} `type:"structure"` @@ -1464,12 +1437,11 @@ func (s CreateTagsOutput) GoString() string { return s.String() } -// Contains the inputs for the CreateWorkspaces operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspacesRequest type CreateWorkspacesInput struct { _ struct{} `type:"structure"` - // An array of structures that specify the WorkSpaces to create. + // Information about the WorkSpaces to create. // // Workspaces is a required field Workspaces []*WorkspaceRequest `min:"1" type:"list" required:"true"` @@ -1517,19 +1489,18 @@ func (s *CreateWorkspacesInput) SetWorkspaces(v []*WorkspaceRequest) *CreateWork return s } -// Contains the result of the CreateWorkspaces operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspacesResult type CreateWorkspacesOutput struct { _ struct{} `type:"structure"` - // An array of structures that represent the WorkSpaces that could not be created. + // Information about the WorkSpaces that could not be created. FailedRequests []*FailedCreateWorkspaceRequest `type:"list"` - // An array of structures that represent the WorkSpaces that were created. + // Information about the WorkSpaces that were created. // - // Because this operation is asynchronous, the identifier in WorkspaceId is - // not immediately available. If you immediately call DescribeWorkspaces with - // this identifier, no information will be returned. 
+ // Because this operation is asynchronous, the identifier returned is not immediately + // available for use with other operations. For example, if you call DescribeWorkspaces + // before the WorkSpace is created, the information returned can be incomplete. PendingRequests []*Workspace `type:"list"` } @@ -1555,27 +1526,25 @@ func (s *CreateWorkspacesOutput) SetPendingRequests(v []*Workspace) *CreateWorks return s } -// Contains default WorkSpace creation information. +// Information about defaults used to create a WorkSpace. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DefaultWorkspaceCreationProperties type DefaultWorkspaceCreationProperties struct { _ struct{} `type:"structure"` - // The identifier of any custom security groups that are applied to the WorkSpaces - // when they are created. + // The identifier of any security groups to apply to WorkSpaces when they are + // created. CustomSecurityGroupId *string `type:"string"` - // The organizational unit (OU) in the directory that the WorkSpace machine - // accounts are placed in. + // The organizational unit (OU) in the directory for the WorkSpace machine accounts. DefaultOu *string `type:"string"` - // A public IP address will be attached to all WorkSpaces that are created or - // rebuilt. + // The public IP address to attach to all WorkSpaces that are created or rebuilt. EnableInternetAccess *bool `type:"boolean"` - // Specifies if the directory is enabled for Amazon WorkDocs. + // Indicates whether the directory is enabled for Amazon WorkDocs. EnableWorkDocs *bool `type:"boolean"` - // The WorkSpace user is an administrator on the WorkSpace. + // Indicates whether the WorkSpace user is an administrator on the WorkSpace. UserEnabledAsLocalAdministrator *bool `type:"boolean"` } @@ -1619,17 +1588,16 @@ func (s *DefaultWorkspaceCreationProperties) SetUserEnabledAsLocalAdministrator( return s } -// The request of the DeleteTags operation. 
// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTagsRequest type DeleteTagsInput struct { _ struct{} `type:"structure"` - // The resource ID of the request. + // The ID of the resource. // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` - // The tag keys of the request. + // The tag keys. // // TagKeys is a required field TagKeys []*string `type:"list" required:"true"` @@ -1676,7 +1644,6 @@ func (s *DeleteTagsInput) SetTagKeys(v []*string) *DeleteTagsInput { return s } -// The result of the DeleteTags operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTagsResult type DeleteTagsOutput struct { _ struct{} `type:"structure"` @@ -1692,12 +1659,11 @@ func (s DeleteTagsOutput) GoString() string { return s.String() } -// The request of the DescribeTags operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTagsRequest type DescribeTagsInput struct { _ struct{} `type:"structure"` - // The resource ID of the request. + // The ID of the resource. // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -1735,12 +1701,11 @@ func (s *DescribeTagsInput) SetResourceId(v string) *DescribeTagsInput { return s } -// The result of the DescribeTags operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTagsResult type DescribeTagsOutput struct { _ struct{} `type:"structure"` - // The list of tags. + // The tags. TagList []*Tag `type:"list"` } @@ -1760,27 +1725,23 @@ func (s *DescribeTagsOutput) SetTagList(v []*Tag) *DescribeTagsOutput { return s } -// Contains the inputs for the DescribeWorkspaceBundles operation. 
// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundlesRequest type DescribeWorkspaceBundlesInput struct { _ struct{} `type:"structure"` - // An array of strings that contains the identifiers of the bundles to retrieve. - // This parameter cannot be combined with any other filter parameter. + // The IDs of the bundles. This parameter cannot be combined with any other + // filter. BundleIds []*string `min:"1" type:"list"` - // The NextToken value from a previous call to this operation. Pass null if - // this is the first call. + // The token for the next set of results. (You received this token from a previous + // call.) NextToken *string `min:"1" type:"string"` - // The owner of the bundles to retrieve. This parameter cannot be combined with - // any other filter parameter. - // - // This contains one of the following values: - // - // * null- Retrieves the bundles that belong to the account making the call. + // The owner of the bundles. This parameter cannot be combined with any other + // filter. // - // * AMAZON- Retrieves the bundles that are provided by AWS. + // Specify AMAZON to describe the bundles provided by AWS or null to describe + // the bundles that belong to your account. Owner *string `type:"string"` } @@ -1828,18 +1789,16 @@ func (s *DescribeWorkspaceBundlesInput) SetOwner(v string) *DescribeWorkspaceBun return s } -// Contains the results of the DescribeWorkspaceBundles operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundlesResult type DescribeWorkspaceBundlesOutput struct { _ struct{} `type:"structure"` - // An array of structures that contain information about the bundles. + // Information about the bundles. Bundles []*WorkspaceBundle `type:"list"` - // If not null, more results are available. Pass this value for the NextToken - // parameter in a subsequent call to this operation to retrieve the next set - // of items. 
This token is valid for one day and must be used within that time - // frame. + // The token to use to retrieve the next set of results, or null if there are + // no more results available. This token is valid for one day and must be used + // within that time frame. NextToken *string `min:"1" type:"string"` } @@ -1865,17 +1824,16 @@ func (s *DescribeWorkspaceBundlesOutput) SetNextToken(v string) *DescribeWorkspa return s } -// Contains the inputs for the DescribeWorkspaceDirectories operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectoriesRequest type DescribeWorkspaceDirectoriesInput struct { _ struct{} `type:"structure"` - // An array of strings that contains the directory identifiers to retrieve information - // for. If this member is null, all directories are retrieved. + // The identifiers of the directories. If the value is null, all directories + // are retrieved. DirectoryIds []*string `min:"1" type:"list"` - // The NextToken value from a previous call to this operation. Pass null if - // this is the first call. + // The token for the next set of results. (You received this token from a previous + // call.) NextToken *string `min:"1" type:"string"` } @@ -1917,18 +1875,16 @@ func (s *DescribeWorkspaceDirectoriesInput) SetNextToken(v string) *DescribeWork return s } -// Contains the results of the DescribeWorkspaceDirectories operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectoriesResult type DescribeWorkspaceDirectoriesOutput struct { _ struct{} `type:"structure"` - // An array of structures that contain information about the directories. + // Information about the directories. Directories []*WorkspaceDirectory `type:"list"` - // If not null, more results are available. Pass this value for the NextToken - // parameter in a subsequent call to this operation to retrieve the next set - // of items. 
This token is valid for one day and must be used within that time - // frame. + // The token to use to retrieve the next set of results, or null if there are + // no more results available. This token is valid for one day and must be used + // within that time frame. NextToken *string `min:"1" type:"string"` } @@ -1958,10 +1914,11 @@ func (s *DescribeWorkspaceDirectoriesOutput) SetNextToken(v string) *DescribeWor type DescribeWorkspacesConnectionStatusInput struct { _ struct{} `type:"structure"` - // The next token of the request. + // The token for the next set of results. (You received this token from a previous + // call.) NextToken *string `min:"1" type:"string"` - // An array of strings that contain the identifiers of the WorkSpaces. + // The identifiers of the WorkSpaces. WorkspaceIds []*string `min:"1" type:"list"` } @@ -2007,10 +1964,11 @@ func (s *DescribeWorkspacesConnectionStatusInput) SetWorkspaceIds(v []*string) * type DescribeWorkspacesConnectionStatusOutput struct { _ struct{} `type:"structure"` - // The next token of the result. + // The token to use to retrieve the next set of results, or null if there are + // no more results available. NextToken *string `min:"1" type:"string"` - // The connection status of the WorkSpace. + // Information about the connection status of the WorkSpace. WorkspacesConnectionStatus []*WorkspaceConnectionStatus `type:"list"` } @@ -2036,35 +1994,31 @@ func (s *DescribeWorkspacesConnectionStatusOutput) SetWorkspacesConnectionStatus return s } -// Contains the inputs for the DescribeWorkspaces operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesRequest type DescribeWorkspacesInput struct { _ struct{} `type:"structure"` - // The identifier of a bundle to obtain the WorkSpaces for. All WorkSpaces that - // are created from this bundle will be retrieved. This parameter cannot be - // combined with any other filter parameter. + // The ID of the bundle. 
All WorkSpaces that are created from this bundle are + // retrieved. This parameter cannot be combined with any other filter. BundleId *string `type:"string"` - // Specifies the directory identifier to which to limit the WorkSpaces. Optionally, - // you can specify a specific directory user with the UserName parameter. This - // parameter cannot be combined with any other filter parameter. + // The ID of the directory. In addition, you can optionally specify a specific + // directory user (see UserName). This parameter cannot be combined with any + // other filter. DirectoryId *string `type:"string"` // The maximum number of items to return. Limit *int64 `min:"1" type:"integer"` - // The NextToken value from a previous call to this operation. Pass null if - // this is the first call. + // The token for the next set of results. (You received this token from a previous + // call.) NextToken *string `min:"1" type:"string"` - // Used with the DirectoryId parameter to specify the directory user for whom - // to obtain the WorkSpace. + // The name of the directory user. You must specify this parameter with DirectoryId. UserName *string `min:"1" type:"string"` - // An array of strings that contain the identifiers of the WorkSpaces for which - // to retrieve information. This parameter cannot be combined with any other - // filter parameter. + // The IDs of the WorkSpaces. This parameter cannot be combined with any other + // filter. // // Because the CreateWorkspaces operation is asynchronous, the identifier it // returns is not immediately available. If you immediately call DescribeWorkspaces @@ -2140,21 +2094,19 @@ func (s *DescribeWorkspacesInput) SetWorkspaceIds(v []*string) *DescribeWorkspac return s } -// Contains the results for the DescribeWorkspaces operation. 
// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesResult type DescribeWorkspacesOutput struct { _ struct{} `type:"structure"` - // If not null, more results are available. Pass this value for the NextToken - // parameter in a subsequent call to this operation to retrieve the next set - // of items. This token is valid for one day and must be used within that time - // frame. + // The token to use to retrieve the next set of results, or null if there are + // no more results available. This token is valid for one day and must be used + // within that time frame. NextToken *string `min:"1" type:"string"` - // An array of structures that contain the information about the WorkSpaces. + // Information about the WorkSpaces. // - // Because the CreateWorkspaces operation is asynchronous, some of this information - // may be incomplete for a newly-created WorkSpace. + // Because CreateWorkspaces is an asynchronous operation, some of the returned + // information could be incomplete. Workspaces []*Workspace `type:"list"` } @@ -2180,7 +2132,7 @@ func (s *DescribeWorkspacesOutput) SetWorkspaces(v []*Workspace) *DescribeWorksp return s } -// Contains information about a WorkSpace that could not be created. +// Information about a WorkSpace that could not be created. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/FailedCreateWorkspaceRequest type FailedCreateWorkspaceRequest struct { _ struct{} `type:"structure"` @@ -2191,8 +2143,7 @@ type FailedCreateWorkspaceRequest struct { // The textual error message. ErrorMessage *string `type:"string"` - // A FailedCreateWorkspaceRequest$WorkspaceRequest object that contains the - // information about the WorkSpace that could not be created. + // Information about the WorkSpace. 
WorkspaceRequest *WorkspaceRequest `type:"structure"` } @@ -2224,7 +2175,7 @@ func (s *FailedCreateWorkspaceRequest) SetWorkspaceRequest(v *WorkspaceRequest) return s } -// Contains information about a WorkSpace that could not be rebooted (RebootWorkspaces), +// Information about a WorkSpace that could not be rebooted (RebootWorkspaces), // rebuilt (RebuildWorkspaces), terminated (TerminateWorkspaces), started (StartWorkspaces), // or stopped (StopWorkspaces). // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/FailedWorkspaceChangeRequest @@ -2269,6 +2220,40 @@ func (s *FailedWorkspaceChangeRequest) SetWorkspaceId(v string) *FailedWorkspace return s } +// Information about a WorkSpace modification. +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModificationState +type ModificationState struct { + _ struct{} `type:"structure"` + + // The resource. + Resource *string `type:"string" enum:"ModificationResourceEnum"` + + // The modification state. + State *string `type:"string" enum:"ModificationStateEnum"` +} + +// String returns the string representation +func (s ModificationState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModificationState) GoString() string { + return s.String() +} + +// SetResource sets the Resource field's value. +func (s *ModificationState) SetResource(v string) *ModificationState { + s.Resource = &v + return s +} + +// SetState sets the State field's value. 
+func (s *ModificationState) SetState(v string) *ModificationState { + s.State = &v + return s +} + // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspacePropertiesRequest type ModifyWorkspacePropertiesInput struct { _ struct{} `type:"structure"` @@ -2278,7 +2263,7 @@ type ModifyWorkspacePropertiesInput struct { // WorkspaceId is a required field WorkspaceId *string `type:"string" required:"true"` - // The WorkSpace properties of the request. + // The properties of the WorkSpace. // // WorkspaceProperties is a required field WorkspaceProperties *WorkspaceProperties `type:"structure" required:"true"` @@ -2337,13 +2322,12 @@ func (s ModifyWorkspacePropertiesOutput) GoString() string { return s.String() } -// Contains information used with the RebootWorkspaces operation to reboot a -// WorkSpace. +// Information used to reboot a WorkSpace. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootRequest type RebootRequest struct { _ struct{} `type:"structure"` - // The identifier of the WorkSpace to reboot. + // The identifier of the WorkSpace. // // WorkspaceId is a required field WorkspaceId *string `type:"string" required:"true"` @@ -2378,12 +2362,11 @@ func (s *RebootRequest) SetWorkspaceId(v string) *RebootRequest { return s } -// Contains the inputs for the RebootWorkspaces operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspacesRequest type RebootWorkspacesInput struct { _ struct{} `type:"structure"` - // An array of structures that specify the WorkSpaces to reboot. + // The WorkSpaces to reboot. // // RebootWorkspaceRequests is a required field RebootWorkspaceRequests []*RebootRequest `min:"1" type:"list" required:"true"` @@ -2431,12 +2414,11 @@ func (s *RebootWorkspacesInput) SetRebootWorkspaceRequests(v []*RebootRequest) * return s } -// Contains the results of the RebootWorkspaces operation. 
// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspacesResult type RebootWorkspacesOutput struct { _ struct{} `type:"structure"` - // An array of structures representing any WorkSpaces that could not be rebooted. + // Information about the WorkSpaces that could not be rebooted. FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` } @@ -2456,13 +2438,12 @@ func (s *RebootWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeReq return s } -// Contains information used with the RebuildWorkspaces operation to rebuild -// a WorkSpace. +// Information used to rebuild a WorkSpace. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildRequest type RebuildRequest struct { _ struct{} `type:"structure"` - // The identifier of the WorkSpace to rebuild. + // The identifier of the WorkSpace. // // WorkspaceId is a required field WorkspaceId *string `type:"string" required:"true"` @@ -2497,12 +2478,11 @@ func (s *RebuildRequest) SetWorkspaceId(v string) *RebuildRequest { return s } -// Contains the inputs for the RebuildWorkspaces operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspacesRequest type RebuildWorkspacesInput struct { _ struct{} `type:"structure"` - // An array of structures that specify the WorkSpaces to rebuild. + // The WorkSpaces to rebuild. // // RebuildWorkspaceRequests is a required field RebuildWorkspaceRequests []*RebuildRequest `min:"1" type:"list" required:"true"` @@ -2550,12 +2530,11 @@ func (s *RebuildWorkspacesInput) SetRebuildWorkspaceRequests(v []*RebuildRequest return s } -// Contains the results of the RebuildWorkspaces operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspacesResult type RebuildWorkspacesOutput struct { _ struct{} `type:"structure"` - // An array of structures representing any WorkSpaces that could not be rebuilt. 
+ // Information about the WorkSpaces that could not be rebuilt. FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` } @@ -2575,7 +2554,32 @@ func (s *RebuildWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRe return s } -// Describes the start request. +// Information about the root volume for a WorkSpace bundle. +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RootStorage +type RootStorage struct { + _ struct{} `type:"structure"` + + // The size of the root volume. + Capacity *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RootStorage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RootStorage) GoString() string { + return s.String() +} + +// SetCapacity sets the Capacity field's value. +func (s *RootStorage) SetCapacity(v string) *RootStorage { + s.Capacity = &v + return s +} + +// Information used to start a WorkSpace. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartRequest type StartRequest struct { _ struct{} `type:"structure"` @@ -2604,7 +2608,7 @@ func (s *StartRequest) SetWorkspaceId(v string) *StartRequest { type StartWorkspacesInput struct { _ struct{} `type:"structure"` - // The requests. + // The WorkSpaces to start. // // StartWorkspaceRequests is a required field StartWorkspaceRequests []*StartRequest `min:"1" type:"list" required:"true"` @@ -2646,7 +2650,7 @@ func (s *StartWorkspacesInput) SetStartWorkspaceRequests(v []*StartRequest) *Sta type StartWorkspacesOutput struct { _ struct{} `type:"structure"` - // The failed requests. + // Information about the WorkSpaces that could not be started. FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` } @@ -2666,7 +2670,7 @@ func (s *StartWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRequ return s } -// Describes the stop request. +// Information used to stop a WorkSpace. 
// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopRequest type StopRequest struct { _ struct{} `type:"structure"` @@ -2695,7 +2699,7 @@ func (s *StopRequest) SetWorkspaceId(v string) *StopRequest { type StopWorkspacesInput struct { _ struct{} `type:"structure"` - // The requests. + // The WorkSpaces to stop. // // StopWorkspaceRequests is a required field StopWorkspaceRequests []*StopRequest `min:"1" type:"list" required:"true"` @@ -2737,7 +2741,7 @@ func (s *StopWorkspacesInput) SetStopWorkspaceRequests(v []*StopRequest) *StopWo type StopWorkspacesOutput struct { _ struct{} `type:"structure"` - // The failed requests. + // Information about the WorkSpaces that could not be stopped. FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` } @@ -2757,7 +2761,7 @@ func (s *StopWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeReque return s } -// Describes the tag of the WorkSpace. +// Information about a tag. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/Tag type Tag struct { _ struct{} `type:"structure"` @@ -2809,13 +2813,12 @@ func (s *Tag) SetValue(v string) *Tag { return s } -// Contains information used with the TerminateWorkspaces operation to terminate -// a WorkSpace. +// Information used to terminate a WorkSpace. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateRequest type TerminateRequest struct { _ struct{} `type:"structure"` - // The identifier of the WorkSpace to terminate. + // The identifier of the WorkSpace. // // WorkspaceId is a required field WorkspaceId *string `type:"string" required:"true"` @@ -2850,12 +2853,11 @@ func (s *TerminateRequest) SetWorkspaceId(v string) *TerminateRequest { return s } -// Contains the inputs for the TerminateWorkspaces operation. 
// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspacesRequest type TerminateWorkspacesInput struct { _ struct{} `type:"structure"` - // An array of structures that specify the WorkSpaces to terminate. + // The WorkSpaces to terminate. // // TerminateWorkspaceRequests is a required field TerminateWorkspaceRequests []*TerminateRequest `min:"1" type:"list" required:"true"` @@ -2903,12 +2905,11 @@ func (s *TerminateWorkspacesInput) SetTerminateWorkspaceRequests(v []*TerminateR return s } -// Contains the results of the TerminateWorkspaces operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspacesResult type TerminateWorkspacesOutput struct { _ struct{} `type:"structure"` - // An array of structures representing any WorkSpaces that could not be terminated. + // Information about the WorkSpaces that could not be terminated. FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` } @@ -2928,12 +2929,12 @@ func (s *TerminateWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChange return s } -// Contains information about the user storage for a WorkSpace bundle. +// Information about the user storage for a WorkSpace bundle. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/UserStorage type UserStorage struct { _ struct{} `type:"structure"` - // The amount of user storage for the bundle. + // The size of the user storage. Capacity *string `min:"1" type:"string"` } @@ -2953,44 +2954,46 @@ func (s *UserStorage) SetCapacity(v string) *UserStorage { return s } -// Contains information about a WorkSpace. +// Information about a WorkSpace. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/Workspace type Workspace struct { _ struct{} `type:"structure"` - // The identifier of the bundle that the WorkSpace was created from. + // The identifier of the bundle used to create the WorkSpace. 
BundleId *string `type:"string"` - // The name of the WorkSpace as seen by the operating system. + // The name of the WorkSpace, as seen by the operating system. ComputerName *string `type:"string"` - // The identifier of the AWS Directory Service directory that the WorkSpace - // belongs to. + // The identifier of the AWS Directory Service directory for the WorkSpace. DirectoryId *string `type:"string"` - // If the WorkSpace could not be created, this contains the error code. + // If the WorkSpace could not be created, contains the error code. ErrorCode *string `type:"string"` - // If the WorkSpace could not be created, this contains a textual error message - // that describes the failure. + // If the WorkSpace could not be created, contains a textual error message that + // describes the failure. ErrorMessage *string `type:"string"` // The IP address of the WorkSpace. IpAddress *string `type:"string"` - // Specifies whether the data stored on the root volume, or C: drive, is encrypted. + // The modification states of the WorkSpace. + ModificationStates []*ModificationState `type:"list"` + + // Indicates whether the data stored on the root volume is encrypted. RootVolumeEncryptionEnabled *bool `type:"boolean"` // The operational state of the WorkSpace. State *string `type:"string" enum:"WorkspaceState"` - // The identifier of the subnet that the WorkSpace is in. + // The identifier of the subnet for the WorkSpace. SubnetId *string `type:"string"` - // The user that the WorkSpace is assigned to. + // The user for the WorkSpace. UserName *string `min:"1" type:"string"` - // Specifies whether the data stored on the user volume, or D: drive, is encrypted. + // Indicates whether the data stored on the user volume is encrypted. UserVolumeEncryptionEnabled *bool `type:"boolean"` // The KMS key used to encrypt data stored on your WorkSpace. @@ -2999,7 +3002,7 @@ type Workspace struct { // The identifier of the WorkSpace. 
WorkspaceId *string `type:"string"` - // Describes the properties of a WorkSpace. + // The properties of the WorkSpace. WorkspaceProperties *WorkspaceProperties `type:"structure"` } @@ -3049,6 +3052,12 @@ func (s *Workspace) SetIpAddress(v string) *Workspace { return s } +// SetModificationStates sets the ModificationStates field's value. +func (s *Workspace) SetModificationStates(v []*ModificationState) *Workspace { + s.ModificationStates = v + return s +} + // SetRootVolumeEncryptionEnabled sets the RootVolumeEncryptionEnabled field's value. func (s *Workspace) SetRootVolumeEncryptionEnabled(v bool) *Workspace { s.RootVolumeEncryptionEnabled = &v @@ -3097,7 +3106,7 @@ func (s *Workspace) SetWorkspaceProperties(v *WorkspaceProperties) *Workspace { return s } -// Contains information about a WorkSpace bundle. +// Information about a WorkSpace bundle. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceBundle type WorkspaceBundle struct { _ struct{} `type:"structure"` @@ -3105,21 +3114,23 @@ type WorkspaceBundle struct { // The bundle identifier. BundleId *string `type:"string"` - // A ComputeType object that specifies the compute type for the bundle. + // The compute type. For more information, see Amazon WorkSpaces Bundles (http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). ComputeType *ComputeType `type:"structure"` - // The bundle description. + // A description. Description *string `type:"string"` // The name of the bundle. Name *string `min:"1" type:"string"` - // The owner of the bundle. This contains the owner's account identifier, or + // The owner of the bundle. This is the account identifier of the owner, or // AMAZON if the bundle is provided by AWS. Owner *string `type:"string"` - // A UserStorage object that specifies the amount of user storage that the bundle - // contains. + // The size of the root volume. + RootStorage *RootStorage `type:"structure"` + + // The size of the user storage. 
UserStorage *UserStorage `type:"structure"` } @@ -3163,6 +3174,12 @@ func (s *WorkspaceBundle) SetOwner(v string) *WorkspaceBundle { return s } +// SetRootStorage sets the RootStorage field's value. +func (s *WorkspaceBundle) SetRootStorage(v *RootStorage) *WorkspaceBundle { + s.RootStorage = v + return s +} + // SetUserStorage sets the UserStorage field's value. func (s *WorkspaceBundle) SetUserStorage(v *UserStorage) *WorkspaceBundle { s.UserStorage = v @@ -3174,8 +3191,8 @@ func (s *WorkspaceBundle) SetUserStorage(v *UserStorage) *WorkspaceBundle { type WorkspaceConnectionStatus struct { _ struct{} `type:"structure"` - // The connection state of the WorkSpace. Returns UNKOWN if the WorkSpace is - // in a Stopped state. + // The connection state of the WorkSpace. The connection state is unknown if + // the WorkSpace is stopped. ConnectionState *string `type:"string" enum:"ConnectionState"` // The timestamp of the connection state check. @@ -3243,8 +3260,7 @@ type WorkspaceDirectory struct { // The directory type. DirectoryType *string `type:"string" enum:"WorkspaceDirectoryType"` - // An array of strings that contains the IP addresses of the DNS servers for - // the directory. + // The IP addresses of the DNS servers for the directory. DnsIpAddresses []*string `type:"list"` // The identifier of the IAM role. This is the role that allows Amazon WorkSpaces @@ -3258,12 +3274,10 @@ type WorkspaceDirectory struct { // The state of the directory's registration with Amazon WorkSpaces State *string `type:"string" enum:"WorkspaceDirectoryState"` - // An array of strings that contains the identifiers of the subnets used with - // the directory. + // The identifiers of the subnets used with the directory. SubnetIds []*string `type:"list"` - // A structure that specifies the default creation properties for all WorkSpaces - // in the directory. + // The default creation properties for all WorkSpaces in the directory. 
WorkspaceCreationProperties *DefaultWorkspaceCreationProperties `type:"structure"` // The identifier of the security group that is assigned to new WorkSpaces. @@ -3352,19 +3366,27 @@ func (s *WorkspaceDirectory) SetWorkspaceSecurityGroupId(v string) *WorkspaceDir return s } -// Describes the properties of a WorkSpace. +// Information about a WorkSpace. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceProperties type WorkspaceProperties struct { _ struct{} `type:"structure"` - // The running mode of the WorkSpace. AlwaysOn WorkSpaces are billed monthly. - // AutoStop WorkSpaces are billed by the hour and stopped when no longer being - // used in order to save on costs. + // The compute type. For more information, see Amazon WorkSpaces Bundles (http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). + ComputeTypeName *string `type:"string" enum:"Compute"` + + // The size of the root volume. + RootVolumeSizeGib *int64 `type:"integer"` + + // The running mode. For more information, see Manage the WorkSpace Running + // Mode (http://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html). RunningMode *string `type:"string" enum:"RunningMode"` // The time after a user logs off when WorkSpaces are automatically stopped. // Configured in 60 minute intervals. RunningModeAutoStopTimeoutInMinutes *int64 `type:"integer"` + + // The size of the user storage. + UserVolumeSizeGib *int64 `type:"integer"` } // String returns the string representation @@ -3377,6 +3399,18 @@ func (s WorkspaceProperties) GoString() string { return s.String() } +// SetComputeTypeName sets the ComputeTypeName field's value. +func (s *WorkspaceProperties) SetComputeTypeName(v string) *WorkspaceProperties { + s.ComputeTypeName = &v + return s +} + +// SetRootVolumeSizeGib sets the RootVolumeSizeGib field's value. 
+func (s *WorkspaceProperties) SetRootVolumeSizeGib(v int64) *WorkspaceProperties { + s.RootVolumeSizeGib = &v + return s +} + // SetRunningMode sets the RunningMode field's value. func (s *WorkspaceProperties) SetRunningMode(v string) *WorkspaceProperties { s.RunningMode = &v @@ -3389,44 +3423,48 @@ func (s *WorkspaceProperties) SetRunningModeAutoStopTimeoutInMinutes(v int64) *W return s } -// Contains information about a WorkSpace creation request. +// SetUserVolumeSizeGib sets the UserVolumeSizeGib field's value. +func (s *WorkspaceProperties) SetUserVolumeSizeGib(v int64) *WorkspaceProperties { + s.UserVolumeSizeGib = &v + return s +} + +// Information used to create a WorkSpace. // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceRequest type WorkspaceRequest struct { _ struct{} `type:"structure"` - // The identifier of the bundle to create the WorkSpace from. You can use the - // DescribeWorkspaceBundles operation to obtain a list of the bundles that are - // available. + // The identifier of the bundle for the WorkSpace. You can use DescribeWorkspaceBundles + // to list the available bundles. // // BundleId is a required field BundleId *string `type:"string" required:"true"` - // The identifier of the AWS Directory Service directory to create the WorkSpace - // in. You can use the DescribeWorkspaceDirectories operation to obtain a list - // of the directories that are available. + // The identifier of the AWS Directory Service directory for the WorkSpace. + // You can use DescribeWorkspaceDirectories to list the available directories. // // DirectoryId is a required field DirectoryId *string `type:"string" required:"true"` - // Specifies whether the data stored on the root volume, or C: drive, is encrypted. + // Indicates whether the data stored on the root volume is encrypted. RootVolumeEncryptionEnabled *bool `type:"boolean"` - // The tags of the WorkSpace request. + // The tags for the WorkSpace. 
Tags []*Tag `type:"list"` - // The username that the WorkSpace is assigned to. This username must exist - // in the AWS Directory Service directory specified by the DirectoryId member. + // The username of the user for the WorkSpace. This username must exist in the + // AWS Directory Service directory for the WorkSpace. // // UserName is a required field UserName *string `min:"1" type:"string" required:"true"` - // Specifies whether the data stored on the user volume, or D: drive, is encrypted. + // Indicates whether the data stored on the user volume is encrypted. UserVolumeEncryptionEnabled *bool `type:"boolean"` // The KMS key used to encrypt data stored on your WorkSpace. VolumeEncryptionKey *string `type:"string"` - // Describes the properties of a WorkSpace. + // The WorkSpace properties. WorkspaceProperties *WorkspaceProperties `type:"structure"` } @@ -3529,6 +3567,12 @@ const ( // ComputePerformance is a Compute enum value ComputePerformance = "PERFORMANCE" + + // ComputePower is a Compute enum value + ComputePower = "POWER" + + // ComputeGraphics is a Compute enum value + ComputeGraphics = "GRAPHICS" ) const ( @@ -3542,6 +3586,25 @@ const ( ConnectionStateUnknown = "UNKNOWN" ) +const ( + // ModificationResourceEnumRootVolume is a ModificationResourceEnum enum value + ModificationResourceEnumRootVolume = "ROOT_VOLUME" + + // ModificationResourceEnumUserVolume is a ModificationResourceEnum enum value + ModificationResourceEnumUserVolume = "USER_VOLUME" + + // ModificationResourceEnumComputeType is a ModificationResourceEnum enum value + ModificationResourceEnumComputeType = "COMPUTE_TYPE" +) + +const ( + // ModificationStateEnumUpdateInitiated is a ModificationStateEnum enum value + ModificationStateEnumUpdateInitiated = "UPDATE_INITIATED" + + // ModificationStateEnumUpdateInProgress is a ModificationStateEnum enum value + ModificationStateEnumUpdateInProgress = "UPDATE_IN_PROGRESS" +) + const ( // RunningModeAutoStop is a RunningMode enum value 
RunningModeAutoStop = "AUTO_STOP" @@ -3609,6 +3672,9 @@ const ( // WorkspaceStateSuspended is a WorkspaceState enum value WorkspaceStateSuspended = "SUSPENDED" + // WorkspaceStateUpdating is a WorkspaceState enum value + WorkspaceStateUpdating = "UPDATING" + // WorkspaceStateStopping is a WorkspaceState enum value WorkspaceStateStopping = "STOPPING" diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/doc.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/doc.go index 36810295f11..cae0167d1b3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/doc.go @@ -3,8 +3,8 @@ // Package workspaces provides the client and types for making API // requests to Amazon WorkSpaces. // -// This reference provides detailed information about the Amazon WorkSpaces -// operations. +// Amazon WorkSpaces enables you to provision virtual, cloud-based Microsoft +// Windows desktops for your users. // // See https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08 for more information on this service. 
// diff --git a/vendor/vendor.json b/vendor/vendor.json index 5b257d0e4bc..bdb77abd1bd 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -141,836 +141,836 @@ "revisionTime": "2017-07-27T15:54:43Z" }, { - "checksumSHA1": "rRZbZbRFXiRN25up38799OeziZw=", + "checksumSHA1": "m/lQ3DQtkr3nS4w5irRAje91Erw=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "DtuTqKH29YnLjrIJkRYX0HQtXY0=", "path": "github.com/aws/aws-sdk-go/aws/arn", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "9nE/FjZ4pYrT883KtV2/aI+Gayo=", "path": "github.com/aws/aws-sdk-go/aws/client", - 
"revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { 
"checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "OnU/n7R33oYXiB4SAGd5pK7I0Bs=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { - "checksumSHA1": "aOgB3+hNeX2svLhaX373ToSkhTg=", + "checksumSHA1": "BT2+PhuOjbAuMcLpdop0FKQY5EY=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": 
"f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "9GvAyILJ7g+VUg8Ef5DsT5GuYsg=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "iU00ZjhAml/13g+1YXT21IqoXqg=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "NStHCXEvYqG72GknZyv1jaKaeH0=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": 
"2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "yHfT5DTbeCLs4NE2Rgnqrhe15ls=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "path": 
"github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "pkeoOfZpHRvFG/AOZeTf0lwtsFg=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": 
"2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "vnYDXA1NxJ7Hu+DMfXNk1UnmkWg=", "path": "github.com/aws/aws-sdk-go/service/acm", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "DPl/OkvEUjrd+XKqX73l6nUNw3U=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "X8tOI6i+RJwXIgg1qBjDNclyG/0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "aDAaH6YiA50IrJ5Smfg0fovrniA=", "path": "github.com/aws/aws-sdk-go/service/appsync", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": 
"v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "oBXDw1zQTfxcKsK3ZjtKcS7gBLI=", "path": "github.com/aws/aws-sdk-go/service/athena", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "ITAwWyJp4t9AGfUXm9M3pFWTHVA=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Zz8qI6RloveM1zrXAglLxJZT1ZA=", "path": "github.com/aws/aws-sdk-go/service/batch", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "/nO06EpnD22+Ex80gHi4UYrAvKc=", "path": "github.com/aws/aws-sdk-go/service/budgets", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "6gM3CZZgiB0JvS7EK1c31Q8L09U=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - 
"revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "T80IDetBz1hqJpq5Wqmx3MwCh8w=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "bYrI9mxspB0xDFZEy3OIfWuez5g=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "oB+M+kOmYG28V0PuI75IF6E+/w8=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Nc3vXlV7s309PprScYpRDPQWeDQ=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "bPh7NF3mLpGMV0rIakolMPHqMyw=", "path": 
"github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "P6qyaFX9X6Nnvm3avLigjmjfYds=", "path": "github.com/aws/aws-sdk-go/service/codebuild", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "7nW1Ho2X3RcUU8FaFBhJIUeuDNw=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "m19PZt1B51QCWo1jxSbII2zzL6Q=", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "LKw7fnNwq17Eqy0clzS/LK89vS4=", "path": "github.com/aws/aws-sdk-go/service/codepipeline", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", 
+ "versionExact": "v1.12.55" }, { "checksumSHA1": "aXh1KIbNX+g+tH+lh3pk++9lm3k=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentity", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "IWi9xZz+OncotjM/vJ87Iffg2Qk=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentityprovider", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "56F6Stg8hQ1kxiAEzqB0TDctW9k=", "path": "github.com/aws/aws-sdk-go/service/configservice", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "hYCwLQdIjHj8rMHLGVyUVhecI4s=", "path": "github.com/aws/aws-sdk-go/service/databasemigrationservice", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "26CWoHQP/dyL2VzE5ZNd8zNzhko=", "path": "github.com/aws/aws-sdk-go/service/devicefarm", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + 
"revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "6g94rUHAgjcqMMTtMqKUbLU37wY=", "path": "github.com/aws/aws-sdk-go/service/directconnect", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "oFnS6I0u7KqnxK0/r1uoz8rTkxI=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "0TXXUPjrbOCHpX555B6suH36Nnk=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "INaeHZ2L5x6RlrcQBm4q1hFqNRM=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "uEv9kkBsVIjg7K4+Y8TVlU0Cc8o=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": 
"2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "3B3RtWG7IY9qhFhWGEwroeMxnPI=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "eoM9nF5iVMbuGOmkY33d19aHt8Y=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "dU5MPXUUOYD/E9sNncpFZ/U86Cw=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "pj8mBWT3HE0Iid6HSmhw7lmyZDU=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "VYGtTaSiajfKOVTbi9/SNmbiIac=", "path": 
"github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "SZ7yLDZ6RvMhpWe0Goyem64kgyA=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "WYqHhdRNsiGGBLWlBLbOItZf+zA=", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "ae7VWg/xuXpnSD6wGumN44qEd+Q=", "path": "github.com/aws/aws-sdk-go/service/elbv2", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "NbkH6F+792jQ7BW4lGCb+vJVw58=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + 
"versionExact": "v1.12.55" }, { "checksumSHA1": "5btWHj2fZrPc/zfYdJLPaOcivxI=", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Rodm1XwZ9Ncah1NLHep0behQpXg=", "path": "github.com/aws/aws-sdk-go/service/gamelift", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "oDoGvSfmO2Z099ixV2HXn+SDeHE=", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "HRmbBf3dUEBAfdC2xKaoWAGeM7Y=", "path": "github.com/aws/aws-sdk-go/service/glue", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "6JlxJoy1JCArNK2qBkaJ5IV6qBc=", "path": "github.com/aws/aws-sdk-go/service/guardduty", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": 
"f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "oZaxMqnwl2rA+V/W0tJ3uownORI=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Pg4c7tUVP15Ry9uPA3qixJXSd4I=", "path": "github.com/aws/aws-sdk-go/service/inspector", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "pZwCI4DpP5hcMa/ItKhiwo/ukd0=", "path": "github.com/aws/aws-sdk-go/service/iot", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "IoSyRZhlL0petrB28nXk5jKM9YA=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "oAFLgD0uJiVOZkFkL5dd/wUgBz4=", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - 
"version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "XDVse9fKF0RkAywzzgsO31AV4oc=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "HluEcyZNywrbKnj/aR3tXbu29d8=", "path": "github.com/aws/aws-sdk-go/service/lexmodelbuildingservice", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "wjs9YBsHx0YQH0zKBA7Ibd1UV5Y=", "path": "github.com/aws/aws-sdk-go/service/lightsail", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "4VfB5vMLNYs0y6K159YCBgo9T3c=", "path": "github.com/aws/aws-sdk-go/service/mediaconvert", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Ox3VWHYSQq0YKmlr0paUPdr5W/0=", "path": "github.com/aws/aws-sdk-go/service/medialive", - 
"revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Rs7QtkcLl3XNPnKb8ss/AhF2X50=", "path": "github.com/aws/aws-sdk-go/service/mediapackage", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "QjiIL8LrlhwrQw8FboF+wMNvUF0=", "path": "github.com/aws/aws-sdk-go/service/mediastore", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "ZY1SJNE03I6NL2OBJD9hlwVsqO0=", "path": "github.com/aws/aws-sdk-go/service/mediastoredata", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "ynB7Flcudp0VOqBVKZJ+23DtLHU=", "path": "github.com/aws/aws-sdk-go/service/mq", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": 
"fpsBu+F79ktlLRwal1GugVMUDo0=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { - "checksumSHA1": "Iqkgx2nafQPV7fjw+uP35jtF6t4=", + "checksumSHA1": "IddJCt5BrI6zRuUpFJqqnS9qrIM=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "vP1FcccUZbuUlin7ME89w1GVJtA=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "tKnVaKPOCiU6xl3/AYcdBCLtRdw=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "sCaHoPWsJXRHFbilUKwN71qFTOI=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + 
"revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "QZU8vR9cOIenYiH+Ywl4Gzfnlp0=", "path": "github.com/aws/aws-sdk-go/service/servicecatalog", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "dk6ebvA0EYgdPyc5HPKLBPEtsm4=", "path": "github.com/aws/aws-sdk-go/service/servicediscovery", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Ex1Ma0SFGpqeNuPbeXZtsliZ3zo=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "maVXeR3WDAkONlzf04e4mDgCYxo=", "path": "github.com/aws/aws-sdk-go/service/sfn", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "ADoR4mlCW5usH8iOa6mPNSy49LM=", "path": "github.com/aws/aws-sdk-go/service/shield", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - 
"versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "B3CgAFSREebpsFoFOo4vrQ6u04w=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "FfY8w4DM8XIULdRnFhd3Um8Mj8c=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Wx189wAbIhWChx4kVbvsyqKMF4U=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "ijz0rBDeR6JP/06S+97k84FRYxc=", "path": "github.com/aws/aws-sdk-go/service/ssm", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "W1oFtpaT4TWIIJrAvFcn/XdcT7g=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - 
"revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "Uw4pOUxSMbx4xBHUcOUkNhtnywE=", "path": "github.com/aws/aws-sdk-go/service/swf", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "on6d7Hydx2bM9jkFOf1JZcZZgeY=", "path": "github.com/aws/aws-sdk-go/service/waf", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": "rHqjsOndIR82gX5mSKybaRWf3UY=", "path": "github.com/aws/aws-sdk-go/service/wafregional", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { - "checksumSHA1": "5HDSvmMW7F3xzPAzughe4dEn6RM=", + "checksumSHA1": "y0XODBzpJjZvR1e9F6ULItV5nG4=", "path": "github.com/aws/aws-sdk-go/service/workspaces", - "revision": "82ad808f2307df0776c038bfd7ea85440a35c02e", - "revisionTime": "2017-12-22T21:05:23Z", - "version": "v1.12.53", - "versionExact": "v1.12.53" + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" }, { "checksumSHA1": 
"usT4LCSQItkFvFOQT7cBlkCuGaE=", From 2bd9fdd438da277f894a0d360b1300abda4ce6b7 Mon Sep 17 00:00:00 2001 From: Atsushi Ishibashi Date: Fri, 5 Jan 2018 11:19:19 +0900 Subject: [PATCH 107/350] Modify docs --- website/docs/r/ecs_service.html.markdown | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/docs/r/ecs_service.html.markdown b/website/docs/r/ecs_service.html.markdown index 2aafe1d4a7b..fd6b98940c4 100644 --- a/website/docs/r/ecs_service.html.markdown +++ b/website/docs/r/ecs_service.html.markdown @@ -63,7 +63,7 @@ into consideration during task placement. The maximum number of `placement_constraints` is `10`. Defined below. * `network_configuration` - (Optional) The network configuration for the service. This parameter is required for task definitions that use the awsvpc network mode to receive their own Elastic Network Interface, and it is not supported for other network modes. --> **Note:** As a result of an AWS limitation, a single `load_balancer` can be attached to the ECS service at most. See [related docs](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html#load-balancing-concepts). +-> **Note:** As a result of an AWS limitation, a single `load_balancer` can be attached to the ECS service at most. See [related docs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html#load-balancing-concepts). Load balancers support the following: @@ -80,7 +80,7 @@ Load balancers support the following: * `field` - (Optional) For the `spread` placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance. For the `binpack` type, valid values are `memory` and `cpu`. For the `random` type, this attribute is not - needed. For more information, see [Placement Strategy](http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementStrategy.html). + needed. 
For more information, see [Placement Strategy](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementStrategy.html). -> **Note:** for `spread`, `host` and `instanceId` will be normalized, by AWS, to be `instanceId`. This means the statefile will show `instanceId` but your config will differ if you use `host`. @@ -93,7 +93,7 @@ Load balancers support the following: for the `distinctInstance` type. For more information, see [Cluster Query Language in the Amazon EC2 Container Service Developer -Guide](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html). +Guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html). ## network_configuration @@ -101,7 +101,7 @@ Guide](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query- * `subnets` - (Required) The subnets associated with the task or service. * `security_groups` - (Optional) The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. 
-For more information, see [Task Networking](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) +For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) ## Attributes Reference From 18b0649dd14bfa02610a9f85e71979cab4938165 Mon Sep 17 00:00:00 2001 From: Gareth Oakley Date: Fri, 5 Jan 2018 11:05:50 +0700 Subject: [PATCH 108/350] vendor: Add DAX --- .../aws/aws-sdk-go/service/dax/api.go | 4677 +++++++++++++++++ .../aws/aws-sdk-go/service/dax/doc.go | 33 + .../aws/aws-sdk-go/service/dax/errors.go | 160 + .../aws/aws-sdk-go/service/dax/service.go | 95 + vendor/vendor.json | 8 + 5 files changed, 4973 insertions(+) create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dax/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dax/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dax/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dax/service.go diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/api.go b/vendor/github.com/aws/aws-sdk-go/service/dax/api.go new file mode 100644 index 00000000000..31fdef87fd9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dax/api.go @@ -0,0 +1,4677 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dax + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateCluster = "CreateCluster" + +// CreateClusterRequest generates a "aws/request.Request" representing the +// client's request for the CreateCluster operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CreateCluster for more information on using the CreateCluster +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateClusterRequest method. +// req, resp := client.CreateClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateCluster +func (c *DAX) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) { + op := &request.Operation{ + Name: opCreateCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterInput{} + } + + output = &CreateClusterOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCluster API operation for Amazon DynamoDB Accelerator (DAX). +// +// Creates a DAX cluster. All nodes in the cluster run the same DAX caching +// software. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation CreateCluster for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterAlreadyExistsFault "ClusterAlreadyExistsFault" +// You already have a DAX cluster with the given identifier. +// +// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" +// The requested DAX cluster is not in the available state. +// +// * ErrCodeInsufficientClusterCapacityFault "InsufficientClusterCapacityFault" +// There are not enough system resources to create the cluster you requested +// (or to resize an already-existing cluster). 
+// +// * ErrCodeSubnetGroupNotFoundFault "SubnetGroupNotFoundFault" +// The requested subnet group name does not refer to an existing subnet group. +// +// * ErrCodeInvalidParameterGroupStateFault "InvalidParameterGroupStateFault" +// One or more parameters in a parameter group are in an invalid state. +// +// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" +// The specified parameter group does not exist. +// +// * ErrCodeClusterQuotaForCustomerExceededFault "ClusterQuotaForCustomerExceededFault" +// You have attempted to exceed the maximum number of DAX clusters for your +// AWS account. +// +// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceededFault" +// You have attempted to exceed the maximum number of nodes for a DAX cluster. +// +// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceededFault" +// You have attempted to exceed the maximum number of nodes for your AWS account. +// +// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" +// The VPC network is in an invalid state. +// +// * ErrCodeTagQuotaPerResourceExceeded "TagQuotaPerResourceExceeded" +// You have exceeded the maximum number of tags for this DAX cluster. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateCluster +func (c *DAX) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, error) { + req, out := c.CreateClusterRequest(input) + return out, req.Send() +} + +// CreateClusterWithContext is the same as CreateCluster with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCluster for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) CreateClusterWithContext(ctx aws.Context, input *CreateClusterInput, opts ...request.Option) (*CreateClusterOutput, error) { + req, out := c.CreateClusterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateParameterGroup = "CreateParameterGroup" + +// CreateParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateParameterGroup operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateParameterGroup for more information on using the CreateParameterGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateParameterGroupRequest method. 
+// req, resp := client.CreateParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateParameterGroup +func (c *DAX) CreateParameterGroupRequest(input *CreateParameterGroupInput) (req *request.Request, output *CreateParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateParameterGroupInput{} + } + + output = &CreateParameterGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateParameterGroup API operation for Amazon DynamoDB Accelerator (DAX). +// +// Creates a new parameter group. A parameter group is a collection of parameters +// that you apply to all of the nodes in a DAX cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation CreateParameterGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeParameterGroupQuotaExceededFault "ParameterGroupQuotaExceededFault" +// You have attempted to exceed the maximum number of parameter groups. +// +// * ErrCodeParameterGroupAlreadyExistsFault "ParameterGroupAlreadyExistsFault" +// The specified parameter group already exists. +// +// * ErrCodeInvalidParameterGroupStateFault "InvalidParameterGroupStateFault" +// One or more parameters in a parameter group are in an invalid state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateParameterGroup +func (c *DAX) CreateParameterGroup(input *CreateParameterGroupInput) (*CreateParameterGroupOutput, error) { + req, out := c.CreateParameterGroupRequest(input) + return out, req.Send() +} + +// CreateParameterGroupWithContext is the same as CreateParameterGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateParameterGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) CreateParameterGroupWithContext(ctx aws.Context, input *CreateParameterGroupInput, opts ...request.Option) (*CreateParameterGroupOutput, error) { + req, out := c.CreateParameterGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSubnetGroup = "CreateSubnetGroup" + +// CreateSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateSubnetGroup operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSubnetGroup for more information on using the CreateSubnetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSubnetGroupRequest method. 
+// req, resp := client.CreateSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateSubnetGroup +func (c *DAX) CreateSubnetGroupRequest(input *CreateSubnetGroupInput) (req *request.Request, output *CreateSubnetGroupOutput) { + op := &request.Operation{ + Name: opCreateSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSubnetGroupInput{} + } + + output = &CreateSubnetGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSubnetGroup API operation for Amazon DynamoDB Accelerator (DAX). +// +// Creates a new subnet group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation CreateSubnetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeSubnetGroupAlreadyExistsFault "SubnetGroupAlreadyExistsFault" +// The specified subnet group already exists. +// +// * ErrCodeSubnetGroupQuotaExceededFault "SubnetGroupQuotaExceededFault" +// The request cannot be processed because it would exceed the allowed number +// of subnets in a subnet group. +// +// * ErrCodeSubnetQuotaExceededFault "SubnetQuotaExceededFault" +// The request cannot be processed because it would exceed the allowed number +// of subnets in a subnet group. +// +// * ErrCodeInvalidSubnet "InvalidSubnet" +// An invalid subnet identifier was specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateSubnetGroup +func (c *DAX) CreateSubnetGroup(input *CreateSubnetGroupInput) (*CreateSubnetGroupOutput, error) { + req, out := c.CreateSubnetGroupRequest(input) + return out, req.Send() +} + +// CreateSubnetGroupWithContext is the same as CreateSubnetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSubnetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) CreateSubnetGroupWithContext(ctx aws.Context, input *CreateSubnetGroupInput, opts ...request.Option) (*CreateSubnetGroupOutput, error) { + req, out := c.CreateSubnetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDecreaseReplicationFactor = "DecreaseReplicationFactor" + +// DecreaseReplicationFactorRequest generates a "aws/request.Request" representing the +// client's request for the DecreaseReplicationFactor operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DecreaseReplicationFactor for more information on using the DecreaseReplicationFactor +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DecreaseReplicationFactorRequest method. 
+// req, resp := client.DecreaseReplicationFactorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DecreaseReplicationFactor +func (c *DAX) DecreaseReplicationFactorRequest(input *DecreaseReplicationFactorInput) (req *request.Request, output *DecreaseReplicationFactorOutput) { + op := &request.Operation{ + Name: opDecreaseReplicationFactor, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecreaseReplicationFactorInput{} + } + + output = &DecreaseReplicationFactorOutput{} + req = c.newRequest(op, input, output) + return +} + +// DecreaseReplicationFactor API operation for Amazon DynamoDB Accelerator (DAX). +// +// Removes one or more nodes from a DAX cluster. +// +// You cannot use DecreaseReplicationFactor to remove the last node in a DAX +// cluster. If you need to do this, use DeleteCluster instead. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation DecreaseReplicationFactor for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" +// The requested cluster ID does not refer to an existing DAX cluster. +// +// * ErrCodeNodeNotFoundFault "NodeNotFoundFault" +// None of the nodes in the cluster have the given node ID. +// +// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" +// The requested DAX cluster is not in the available state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DecreaseReplicationFactor +func (c *DAX) DecreaseReplicationFactor(input *DecreaseReplicationFactorInput) (*DecreaseReplicationFactorOutput, error) { + req, out := c.DecreaseReplicationFactorRequest(input) + return out, req.Send() +} + +// DecreaseReplicationFactorWithContext is the same as DecreaseReplicationFactor with the addition of +// the ability to pass a context and additional request options. +// +// See DecreaseReplicationFactor for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) DecreaseReplicationFactorWithContext(ctx aws.Context, input *DecreaseReplicationFactorInput, opts ...request.Option) (*DecreaseReplicationFactorOutput, error) { + req, out := c.DecreaseReplicationFactorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteCluster = "DeleteCluster" + +// DeleteClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCluster operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCluster for more information on using the DeleteCluster +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteClusterRequest method. 
+// req, resp := client.DeleteClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteCluster +func (c *DAX) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Request, output *DeleteClusterOutput) { + op := &request.Operation{ + Name: opDeleteCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterInput{} + } + + output = &DeleteClusterOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteCluster API operation for Amazon DynamoDB Accelerator (DAX). +// +// Deletes a previously provisioned DAX cluster. DeleteCluster deletes all associated +// nodes, node endpoints and the DAX cluster itself. When you receive a successful +// response from this action, DAX immediately begins deleting the cluster; you +// cannot cancel or revert this action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation DeleteCluster for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" +// The requested cluster ID does not refer to an existing DAX cluster. +// +// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" +// The requested DAX cluster is not in the available state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteCluster +func (c *DAX) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutput, error) { + req, out := c.DeleteClusterRequest(input) + return out, req.Send() +} + +// DeleteClusterWithContext is the same as DeleteCluster with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCluster for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) DeleteClusterWithContext(ctx aws.Context, input *DeleteClusterInput, opts ...request.Option) (*DeleteClusterOutput, error) { + req, out := c.DeleteClusterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteParameterGroup = "DeleteParameterGroup" + +// DeleteParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteParameterGroup operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteParameterGroup for more information on using the DeleteParameterGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteParameterGroupRequest method. 
+// req, resp := client.DeleteParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteParameterGroup +func (c *DAX) DeleteParameterGroupRequest(input *DeleteParameterGroupInput) (req *request.Request, output *DeleteParameterGroupOutput) { + op := &request.Operation{ + Name: opDeleteParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteParameterGroupInput{} + } + + output = &DeleteParameterGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteParameterGroup API operation for Amazon DynamoDB Accelerator (DAX). +// +// Deletes the specified parameter group. You cannot delete a parameter group +// if it is associated with any DAX clusters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation DeleteParameterGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterGroupStateFault "InvalidParameterGroupStateFault" +// One or more parameters in a parameter group are in an invalid state. +// +// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" +// The specified parameter group does not exist. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteParameterGroup +func (c *DAX) DeleteParameterGroup(input *DeleteParameterGroupInput) (*DeleteParameterGroupOutput, error) { + req, out := c.DeleteParameterGroupRequest(input) + return out, req.Send() +} + +// DeleteParameterGroupWithContext is the same as DeleteParameterGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteParameterGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) DeleteParameterGroupWithContext(ctx aws.Context, input *DeleteParameterGroupInput, opts ...request.Option) (*DeleteParameterGroupOutput, error) { + req, out := c.DeleteParameterGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSubnetGroup = "DeleteSubnetGroup" + +// DeleteSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSubnetGroup operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSubnetGroup for more information on using the DeleteSubnetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSubnetGroupRequest method. 
+// req, resp := client.DeleteSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteSubnetGroup +func (c *DAX) DeleteSubnetGroupRequest(input *DeleteSubnetGroupInput) (req *request.Request, output *DeleteSubnetGroupOutput) { + op := &request.Operation{ + Name: opDeleteSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubnetGroupInput{} + } + + output = &DeleteSubnetGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteSubnetGroup API operation for Amazon DynamoDB Accelerator (DAX). +// +// Deletes a subnet group. +// +// You cannot delete a subnet group if it is associated with any DAX clusters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation DeleteSubnetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeSubnetGroupInUseFault "SubnetGroupInUseFault" +// The specified subnet group is currently in use. +// +// * ErrCodeSubnetGroupNotFoundFault "SubnetGroupNotFoundFault" +// The requested subnet group name does not refer to an existing subnet group. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteSubnetGroup +func (c *DAX) DeleteSubnetGroup(input *DeleteSubnetGroupInput) (*DeleteSubnetGroupOutput, error) { + req, out := c.DeleteSubnetGroupRequest(input) + return out, req.Send() +} + +// DeleteSubnetGroupWithContext is the same as DeleteSubnetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSubnetGroup for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) DeleteSubnetGroupWithContext(ctx aws.Context, input *DeleteSubnetGroupInput, opts ...request.Option) (*DeleteSubnetGroupOutput, error) { + req, out := c.DeleteSubnetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeClusters = "DescribeClusters" + +// DescribeClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusters operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeClusters for more information on using the DescribeClusters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeClustersRequest method. 
+// req, resp := client.DescribeClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeClusters +func (c *DAX) DescribeClustersRequest(input *DescribeClustersInput) (req *request.Request, output *DescribeClustersOutput) { + op := &request.Operation{ + Name: opDescribeClusters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeClustersInput{} + } + + output = &DescribeClustersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeClusters API operation for Amazon DynamoDB Accelerator (DAX). +// +// Returns information about all provisioned DAX clusters if no cluster identifier +// is specified, or about a specific DAX cluster if a cluster identifier is +// supplied. +// +// If the cluster is in the CREATING state, only cluster level information will +// be displayed until all of the nodes are successfully provisioned. +// +// If the cluster is in the DELETING state, only cluster level information will +// be displayed. +// +// If nodes are currently being added to the DAX cluster, node endpoint information +// and creation time for the additional nodes will not be displayed until they +// are completely provisioned. When the DAX cluster state is available, the +// cluster is ready for use. +// +// If nodes are currently being removed from the DAX cluster, no endpoint information +// for the removed nodes is displayed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation DescribeClusters for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" +// The requested cluster ID does not refer to an existing DAX cluster. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeClusters +func (c *DAX) DescribeClusters(input *DescribeClustersInput) (*DescribeClustersOutput, error) { + req, out := c.DescribeClustersRequest(input) + return out, req.Send() +} + +// DescribeClustersWithContext is the same as DescribeClusters with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeClusters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) DescribeClustersWithContext(ctx aws.Context, input *DescribeClustersInput, opts ...request.Option) (*DescribeClustersOutput, error) { + req, out := c.DescribeClustersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDefaultParameters = "DescribeDefaultParameters" + +// DescribeDefaultParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDefaultParameters operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeDefaultParameters for more information on using the DescribeDefaultParameters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDefaultParametersRequest method. +// req, resp := client.DescribeDefaultParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeDefaultParameters +func (c *DAX) DescribeDefaultParametersRequest(input *DescribeDefaultParametersInput) (req *request.Request, output *DescribeDefaultParametersOutput) { + op := &request.Operation{ + Name: opDescribeDefaultParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDefaultParametersInput{} + } + + output = &DescribeDefaultParametersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDefaultParameters API operation for Amazon DynamoDB Accelerator (DAX). +// +// Returns the default system parameter information for the DAX caching software. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation DescribeDefaultParameters for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeDefaultParameters +func (c *DAX) DescribeDefaultParameters(input *DescribeDefaultParametersInput) (*DescribeDefaultParametersOutput, error) { + req, out := c.DescribeDefaultParametersRequest(input) + return out, req.Send() +} + +// DescribeDefaultParametersWithContext is the same as DescribeDefaultParameters with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDefaultParameters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) DescribeDefaultParametersWithContext(ctx aws.Context, input *DescribeDefaultParametersInput, opts ...request.Option) (*DescribeDefaultParametersOutput, error) { + req, out := c.DescribeDefaultParametersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeEvents = "DescribeEvents" + +// DescribeEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEvents operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEvents for more information on using the DescribeEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEventsRequest method. 
+// req, resp := client.DescribeEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeEvents +func (c *DAX) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { + op := &request.Operation{ + Name: opDescribeEvents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEventsInput{} + } + + output = &DescribeEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEvents API operation for Amazon DynamoDB Accelerator (DAX). +// +// Returns events related to DAX clusters and parameter groups. You can obtain +// events specific to a particular DAX cluster or parameter group by providing +// the name as a parameter. +// +// By default, only the events occurring within the last hour are returned; +// however, you can retrieve up to 14 days' worth of events if necessary. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation DescribeEvents for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeEvents +func (c *DAX) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) { + req, out := c.DescribeEventsRequest(input) + return out, req.Send() +} + +// DescribeEventsWithContext is the same as DescribeEvents with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) DescribeEventsWithContext(ctx aws.Context, input *DescribeEventsInput, opts ...request.Option) (*DescribeEventsOutput, error) { + req, out := c.DescribeEventsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeParameterGroups = "DescribeParameterGroups" + +// DescribeParameterGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeParameterGroups operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeParameterGroups for more information on using the DescribeParameterGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeParameterGroupsRequest method. 
+// req, resp := client.DescribeParameterGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameterGroups +func (c *DAX) DescribeParameterGroupsRequest(input *DescribeParameterGroupsInput) (req *request.Request, output *DescribeParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeParameterGroupsInput{} + } + + output = &DescribeParameterGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeParameterGroups API operation for Amazon DynamoDB Accelerator (DAX). +// +// Returns a list of parameter group descriptions. If a parameter group name +// is specified, the list will contain only the descriptions for that group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation DescribeParameterGroups for usage and error information. +// +// Returned Error Codes: +// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" +// The specified parameter group does not exist. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameterGroups +func (c *DAX) DescribeParameterGroups(input *DescribeParameterGroupsInput) (*DescribeParameterGroupsOutput, error) { + req, out := c.DescribeParameterGroupsRequest(input) + return out, req.Send() +} + +// DescribeParameterGroupsWithContext is the same as DescribeParameterGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeParameterGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) DescribeParameterGroupsWithContext(ctx aws.Context, input *DescribeParameterGroupsInput, opts ...request.Option) (*DescribeParameterGroupsOutput, error) { + req, out := c.DescribeParameterGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeParameters = "DescribeParameters" + +// DescribeParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeParameters operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeParameters for more information on using the DescribeParameters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeParametersRequest method. 
+// req, resp := client.DescribeParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameters +func (c *DAX) DescribeParametersRequest(input *DescribeParametersInput) (req *request.Request, output *DescribeParametersOutput) { + op := &request.Operation{ + Name: opDescribeParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeParametersInput{} + } + + output = &DescribeParametersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeParameters API operation for Amazon DynamoDB Accelerator (DAX). +// +// Returns the detailed parameter list for a particular parameter group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation DescribeParameters for usage and error information. +// +// Returned Error Codes: +// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" +// The specified parameter group does not exist. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameters +func (c *DAX) DescribeParameters(input *DescribeParametersInput) (*DescribeParametersOutput, error) { + req, out := c.DescribeParametersRequest(input) + return out, req.Send() +} + +// DescribeParametersWithContext is the same as DescribeParameters with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeParameters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) DescribeParametersWithContext(ctx aws.Context, input *DescribeParametersInput, opts ...request.Option) (*DescribeParametersOutput, error) { + req, out := c.DescribeParametersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSubnetGroups = "DescribeSubnetGroups" + +// DescribeSubnetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSubnetGroups operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSubnetGroups for more information on using the DescribeSubnetGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSubnetGroupsRequest method. 
+// req, resp := client.DescribeSubnetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeSubnetGroups +func (c *DAX) DescribeSubnetGroupsRequest(input *DescribeSubnetGroupsInput) (req *request.Request, output *DescribeSubnetGroupsOutput) { + op := &request.Operation{ + Name: opDescribeSubnetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSubnetGroupsInput{} + } + + output = &DescribeSubnetGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSubnetGroups API operation for Amazon DynamoDB Accelerator (DAX). +// +// Returns a list of subnet group descriptions. If a subnet group name is specified, +// the list will contain only the description of that group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation DescribeSubnetGroups for usage and error information. +// +// Returned Error Codes: +// * ErrCodeSubnetGroupNotFoundFault "SubnetGroupNotFoundFault" +// The requested subnet group name does not refer to an existing subnet group. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeSubnetGroups +func (c *DAX) DescribeSubnetGroups(input *DescribeSubnetGroupsInput) (*DescribeSubnetGroupsOutput, error) { + req, out := c.DescribeSubnetGroupsRequest(input) + return out, req.Send() +} + +// DescribeSubnetGroupsWithContext is the same as DescribeSubnetGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSubnetGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) DescribeSubnetGroupsWithContext(ctx aws.Context, input *DescribeSubnetGroupsInput, opts ...request.Option) (*DescribeSubnetGroupsOutput, error) { + req, out := c.DescribeSubnetGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opIncreaseReplicationFactor = "IncreaseReplicationFactor" + +// IncreaseReplicationFactorRequest generates a "aws/request.Request" representing the +// client's request for the IncreaseReplicationFactor operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See IncreaseReplicationFactor for more information on using the IncreaseReplicationFactor +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the IncreaseReplicationFactorRequest method. 
+// req, resp := client.IncreaseReplicationFactorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/IncreaseReplicationFactor +func (c *DAX) IncreaseReplicationFactorRequest(input *IncreaseReplicationFactorInput) (req *request.Request, output *IncreaseReplicationFactorOutput) { + op := &request.Operation{ + Name: opIncreaseReplicationFactor, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &IncreaseReplicationFactorInput{} + } + + output = &IncreaseReplicationFactorOutput{} + req = c.newRequest(op, input, output) + return +} + +// IncreaseReplicationFactor API operation for Amazon DynamoDB Accelerator (DAX). +// +// Adds one or more nodes to a DAX cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation IncreaseReplicationFactor for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" +// The requested cluster ID does not refer to an existing DAX cluster. +// +// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" +// The requested DAX cluster is not in the available state. +// +// * ErrCodeInsufficientClusterCapacityFault "InsufficientClusterCapacityFault" +// There are not enough system resources to create the cluster you requested +// (or to resize an already-existing cluster). +// +// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" +// The VPC network is in an invalid state. +// +// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceededFault" +// You have attempted to exceed the maximum number of nodes for a DAX cluster. 
+// +// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceededFault" +// You have attempted to exceed the maximum number of nodes for your AWS account. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/IncreaseReplicationFactor +func (c *DAX) IncreaseReplicationFactor(input *IncreaseReplicationFactorInput) (*IncreaseReplicationFactorOutput, error) { + req, out := c.IncreaseReplicationFactorRequest(input) + return out, req.Send() +} + +// IncreaseReplicationFactorWithContext is the same as IncreaseReplicationFactor with the addition of +// the ability to pass a context and additional request options. +// +// See IncreaseReplicationFactor for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) IncreaseReplicationFactorWithContext(ctx aws.Context, input *IncreaseReplicationFactorInput, opts ...request.Option) (*IncreaseReplicationFactorOutput, error) { + req, out := c.IncreaseReplicationFactorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTags = "ListTags" + +// ListTagsRequest generates a "aws/request.Request" representing the +// client's request for the ListTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListTags for more information on using the ListTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsRequest method. +// req, resp := client.ListTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ListTags +func (c *DAX) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { + op := &request.Operation{ + Name: opListTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsInput{} + } + + output = &ListTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTags API operation for Amazon DynamoDB Accelerator (DAX). +// +// List all of the tags for a DAX cluster. You can call ListTags up to 10 times +// per second, per account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation ListTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" +// The requested cluster ID does not refer to an existing DAX cluster. +// +// * ErrCodeInvalidARNFault "InvalidARNFault" +// The Amazon Resource Name (ARN) supplied in the request is not valid. +// +// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" +// The requested DAX cluster is not in the available state. 
+// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ListTags +func (c *DAX) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { + req, out := c.ListTagsRequest(input) + return out, req.Send() +} + +// ListTagsWithContext is the same as ListTags with the addition of +// the ability to pass a context and additional request options. +// +// See ListTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, opts ...request.Option) (*ListTagsOutput, error) { + req, out := c.ListTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRebootNode = "RebootNode" + +// RebootNodeRequest generates a "aws/request.Request" representing the +// client's request for the RebootNode operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RebootNode for more information on using the RebootNode +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the RebootNodeRequest method. +// req, resp := client.RebootNodeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/RebootNode +func (c *DAX) RebootNodeRequest(input *RebootNodeInput) (req *request.Request, output *RebootNodeOutput) { + op := &request.Operation{ + Name: opRebootNode, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootNodeInput{} + } + + output = &RebootNodeOutput{} + req = c.newRequest(op, input, output) + return +} + +// RebootNode API operation for Amazon DynamoDB Accelerator (DAX). +// +// Reboots a single node of a DAX cluster. The reboot action takes place as +// soon as possible. During the reboot, the node status is set to REBOOTING. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation RebootNode for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" +// The requested cluster ID does not refer to an existing DAX cluster. +// +// * ErrCodeNodeNotFoundFault "NodeNotFoundFault" +// None of the nodes in the cluster have the given node ID. +// +// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" +// The requested DAX cluster is not in the available state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/RebootNode +func (c *DAX) RebootNode(input *RebootNodeInput) (*RebootNodeOutput, error) { + req, out := c.RebootNodeRequest(input) + return out, req.Send() +} + +// RebootNodeWithContext is the same as RebootNode with the addition of +// the ability to pass a context and additional request options. +// +// See RebootNode for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) RebootNodeWithContext(ctx aws.Context, input *RebootNodeInput, opts ...request.Option) (*RebootNodeOutput, error) { + req, out := c.RebootNodeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/TagResource +func (c *DAX) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// TagResource API operation for Amazon DynamoDB Accelerator (DAX). +// +// Associates a set of tags with a DAX resource. You can call TagResource up +// to 5 times per second, per account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" +// The requested cluster ID does not refer to an existing DAX cluster. +// +// * ErrCodeTagQuotaPerResourceExceeded "TagQuotaPerResourceExceeded" +// You have exceeded the maximum number of tags for this DAX cluster. +// +// * ErrCodeInvalidARNFault "InvalidARNFault" +// The Amazon Resource Name (ARN) supplied in the request is not valid. +// +// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" +// The requested DAX cluster is not in the available state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/TagResource +func (c *DAX) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UntagResource +func (c *DAX) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// UntagResource API operation for Amazon DynamoDB Accelerator (DAX). +// +// Removes the association of tags from a DAX resource. You can call UntagResource +// up to 5 times per second, per account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" +// The requested cluster ID does not refer to an existing DAX cluster. +// +// * ErrCodeInvalidARNFault "InvalidARNFault" +// The Amazon Resource Name (ARN) supplied in the request is not valid. +// +// * ErrCodeTagNotFoundFault "TagNotFoundFault" +// The tag does not exist. +// +// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" +// The requested DAX cluster is not in the available state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UntagResource +func (c *DAX) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateCluster = "UpdateCluster" + +// UpdateClusterRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCluster operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateCluster for more information on using the UpdateCluster +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateClusterRequest method. 
+// req, resp := client.UpdateClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateCluster +func (c *DAX) UpdateClusterRequest(input *UpdateClusterInput) (req *request.Request, output *UpdateClusterOutput) { + op := &request.Operation{ + Name: opUpdateCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateClusterInput{} + } + + output = &UpdateClusterOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateCluster API operation for Amazon DynamoDB Accelerator (DAX). +// +// Modifies the settings for a DAX cluster. You can use this action to change +// one or more cluster configuration parameters by specifying the parameters +// and the new values. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation UpdateCluster for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" +// The requested DAX cluster is not in the available state. +// +// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" +// The requested cluster ID does not refer to an existing DAX cluster. +// +// * ErrCodeInvalidParameterGroupStateFault "InvalidParameterGroupStateFault" +// One or more parameters in a parameter group are in an invalid state. +// +// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" +// The specified parameter group does not exist. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. 
+// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateCluster +func (c *DAX) UpdateCluster(input *UpdateClusterInput) (*UpdateClusterOutput, error) { + req, out := c.UpdateClusterRequest(input) + return out, req.Send() +} + +// UpdateClusterWithContext is the same as UpdateCluster with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateCluster for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) UpdateClusterWithContext(ctx aws.Context, input *UpdateClusterInput, opts ...request.Option) (*UpdateClusterOutput, error) { + req, out := c.UpdateClusterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateParameterGroup = "UpdateParameterGroup" + +// UpdateParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateParameterGroup operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateParameterGroup for more information on using the UpdateParameterGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the UpdateParameterGroupRequest method. +// req, resp := client.UpdateParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateParameterGroup +func (c *DAX) UpdateParameterGroupRequest(input *UpdateParameterGroupInput) (req *request.Request, output *UpdateParameterGroupOutput) { + op := &request.Operation{ + Name: opUpdateParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateParameterGroupInput{} + } + + output = &UpdateParameterGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateParameterGroup API operation for Amazon DynamoDB Accelerator (DAX). +// +// Modifies the parameters of a parameter group. You can modify up to 20 parameters +// in a single request by submitting a list parameter name and value pairs. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation UpdateParameterGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterGroupStateFault "InvalidParameterGroupStateFault" +// One or more parameters in a parameter group are in an invalid state. +// +// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" +// The specified parameter group does not exist. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Two or more incompatible parameters were specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateParameterGroup +func (c *DAX) UpdateParameterGroup(input *UpdateParameterGroupInput) (*UpdateParameterGroupOutput, error) { + req, out := c.UpdateParameterGroupRequest(input) + return out, req.Send() +} + +// UpdateParameterGroupWithContext is the same as UpdateParameterGroup with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateParameterGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) UpdateParameterGroupWithContext(ctx aws.Context, input *UpdateParameterGroupInput, opts ...request.Option) (*UpdateParameterGroupOutput, error) { + req, out := c.UpdateParameterGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSubnetGroup = "UpdateSubnetGroup" + +// UpdateSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSubnetGroup operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSubnetGroup for more information on using the UpdateSubnetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSubnetGroupRequest method. 
+// req, resp := client.UpdateSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateSubnetGroup +func (c *DAX) UpdateSubnetGroupRequest(input *UpdateSubnetGroupInput) (req *request.Request, output *UpdateSubnetGroupOutput) { + op := &request.Operation{ + Name: opUpdateSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSubnetGroupInput{} + } + + output = &UpdateSubnetGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSubnetGroup API operation for Amazon DynamoDB Accelerator (DAX). +// +// Modifies an existing subnet group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s +// API operation UpdateSubnetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeSubnetGroupNotFoundFault "SubnetGroupNotFoundFault" +// The requested subnet group name does not refer to an existing subnet group. +// +// * ErrCodeSubnetQuotaExceededFault "SubnetQuotaExceededFault" +// The request cannot be processed because it would exceed the allowed number +// of subnets in a subnet group. +// +// * ErrCodeSubnetInUse "SubnetInUse" +// The requested subnet is being used by another subnet group. +// +// * ErrCodeInvalidSubnet "InvalidSubnet" +// An invalid subnet identifier was specified. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateSubnetGroup +func (c *DAX) UpdateSubnetGroup(input *UpdateSubnetGroupInput) (*UpdateSubnetGroupOutput, error) { + req, out := c.UpdateSubnetGroupRequest(input) + return out, req.Send() +} + +// UpdateSubnetGroupWithContext is the same as UpdateSubnetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSubnetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DAX) UpdateSubnetGroupWithContext(ctx aws.Context, input *UpdateSubnetGroupInput, opts ...request.Option) (*UpdateSubnetGroupOutput, error) { + req, out := c.UpdateSubnetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Contains all of the attributes of a specific DAX cluster. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Cluster +type Cluster struct { + _ struct{} `type:"structure"` + + // The number of nodes in the cluster that are active (i.e., capable of serving + // requests). + ActiveNodes *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) that uniquely identifies the cluster. + ClusterArn *string `type:"string"` + + // The configuration endpoint for this DAX cluster, consisting of a DNS name + // and a port number. Client applications can specify this endpoint, rather + // than an individual node endpoint, and allow the DAX client software to intelligently + // route requests and responses to nodes in the DAX cluster. + ClusterDiscoveryEndpoint *Endpoint `type:"structure"` + + // The name of the DAX cluster. + ClusterName *string `type:"string"` + + // The description of the cluster. 
+ Description *string `type:"string"` + + // A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, + // DAX will assume this role and use the role's permissions to access DynamoDB + // on your behalf. + IamRoleArn *string `type:"string"` + + // A list of nodes to be removed from the cluster. + NodeIdsToRemove []*string `type:"list"` + + // The node type for the nodes in the cluster. (All nodes in a DAX cluster are + // of the same type.) + NodeType *string `type:"string"` + + // A list of nodes that are currently in the cluster. + Nodes []*Node `type:"list"` + + // Describes a notification topic and its status. Notification topics are used + // for publishing DAX events to subscribers using Amazon Simple Notification + // Service (SNS). + NotificationConfiguration *NotificationConfiguration `type:"structure"` + + // The parameter group being used by nodes in the cluster. + ParameterGroup *ParameterGroupStatus `type:"structure"` + + // A range of time when maintenance of DAX cluster software will be performed. + // For example: sun:01:00-sun:09:00. Cluster maintenance normally takes less + // than 30 minutes, and is performed automatically within the maintenance window. + PreferredMaintenanceWindow *string `type:"string"` + + // A list of security groups, and the status of each, for the nodes in the cluster. + SecurityGroups []*SecurityGroupMembership `type:"list"` + + // The current status of the cluster. + Status *string `type:"string"` + + // The subnet group where the DAX cluster is running. + SubnetGroup *string `type:"string"` + + // The total number of nodes in the cluster. + TotalNodes *int64 `type:"integer"` +} + +// String returns the string representation +func (s Cluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Cluster) GoString() string { + return s.String() +} + +// SetActiveNodes sets the ActiveNodes field's value. 
+func (s *Cluster) SetActiveNodes(v int64) *Cluster { + s.ActiveNodes = &v + return s +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *Cluster) SetClusterArn(v string) *Cluster { + s.ClusterArn = &v + return s +} + +// SetClusterDiscoveryEndpoint sets the ClusterDiscoveryEndpoint field's value. +func (s *Cluster) SetClusterDiscoveryEndpoint(v *Endpoint) *Cluster { + s.ClusterDiscoveryEndpoint = v + return s +} + +// SetClusterName sets the ClusterName field's value. +func (s *Cluster) SetClusterName(v string) *Cluster { + s.ClusterName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Cluster) SetDescription(v string) *Cluster { + s.Description = &v + return s +} + +// SetIamRoleArn sets the IamRoleArn field's value. +func (s *Cluster) SetIamRoleArn(v string) *Cluster { + s.IamRoleArn = &v + return s +} + +// SetNodeIdsToRemove sets the NodeIdsToRemove field's value. +func (s *Cluster) SetNodeIdsToRemove(v []*string) *Cluster { + s.NodeIdsToRemove = v + return s +} + +// SetNodeType sets the NodeType field's value. +func (s *Cluster) SetNodeType(v string) *Cluster { + s.NodeType = &v + return s +} + +// SetNodes sets the Nodes field's value. +func (s *Cluster) SetNodes(v []*Node) *Cluster { + s.Nodes = v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *Cluster) SetNotificationConfiguration(v *NotificationConfiguration) *Cluster { + s.NotificationConfiguration = v + return s +} + +// SetParameterGroup sets the ParameterGroup field's value. +func (s *Cluster) SetParameterGroup(v *ParameterGroupStatus) *Cluster { + s.ParameterGroup = v + return s +} + +// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. +func (s *Cluster) SetPreferredMaintenanceWindow(v string) *Cluster { + s.PreferredMaintenanceWindow = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. 
+func (s *Cluster) SetSecurityGroups(v []*SecurityGroupMembership) *Cluster { + s.SecurityGroups = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Cluster) SetStatus(v string) *Cluster { + s.Status = &v + return s +} + +// SetSubnetGroup sets the SubnetGroup field's value. +func (s *Cluster) SetSubnetGroup(v string) *Cluster { + s.SubnetGroup = &v + return s +} + +// SetTotalNodes sets the TotalNodes field's value. +func (s *Cluster) SetTotalNodes(v int64) *Cluster { + s.TotalNodes = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateClusterRequest +type CreateClusterInput struct { + _ struct{} `type:"structure"` + + // The Availability Zones (AZs) in which the cluster nodes will be created. + // All nodes belonging to the cluster are placed in these Availability Zones. + // Use this parameter if you want to distribute the nodes across multiple AZs. + AvailabilityZones []*string `type:"list"` + + // The cluster identifier. This parameter is stored as a lowercase string. + // + // Constraints: + // + // * A name must contain from 1 to 20 alphanumeric characters or hyphens. + // + // * The first character must be a letter. + // + // * A name cannot end with a hyphen or contain two consecutive hyphens. + // + // ClusterName is a required field + ClusterName *string `type:"string" required:"true"` + + // A description of the cluster. + Description *string `type:"string"` + + // A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, + // DAX will assume this role and use the role's permissions to access DynamoDB + // on your behalf. + // + // IamRoleArn is a required field + IamRoleArn *string `type:"string" required:"true"` + + // The compute and memory capacity of the nodes in the cluster. + // + // NodeType is a required field + NodeType *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications + // will be sent. 
+ // + // The Amazon SNS topic owner must be same as the DAX cluster owner. + NotificationTopicArn *string `type:"string"` + + // The parameter group to be associated with the DAX cluster. + ParameterGroupName *string `type:"string"` + + // Specifies the weekly time range during which maintenance on the DAX cluster + // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid + // values for ddd are: + // + // * sun + // + // * mon + // + // * tue + // + // * wed + // + // * thu + // + // * fri + // + // * sat + // + // Example: sun:05:00-sun:09:00 + // + // If you don't specify a preferred maintenance window when you create or modify + // a cache cluster, DAX assigns a 60-minute maintenance window on a randomly + // selected day of the week. + PreferredMaintenanceWindow *string `type:"string"` + + // The number of nodes in the DAX cluster. A replication factor of 1 will create + // a single-node cluster, without any read replicas. For additional fault tolerance, + // you can create a multiple node cluster with one or more read replicas. To + // do this, set ReplicationFactor to 2 or more. + // + // AWS recommends that you have at least two read replicas per cluster. + // + // ReplicationFactor is a required field + ReplicationFactor *int64 `type:"integer" required:"true"` + + // A list of security group IDs to be assigned to each node in the DAX cluster. + // (Each of the security group ID is system-generated.) + // + // If this parameter is not specified, DAX assigns the default VPC security + // group to each node. + SecurityGroupIds []*string `type:"list"` + + // The name of the subnet group to be used for the replication group. + // + // DAX clusters can only run in an Amazon VPC environment. All of the subnets + // that you specify in a subnet group must exist in the same VPC. 
+ SubnetGroupName *string `type:"string"` + + // A set of tags to associate with the DAX cluster. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateClusterInput"} + if s.ClusterName == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterName")) + } + if s.IamRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("IamRoleArn")) + } + if s.NodeType == nil { + invalidParams.Add(request.NewErrParamRequired("NodeType")) + } + if s.ReplicationFactor == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationFactor")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *CreateClusterInput) SetAvailabilityZones(v []*string) *CreateClusterInput { + s.AvailabilityZones = v + return s +} + +// SetClusterName sets the ClusterName field's value. +func (s *CreateClusterInput) SetClusterName(v string) *CreateClusterInput { + s.ClusterName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateClusterInput) SetDescription(v string) *CreateClusterInput { + s.Description = &v + return s +} + +// SetIamRoleArn sets the IamRoleArn field's value. +func (s *CreateClusterInput) SetIamRoleArn(v string) *CreateClusterInput { + s.IamRoleArn = &v + return s +} + +// SetNodeType sets the NodeType field's value. +func (s *CreateClusterInput) SetNodeType(v string) *CreateClusterInput { + s.NodeType = &v + return s +} + +// SetNotificationTopicArn sets the NotificationTopicArn field's value. 
+func (s *CreateClusterInput) SetNotificationTopicArn(v string) *CreateClusterInput { + s.NotificationTopicArn = &v + return s +} + +// SetParameterGroupName sets the ParameterGroupName field's value. +func (s *CreateClusterInput) SetParameterGroupName(v string) *CreateClusterInput { + s.ParameterGroupName = &v + return s +} + +// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. +func (s *CreateClusterInput) SetPreferredMaintenanceWindow(v string) *CreateClusterInput { + s.PreferredMaintenanceWindow = &v + return s +} + +// SetReplicationFactor sets the ReplicationFactor field's value. +func (s *CreateClusterInput) SetReplicationFactor(v int64) *CreateClusterInput { + s.ReplicationFactor = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *CreateClusterInput) SetSecurityGroupIds(v []*string) *CreateClusterInput { + s.SecurityGroupIds = v + return s +} + +// SetSubnetGroupName sets the SubnetGroupName field's value. +func (s *CreateClusterInput) SetSubnetGroupName(v string) *CreateClusterInput { + s.SubnetGroupName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateClusterInput) SetTags(v []*Tag) *CreateClusterInput { + s.Tags = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateClusterResponse +type CreateClusterOutput struct { + _ struct{} `type:"structure"` + + // A description of the DAX cluster that you have created. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s CreateClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterOutput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. 
+func (s *CreateClusterOutput) SetCluster(v *Cluster) *CreateClusterOutput { + s.Cluster = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateParameterGroupRequest +type CreateParameterGroupInput struct { + _ struct{} `type:"structure"` + + // A description of the parameter group. + Description *string `type:"string"` + + // The name of the parameter group to apply to all of the clusters in this replication + // group. + // + // ParameterGroupName is a required field + ParameterGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateParameterGroupInput"} + if s.ParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ParameterGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateParameterGroupInput) SetDescription(v string) *CreateParameterGroupInput { + s.Description = &v + return s +} + +// SetParameterGroupName sets the ParameterGroupName field's value. +func (s *CreateParameterGroupInput) SetParameterGroupName(v string) *CreateParameterGroupInput { + s.ParameterGroupName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateParameterGroupResponse +type CreateParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of a CreateParameterGroup action. 
+ ParameterGroup *ParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateParameterGroupOutput) GoString() string { + return s.String() +} + +// SetParameterGroup sets the ParameterGroup field's value. +func (s *CreateParameterGroupOutput) SetParameterGroup(v *ParameterGroup) *CreateParameterGroupOutput { + s.ParameterGroup = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateSubnetGroupRequest +type CreateSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // A description for the subnet group + Description *string `type:"string"` + + // A name for the subnet group. This value is stored as a lowercase string. + // + // SubnetGroupName is a required field + SubnetGroupName *string `type:"string" required:"true"` + + // A list of VPC subnet IDs for the subnet group. + // + // SubnetIds is a required field + SubnetIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSubnetGroupInput"} + if s.SubnetGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetGroupName")) + } + if s.SubnetIds == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. 
+func (s *CreateSubnetGroupInput) SetDescription(v string) *CreateSubnetGroupInput { + s.Description = &v + return s +} + +// SetSubnetGroupName sets the SubnetGroupName field's value. +func (s *CreateSubnetGroupInput) SetSubnetGroupName(v string) *CreateSubnetGroupInput { + s.SubnetGroupName = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *CreateSubnetGroupInput) SetSubnetIds(v []*string) *CreateSubnetGroupInput { + s.SubnetIds = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateSubnetGroupResponse +type CreateSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of a CreateSubnetGroup operation. + SubnetGroup *SubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetGroupOutput) GoString() string { + return s.String() +} + +// SetSubnetGroup sets the SubnetGroup field's value. +func (s *CreateSubnetGroupOutput) SetSubnetGroup(v *SubnetGroup) *CreateSubnetGroupOutput { + s.SubnetGroup = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DecreaseReplicationFactorRequest +type DecreaseReplicationFactorInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone(s) from which to remove nodes. + AvailabilityZones []*string `type:"list"` + + // The name of the DAX cluster from which you want to remove nodes. + // + // ClusterName is a required field + ClusterName *string `type:"string" required:"true"` + + // The new number of nodes for the DAX cluster. + // + // NewReplicationFactor is a required field + NewReplicationFactor *int64 `type:"integer" required:"true"` + + // The unique identifiers of the nodes to be removed from the cluster. 
+ NodeIdsToRemove []*string `type:"list"` +} + +// String returns the string representation +func (s DecreaseReplicationFactorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecreaseReplicationFactorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecreaseReplicationFactorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecreaseReplicationFactorInput"} + if s.ClusterName == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterName")) + } + if s.NewReplicationFactor == nil { + invalidParams.Add(request.NewErrParamRequired("NewReplicationFactor")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *DecreaseReplicationFactorInput) SetAvailabilityZones(v []*string) *DecreaseReplicationFactorInput { + s.AvailabilityZones = v + return s +} + +// SetClusterName sets the ClusterName field's value. +func (s *DecreaseReplicationFactorInput) SetClusterName(v string) *DecreaseReplicationFactorInput { + s.ClusterName = &v + return s +} + +// SetNewReplicationFactor sets the NewReplicationFactor field's value. +func (s *DecreaseReplicationFactorInput) SetNewReplicationFactor(v int64) *DecreaseReplicationFactorInput { + s.NewReplicationFactor = &v + return s +} + +// SetNodeIdsToRemove sets the NodeIdsToRemove field's value. +func (s *DecreaseReplicationFactorInput) SetNodeIdsToRemove(v []*string) *DecreaseReplicationFactorInput { + s.NodeIdsToRemove = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DecreaseReplicationFactorResponse +type DecreaseReplicationFactorOutput struct { + _ struct{} `type:"structure"` + + // A description of the DAX cluster, after you have decreased its replication + // factor. 
+ Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s DecreaseReplicationFactorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecreaseReplicationFactorOutput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. +func (s *DecreaseReplicationFactorOutput) SetCluster(v *Cluster) *DecreaseReplicationFactorOutput { + s.Cluster = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteClusterRequest +type DeleteClusterInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster to be deleted. + // + // ClusterName is a required field + ClusterName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteClusterInput"} + if s.ClusterName == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterName sets the ClusterName field's value. +func (s *DeleteClusterInput) SetClusterName(v string) *DeleteClusterInput { + s.ClusterName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteClusterResponse +type DeleteClusterOutput struct { + _ struct{} `type:"structure"` + + // A description of the DAX cluster that is being deleted. 
+ Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterOutput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. +func (s *DeleteClusterOutput) SetCluster(v *Cluster) *DeleteClusterOutput { + s.Cluster = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteParameterGroupRequest +type DeleteParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the parameter group to delete. + // + // ParameterGroupName is a required field + ParameterGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteParameterGroupInput"} + if s.ParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ParameterGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetParameterGroupName sets the ParameterGroupName field's value. +func (s *DeleteParameterGroupInput) SetParameterGroupName(v string) *DeleteParameterGroupInput { + s.ParameterGroupName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteParameterGroupResponse +type DeleteParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // A user-specified message for this action (i.e., a reason for deleting the + // parameter group). 
+ DeletionMessage *string `type:"string"` +} + +// String returns the string representation +func (s DeleteParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteParameterGroupOutput) GoString() string { + return s.String() +} + +// SetDeletionMessage sets the DeletionMessage field's value. +func (s *DeleteParameterGroupOutput) SetDeletionMessage(v string) *DeleteParameterGroupOutput { + s.DeletionMessage = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteSubnetGroupRequest +type DeleteSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the subnet group to delete. + // + // SubnetGroupName is a required field + SubnetGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSubnetGroupInput"} + if s.SubnetGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSubnetGroupName sets the SubnetGroupName field's value. +func (s *DeleteSubnetGroupInput) SetSubnetGroupName(v string) *DeleteSubnetGroupInput { + s.SubnetGroupName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteSubnetGroupResponse +type DeleteSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // A user-specified message for this action (i.e., a reason for deleting the + // subnet group). 
+ DeletionMessage *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetGroupOutput) GoString() string { + return s.String() +} + +// SetDeletionMessage sets the DeletionMessage field's value. +func (s *DeleteSubnetGroupOutput) SetDeletionMessage(v string) *DeleteSubnetGroupOutput { + s.DeletionMessage = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeClustersRequest +type DescribeClustersInput struct { + _ struct{} `type:"structure"` + + // The names of the DAX clusters being described. + ClusterNames []*string `type:"list"` + + // The maximum number of results to include in the response. If more results + // exist than the specified MaxResults value, a token is included in the response + // so that the remaining results can be retrieved. + // + // The value for MaxResults must be between 20 and 100. + MaxResults *int64 `type:"integer"` + + // An optional token returned from a prior request. Use this token for pagination + // of results from this action. If this parameter is specified, the response + // includes only results beyond the token, up to the value specified by MaxResults. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersInput) GoString() string { + return s.String() +} + +// SetClusterNames sets the ClusterNames field's value. +func (s *DescribeClustersInput) SetClusterNames(v []*string) *DescribeClustersInput { + s.ClusterNames = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeClustersInput) SetMaxResults(v int64) *DescribeClustersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClustersInput) SetNextToken(v string) *DescribeClustersInput { + s.NextToken = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeClustersResponse +type DescribeClustersOutput struct { + _ struct{} `type:"structure"` + + // The descriptions of your DAX clusters, in response to a DescribeClusters + // request. + Clusters []*Cluster `type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersOutput) GoString() string { + return s.String() +} + +// SetClusters sets the Clusters field's value. +func (s *DescribeClustersOutput) SetClusters(v []*Cluster) *DescribeClustersOutput { + s.Clusters = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClustersOutput) SetNextToken(v string) *DescribeClustersOutput { + s.NextToken = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeDefaultParametersRequest +type DescribeDefaultParametersInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to include in the response. If more results + // exist than the specified MaxResults value, a token is included in the response + // so that the remaining results can be retrieved. + // + // The value for MaxResults must be between 20 and 100. + MaxResults *int64 `type:"integer"` + + // An optional token returned from a prior request. Use this token for pagination + // of results from this action. 
If this parameter is specified, the response + // includes only results beyond the token, up to the value specified by MaxResults. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDefaultParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDefaultParametersInput) GoString() string { + return s.String() +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeDefaultParametersInput) SetMaxResults(v int64) *DescribeDefaultParametersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDefaultParametersInput) SetNextToken(v string) *DescribeDefaultParametersInput { + s.NextToken = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeDefaultParametersResponse +type DescribeDefaultParametersOutput struct { + _ struct{} `type:"structure"` + + // Provides an identifier to allow retrieval of paginated results. + NextToken *string `type:"string"` + + // A list of parameters. Each element in the list represents one parameter. + Parameters []*Parameter `type:"list"` +} + +// String returns the string representation +func (s DescribeDefaultParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDefaultParametersOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDefaultParametersOutput) SetNextToken(v string) *DescribeDefaultParametersOutput { + s.NextToken = &v + return s +} + +// SetParameters sets the Parameters field's value. 
+func (s *DescribeDefaultParametersOutput) SetParameters(v []*Parameter) *DescribeDefaultParametersOutput { + s.Parameters = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeEventsRequest +type DescribeEventsInput struct { + _ struct{} `type:"structure"` + + // The number of minutes' worth of events to retrieve. + Duration *int64 `type:"integer"` + + // The end of the time interval for which to retrieve events, specified in ISO + // 8601 format. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The maximum number of results to include in the response. If more results + // exist than the specified MaxResults value, a token is included in the response + // so that the remaining results can be retrieved. + // + // The value for MaxResults must be between 20 and 100. + MaxResults *int64 `type:"integer"` + + // An optional token returned from a prior request. Use this token for pagination + // of results from this action. If this parameter is specified, the response + // includes only results beyond the token, up to the value specified by MaxResults. + NextToken *string `type:"string"` + + // The identifier of the event source for which events will be returned. If + // not specified, then all sources are included in the response. + SourceName *string `type:"string"` + + // The event source to retrieve events for. If no value is specified, all events + // are returned. + SourceType *string `type:"string" enum:"SourceType"` + + // The beginning of the time interval to retrieve events for, specified in ISO + // 8601 format. + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s DescribeEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsInput) GoString() string { + return s.String() +} + +// SetDuration sets the Duration field's value. 
+func (s *DescribeEventsInput) SetDuration(v int64) *DescribeEventsInput { + s.Duration = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *DescribeEventsInput) SetEndTime(v time.Time) *DescribeEventsInput { + s.EndTime = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeEventsInput) SetMaxResults(v int64) *DescribeEventsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeEventsInput) SetNextToken(v string) *DescribeEventsInput { + s.NextToken = &v + return s +} + +// SetSourceName sets the SourceName field's value. +func (s *DescribeEventsInput) SetSourceName(v string) *DescribeEventsInput { + s.SourceName = &v + return s +} + +// SetSourceType sets the SourceType field's value. +func (s *DescribeEventsInput) SetSourceType(v string) *DescribeEventsInput { + s.SourceType = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *DescribeEventsInput) SetStartTime(v time.Time) *DescribeEventsInput { + s.StartTime = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeEventsResponse +type DescribeEventsOutput struct { + _ struct{} `type:"structure"` + + // An array of events. Each element in the array represents one event. + Events []*Event `type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsOutput) GoString() string { + return s.String() +} + +// SetEvents sets the Events field's value. +func (s *DescribeEventsOutput) SetEvents(v []*Event) *DescribeEventsOutput { + s.Events = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeEventsOutput) SetNextToken(v string) *DescribeEventsOutput { + s.NextToken = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameterGroupsRequest +type DescribeParameterGroupsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to include in the response. If more results + // exist than the specified MaxResults value, a token is included in the response + // so that the remaining results can be retrieved. + // + // The value for MaxResults must be between 20 and 100. + MaxResults *int64 `type:"integer"` + + // An optional token returned from a prior request. Use this token for pagination + // of results from this action. If this parameter is specified, the response + // includes only results beyond the token, up to the value specified by MaxResults. + NextToken *string `type:"string"` + + // The names of the parameter groups. + ParameterGroupNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeParameterGroupsInput) GoString() string { + return s.String() +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeParameterGroupsInput) SetMaxResults(v int64) *DescribeParameterGroupsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeParameterGroupsInput) SetNextToken(v string) *DescribeParameterGroupsInput { + s.NextToken = &v + return s +} + +// SetParameterGroupNames sets the ParameterGroupNames field's value. 
+func (s *DescribeParameterGroupsInput) SetParameterGroupNames(v []*string) *DescribeParameterGroupsInput { + s.ParameterGroupNames = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameterGroupsResponse +type DescribeParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // Provides an identifier to allow retrieval of paginated results. + NextToken *string `type:"string"` + + // An array of parameter groups. Each element in the array represents one parameter + // group. + ParameterGroups []*ParameterGroup `type:"list"` +} + +// String returns the string representation +func (s DescribeParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeParameterGroupsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeParameterGroupsOutput) SetNextToken(v string) *DescribeParameterGroupsOutput { + s.NextToken = &v + return s +} + +// SetParameterGroups sets the ParameterGroups field's value. +func (s *DescribeParameterGroupsOutput) SetParameterGroups(v []*ParameterGroup) *DescribeParameterGroupsOutput { + s.ParameterGroups = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParametersRequest +type DescribeParametersInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to include in the response. If more results + // exist than the specified MaxResults value, a token is included in the response + // so that the remaining results can be retrieved. + // + // The value for MaxResults must be between 20 and 100. + MaxResults *int64 `type:"integer"` + + // An optional token returned from a prior request. Use this token for pagination + // of results from this action. If this parameter is specified, the response + // includes only results beyond the token, up to the value specified by MaxResults. 
+ NextToken *string `type:"string"` + + // The name of the parameter group. + // + // ParameterGroupName is a required field + ParameterGroupName *string `type:"string" required:"true"` + + // How the parameter is defined. For example, system denotes a system-defined + // parameter. + Source *string `type:"string"` +} + +// String returns the string representation +func (s DescribeParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeParametersInput"} + if s.ParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ParameterGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeParametersInput) SetMaxResults(v int64) *DescribeParametersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeParametersInput) SetNextToken(v string) *DescribeParametersInput { + s.NextToken = &v + return s +} + +// SetParameterGroupName sets the ParameterGroupName field's value. +func (s *DescribeParametersInput) SetParameterGroupName(v string) *DescribeParametersInput { + s.ParameterGroupName = &v + return s +} + +// SetSource sets the Source field's value. +func (s *DescribeParametersInput) SetSource(v string) *DescribeParametersInput { + s.Source = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParametersResponse +type DescribeParametersOutput struct { + _ struct{} `type:"structure"` + + // Provides an identifier to allow retrieval of paginated results. 
+ NextToken *string `type:"string"` + + // A list of parameters within a parameter group. Each element in the list represents + // one parameter. + Parameters []*Parameter `type:"list"` +} + +// String returns the string representation +func (s DescribeParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeParametersOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeParametersOutput) SetNextToken(v string) *DescribeParametersOutput { + s.NextToken = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *DescribeParametersOutput) SetParameters(v []*Parameter) *DescribeParametersOutput { + s.Parameters = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeSubnetGroupsRequest +type DescribeSubnetGroupsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to include in the response. If more results + // exist than the specified MaxResults value, a token is included in the response + // so that the remaining results can be retrieved. + // + // The value for MaxResults must be between 20 and 100. + MaxResults *int64 `type:"integer"` + + // An optional token returned from a prior request. Use this token for pagination + // of results from this action. If this parameter is specified, the response + // includes only results beyond the token, up to the value specified by MaxResults. + NextToken *string `type:"string"` + + // The name of the subnet group. + SubnetGroupNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeSubnetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubnetGroupsInput) GoString() string { + return s.String() +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeSubnetGroupsInput) SetMaxResults(v int64) *DescribeSubnetGroupsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubnetGroupsInput) SetNextToken(v string) *DescribeSubnetGroupsInput { + s.NextToken = &v + return s +} + +// SetSubnetGroupNames sets the SubnetGroupNames field's value. +func (s *DescribeSubnetGroupsInput) SetSubnetGroupNames(v []*string) *DescribeSubnetGroupsInput { + s.SubnetGroupNames = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeSubnetGroupsResponse +type DescribeSubnetGroupsOutput struct { + _ struct{} `type:"structure"` + + // Provides an identifier to allow retrieval of paginated results. + NextToken *string `type:"string"` + + // An array of subnet groups. Each element in the array represents a single + // subnet group. + SubnetGroups []*SubnetGroup `type:"list"` +} + +// String returns the string representation +func (s DescribeSubnetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubnetGroupsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubnetGroupsOutput) SetNextToken(v string) *DescribeSubnetGroupsOutput { + s.NextToken = &v + return s +} + +// SetSubnetGroups sets the SubnetGroups field's value. +func (s *DescribeSubnetGroupsOutput) SetSubnetGroups(v []*SubnetGroup) *DescribeSubnetGroupsOutput { + s.SubnetGroups = v + return s +} + +// Represents the information required for client programs to connect to the +// configuration endpoint for a DAX cluster, or to an individual node within +// the cluster. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Endpoint +type Endpoint struct { + _ struct{} `type:"structure"` + + // The DNS hostname of the endpoint. 
+ Address *string `type:"string"` + + // The port number that applications should use to connect to the endpoint. + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// SetAddress sets the Address field's value. +func (s *Endpoint) SetAddress(v string) *Endpoint { + s.Address = &v + return s +} + +// SetPort sets the Port field's value. +func (s *Endpoint) SetPort(v int64) *Endpoint { + s.Port = &v + return s +} + +// Represents a single occurrence of something interesting within the system. +// Some examples of events are creating a DAX cluster, adding or removing a +// node, or rebooting a node. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Event +type Event struct { + _ struct{} `type:"structure"` + + // The date and time when the event occurred. + Date *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A user-defined message associated with the event. + Message *string `type:"string"` + + // The source of the event. For example, if the event occurred at the node level, + // the source would be the node ID. + SourceName *string `type:"string"` + + // Specifies the origin of this event - a cluster, a parameter group, a node + // ID, etc. + SourceType *string `type:"string" enum:"SourceType"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *Event) SetDate(v time.Time) *Event { + s.Date = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *Event) SetMessage(v string) *Event { + s.Message = &v + return s +} + +// SetSourceName sets the SourceName field's value. 
+func (s *Event) SetSourceName(v string) *Event { + s.SourceName = &v + return s +} + +// SetSourceType sets the SourceType field's value. +func (s *Event) SetSourceType(v string) *Event { + s.SourceType = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/IncreaseReplicationFactorRequest +type IncreaseReplicationFactorInput struct { + _ struct{} `type:"structure"` + + // The Availability Zones (AZs) in which the cluster nodes will be created. + // All nodes belonging to the cluster are placed in these Availability Zones. + // Use this parameter if you want to distribute the nodes across multiple AZs. + AvailabilityZones []*string `type:"list"` + + // The name of the DAX cluster that will receive additional nodes. + // + // ClusterName is a required field + ClusterName *string `type:"string" required:"true"` + + // The new number of nodes for the DAX cluster. + // + // NewReplicationFactor is a required field + NewReplicationFactor *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s IncreaseReplicationFactorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncreaseReplicationFactorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IncreaseReplicationFactorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IncreaseReplicationFactorInput"} + if s.ClusterName == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterName")) + } + if s.NewReplicationFactor == nil { + invalidParams.Add(request.NewErrParamRequired("NewReplicationFactor")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. 
+func (s *IncreaseReplicationFactorInput) SetAvailabilityZones(v []*string) *IncreaseReplicationFactorInput { + s.AvailabilityZones = v + return s +} + +// SetClusterName sets the ClusterName field's value. +func (s *IncreaseReplicationFactorInput) SetClusterName(v string) *IncreaseReplicationFactorInput { + s.ClusterName = &v + return s +} + +// SetNewReplicationFactor sets the NewReplicationFactor field's value. +func (s *IncreaseReplicationFactorInput) SetNewReplicationFactor(v int64) *IncreaseReplicationFactorInput { + s.NewReplicationFactor = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/IncreaseReplicationFactorResponse +type IncreaseReplicationFactorOutput struct { + _ struct{} `type:"structure"` + + // A description of the DAX cluster. with its new replication factor. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s IncreaseReplicationFactorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncreaseReplicationFactorOutput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. +func (s *IncreaseReplicationFactorOutput) SetCluster(v *Cluster) *IncreaseReplicationFactorOutput { + s.Cluster = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ListTagsRequest +type ListTagsInput struct { + _ struct{} `type:"structure"` + + // An optional token returned from a prior request. Use this token for pagination + // of results from this action. If this parameter is specified, the response + // includes only results beyond the token. + NextToken *string `type:"string"` + + // The name of the DAX resource to which the tags belong. 
+ // + // ResourceName is a required field + ResourceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsInput"} + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsInput) SetNextToken(v string) *ListTagsInput { + s.NextToken = &v + return s +} + +// SetResourceName sets the ResourceName field's value. +func (s *ListTagsInput) SetResourceName(v string) *ListTagsInput { + s.ResourceName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ListTagsResponse +type ListTagsOutput struct { + _ struct{} `type:"structure"` + + // If this value is present, there are additional results to be displayed. To + // retrieve them, call ListTags again, with NextToken set to this value. + NextToken *string `type:"string"` + + // A list of tags currently associated with the DAX cluster. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsOutput) SetNextToken(v string) *ListTagsOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *ListTagsOutput) SetTags(v []*Tag) *ListTagsOutput { + s.Tags = v + return s +} + +// Represents an individual node within a DAX cluster. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Node +type Node struct { + _ struct{} `type:"structure"` + + // The Availability Zone (AZ) in which the node has been deployed. + AvailabilityZone *string `type:"string"` + + // The endpoint for the node, consisting of a DNS name and a port number. Client + // applications can connect directly to a node endpoint, if desired (as an alternative + // to allowing DAX client software to intelligently route requests and responses + // to nodes in the DAX cluster. + Endpoint *Endpoint `type:"structure"` + + // The date and time (in UNIX epoch format) when the node was launched. + NodeCreateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A system-generated identifier for the node. + NodeId *string `type:"string"` + + // The current status of the node. For example: available. + NodeStatus *string `type:"string"` + + // The status of the parameter group associated with this node. For example, + // in-sync. + ParameterGroupStatus *string `type:"string"` +} + +// String returns the string representation +func (s Node) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Node) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *Node) SetAvailabilityZone(v string) *Node { + s.AvailabilityZone = &v + return s +} + +// SetEndpoint sets the Endpoint field's value. +func (s *Node) SetEndpoint(v *Endpoint) *Node { + s.Endpoint = v + return s +} + +// SetNodeCreateTime sets the NodeCreateTime field's value. +func (s *Node) SetNodeCreateTime(v time.Time) *Node { + s.NodeCreateTime = &v + return s +} + +// SetNodeId sets the NodeId field's value. 
+func (s *Node) SetNodeId(v string) *Node { + s.NodeId = &v + return s +} + +// SetNodeStatus sets the NodeStatus field's value. +func (s *Node) SetNodeStatus(v string) *Node { + s.NodeStatus = &v + return s +} + +// SetParameterGroupStatus sets the ParameterGroupStatus field's value. +func (s *Node) SetParameterGroupStatus(v string) *Node { + s.ParameterGroupStatus = &v + return s +} + +// Represents a parameter value that is applicable to a particular node type. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/NodeTypeSpecificValue +type NodeTypeSpecificValue struct { + _ struct{} `type:"structure"` + + // A node type to which the parameter value applies. + NodeType *string `type:"string"` + + // The parameter value for this node type. + Value *string `type:"string"` +} + +// String returns the string representation +func (s NodeTypeSpecificValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeTypeSpecificValue) GoString() string { + return s.String() +} + +// SetNodeType sets the NodeType field's value. +func (s *NodeTypeSpecificValue) SetNodeType(v string) *NodeTypeSpecificValue { + s.NodeType = &v + return s +} + +// SetValue sets the Value field's value. +func (s *NodeTypeSpecificValue) SetValue(v string) *NodeTypeSpecificValue { + s.Value = &v + return s +} + +// Describes a notification topic and its status. Notification topics are used +// for publishing DAX events to subscribers using Amazon Simple Notification +// Service (SNS). +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/NotificationConfiguration +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the topic. + TopicArn *string `type:"string"` + + // The current state of the topic. 
+ TopicStatus *string `type:"string"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// SetTopicArn sets the TopicArn field's value. +func (s *NotificationConfiguration) SetTopicArn(v string) *NotificationConfiguration { + s.TopicArn = &v + return s +} + +// SetTopicStatus sets the TopicStatus field's value. +func (s *NotificationConfiguration) SetTopicStatus(v string) *NotificationConfiguration { + s.TopicStatus = &v + return s +} + +// Describes an individual setting that controls some aspect of DAX behavior. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Parameter +type Parameter struct { + _ struct{} `type:"structure"` + + // A range of values within which the parameter can be set. + AllowedValues *string `type:"string"` + + // The conditions under which changes to this parameter can be applied. For + // example, requires-reboot indicates that a new value for this parameter will + // only take effect if a node is rebooted. + ChangeType *string `type:"string" enum:"ChangeType"` + + // The data type of the parameter. For example, integer: + DataType *string `type:"string"` + + // A description of the parameter + Description *string `type:"string"` + + // Whether the customer is allowed to modify the parameter. + IsModifiable *string `type:"string" enum:"IsModifiable"` + + // A list of node types, and specific parameter values for each node. + NodeTypeSpecificValues []*NodeTypeSpecificValue `type:"list"` + + // The name of the parameter. + ParameterName *string `type:"string"` + + // Determines whether the parameter can be applied to any nodes, or only nodes + // of a particular type. + ParameterType *string `type:"string" enum:"ParameterType"` + + // The value for the parameter. 
+ ParameterValue *string `type:"string"` + + // How the parameter is defined. For example, system denotes a system-defined + // parameter. + Source *string `type:"string"` +} + +// String returns the string representation +func (s Parameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Parameter) GoString() string { + return s.String() +} + +// SetAllowedValues sets the AllowedValues field's value. +func (s *Parameter) SetAllowedValues(v string) *Parameter { + s.AllowedValues = &v + return s +} + +// SetChangeType sets the ChangeType field's value. +func (s *Parameter) SetChangeType(v string) *Parameter { + s.ChangeType = &v + return s +} + +// SetDataType sets the DataType field's value. +func (s *Parameter) SetDataType(v string) *Parameter { + s.DataType = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Parameter) SetDescription(v string) *Parameter { + s.Description = &v + return s +} + +// SetIsModifiable sets the IsModifiable field's value. +func (s *Parameter) SetIsModifiable(v string) *Parameter { + s.IsModifiable = &v + return s +} + +// SetNodeTypeSpecificValues sets the NodeTypeSpecificValues field's value. +func (s *Parameter) SetNodeTypeSpecificValues(v []*NodeTypeSpecificValue) *Parameter { + s.NodeTypeSpecificValues = v + return s +} + +// SetParameterName sets the ParameterName field's value. +func (s *Parameter) SetParameterName(v string) *Parameter { + s.ParameterName = &v + return s +} + +// SetParameterType sets the ParameterType field's value. +func (s *Parameter) SetParameterType(v string) *Parameter { + s.ParameterType = &v + return s +} + +// SetParameterValue sets the ParameterValue field's value. +func (s *Parameter) SetParameterValue(v string) *Parameter { + s.ParameterValue = &v + return s +} + +// SetSource sets the Source field's value. 
+func (s *Parameter) SetSource(v string) *Parameter { + s.Source = &v + return s +} + +// A named set of parameters that are applied to all of the nodes in a DAX cluster. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ParameterGroup +type ParameterGroup struct { + _ struct{} `type:"structure"` + + // A description of the parameter group. + Description *string `type:"string"` + + // The name of the parameter group. + ParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s ParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterGroup) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ParameterGroup) SetDescription(v string) *ParameterGroup { + s.Description = &v + return s +} + +// SetParameterGroupName sets the ParameterGroupName field's value. +func (s *ParameterGroup) SetParameterGroupName(v string) *ParameterGroup { + s.ParameterGroupName = &v + return s +} + +// The status of a parameter group. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ParameterGroupStatus +type ParameterGroupStatus struct { + _ struct{} `type:"structure"` + + // The node IDs of one or more nodes to be rebooted. + NodeIdsToReboot []*string `type:"list"` + + // The status of parameter updates. + ParameterApplyStatus *string `type:"string"` + + // The name of the parameter group. + ParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s ParameterGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterGroupStatus) GoString() string { + return s.String() +} + +// SetNodeIdsToReboot sets the NodeIdsToReboot field's value. 
+func (s *ParameterGroupStatus) SetNodeIdsToReboot(v []*string) *ParameterGroupStatus { + s.NodeIdsToReboot = v + return s +} + +// SetParameterApplyStatus sets the ParameterApplyStatus field's value. +func (s *ParameterGroupStatus) SetParameterApplyStatus(v string) *ParameterGroupStatus { + s.ParameterApplyStatus = &v + return s +} + +// SetParameterGroupName sets the ParameterGroupName field's value. +func (s *ParameterGroupStatus) SetParameterGroupName(v string) *ParameterGroupStatus { + s.ParameterGroupName = &v + return s +} + +// An individual DAX parameter. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ParameterNameValue +type ParameterNameValue struct { + _ struct{} `type:"structure"` + + // The name of the parameter. + ParameterName *string `type:"string"` + + // The value of the parameter. + ParameterValue *string `type:"string"` +} + +// String returns the string representation +func (s ParameterNameValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterNameValue) GoString() string { + return s.String() +} + +// SetParameterName sets the ParameterName field's value. +func (s *ParameterNameValue) SetParameterName(v string) *ParameterNameValue { + s.ParameterName = &v + return s +} + +// SetParameterValue sets the ParameterValue field's value. +func (s *ParameterNameValue) SetParameterValue(v string) *ParameterNameValue { + s.ParameterValue = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/RebootNodeRequest +type RebootNodeInput struct { + _ struct{} `type:"structure"` + + // The name of the DAX cluster containing the node to be rebooted. + // + // ClusterName is a required field + ClusterName *string `type:"string" required:"true"` + + // The system-assigned ID of the node to be rebooted. 
+ // + // NodeId is a required field + NodeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebootNodeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootNodeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebootNodeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebootNodeInput"} + if s.ClusterName == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterName")) + } + if s.NodeId == nil { + invalidParams.Add(request.NewErrParamRequired("NodeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterName sets the ClusterName field's value. +func (s *RebootNodeInput) SetClusterName(v string) *RebootNodeInput { + s.ClusterName = &v + return s +} + +// SetNodeId sets the NodeId field's value. +func (s *RebootNodeInput) SetNodeId(v string) *RebootNodeInput { + s.NodeId = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/RebootNodeResponse +type RebootNodeOutput struct { + _ struct{} `type:"structure"` + + // A description of the DAX cluster after a node has been rebooted. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s RebootNodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootNodeOutput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. +func (s *RebootNodeOutput) SetCluster(v *Cluster) *RebootNodeOutput { + s.Cluster = v + return s +} + +// An individual VPC security group and its status. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/SecurityGroupMembership +type SecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The unique ID for this security group. + SecurityGroupIdentifier *string `type:"string"` + + // The status of this security group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s SecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroupMembership) GoString() string { + return s.String() +} + +// SetSecurityGroupIdentifier sets the SecurityGroupIdentifier field's value. +func (s *SecurityGroupMembership) SetSecurityGroupIdentifier(v string) *SecurityGroupMembership { + s.SecurityGroupIdentifier = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SecurityGroupMembership) SetStatus(v string) *SecurityGroupMembership { + s.Status = &v + return s +} + +// Represents the subnet associated with a DAX cluster. This parameter refers +// to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used +// with DAX. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Subnet +type Subnet struct { + _ struct{} `type:"structure"` + + // The Availability Zone (AZ) for subnet subnet. + SubnetAvailabilityZone *string `type:"string"` + + // The system-assigned identifier for the subnet. + SubnetIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// SetSubnetAvailabilityZone sets the SubnetAvailabilityZone field's value. +func (s *Subnet) SetSubnetAvailabilityZone(v string) *Subnet { + s.SubnetAvailabilityZone = &v + return s +} + +// SetSubnetIdentifier sets the SubnetIdentifier field's value. 
+func (s *Subnet) SetSubnetIdentifier(v string) *Subnet { + s.SubnetIdentifier = &v + return s +} + +// Represents the output of one of the following actions: +// +// * CreateSubnetGroup +// +// * ModifySubnetGroup +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/SubnetGroup +type SubnetGroup struct { + _ struct{} `type:"structure"` + + // The description of the subnet group. + Description *string `type:"string"` + + // The name of the subnet group. + SubnetGroupName *string `type:"string"` + + // A list of subnets associated with the subnet group. + Subnets []*Subnet `type:"list"` + + // The Amazon Virtual Private Cloud identifier (VPC ID) of the subnet group. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s SubnetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubnetGroup) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *SubnetGroup) SetDescription(v string) *SubnetGroup { + s.Description = &v + return s +} + +// SetSubnetGroupName sets the SubnetGroupName field's value. +func (s *SubnetGroup) SetSubnetGroupName(v string) *SubnetGroup { + s.SubnetGroupName = &v + return s +} + +// SetSubnets sets the Subnets field's value. +func (s *SubnetGroup) SetSubnets(v []*Subnet) *SubnetGroup { + s.Subnets = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *SubnetGroup) SetVpcId(v string) *SubnetGroup { + s.VpcId = &v + return s +} + +// A description of a tag. Every tag is a key-value pair. You can add up to +// 50 tags to a single DAX cluster. +// +// AWS-assigned tag names and values are automatically assigned the aws: prefix, +// which the user cannot assign. AWS-assigned tag names do not count towards +// the tag limit of 50. User-assigned tag names have the prefix user:. +// +// You cannot backdate the application of a tag. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Tag +type Tag struct { + _ struct{} `type:"structure"` + + // The key for the tag. Tag keys are case sensitive. Every DAX cluster can only + // have one tag with the same key. If you try to add an existing tag (same key), + // the existing tag value will be updated to the new value. + Key *string `type:"string"` + + // The value of the tag. Tag values are case-sensitive and can be null. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/TagResourceRequest +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The name of the DAX resource to which tags should be added. + // + // ResourceName is a required field + ResourceName *string `type:"string" required:"true"` + + // The tags to be assigned to the DAX resource. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceName sets the ResourceName field's value. +func (s *TagResourceInput) SetResourceName(v string) *TagResourceInput { + s.ResourceName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/TagResourceResponse +type TagResourceOutput struct { + _ struct{} `type:"structure"` + + // The list of tags that are associated with the DAX resource. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *TagResourceOutput) SetTags(v []*Tag) *TagResourceOutput { + s.Tags = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UntagResourceRequest +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The name of the DAX resource from which the tags should be removed. + // + // ResourceName is a required field + ResourceName *string `type:"string" required:"true"` + + // A list of tag keys. If the DAX cluster has any tags with these keys, then + // the tags are removed from the cluster. 
+ // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceName sets the ResourceName field's value. +func (s *UntagResourceInput) SetResourceName(v string) *UntagResourceInput { + s.ResourceName = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UntagResourceResponse +type UntagResourceOutput struct { + _ struct{} `type:"structure"` + + // The tag keys that have been removed from the cluster. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *UntagResourceOutput) SetTags(v []*Tag) *UntagResourceOutput { + s.Tags = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateClusterRequest +type UpdateClusterInput struct { + _ struct{} `type:"structure"` + + // The name of the DAX cluster to be modified. 
+ // + // ClusterName is a required field + ClusterName *string `type:"string" required:"true"` + + // A description of the changes being made to the cluster. + Description *string `type:"string"` + + // The Amazon Resource Name (ARN) that identifies the topic. + NotificationTopicArn *string `type:"string"` + + // The current state of the topic. + NotificationTopicStatus *string `type:"string"` + + // The name of a parameter group for this cluster. + ParameterGroupName *string `type:"string"` + + // A range of time when maintenance of DAX cluster software will be performed. + // For example: sun:01:00-sun:09:00. Cluster maintenance normally takes less + // than 30 minutes, and is performed automatically within the maintenance window. + PreferredMaintenanceWindow *string `type:"string"` + + // A list of user-specified security group IDs to be assigned to each node in + // the DAX cluster. If this parameter is not specified, DAX assigns the default + // VPC security group to each node. + SecurityGroupIds []*string `type:"list"` +} + +// String returns the string representation +func (s UpdateClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateClusterInput"} + if s.ClusterName == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterName sets the ClusterName field's value. +func (s *UpdateClusterInput) SetClusterName(v string) *UpdateClusterInput { + s.ClusterName = &v + return s +} + +// SetDescription sets the Description field's value. 
+func (s *UpdateClusterInput) SetDescription(v string) *UpdateClusterInput { + s.Description = &v + return s +} + +// SetNotificationTopicArn sets the NotificationTopicArn field's value. +func (s *UpdateClusterInput) SetNotificationTopicArn(v string) *UpdateClusterInput { + s.NotificationTopicArn = &v + return s +} + +// SetNotificationTopicStatus sets the NotificationTopicStatus field's value. +func (s *UpdateClusterInput) SetNotificationTopicStatus(v string) *UpdateClusterInput { + s.NotificationTopicStatus = &v + return s +} + +// SetParameterGroupName sets the ParameterGroupName field's value. +func (s *UpdateClusterInput) SetParameterGroupName(v string) *UpdateClusterInput { + s.ParameterGroupName = &v + return s +} + +// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. +func (s *UpdateClusterInput) SetPreferredMaintenanceWindow(v string) *UpdateClusterInput { + s.PreferredMaintenanceWindow = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *UpdateClusterInput) SetSecurityGroupIds(v []*string) *UpdateClusterInput { + s.SecurityGroupIds = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateClusterResponse +type UpdateClusterOutput struct { + _ struct{} `type:"structure"` + + // A description of the DAX cluster, after it has been modified. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s UpdateClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateClusterOutput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. 
+func (s *UpdateClusterOutput) SetCluster(v *Cluster) *UpdateClusterOutput { + s.Cluster = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateParameterGroupRequest +type UpdateParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the parameter group. + // + // ParameterGroupName is a required field + ParameterGroupName *string `type:"string" required:"true"` + + // An array of name-value pairs for the parameters in the group. Each element + // in the array represents a single parameter. + // + // ParameterNameValues is a required field + ParameterNameValues []*ParameterNameValue `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateParameterGroupInput"} + if s.ParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ParameterGroupName")) + } + if s.ParameterNameValues == nil { + invalidParams.Add(request.NewErrParamRequired("ParameterNameValues")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetParameterGroupName sets the ParameterGroupName field's value. +func (s *UpdateParameterGroupInput) SetParameterGroupName(v string) *UpdateParameterGroupInput { + s.ParameterGroupName = &v + return s +} + +// SetParameterNameValues sets the ParameterNameValues field's value. 
+func (s *UpdateParameterGroupInput) SetParameterNameValues(v []*ParameterNameValue) *UpdateParameterGroupInput { + s.ParameterNameValues = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateParameterGroupResponse +type UpdateParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // The parameter group that has been modified. + ParameterGroup *ParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s UpdateParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateParameterGroupOutput) GoString() string { + return s.String() +} + +// SetParameterGroup sets the ParameterGroup field's value. +func (s *UpdateParameterGroupOutput) SetParameterGroup(v *ParameterGroup) *UpdateParameterGroupOutput { + s.ParameterGroup = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateSubnetGroupRequest +type UpdateSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // A description of the subnet group. + Description *string `type:"string"` + + // The name of the subnet group. + // + // SubnetGroupName is a required field + SubnetGroupName *string `type:"string" required:"true"` + + // A list of subnet IDs in the subnet group. + SubnetIds []*string `type:"list"` +} + +// String returns the string representation +func (s UpdateSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSubnetGroupInput"} + if s.SubnetGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *UpdateSubnetGroupInput) SetDescription(v string) *UpdateSubnetGroupInput { + s.Description = &v + return s +} + +// SetSubnetGroupName sets the SubnetGroupName field's value. +func (s *UpdateSubnetGroupInput) SetSubnetGroupName(v string) *UpdateSubnetGroupInput { + s.SubnetGroupName = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *UpdateSubnetGroupInput) SetSubnetIds(v []*string) *UpdateSubnetGroupInput { + s.SubnetIds = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateSubnetGroupResponse +type UpdateSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // The subnet group that has been modified. + SubnetGroup *SubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s UpdateSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSubnetGroupOutput) GoString() string { + return s.String() +} + +// SetSubnetGroup sets the SubnetGroup field's value. 
+func (s *UpdateSubnetGroupOutput) SetSubnetGroup(v *SubnetGroup) *UpdateSubnetGroupOutput { + s.SubnetGroup = v + return s +} + +const ( + // ChangeTypeImmediate is a ChangeType enum value + ChangeTypeImmediate = "IMMEDIATE" + + // ChangeTypeRequiresReboot is a ChangeType enum value + ChangeTypeRequiresReboot = "REQUIRES_REBOOT" +) + +const ( + // IsModifiableTrue is a IsModifiable enum value + IsModifiableTrue = "TRUE" + + // IsModifiableFalse is a IsModifiable enum value + IsModifiableFalse = "FALSE" + + // IsModifiableConditional is a IsModifiable enum value + IsModifiableConditional = "CONDITIONAL" +) + +const ( + // ParameterTypeDefault is a ParameterType enum value + ParameterTypeDefault = "DEFAULT" + + // ParameterTypeNodeTypeSpecific is a ParameterType enum value + ParameterTypeNodeTypeSpecific = "NODE_TYPE_SPECIFIC" +) + +const ( + // SourceTypeCluster is a SourceType enum value + SourceTypeCluster = "CLUSTER" + + // SourceTypeParameterGroup is a SourceType enum value + SourceTypeParameterGroup = "PARAMETER_GROUP" + + // SourceTypeSubnetGroup is a SourceType enum value + SourceTypeSubnetGroup = "SUBNET_GROUP" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dax/doc.go new file mode 100644 index 00000000000..a3b25ccbd5a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dax/doc.go @@ -0,0 +1,33 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package dax provides the client and types for making API +// requests to Amazon DynamoDB Accelerator (DAX). +// +// DAX is a managed caching service engineered for Amazon DynamoDB. DAX dramatically +// speeds up database reads by caching frequently-accessed data from DynamoDB, +// so applications can access that data with sub-millisecond latency. You can +// create a DAX cluster easily, using the AWS Management Console. 
With a few +// simple modifications to your code, your application can begin taking advantage +// of the DAX cluster and realize significant improvements in read performance. +// +// See https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19 for more information on this service. +// +// See dax package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/dax/ +// +// Using the Client +// +// To contact Amazon DynamoDB Accelerator (DAX) with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon DynamoDB Accelerator (DAX) client DAX for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/dax/#New +package dax diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dax/errors.go new file mode 100644 index 00000000000..24aaf1a2327 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dax/errors.go @@ -0,0 +1,160 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dax + +const ( + + // ErrCodeClusterAlreadyExistsFault for service response error code + // "ClusterAlreadyExistsFault". + // + // You already have a DAX cluster with the given identifier. + ErrCodeClusterAlreadyExistsFault = "ClusterAlreadyExistsFault" + + // ErrCodeClusterNotFoundFault for service response error code + // "ClusterNotFoundFault". + // + // The requested cluster ID does not refer to an existing DAX cluster. 
+ ErrCodeClusterNotFoundFault = "ClusterNotFoundFault" + + // ErrCodeClusterQuotaForCustomerExceededFault for service response error code + // "ClusterQuotaForCustomerExceededFault". + // + // You have attempted to exceed the maximum number of DAX clusters for your + // AWS account. + ErrCodeClusterQuotaForCustomerExceededFault = "ClusterQuotaForCustomerExceededFault" + + // ErrCodeInsufficientClusterCapacityFault for service response error code + // "InsufficientClusterCapacityFault". + // + // There are not enough system resources to create the cluster you requested + // (or to resize an already-existing cluster). + ErrCodeInsufficientClusterCapacityFault = "InsufficientClusterCapacityFault" + + // ErrCodeInvalidARNFault for service response error code + // "InvalidARNFault". + // + // The Amazon Resource Name (ARN) supplied in the request is not valid. + ErrCodeInvalidARNFault = "InvalidARNFault" + + // ErrCodeInvalidClusterStateFault for service response error code + // "InvalidClusterStateFault". + // + // The requested DAX cluster is not in the available state. + ErrCodeInvalidClusterStateFault = "InvalidClusterStateFault" + + // ErrCodeInvalidParameterCombinationException for service response error code + // "InvalidParameterCombinationException". + // + // Two or more incompatible parameters were specified. + ErrCodeInvalidParameterCombinationException = "InvalidParameterCombinationException" + + // ErrCodeInvalidParameterGroupStateFault for service response error code + // "InvalidParameterGroupStateFault". + // + // One or more parameters in a parameter group are in an invalid state. + ErrCodeInvalidParameterGroupStateFault = "InvalidParameterGroupStateFault" + + // ErrCodeInvalidParameterValueException for service response error code + // "InvalidParameterValueException". + // + // The value for a parameter is invalid. 
+ ErrCodeInvalidParameterValueException = "InvalidParameterValueException" + + // ErrCodeInvalidSubnet for service response error code + // "InvalidSubnet". + // + // An invalid subnet identifier was specified. + ErrCodeInvalidSubnet = "InvalidSubnet" + + // ErrCodeInvalidVPCNetworkStateFault for service response error code + // "InvalidVPCNetworkStateFault". + // + // The VPC network is in an invalid state. + ErrCodeInvalidVPCNetworkStateFault = "InvalidVPCNetworkStateFault" + + // ErrCodeNodeNotFoundFault for service response error code + // "NodeNotFoundFault". + // + // None of the nodes in the cluster have the given node ID. + ErrCodeNodeNotFoundFault = "NodeNotFoundFault" + + // ErrCodeNodeQuotaForClusterExceededFault for service response error code + // "NodeQuotaForClusterExceededFault". + // + // You have attempted to exceed the maximum number of nodes for a DAX cluster. + ErrCodeNodeQuotaForClusterExceededFault = "NodeQuotaForClusterExceededFault" + + // ErrCodeNodeQuotaForCustomerExceededFault for service response error code + // "NodeQuotaForCustomerExceededFault". + // + // You have attempted to exceed the maximum number of nodes for your AWS account. + ErrCodeNodeQuotaForCustomerExceededFault = "NodeQuotaForCustomerExceededFault" + + // ErrCodeParameterGroupAlreadyExistsFault for service response error code + // "ParameterGroupAlreadyExistsFault". + // + // The specified parameter group already exists. + ErrCodeParameterGroupAlreadyExistsFault = "ParameterGroupAlreadyExistsFault" + + // ErrCodeParameterGroupNotFoundFault for service response error code + // "ParameterGroupNotFoundFault". + // + // The specified parameter group does not exist. + ErrCodeParameterGroupNotFoundFault = "ParameterGroupNotFoundFault" + + // ErrCodeParameterGroupQuotaExceededFault for service response error code + // "ParameterGroupQuotaExceededFault". + // + // You have attempted to exceed the maximum number of parameter groups. 
+ ErrCodeParameterGroupQuotaExceededFault = "ParameterGroupQuotaExceededFault" + + // ErrCodeSubnetGroupAlreadyExistsFault for service response error code + // "SubnetGroupAlreadyExistsFault". + // + // The specified subnet group already exists. + ErrCodeSubnetGroupAlreadyExistsFault = "SubnetGroupAlreadyExistsFault" + + // ErrCodeSubnetGroupInUseFault for service response error code + // "SubnetGroupInUseFault". + // + // The specified subnet group is currently in use. + ErrCodeSubnetGroupInUseFault = "SubnetGroupInUseFault" + + // ErrCodeSubnetGroupNotFoundFault for service response error code + // "SubnetGroupNotFoundFault". + // + // The requested subnet group name does not refer to an existing subnet group. + ErrCodeSubnetGroupNotFoundFault = "SubnetGroupNotFoundFault" + + // ErrCodeSubnetGroupQuotaExceededFault for service response error code + // "SubnetGroupQuotaExceededFault". + // + // The request cannot be processed because it would exceed the allowed number + // of subnets in a subnet group. + ErrCodeSubnetGroupQuotaExceededFault = "SubnetGroupQuotaExceededFault" + + // ErrCodeSubnetInUse for service response error code + // "SubnetInUse". + // + // The requested subnet is being used by another subnet group. + ErrCodeSubnetInUse = "SubnetInUse" + + // ErrCodeSubnetQuotaExceededFault for service response error code + // "SubnetQuotaExceededFault". + // + // The request cannot be processed because it would exceed the allowed number + // of subnets in a subnet group. + ErrCodeSubnetQuotaExceededFault = "SubnetQuotaExceededFault" + + // ErrCodeTagNotFoundFault for service response error code + // "TagNotFoundFault". + // + // The tag does not exist. + ErrCodeTagNotFoundFault = "TagNotFoundFault" + + // ErrCodeTagQuotaPerResourceExceeded for service response error code + // "TagQuotaPerResourceExceeded". + // + // You have exceeded the maximum number of tags for this DAX cluster. 
+ ErrCodeTagQuotaPerResourceExceeded = "TagQuotaPerResourceExceeded" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/service.go b/vendor/github.com/aws/aws-sdk-go/service/dax/service.go new file mode 100644 index 00000000000..a80ed1441a0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dax/service.go @@ -0,0 +1,95 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dax + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// DAX provides the API operation methods for making requests to +// Amazon DynamoDB Accelerator (DAX). See this package's package overview docs +// for details on the service. +// +// DAX methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type DAX struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "dax" // Service endpoint prefix API calls made to. + EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. +) + +// New creates a new instance of the DAX client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DAX client from just a session. +// svc := dax.New(mySession) +// +// // Create a DAX client with additional configuration +// svc := dax.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DAX { + c := p.ClientConfig(EndpointsID, cfgs...) 
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DAX { + svc := &DAX{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2017-04-19", + JSONVersion: "1.1", + TargetPrefix: "AmazonDAXV3", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DAX operation and runs any +// custom request initialization. 
+func (c *DAX) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/vendor.json b/vendor/vendor.json index bdb77abd1bd..6b4f20f2ab2 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -548,6 +548,14 @@ "version": "v1.12.55", "versionExact": "v1.12.55" }, + { + "checksumSHA1": "siWpqsOY3u69XkgPF8+F8V1K0Pc=", + "path": "github.com/aws/aws-sdk-go/service/dax", + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" + }, { "checksumSHA1": "26CWoHQP/dyL2VzE5ZNd8zNzhko=", "path": "github.com/aws/aws-sdk-go/service/devicefarm", From 10895f997e9a9903e0bc04f0b9e0a2f0a4051113 Mon Sep 17 00:00:00 2001 From: Atsushi Ishibashi Date: Mon, 1 Jan 2018 23:55:47 +0900 Subject: [PATCH 109/350] fix cycle --- aws/resource_aws_ssm_maintenance_window.go | 6 ++---- aws/resource_aws_ssm_maintenance_window_test.go | 6 ++++++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_ssm_maintenance_window.go b/aws/resource_aws_ssm_maintenance_window.go index 5ce56677867..e895b532355 100644 --- a/aws/resource_aws_ssm_maintenance_window.go +++ b/aws/resource_aws_ssm_maintenance_window.go @@ -68,7 +68,7 @@ func resourceAwsSsmMaintenanceWindowCreate(d *schema.ResourceData, meta interfac } d.SetId(*resp.WindowId) - return resourceAwsSsmMaintenanceWindowRead(d, meta) + return resourceAwsSsmMaintenanceWindowUpdate(d, meta) } func resourceAwsSsmMaintenanceWindowUpdate(d *schema.ResourceData, meta interface{}) error { @@ -98,9 +98,7 @@ func resourceAwsSsmMaintenanceWindowUpdate(d *schema.ResourceData, meta interfac params.AllowUnassociatedTargets = aws.Bool(d.Get("allow_unassociated_targets").(bool)) } - if d.HasChange("enabled") { - params.Enabled = 
aws.Bool(d.Get("enabled").(bool)) - } + params.Enabled = aws.Bool(d.Get("enabled").(bool)) _, err := ssmconn.UpdateMaintenanceWindow(params) if err != nil { diff --git a/aws/resource_aws_ssm_maintenance_window_test.go b/aws/resource_aws_ssm_maintenance_window_test.go index d316e6bdd7d..2fca94f3739 100644 --- a/aws/resource_aws_ssm_maintenance_window_test.go +++ b/aws/resource_aws_ssm_maintenance_window_test.go @@ -30,6 +30,8 @@ func TestAccAWSSSMMaintenanceWindow_basic(t *testing.T) { "aws_ssm_maintenance_window.foo", "cutoff", "1"), resource.TestCheckResourceAttr( "aws_ssm_maintenance_window.foo", "name", fmt.Sprintf("maintenance-window-%s", name)), + resource.TestCheckResourceAttr( + "aws_ssm_maintenance_window.foo", "enabled", "false"), ), }, { @@ -44,6 +46,8 @@ func TestAccAWSSSMMaintenanceWindow_basic(t *testing.T) { "aws_ssm_maintenance_window.foo", "cutoff", "8"), resource.TestCheckResourceAttr( "aws_ssm_maintenance_window.foo", "name", fmt.Sprintf("updated-maintenance-window-%s", name)), + resource.TestCheckResourceAttr( + "aws_ssm_maintenance_window.foo", "enabled", "true"), ), }, }, @@ -123,6 +127,7 @@ resource "aws_ssm_maintenance_window" "foo" { schedule = "cron(0 16 ? * TUE *)" duration = 3 cutoff = 1 + enabled = false } `, rName) @@ -135,6 +140,7 @@ resource "aws_ssm_maintenance_window" "foo" { schedule = "cron(0 16 ? 
* WED *)" duration = 10 cutoff = 8 + enabled = true } `, rName) From 21f1241233d320f187827f61fd0c0a8043448c65 Mon Sep 17 00:00:00 2001 From: VEBER Arnaud Date: Sun, 7 Jan 2018 10:46:58 +0100 Subject: [PATCH 110/350] chore(vendor): bump aws-sdk-go to v1.12.56 --- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/inspector/api.go | 23 + .../aws/aws-sdk-go/service/ssm/api.go | 469 ++++++++-- .../aws/aws-sdk-go/service/ssm/errors.go | 14 +- vendor/vendor.json | 846 +++++++++--------- 5 files changed, 869 insertions(+), 485 deletions(-) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index a92ed43b1c7..9a1ae5b23fe 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.12.55" +const SDKVersion = "1.12.56" diff --git a/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go b/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go index f9ec51dd5c0..fe34c261bba 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go @@ -4391,6 +4391,12 @@ type AssessmentTemplate struct { // Arn is a required field Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` + // The number of existing assessment runs associated with this assessment template. + // This value can be zero or a positive integer. + // + // AssessmentRunCount is a required field + AssessmentRunCount *int64 `locationName:"assessmentRunCount" type:"integer" required:"true"` + // The ARN of the assessment target that corresponds to this assessment template. 
// // AssessmentTargetArn is a required field @@ -4408,6 +4414,11 @@ type AssessmentTemplate struct { // DurationInSeconds is a required field DurationInSeconds *int64 `locationName:"durationInSeconds" min:"180" type:"integer" required:"true"` + // The Amazon Resource Name (ARN) of the most recent assessment run associated + // with this assessment template. This value exists only when the value of assessmentRunCount + // is greater than zero. + LastAssessmentRunArn *string `locationName:"lastAssessmentRunArn" min:"1" type:"string"` + // The name of the assessment template. // // Name is a required field @@ -4441,6 +4452,12 @@ func (s *AssessmentTemplate) SetArn(v string) *AssessmentTemplate { return s } +// SetAssessmentRunCount sets the AssessmentRunCount field's value. +func (s *AssessmentTemplate) SetAssessmentRunCount(v int64) *AssessmentTemplate { + s.AssessmentRunCount = &v + return s +} + // SetAssessmentTargetArn sets the AssessmentTargetArn field's value. func (s *AssessmentTemplate) SetAssessmentTargetArn(v string) *AssessmentTemplate { s.AssessmentTargetArn = &v @@ -4459,6 +4476,12 @@ func (s *AssessmentTemplate) SetDurationInSeconds(v int64) *AssessmentTemplate { return s } +// SetLastAssessmentRunArn sets the LastAssessmentRunArn field's value. +func (s *AssessmentTemplate) SetLastAssessmentRunArn(v string) *AssessmentTemplate { + s.LastAssessmentRunArn = &v + return s +} + // SetName sets the Name field's value. func (s *AssessmentTemplate) SetName(v string) *AssessmentTemplate { s.Name = &v diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go index 0f57d3ff38b..38d637a82e5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go @@ -65,7 +65,7 @@ func (c *SSM) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *requ // and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. 
// Or Key=Stack and Value=Production, Pre-Production, or Test. // -// Each resource can have a maximum of 10 tags. +// Each resource can have a maximum of 50 tags. // // We recommend that you devise a set of tag keys that meets your needs for // each resource type. Using a consistent set of tag keys makes it easier for @@ -726,8 +726,11 @@ func (c *SSM) CreateMaintenanceWindowRequest(input *CreateMaintenanceWindowInput // don't match the original call to the API with the same idempotency token. // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" -// Error returned when the caller has exceeded the default resource limits (e.g. -// too many Maintenance Windows have been created). +// Error returned when the caller has exceeded the default resource limits. +// For example, too many Maintenance Windows or Patch baselines have been created. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -800,6 +803,9 @@ func (c *SSM) CreatePatchBaselineRequest(input *CreatePatchBaselineInput) (req * // // Creates a patch baseline. // +// For information about valid key and value pairs in PatchFilters for each +// supported operating system type, see PatchFilter (http://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -813,8 +819,11 @@ func (c *SSM) CreatePatchBaselineRequest(input *CreatePatchBaselineInput) (req * // don't match the original call to the API with the same idempotency token. 
// // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" -// Error returned when the caller has exceeded the default resource limits (e.g. -// too many Maintenance Windows have been created). +// Error returned when the caller has exceeded the default resource limits. +// For example, too many Maintenance Windows or Patch baselines have been created. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -1879,8 +1888,11 @@ func (c *SSM) DeregisterTargetFromMaintenanceWindowRequest(input *DeregisterTarg // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -1966,8 +1978,11 @@ func (c *SSM) DeregisterTaskFromMaintenanceWindowRequest(input *DeregisterTaskFr // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). 
// // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -2838,8 +2853,11 @@ func (c *SSM) DescribeEffectivePatchesForPatchBaselineRequest(input *DescribeEff // try again. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeUnsupportedOperatingSystem "UnsupportedOperatingSystem" // The operating systems you specified is not supported, or the operation is @@ -3459,8 +3477,11 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsRequest(input *De // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -3542,8 +3563,11 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTasksRequest(input *DescribeMain // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. 
+// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -3706,8 +3730,11 @@ func (c *SSM) DescribeMaintenanceWindowTargetsRequest(input *DescribeMaintenance // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -3789,8 +3816,11 @@ func (c *SSM) DescribeMaintenanceWindowTasksRequest(input *DescribeMaintenanceWi // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -4963,8 +4993,11 @@ func (c *SSM) GetMaintenanceWindowRequest(input *GetMaintenanceWindowInput) (req // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. 
+// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -5047,8 +5080,11 @@ func (c *SSM) GetMaintenanceWindowExecutionRequest(input *GetMaintenanceWindowEx // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -5131,8 +5167,11 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskRequest(input *GetMaintenanceWind // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. 
@@ -5215,8 +5254,11 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskInvocationRequest(input *GetMaint // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -5298,8 +5340,11 @@ func (c *SSM) GetMaintenanceWindowTaskRequest(input *GetMaintenanceWindowTaskInp // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -5702,6 +5747,8 @@ func (c *SSM) GetParametersByPathRequest(input *GetParametersByPathInput) (req * // that point and a NextToken. You can specify the NextToken in a subsequent // call to get the next set of results. // +// This API action doesn't support filtering by tags. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
@@ -5856,8 +5903,11 @@ func (c *SSM) GetPatchBaselineRequest(input *GetPatchBaselineInput) (req *reques // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInvalidResourceId "InvalidResourceId" // The resource ID is not valid. Verify that you entered the correct ID and @@ -7439,6 +7489,43 @@ func (c *SSM) PutComplianceItemsRequest(input *PutComplianceItemsInput) (req *re // so you must provide a full list of compliance items each time that you send // the request. // +// ComplianceType can be one of the following: +// +// * ExecutionId: The execution ID when the patch, association, or custom +// compliance item was applied. +// +// * ExecutionType: Specify patch, association, or Custom:string. +// +// * ExecutionTime. The time the patch, association, or custom compliance +// item was applied to the instance. +// +// * Id: The patch, association, or custom compliance ID. +// +// * Title: A title. +// +// * Status: The status of the compliance item. For example, approved for +// patches, or Failed for associations. +// +// * Severity: A patch severity. For example, critical. +// +// * DocumentName: A SSM document name. For example, AWS-RunPatchBaseline. +// +// * DocumentVersion: An SSM document version number. For example, 4. +// +// * Classification: A patch classification. For example, security updates. +// +// * PatchBaselineId: A patch baseline ID. +// +// * PatchSeverity: A patch severity. For example, Critical. +// +// * PatchState: A patch state. For example, InstancesWithFailedPatches. 
+// +// * PatchGroup: The name of a patch group. +// +// * InstalledTime: The time the association, patch, or custom compliance +// item was applied to the resource. Specify the time by using the following +// format: yyyy-MM-dd'T'HH:mm:ss'Z' +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7801,8 +7888,11 @@ func (c *SSM) RegisterDefaultPatchBaselineRequest(input *RegisterDefaultPatchBas // try again. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -7888,16 +7978,22 @@ func (c *SSM) RegisterPatchBaselineForPatchGroupRequest(input *RegisterPatchBase // baseline that is already registered with a different patch baseline. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInvalidResourceId "InvalidResourceId" // The resource ID is not valid. Verify that you entered the correct ID and // try again. 
// // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" -// Error returned when the caller has exceeded the default resource limits (e.g. -// too many Maintenance Windows have been created). +// Error returned when the caller has exceeded the default resource limits. +// For example, too many Maintenance Windows or Patch baselines have been created. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -7983,12 +8079,18 @@ func (c *SSM) RegisterTargetWithMaintenanceWindowRequest(input *RegisterTargetWi // don't match the original call to the API with the same idempotency token. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" -// Error returned when the caller has exceeded the default resource limits (e.g. -// too many Maintenance Windows have been created). +// Error returned when the caller has exceeded the default resource limits. +// For example, too many Maintenance Windows or Patch baselines have been created. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. 
@@ -8074,12 +8176,18 @@ func (c *SSM) RegisterTaskWithMaintenanceWindowRequest(input *RegisterTaskWithMa // don't match the original call to the API with the same idempotency token. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" -// Error returned when the caller has exceeded the default resource limits (e.g. -// too many Maintenance Windows have been created). +// Error returned when the caller has exceeded the default resource limits. +// For example, too many Maintenance Windows or Patch baselines have been created. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeFeatureNotAvailableException "FeatureNotAvailableException" // You attempted to register a LAMBDA or STEP_FUNCTION task in a region where @@ -9075,8 +9183,11 @@ func (c *SSM) UpdateMaintenanceWindowRequest(input *UpdateMaintenanceWindowInput // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). 
// // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -9174,8 +9285,11 @@ func (c *SSM) UpdateMaintenanceWindowTargetRequest(input *UpdateMaintenanceWindo // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -9276,8 +9390,11 @@ func (c *SSM) UpdateMaintenanceWindowTaskRequest(input *UpdateMaintenanceWindowT // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -9446,6 +9563,9 @@ func (c *SSM) UpdatePatchBaselineRequest(input *UpdatePatchBaselineInput) (req * // Modifies an existing patch baseline. Fields not specified in the request // are left unchanged. // +// For information about valid key and value pairs in PatchFilters for each +// supported operating system type, see PatchFilter (http://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html). +// // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -9455,8 +9575,11 @@ func (c *SSM) UpdatePatchBaselineRequest(input *UpdatePatchBaselineInput) (req * // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource (e.g. a Maintenance Window) -// doesn't exist. +// Error returned when the ID specified for a resource, such as a Maintenance +// Window or Patch baseline, doesn't exist. +// +// For information about resource limits in Systems Manager, see AWS Systems +// Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -18990,8 +19113,8 @@ type GetParametersByPathInput struct { ParameterFilters []*ParameterStringFilter `type:"list"` // The hierarchy for the parameter. Hierarchies start with a forward slash (/) - // and end with the parameter name. A hierarchy can have a maximum of five levels. - // For example: /Finance/Prod/IAD/WinServ2016/license15 + // and end with the parameter name. A hierarchy can have a maximum of 15 levels. + // Here is an example of a hierarchy: /Finance/Prod/IAD/WinServ2016/license33 // // Path is a required field Path *string `min:"1" type:"string" required:"true"` @@ -23644,7 +23767,7 @@ func (s *ParameterStringFilter) SetValues(v []*string) *ParameterStringFilter { return s } -// One or more filters. Use a filter to return a more specific list of results. +// This data type is deprecated. Instead, use ParameterStringFilter. // See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ParametersFilter type ParametersFilter struct { _ struct{} `type:"structure"` @@ -23985,17 +24108,249 @@ func (s *PatchComplianceData) SetTitle(v string) *PatchComplianceData { } // Defines a patch filter. 
+// +// A patch filter consists of key/value pairs, but not all keys are valid for +// all operating system types. For example, the key PRODUCT is valid for all +// supported operating system types. The key MSRC_SEVERITY, however, is valid +// only for Windows operating systems, and the key SECTION is valid only for +// Ubuntu operating systems. +// +// Refer to the following sections for information about which keys may be used +// with each major operating system, and which values are valid for each key. +// +// Windows Operating Systems +// +// The supported keys for Windows operating systems are PRODUCT, CLASSIFICATION, +// and MSRC_SEVERITY. See the following lists for valid values for each of these +// keys. +// +// Supported key:PRODUCT +// +// Supported values: +// +// * Windows7 +// +// * Windows8 +// +// * Windows8.1 +// +// * Windows8Embedded +// +// * Windows10 +// +// * Windows10LTSB +// +// * WindowsServer2008 +// +// * WindowsServer2008R2 +// +// * WindowsServer2012 +// +// * WindowsServer2012R2 +// +// * WindowsServer2016 +// +// Supported key:CLASSIFICATION +// +// Supported values: +// +// * CriticalUpdates +// +// * DefinitionUpdates +// +// * Drivers +// +// * FeaturePacks +// +// * SecurityUpdates +// +// * ServicePacks +// +// * Tools +// +// * UpdateRollups +// +// * Updates +// +// * Upgrades +// +// Supported key:MSRC_SEVERITY +// +// Supported values: +// +// * Critical +// +// * Important +// +// * Moderate +// +// * Low +// +// * Unspecified +// +// Ubuntu Operating Systems +// +// The supported keys for Ubuntu operating systems are PRODUCT, PRIORITY, and +// SECTION. See the following lists for valid values for each of these keys. 
+// +// Supported key:PRODUCT +// +// Supported values: +// +// * Ubuntu14.04 +// +// * Ubuntu16.04 +// +// Supported key:PRIORITY +// +// Supported values: +// +// * Required +// +// * Important +// +// * Standard +// +// * Optional +// +// * Extra +// +// Supported key:SECTION +// +// Only the length of the key value is validated. Minimum length is 1. Maximum +// length is 64. +// +// Amazon Linux Operating Systems +// +// The supported keys for Amazon Linux operating systems are PRODUCT, CLASSIFICATION, +// and SEVERITY. See the following lists for valid values for each of these +// keys. +// +// Supported key:PRODUCT +// +// Supported values: +// +// * AmazonLinux2012.03 +// +// * AmazonLinux2012.09 +// +// * AmazonLinux2013.03 +// +// * AmazonLinux2013.09 +// +// * AmazonLinux2014.03 +// +// * AmazonLinux2014.09 +// +// * AmazonLinux2015.03 +// +// * AmazonLinux2015.09 +// +// * AmazonLinux2016.03 +// +// * AmazonLinux2016.09 +// +// * AmazonLinux2017.03 +// +// * AmazonLinux2017.09 +// +// Supported key:CLASSIFICATION +// +// Supported values: +// +// * Security +// +// * Bugfix +// +// * Enhancement +// +// * Recommended +// +// * Newpackage +// +// Supported key:SEVERITY +// +// Supported values: +// +// * Critical +// +// * Important +// +// * Medium +// +// * Low +// +// RedHat Enterprise Linux (RHEL) Operating Systems +// +// The supported keys for RedHat Enterprise Linux operating systems are PRODUCT, +// CLASSIFICATION, and SEVERITY. See the following lists for valid values for +// each of these keys. 
+// +// Supported key:PRODUCT +// +// Supported values: +// +// * RedhatEnterpriseLinux6.5 +// +// * RedhatEnterpriseLinux6.6 +// +// * RedhatEnterpriseLinux6.7 +// +// * RedhatEnterpriseLinux6.8 +// +// * RedhatEnterpriseLinux6.9 +// +// * RedhatEnterpriseLinux7.0 +// +// * RedhatEnterpriseLinux7.1 +// +// * RedhatEnterpriseLinux7.2 +// +// * RedhatEnterpriseLinux7.3 +// +// * RedhatEnterpriseLinux7.4 +// +// Supported key:CLASSIFICATION +// +// Supported values: +// +// * Security +// +// * Bugfix +// +// * Enhancement +// +// * Recommended +// +// * Newpackage +// +// Supported key:SEVERITY +// +// Supported values: +// +// * Critical +// +// * Important +// +// * Medium +// +// * Low // See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PatchFilter type PatchFilter struct { _ struct{} `type:"structure"` - // The key for the filter (PRODUCT, CLASSIFICATION, MSRC_SEVERITY, PATCH_ID) + // The key for the filter. + // + // See PatchFilter for lists of valid keys for each operating system type. // // Key is a required field Key *string `type:"string" required:"true" enum:"PatchFilterKey"` // The value for the filter key. // + // See PatchFilter for lists of valid values for each key based on operating + // system type. + // // Values is a required field Values []*string `min:"1" type:"list" required:"true"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go index c0030a51957..cc45bcb71ed 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go @@ -120,8 +120,11 @@ const ( // ErrCodeDoesNotExistException for service response error code // "DoesNotExistException". // - // Error returned when the ID specified for a resource (e.g. a Maintenance Window) - // doesn't exist. + // Error returned when the ID specified for a resource, such as a Maintenance + // Window or Patch baseline, doesn't exist. 
+ // + // For information about resource limits in Systems Manager, see AWS Systems + // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). ErrCodeDoesNotExistException = "DoesNotExistException" // ErrCodeDuplicateDocumentContent for service response error code @@ -525,8 +528,11 @@ const ( // ErrCodeResourceLimitExceededException for service response error code // "ResourceLimitExceededException". // - // Error returned when the caller has exceeded the default resource limits (e.g. - // too many Maintenance Windows have been created). + // Error returned when the caller has exceeded the default resource limits. + // For example, too many Maintenance Windows or Patch baselines have been created. + // + // For information about resource limits in Systems Manager, see AWS Systems + // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). ErrCodeResourceLimitExceededException = "ResourceLimitExceededException" // ErrCodeStatusUnchanged for service response error code diff --git a/vendor/vendor.json b/vendor/vendor.json index 6b4f20f2ab2..d30f270378b 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -141,844 +141,844 @@ "revisionTime": "2017-07-27T15:54:43Z" }, { - "checksumSHA1": "m/lQ3DQtkr3nS4w5irRAje91Erw=", + "checksumSHA1": "LNHL71DHaVF6ZpRpHabRk1QJf2M=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "DtuTqKH29YnLjrIJkRYX0HQtXY0=", "path": "github.com/aws/aws-sdk-go/aws/arn", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": 
"ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "9nE/FjZ4pYrT883KtV2/aI+Gayo=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - 
"version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "OnU/n7R33oYXiB4SAGd5pK7I0Bs=", "path": 
"github.com/aws/aws-sdk-go/aws/defaults", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "BT2+PhuOjbAuMcLpdop0FKQY5EY=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "9GvAyILJ7g+VUg8Ef5DsT5GuYsg=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, 
{ "checksumSHA1": "iU00ZjhAml/13g+1YXT21IqoXqg=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "NStHCXEvYqG72GknZyv1jaKaeH0=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "yHfT5DTbeCLs4NE2Rgnqrhe15ls=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": 
"ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "pkeoOfZpHRvFG/AOZeTf0lwtsFg=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": 
"f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "vnYDXA1NxJ7Hu+DMfXNk1UnmkWg=", "path": "github.com/aws/aws-sdk-go/service/acm", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": 
"DPl/OkvEUjrd+XKqX73l6nUNw3U=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "X8tOI6i+RJwXIgg1qBjDNclyG/0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "aDAaH6YiA50IrJ5Smfg0fovrniA=", "path": "github.com/aws/aws-sdk-go/service/appsync", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "oBXDw1zQTfxcKsK3ZjtKcS7gBLI=", "path": "github.com/aws/aws-sdk-go/service/athena", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "ITAwWyJp4t9AGfUXm9M3pFWTHVA=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": 
"2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "Zz8qI6RloveM1zrXAglLxJZT1ZA=", "path": "github.com/aws/aws-sdk-go/service/batch", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "/nO06EpnD22+Ex80gHi4UYrAvKc=", "path": "github.com/aws/aws-sdk-go/service/budgets", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "6gM3CZZgiB0JvS7EK1c31Q8L09U=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "T80IDetBz1hqJpq5Wqmx3MwCh8w=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "bYrI9mxspB0xDFZEy3OIfWuez5g=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": 
"v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "oB+M+kOmYG28V0PuI75IF6E+/w8=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "Nc3vXlV7s309PprScYpRDPQWeDQ=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "bPh7NF3mLpGMV0rIakolMPHqMyw=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "P6qyaFX9X6Nnvm3avLigjmjfYds=", "path": "github.com/aws/aws-sdk-go/service/codebuild", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "7nW1Ho2X3RcUU8FaFBhJIUeuDNw=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": 
"f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "m19PZt1B51QCWo1jxSbII2zzL6Q=", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "LKw7fnNwq17Eqy0clzS/LK89vS4=", "path": "github.com/aws/aws-sdk-go/service/codepipeline", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "aXh1KIbNX+g+tH+lh3pk++9lm3k=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentity", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "IWi9xZz+OncotjM/vJ87Iffg2Qk=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentityprovider", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { 
"checksumSHA1": "56F6Stg8hQ1kxiAEzqB0TDctW9k=", "path": "github.com/aws/aws-sdk-go/service/configservice", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "hYCwLQdIjHj8rMHLGVyUVhecI4s=", "path": "github.com/aws/aws-sdk-go/service/databasemigrationservice", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "siWpqsOY3u69XkgPF8+F8V1K0Pc=", "path": "github.com/aws/aws-sdk-go/service/dax", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "26CWoHQP/dyL2VzE5ZNd8zNzhko=", "path": "github.com/aws/aws-sdk-go/service/devicefarm", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "6g94rUHAgjcqMMTtMqKUbLU37wY=", "path": "github.com/aws/aws-sdk-go/service/directconnect", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + 
"revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "oFnS6I0u7KqnxK0/r1uoz8rTkxI=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "0TXXUPjrbOCHpX555B6suH36Nnk=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "INaeHZ2L5x6RlrcQBm4q1hFqNRM=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "uEv9kkBsVIjg7K4+Y8TVlU0Cc8o=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "3B3RtWG7IY9qhFhWGEwroeMxnPI=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": 
"v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "eoM9nF5iVMbuGOmkY33d19aHt8Y=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "dU5MPXUUOYD/E9sNncpFZ/U86Cw=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "pj8mBWT3HE0Iid6HSmhw7lmyZDU=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "VYGtTaSiajfKOVTbi9/SNmbiIac=", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "SZ7yLDZ6RvMhpWe0Goyem64kgyA=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": 
"f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "WYqHhdRNsiGGBLWlBLbOItZf+zA=", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "ae7VWg/xuXpnSD6wGumN44qEd+Q=", "path": "github.com/aws/aws-sdk-go/service/elbv2", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "NbkH6F+792jQ7BW4lGCb+vJVw58=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "5btWHj2fZrPc/zfYdJLPaOcivxI=", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "Rodm1XwZ9Ncah1NLHep0behQpXg=", 
"path": "github.com/aws/aws-sdk-go/service/gamelift", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "oDoGvSfmO2Z099ixV2HXn+SDeHE=", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "HRmbBf3dUEBAfdC2xKaoWAGeM7Y=", "path": "github.com/aws/aws-sdk-go/service/glue", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "6JlxJoy1JCArNK2qBkaJ5IV6qBc=", "path": "github.com/aws/aws-sdk-go/service/guardduty", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "oZaxMqnwl2rA+V/W0tJ3uownORI=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + 
"versionExact": "v1.12.56" }, { - "checksumSHA1": "Pg4c7tUVP15Ry9uPA3qixJXSd4I=", + "checksumSHA1": "nMdRXIfhgvEKBHnLX61Ze3EUJWU=", "path": "github.com/aws/aws-sdk-go/service/inspector", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "pZwCI4DpP5hcMa/ItKhiwo/ukd0=", "path": "github.com/aws/aws-sdk-go/service/iot", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "IoSyRZhlL0petrB28nXk5jKM9YA=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "oAFLgD0uJiVOZkFkL5dd/wUgBz4=", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "XDVse9fKF0RkAywzzgsO31AV4oc=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + 
"revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "HluEcyZNywrbKnj/aR3tXbu29d8=", "path": "github.com/aws/aws-sdk-go/service/lexmodelbuildingservice", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "wjs9YBsHx0YQH0zKBA7Ibd1UV5Y=", "path": "github.com/aws/aws-sdk-go/service/lightsail", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "4VfB5vMLNYs0y6K159YCBgo9T3c=", "path": "github.com/aws/aws-sdk-go/service/mediaconvert", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "Ox3VWHYSQq0YKmlr0paUPdr5W/0=", "path": "github.com/aws/aws-sdk-go/service/medialive", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "Rs7QtkcLl3XNPnKb8ss/AhF2X50=", "path": "github.com/aws/aws-sdk-go/service/mediapackage", - "revision": 
"f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "QjiIL8LrlhwrQw8FboF+wMNvUF0=", "path": "github.com/aws/aws-sdk-go/service/mediastore", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "ZY1SJNE03I6NL2OBJD9hlwVsqO0=", "path": "github.com/aws/aws-sdk-go/service/mediastoredata", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "ynB7Flcudp0VOqBVKZJ+23DtLHU=", "path": "github.com/aws/aws-sdk-go/service/mq", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "fpsBu+F79ktlLRwal1GugVMUDo0=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": 
"IddJCt5BrI6zRuUpFJqqnS9qrIM=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "vP1FcccUZbuUlin7ME89w1GVJtA=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "tKnVaKPOCiU6xl3/AYcdBCLtRdw=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "sCaHoPWsJXRHFbilUKwN71qFTOI=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "QZU8vR9cOIenYiH+Ywl4Gzfnlp0=", "path": "github.com/aws/aws-sdk-go/service/servicecatalog", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + 
"version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "dk6ebvA0EYgdPyc5HPKLBPEtsm4=", "path": "github.com/aws/aws-sdk-go/service/servicediscovery", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "Ex1Ma0SFGpqeNuPbeXZtsliZ3zo=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "maVXeR3WDAkONlzf04e4mDgCYxo=", "path": "github.com/aws/aws-sdk-go/service/sfn", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "ADoR4mlCW5usH8iOa6mPNSy49LM=", "path": "github.com/aws/aws-sdk-go/service/shield", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "B3CgAFSREebpsFoFOo4vrQ6u04w=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": 
"ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "FfY8w4DM8XIULdRnFhd3Um8Mj8c=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "Wx189wAbIhWChx4kVbvsyqKMF4U=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { - "checksumSHA1": "ijz0rBDeR6JP/06S+97k84FRYxc=", + "checksumSHA1": "Al7CCaQRNd22FwUZXigUEWN820M=", "path": "github.com/aws/aws-sdk-go/service/ssm", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "W1oFtpaT4TWIIJrAvFcn/XdcT7g=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "Uw4pOUxSMbx4xBHUcOUkNhtnywE=", "path": "github.com/aws/aws-sdk-go/service/swf", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - 
"revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "on6d7Hydx2bM9jkFOf1JZcZZgeY=", "path": "github.com/aws/aws-sdk-go/service/waf", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "rHqjsOndIR82gX5mSKybaRWf3UY=", "path": "github.com/aws/aws-sdk-go/service/wafregional", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "y0XODBzpJjZvR1e9F6ULItV5nG4=", "path": "github.com/aws/aws-sdk-go/service/workspaces", - "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", - "revisionTime": "2018-01-02T21:46:00Z", - "version": "v1.12.55", - "versionExact": "v1.12.55" + "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", + "revisionTime": "2018-01-05T21:48:20Z", + "version": "v1.12.56", + "versionExact": "v1.12.56" }, { "checksumSHA1": "usT4LCSQItkFvFOQT7cBlkCuGaE=", From 8d0aa9093ebed1e829975da650c06d9589307b54 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Sun, 7 Jan 2018 15:38:59 -0800 Subject: [PATCH 111/350] New Resource: aws_cloudwatch_event_permission --- aws/provider.go | 1 + ...esource_aws_cloudwatch_event_permission.go | 218 +++++++++++++ ...ce_aws_cloudwatch_event_permission_test.go | 295 ++++++++++++++++++ website/aws.erb | 4 + .../cloudwatch_event_permission.html.markdown | 42 +++ 5 files changed, 
560 insertions(+) create mode 100644 aws/resource_aws_cloudwatch_event_permission.go create mode 100644 aws/resource_aws_cloudwatch_event_permission_test.go create mode 100644 website/docs/r/cloudwatch_event_permission.html.markdown diff --git a/aws/provider.go b/aws/provider.go index b3887547b51..6a7eb29597b 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -276,6 +276,7 @@ func Provider() terraform.ResourceProvider { "aws_cloudfront_distribution": resourceAwsCloudFrontDistribution(), "aws_cloudfront_origin_access_identity": resourceAwsCloudFrontOriginAccessIdentity(), "aws_cloudtrail": resourceAwsCloudTrail(), + "aws_cloudwatch_event_permission": resourceAwsCloudWatchEventPermission(), "aws_cloudwatch_event_rule": resourceAwsCloudWatchEventRule(), "aws_cloudwatch_event_target": resourceAwsCloudWatchEventTarget(), "aws_cloudwatch_log_destination": resourceAwsCloudWatchLogDestination(), diff --git a/aws/resource_aws_cloudwatch_event_permission.go b/aws/resource_aws_cloudwatch_event_permission.go new file mode 100644 index 00000000000..8ca9db9a106 --- /dev/null +++ b/aws/resource_aws_cloudwatch_event_permission.go @@ -0,0 +1,218 @@ +package aws + +import ( + "encoding/json" + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + events "github.com/aws/aws-sdk-go/service/cloudwatchevents" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCloudWatchEventPermission() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudWatchEventPermissionCreate, + Read: resourceAwsCloudWatchEventPermissionRead, + Update: resourceAwsCloudWatchEventPermissionUpdate, + Delete: resourceAwsCloudWatchEventPermissionDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "action": { + Type: schema.TypeString, + Optional: true, + Default: "events:PutEvents", + 
ValidateFunc: validateCloudWatchEventPermissionAction, + }, + "principal": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateCloudWatchEventPermissionPrincipal, + }, + "statement_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateCloudWatchEventPermissionStatementID, + }, + }, + } +} + +func resourceAwsCloudWatchEventPermissionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatcheventsconn + + statementID := d.Get("statement_id").(string) + + input := events.PutPermissionInput{ + Action: aws.String(d.Get("action").(string)), + Principal: aws.String(d.Get("principal").(string)), + StatementId: aws.String(statementID), + } + + log.Printf("[DEBUG] Creating CloudWatch Events permission: %s", input) + _, err := conn.PutPermission(&input) + if err != nil { + return fmt.Errorf("Creating CloudWatch Events permission failed: %s", err.Error()) + } + + d.SetId(statementID) + + return resourceAwsCloudWatchEventPermissionRead(d, meta) +} + +// See also: https://docs.aws.amazon.com/AmazonCloudWatchEvents/latest/APIReference/API_DescribeEventBus.html +func resourceAwsCloudWatchEventPermissionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatcheventsconn + input := events.DescribeEventBusInput{} + var policyDoc CloudWatchEventPermissionPolicyDoc + var policyStatement *CloudWatchEventPermissionPolicyStatement + + // Especially with concurrent PutPermission calls there can be a slight delay + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Reading CloudWatch Events bus: %s", input) + debo, err := conn.DescribeEventBus(&input) + if err != nil { + return resource.NonRetryableError(fmt.Errorf("Reading CloudWatch Events permission '%s' failed: %s", d.Id(), err.Error())) + } + + if debo.Policy == nil { + return resource.RetryableError(fmt.Errorf("CloudWatch Events permission %q not found", d.Id())) + } 
+ + err = json.Unmarshal([]byte(*debo.Policy), &policyDoc) + if err != nil { + return resource.NonRetryableError(fmt.Errorf("Reading CloudWatch Events permission '%s' failed: %s", d.Id(), err.Error())) + } + + for _, statement := range policyDoc.Statements { + if statement.Sid == d.Id() { + policyStatement = &statement + return nil + } + } + if policyStatement == nil { + return resource.RetryableError(fmt.Errorf("CloudWatch Events permission %q not found", d.Id())) + } + + return nil + }) + if err != nil { + return err + } + + if policyStatement == nil { + log.Printf("[WARN] CloudWatch Events permission %q not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + d.Set("action", policyStatement.Action) + + principalString, ok := policyStatement.Principal.(string) + if ok && (principalString == "*") { + d.Set("principal", "*") + } else { + principalMap := policyStatement.Principal.(map[string]interface{}) + policyARN, err := arn.Parse(principalMap["AWS"].(string)) + if err != nil { + return fmt.Errorf("Reading CloudWatch Events permission '%s' failed: %s", d.Id(), err.Error()) + } + d.Set("principal", policyARN.AccountID) + } + d.Set("statement_id", policyStatement.Sid) + + return nil +} + +func resourceAwsCloudWatchEventPermissionUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatcheventsconn + + input := events.PutPermissionInput{ + Action: aws.String(d.Get("action").(string)), + Principal: aws.String(d.Get("principal").(string)), + StatementId: aws.String(d.Get("statement_id").(string)), + } + + log.Printf("[DEBUG] Update CloudWatch Events permission: %s", input) + _, err := conn.PutPermission(&input) + if err != nil { + return fmt.Errorf("Updating CloudWatch Events permission '%s' failed: %s", d.Id(), err.Error()) + } + + return resourceAwsCloudWatchEventPermissionRead(d, meta) +} + +func resourceAwsCloudWatchEventPermissionDelete(d *schema.ResourceData, meta interface{}) error { + conn := 
meta.(*AWSClient).cloudwatcheventsconn + input := events.RemovePermissionInput{ + StatementId: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Delete CloudWatch Events permission: %s", input) + _, err := conn.RemovePermission(&input) + if err != nil { + return fmt.Errorf("Deleting CloudWatch Events permission '%s' failed: %s", d.Id(), err.Error()) + } + return nil +} + +// https://docs.aws.amazon.com/AmazonCloudWatchEvents/latest/APIReference/API_PutPermission.html#API_PutPermission_RequestParameters +func validateCloudWatchEventPermissionAction(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if (len(value) < 1) || (len(value) > 64) { + es = append(es, fmt.Errorf("%q must be between 1 and 64 characters", k)) + } + + if !regexp.MustCompile(`^events:[a-zA-Z]+$`).MatchString(value) { + es = append(es, fmt.Errorf("%q must be: events: followed by one or more alphabetic characters", k)) + } + return +} + +// https://docs.aws.amazon.com/AmazonCloudWatchEvents/latest/APIReference/API_PutPermission.html#API_PutPermission_RequestParameters +func validateCloudWatchEventPermissionPrincipal(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if !regexp.MustCompile(`^(\d{12}|\*)$`).MatchString(value) { + es = append(es, fmt.Errorf("%q must be * or a 12 digit AWS account ID", k)) + } + return +} + +// https://docs.aws.amazon.com/AmazonCloudWatchEvents/latest/APIReference/API_PutPermission.html#API_PutPermission_RequestParameters +func validateCloudWatchEventPermissionStatementID(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if (len(value) < 1) || (len(value) > 64) { + es = append(es, fmt.Errorf("%q must be between 1 and 64 characters", k)) + } + + if !regexp.MustCompile(`^[a-zA-Z0-9-_]+$`).MatchString(value) { + es = append(es, fmt.Errorf("%q must be one or more alphanumeric, hyphen, or underscore characters", k)) + } + return +} + +// CloudWatchEventPermissionPolicyDoc represents the Policy 
attribute of DescribeEventBus +// See also: https://docs.aws.amazon.com/AmazonCloudWatchEvents/latest/APIReference/API_DescribeEventBus.html +type CloudWatchEventPermissionPolicyDoc struct { + Version string + ID string `json:"Id,omitempty"` + Statements []CloudWatchEventPermissionPolicyStatement `json:"Statement"` +} + +// CloudWatchEventPermissionPolicyStatement represents the Statement attribute of CloudWatchEventPermissionPolicyDoc +// See also: https://docs.aws.amazon.com/AmazonCloudWatchEvents/latest/APIReference/API_DescribeEventBus.html +type CloudWatchEventPermissionPolicyStatement struct { + Sid string + Effect string + Action string + Principal interface{} // "*" or {"AWS": "arn:aws:iam::111111111111:root"} + Resource string +} diff --git a/aws/resource_aws_cloudwatch_event_permission_test.go b/aws/resource_aws_cloudwatch_event_permission_test.go new file mode 100644 index 00000000000..6771f0cf517 --- /dev/null +++ b/aws/resource_aws_cloudwatch_event_permission_test.go @@ -0,0 +1,295 @@ +package aws + +import ( + "encoding/json" + "fmt" + "regexp" + "testing" + "time" + + events "github.com/aws/aws-sdk-go/service/cloudwatchevents" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSCloudWatchEventPermission_Basic(t *testing.T) { + principal1 := "111111111111" + principal2 := "*" + statementID := acctest.RandomWithPrefix(t.Name()) + resourceName := "aws_cloudwatch_event_permission.test1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSEcsServiceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic("", statementID), + ExpectError: regexp.MustCompile(`must be \* or a 12 digit AWS account ID`), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic(".", statementID), 
+ ExpectError: regexp.MustCompile(`must be \* or a 12 digit AWS account ID`), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic("12345678901", statementID), + ExpectError: regexp.MustCompile(`must be \* or a 12 digit AWS account ID`), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic("abcdefghijkl", statementID), + ExpectError: regexp.MustCompile(`must be \* or a 12 digit AWS account ID`), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic(principal1, ""), + ExpectError: regexp.MustCompile(`must be between 1 and 64 characters`), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic(principal1, acctest.RandString(65)), + ExpectError: regexp.MustCompile(`must be between 1 and 64 characters`), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic(principal1, " "), + ExpectError: regexp.MustCompile(`must be one or more alphanumeric, hyphen, or underscore characters`), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic(principal1, statementID), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudWatchEventPermissionExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "action", "events:PutEvents"), + resource.TestCheckResourceAttr(resourceName, "principal", principal1), + resource.TestCheckResourceAttr(resourceName, "statement_id", statementID), + ), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic(principal2, statementID), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudWatchEventPermissionExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "principal", principal2), + ), + }, + }, + }) +} + +func TestAccAWSCloudWatchEventPermission_Action(t *testing.T) { + principal := "111111111111" + statementID := acctest.RandomWithPrefix(t.Name()) + resourceName := "aws_cloudwatch_event_permission.test1" + + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSEcsServiceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigAction("", principal, statementID), + ExpectError: regexp.MustCompile(`must be between 1 and 64 characters`), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigAction(acctest.RandString(65), principal, statementID), + ExpectError: regexp.MustCompile(`must be between 1 and 64 characters`), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigAction("events:", principal, statementID), + ExpectError: regexp.MustCompile(`must be: events: followed by one or more alphabetic characters`), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigAction("events:1", principal, statementID), + ExpectError: regexp.MustCompile(`must be: events: followed by one or more alphabetic characters`), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigAction("events:PutEvents", principal, statementID), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudWatchEventPermissionExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "action", "events:PutEvents"), + ), + }, + }, + }) +} + +func TestAccAWSCloudWatchEventPermission_Import(t *testing.T) { + principal := "123456789012" + statementID := acctest.RandomWithPrefix(t.Name()) + resourceName := "aws_cloudwatch_event_permission.test1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudWatchEventPermissionDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic(principal, statementID), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudWatchEventPermissionExists(resourceName), + ), + }, + + resource.TestStep{ + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSCloudWatchEventPermission_Multiple(t *testing.T) { + principal1 := "111111111111" + principal2 := "222222222222" + statementID1 := acctest.RandomWithPrefix(t.Name()) + statementID2 := acctest.RandomWithPrefix(t.Name()) + resourceName1 := "aws_cloudwatch_event_permission.test1" + resourceName2 := "aws_cloudwatch_event_permission.test2" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSEcsServiceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic(principal1, statementID1), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudWatchEventPermissionExists(resourceName1), + resource.TestCheckResourceAttr(resourceName1, "principal", principal1), + resource.TestCheckResourceAttr(resourceName1, "statement_id", statementID1), + ), + }, + { + Config: testAccCheckAwsCloudWatchEventPermissionResourceConfigMultiple(principal1, statementID1, principal2, statementID2), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudWatchEventPermissionExists(resourceName1), + testAccCheckCloudWatchEventPermissionExists(resourceName2), + resource.TestCheckResourceAttr(resourceName1, "principal", principal1), + resource.TestCheckResourceAttr(resourceName1, "statement_id", statementID1), + resource.TestCheckResourceAttr(resourceName2, "principal", principal2), + resource.TestCheckResourceAttr(resourceName2, "statement_id", statementID2), + ), + }, + }, + }) +} + +func testAccCheckCloudWatchEventPermissionExists(pr string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).cloudwatcheventsconn + rs, ok := s.RootModule().Resources[pr] + if !ok { + return fmt.Errorf("Not found: %s", pr) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + debo, err := 
conn.DescribeEventBus(&events.DescribeEventBusInput{}) + if err != nil { + return fmt.Errorf("Reading CloudWatch Events bus policy for '%s' failed: %s", pr, err.Error()) + } + + if debo.Policy == nil { + return fmt.Errorf("Not found: %s", pr) + } + + var policyDoc CloudWatchEventPermissionPolicyDoc + err = json.Unmarshal([]byte(*debo.Policy), &policyDoc) + if err != nil { + return fmt.Errorf("Reading CloudWatch Events bus policy for '%s' failed: %s", pr, err.Error()) + } + + var policyStatement *CloudWatchEventPermissionPolicyStatement + for _, statement := range policyDoc.Statements { + if statement.Sid == rs.Primary.ID { + policyStatement = &statement + break + } + } + if policyStatement == nil { + return fmt.Errorf("Not found: %s", pr) + } + + return nil + } +} + +func testAccCheckCloudWatchEventPermissionDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).cloudwatcheventsconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_cloudwatch_event_permission" { + continue + } + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + input := events.DescribeEventBusInput{} + + debo, err := conn.DescribeEventBus(&input) + if err != nil { + return resource.NonRetryableError(err) + } + if debo.Policy == nil { + return nil + } + + var policyDoc CloudWatchEventPermissionPolicyDoc + err = json.Unmarshal([]byte(*debo.Policy), &policyDoc) + if err != nil { + return resource.NonRetryableError(fmt.Errorf("Reading CloudWatch Events permission '%s' failed: %s", rs.Primary.ID, err.Error())) + } + + var policyStatement *CloudWatchEventPermissionPolicyStatement + for _, statement := range policyDoc.Statements { + if statement.Sid == rs.Primary.ID { + policyStatement = &statement + break + } + } + if policyStatement == nil { + return resource.RetryableError(fmt.Errorf("CloudWatch Events permission exists: %s", rs.Primary.ID)) + } + + return nil + }) + + if err != nil { + return err + } + } + + return nil +} + +func 
testAccCheckAwsCloudWatchEventPermissionResourceConfigBasic(principal, statementID string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_event_permission" "test1" { + principal = "%[1]s" + statement_id = "%[2]s" +} +`, principal, statementID) +} + +func testAccCheckAwsCloudWatchEventPermissionResourceConfigAction(action, principal, statementID string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_event_permission" "test1" { + action = "%[1]s" + principal = "%[2]s" + statement_id = "%[3]s" +} +`, action, principal, statementID) +} + +func testAccCheckAwsCloudWatchEventPermissionResourceConfigMultiple(principal1, statementID1, principal2, statementID2 string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_event_permission" "test1" { + principal = "%[1]s" + statement_id = "%[2]s" +} + +resource "aws_cloudwatch_event_permission" "test2" { + principal = "%[3]s" + statement_id = "%[4]s" +} +`, principal1, statementID1, principal2, statementID2) +} diff --git a/website/aws.erb b/website/aws.erb index 81fba1358e4..4a46fc53de2 100644 --- a/website/aws.erb +++ b/website/aws.erb @@ -375,6 +375,10 @@ aws_cloudwatch_dashboard + > + aws_cloudwatch_event_permission + + > aws_cloudwatch_event_rule diff --git a/website/docs/r/cloudwatch_event_permission.html.markdown b/website/docs/r/cloudwatch_event_permission.html.markdown new file mode 100644 index 00000000000..ffcae819f10 --- /dev/null +++ b/website/docs/r/cloudwatch_event_permission.html.markdown @@ -0,0 +1,42 @@ +--- +layout: "aws" +page_title: "AWS: aws_cloudwatch_event_permission" +sidebar_current: "docs-aws-resource-cloudwatch-event-permission" +description: |- + Provides a resource to create a CloudWatch Events permission to support cross-account events in the current account default event bus. +--- + +# aws_cloudwatch_event_permission + +Provides a resource to create a CloudWatch Events permission to support cross-account events in the current account default event bus. 
+ +## Example Usage + +```hcl +resource "aws_cloudwatch_event_permission" "DevAccountAccess" { + principal = "123456789012" + statement_id = "DevAccountAccess" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `principal` - (Required) The 12-digit AWS account ID that you are permitting to put events to your default event bus. Specify `*` to permit any account to put events to your default event bus. +* `statement_id` - (Required) An identifier string for the external account that you are granting permissions to. +* `action` - (Optional) The action that you are enabling the other account to perform. Defaults to `events:PutEvents`. + +## Attributes Reference + +The following additional attributes are exported: + +* `id` - The statement ID of the CloudWatch Events permission. + +## Import + +CloudWatch Events permissions can be imported using the statement ID, e.g. + +```shell +$ terraform import aws_cloudwatch_event_permission.DevAccountAccess DevAccountAccess +``` From 8947b6b574453889cd48b23a6040f73324957334 Mon Sep 17 00:00:00 2001 From: Anthony Teisseire Date: Wed, 11 Oct 2017 16:25:46 +0300 Subject: [PATCH 112/350] Added tunnel options to vpn_connection --- aws/resource_aws_vpn_connection.go | 77 ++++++++++++++++++++----- aws/resource_aws_vpn_connection_test.go | 66 +++++++++++++++++++++ 2 files changed, 127 insertions(+), 16 deletions(-) diff --git a/aws/resource_aws_vpn_connection.go b/aws/resource_aws_vpn_connection.go index adc9eeef76a..ec425d7123c 100644 --- a/aws/resource_aws_vpn_connection.go +++ b/aws/resource_aws_vpn_connection.go @@ -94,6 +94,36 @@ func resourceAwsVpnConnection() *schema.Resource { ForceNew: true, }, + "tunnel1_inside_cidr": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "tunnel1_preshared_key": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Computed: true, + ForceNew: true, + }, + + "tunnel2_inside_cidr": { + Type: schema.TypeString, + 
Optional: true, + Computed: true, + ForceNew: true, + }, + + "tunnel2_preshared_key": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Computed: true, + ForceNew: true, + }, + "tags": tagsSchema(), // Begin read only attributes @@ -107,22 +137,14 @@ func resourceAwsVpnConnection() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "tunnel1_cgw_inside_address": { Type: schema.TypeString, Computed: true, }, - "tunnel1_vgw_inside_address": { Type: schema.TypeString, Computed: true, }, - - "tunnel1_preshared_key": { - Type: schema.TypeString, - Sensitive: true, - Computed: true, - }, "tunnel1_bgp_asn": { Type: schema.TypeString, Computed: true, @@ -131,26 +153,19 @@ func resourceAwsVpnConnection() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + "tunnel2_address": { Type: schema.TypeString, Computed: true, }, - "tunnel2_cgw_inside_address": { Type: schema.TypeString, Computed: true, }, - "tunnel2_vgw_inside_address": { Type: schema.TypeString, Computed: true, }, - - "tunnel2_preshared_key": { - Type: schema.TypeString, - Sensitive: true, - Computed: true, - }, "tunnel2_bgp_asn": { Type: schema.TypeString, Computed: true, @@ -159,6 +174,7 @@ func resourceAwsVpnConnection() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + "routes": { Type: schema.TypeSet, Computed: true, @@ -245,8 +261,37 @@ func resourceAwsVpnConnection() *schema.Resource { func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn + // Get the optional tunnel options + tunnel1_cidr := d.Get("tunnel1_inside_cidr").(string) + tunnel2_cidr := d.Get("tunnel2_inside_cidr").(string) + + tunnel1_psk := d.Get("tunnel1_preshared_key").(string) + tunnel2_psk := d.Get("tunnel2_preshared_key").(string) + + // Fill the tunnel options for the EC2 API + options := []*ec2.VpnTunnelOptionsSpecification{ + {}, {}, + } + + if tunnel1_cidr != "" { + options[0].TunnelInsideCidr = aws.String(tunnel1_cidr) 
+ } + + if tunnel2_cidr != "" { + options[1].TunnelInsideCidr = aws.String(tunnel2_cidr) + } + + if tunnel1_psk != "" { + options[0].PreSharedKey = aws.String(tunnel1_psk) + } + + if tunnel2_psk != "" { + options[1].PreSharedKey = aws.String(tunnel2_psk) + } + connectOpts := &ec2.VpnConnectionOptionsSpecification{ StaticRoutesOnly: aws.Bool(d.Get("static_routes_only").(bool)), + TunnelOptions: options, } createOpts := &ec2.CreateVpnConnectionInput{ diff --git a/aws/resource_aws_vpn_connection_test.go b/aws/resource_aws_vpn_connection_test.go index 8bd85c2676c..c5f8c33c3d9 100644 --- a/aws/resource_aws_vpn_connection_test.go +++ b/aws/resource_aws_vpn_connection_test.go @@ -53,10 +53,44 @@ func TestAccAWSVpnConnection_basic(t *testing.T) { }) } +func TestAccAWSVpnConnection_tunnelOptions(t *testing.T) { + rBgpAsn := acctest.RandIntRange(64512, 65534) + var vpn ec2.VpnConnection + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_vpn_connection.foo", + Providers: testAccProviders, + CheckDestroy: testAccAwsVpnConnectionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsVpnConnectionConfigTunnelOptions(rBgpAsn), + Check: resource.ComposeTestCheckFunc( + testAccAwsVpnConnection( + "aws_vpc.vpc", + "aws_vpn_gateway.vpn_gateway", + "aws_customer_gateway.customer_gateway", + "aws_vpn_connection.foo", + &vpn, + ), + resource.TestCheckResourceAttr("aws_vpn_connection.foo", "static_routes_only", "false"), + + resource.TestCheckResourceAttr("aws_vpn_connection.foo", "tunnel1_inside_cidr", "169.254.8.0/30"), + resource.TestCheckResourceAttr("aws_vpn_connection.foo", "tunnel1_preshared_key", "lookatmethisisaprivatekey1"), + + resource.TestCheckResourceAttr("aws_vpn_connection.foo", "tunnel2_inside_cidr", "169.254.9.0/30"), + resource.TestCheckResourceAttr("aws_vpn_connection.foo", "tunnel2_preshared_key", "lookatmethisisaprivatekey2"), + ), + }, + }, + }) +} + func TestAccAWSVpnConnection_withoutStaticRoutes(t 
*testing.T) { rInt := acctest.RandInt() rBgpAsn := acctest.RandIntRange(64512, 65534) var vpn ec2.VpnConnection + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, IDRefreshName: "aws_vpn_connection.foo", @@ -325,6 +359,38 @@ func testAccAwsVpnConnectionConfigUpdate(rInt, rBgpAsn int) string { `, rBgpAsn, rInt) } +func testAccAwsVpnConnectionConfigTunnelOptions(rBgpAsn int) string { + return fmt.Sprintf(` + resource "aws_vpn_gateway" "vpn_gateway" { + tags { + Name = "vpn_gateway" + } + } + + resource "aws_customer_gateway" "customer_gateway" { + bgp_asn = %d + ip_address = "178.0.0.1" + type = "ipsec.1" + tags { + Name = "main-customer-gateway" + } + } + + resource "aws_vpn_connection" "foo" { + vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" + customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" + type = "ipsec.1" + static_routes_only = false + + tunnel1_inside_cidr = "169.254.8.0/30" + tunnel1_preshared_key = "lookatmethisisaprivatekey1" + + tunnel2_inside_cidr = "169.254.9.0/30" + tunnel2_preshared_key = "lookatmethisisaprivatekey2" + } + `, rBgpAsn) +} + // Test our VPN tunnel config XML parsing const testAccAwsVpnTunnelInfoXML = ` From 4db7cc37735a1feb97b818ef48f3e5231bf21435 Mon Sep 17 00:00:00 2001 From: Mike Cowgill Date: Mon, 8 Jan 2018 01:32:06 -0800 Subject: [PATCH 113/350] Addresses #490 - DB Parameter Group Not Found (#2868) * Do not treat DBParameterGroupNotFound as an error. It's expected that DB Parameter Groups will not exist in some cases, e.g. they were manually deleted. Terraform should just plan to re-create them instead of erroring. 
Fixes #490 * adding test for unexpected removal of db parameter group --- aws/resource_aws_db_parameter_group.go | 5 +++ aws/resource_aws_db_parameter_group_test.go | 37 +++++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/aws/resource_aws_db_parameter_group.go b/aws/resource_aws_db_parameter_group.go index fe935b63627..7d2be31dcd8 100644 --- a/aws/resource_aws_db_parameter_group.go +++ b/aws/resource_aws_db_parameter_group.go @@ -134,6 +134,11 @@ func resourceAwsDbParameterGroupRead(d *schema.ResourceData, meta interface{}) e describeResp, err := rdsconn.DescribeDBParameterGroups(&describeOpts) if err != nil { + if isAWSErr(err, rds.ErrCodeDBParameterGroupNotFoundFault, "") { + log.Printf("[WARN] DB Parameter Group (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } return err } diff --git a/aws/resource_aws_db_parameter_group_test.go b/aws/resource_aws_db_parameter_group_test.go index 7348569d4ff..ab100b03b65 100644 --- a/aws/resource_aws_db_parameter_group_test.go +++ b/aws/resource_aws_db_parameter_group_test.go @@ -291,6 +291,26 @@ func TestAccAWSDBParameterGroup_basic(t *testing.T) { }) } +func TestAccAWSDBParameterGroup_Disappears(t *testing.T) { + var v rds.DBParameterGroup + groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSDBParameterGroupConfig(groupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), + testAccCheckAWSDbParamaterGroupDisappears(&v), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccAWSDBParameterGroup_namePrefix(t *testing.T) { var v rds.DBParameterGroup @@ -436,6 +456,23 @@ func TestResourceAWSDBParameterGroupName_validation(t *testing.T) { } } 
+func testAccCheckAWSDbParamaterGroupDisappears(v *rds.DBParameterGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).rdsconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_db_parameter_group" { + continue + } + _, err := conn.DeleteDBParameterGroup(&rds.DeleteDBParameterGroupInput{ + DBParameterGroupName: v.DBParameterGroupName, + }) + return err + } + return nil + } +} + func testAccCheckAWSDBParameterGroupDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).rdsconn From ad37b2ace72c5ac88e476c189bc8c9e61e38043a Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 8 Jan 2018 09:33:02 +0000 Subject: [PATCH 114/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index aafbb56e0d8..8de93f05893 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ BUG FIXES: * resource/aws_s3_bucket_object: Object tagging is now supported in GovCloud [GH-2665] * resource/aws_elasticsearch_domain: Fixed a crash when no Cloudwatch log group is configured [GH-2787] * resource/aws_s3_bucket_policy: Set the resource ID after successful creation [GH-2820] +* resource/aws_db_parameter_group: Remove group from state if it's gone [GH-2868] ## 1.6.0 (December 18, 2017) From 4546f64835978d84714cf5fbc2925095d0774477 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 8 Jan 2018 04:46:10 -0500 Subject: [PATCH 115/350] New resource: aws_guardduty_detector (#2524) * New resource: aws_guardduty_detector * r/aws_guardduty_detector: #2524 PR review updates * Use guardduty.ErrCodeBadRequestException for non-existent detector * Use %s formatter for debug logging * Simplify setting enable attribute * Perform test cases in group --- aws/config.go | 3 + aws/provider.go | 1 + aws/resource_aws_guardduty_detector.go | 106 ++++++++++++++++ aws/resource_aws_guardduty_detector_test.go | 116 ++++++++++++++++++ 
aws/resource_aws_guardduty_test.go | 26 ++++ website/aws.erb | 8 ++ .../docs/r/guardduty_detector.html.markdown | 42 +++++++ 7 files changed, 302 insertions(+) create mode 100644 aws/resource_aws_guardduty_detector.go create mode 100644 aws/resource_aws_guardduty_detector_test.go create mode 100644 aws/resource_aws_guardduty_test.go create mode 100644 website/docs/r/guardduty_detector.html.markdown diff --git a/aws/config.go b/aws/config.go index 86eb60f4f9b..4fc6d666212 100644 --- a/aws/config.go +++ b/aws/config.go @@ -51,6 +51,7 @@ import ( "github.com/aws/aws-sdk-go/service/emr" "github.com/aws/aws-sdk-go/service/firehose" "github.com/aws/aws-sdk-go/service/glacier" + "github.com/aws/aws-sdk-go/service/guardduty" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/inspector" "github.com/aws/aws-sdk-go/service/iot" @@ -185,6 +186,7 @@ type AWSClient struct { mqconn *mq.MQ opsworksconn *opsworks.OpsWorks glacierconn *glacier.Glacier + guarddutyconn *guardduty.GuardDuty codebuildconn *codebuild.CodeBuild codedeployconn *codedeploy.CodeDeploy codecommitconn *codecommit.CodeCommit @@ -420,6 +422,7 @@ func (c *Config) Client() (interface{}, error) { client.firehoseconn = firehose.New(sess) client.inspectorconn = inspector.New(sess) client.glacierconn = glacier.New(sess) + client.guarddutyconn = guardduty.New(sess) client.iotconn = iot.New(sess) client.kinesisconn = kinesis.New(awsKinesisSess) client.kmsconn = kms.New(awsKmsSess) diff --git a/aws/provider.go b/aws/provider.go index b3887547b51..529f522bbf8 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -355,6 +355,7 @@ func Provider() terraform.ResourceProvider { "aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(), "aws_flow_log": resourceAwsFlowLog(), "aws_glacier_vault": resourceAwsGlacierVault(), + "aws_guardduty_detector": resourceAwsGuardDutyDetector(), "aws_iam_access_key": resourceAwsIamAccessKey(), "aws_iam_account_alias": resourceAwsIamAccountAlias(), 
"aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(), diff --git a/aws/resource_aws_guardduty_detector.go b/aws/resource_aws_guardduty_detector.go new file mode 100644 index 00000000000..58793b96769 --- /dev/null +++ b/aws/resource_aws_guardduty_detector.go @@ -0,0 +1,106 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/guardduty" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsGuardDutyDetector() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsGuardDutyDetectorCreate, + Read: resourceAwsGuardDutyDetectorRead, + Update: resourceAwsGuardDutyDetectorUpdate, + Delete: resourceAwsGuardDutyDetectorDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "enable": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "account_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsGuardDutyDetectorCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).guarddutyconn + + input := guardduty.CreateDetectorInput{ + Enable: aws.Bool(d.Get("enable").(bool)), + } + + log.Printf("[DEBUG] Creating GuardDuty Detector: %s", input) + output, err := conn.CreateDetector(&input) + if err != nil { + return fmt.Errorf("Creating GuardDuty Detector failed: %s", err.Error()) + } + d.SetId(*output.DetectorId) + + return resourceAwsGuardDutyDetectorRead(d, meta) +} + +func resourceAwsGuardDutyDetectorRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).guarddutyconn + input := guardduty.GetDetectorInput{ + DetectorId: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Reading GuardDuty Detector: %s", input) + gdo, err := conn.GetDetector(&input) + if err != nil { + if isAWSErr(err, guardduty.ErrCodeBadRequestException, "The request is rejected because the input detectorId is not owned by 
the current account.") { + log.Printf("[WARN] GuardDuty detector %q not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Reading GuardDuty Detector '%s' failed: %s", d.Id(), err.Error()) + } + + d.Set("account_id", meta.(*AWSClient).accountid) + d.Set("enable", *gdo.Status == guardduty.DetectorStatusEnabled) + + return nil +} + +func resourceAwsGuardDutyDetectorUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).guarddutyconn + + input := guardduty.UpdateDetectorInput{ + DetectorId: aws.String(d.Id()), + Enable: aws.Bool(d.Get("enable").(bool)), + } + + log.Printf("[DEBUG] Update GuardDuty Detector: %s", input) + _, err := conn.UpdateDetector(&input) + if err != nil { + return fmt.Errorf("Updating GuardDuty Detector '%s' failed: %s", d.Id(), err.Error()) + } + + return resourceAwsGuardDutyDetectorRead(d, meta) +} + +func resourceAwsGuardDutyDetectorDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).guarddutyconn + input := guardduty.DeleteDetectorInput{ + DetectorId: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Delete GuardDuty Detector: %s", input) + _, err := conn.DeleteDetector(&input) + if err != nil { + return fmt.Errorf("Deleting GuardDuty Detector '%s' failed: %s", d.Id(), err.Error()) + } + return nil +} diff --git a/aws/resource_aws_guardduty_detector_test.go b/aws/resource_aws_guardduty_detector_test.go new file mode 100644 index 00000000000..893bdcfebd5 --- /dev/null +++ b/aws/resource_aws_guardduty_detector_test.go @@ -0,0 +1,116 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/guardduty" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func testAccAwsGuardDutyDetector_basic(t *testing.T) { + resourceName := "aws_guardduty_detector.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + 
Providers: testAccProviders, + CheckDestroy: testAccCheckAwsGuardDutyDetectorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGuardDutyDetectorConfig_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsGuardDutyDetectorExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "account_id"), + resource.TestCheckResourceAttr(resourceName, "enable", "true"), + ), + }, + { + Config: testAccGuardDutyDetectorConfig_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsGuardDutyDetectorExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enable", "false"), + ), + }, + { + Config: testAccGuardDutyDetectorConfig_basic3, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsGuardDutyDetectorExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enable", "true"), + ), + }, + }, + }) +} + +func testAccAwsGuardDutyDetector_import(t *testing.T) { + resourceName := "aws_guardduty_detector.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSesTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGuardDutyDetectorConfig_basic1, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAwsGuardDutyDetectorDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).guarddutyconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_guardduty_detector" { + continue + } + + input := &guardduty.GetDetectorInput{ + DetectorId: aws.String(rs.Primary.ID), + } + + _, err := conn.GetDetector(input) + if err != nil { + if isAWSErr(err, guardduty.ErrCodeBadRequestException, "The request is rejected because the input detectorId is not owned by the current account.") { + return nil + } + return err + } + + return fmt.Errorf("Expected GuardDuty Detector to be 
destroyed, %s found", rs.Primary.ID) + } + + return nil +} + +func testAccCheckAwsGuardDutyDetectorExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + return nil + } +} + +const testAccGuardDutyDetectorConfig_basic1 = ` +resource "aws_guardduty_detector" "test" {}` + +const testAccGuardDutyDetectorConfig_basic2 = ` +resource "aws_guardduty_detector" "test" { + enable = false +}` + +const testAccGuardDutyDetectorConfig_basic3 = ` +resource "aws_guardduty_detector" "test" { + enable = true +}` diff --git a/aws/resource_aws_guardduty_test.go b/aws/resource_aws_guardduty_test.go new file mode 100644 index 00000000000..5fb8d764f34 --- /dev/null +++ b/aws/resource_aws_guardduty_test.go @@ -0,0 +1,26 @@ +package aws + +import ( + "testing" +) + +func TestAccAWSGuardDuty(t *testing.T) { + testCases := map[string]map[string]func(t *testing.T){ + "Detector": { + "basic": testAccAwsGuardDutyDetector_basic, + "import": testAccAwsGuardDutyDetector_import, + }, + } + + for group, m := range testCases { + m := m + t.Run(group, func(t *testing.T) { + for name, tc := range m { + tc := tc + t.Run(name, func(t *testing.T) { + tc(t) + }) + } + }) + } +} diff --git a/website/aws.erb b/website/aws.erb index 81fba1358e4..c1de8467dcf 100644 --- a/website/aws.erb +++ b/website/aws.erb @@ -919,6 +919,14 @@ + > + GuardDuty Resources + + > IAM Resources diff --git a/website/docs/r/guardduty_detector.html.markdown b/website/docs/r/guardduty_detector.html.markdown new file mode 100644 index 00000000000..6ff7ee6e4c0 --- /dev/null +++ b/website/docs/r/guardduty_detector.html.markdown @@ -0,0 +1,42 @@ +--- +layout: "aws" +page_title: "AWS: aws_guardduty_detector" +sidebar_current: "docs-aws-resource-guardduty-detector" +description: |- + Provides a resource to manage a GuardDuty detector +--- + +# aws_guardduty_detector + +Provides a resource to manage a 
GuardDuty detector. + +~> **NOTE:** Deleting this resource is equivalent to "disabling" GuardDuty for an AWS region, which removes all existing findings. You can set the `enable` attribute to `false` to instead "suspend" monitoring and feedback reporting while keeping existing data. See the [Suspending or Disabling Amazon GuardDuty documentation](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_suspend-disable.html) for more information. + +## Example Usage + +```hcl +resource "aws_guardduty_detector" "MyDetector" { + enable = true +} +``` + +## Argument Reference + +The following arguments are supported: + +* `enable` - (Optional) Enable monitoring and feedback reporting. Setting to `false` is equivalent to "suspending" GuardDuty. Defaults to `true`. + +## Attributes Reference + +The following additional attributes are exported: + +* `id` - The ID of the GuardDuty detector +* `account_id` - The AWS account ID of the GuardDuty detector + +## Import + +GuardDuty detectors can be imported using the detector ID, e.g. 
+ +``` +$ terraform import aws_guardduty_detector.MyDetector 00b00fd5aecc0ab60a708659477e9617 +``` From b9f52eb2e0d2735f8d3c09902b93575fe4e3aa98 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 8 Jan 2018 09:46:35 +0000 Subject: [PATCH 116/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8de93f05893..f0c9a115edc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ FEATURES: * **New Resource:** `aws_cognito_user_pool_domain` [GH-2325] +* **New resource:** `aws_guardduty_detector` [GH-2524] ENHANCEMENTS: From 1fafa8b325600ac18300b0f1f57e595c06388186 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 8 Jan 2018 09:46:45 +0000 Subject: [PATCH 117/350] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f0c9a115edc..4d33d422087 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ FEATURES: * **New Resource:** `aws_cognito_user_pool_domain` [GH-2325] -* **New resource:** `aws_guardduty_detector` [GH-2524] +* **New Resource:** `aws_guardduty_detector` [GH-2524] ENHANCEMENTS: From ca7b7ceaa2932474abcf28bc3bdb032a717865c0 Mon Sep 17 00:00:00 2001 From: Arman Shanjani Date: Mon, 8 Jan 2018 05:27:05 -0500 Subject: [PATCH 118/350] Switch aws_appautoscaling_target's role_arn to optional (#2889) * Make aws_appautoscaling_target's role_arn optional * Add test for aws_appautoscaling_target's role_arn being optional * Fix aws_appautoscaling_target tests that fail because of AWS' automatic overwriting of role_arn --- aws/resource_aws_appautoscaling_target.go | 8 +- ...resource_aws_appautoscaling_target_test.go | 229 ++++-------------- .../r/appautoscaling_target.html.markdown | 2 +- 3 files changed, 57 insertions(+), 182 deletions(-) diff --git a/aws/resource_aws_appautoscaling_target.go b/aws/resource_aws_appautoscaling_target.go index d23f72e6dd6..4a1a39322d6 100644 --- 
a/aws/resource_aws_appautoscaling_target.go +++ b/aws/resource_aws_appautoscaling_target.go @@ -37,7 +37,8 @@ func resourceAwsAppautoscalingTarget() *schema.Resource { }, "role_arn": { Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, ForceNew: true, }, "scalable_dimension": { @@ -64,10 +65,13 @@ func resourceAwsAppautoscalingTargetCreate(d *schema.ResourceData, meta interfac targetOpts.MaxCapacity = aws.Int64(int64(d.Get("max_capacity").(int))) targetOpts.MinCapacity = aws.Int64(int64(d.Get("min_capacity").(int))) targetOpts.ResourceId = aws.String(d.Get("resource_id").(string)) - targetOpts.RoleARN = aws.String(d.Get("role_arn").(string)) targetOpts.ScalableDimension = aws.String(d.Get("scalable_dimension").(string)) targetOpts.ServiceNamespace = aws.String(d.Get("service_namespace").(string)) + if roleArn, exists := d.GetOk("role_arn"); exists { + targetOpts.RoleARN = aws.String(roleArn.(string)) + } + log.Printf("[DEBUG] Application autoscaling target create configuration %#v", targetOpts) var err error err = resource.Retry(1*time.Minute, func() *resource.RetryError { diff --git a/aws/resource_aws_appautoscaling_target_test.go b/aws/resource_aws_appautoscaling_target_test.go index 0f6b3977b32..f7b8027781b 100644 --- a/aws/resource_aws_appautoscaling_target_test.go +++ b/aws/resource_aws_appautoscaling_target_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -122,6 +123,30 @@ func TestAccAWSAppautoScalingTarget_multipleTargets(t *testing.T) { }) } +func TestAccAWSAppautoScalingTarget_optionalRoleArn(t *testing.T) { + var readTarget applicationautoscaling.ScalableTarget + + rInt := acctest.RandInt() + tableName := fmt.Sprintf("tf_acc_test_table_%d", rInt) + + r, _ := regexp.Compile("arn:aws:iam::.*:role/aws-service-role/dynamodb.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_DynamoDBTable") + + resource.Test(t, resource.TestCase{ + PreCheck: func() 
{ testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAppautoscalingTargetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAppautoscalingTarget_optionalRoleArn(tableName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAppautoscalingTargetExists("aws_appautoscaling_target.read", &readTarget), + resource.TestMatchResourceAttr("aws_appautoscaling_target.read", "role_arn", r), + ), + }, + }, + }) +} + func testAccCheckAWSAppautoscalingTargetDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).appautoscalingconn @@ -192,58 +217,6 @@ func testAccCheckAWSAppautoscalingTargetExists(n string, target *applicationauto func testAccAWSAppautoscalingTargetConfig( randClusterName string) string { return fmt.Sprintf(` -resource "aws_iam_role" "autoscale_role" { - name = "autoscalerole%s" - path = "/" - - assume_role_policy = < Date: Mon, 8 Jan 2018 10:27:36 +0000 Subject: [PATCH 119/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d33d422087..ea960014d98 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ BUG FIXES: * resource/aws_elasticsearch_domain: Fixed a crash when no Cloudwatch log group is configured [GH-2787] * resource/aws_s3_bucket_policy: Set the resource ID after successful creation [GH-2820] * resource/aws_db_parameter_group: Remove group from state if it's gone [GH-2868] +* resource/aws_appautoscaling_target: Make `role_arn` optional & computed [GH-2889] ## 1.6.0 (December 18, 2017) From a3e644f2215df2a4bf8e47c809728a628175a427 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 8 Jan 2018 12:31:26 +0000 Subject: [PATCH 120/350] test/aws_load_balancer_backend_server_policy: Randomize LB name --- ...oad_balancer_backend_server_policy_test.go | 41 +++++++++++-------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/aws/resource_aws_load_balancer_backend_server_policy_test.go 
b/aws/resource_aws_load_balancer_backend_server_policy_test.go index be91f1d0fc2..b8db5d39b5d 100644 --- a/aws/resource_aws_load_balancer_backend_server_policy_test.go +++ b/aws/resource_aws_load_balancer_backend_server_policy_test.go @@ -7,13 +7,16 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elb" - + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" tlsprovider "github.com/terraform-providers/terraform-provider-tls/tls" ) func TestAccAWSLoadBalancerBackendServerPolicy_basic(t *testing.T) { + rString := acctest.RandString(8) + lbName := fmt.Sprintf("tf-acc-lb-bsp-basic-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: map[string]terraform.ResourceProvider{ @@ -23,26 +26,26 @@ func TestAccAWSLoadBalancerBackendServerPolicy_basic(t *testing.T) { CheckDestroy: testAccCheckAWSLoadBalancerBackendServerPolicyDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic0, + Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic0(lbName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-pubkey-policy0"), testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-backend-auth-policy0"), - testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-policies-lb", "test-backend-auth-policy0", true), + testAccCheckAWSLoadBalancerBackendServerPolicyState(lbName, "test-backend-auth-policy0", true), ), }, resource.TestStep{ - Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic1, + Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic1(lbName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", 
"aws_load_balancer_policy.test-pubkey-policy0"), testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-pubkey-policy1"), testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-backend-auth-policy0"), - testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-policies-lb", "test-backend-auth-policy0", true), + testAccCheckAWSLoadBalancerBackendServerPolicyState(lbName, "test-backend-auth-policy0", true), ), }, resource.TestStep{ - Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic2, + Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic2(lbName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-policies-lb", "test-backend-auth-policy0", false), + testAccCheckAWSLoadBalancerBackendServerPolicyState(lbName, "test-backend-auth-policy0", false), ), }, }, @@ -136,7 +139,8 @@ func testAccCheckAWSLoadBalancerBackendServerPolicyState(loadBalancerName string } } -const testAccAWSLoadBalancerBackendServerPolicyConfig_basic0 = ` +func testAccAWSLoadBalancerBackendServerPolicyConfig_basic0(lbName string) string { + return fmt.Sprintf(` resource "tls_private_key" "example0" { algorithm = "RSA" } @@ -166,7 +170,7 @@ resource "aws_iam_server_certificate" "test-iam-cert0" { } resource "aws_elb" "test-lb" { - name = "test-aws-policies-lb" + name = "%s" availability_zones = ["us-west-2a"] listener { @@ -209,9 +213,11 @@ resource "aws_load_balancer_backend_server_policy" "test-backend-auth-policies-4 "${aws_load_balancer_policy.test-backend-auth-policy0.policy_name}" ] } -` +`, lbName) +} -const testAccAWSLoadBalancerBackendServerPolicyConfig_basic1 = ` +func testAccAWSLoadBalancerBackendServerPolicyConfig_basic1(lbName string) string { + return fmt.Sprintf(` resource "tls_private_key" "example0" { algorithm = "RSA" } @@ -263,7 +269,7 @@ resource "aws_iam_server_certificate" "test-iam-cert0" { } resource "aws_elb" "test-lb" { - name = 
"test-aws-policies-lb" + name = "%s" availability_zones = ["us-west-2a"] listener { @@ -316,9 +322,11 @@ resource "aws_load_balancer_backend_server_policy" "test-backend-auth-policies-4 "${aws_load_balancer_policy.test-backend-auth-policy0.policy_name}" ] } -` +`, lbName) +} -const testAccAWSLoadBalancerBackendServerPolicyConfig_basic2 = ` +func testAccAWSLoadBalancerBackendServerPolicyConfig_basic2(lbName string) string { + return fmt.Sprintf(` resource "tls_private_key" "example0" { algorithm = "RSA" } @@ -370,7 +378,7 @@ resource "aws_iam_server_certificate" "test-iam-cert0" { } resource "aws_elb" "test-lb" { - name = "test-aws-policies-lb" + name = "%s" availability_zones = ["us-west-2a"] listener { @@ -385,4 +393,5 @@ resource "aws_elb" "test-lb" { Name = "tf-acc-test" } } -` +`, lbName) +} From b221c377ed1854e6fe27d3d3f190f6ede62a17be Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 8 Jan 2018 13:40:52 +0000 Subject: [PATCH 121/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ea960014d98..5caa8828056 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ FEATURES: * **New Resource:** `aws_cognito_user_pool_domain` [GH-2325] * **New Resource:** `aws_guardduty_detector` [GH-2524] +* **New Resource:** `aws_route53_query_log` [GH-2770] ENHANCEMENTS: From c60c2f96b9c50bfb78164cbe421c94226a44480b Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 8 Jan 2018 13:27:55 +0000 Subject: [PATCH 122/350] test/aws_mq_broker: Add a passing test for VPC-enabled broker --- aws/resource_aws_mq_broker_test.go | 182 ++++++++++++++++++++++++++++- 1 file changed, 177 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_mq_broker_test.go b/aws/resource_aws_mq_broker_test.go index 0ef11a88f43..ba4c50ba832 100644 --- a/aws/resource_aws_mq_broker_test.go +++ b/aws/resource_aws_mq_broker_test.go @@ -236,7 +236,7 @@ func TestAccAwsMqBroker_basic(t *testing.T) { }) } -func 
TestAccAwsMqBroker_allFields(t *testing.T) { +func TestAccAwsMqBroker_allFieldsDefaultVpc(t *testing.T) { sgName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(5)) cfgNameBefore := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(5)) cfgNameAfter := fmt.Sprintf("tf-acc-test-updated-%s", acctest.RandString(5)) @@ -260,7 +260,7 @@ func TestAccAwsMqBroker_allFields(t *testing.T) { CheckDestroy: testAccCheckAwsMqBrokerDestroy, Steps: []resource.TestStep{ { - Config: testAccMqBrokerConfig_allFields(sgName, cfgNameBefore, cfgBodyBefore, brokerName), + Config: testAccMqBrokerConfig_allFieldsDefaultVpc(sgName, cfgNameBefore, cfgBodyBefore, brokerName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsMqBrokerExists("aws_mq_broker.test"), resource.TestCheckResourceAttr("aws_mq_broker.test", "auto_minor_version_upgrade", "true"), @@ -314,7 +314,7 @@ func TestAccAwsMqBroker_allFields(t *testing.T) { }, { // Update configuration in-place - Config: testAccMqBrokerConfig_allFields(sgName, cfgNameBefore, cfgBodyAfter, brokerName), + Config: testAccMqBrokerConfig_allFieldsDefaultVpc(sgName, cfgNameBefore, cfgBodyAfter, brokerName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsMqBrokerExists("aws_mq_broker.test"), resource.TestCheckResourceAttr("aws_mq_broker.test", "broker_name", brokerName), @@ -325,7 +325,109 @@ func TestAccAwsMqBroker_allFields(t *testing.T) { }, { // Replace configuration - Config: testAccMqBrokerConfig_allFields(sgName, cfgNameAfter, cfgBodyAfter, brokerName), + Config: testAccMqBrokerConfig_allFieldsDefaultVpc(sgName, cfgNameAfter, cfgBodyAfter, brokerName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsMqBrokerExists("aws_mq_broker.test"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "broker_name", brokerName), + resource.TestCheckResourceAttr("aws_mq_broker.test", "configuration.#", "1"), + resource.TestMatchResourceAttr("aws_mq_broker.test", "configuration.0.id", regexp.MustCompile(`^c-[a-z0-9-]+$`)), + 
resource.TestCheckResourceAttr("aws_mq_broker.test", "configuration.0.revision", "2"), + ), + }, + }, + }) +} + +func TestAccAwsMqBroker_allFieldsCustomVpc(t *testing.T) { + sgName := fmt.Sprintf("tf-acc-test-vpc-%s", acctest.RandString(5)) + cfgNameBefore := fmt.Sprintf("tf-acc-test-vpc-%s", acctest.RandString(5)) + cfgNameAfter := fmt.Sprintf("tf-acc-test-vpc-updated-%s", acctest.RandString(5)) + brokerName := fmt.Sprintf("tf-acc-test-vpc-%s", acctest.RandString(5)) + + cfgBodyBefore := ` + +` + cfgBodyAfter := ` + + + + + + +` + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsMqBrokerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMqBrokerConfig_allFieldsCustomVpc(sgName, cfgNameBefore, cfgBodyBefore, brokerName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsMqBrokerExists("aws_mq_broker.test"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "auto_minor_version_upgrade", "true"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "broker_name", brokerName), + resource.TestCheckResourceAttr("aws_mq_broker.test", "configuration.#", "1"), + resource.TestMatchResourceAttr("aws_mq_broker.test", "configuration.0.id", regexp.MustCompile(`^c-[a-z0-9-]+$`)), + resource.TestCheckResourceAttr("aws_mq_broker.test", "configuration.0.revision", "2"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "deployment_mode", "ACTIVE_STANDBY_MULTI_AZ"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "engine_type", "ActiveMQ"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "engine_version", "5.15.0"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "host_instance_type", "mq.t2.micro"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "maintenance_window_start_time.#", "1"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "maintenance_window_start_time.0.day_of_week", "TUESDAY"), + 
resource.TestCheckResourceAttr("aws_mq_broker.test", "maintenance_window_start_time.0.time_of_day", "02:00"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "maintenance_window_start_time.0.time_zone", "CET"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "publicly_accessible", "true"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "security_groups.#", "2"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "subnet_ids.#", "2"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.#", "2"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.1344916805.console_access", "true"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.1344916805.groups.#", "3"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.1344916805.groups.2456940119", "first"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.1344916805.groups.3055489385", "second"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.1344916805.groups.607264868", "third"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.1344916805.password", "SecondTestTest1234"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.1344916805.username", "SecondTest"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.3793764891.console_access", "false"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.3793764891.groups.#", "0"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.3793764891.password", "TestTest1234"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "user.3793764891.username", "Test"), + resource.TestMatchResourceAttr("aws_mq_broker.test", "arn", + regexp.MustCompile("^arn:aws:mq:[a-z0-9-]+:[0-9]{12}:broker:[a-z0-9-]+:[a-f0-9-]+$")), + resource.TestCheckResourceAttr("aws_mq_broker.test", "instances.#", "2"), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.0.console_url", + 
regexp.MustCompile(`^https://[a-f0-9-]+\.mq.[a-z0-9-]+.amazonaws.com:8162$`)), + resource.TestCheckResourceAttr("aws_mq_broker.test", "instances.0.endpoints.#", "5"), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.0.endpoints.0", regexp.MustCompile(`^ssl://[a-z0-9-\.]+:61617$`)), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.0.endpoints.1", regexp.MustCompile(`^amqp\+ssl://[a-z0-9-\.]+:5671$`)), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.0.endpoints.2", regexp.MustCompile(`^stomp\+ssl://[a-z0-9-\.]+:61614$`)), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.0.endpoints.3", regexp.MustCompile(`^mqtt\+ssl://[a-z0-9-\.]+:8883$`)), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.0.endpoints.4", regexp.MustCompile(`^wss://[a-z0-9-\.]+:61619$`)), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.1.console_url", + regexp.MustCompile(`^https://[a-f0-9-]+\.mq.[a-z0-9-]+.amazonaws.com:8162$`)), + resource.TestCheckResourceAttr("aws_mq_broker.test", "instances.1.endpoints.#", "5"), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.1.endpoints.0", regexp.MustCompile(`^ssl://[a-z0-9-\.]+:61617$`)), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.1.endpoints.1", regexp.MustCompile(`^amqp\+ssl://[a-z0-9-\.]+:5671$`)), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.1.endpoints.2", regexp.MustCompile(`^stomp\+ssl://[a-z0-9-\.]+:61614$`)), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.1.endpoints.3", regexp.MustCompile(`^mqtt\+ssl://[a-z0-9-\.]+:8883$`)), + resource.TestMatchResourceAttr("aws_mq_broker.test", "instances.1.endpoints.4", regexp.MustCompile(`^wss://[a-z0-9-\.]+:61619$`)), + ), + }, + { + // Update configuration in-place + Config: testAccMqBrokerConfig_allFieldsCustomVpc(sgName, cfgNameBefore, cfgBodyAfter, brokerName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckAwsMqBrokerExists("aws_mq_broker.test"), + resource.TestCheckResourceAttr("aws_mq_broker.test", "broker_name", brokerName), + resource.TestCheckResourceAttr("aws_mq_broker.test", "configuration.#", "1"), + resource.TestMatchResourceAttr("aws_mq_broker.test", "configuration.0.id", regexp.MustCompile(`^c-[a-z0-9-]+$`)), + resource.TestCheckResourceAttr("aws_mq_broker.test", "configuration.0.revision", "3"), + ), + }, + { + // Replace configuration + Config: testAccMqBrokerConfig_allFieldsCustomVpc(sgName, cfgNameAfter, cfgBodyAfter, brokerName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsMqBrokerExists("aws_mq_broker.test"), resource.TestCheckResourceAttr("aws_mq_broker.test", "broker_name", brokerName), @@ -447,7 +549,58 @@ resource "aws_mq_broker" "test" { }`, sgName, brokerName) } -func testAccMqBrokerConfig_allFields(sgName, cfgName, cfgBody, brokerName string) string { +func testAccMqBrokerConfig_allFieldsDefaultVpc(sgName, cfgName, cfgBody, brokerName string) string { + return fmt.Sprintf(` +resource "aws_security_group" "mq1" { + name = "%s-1" +} + +resource "aws_security_group" "mq2" { + name = "%s-2" +} + +resource "aws_mq_configuration" "test" { + name = "%s" + engine_type = "ActiveMQ" + engine_version = "5.15.0" + data = < Date: Mon, 8 Jan 2018 11:45:44 -0500 Subject: [PATCH 123/350] r/aws_ecs_service: Add health_check_grace_period_seconds attribute (#2788) * r/aws_ecs_service: Add health_check_grace_period_seconds attribute * r/aws_ecs_service: Indentation fixes for testAccAWSEcsService_healthCheckGracePeriodSeconds --- aws/resource_aws_ecs_service.go | 24 +++- aws/resource_aws_ecs_service_test.go | 167 +++++++++++++++++++++++ website/docs/r/ecs_service.html.markdown | 1 + 3 files changed, 191 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_ecs_service.go b/aws/resource_aws_ecs_service.go index e1942508400..22f8eb94550 100644 --- a/aws/resource_aws_ecs_service.go +++ b/aws/resource_aws_ecs_service.go @@ -49,6 +49,12 @@ 
func resourceAwsEcsService() *schema.Resource { Optional: true, }, + "health_check_grace_period_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateAwsEcsServiceHealthCheckGracePeriodSeconds, + }, + "launch_type": { Type: schema.TypeString, ForceNew: true, @@ -213,6 +219,10 @@ func resourceAwsEcsServiceCreate(d *schema.ResourceData, meta interface{}) error input.Cluster = aws.String(v.(string)) } + if v, ok := d.GetOk("health_check_grace_period_seconds"); ok { + input.HealthCheckGracePeriodSeconds = aws.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("launch_type"); ok { input.LaunchType = aws.String(v.(string)) } @@ -352,7 +362,7 @@ func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error { } d.Set("desired_count", service.DesiredCount) - + d.Set("health_check_grace_period_seconds", service.HealthCheckGracePeriodSeconds) d.Set("launch_type", service.LaunchType) // Save cluster in the same format @@ -469,6 +479,10 @@ func resourceAwsEcsServiceUpdate(d *schema.ResourceData, meta interface{}) error _, n := d.GetChange("desired_count") input.DesiredCount = aws.Int64(int64(n.(int))) } + if d.HasChange("health_check_grace_period_seconds") { + _, n := d.GetChange("health_check_grace_period_seconds") + input.HealthCheckGracePeriodSeconds = aws.Int64(int64(n.(int))) + } if d.HasChange("task_definition") { _, n := d.GetChange("task_definition") input.TaskDefinition = aws.String(n.(string)) @@ -646,3 +660,11 @@ func parseTaskDefinition(taskDefinition string) (string, string, error) { return matches[0][1], matches[0][2], nil } + +func validateAwsEcsServiceHealthCheckGracePeriodSeconds(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if (value < 0) || (value > 1800) { + errors = append(errors, fmt.Errorf("%q must be between 0 and 1800", k)) + } + return +} diff --git a/aws/resource_aws_ecs_service_test.go b/aws/resource_aws_ecs_service_test.go index 2b10eaeaac4..4f754891d29 100644 --- 
a/aws/resource_aws_ecs_service_test.go +++ b/aws/resource_aws_ecs_service_test.go @@ -182,6 +182,41 @@ func TestAccAWSEcsServiceWithRenamedCluster(t *testing.T) { }) } +func TestAccAWSEcsService_healthCheckGracePeriodSeconds(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc") + resourceName := "aws_ecs_service.with_alb" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSEcsServiceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(rName, t.Name(), -1), + ExpectError: regexp.MustCompile(`must be between 0 and 1800`), + }, + { + Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(rName, t.Name(), 1801), + ExpectError: regexp.MustCompile(`must be between 0 and 1800`), + }, + { + Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(rName, t.Name(), 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSEcsServiceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "health_check_grace_period_seconds", "300"), + ), + }, + { + Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(rName, t.Name(), 600), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSEcsServiceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "health_check_grace_period_seconds", "600"), + ), + }, + }, + }) +} + func TestAccAWSEcsService_withIamRole(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -698,6 +733,138 @@ resource "aws_ecs_service" "main" { `, rInt, rInt, rInt, rInt) } +func testAccAWSEcsService_healthCheckGracePeriodSeconds(rName string, testName string, healthCheckGracePeriodSeconds int) string { + return fmt.Sprintf(` +data "aws_availability_zones" "available" {} + +resource "aws_vpc" "main" { + cidr_block = "10.10.0.0/16" + tags { + Name = "%[2]s" + } +} + +resource "aws_subnet" "main" { + count = 2 + cidr_block = 
"${cidrsubnet(aws_vpc.main.cidr_block, 8, count.index)}" + availability_zone = "${data.aws_availability_zones.available.names[count.index]}" + vpc_id = "${aws_vpc.main.id}" +} + +resource "aws_ecs_cluster" "main" { + name = "%[1]s" +} + +resource "aws_ecs_task_definition" "with_lb_changes" { + family = "%[1]s" + container_definitions = < Date: Mon, 8 Jan 2018 16:46:11 +0000 Subject: [PATCH 124/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5caa8828056..f1bd1d0a3bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ ENHANCEMENTS: * resource/aws_ses_event_destination: Add support for SNS destinations [GH-1737] * resource/aws_iam_role: Delete inline policies when `force_detach_policies = true` [GH-2388] * resource/aws_lb_target_group: Improve `health_check` validation [GH-2580] +* resource/aws_ecs_service: Add `health_check_grace_period_seconds` attribute [GH-2788] * data-source/aws_iam_server_certificate: Add support for retrieving public key [GH-2749] BUG FIXES: From dd7722f42a44f019cdfac9d032c76ca3866eb31f Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 8 Jan 2018 16:58:56 +0000 Subject: [PATCH 125/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f1bd1d0a3bf..82e8bf3da7b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ BUG FIXES: * resource/aws_s3_bucket_policy: Set the resource ID after successful creation [GH-2820] * resource/aws_db_parameter_group: Remove group from state if it's gone [GH-2868] * resource/aws_appautoscaling_target: Make `role_arn` optional & computed [GH-2889] +* resource/aws_ssm_maintenance_window: Respect `enabled` during updates [GH-2818] ## 1.6.0 (December 18, 2017) From b9b64ba46f9c3dcf90617d0d5e922764c94e2915 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 8 Jan 2018 12:03:32 -0600 Subject: [PATCH 126/350] Update CHANGELOG.md --- CHANGELOG.md | 3 
+++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 82e8bf3da7b..a41c0142fe7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,7 +25,10 @@ ENHANCEMENTS: * resource/aws_iam_role: Delete inline policies when `force_detach_policies = true` [GH-2388] * resource/aws_lb_target_group: Improve `health_check` validation [GH-2580] * resource/aws_ecs_service: Add `health_check_grace_period_seconds` attribute [GH-2788] +* resource/aws_vpc_peering_connection: Add support for cross-region VPC peering [GH-2508] +* resource/aws_vpc_peering_connection_accepter: Add support for cross-region VPC peering [GH-2508] * data-source/aws_iam_server_certificate: Add support for retrieving public key [GH-2749] +* data-source/aws_vpc_peering_connection: Add support for cross-region VPC peering [GH-2508] BUG FIXES: From e38174ed7a77b204a3d376d5f45e0acf289b8024 Mon Sep 17 00:00:00 2001 From: "xiaowei.wang" Date: Mon, 8 Jan 2018 19:45:34 +0100 Subject: [PATCH 127/350] document import for resource codecommit_repository --- website/docs/r/code_commit_repository.html.markdown | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/website/docs/r/code_commit_repository.html.markdown b/website/docs/r/code_commit_repository.html.markdown index a2853b6f60a..70e1cc3758b 100644 --- a/website/docs/r/code_commit_repository.html.markdown +++ b/website/docs/r/code_commit_repository.html.markdown @@ -38,4 +38,12 @@ The following attributes are exported: * `repository_id` - The ID of the repository * `arn` - The ARN of the repository * `clone_url_http` - The URL to use for cloning the repository over HTTPS. -* `clone_url_ssh` - The URL to use for cloning the repository over SSH. \ No newline at end of file +* `clone_url_ssh` - The URL to use for cloning the repository over SSH. + +## Import + +Codecommit repository can be imported using repository name, e.g. 
+ +``` +$ terraform import aws_codecommit_repository.imported ExistingRepo +``` From a7287174d81de7aa526541baf66b4caa23f84df7 Mon Sep 17 00:00:00 2001 From: VEBER Arnaud Date: Tue, 9 Jan 2018 10:55:58 +0100 Subject: [PATCH 128/350] chore(vendor): bump aws-sdk-go to v1.12.57 --- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/codedeploy/api.go | 737 ++++++++++++++- .../aws/aws-sdk-go/service/codedeploy/doc.go | 42 +- .../aws-sdk-go/service/codedeploy/errors.go | 83 ++ .../aws/aws-sdk-go/service/route53/api.go | 10 + .../aws/aws-sdk-go/service/route53/errors.go | 13 + vendor/vendor.json | 846 +++++++++--------- 7 files changed, 1277 insertions(+), 456 deletions(-) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 9a1ae5b23fe..f040f8cbada 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.12.56" +const SDKVersion = "1.12.57" diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go index 6e281cb133e..838f695ecde 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go @@ -71,6 +71,9 @@ func (c *CodeDeploy) AddTagsToOnPremisesInstancesRequest(input *AddTagsToOnPremi // * ErrCodeInstanceNameRequiredException "InstanceNameRequiredException" // An on-premises instance name was not specified. // +// * ErrCodeInvalidInstanceNameException "InvalidInstanceNameException" +// The specified on-premises instance name was specified in an invalid format. +// // * ErrCodeTagRequiredException "TagRequiredException" // A tag was not specified. 
// @@ -818,6 +821,9 @@ func (c *CodeDeploy) CreateApplicationRequest(input *CreateApplicationInput) (re // * ErrCodeApplicationLimitExceededException "ApplicationLimitExceededException" // More applications were attempted to be created than are allowed. // +// * ErrCodeInvalidComputePlatformException "InvalidComputePlatformException" +// The computePlatform is invalid. The computePlatform should be Lambda or Server. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateApplication func (c *CodeDeploy) CreateApplication(input *CreateApplicationInput) (*CreateApplicationOutput, error) { req, out := c.CreateApplicationRequest(input) @@ -961,6 +967,25 @@ func (c *CodeDeploy) CreateDeploymentRequest(input *CreateDeploymentInput) (req // but weren't part of the previous successful deployment. Valid values include // "DISALLOW", "OVERWRITE", and "RETAIN". // +// * ErrCodeInvalidRoleException "InvalidRoleException" +// The service role ARN was specified in an invalid format. Or, if an Auto Scaling +// group was specified, the specified service role does not grant the appropriate +// permissions to Auto Scaling. +// +// * ErrCodeInvalidAutoScalingGroupException "InvalidAutoScalingGroupException" +// The Auto Scaling group was specified in an invalid format or does not exist. +// +// * ErrCodeThrottlingException "ThrottlingException" +// An API function was called too frequently. +// +// * ErrCodeInvalidUpdateOutdatedInstancesOnlyValueException "InvalidUpdateOutdatedInstancesOnlyValueException" +// The UpdateOutdatedInstancesOnly value is invalid. For AWS Lambda deployments, +// false is expected. For EC2/On-premises deployments, true or false is expected. +// +// * ErrCodeInvalidIgnoreApplicationStopFailuresValueException "InvalidIgnoreApplicationStopFailuresValueException" +// The IgnoreApplicationStopFailures value is invalid. For AWS Lambda deployments, +// false is expected. For EC2/On-premises deployments, true or false is expected. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateDeployment func (c *CodeDeploy) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploymentOutput, error) { req, out := c.CreateDeploymentRequest(input) @@ -1053,6 +1078,13 @@ func (c *CodeDeploy) CreateDeploymentConfigRequest(input *CreateDeploymentConfig // * ErrCodeDeploymentConfigLimitExceededException "DeploymentConfigLimitExceededException" // The deployment configurations limit was exceeded. // +// * ErrCodeInvalidComputePlatformException "InvalidComputePlatformException" +// The computePlatform is invalid. The computePlatform should be Lambda or Server. +// +// * ErrCodeInvalidTrafficRoutingConfigurationException "InvalidTrafficRoutingConfigurationException" +// The configuration that specifies how traffic is routed during a deployment +// is invalid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateDeploymentConfig func (c *CodeDeploy) CreateDeploymentConfig(input *CreateDeploymentConfigInput) (*CreateDeploymentConfigOutput, error) { req, out := c.CreateDeploymentConfigRequest(input) @@ -1230,6 +1262,9 @@ func (c *CodeDeploy) CreateDeploymentGroupRequest(input *CreateDeploymentGroupIn // The number of tag groups included in the tag set list exceeded the maximum // allowed limit of 3. // +// * ErrCodeInvalidInputException "InvalidInputException" +// The specified input was specified in an invalid format. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateDeploymentGroup func (c *CodeDeploy) CreateDeploymentGroup(input *CreateDeploymentGroupInput) (*CreateDeploymentGroupOutput, error) { req, out := c.CreateDeploymentGroupRequest(input) @@ -1522,6 +1557,97 @@ func (c *CodeDeploy) DeleteDeploymentGroupWithContext(ctx aws.Context, input *De return out, req.Send() } +const opDeleteGitHubAccountToken = "DeleteGitHubAccountToken" + +// DeleteGitHubAccountTokenRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGitHubAccountToken operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteGitHubAccountToken for more information on using the DeleteGitHubAccountToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteGitHubAccountTokenRequest method. 
+// req, resp := client.DeleteGitHubAccountTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeleteGitHubAccountToken +func (c *CodeDeploy) DeleteGitHubAccountTokenRequest(input *DeleteGitHubAccountTokenInput) (req *request.Request, output *DeleteGitHubAccountTokenOutput) { + op := &request.Operation{ + Name: opDeleteGitHubAccountToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGitHubAccountTokenInput{} + } + + output = &DeleteGitHubAccountTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteGitHubAccountToken API operation for AWS CodeDeploy. +// +// Deletes a GitHub account connection. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeDeploy's +// API operation DeleteGitHubAccountToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeGitHubAccountTokenNameRequiredException "GitHubAccountTokenNameRequiredException" +// The call is missing a required GitHub account connection name. +// +// * ErrCodeGitHubAccountTokenDoesNotExistException "GitHubAccountTokenDoesNotExistException" +// No GitHub account connection exists with the named specified in the call. +// +// * ErrCodeInvalidGitHubAccountTokenNameException "InvalidGitHubAccountTokenNameException" +// The format of the specified GitHub account connection name is invalid. +// +// * ErrCodeResourceValidationException "ResourceValidationException" +// The specified resource could not be validated. +// +// * ErrCodeOperationNotSupportedException "OperationNotSupportedException" +// The API used does not support the deployment. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeleteGitHubAccountToken +func (c *CodeDeploy) DeleteGitHubAccountToken(input *DeleteGitHubAccountTokenInput) (*DeleteGitHubAccountTokenOutput, error) { + req, out := c.DeleteGitHubAccountTokenRequest(input) + return out, req.Send() +} + +// DeleteGitHubAccountTokenWithContext is the same as DeleteGitHubAccountToken with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteGitHubAccountToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeDeploy) DeleteGitHubAccountTokenWithContext(ctx aws.Context, input *DeleteGitHubAccountTokenInput, opts ...request.Option) (*DeleteGitHubAccountTokenOutput, error) { + req, out := c.DeleteGitHubAccountTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeregisterOnPremisesInstance = "DeregisterOnPremisesInstance" // DeregisterOnPremisesInstanceRequest generates a "aws/request.Request" representing the @@ -3189,6 +3315,9 @@ func (c *CodeDeploy) ListGitHubAccountTokenNamesRequest(input *ListGitHubAccount // * ErrCodeResourceValidationException "ResourceValidationException" // The specified resource could not be validated. // +// * ErrCodeOperationNotSupportedException "OperationNotSupportedException" +// The API used does not support the deployment. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/ListGitHubAccountTokenNames func (c *CodeDeploy) ListGitHubAccountTokenNames(input *ListGitHubAccountTokenNamesInput) (*ListGitHubAccountTokenNamesOutput, error) { req, out := c.ListGitHubAccountTokenNamesRequest(input) @@ -3300,6 +3429,107 @@ func (c *CodeDeploy) ListOnPremisesInstancesWithContext(ctx aws.Context, input * return out, req.Send() } +const opPutLifecycleEventHookExecutionStatus = "PutLifecycleEventHookExecutionStatus" + +// PutLifecycleEventHookExecutionStatusRequest generates a "aws/request.Request" representing the +// client's request for the PutLifecycleEventHookExecutionStatus operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutLifecycleEventHookExecutionStatus for more information on using the PutLifecycleEventHookExecutionStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutLifecycleEventHookExecutionStatusRequest method. 
+// req, resp := client.PutLifecycleEventHookExecutionStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/PutLifecycleEventHookExecutionStatus +func (c *CodeDeploy) PutLifecycleEventHookExecutionStatusRequest(input *PutLifecycleEventHookExecutionStatusInput) (req *request.Request, output *PutLifecycleEventHookExecutionStatusOutput) { + op := &request.Operation{ + Name: opPutLifecycleEventHookExecutionStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutLifecycleEventHookExecutionStatusInput{} + } + + output = &PutLifecycleEventHookExecutionStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutLifecycleEventHookExecutionStatus API operation for AWS CodeDeploy. +// +// Sets the result of a Lambda validation function. The function validates one +// or both lifecycle events (BeforeAllowTraffic and AfterAllowTraffic) and returns +// Succeeded or Failed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeDeploy's +// API operation PutLifecycleEventHookExecutionStatus for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidLifecycleEventHookExecutionStatusException "InvalidLifecycleEventHookExecutionStatusException" +// The result of a Lambda validation function that verifies a lifecycle event +// is invalid. It should return Succeeded or Failed. +// +// * ErrCodeInvalidLifecycleEventHookExecutionIdException "InvalidLifecycleEventHookExecutionIdException" +// A lifecycle event hook is invalid. Review the hooks section in your AppSpec +// file to ensure the lifecycle events and hooks functions are valid. 
+// +// * ErrCodeLifecycleEventAlreadyCompletedException "LifecycleEventAlreadyCompletedException" +// An attempt to return the status of an already completed lifecycle event occurred. +// +// * ErrCodeDeploymentIdRequiredException "DeploymentIdRequiredException" +// At least one deployment ID must be specified. +// +// * ErrCodeDeploymentDoesNotExistException "DeploymentDoesNotExistException" +// The deployment does not exist with the applicable IAM user or AWS account. +// +// * ErrCodeInvalidDeploymentIdException "InvalidDeploymentIdException" +// At least one of the deployment IDs was specified in an invalid format. +// +// * ErrCodeUnsupportedActionForDeploymentTypeException "UnsupportedActionForDeploymentTypeException" +// A call was submitted that is not supported for the specified deployment type. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/PutLifecycleEventHookExecutionStatus +func (c *CodeDeploy) PutLifecycleEventHookExecutionStatus(input *PutLifecycleEventHookExecutionStatusInput) (*PutLifecycleEventHookExecutionStatusOutput, error) { + req, out := c.PutLifecycleEventHookExecutionStatusRequest(input) + return out, req.Send() +} + +// PutLifecycleEventHookExecutionStatusWithContext is the same as PutLifecycleEventHookExecutionStatus with the addition of +// the ability to pass a context and additional request options. +// +// See PutLifecycleEventHookExecutionStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CodeDeploy) PutLifecycleEventHookExecutionStatusWithContext(ctx aws.Context, input *PutLifecycleEventHookExecutionStatusInput, opts ...request.Option) (*PutLifecycleEventHookExecutionStatusOutput, error) { + req, out := c.PutLifecycleEventHookExecutionStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRegisterApplicationRevision = "RegisterApplicationRevision" // RegisterApplicationRevisionRequest generates a "aws/request.Request" representing the @@ -3569,6 +3799,9 @@ func (c *CodeDeploy) RemoveTagsFromOnPremisesInstancesRequest(input *RemoveTagsF // * ErrCodeInstanceNameRequiredException "InstanceNameRequiredException" // An on-premises instance name was not specified. // +// * ErrCodeInvalidInstanceNameException "InvalidInstanceNameException" +// The specified on-premises instance name was specified in an invalid format. +// // * ErrCodeTagRequiredException "TagRequiredException" // A tag was not specified. // @@ -4036,6 +4269,9 @@ func (c *CodeDeploy) UpdateDeploymentGroupRequest(input *UpdateDeploymentGroupIn // The number of tag groups included in the tag set list exceeded the maximum // allowed limit of 3. // +// * ErrCodeInvalidInputException "InvalidInputException" +// The specified input was specified in an invalid format. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/UpdateDeploymentGroup func (c *CodeDeploy) UpdateDeploymentGroup(input *UpdateDeploymentGroupInput) (*UpdateDeploymentGroupOutput, error) { req, out := c.UpdateDeploymentGroupRequest(input) @@ -4219,6 +4455,10 @@ type ApplicationInfo struct { // The application name. ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + // The destination platform type for deployment of the application (Lambda or + // Server). + ComputePlatform *string `locationName:"computePlatform" type:"string" enum:"ComputePlatform"` + // The time at which the application was created. 
CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"` @@ -4252,6 +4492,12 @@ func (s *ApplicationInfo) SetApplicationName(v string) *ApplicationInfo { return s } +// SetComputePlatform sets the ComputePlatform field's value. +func (s *ApplicationInfo) SetComputePlatform(v string) *ApplicationInfo { + s.ComputePlatform = &v + return s +} + // SetCreateTime sets the CreateTime field's value. func (s *ApplicationInfo) SetCreateTime(v time.Time) *ApplicationInfo { s.CreateTime = &v @@ -4446,7 +4692,9 @@ type BatchGetApplicationsInput struct { _ struct{} `type:"structure"` // A list of application names separated by spaces. - ApplicationNames []*string `locationName:"applicationNames" type:"list"` + // + // ApplicationNames is a required field + ApplicationNames []*string `locationName:"applicationNames" type:"list" required:"true"` } // String returns the string representation @@ -4459,6 +4707,19 @@ func (s BatchGetApplicationsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetApplicationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetApplicationsInput"} + if s.ApplicationNames == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetApplicationNames sets the ApplicationNames field's value. func (s *BatchGetApplicationsInput) SetApplicationNames(v []*string) *BatchGetApplicationsInput { s.ApplicationNames = v @@ -4676,7 +4937,9 @@ type BatchGetDeploymentsInput struct { _ struct{} `type:"structure"` // A list of deployment IDs, separated by spaces. 
- DeploymentIds []*string `locationName:"deploymentIds" type:"list"` + // + // DeploymentIds is a required field + DeploymentIds []*string `locationName:"deploymentIds" type:"list" required:"true"` } // String returns the string representation @@ -4689,6 +4952,19 @@ func (s BatchGetDeploymentsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetDeploymentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetDeploymentsInput"} + if s.DeploymentIds == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDeploymentIds sets the DeploymentIds field's value. func (s *BatchGetDeploymentsInput) SetDeploymentIds(v []*string) *BatchGetDeploymentsInput { s.DeploymentIds = v @@ -4726,7 +5002,9 @@ type BatchGetOnPremisesInstancesInput struct { _ struct{} `type:"structure"` // The names of the on-premises instances about which to get information. - InstanceNames []*string `locationName:"instanceNames" type:"list"` + // + // InstanceNames is a required field + InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` } // String returns the string representation @@ -4739,6 +5017,19 @@ func (s BatchGetOnPremisesInstancesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetOnPremisesInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetOnPremisesInstancesInput"} + if s.InstanceNames == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetInstanceNames sets the InstanceNames field's value. 
func (s *BatchGetOnPremisesInstancesInput) SetInstanceNames(v []*string) *BatchGetOnPremisesInstancesInput { s.InstanceNames = v @@ -4908,6 +5199,9 @@ type CreateApplicationInput struct { // // ApplicationName is a required field ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // The destination platform type for the deployment (Lambda or Server). + ComputePlatform *string `locationName:"computePlatform" type:"string" enum:"ComputePlatform"` } // String returns the string representation @@ -4942,6 +5236,12 @@ func (s *CreateApplicationInput) SetApplicationName(v string) *CreateApplication return s } +// SetComputePlatform sets the ComputePlatform field's value. +func (s *CreateApplicationInput) SetComputePlatform(v string) *CreateApplicationInput { + s.ComputePlatform = &v + return s +} + // Represents the output of a CreateApplication operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateApplicationOutput type CreateApplicationOutput struct { @@ -4972,6 +5272,9 @@ func (s *CreateApplicationOutput) SetApplicationId(v string) *CreateApplicationO type CreateDeploymentConfigInput struct { _ struct{} `type:"structure"` + // The destination platform type for the deployment (Lambda or Server>). + ComputePlatform *string `locationName:"computePlatform" type:"string" enum:"ComputePlatform"` + // The name of the deployment configuration to create. // // DeploymentConfigName is a required field @@ -4996,9 +5299,10 @@ type CreateDeploymentConfigInput struct { // // For example, to set a minimum of 95% healthy instance, specify a type of // FLEET_PERCENT and a value of 95. 
- // - // MinimumHealthyHosts is a required field - MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure" required:"true"` + MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure"` + + // The configuration that specifies how the deployment traffic will be routed. + TrafficRoutingConfig *TrafficRoutingConfig `locationName:"trafficRoutingConfig" type:"structure"` } // String returns the string representation @@ -5020,9 +5324,6 @@ func (s *CreateDeploymentConfigInput) Validate() error { if s.DeploymentConfigName != nil && len(*s.DeploymentConfigName) < 1 { invalidParams.Add(request.NewErrParamMinLen("DeploymentConfigName", 1)) } - if s.MinimumHealthyHosts == nil { - invalidParams.Add(request.NewErrParamRequired("MinimumHealthyHosts")) - } if invalidParams.Len() > 0 { return invalidParams @@ -5030,6 +5331,12 @@ func (s *CreateDeploymentConfigInput) Validate() error { return nil } +// SetComputePlatform sets the ComputePlatform field's value. +func (s *CreateDeploymentConfigInput) SetComputePlatform(v string) *CreateDeploymentConfigInput { + s.ComputePlatform = &v + return s +} + // SetDeploymentConfigName sets the DeploymentConfigName field's value. func (s *CreateDeploymentConfigInput) SetDeploymentConfigName(v string) *CreateDeploymentConfigInput { s.DeploymentConfigName = &v @@ -5042,6 +5349,12 @@ func (s *CreateDeploymentConfigInput) SetMinimumHealthyHosts(v *MinimumHealthyHo return s } +// SetTrafficRoutingConfig sets the TrafficRoutingConfig field's value. +func (s *CreateDeploymentConfigInput) SetTrafficRoutingConfig(v *TrafficRoutingConfig) *CreateDeploymentConfigInput { + s.TrafficRoutingConfig = v + return s +} + // Represents the output of a CreateDeploymentConfig operation. 
// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateDeploymentConfigOutput type CreateDeploymentConfigOutput struct { @@ -5697,11 +6010,64 @@ func (s *DeleteDeploymentGroupOutput) SetHooksNotCleanedUp(v []*AutoScalingGroup return s } +// Represents the input of a DeleteGitHubAccount operation. +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeleteGitHubAccountTokenInput +type DeleteGitHubAccountTokenInput struct { + _ struct{} `type:"structure"` + + // The name of the GitHub account connection to delete. + TokenName *string `locationName:"tokenName" type:"string"` +} + +// String returns the string representation +func (s DeleteGitHubAccountTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGitHubAccountTokenInput) GoString() string { + return s.String() +} + +// SetTokenName sets the TokenName field's value. +func (s *DeleteGitHubAccountTokenInput) SetTokenName(v string) *DeleteGitHubAccountTokenInput { + s.TokenName = &v + return s +} + +// Represents the output of a DeleteGitHubAccountToken operation. +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeleteGitHubAccountTokenOutput +type DeleteGitHubAccountTokenOutput struct { + _ struct{} `type:"structure"` + + // The name of the GitHub account connection that was deleted. + TokenName *string `locationName:"tokenName" type:"string"` +} + +// String returns the string representation +func (s DeleteGitHubAccountTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGitHubAccountTokenOutput) GoString() string { + return s.String() +} + +// SetTokenName sets the TokenName field's value. +func (s *DeleteGitHubAccountTokenOutput) SetTokenName(v string) *DeleteGitHubAccountTokenOutput { + s.TokenName = &v + return s +} + // Information about a deployment configuration. 
// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeploymentConfigInfo type DeploymentConfigInfo struct { _ struct{} `type:"structure"` + // The destination platform type for the deployment (Lambda or Server). + ComputePlatform *string `locationName:"computePlatform" type:"string" enum:"ComputePlatform"` + // The time at which the deployment configuration was created. CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"` @@ -5713,6 +6079,10 @@ type DeploymentConfigInfo struct { // Information about the number or percentage of minimum healthy instance. MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure"` + + // The configuration specifying how the deployment traffic will be routed. Only + // deployments with a Lambda compute platform can specify this. + TrafficRoutingConfig *TrafficRoutingConfig `locationName:"trafficRoutingConfig" type:"structure"` } // String returns the string representation @@ -5725,6 +6095,12 @@ func (s DeploymentConfigInfo) GoString() string { return s.String() } +// SetComputePlatform sets the ComputePlatform field's value. +func (s *DeploymentConfigInfo) SetComputePlatform(v string) *DeploymentConfigInfo { + s.ComputePlatform = &v + return s +} + // SetCreateTime sets the CreateTime field's value. func (s *DeploymentConfigInfo) SetCreateTime(v time.Time) *DeploymentConfigInfo { s.CreateTime = &v @@ -5749,6 +6125,12 @@ func (s *DeploymentConfigInfo) SetMinimumHealthyHosts(v *MinimumHealthyHosts) *D return s } +// SetTrafficRoutingConfig sets the TrafficRoutingConfig field's value. +func (s *DeploymentConfigInfo) SetTrafficRoutingConfig(v *TrafficRoutingConfig) *DeploymentConfigInfo { + s.TrafficRoutingConfig = v + return s +} + // Information about a deployment group. 
// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeploymentGroupInfo type DeploymentGroupInfo struct { @@ -5770,6 +6152,9 @@ type DeploymentGroupInfo struct { // Information about blue/green deployment options for a deployment group. BlueGreenDeploymentConfiguration *BlueGreenDeploymentConfiguration `locationName:"blueGreenDeploymentConfiguration" type:"structure"` + // The destination platform type for the deployment group (Lambda or Server). + ComputePlatform *string `locationName:"computePlatform" type:"string" enum:"ComputePlatform"` + // The deployment configuration name. DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` @@ -5863,6 +6248,12 @@ func (s *DeploymentGroupInfo) SetBlueGreenDeploymentConfiguration(v *BlueGreenDe return s } +// SetComputePlatform sets the ComputePlatform field's value. +func (s *DeploymentGroupInfo) SetComputePlatform(v string) *DeploymentGroupInfo { + s.ComputePlatform = &v + return s +} + // SetDeploymentConfigName sets the DeploymentConfigName field's value. func (s *DeploymentGroupInfo) SetDeploymentConfigName(v string) *DeploymentGroupInfo { s.DeploymentConfigName = &v @@ -5954,7 +6345,7 @@ type DeploymentInfo struct { // Provides information about the results of a deployment, such as whether instances // in the original environment in a blue/green deployment were not terminated. - AdditionalDeploymentStatusInfo *string `locationName:"additionalDeploymentStatusInfo" type:"string"` + AdditionalDeploymentStatusInfo *string `locationName:"additionalDeploymentStatusInfo" deprecated:"true" type:"string"` // The application name. ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` @@ -5969,6 +6360,9 @@ type DeploymentInfo struct { // A timestamp indicating when the deployment was complete. 
CompleteTime *time.Time `locationName:"completeTime" type:"timestamp" timestampFormat:"unix"` + // The destination platform type for the deployment (Lambda or Server). + ComputePlatform *string `locationName:"computePlatform" type:"string" enum:"ComputePlatform"` + // A timestamp indicating when the deployment was created. CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"` @@ -5993,6 +6387,9 @@ type DeploymentInfo struct { // A summary of the deployment status of the instances in the deployment. DeploymentOverview *DeploymentOverview `locationName:"deploymentOverview" type:"structure"` + // Messages that contain information about the status of a deployment. + DeploymentStatusMessages []*string `locationName:"deploymentStatusMessages" type:"list"` + // Information about the type of deployment, either in-place or blue/green, // you want to run and whether to route deployment traffic behind a load balancer. DeploymentStyle *DeploymentStyle `locationName:"deploymentStyle" type:"structure"` @@ -6108,6 +6505,12 @@ func (s *DeploymentInfo) SetCompleteTime(v time.Time) *DeploymentInfo { return s } +// SetComputePlatform sets the ComputePlatform field's value. +func (s *DeploymentInfo) SetComputePlatform(v string) *DeploymentInfo { + s.ComputePlatform = &v + return s +} + // SetCreateTime sets the CreateTime field's value. func (s *DeploymentInfo) SetCreateTime(v time.Time) *DeploymentInfo { s.CreateTime = &v @@ -6144,6 +6547,12 @@ func (s *DeploymentInfo) SetDeploymentOverview(v *DeploymentOverview) *Deploymen return s } +// SetDeploymentStatusMessages sets the DeploymentStatusMessages field's value. +func (s *DeploymentInfo) SetDeploymentStatusMessages(v []*string) *DeploymentInfo { + s.DeploymentStatusMessages = v + return s +} + // SetDeploymentStyle sets the DeploymentStyle field's value. 
func (s *DeploymentInfo) SetDeploymentStyle(v *DeploymentStyle) *DeploymentInfo { s.DeploymentStyle = v @@ -6592,7 +7001,7 @@ type ELBInfo struct { // For blue/green deployments, the name of the load balancer that will be used // to route traffic from original instances to replacement instances in a blue/green // deployment. For in-place deployments, the name of the load balancer that - // instances are deregistered from, so they are not serving traffic during a + // instances are deregistered from so they are not serving traffic during a // deployment, and then re-registered with after the deployment completes. Name *string `locationName:"name" type:"string"` } @@ -8554,6 +8963,114 @@ func (s *OnPremisesTagSet) SetOnPremisesTagSetList(v [][]*TagFilter) *OnPremises return s } +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/PutLifecycleEventHookExecutionStatusInput +type PutLifecycleEventHookExecutionStatusInput struct { + _ struct{} `type:"structure"` + + // The ID of the deployment. Pass this ID to a Lambda function that validates + // a deployment lifecycle event. + DeploymentId *string `locationName:"deploymentId" type:"string"` + + // The execution ID of a deployment's lifecycle hook. A deployment lifecycle + // hook is specified in the hooks section of the AppSpec file. + LifecycleEventHookExecutionId *string `locationName:"lifecycleEventHookExecutionId" type:"string"` + + // The result of a Lambda function that validates a deployment lifecycle event + // (Succeeded or Failed). + Status *string `locationName:"status" type:"string" enum:"LifecycleEventStatus"` +} + +// String returns the string representation +func (s PutLifecycleEventHookExecutionStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecycleEventHookExecutionStatusInput) GoString() string { + return s.String() +} + +// SetDeploymentId sets the DeploymentId field's value. 
+func (s *PutLifecycleEventHookExecutionStatusInput) SetDeploymentId(v string) *PutLifecycleEventHookExecutionStatusInput { + s.DeploymentId = &v + return s +} + +// SetLifecycleEventHookExecutionId sets the LifecycleEventHookExecutionId field's value. +func (s *PutLifecycleEventHookExecutionStatusInput) SetLifecycleEventHookExecutionId(v string) *PutLifecycleEventHookExecutionStatusInput { + s.LifecycleEventHookExecutionId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *PutLifecycleEventHookExecutionStatusInput) SetStatus(v string) *PutLifecycleEventHookExecutionStatusInput { + s.Status = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/PutLifecycleEventHookExecutionStatusOutput +type PutLifecycleEventHookExecutionStatusOutput struct { + _ struct{} `type:"structure"` + + // The execution ID of the lifecycle event hook. A hook is specified in the + // hooks section of the deployment's AppSpec file. + LifecycleEventHookExecutionId *string `locationName:"lifecycleEventHookExecutionId" type:"string"` +} + +// String returns the string representation +func (s PutLifecycleEventHookExecutionStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecycleEventHookExecutionStatusOutput) GoString() string { + return s.String() +} + +// SetLifecycleEventHookExecutionId sets the LifecycleEventHookExecutionId field's value. +func (s *PutLifecycleEventHookExecutionStatusOutput) SetLifecycleEventHookExecutionId(v string) *PutLifecycleEventHookExecutionStatusOutput { + s.LifecycleEventHookExecutionId = &v + return s +} + +// A revision for an AWS Lambda deployment that is a YAML-formatted or JSON-formatted +// string. For AWS Lambda deployments, the revision is the same as the AppSpec +// file. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/RawString +type RawString struct { + _ struct{} `type:"structure"` + + // The YAML-formatted or JSON-formatted revision string. It includes information + // about which Lambda function to update and optional Lambda functions that + // validate deployment lifecycle events. + Content *string `locationName:"content" type:"string"` + + // The SHA256 hash value of the revision that is specified as a RawString. + Sha256 *string `locationName:"sha256" type:"string"` +} + +// String returns the string representation +func (s RawString) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RawString) GoString() string { + return s.String() +} + +// SetContent sets the Content field's value. +func (s *RawString) SetContent(v string) *RawString { + s.Content = &v + return s +} + +// SetSha256 sets the Sha256 field's value. +func (s *RawString) SetSha256(v string) *RawString { + s.Sha256 = &v + return s +} + // Represents the input of a RegisterApplicationRevision operation. // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/RegisterApplicationRevisionInput type RegisterApplicationRevisionInput struct { @@ -8826,12 +9343,19 @@ type RevisionLocation struct { // // * S3: An application revision stored in Amazon S3. // - // * GitHub: An application revision stored in GitHub. + // * GitHub: An application revision stored in GitHub (EC2/On-premises deployments + // only) + // + // * String: A YAML-formatted or JSON-formatted string (AWS Lambda deployments + // only) RevisionType *string `locationName:"revisionType" type:"string" enum:"RevisionLocationType"` - // Information about the location of application artifacts stored in Amazon - // S3. + // Information about the location of a revision stored in Amazon S3. 
S3Location *S3Location `locationName:"s3Location" type:"structure"` + + // Information about the location of an AWS Lambda deployment revision stored + // as a RawString. + String_ *RawString `locationName:"string" type:"structure"` } // String returns the string representation @@ -8862,6 +9386,12 @@ func (s *RevisionLocation) SetS3Location(v *S3Location) *RevisionLocation { return s } +// SetString_ sets the String_ field's value. +func (s *RevisionLocation) SetString_(v *RawString) *RevisionLocation { + s.String_ = v + return s +} + // Information about a deployment rollback. // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/RollbackInfo type RollbackInfo struct { @@ -9276,6 +9806,83 @@ func (s *TargetInstances) SetTagFilters(v []*EC2TagFilter) *TargetInstances { return s } +// A configuration that shifts traffic from one version of a Lambda function +// to another in two increments. The original and target Lambda function versions +// are specified in the deployment's AppSpec file. +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TimeBasedCanary +type TimeBasedCanary struct { + _ struct{} `type:"structure"` + + // The number of minutes between the first and second traffic shifts of a TimeBasedCanary + // deployment. + CanaryInterval *int64 `locationName:"canaryInterval" type:"integer"` + + // The percentage of traffic to shift in the first increment of a TimeBasedCanary + // deployment. + CanaryPercentage *int64 `locationName:"canaryPercentage" type:"integer"` +} + +// String returns the string representation +func (s TimeBasedCanary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeBasedCanary) GoString() string { + return s.String() +} + +// SetCanaryInterval sets the CanaryInterval field's value. 
+func (s *TimeBasedCanary) SetCanaryInterval(v int64) *TimeBasedCanary { + s.CanaryInterval = &v + return s +} + +// SetCanaryPercentage sets the CanaryPercentage field's value. +func (s *TimeBasedCanary) SetCanaryPercentage(v int64) *TimeBasedCanary { + s.CanaryPercentage = &v + return s +} + +// A configuration that shifts traffic from one version of a Lambda function +// to another in equal increments, with an equal number of minutes between each +// increment. The original and target Lambda function versions are specified +// in the deployment's AppSpec file. +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TimeBasedLinear +type TimeBasedLinear struct { + _ struct{} `type:"structure"` + + // The number of minutes between each incremental traffic shift of a TimeBasedLinear + // deployment. + LinearInterval *int64 `locationName:"linearInterval" type:"integer"` + + // The percentage of traffic that is shifted at the start of each increment + // of a TimeBasedLinear deployment. + LinearPercentage *int64 `locationName:"linearPercentage" type:"integer"` +} + +// String returns the string representation +func (s TimeBasedLinear) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeBasedLinear) GoString() string { + return s.String() +} + +// SetLinearInterval sets the LinearInterval field's value. +func (s *TimeBasedLinear) SetLinearInterval(v int64) *TimeBasedLinear { + s.LinearInterval = &v + return s +} + +// SetLinearPercentage sets the LinearPercentage field's value. +func (s *TimeBasedLinear) SetLinearPercentage(v int64) *TimeBasedLinear { + s.LinearPercentage = &v + return s +} + // Information about a time range. 
// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TimeRange type TimeRange struct { @@ -9314,6 +9921,56 @@ func (s *TimeRange) SetStart(v time.Time) *TimeRange { return s } +// The configuration that specifies how traffic is shifted from one version +// of a Lambda function to another version during an AWS Lambda deployment. +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TrafficRoutingConfig +type TrafficRoutingConfig struct { + _ struct{} `type:"structure"` + + // A configuration that shifts traffic from one version of a Lambda function + // to another in two increments. The original and target Lambda function versions + // are specified in the deployment's AppSpec file. + TimeBasedCanary *TimeBasedCanary `locationName:"timeBasedCanary" type:"structure"` + + // A configuration that shifts traffic from one version of a Lambda function + // to another in equal increments, with an equal number of minutes between each + // increment. The original and target Lambda function versions are specified + // in the deployment's AppSpec file. + TimeBasedLinear *TimeBasedLinear `locationName:"timeBasedLinear" type:"structure"` + + // The type of traffic shifting (TimeBasedCanary or TimeBasedLinear) used by + // a deployment configuration . + Type *string `locationName:"type" type:"string" enum:"TrafficRoutingType"` +} + +// String returns the string representation +func (s TrafficRoutingConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficRoutingConfig) GoString() string { + return s.String() +} + +// SetTimeBasedCanary sets the TimeBasedCanary field's value. +func (s *TrafficRoutingConfig) SetTimeBasedCanary(v *TimeBasedCanary) *TrafficRoutingConfig { + s.TimeBasedCanary = v + return s +} + +// SetTimeBasedLinear sets the TimeBasedLinear field's value. 
+func (s *TrafficRoutingConfig) SetTimeBasedLinear(v *TimeBasedLinear) *TrafficRoutingConfig { + s.TimeBasedLinear = v + return s +} + +// SetType sets the Type field's value. +func (s *TrafficRoutingConfig) SetType(v string) *TrafficRoutingConfig { + s.Type = &v + return s +} + // Information about notification triggers for the deployment group. // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TriggerConfig type TriggerConfig struct { @@ -9690,6 +10347,20 @@ const ( // BundleTypeZip is a BundleType enum value BundleTypeZip = "zip" + + // BundleTypeYaml is a BundleType enum value + BundleTypeYaml = "YAML" + + // BundleTypeJson is a BundleType enum value + BundleTypeJson = "JSON" +) + +const ( + // ComputePlatformServer is a ComputePlatform enum value + ComputePlatformServer = "Server" + + // ComputePlatformLambda is a ComputePlatform enum value + ComputePlatformLambda = "Lambda" ) const ( @@ -9815,6 +10486,30 @@ const ( // ErrorCodeManualStop is a ErrorCode enum value ErrorCodeManualStop = "MANUAL_STOP" + + // ErrorCodeMissingBlueGreenDeploymentConfiguration is a ErrorCode enum value + ErrorCodeMissingBlueGreenDeploymentConfiguration = "MISSING_BLUE_GREEN_DEPLOYMENT_CONFIGURATION" + + // ErrorCodeMissingElbInformation is a ErrorCode enum value + ErrorCodeMissingElbInformation = "MISSING_ELB_INFORMATION" + + // ErrorCodeMissingGithubToken is a ErrorCode enum value + ErrorCodeMissingGithubToken = "MISSING_GITHUB_TOKEN" + + // ErrorCodeElasticLoadBalancingInvalid is a ErrorCode enum value + ErrorCodeElasticLoadBalancingInvalid = "ELASTIC_LOAD_BALANCING_INVALID" + + // ErrorCodeElbInvalidInstance is a ErrorCode enum value + ErrorCodeElbInvalidInstance = "ELB_INVALID_INSTANCE" + + // ErrorCodeInvalidLambdaConfiguration is a ErrorCode enum value + ErrorCodeInvalidLambdaConfiguration = "INVALID_LAMBDA_CONFIGURATION" + + // ErrorCodeInvalidLambdaFunction is a ErrorCode enum value + ErrorCodeInvalidLambdaFunction = "INVALID_LAMBDA_FUNCTION" + + // 
ErrorCodeHookExecutionFailure is a ErrorCode enum value + ErrorCodeHookExecutionFailure = "HOOK_EXECUTION_FAILURE" ) const ( @@ -9948,6 +10643,9 @@ const ( // RevisionLocationTypeGitHub is a RevisionLocationType enum value RevisionLocationTypeGitHub = "GitHub" + + // RevisionLocationTypeString is a RevisionLocationType enum value + RevisionLocationTypeString = "String" ) const ( @@ -9977,6 +10675,17 @@ const ( TagFilterTypeKeyAndValue = "KEY_AND_VALUE" ) +const ( + // TrafficRoutingTypeTimeBasedCanary is a TrafficRoutingType enum value + TrafficRoutingTypeTimeBasedCanary = "TimeBasedCanary" + + // TrafficRoutingTypeTimeBasedLinear is a TrafficRoutingType enum value + TrafficRoutingTypeTimeBasedLinear = "TimeBasedLinear" + + // TrafficRoutingTypeAllAtOnce is a TrafficRoutingType enum value + TrafficRoutingTypeAllAtOnce = "AllAtOnce" +) + const ( // TriggerEventTypeDeploymentStart is a TriggerEventType enum value TriggerEventTypeDeploymentStart = "DeploymentStart" diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/doc.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/doc.go index 1af87287251..48544140be8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/doc.go @@ -4,13 +4,15 @@ // requests to AWS CodeDeploy. // // AWS CodeDeploy is a deployment service that automates application deployments -// to Amazon EC2 instances or on-premises instances running in your own facility. +// to Amazon EC2 instances, on-premises instances running in your own facility, +// or serverless AWS Lambda functions. // // You can deploy a nearly unlimited variety of application content, such as -// code, web and configuration files, executables, packages, scripts, multimedia -// files, and so on. AWS CodeDeploy can deploy application content stored in -// Amazon S3 buckets, GitHub repositories, or Bitbucket repositories. 
You do -// not need to make changes to your existing code before you can use AWS CodeDeploy. +// an updated Lambda function, code, web and configuration files, executables, +// packages, scripts, multimedia files, and so on. AWS CodeDeploy can deploy +// application content stored in Amazon S3 buckets, GitHub repositories, or +// Bitbucket repositories. You do not need to make changes to your existing +// code before you can use AWS CodeDeploy. // // AWS CodeDeploy makes it easier for you to rapidly release new features, helps // you avoid downtime during application deployment, and handles the complexity @@ -27,26 +29,30 @@ // to ensure the correct combination of revision, deployment configuration, // and deployment group are referenced during a deployment. // -// * Deployment group: A set of individual instances. A deployment group -// contains individually tagged instances, Amazon EC2 instances in Auto Scaling -// groups, or both. +// * Deployment group: A set of individual instances or CodeDeploy Lambda +// applications. A Lambda deployment group contains a group of applications. +// An EC2/On-premises deployment group contains individually tagged instances, +// Amazon EC2 instances in Auto Scaling groups, or both. // // * Deployment configuration: A set of deployment rules and deployment success // and failure conditions used by AWS CodeDeploy during a deployment. // -// * Deployment: The process, and the components involved in the process, -// of installing content on one or more instances. +// * Deployment: The process and the components used in the process of updating +// a Lambda function or of installing content on one or more instances. // -// * Application revisions: An archive file containing source content—source -// code, web pages, executable files, and deployment scripts—along with an -// application specification file (AppSpec file). Revisions are stored in -// Amazon S3 buckets or GitHub repositories. 
For Amazon S3, a revision is -// uniquely identified by its Amazon S3 object key and its ETag, version, -// or both. For GitHub, a revision is uniquely identified by its commit ID. +// * Application revisions: For an AWS Lambda deployment, this is an AppSpec +// file that specifies the Lambda function to update and one or more functions +// to validate deployment lifecycle events. For an EC2/On-premises deployment, +// this is an archive file containing source content—source code, web pages, +// executable files, and deployment scripts—along with an AppSpec file. Revisions +// are stored in Amazon S3 buckets or GitHub repositories. For Amazon S3, +// a revision is uniquely identified by its Amazon S3 object key and its +// ETag, version, or both. For GitHub, a revision is uniquely identified +// by its commit ID. // // This guide also contains information to help you get details about the instances -// in your deployments and to make on-premises instances available for AWS CodeDeploy -// deployments. +// in your deployments, to make on-premises instances available for AWS CodeDeploy +// deployments, and to get details about a Lambda function deployment. // // AWS CodeDeploy Information Resources // diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go index 1ff3ee3438c..963a57a533f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go @@ -147,6 +147,18 @@ const ( // The description is too long. ErrCodeDescriptionTooLongException = "DescriptionTooLongException" + // ErrCodeGitHubAccountTokenDoesNotExistException for service response error code + // "GitHubAccountTokenDoesNotExistException". + // + // No GitHub account connection exists with the named specified in the call. 
+ ErrCodeGitHubAccountTokenDoesNotExistException = "GitHubAccountTokenDoesNotExistException" + + // ErrCodeGitHubAccountTokenNameRequiredException for service response error code + // "GitHubAccountTokenNameRequiredException". + // + // The call is missing a required GitHub account connection name. + ErrCodeGitHubAccountTokenNameRequiredException = "GitHubAccountTokenNameRequiredException" + // ErrCodeIamArnRequiredException for service response error code // "IamArnRequiredException". // @@ -260,6 +272,12 @@ const ( // The bucket name either doesn't exist or was specified in an invalid format. ErrCodeInvalidBucketNameFilterException = "InvalidBucketNameFilterException" + // ErrCodeInvalidComputePlatformException for service response error code + // "InvalidComputePlatformException". + // + // The computePlatform is invalid. The computePlatform should be Lambda or Server. + ErrCodeInvalidComputePlatformException = "InvalidComputePlatformException" + // ErrCodeInvalidDeployedStateFilterException for service response error code // "InvalidDeployedStateFilterException". // @@ -327,6 +345,12 @@ const ( // "DISALLOW", "OVERWRITE", and "RETAIN". ErrCodeInvalidFileExistsBehaviorException = "InvalidFileExistsBehaviorException" + // ErrCodeInvalidGitHubAccountTokenNameException for service response error code + // "InvalidGitHubAccountTokenNameException". + // + // The format of the specified GitHub account connection name is invalid. + ErrCodeInvalidGitHubAccountTokenNameException = "InvalidGitHubAccountTokenNameException" + // ErrCodeInvalidIamSessionArnException for service response error code // "InvalidIamSessionArnException". // @@ -339,6 +363,19 @@ const ( // The IAM user ARN was specified in an invalid format. ErrCodeInvalidIamUserArnException = "InvalidIamUserArnException" + // ErrCodeInvalidIgnoreApplicationStopFailuresValueException for service response error code + // "InvalidIgnoreApplicationStopFailuresValueException". 
+ // + // The IgnoreApplicationStopFailures value is invalid. For AWS Lambda deployments, + // false is expected. For EC2/On-premises deployments, true or false is expected. + ErrCodeInvalidIgnoreApplicationStopFailuresValueException = "InvalidIgnoreApplicationStopFailuresValueException" + + // ErrCodeInvalidInputException for service response error code + // "InvalidInputException". + // + // The specified input was specified in an invalid format. + ErrCodeInvalidInputException = "InvalidInputException" + // ErrCodeInvalidInstanceNameException for service response error code // "InvalidInstanceNameException". // @@ -365,6 +402,20 @@ const ( // The specified key prefix filter was specified in an invalid format. ErrCodeInvalidKeyPrefixFilterException = "InvalidKeyPrefixFilterException" + // ErrCodeInvalidLifecycleEventHookExecutionIdException for service response error code + // "InvalidLifecycleEventHookExecutionIdException". + // + // A lifecycle event hook is invalid. Review the hooks section in your AppSpec + // file to ensure the lifecycle events and hooks functions are valid. + ErrCodeInvalidLifecycleEventHookExecutionIdException = "InvalidLifecycleEventHookExecutionIdException" + + // ErrCodeInvalidLifecycleEventHookExecutionStatusException for service response error code + // "InvalidLifecycleEventHookExecutionStatusException". + // + // The result of a Lambda validation function that verifies a lifecycle event + // is invalid. It should return Succeeded or Failed. + ErrCodeInvalidLifecycleEventHookExecutionStatusException = "InvalidLifecycleEventHookExecutionStatusException" + // ErrCodeInvalidLoadBalancerInfoException for service response error code // "InvalidLoadBalancerInfoException". // @@ -462,12 +513,32 @@ const ( // The specified time range was specified in an invalid format. 
ErrCodeInvalidTimeRangeException = "InvalidTimeRangeException" + // ErrCodeInvalidTrafficRoutingConfigurationException for service response error code + // "InvalidTrafficRoutingConfigurationException". + // + // The configuration that specifies how traffic is routed during a deployment + // is invalid. + ErrCodeInvalidTrafficRoutingConfigurationException = "InvalidTrafficRoutingConfigurationException" + // ErrCodeInvalidTriggerConfigException for service response error code // "InvalidTriggerConfigException". // // The trigger was specified in an invalid format. ErrCodeInvalidTriggerConfigException = "InvalidTriggerConfigException" + // ErrCodeInvalidUpdateOutdatedInstancesOnlyValueException for service response error code + // "InvalidUpdateOutdatedInstancesOnlyValueException". + // + // The UpdateOutdatedInstancesOnly value is invalid. For AWS Lambda deployments, + // false is expected. For EC2/On-premises deployments, true or false is expected. + ErrCodeInvalidUpdateOutdatedInstancesOnlyValueException = "InvalidUpdateOutdatedInstancesOnlyValueException" + + // ErrCodeLifecycleEventAlreadyCompletedException for service response error code + // "LifecycleEventAlreadyCompletedException". + // + // An attempt to return the status of an already completed lifecycle event occurred. + ErrCodeLifecycleEventAlreadyCompletedException = "LifecycleEventAlreadyCompletedException" + // ErrCodeLifecycleHookLimitExceededException for service response error code // "LifecycleHookLimitExceededException". // @@ -481,6 +552,12 @@ const ( // Use only one ARN type. ErrCodeMultipleIamArnsProvidedException = "MultipleIamArnsProvidedException" + // ErrCodeOperationNotSupportedException for service response error code + // "OperationNotSupportedException". + // + // The API used does not support the deployment. 
+ ErrCodeOperationNotSupportedException = "OperationNotSupportedException" + // ErrCodeResourceValidationException for service response error code // "ResourceValidationException". // @@ -524,6 +601,12 @@ const ( // allowed limit of 3. ErrCodeTagSetListLimitExceededException = "TagSetListLimitExceededException" + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // An API function was called too frequently. + ErrCodeThrottlingException = "ThrottlingException" + // ErrCodeTriggerTargetsLimitExceededException for service response error code // "TriggerTargetsLimitExceededException". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go index 5fafaa53e10..b4e6b0bdf82 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go @@ -1352,6 +1352,16 @@ func (c *Route53) CreateTrafficPolicyVersionRequest(input *CreateTrafficPolicyVe // * ErrCodeInvalidInput "InvalidInput" // The input is not valid. // +// * ErrCodeTooManyTrafficPolicyVersionsForCurrentPolicy "TooManyTrafficPolicyVersionsForCurrentPolicy" +// This traffic policy version can't be created because you've reached the limit +// of 1000 on the number of versions that you can create for the current traffic +// policy. +// +// To create more traffic policy versions, you can use GetTrafficPolicy to get +// the traffic policy document for a specified traffic policy version, and then +// use CreateTrafficPolicy to create a new traffic policy using the traffic +// policy document. +// // * ErrCodeConcurrentModification "ConcurrentModification" // Another user submitted a request to create, update, or delete the object // at the same time that you did. Retry the request. 
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go b/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go index 856039e95d7..d37e10cdebd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go @@ -379,6 +379,19 @@ const ( // with the AWS Support Center. ErrCodeTooManyTrafficPolicyInstances = "TooManyTrafficPolicyInstances" + // ErrCodeTooManyTrafficPolicyVersionsForCurrentPolicy for service response error code + // "TooManyTrafficPolicyVersionsForCurrentPolicy". + // + // This traffic policy version can't be created because you've reached the limit + // of 1000 on the number of versions that you can create for the current traffic + // policy. + // + // To create more traffic policy versions, you can use GetTrafficPolicy to get + // the traffic policy document for a specified traffic policy version, and then + // use CreateTrafficPolicy to create a new traffic policy using the traffic + // policy document. + ErrCodeTooManyTrafficPolicyVersionsForCurrentPolicy = "TooManyTrafficPolicyVersionsForCurrentPolicy" + // ErrCodeTooManyVPCAssociationAuthorizations for service response error code // "TooManyVPCAssociationAuthorizations". 
// diff --git a/vendor/vendor.json b/vendor/vendor.json index d30f270378b..d53ba0b466f 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -141,844 +141,844 @@ "revisionTime": "2017-07-27T15:54:43Z" }, { - "checksumSHA1": "LNHL71DHaVF6ZpRpHabRk1QJf2M=", + "checksumSHA1": "h8863Fok+80x0JWRr78XXcGywxM=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "DtuTqKH29YnLjrIJkRYX0HQtXY0=", "path": "github.com/aws/aws-sdk-go/aws/arn", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "9nE/FjZ4pYrT883KtV2/aI+Gayo=", "path": "github.com/aws/aws-sdk-go/aws/client", - 
"revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { 
"checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "OnU/n7R33oYXiB4SAGd5pK7I0Bs=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "BT2+PhuOjbAuMcLpdop0FKQY5EY=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + 
"revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "9GvAyILJ7g+VUg8Ef5DsT5GuYsg=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "iU00ZjhAml/13g+1YXT21IqoXqg=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "NStHCXEvYqG72GknZyv1jaKaeH0=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": 
"v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "yHfT5DTbeCLs4NE2Rgnqrhe15ls=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": 
"ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "pkeoOfZpHRvFG/AOZeTf0lwtsFg=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { 
"checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "vnYDXA1NxJ7Hu+DMfXNk1UnmkWg=", "path": "github.com/aws/aws-sdk-go/service/acm", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "DPl/OkvEUjrd+XKqX73l6nUNw3U=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "X8tOI6i+RJwXIgg1qBjDNclyG/0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "aDAaH6YiA50IrJ5Smfg0fovrniA=", "path": "github.com/aws/aws-sdk-go/service/appsync", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + 
"revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "oBXDw1zQTfxcKsK3ZjtKcS7gBLI=", "path": "github.com/aws/aws-sdk-go/service/athena", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "ITAwWyJp4t9AGfUXm9M3pFWTHVA=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Zz8qI6RloveM1zrXAglLxJZT1ZA=", "path": "github.com/aws/aws-sdk-go/service/batch", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "/nO06EpnD22+Ex80gHi4UYrAvKc=", "path": "github.com/aws/aws-sdk-go/service/budgets", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "6gM3CZZgiB0JvS7EK1c31Q8L09U=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - 
"versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "T80IDetBz1hqJpq5Wqmx3MwCh8w=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "bYrI9mxspB0xDFZEy3OIfWuez5g=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "oB+M+kOmYG28V0PuI75IF6E+/w8=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Nc3vXlV7s309PprScYpRDPQWeDQ=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "bPh7NF3mLpGMV0rIakolMPHqMyw=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": 
"ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "P6qyaFX9X6Nnvm3avLigjmjfYds=", "path": "github.com/aws/aws-sdk-go/service/codebuild", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "7nW1Ho2X3RcUU8FaFBhJIUeuDNw=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { - "checksumSHA1": "m19PZt1B51QCWo1jxSbII2zzL6Q=", + "checksumSHA1": "+petAU2sPfykSoVBAitmGxvGOlw=", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "LKw7fnNwq17Eqy0clzS/LK89vS4=", "path": "github.com/aws/aws-sdk-go/service/codepipeline", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + 
"versionExact": "v1.12.57" }, { "checksumSHA1": "aXh1KIbNX+g+tH+lh3pk++9lm3k=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentity", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "IWi9xZz+OncotjM/vJ87Iffg2Qk=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentityprovider", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "56F6Stg8hQ1kxiAEzqB0TDctW9k=", "path": "github.com/aws/aws-sdk-go/service/configservice", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "hYCwLQdIjHj8rMHLGVyUVhecI4s=", "path": "github.com/aws/aws-sdk-go/service/databasemigrationservice", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "siWpqsOY3u69XkgPF8+F8V1K0Pc=", "path": "github.com/aws/aws-sdk-go/service/dax", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": 
"5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "26CWoHQP/dyL2VzE5ZNd8zNzhko=", "path": "github.com/aws/aws-sdk-go/service/devicefarm", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "6g94rUHAgjcqMMTtMqKUbLU37wY=", "path": "github.com/aws/aws-sdk-go/service/directconnect", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "oFnS6I0u7KqnxK0/r1uoz8rTkxI=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "0TXXUPjrbOCHpX555B6suH36Nnk=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "INaeHZ2L5x6RlrcQBm4q1hFqNRM=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": 
"2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "uEv9kkBsVIjg7K4+Y8TVlU0Cc8o=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "3B3RtWG7IY9qhFhWGEwroeMxnPI=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "eoM9nF5iVMbuGOmkY33d19aHt8Y=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "dU5MPXUUOYD/E9sNncpFZ/U86Cw=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "pj8mBWT3HE0Iid6HSmhw7lmyZDU=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - 
"revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "VYGtTaSiajfKOVTbi9/SNmbiIac=", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "SZ7yLDZ6RvMhpWe0Goyem64kgyA=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "WYqHhdRNsiGGBLWlBLbOItZf+zA=", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "ae7VWg/xuXpnSD6wGumN44qEd+Q=", "path": "github.com/aws/aws-sdk-go/service/elbv2", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": 
"NbkH6F+792jQ7BW4lGCb+vJVw58=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "5btWHj2fZrPc/zfYdJLPaOcivxI=", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Rodm1XwZ9Ncah1NLHep0behQpXg=", "path": "github.com/aws/aws-sdk-go/service/gamelift", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "oDoGvSfmO2Z099ixV2HXn+SDeHE=", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "HRmbBf3dUEBAfdC2xKaoWAGeM7Y=", "path": "github.com/aws/aws-sdk-go/service/glue", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + 
"version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "6JlxJoy1JCArNK2qBkaJ5IV6qBc=", "path": "github.com/aws/aws-sdk-go/service/guardduty", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "oZaxMqnwl2rA+V/W0tJ3uownORI=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "nMdRXIfhgvEKBHnLX61Ze3EUJWU=", "path": "github.com/aws/aws-sdk-go/service/inspector", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "pZwCI4DpP5hcMa/ItKhiwo/ukd0=", "path": "github.com/aws/aws-sdk-go/service/iot", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "IoSyRZhlL0petrB28nXk5jKM9YA=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": 
"5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "oAFLgD0uJiVOZkFkL5dd/wUgBz4=", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "XDVse9fKF0RkAywzzgsO31AV4oc=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "HluEcyZNywrbKnj/aR3tXbu29d8=", "path": "github.com/aws/aws-sdk-go/service/lexmodelbuildingservice", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "wjs9YBsHx0YQH0zKBA7Ibd1UV5Y=", "path": "github.com/aws/aws-sdk-go/service/lightsail", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "4VfB5vMLNYs0y6K159YCBgo9T3c=", "path": "github.com/aws/aws-sdk-go/service/mediaconvert", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": 
"2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Ox3VWHYSQq0YKmlr0paUPdr5W/0=", "path": "github.com/aws/aws-sdk-go/service/medialive", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Rs7QtkcLl3XNPnKb8ss/AhF2X50=", "path": "github.com/aws/aws-sdk-go/service/mediapackage", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "QjiIL8LrlhwrQw8FboF+wMNvUF0=", "path": "github.com/aws/aws-sdk-go/service/mediastore", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "ZY1SJNE03I6NL2OBJD9hlwVsqO0=", "path": "github.com/aws/aws-sdk-go/service/mediastoredata", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "ynB7Flcudp0VOqBVKZJ+23DtLHU=", "path": 
"github.com/aws/aws-sdk-go/service/mq", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "fpsBu+F79ktlLRwal1GugVMUDo0=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "IddJCt5BrI6zRuUpFJqqnS9qrIM=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "vP1FcccUZbuUlin7ME89w1GVJtA=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { - "checksumSHA1": "tKnVaKPOCiU6xl3/AYcdBCLtRdw=", + "checksumSHA1": "fgSXmayOZRgur/41Gp1tFvH0GGg=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", 
+ "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "sCaHoPWsJXRHFbilUKwN71qFTOI=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "QZU8vR9cOIenYiH+Ywl4Gzfnlp0=", "path": "github.com/aws/aws-sdk-go/service/servicecatalog", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "dk6ebvA0EYgdPyc5HPKLBPEtsm4=", "path": "github.com/aws/aws-sdk-go/service/servicediscovery", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Ex1Ma0SFGpqeNuPbeXZtsliZ3zo=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "maVXeR3WDAkONlzf04e4mDgCYxo=", "path": "github.com/aws/aws-sdk-go/service/sfn", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": 
"5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "ADoR4mlCW5usH8iOa6mPNSy49LM=", "path": "github.com/aws/aws-sdk-go/service/shield", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "B3CgAFSREebpsFoFOo4vrQ6u04w=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "FfY8w4DM8XIULdRnFhd3Um8Mj8c=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Wx189wAbIhWChx4kVbvsyqKMF4U=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Al7CCaQRNd22FwUZXigUEWN820M=", "path": "github.com/aws/aws-sdk-go/service/ssm", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - 
"version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "W1oFtpaT4TWIIJrAvFcn/XdcT7g=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "Uw4pOUxSMbx4xBHUcOUkNhtnywE=", "path": "github.com/aws/aws-sdk-go/service/swf", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "on6d7Hydx2bM9jkFOf1JZcZZgeY=", "path": "github.com/aws/aws-sdk-go/service/waf", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "rHqjsOndIR82gX5mSKybaRWf3UY=", "path": "github.com/aws/aws-sdk-go/service/wafregional", - "revision": "ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "y0XODBzpJjZvR1e9F6ULItV5nG4=", "path": "github.com/aws/aws-sdk-go/service/workspaces", - "revision": 
"ce8d7a13a9e7f883d91c39eb98d4f72021eb48e2", - "revisionTime": "2018-01-05T21:48:20Z", - "version": "v1.12.56", - "versionExact": "v1.12.56" + "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", + "revisionTime": "2018-01-09T00:04:15Z", + "version": "v1.12.57", + "versionExact": "v1.12.57" }, { "checksumSHA1": "usT4LCSQItkFvFOQT7cBlkCuGaE=", From cae1456dedc061fa6cbf522d6dbcac22711c6860 Mon Sep 17 00:00:00 2001 From: Daniel del Castillo Date: Tue, 9 Jan 2018 09:58:07 +0000 Subject: [PATCH 129/350] Add SageMaker vendor --- .../aws/aws-sdk-go/service/sagemaker/api.go | 7569 +++++++++++++++++ .../aws/aws-sdk-go/service/sagemaker/doc.go | 28 + .../aws-sdk-go/service/sagemaker/errors.go | 25 + .../aws-sdk-go/service/sagemaker/service.go | 98 + .../aws-sdk-go/service/sagemaker/waiters.go | 331 + vendor/vendor.json | 8 + 6 files changed, 8059 insertions(+) create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sagemaker/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sagemaker/waiters.go diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go new file mode 100644 index 00000000000..96a70c949d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go @@ -0,0 +1,7569 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package sagemaker + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAddTags = "AddTags" + +// AddTagsRequest generates a "aws/request.Request" representing the +// client's request for the AddTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AddTags for more information on using the AddTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AddTagsRequest method. +// req, resp := client.AddTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AddTags +func (c *SageMaker) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + output = &AddTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// AddTags API operation for Amazon SageMaker Service. +// +// Adds or overwrites one or more tags for the specified Amazon SageMaker resource. +// You can add tags to notebook instances, training jobs, models, endpoint configurations, +// and endpoints. +// +// Each tag consists of a key and an optional value. Tag keys must be unique +// per resource. 
For more information about tags, see Using Cost Allocation +// Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) +// in the AWS Billing and Cost Management User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation AddTags for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AddTags +func (c *SageMaker) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + return out, req.Send() +} + +// AddTagsWithContext is the same as AddTags with the addition of +// the ability to pass a context and additional request options. +// +// See AddTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) AddTagsWithContext(ctx aws.Context, input *AddTagsInput, opts ...request.Option) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateEndpoint = "CreateEndpoint" + +// CreateEndpointRequest generates a "aws/request.Request" representing the +// client's request for the CreateEndpoint operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CreateEndpoint for more information on using the CreateEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateEndpointRequest method. +// req, resp := client.CreateEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpoint +func (c *SageMaker) CreateEndpointRequest(input *CreateEndpointInput) (req *request.Request, output *CreateEndpointOutput) { + op := &request.Operation{ + Name: opCreateEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEndpointInput{} + } + + output = &CreateEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateEndpoint API operation for Amazon SageMaker Service. +// +// Creates an endpoint using the endpoint configuration specified in the request. +// Amazon SageMaker uses the endpoint to provision resources and deploy models. +// You create the endpoint configuration with the CreateEndpointConfig (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html) +// API. +// +// Use this API only for hosting models using Amazon SageMaker hosting services. +// +// The endpoint name must be unique within an AWS Region in your AWS account. +// +// When it receives the request, Amazon SageMaker creates the endpoint, launches +// the resources (ML compute instances), and deploys the model(s) on them. +// +// When Amazon SageMaker receives the request, it sets the endpoint status to +// Creating. After it creates the endpoint, it sets the status to InService. +// Amazon SageMaker can then process incoming requests for inferences. 
To check +// the status of an endpoint, use the DescribeEndpoint (http://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html) +// API. +// +// For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon +// SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/ex1.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation CreateEndpoint for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" +// You have exceeded an Amazon SageMaker resource limit. For example, you might +// have too many training jobs created. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpoint +func (c *SageMaker) CreateEndpoint(input *CreateEndpointInput) (*CreateEndpointOutput, error) { + req, out := c.CreateEndpointRequest(input) + return out, req.Send() +} + +// CreateEndpointWithContext is the same as CreateEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See CreateEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) CreateEndpointWithContext(ctx aws.Context, input *CreateEndpointInput, opts ...request.Option) (*CreateEndpointOutput, error) { + req, out := c.CreateEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCreateEndpointConfig = "CreateEndpointConfig" + +// CreateEndpointConfigRequest generates a "aws/request.Request" representing the +// client's request for the CreateEndpointConfig operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateEndpointConfig for more information on using the CreateEndpointConfig +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateEndpointConfigRequest method. +// req, resp := client.CreateEndpointConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointConfig +func (c *SageMaker) CreateEndpointConfigRequest(input *CreateEndpointConfigInput) (req *request.Request, output *CreateEndpointConfigOutput) { + op := &request.Operation{ + Name: opCreateEndpointConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEndpointConfigInput{} + } + + output = &CreateEndpointConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateEndpointConfig API operation for Amazon SageMaker Service. +// +// Creates an endpoint configuration that Amazon SageMaker hosting services +// uses to deploy models. In the configuration, you identify one or more models, +// created using the CreateModel API, to deploy and the resources that you want +// Amazon SageMaker to provision. 
Then you call the CreateEndpoint (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html) +// API. +// +// Use this API only if you want to use Amazon SageMaker hosting services to +// deploy models into production. +// +// In the request, you define one or more ProductionVariants, each of which +// identifies a model. Each ProductionVariant parameter also describes the resources +// that you want Amazon SageMaker to provision. This includes the number and +// type of ML compute instances to deploy. +// +// If you are hosting multiple models, you also assign a VariantWeight to specify +// how much traffic you want to allocate to each model. For example, suppose +// that you want to host two models, A and B, and you assign traffic weight +// 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds +// of the traffic to Model A, and one-third to model B. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation CreateEndpointConfig for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" +// You have exceeded an Amazon SageMaker resource limit. For example, you might +// have too many training jobs created. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointConfig +func (c *SageMaker) CreateEndpointConfig(input *CreateEndpointConfigInput) (*CreateEndpointConfigOutput, error) { + req, out := c.CreateEndpointConfigRequest(input) + return out, req.Send() +} + +// CreateEndpointConfigWithContext is the same as CreateEndpointConfig with the addition of +// the ability to pass a context and additional request options. +// +// See CreateEndpointConfig for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) CreateEndpointConfigWithContext(ctx aws.Context, input *CreateEndpointConfigInput, opts ...request.Option) (*CreateEndpointConfigOutput, error) { + req, out := c.CreateEndpointConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateModel = "CreateModel" + +// CreateModelRequest generates a "aws/request.Request" representing the +// client's request for the CreateModel operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateModel for more information on using the CreateModel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateModelRequest method. 
+// req, resp := client.CreateModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateModel +func (c *SageMaker) CreateModelRequest(input *CreateModelInput) (req *request.Request, output *CreateModelOutput) { + op := &request.Operation{ + Name: opCreateModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateModelInput{} + } + + output = &CreateModelOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateModel API operation for Amazon SageMaker Service. +// +// Creates a model in Amazon SageMaker. In the request, you name the model and +// describe one or more containers. For each container, you specify the docker +// image containing inference code, artifacts (from prior training), and custom +// environment map that the inference code uses when you deploy the model into +// production. +// +// Use this API to create a model only if you want to use Amazon SageMaker hosting +// services. To host your model, you create an endpoint configuration with the +// CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint +// API. +// +// Amazon SageMaker then deploys all of the containers that you defined for +// the model in the hosting environment. +// +// In the CreateModel request, you must define a container with the PrimaryContainer +// parameter. +// +// In the request, you also provide an IAM role that Amazon SageMaker can assume +// to access model artifacts and docker image for deployment on ML compute hosting +// instances. In addition, you also use the IAM role to manage permissions the +// inference code needs. For example, if the inference code access any other +// AWS resources, you grant necessary permissions via this role. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation CreateModel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" +// You have exceeded an Amazon SageMaker resource limit. For example, you might +// have too many training jobs created. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateModel +func (c *SageMaker) CreateModel(input *CreateModelInput) (*CreateModelOutput, error) { + req, out := c.CreateModelRequest(input) + return out, req.Send() +} + +// CreateModelWithContext is the same as CreateModel with the addition of +// the ability to pass a context and additional request options. +// +// See CreateModel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) CreateModelWithContext(ctx aws.Context, input *CreateModelInput, opts ...request.Option) (*CreateModelOutput, error) { + req, out := c.CreateModelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateNotebookInstance = "CreateNotebookInstance" + +// CreateNotebookInstanceRequest generates a "aws/request.Request" representing the +// client's request for the CreateNotebookInstance operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CreateNotebookInstance for more information on using the CreateNotebookInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateNotebookInstanceRequest method. +// req, resp := client.CreateNotebookInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateNotebookInstance +func (c *SageMaker) CreateNotebookInstanceRequest(input *CreateNotebookInstanceInput) (req *request.Request, output *CreateNotebookInstanceOutput) { + op := &request.Operation{ + Name: opCreateNotebookInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNotebookInstanceInput{} + } + + output = &CreateNotebookInstanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateNotebookInstance API operation for Amazon SageMaker Service. +// +// Creates an Amazon SageMaker notebook instance. A notebook instance is an +// ML compute instance running on a Jupyter notebook. +// +// In a CreateNotebookInstance request, you specify the type of ML compute instance +// that you want to run. Amazon SageMaker launches the instance, installs common +// libraries that you can use to explore datasets for model training, and attaches +// an ML storage volume to the notebook instance. +// +// Amazon SageMaker also provides a set of example notebooks. Each notebook +// demonstrates how to use Amazon SageMaker with a specific an algorithm or +// with a machine learning framework. +// +// After receiving the request, Amazon SageMaker does the following: +// +// Creates a network interface in the Amazon SageMaker VPC. 
+// +// (Option) If you specified SubnetId, creates a network interface in your own +// VPC, which is inferred from the subnet ID that you provide in the input. +// When creating this network interface, Amazon SageMaker attaches the security +// group that you specified in the request to the network interface that it +// creates in your VPC. +// +// Launches an EC2 instance of the type specified in the request in the Amazon +// SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies +// both network interfaces when launching this instance. This enables inbound +// traffic from your own VPC to the notebook instance, assuming that the security +// groups allow it. +// +// After creating the notebook instance, Amazon SageMaker returns its Amazon +// Resource Name (ARN). +// +// After Amazon SageMaker creates the notebook instance, you can connect to +// the Jupyter server and work in Jupyter notebooks. For example, you can write +// code to explore a dataset that you can use for model training, train a model, +// host models by creating Amazon SageMaker endpoints, and validate hosted models. +// +// For more information, see How It Works (http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation CreateNotebookInstance for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" +// You have exceeded an Amazon SageMaker resource limit. For example, you might +// have too many training jobs created. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateNotebookInstance +func (c *SageMaker) CreateNotebookInstance(input *CreateNotebookInstanceInput) (*CreateNotebookInstanceOutput, error) { + req, out := c.CreateNotebookInstanceRequest(input) + return out, req.Send() +} + +// CreateNotebookInstanceWithContext is the same as CreateNotebookInstance with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNotebookInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) CreateNotebookInstanceWithContext(ctx aws.Context, input *CreateNotebookInstanceInput, opts ...request.Option) (*CreateNotebookInstanceOutput, error) { + req, out := c.CreateNotebookInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreatePresignedNotebookInstanceUrl = "CreatePresignedNotebookInstanceUrl" + +// CreatePresignedNotebookInstanceUrlRequest generates a "aws/request.Request" representing the +// client's request for the CreatePresignedNotebookInstanceUrl operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreatePresignedNotebookInstanceUrl for more information on using the CreatePresignedNotebookInstanceUrl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+//
+//
+// // Example sending a request using the CreatePresignedNotebookInstanceUrlRequest method.
+// req, resp := client.CreatePresignedNotebookInstanceUrlRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreatePresignedNotebookInstanceUrl
+func (c *SageMaker) CreatePresignedNotebookInstanceUrlRequest(input *CreatePresignedNotebookInstanceUrlInput) (req *request.Request, output *CreatePresignedNotebookInstanceUrlOutput) {
+ op := &request.Operation{
+ Name: opCreatePresignedNotebookInstanceUrl,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreatePresignedNotebookInstanceUrlInput{}
+ }
+
+ output = &CreatePresignedNotebookInstanceUrlOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreatePresignedNotebookInstanceUrl API operation for Amazon SageMaker Service.
+//
+// Returns a URL that you can use to connect to the Jupyter server from a notebook
+// instance. In the Amazon SageMaker console, when you choose Open next to a
+// notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server
+// home page from the notebook instance. The console uses this API to get the
+// URL and show the page.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon SageMaker Service's
+// API operation CreatePresignedNotebookInstanceUrl for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreatePresignedNotebookInstanceUrl +func (c *SageMaker) CreatePresignedNotebookInstanceUrl(input *CreatePresignedNotebookInstanceUrlInput) (*CreatePresignedNotebookInstanceUrlOutput, error) { + req, out := c.CreatePresignedNotebookInstanceUrlRequest(input) + return out, req.Send() +} + +// CreatePresignedNotebookInstanceUrlWithContext is the same as CreatePresignedNotebookInstanceUrl with the addition of +// the ability to pass a context and additional request options. +// +// See CreatePresignedNotebookInstanceUrl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) CreatePresignedNotebookInstanceUrlWithContext(ctx aws.Context, input *CreatePresignedNotebookInstanceUrlInput, opts ...request.Option) (*CreatePresignedNotebookInstanceUrlOutput, error) { + req, out := c.CreatePresignedNotebookInstanceUrlRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTrainingJob = "CreateTrainingJob" + +// CreateTrainingJobRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrainingJob operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTrainingJob for more information on using the CreateTrainingJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTrainingJobRequest method. +// req, resp := client.CreateTrainingJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateTrainingJob +func (c *SageMaker) CreateTrainingJobRequest(input *CreateTrainingJobInput) (req *request.Request, output *CreateTrainingJobOutput) { + op := &request.Operation{ + Name: opCreateTrainingJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrainingJobInput{} + } + + output = &CreateTrainingJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTrainingJob API operation for Amazon SageMaker Service. +// +// Starts a model training job. After training completes, Amazon SageMaker saves +// the resulting model artifacts to an Amazon S3 location that you specify. +// +// If you choose to host your model using Amazon SageMaker hosting services, +// you can use the resulting model artifacts as part of the model. You can also +// use the artifacts in a deep learning service other than Amazon SageMaker, +// provided that you know how to use them for inferences. +// +// In the request body, you provide the following: +// +// * AlgorithmSpecification - Identifies the training algorithm to use. +// +// * HyperParameters - Specify these algorithm-specific parameters to influence +// the quality of the final model. For a list of hyperparameters for each +// training algorithm provided by Amazon SageMaker, see Algorithms (http://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). +// +// +// * InputDataConfig - Describes the training dataset and the Amazon S3 location +// where it is stored. +// +// * OutputDataConfig - Identifies the Amazon S3 location where you want +// Amazon SageMaker to save the results of model training. 
+// +// * ResourceConfig - Identifies the resources, ML compute instances, and +// ML storage volumes to deploy for model training. In distributed training, +// you specify more than one instance. +// +// * RoleARN - The Amazon Resource Number (ARN) that Amazon SageMaker assumes +// to perform tasks on your behalf during model training. You must grant +// this role the necessary permissions so that Amazon SageMaker can successfully +// complete model training. +// +// * StoppingCondition - Sets a duration for training. Use this parameter +// to cap model training costs. +// +// For more information about Amazon SageMaker, see How It Works (http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation CreateTrainingJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUse "ResourceInUse" +// Resource being accessed is in use. +// +// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" +// You have exceeded an Amazon SageMaker resource limit. For example, you might +// have too many training jobs created. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateTrainingJob +func (c *SageMaker) CreateTrainingJob(input *CreateTrainingJobInput) (*CreateTrainingJobOutput, error) { + req, out := c.CreateTrainingJobRequest(input) + return out, req.Send() +} + +// CreateTrainingJobWithContext is the same as CreateTrainingJob with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTrainingJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SageMaker) CreateTrainingJobWithContext(ctx aws.Context, input *CreateTrainingJobInput, opts ...request.Option) (*CreateTrainingJobOutput, error) {
+ req, out := c.CreateTrainingJobRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteEndpoint = "DeleteEndpoint"
+
+// DeleteEndpointRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteEndpoint operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteEndpoint for more information on using the DeleteEndpoint
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteEndpointRequest method. 
+// req, resp := client.DeleteEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpoint +func (c *SageMaker) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) { + op := &request.Operation{ + Name: opDeleteEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEndpointInput{} + } + + output = &DeleteEndpointOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteEndpoint API operation for Amazon SageMaker Service. +// +// Deletes an endpoint. Amazon SageMaker frees up all of the resources that +// were deployed when the endpoint was created. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation DeleteEndpoint for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpoint +func (c *SageMaker) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) { + req, out := c.DeleteEndpointRequest(input) + return out, req.Send() +} + +// DeleteEndpointWithContext is the same as DeleteEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) DeleteEndpointWithContext(ctx aws.Context, input *DeleteEndpointInput, opts ...request.Option) (*DeleteEndpointOutput, error) { + req, out := c.DeleteEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteEndpointConfig = "DeleteEndpointConfig" + +// DeleteEndpointConfigRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEndpointConfig operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteEndpointConfig for more information on using the DeleteEndpointConfig +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteEndpointConfigRequest method. 
+// req, resp := client.DeleteEndpointConfigRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointConfig
+func (c *SageMaker) DeleteEndpointConfigRequest(input *DeleteEndpointConfigInput) (req *request.Request, output *DeleteEndpointConfigOutput) {
+ op := &request.Operation{
+ Name: opDeleteEndpointConfig,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteEndpointConfigInput{}
+ }
+
+ output = &DeleteEndpointConfigOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteEndpointConfig API operation for Amazon SageMaker Service.
+//
+// Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only
+// the specified configuration. It does not delete endpoints created using the
+// configuration.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon SageMaker Service's
+// API operation DeleteEndpointConfig for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointConfig
+func (c *SageMaker) DeleteEndpointConfig(input *DeleteEndpointConfigInput) (*DeleteEndpointConfigOutput, error) {
+ req, out := c.DeleteEndpointConfigRequest(input)
+ return out, req.Send()
+}
+
+// DeleteEndpointConfigWithContext is the same as DeleteEndpointConfig with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteEndpointConfig for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) DeleteEndpointConfigWithContext(ctx aws.Context, input *DeleteEndpointConfigInput, opts ...request.Option) (*DeleteEndpointConfigOutput, error) { + req, out := c.DeleteEndpointConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteModel = "DeleteModel" + +// DeleteModelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteModel operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteModel for more information on using the DeleteModel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteModelRequest method. 
+// req, resp := client.DeleteModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteModel +func (c *SageMaker) DeleteModelRequest(input *DeleteModelInput) (req *request.Request, output *DeleteModelOutput) { + op := &request.Operation{ + Name: opDeleteModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteModelInput{} + } + + output = &DeleteModelOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteModel API operation for Amazon SageMaker Service. +// +// Deletes a model. The DeleteModel API deletes only the model entry that was +// created in Amazon SageMaker when you called the CreateModel (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateModel.html) +// API. It does not delete model artifacts, inference code, or the IAM role +// that you specified when creating the model. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation DeleteModel for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteModel +func (c *SageMaker) DeleteModel(input *DeleteModelInput) (*DeleteModelOutput, error) { + req, out := c.DeleteModelRequest(input) + return out, req.Send() +} + +// DeleteModelWithContext is the same as DeleteModel with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteModel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) DeleteModelWithContext(ctx aws.Context, input *DeleteModelInput, opts ...request.Option) (*DeleteModelOutput, error) { + req, out := c.DeleteModelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteNotebookInstance = "DeleteNotebookInstance" + +// DeleteNotebookInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNotebookInstance operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteNotebookInstance for more information on using the DeleteNotebookInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteNotebookInstanceRequest method. 
+// req, resp := client.DeleteNotebookInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteNotebookInstance +func (c *SageMaker) DeleteNotebookInstanceRequest(input *DeleteNotebookInstanceInput) (req *request.Request, output *DeleteNotebookInstanceOutput) { + op := &request.Operation{ + Name: opDeleteNotebookInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNotebookInstanceInput{} + } + + output = &DeleteNotebookInstanceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteNotebookInstance API operation for Amazon SageMaker Service. +// +// Deletes an Amazon SageMaker notebook instance. Before you can delete a notebook +// instance, you must call the StopNotebookInstance API. +// +// When you delete a notebook instance, you lose all of your data. Amazon SageMaker +// removes the ML compute instance, and deletes the ML storage volume and the +// network interface associated with the notebook instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation DeleteNotebookInstance for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteNotebookInstance +func (c *SageMaker) DeleteNotebookInstance(input *DeleteNotebookInstanceInput) (*DeleteNotebookInstanceOutput, error) { + req, out := c.DeleteNotebookInstanceRequest(input) + return out, req.Send() +} + +// DeleteNotebookInstanceWithContext is the same as DeleteNotebookInstance with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNotebookInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) DeleteNotebookInstanceWithContext(ctx aws.Context, input *DeleteNotebookInstanceInput, opts ...request.Option) (*DeleteNotebookInstanceOutput, error) { + req, out := c.DeleteNotebookInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTags for more information on using the DeleteTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTagsRequest method. 
+// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteTags +func (c *SageMaker) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + output = &DeleteTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTags API operation for Amazon SageMaker Service. +// +// Deletes the specified tags from an Amazon SageMaker resource. +// +// To list a resource's tags, use the ListTags API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation DeleteTags for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteTags +func (c *SageMaker) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + return out, req.Send() +} + +// DeleteTagsWithContext is the same as DeleteTags with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SageMaker) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsInput, opts ...request.Option) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeEndpoint = "DescribeEndpoint" + +// DescribeEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEndpoint operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEndpoint for more information on using the DescribeEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEndpointRequest method. +// req, resp := client.DescribeEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpoint +func (c *SageMaker) DescribeEndpointRequest(input *DescribeEndpointInput) (req *request.Request, output *DescribeEndpointOutput) { + op := &request.Operation{ + Name: opDescribeEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEndpointInput{} + } + + output = &DescribeEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEndpoint API operation for Amazon SageMaker Service. +// +// Returns the description of an endpoint. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation DescribeEndpoint for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpoint +func (c *SageMaker) DescribeEndpoint(input *DescribeEndpointInput) (*DescribeEndpointOutput, error) { + req, out := c.DescribeEndpointRequest(input) + return out, req.Send() +} + +// DescribeEndpointWithContext is the same as DescribeEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) DescribeEndpointWithContext(ctx aws.Context, input *DescribeEndpointInput, opts ...request.Option) (*DescribeEndpointOutput, error) { + req, out := c.DescribeEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeEndpointConfig = "DescribeEndpointConfig" + +// DescribeEndpointConfigRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEndpointConfig operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEndpointConfig for more information on using the DescribeEndpointConfig +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEndpointConfigRequest method. +// req, resp := client.DescribeEndpointConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointConfig +func (c *SageMaker) DescribeEndpointConfigRequest(input *DescribeEndpointConfigInput) (req *request.Request, output *DescribeEndpointConfigOutput) { + op := &request.Operation{ + Name: opDescribeEndpointConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEndpointConfigInput{} + } + + output = &DescribeEndpointConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEndpointConfig API operation for Amazon SageMaker Service. +// +// Returns the description of an endpoint configuration created using the CreateEndpointConfig +// API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation DescribeEndpointConfig for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointConfig +func (c *SageMaker) DescribeEndpointConfig(input *DescribeEndpointConfigInput) (*DescribeEndpointConfigOutput, error) { + req, out := c.DescribeEndpointConfigRequest(input) + return out, req.Send() +} + +// DescribeEndpointConfigWithContext is the same as DescribeEndpointConfig with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEndpointConfig for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) DescribeEndpointConfigWithContext(ctx aws.Context, input *DescribeEndpointConfigInput, opts ...request.Option) (*DescribeEndpointConfigOutput, error) { + req, out := c.DescribeEndpointConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeModel = "DescribeModel" + +// DescribeModelRequest generates a "aws/request.Request" representing the +// client's request for the DescribeModel operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeModel for more information on using the DescribeModel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeModelRequest method. 
+// req, resp := client.DescribeModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeModel +func (c *SageMaker) DescribeModelRequest(input *DescribeModelInput) (req *request.Request, output *DescribeModelOutput) { + op := &request.Operation{ + Name: opDescribeModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeModelInput{} + } + + output = &DescribeModelOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeModel API operation for Amazon SageMaker Service. +// +// Describes a model that you created using the CreateModel API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation DescribeModel for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeModel +func (c *SageMaker) DescribeModel(input *DescribeModelInput) (*DescribeModelOutput, error) { + req, out := c.DescribeModelRequest(input) + return out, req.Send() +} + +// DescribeModelWithContext is the same as DescribeModel with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeModel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SageMaker) DescribeModelWithContext(ctx aws.Context, input *DescribeModelInput, opts ...request.Option) (*DescribeModelOutput, error) { + req, out := c.DescribeModelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeNotebookInstance = "DescribeNotebookInstance" + +// DescribeNotebookInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNotebookInstance operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeNotebookInstance for more information on using the DescribeNotebookInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeNotebookInstanceRequest method. +// req, resp := client.DescribeNotebookInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeNotebookInstance +func (c *SageMaker) DescribeNotebookInstanceRequest(input *DescribeNotebookInstanceInput) (req *request.Request, output *DescribeNotebookInstanceOutput) { + op := &request.Operation{ + Name: opDescribeNotebookInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNotebookInstanceInput{} + } + + output = &DescribeNotebookInstanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeNotebookInstance API operation for Amazon SageMaker Service. +// +// Returns information about a notebook instance. 
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon SageMaker Service's
+// API operation DescribeNotebookInstance for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeNotebookInstance
+func (c *SageMaker) DescribeNotebookInstance(input *DescribeNotebookInstanceInput) (*DescribeNotebookInstanceOutput, error) {
+	req, out := c.DescribeNotebookInstanceRequest(input)
+	return out, req.Send()
+}
+
+// DescribeNotebookInstanceWithContext is the same as DescribeNotebookInstance with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeNotebookInstance for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SageMaker) DescribeNotebookInstanceWithContext(ctx aws.Context, input *DescribeNotebookInstanceInput, opts ...request.Option) (*DescribeNotebookInstanceOutput, error) {
+	req, out := c.DescribeNotebookInstanceRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDescribeTrainingJob = "DescribeTrainingJob"
+
+// DescribeTrainingJobRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeTrainingJob operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+// +// See DescribeTrainingJob for more information on using the DescribeTrainingJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTrainingJobRequest method. +// req, resp := client.DescribeTrainingJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeTrainingJob +func (c *SageMaker) DescribeTrainingJobRequest(input *DescribeTrainingJobInput) (req *request.Request, output *DescribeTrainingJobOutput) { + op := &request.Operation{ + Name: opDescribeTrainingJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrainingJobInput{} + } + + output = &DescribeTrainingJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTrainingJob API operation for Amazon SageMaker Service. +// +// Returns information about a training job. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation DescribeTrainingJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFound "ResourceNotFound" +// Resource being access is not found. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeTrainingJob +func (c *SageMaker) DescribeTrainingJob(input *DescribeTrainingJobInput) (*DescribeTrainingJobOutput, error) { + req, out := c.DescribeTrainingJobRequest(input) + return out, req.Send() +} + +// DescribeTrainingJobWithContext is the same as DescribeTrainingJob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTrainingJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) DescribeTrainingJobWithContext(ctx aws.Context, input *DescribeTrainingJobInput, opts ...request.Option) (*DescribeTrainingJobOutput, error) { + req, out := c.DescribeTrainingJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListEndpointConfigs = "ListEndpointConfigs" + +// ListEndpointConfigsRequest generates a "aws/request.Request" representing the +// client's request for the ListEndpointConfigs operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListEndpointConfigs for more information on using the ListEndpointConfigs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListEndpointConfigsRequest method. 
+// req, resp := client.ListEndpointConfigsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointConfigs +func (c *SageMaker) ListEndpointConfigsRequest(input *ListEndpointConfigsInput) (req *request.Request, output *ListEndpointConfigsOutput) { + op := &request.Operation{ + Name: opListEndpointConfigs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEndpointConfigsInput{} + } + + output = &ListEndpointConfigsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListEndpointConfigs API operation for Amazon SageMaker Service. +// +// Lists endpoint configurations. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation ListEndpointConfigs for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointConfigs +func (c *SageMaker) ListEndpointConfigs(input *ListEndpointConfigsInput) (*ListEndpointConfigsOutput, error) { + req, out := c.ListEndpointConfigsRequest(input) + return out, req.Send() +} + +// ListEndpointConfigsWithContext is the same as ListEndpointConfigs with the addition of +// the ability to pass a context and additional request options. +// +// See ListEndpointConfigs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListEndpointConfigsWithContext(ctx aws.Context, input *ListEndpointConfigsInput, opts ...request.Option) (*ListEndpointConfigsOutput, error) { + req, out := c.ListEndpointConfigsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListEndpointConfigsPages iterates over the pages of a ListEndpointConfigs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEndpointConfigs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEndpointConfigs operation. +// pageNum := 0 +// err := client.ListEndpointConfigsPages(params, +// func(page *ListEndpointConfigsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SageMaker) ListEndpointConfigsPages(input *ListEndpointConfigsInput, fn func(*ListEndpointConfigsOutput, bool) bool) error { + return c.ListEndpointConfigsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListEndpointConfigsPagesWithContext same as ListEndpointConfigsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SageMaker) ListEndpointConfigsPagesWithContext(ctx aws.Context, input *ListEndpointConfigsInput, fn func(*ListEndpointConfigsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListEndpointConfigsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListEndpointConfigsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListEndpointConfigsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListEndpoints = "ListEndpoints" + +// ListEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the ListEndpoints operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListEndpoints for more information on using the ListEndpoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListEndpointsRequest method. 
+// req, resp := client.ListEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpoints +func (c *SageMaker) ListEndpointsRequest(input *ListEndpointsInput) (req *request.Request, output *ListEndpointsOutput) { + op := &request.Operation{ + Name: opListEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEndpointsInput{} + } + + output = &ListEndpointsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListEndpoints API operation for Amazon SageMaker Service. +// +// Lists endpoints. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation ListEndpoints for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpoints +func (c *SageMaker) ListEndpoints(input *ListEndpointsInput) (*ListEndpointsOutput, error) { + req, out := c.ListEndpointsRequest(input) + return out, req.Send() +} + +// ListEndpointsWithContext is the same as ListEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See ListEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SageMaker) ListEndpointsWithContext(ctx aws.Context, input *ListEndpointsInput, opts ...request.Option) (*ListEndpointsOutput, error) { + req, out := c.ListEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListEndpointsPages iterates over the pages of a ListEndpoints operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEndpoints method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEndpoints operation. +// pageNum := 0 +// err := client.ListEndpointsPages(params, +// func(page *ListEndpointsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SageMaker) ListEndpointsPages(input *ListEndpointsInput, fn func(*ListEndpointsOutput, bool) bool) error { + return c.ListEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListEndpointsPagesWithContext same as ListEndpointsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListEndpointsPagesWithContext(ctx aws.Context, input *ListEndpointsInput, fn func(*ListEndpointsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListEndpointsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListEndpointsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListEndpointsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListModels = "ListModels" + +// ListModelsRequest generates a "aws/request.Request" representing the +// client's request for the ListModels operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListModels for more information on using the ListModels +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListModelsRequest method. +// req, resp := client.ListModelsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListModels +func (c *SageMaker) ListModelsRequest(input *ListModelsInput) (req *request.Request, output *ListModelsOutput) { + op := &request.Operation{ + Name: opListModels, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListModelsInput{} + } + + output = &ListModelsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListModels API operation for Amazon SageMaker Service. +// +// Lists models created with the CreateModel (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateModel.html) +// API. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation ListModels for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListModels +func (c *SageMaker) ListModels(input *ListModelsInput) (*ListModelsOutput, error) { + req, out := c.ListModelsRequest(input) + return out, req.Send() +} + +// ListModelsWithContext is the same as ListModels with the addition of +// the ability to pass a context and additional request options. +// +// See ListModels for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListModelsWithContext(ctx aws.Context, input *ListModelsInput, opts ...request.Option) (*ListModelsOutput, error) { + req, out := c.ListModelsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListModelsPages iterates over the pages of a ListModels operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListModels method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListModels operation. 
+// pageNum := 0 +// err := client.ListModelsPages(params, +// func(page *ListModelsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SageMaker) ListModelsPages(input *ListModelsInput, fn func(*ListModelsOutput, bool) bool) error { + return c.ListModelsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListModelsPagesWithContext same as ListModelsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListModelsPagesWithContext(ctx aws.Context, input *ListModelsInput, fn func(*ListModelsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListModelsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListModelsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListModelsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListNotebookInstances = "ListNotebookInstances" + +// ListNotebookInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListNotebookInstances operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListNotebookInstances for more information on using the ListNotebookInstances +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListNotebookInstancesRequest method. +// req, resp := client.ListNotebookInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListNotebookInstances +func (c *SageMaker) ListNotebookInstancesRequest(input *ListNotebookInstancesInput) (req *request.Request, output *ListNotebookInstancesOutput) { + op := &request.Operation{ + Name: opListNotebookInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListNotebookInstancesInput{} + } + + output = &ListNotebookInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListNotebookInstances API operation for Amazon SageMaker Service. +// +// Returns a list of the Amazon SageMaker notebook instances in the requester's +// account in an AWS Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation ListNotebookInstances for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListNotebookInstances +func (c *SageMaker) ListNotebookInstances(input *ListNotebookInstancesInput) (*ListNotebookInstancesOutput, error) { + req, out := c.ListNotebookInstancesRequest(input) + return out, req.Send() +} + +// ListNotebookInstancesWithContext is the same as ListNotebookInstances with the addition of +// the ability to pass a context and additional request options. +// +// See ListNotebookInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListNotebookInstancesWithContext(ctx aws.Context, input *ListNotebookInstancesInput, opts ...request.Option) (*ListNotebookInstancesOutput, error) { + req, out := c.ListNotebookInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListNotebookInstancesPages iterates over the pages of a ListNotebookInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListNotebookInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListNotebookInstances operation. 
+// pageNum := 0 +// err := client.ListNotebookInstancesPages(params, +// func(page *ListNotebookInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SageMaker) ListNotebookInstancesPages(input *ListNotebookInstancesInput, fn func(*ListNotebookInstancesOutput, bool) bool) error { + return c.ListNotebookInstancesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListNotebookInstancesPagesWithContext same as ListNotebookInstancesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListNotebookInstancesPagesWithContext(ctx aws.Context, input *ListNotebookInstancesInput, fn func(*ListNotebookInstancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListNotebookInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListNotebookInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListNotebookInstancesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListTags = "ListTags" + +// ListTagsRequest generates a "aws/request.Request" representing the +// client's request for the ListTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListTags for more information on using the ListTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsRequest method. +// req, resp := client.ListTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTags +func (c *SageMaker) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { + op := &request.Operation{ + Name: opListTags, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTagsInput{} + } + + output = &ListTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTags API operation for Amazon SageMaker Service. +// +// Returns the tags for the specified Amazon SageMaker resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation ListTags for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTags +func (c *SageMaker) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { + req, out := c.ListTagsRequest(input) + return out, req.Send() +} + +// ListTagsWithContext is the same as ListTags with the addition of +// the ability to pass a context and additional request options. +// +// See ListTags for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, opts ...request.Option) (*ListTagsOutput, error) { + req, out := c.ListTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTagsPages iterates over the pages of a ListTags operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTags method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTags operation. +// pageNum := 0 +// err := client.ListTagsPages(params, +// func(page *ListTagsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SageMaker) ListTagsPages(input *ListTagsInput, fn func(*ListTagsOutput, bool) bool) error { + return c.ListTagsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTagsPagesWithContext same as ListTagsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SageMaker) ListTagsPagesWithContext(ctx aws.Context, input *ListTagsInput, fn func(*ListTagsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTagsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTagsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListTrainingJobs = "ListTrainingJobs" + +// ListTrainingJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListTrainingJobs operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTrainingJobs for more information on using the ListTrainingJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTrainingJobsRequest method. 
+// req, resp := client.ListTrainingJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTrainingJobs +func (c *SageMaker) ListTrainingJobsRequest(input *ListTrainingJobsInput) (req *request.Request, output *ListTrainingJobsOutput) { + op := &request.Operation{ + Name: opListTrainingJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTrainingJobsInput{} + } + + output = &ListTrainingJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTrainingJobs API operation for Amazon SageMaker Service. +// +// Lists training jobs. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation ListTrainingJobs for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTrainingJobs +func (c *SageMaker) ListTrainingJobs(input *ListTrainingJobsInput) (*ListTrainingJobsOutput, error) { + req, out := c.ListTrainingJobsRequest(input) + return out, req.Send() +} + +// ListTrainingJobsWithContext is the same as ListTrainingJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListTrainingJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SageMaker) ListTrainingJobsWithContext(ctx aws.Context, input *ListTrainingJobsInput, opts ...request.Option) (*ListTrainingJobsOutput, error) { + req, out := c.ListTrainingJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTrainingJobsPages iterates over the pages of a ListTrainingJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTrainingJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTrainingJobs operation. +// pageNum := 0 +// err := client.ListTrainingJobsPages(params, +// func(page *ListTrainingJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SageMaker) ListTrainingJobsPages(input *ListTrainingJobsInput, fn func(*ListTrainingJobsOutput, bool) bool) error { + return c.ListTrainingJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTrainingJobsPagesWithContext same as ListTrainingJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SageMaker) ListTrainingJobsPagesWithContext(ctx aws.Context, input *ListTrainingJobsInput, fn func(*ListTrainingJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTrainingJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTrainingJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListTrainingJobsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opStartNotebookInstance = "StartNotebookInstance" + +// StartNotebookInstanceRequest generates a "aws/request.Request" representing the +// client's request for the StartNotebookInstance operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartNotebookInstance for more information on using the StartNotebookInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartNotebookInstanceRequest method. 
+// req, resp := client.StartNotebookInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StartNotebookInstance +func (c *SageMaker) StartNotebookInstanceRequest(input *StartNotebookInstanceInput) (req *request.Request, output *StartNotebookInstanceOutput) { + op := &request.Operation{ + Name: opStartNotebookInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartNotebookInstanceInput{} + } + + output = &StartNotebookInstanceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// StartNotebookInstance API operation for Amazon SageMaker Service. +// +// Launches an ML compute instance with the latest version of the libraries +// and attaches your ML storage volume. After configuring the notebook instance, +// Amazon SageMaker sets the notebook instance status to InService. A notebook +// instance's status must be InService before you can connect to your Jupyter +// notebook. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation StartNotebookInstance for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" +// You have exceeded an Amazon SageMaker resource limit. For example, you might +// have too many training jobs created. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StartNotebookInstance +func (c *SageMaker) StartNotebookInstance(input *StartNotebookInstanceInput) (*StartNotebookInstanceOutput, error) { + req, out := c.StartNotebookInstanceRequest(input) + return out, req.Send() +} + +// StartNotebookInstanceWithContext is the same as StartNotebookInstance with the addition of +// the ability to pass a context and additional request options. +// +// See StartNotebookInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) StartNotebookInstanceWithContext(ctx aws.Context, input *StartNotebookInstanceInput, opts ...request.Option) (*StartNotebookInstanceOutput, error) { + req, out := c.StartNotebookInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopNotebookInstance = "StopNotebookInstance" + +// StopNotebookInstanceRequest generates a "aws/request.Request" representing the +// client's request for the StopNotebookInstance operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopNotebookInstance for more information on using the StopNotebookInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopNotebookInstanceRequest method. 
+// req, resp := client.StopNotebookInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopNotebookInstance +func (c *SageMaker) StopNotebookInstanceRequest(input *StopNotebookInstanceInput) (req *request.Request, output *StopNotebookInstanceOutput) { + op := &request.Operation{ + Name: opStopNotebookInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopNotebookInstanceInput{} + } + + output = &StopNotebookInstanceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopNotebookInstance API operation for Amazon SageMaker Service. +// +// Terminates the ML compute instance. Before terminating the instance, Amazon +// SageMaker disconnects the ML storage volume from it. Amazon SageMaker preserves +// the ML storage volume. +// +// To access data on the ML storage volume for a notebook instance that has +// been terminated, call the StartNotebookInstance API. StartNotebookInstance +// launches another ML compute instance, configures it, and attaches the preserved +// ML storage volume so you can continue your work. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation StopNotebookInstance for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopNotebookInstance +func (c *SageMaker) StopNotebookInstance(input *StopNotebookInstanceInput) (*StopNotebookInstanceOutput, error) { + req, out := c.StopNotebookInstanceRequest(input) + return out, req.Send() +} + +// StopNotebookInstanceWithContext is the same as StopNotebookInstance with the addition of +// the ability to pass a context and additional request options. +// +// See StopNotebookInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) StopNotebookInstanceWithContext(ctx aws.Context, input *StopNotebookInstanceInput, opts ...request.Option) (*StopNotebookInstanceOutput, error) { + req, out := c.StopNotebookInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopTrainingJob = "StopTrainingJob" + +// StopTrainingJobRequest generates a "aws/request.Request" representing the +// client's request for the StopTrainingJob operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopTrainingJob for more information on using the StopTrainingJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopTrainingJobRequest method. 
+// req, resp := client.StopTrainingJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopTrainingJob +func (c *SageMaker) StopTrainingJobRequest(input *StopTrainingJobInput) (req *request.Request, output *StopTrainingJobOutput) { + op := &request.Operation{ + Name: opStopTrainingJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopTrainingJobInput{} + } + + output = &StopTrainingJobOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopTrainingJob API operation for Amazon SageMaker Service. +// +// Stops a training job. To stop a job, Amazon SageMaker sends the algorithm +// the SIGTERM signal, which delays job termination for 120 seconds. Algorithms +// might use this 120-second window to save the model artifacts, so the results +// of the training is not lost. +// +// Training algorithms provided by Amazon SageMaker save the intermediate results +// of a model training job. This intermediate data is a valid model artifact. +// You can use the model artifacts that are saved when Amazon SageMaker stops +// a training job to create a model. +// +// When it receives a StopTrainingJob request, Amazon SageMaker changes the +// status of the job to Stopping. After Amazon SageMaker stops the job, it sets +// the status to Stopped. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation StopTrainingJob for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeResourceNotFound "ResourceNotFound" +// Resource being access is not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopTrainingJob +func (c *SageMaker) StopTrainingJob(input *StopTrainingJobInput) (*StopTrainingJobOutput, error) { + req, out := c.StopTrainingJobRequest(input) + return out, req.Send() +} + +// StopTrainingJobWithContext is the same as StopTrainingJob with the addition of +// the ability to pass a context and additional request options. +// +// See StopTrainingJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) StopTrainingJobWithContext(ctx aws.Context, input *StopTrainingJobInput, opts ...request.Option) (*StopTrainingJobOutput, error) { + req, out := c.StopTrainingJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateEndpoint = "UpdateEndpoint" + +// UpdateEndpointRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEndpoint operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateEndpoint for more information on using the UpdateEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateEndpointRequest method. 
+// req, resp := client.UpdateEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpoint +func (c *SageMaker) UpdateEndpointRequest(input *UpdateEndpointInput) (req *request.Request, output *UpdateEndpointOutput) { + op := &request.Operation{ + Name: opUpdateEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateEndpointInput{} + } + + output = &UpdateEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateEndpoint API operation for Amazon SageMaker Service. +// +// Deploys the new EndpointConfig specified in the request, switches to using +// newly created endpoint, and then deletes resources provisioned for the endpoint +// using the previous EndpointConfig (there is no availability loss). +// +// When Amazon SageMaker receives the request, it sets the endpoint status to +// Updating. After updating the endpoint, it sets the status to InService. To +// check the status of an endpoint, use the DescribeEndpoint (http://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html) +// API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation UpdateEndpoint for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" +// You have exceeded an Amazon SageMaker resource limit. For example, you might +// have too many training jobs created. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpoint +func (c *SageMaker) UpdateEndpoint(input *UpdateEndpointInput) (*UpdateEndpointOutput, error) { + req, out := c.UpdateEndpointRequest(input) + return out, req.Send() +} + +// UpdateEndpointWithContext is the same as UpdateEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) UpdateEndpointWithContext(ctx aws.Context, input *UpdateEndpointInput, opts ...request.Option) (*UpdateEndpointOutput, error) { + req, out := c.UpdateEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateEndpointWeightsAndCapacities = "UpdateEndpointWeightsAndCapacities" + +// UpdateEndpointWeightsAndCapacitiesRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEndpointWeightsAndCapacities operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateEndpointWeightsAndCapacities for more information on using the UpdateEndpointWeightsAndCapacities +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateEndpointWeightsAndCapacitiesRequest method. 
+// req, resp := client.UpdateEndpointWeightsAndCapacitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointWeightsAndCapacities +func (c *SageMaker) UpdateEndpointWeightsAndCapacitiesRequest(input *UpdateEndpointWeightsAndCapacitiesInput) (req *request.Request, output *UpdateEndpointWeightsAndCapacitiesOutput) { + op := &request.Operation{ + Name: opUpdateEndpointWeightsAndCapacities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateEndpointWeightsAndCapacitiesInput{} + } + + output = &UpdateEndpointWeightsAndCapacitiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateEndpointWeightsAndCapacities API operation for Amazon SageMaker Service. +// +// Updates variant weight, capacity, or both of one or more variants associated +// with an endpoint. This operation updates weight, capacity, or both for the +// previously provisioned endpoint. When it receives the request, Amazon SageMaker +// sets the endpoint status to Updating. After updating the endpoint, it sets +// the status to InService. To check the status of an endpoint, use the DescribeEndpoint +// (http://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html) +// API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation UpdateEndpointWeightsAndCapacities for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" +// You have exceeded an Amazon SageMaker resource limit. For example, you might +// have too many training jobs created. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointWeightsAndCapacities +func (c *SageMaker) UpdateEndpointWeightsAndCapacities(input *UpdateEndpointWeightsAndCapacitiesInput) (*UpdateEndpointWeightsAndCapacitiesOutput, error) { + req, out := c.UpdateEndpointWeightsAndCapacitiesRequest(input) + return out, req.Send() +} + +// UpdateEndpointWeightsAndCapacitiesWithContext is the same as UpdateEndpointWeightsAndCapacities with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateEndpointWeightsAndCapacities for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) UpdateEndpointWeightsAndCapacitiesWithContext(ctx aws.Context, input *UpdateEndpointWeightsAndCapacitiesInput, opts ...request.Option) (*UpdateEndpointWeightsAndCapacitiesOutput, error) { + req, out := c.UpdateEndpointWeightsAndCapacitiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateNotebookInstance = "UpdateNotebookInstance" + +// UpdateNotebookInstanceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateNotebookInstance operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateNotebookInstance for more information on using the UpdateNotebookInstance +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateNotebookInstanceRequest method. +// req, resp := client.UpdateNotebookInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateNotebookInstance +func (c *SageMaker) UpdateNotebookInstanceRequest(input *UpdateNotebookInstanceInput) (req *request.Request, output *UpdateNotebookInstanceOutput) { + op := &request.Operation{ + Name: opUpdateNotebookInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateNotebookInstanceInput{} + } + + output = &UpdateNotebookInstanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateNotebookInstance API operation for Amazon SageMaker Service. +// +// Updates a notebook instance. NotebookInstance updates include upgrading or +// downgrading the ML compute instance used for your notebook instance to accommodate +// changes in your workload requirements. You can also update the VPC security +// groups. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation UpdateNotebookInstance for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" +// You have exceeded an Amazon SageMaker resource limit. For example, you might +// have too many training jobs created. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateNotebookInstance +func (c *SageMaker) UpdateNotebookInstance(input *UpdateNotebookInstanceInput) (*UpdateNotebookInstanceOutput, error) { + req, out := c.UpdateNotebookInstanceRequest(input) + return out, req.Send() +} + +// UpdateNotebookInstanceWithContext is the same as UpdateNotebookInstance with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateNotebookInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) UpdateNotebookInstanceWithContext(ctx aws.Context, input *UpdateNotebookInstanceInput, opts ...request.Option) (*UpdateNotebookInstanceOutput, error) { + req, out := c.UpdateNotebookInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AddTagsInput +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that you want to tag. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` + + // An array of Tag objects. Each tag is a key-value pair. Only the key parameter + // is required. If you don't specify a value, Amazon SageMaker sets the value + // to an empty string. 
+ // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *AddTagsInput) SetResourceArn(v string) *AddTagsInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AddTagsInput) SetTags(v []*Tag) *AddTagsInput { + s.Tags = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AddTagsOutput +type AddTagsOutput struct { + _ struct{} `type:"structure"` + + // A list of tags associated with the Amazon SageMaker resource. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. 
+func (s *AddTagsOutput) SetTags(v []*Tag) *AddTagsOutput { + s.Tags = v + return s +} + +// Specifies the training algorithm to use in a CreateTrainingJob (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateTrainingJob.html) +// request. +// +// For more information about algorithms provided by Amazon SageMaker, see Algorithms +// (http://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). For information +// about using your own algorithms, see Bring Your Own Algorithms (http://docs.aws.amazon.com/sagemaker/latest/dg/adv-topics-own-algo.html). +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AlgorithmSpecification +type AlgorithmSpecification struct { + _ struct{} `type:"structure"` + + // The registry path of the Docker image that contains the training algorithm. + // For information about using your own algorithms, see Docker Registry Paths + // for Algorithms Provided by Amazon SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/algos-docker-registry-paths.html). + // + // TrainingImage is a required field + TrainingImage *string `type:"string" required:"true"` + + // The input mode that the algorithm supports. For the input modes that Amazon + // SageMaker algorithms support, see Algorithms (http://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + // If an algorithm supports the File input mode, Amazon SageMaker downloads + // the training data from S3 to the provisioned ML storage Volume, and mounts + // the directory to docker volume for training container. If an algorithm supports + // the Pipe input mode, Amazon SageMaker streams data directly from S3 to the + // container. + // + // In File mode, make sure you provision ML storage volume with sufficient capacity + // to accomodate the data download from S3. In addition to the training data, + // the ML storage volume also stores the output model. The algorithm container + // use ML storage volume to also store intermediate information, if any. 
+ // + // For distributed algorithms using File mode, training data is distributed + // uniformly, and your training duration is predictable if the input data objects + // size is approximately same. Amazon SageMaker does not split the files any + // further for model training. If the object sizes are skewed, training won't + // be optimal as the data distribution is also skewed where one host in a training + // cluster is overloaded, thus becoming bottleneck in training. + // + // TrainingInputMode is a required field + TrainingInputMode *string `type:"string" required:"true" enum:"TrainingInputMode"` +} + +// String returns the string representation +func (s AlgorithmSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AlgorithmSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AlgorithmSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AlgorithmSpecification"} + if s.TrainingImage == nil { + invalidParams.Add(request.NewErrParamRequired("TrainingImage")) + } + if s.TrainingInputMode == nil { + invalidParams.Add(request.NewErrParamRequired("TrainingInputMode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTrainingImage sets the TrainingImage field's value. +func (s *AlgorithmSpecification) SetTrainingImage(v string) *AlgorithmSpecification { + s.TrainingImage = &v + return s +} + +// SetTrainingInputMode sets the TrainingInputMode field's value. +func (s *AlgorithmSpecification) SetTrainingInputMode(v string) *AlgorithmSpecification { + s.TrainingInputMode = &v + return s +} + +// A channel is a named input source that training algorithms can consume. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/Channel +type Channel struct { + _ struct{} `type:"structure"` + + // The name of the channel. 
+ // + // ChannelName is a required field + ChannelName *string `min:"1" type:"string" required:"true"` + + // If training data is compressed, the compression type. The default value is + // None. CompressionType is used only in PIPE input mode. In FILE mode, leave + // this field unset or set it to None. + CompressionType *string `type:"string" enum:"CompressionType"` + + // The MIME type of the data. + ContentType *string `type:"string"` + + // The location of the channel data. + // + // DataSource is a required field + DataSource *DataSource `type:"structure" required:"true"` + + // Specify RecordIO as the value when input data is in raw format but the training + // algorithm requires the RecordIO format, in which caseAmazon SageMaker wraps + // each individual S3 object in a RecordIO record. If the input data is already + // in RecordIO format, you don't need to set this attribute. For more information, + // see Create a Dataset Using RecordIO (https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) + RecordWrapperType *string `type:"string" enum:"RecordWrapper"` +} + +// String returns the string representation +func (s Channel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Channel) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Channel) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Channel"} + if s.ChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("ChannelName")) + } + if s.ChannelName != nil && len(*s.ChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1)) + } + if s.DataSource == nil { + invalidParams.Add(request.NewErrParamRequired("DataSource")) + } + if s.DataSource != nil { + if err := s.DataSource.Validate(); err != nil { + invalidParams.AddNested("DataSource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChannelName sets the ChannelName field's value. +func (s *Channel) SetChannelName(v string) *Channel { + s.ChannelName = &v + return s +} + +// SetCompressionType sets the CompressionType field's value. +func (s *Channel) SetCompressionType(v string) *Channel { + s.CompressionType = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *Channel) SetContentType(v string) *Channel { + s.ContentType = &v + return s +} + +// SetDataSource sets the DataSource field's value. +func (s *Channel) SetDataSource(v *DataSource) *Channel { + s.DataSource = v + return s +} + +// SetRecordWrapperType sets the RecordWrapperType field's value. +func (s *Channel) SetRecordWrapperType(v string) *Channel { + s.RecordWrapperType = &v + return s +} + +// Describes the container, as part of model definition. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ContainerDefinition +type ContainerDefinition struct { + _ struct{} `type:"structure"` + + // The DNS host name for the container after Amazon SageMaker deploys it. + ContainerHostname *string `type:"string"` + + // The environment variables to set in the Docker container. Each key and value + // in the Environment string to string map can have length of up to 1024. We + // support up to 16 entries in the map. 
+ Environment map[string]*string `type:"map"` + + // The Amazon EC2 Container Registry (Amazon ECR) path where inference code + // is stored. If you are using your own custom algorithm instead of an algorithm + // provided by Amazon SageMaker, the inference code must meet Amazon SageMaker + // requirements. For more information, see Using Your Own Algorithms with Amazon + // SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html) + // + // Image is a required field + Image *string `type:"string" required:"true"` + + // The S3 path where the model artifacts, which result from model training, + // are stored. This path must point to a single gzip compressed tar archive + // (.tar.gz suffix). + ModelDataUrl *string `type:"string"` +} + +// String returns the string representation +func (s ContainerDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ContainerDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContainerDefinition"} + if s.Image == nil { + invalidParams.Add(request.NewErrParamRequired("Image")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerHostname sets the ContainerHostname field's value. +func (s *ContainerDefinition) SetContainerHostname(v string) *ContainerDefinition { + s.ContainerHostname = &v + return s +} + +// SetEnvironment sets the Environment field's value. +func (s *ContainerDefinition) SetEnvironment(v map[string]*string) *ContainerDefinition { + s.Environment = v + return s +} + +// SetImage sets the Image field's value. +func (s *ContainerDefinition) SetImage(v string) *ContainerDefinition { + s.Image = &v + return s +} + +// SetModelDataUrl sets the ModelDataUrl field's value. 
+func (s *ContainerDefinition) SetModelDataUrl(v string) *ContainerDefinition { + s.ModelDataUrl = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointConfigInput +type CreateEndpointConfigInput struct { + _ struct{} `type:"structure"` + + // The name of the endpoint configuration. You specify this name in a CreateEndpoint + // (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html) + // request. + // + // EndpointConfigName is a required field + EndpointConfigName *string `type:"string" required:"true"` + + // An array of ProductionVariant objects, one for each model that you want to + // host at this endpoint. + // + // ProductionVariants is a required field + ProductionVariants []*ProductionVariant `min:"1" type:"list" required:"true"` + + // An array of key-value pairs. For more information, see Using Cost Allocation + // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) + // in the AWS Billing and Cost Management User Guide. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateEndpointConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEndpointConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateEndpointConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateEndpointConfigInput"} + if s.EndpointConfigName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointConfigName")) + } + if s.ProductionVariants == nil { + invalidParams.Add(request.NewErrParamRequired("ProductionVariants")) + } + if s.ProductionVariants != nil && len(s.ProductionVariants) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProductionVariants", 1)) + } + if s.ProductionVariants != nil { + for i, v := range s.ProductionVariants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProductionVariants", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndpointConfigName sets the EndpointConfigName field's value. +func (s *CreateEndpointConfigInput) SetEndpointConfigName(v string) *CreateEndpointConfigInput { + s.EndpointConfigName = &v + return s +} + +// SetProductionVariants sets the ProductionVariants field's value. +func (s *CreateEndpointConfigInput) SetProductionVariants(v []*ProductionVariant) *CreateEndpointConfigInput { + s.ProductionVariants = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateEndpointConfigInput) SetTags(v []*Tag) *CreateEndpointConfigInput { + s.Tags = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointConfigOutput +type CreateEndpointConfigOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the endpoint configuration. 
+ // + // EndpointConfigArn is a required field + EndpointConfigArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateEndpointConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEndpointConfigOutput) GoString() string { + return s.String() +} + +// SetEndpointConfigArn sets the EndpointConfigArn field's value. +func (s *CreateEndpointConfigOutput) SetEndpointConfigArn(v string) *CreateEndpointConfigOutput { + s.EndpointConfigArn = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointInput +type CreateEndpointInput struct { + _ struct{} `type:"structure"` + + // The name of an endpoint configuration. For more information, see CreateEndpointConfig + // (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html). + // + // EndpointConfigName is a required field + EndpointConfigName *string `type:"string" required:"true"` + + // The name of the endpoint. The name must be unique within an AWS Region in + // your AWS account. + // + // EndpointName is a required field + EndpointName *string `type:"string" required:"true"` + + // An array of key-value pairs. For more information, see Using Cost Allocation + // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what)in + // the AWS Billing and Cost Management User Guide. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateEndpointInput"} + if s.EndpointConfigName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointConfigName")) + } + if s.EndpointName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointName")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndpointConfigName sets the EndpointConfigName field's value. +func (s *CreateEndpointInput) SetEndpointConfigName(v string) *CreateEndpointInput { + s.EndpointConfigName = &v + return s +} + +// SetEndpointName sets the EndpointName field's value. +func (s *CreateEndpointInput) SetEndpointName(v string) *CreateEndpointInput { + s.EndpointName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateEndpointInput) SetTags(v []*Tag) *CreateEndpointInput { + s.Tags = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointOutput +type CreateEndpointOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the endpoint. + // + // EndpointArn is a required field + EndpointArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEndpointOutput) GoString() string { + return s.String() +} + +// SetEndpointArn sets the EndpointArn field's value. 
+func (s *CreateEndpointOutput) SetEndpointArn(v string) *CreateEndpointOutput { + s.EndpointArn = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateModelInput +type CreateModelInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can + // assume to access model artifacts and docker image for deployment on ML compute + // instances. Deploying on ML compute instances is part of model hosting. For + // more information, see Amazon SageMaker Roles (http://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). + // + // ExecutionRoleArn is a required field + ExecutionRoleArn *string `min:"20" type:"string" required:"true"` + + // The name of the new model. + // + // ModelName is a required field + ModelName *string `type:"string" required:"true"` + + // The location of the primary docker image containing inference code, associated + // artifacts, and custom environment map that the inference code uses when the + // model is deployed into production. + // + // PrimaryContainer is a required field + PrimaryContainer *ContainerDefinition `type:"structure" required:"true"` + + // An array of key-value pairs. For more information, see Using Cost Allocation + // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) + // in the AWS Billing and Cost Management User Guide. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateModelInput"} + if s.ExecutionRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("ExecutionRoleArn")) + } + if s.ExecutionRoleArn != nil && len(*s.ExecutionRoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionRoleArn", 20)) + } + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + if s.PrimaryContainer == nil { + invalidParams.Add(request.NewErrParamRequired("PrimaryContainer")) + } + if s.PrimaryContainer != nil { + if err := s.PrimaryContainer.Validate(); err != nil { + invalidParams.AddNested("PrimaryContainer", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *CreateModelInput) SetExecutionRoleArn(v string) *CreateModelInput { + s.ExecutionRoleArn = &v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *CreateModelInput) SetModelName(v string) *CreateModelInput { + s.ModelName = &v + return s +} + +// SetPrimaryContainer sets the PrimaryContainer field's value. +func (s *CreateModelInput) SetPrimaryContainer(v *ContainerDefinition) *CreateModelInput { + s.PrimaryContainer = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateModelInput) SetTags(v []*Tag) *CreateModelInput { + s.Tags = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateModelOutput +type CreateModelOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the model created in Amazon SageMaker. 
+ // + // ModelArn is a required field + ModelArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateModelOutput) GoString() string { + return s.String() +} + +// SetModelArn sets the ModelArn field's value. +func (s *CreateModelOutput) SetModelArn(v string) *CreateModelOutput { + s.ModelArn = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateNotebookInstanceInput +type CreateNotebookInstanceInput struct { + _ struct{} `type:"structure"` + + // The type of ML compute instance to launch for the notebook instance. + // + // InstanceType is a required field + InstanceType *string `type:"string" required:"true" enum:"InstanceType"` + + // If you provide a AWS KMS key ID, Amazon SageMaker uses it to encrypt data + // at rest on the ML storage volume that is attached to your notebook instance. + KmsKeyId *string `type:"string"` + + // The name of the new notebook instance. + // + // NotebookInstanceName is a required field + NotebookInstanceName *string `type:"string" required:"true"` + + // When you send any requests to AWS resources from the notebook instance, Amazon + // SageMaker assumes this role to perform tasks on your behalf. You must grant + // this role necessary permissions so Amazon SageMaker can perform these tasks. + // The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) + // permissions to assume this role. For more information, see Amazon SageMaker + // Roles (http://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The VPC security group IDs, in the form sg-xxxxxxxx. The security groups + // must be for the same VPC as specified in the subnet. 
+ SecurityGroupIds []*string `type:"list"` + + // The ID of the subnet in a VPC to which you would like to have a connectivity + // from your ML compute instance. + SubnetId *string `type:"string"` + + // A list of tags to associate with the notebook instance. You can add tags + // later by using the CreateTags API. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateNotebookInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNotebookInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNotebookInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNotebookInstanceInput"} + if s.InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceType")) + } + if s.NotebookInstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceType sets the InstanceType field's value. +func (s *CreateNotebookInstanceInput) SetInstanceType(v string) *CreateNotebookInstanceInput { + s.InstanceType = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CreateNotebookInstanceInput) SetKmsKeyId(v string) *CreateNotebookInstanceInput { + s.KmsKeyId = &v + return s +} + +// SetNotebookInstanceName sets the NotebookInstanceName field's value. 
+func (s *CreateNotebookInstanceInput) SetNotebookInstanceName(v string) *CreateNotebookInstanceInput { + s.NotebookInstanceName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateNotebookInstanceInput) SetRoleArn(v string) *CreateNotebookInstanceInput { + s.RoleArn = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *CreateNotebookInstanceInput) SetSecurityGroupIds(v []*string) *CreateNotebookInstanceInput { + s.SecurityGroupIds = v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *CreateNotebookInstanceInput) SetSubnetId(v string) *CreateNotebookInstanceInput { + s.SubnetId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateNotebookInstanceInput) SetTags(v []*Tag) *CreateNotebookInstanceInput { + s.Tags = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateNotebookInstanceOutput +type CreateNotebookInstanceOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the notebook instance. + NotebookInstanceArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateNotebookInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNotebookInstanceOutput) GoString() string { + return s.String() +} + +// SetNotebookInstanceArn sets the NotebookInstanceArn field's value. +func (s *CreateNotebookInstanceOutput) SetNotebookInstanceArn(v string) *CreateNotebookInstanceOutput { + s.NotebookInstanceArn = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreatePresignedNotebookInstanceUrlInput +type CreatePresignedNotebookInstanceUrlInput struct { + _ struct{} `type:"structure"` + + // The name of the notebook instance. 
+ // + // NotebookInstanceName is a required field + NotebookInstanceName *string `type:"string" required:"true"` + + // The duration of the session, in seconds. The default is 12 hours. + SessionExpirationDurationInSeconds *int64 `min:"1800" type:"integer"` +} + +// String returns the string representation +func (s CreatePresignedNotebookInstanceUrlInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePresignedNotebookInstanceUrlInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePresignedNotebookInstanceUrlInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePresignedNotebookInstanceUrlInput"} + if s.NotebookInstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) + } + if s.SessionExpirationDurationInSeconds != nil && *s.SessionExpirationDurationInSeconds < 1800 { + invalidParams.Add(request.NewErrParamMinValue("SessionExpirationDurationInSeconds", 1800)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotebookInstanceName sets the NotebookInstanceName field's value. +func (s *CreatePresignedNotebookInstanceUrlInput) SetNotebookInstanceName(v string) *CreatePresignedNotebookInstanceUrlInput { + s.NotebookInstanceName = &v + return s +} + +// SetSessionExpirationDurationInSeconds sets the SessionExpirationDurationInSeconds field's value. +func (s *CreatePresignedNotebookInstanceUrlInput) SetSessionExpirationDurationInSeconds(v int64) *CreatePresignedNotebookInstanceUrlInput { + s.SessionExpirationDurationInSeconds = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreatePresignedNotebookInstanceUrlOutput +type CreatePresignedNotebookInstanceUrlOutput struct { + _ struct{} `type:"structure"` + + // A JSON object that contains the URL string. 
+ AuthorizedUrl *string `type:"string"` +} + +// String returns the string representation +func (s CreatePresignedNotebookInstanceUrlOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePresignedNotebookInstanceUrlOutput) GoString() string { + return s.String() +} + +// SetAuthorizedUrl sets the AuthorizedUrl field's value. +func (s *CreatePresignedNotebookInstanceUrlOutput) SetAuthorizedUrl(v string) *CreatePresignedNotebookInstanceUrlOutput { + s.AuthorizedUrl = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateTrainingJobRequest +type CreateTrainingJobInput struct { + _ struct{} `type:"structure"` + + // The registry path of the Docker image that contains the training algorithm + // and algorithm-specific metadata, including the input mode. For more information + // about algorithms provided by Amazon SageMaker, see Algorithms (http://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + // For information about providing your own algorithms, see Bring Your Own Algorithms + // (http://docs.aws.amazon.com/sagemaker/latest/dg/adv-topics-own-algo.html). + // + // AlgorithmSpecification is a required field + AlgorithmSpecification *AlgorithmSpecification `type:"structure" required:"true"` + + // Algorithm-specific parameters. You set hyperparameters before you start the + // learning process. Hyperparameters influence the quality of the model. For + // a list of hyperparameters for each training algorithm provided by Amazon + // SageMaker, see Algorithms (http://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + // + // You can specify a maximum of 100 hyperparameters. Each hyperparameter is + // a key-value pair. Each key and value is limited to 256 characters, as specified + // by the Length Constraint. + HyperParameters map[string]*string `type:"map"` + + // An array of Channel objects. Each channel is a named input source. 
InputDataConfig + // describes the input data and its location. + // + // Algorithms can accept input data from one or more channels. For example, + // an algorithm might have two channels of input data, training_data and validation_data. + // The configuration for each channel provides the S3 location where the input + // data is stored. It also provides information about the stored data: the MIME + // type, compression method, and whether the data is wrapped in RecordIO format. + // + // Depending on the input mode that the algorithm supports, Amazon SageMaker + // either copies input data files from an S3 bucket to a local directory in + // the Docker container, or makes it available as input streams. + // + // InputDataConfig is a required field + InputDataConfig []*Channel `min:"1" type:"list" required:"true"` + + // Specifies the path to the S3 bucket where you want to store model artifacts. + // Amazon SageMaker creates subfolders for the artifacts. + // + // OutputDataConfig is a required field + OutputDataConfig *OutputDataConfig `type:"structure" required:"true"` + + // The resources, including the ML compute instances and ML storage volumes, + // to use for model training. + // + // ML storage volumes store model artifacts and incremental states. Training + // algorithms might also use ML storage volumes for scratch space. If you want + // Amazon SageMaker to use the ML storage volume to store the training data, + // choose File as the TrainingInputMode in the algorithm specification. For + // distributed training algorithms, specify an instance count greater than 1. + // + // ResourceConfig is a required field + ResourceConfig *ResourceConfig `type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume + // to perform tasks on your behalf. 
+ // + // During model training, Amazon SageMaker needs your permission to read input + // data from an S3 bucket, download a Docker image that contains training code, + // write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, + // and publish metrics to Amazon CloudWatch. You grant permissions for all of + // these tasks to an IAM role. For more information, see Amazon SageMaker Roles + // (http://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // Sets a duration for training. Use this parameter to cap model training costs. + // To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which + // delays job termination for 120 seconds. Algorithms might use this 120-second + // window to save the model artifacts. + // + // When Amazon SageMaker terminates a job because the stopping condition has + // been met, training algorithms provided by Amazon SageMaker save the intermediate + // results of the job. This intermediate data is a valid model artifact. You + // can use it to create a model using the CreateModel API. + // + // StoppingCondition is a required field + StoppingCondition *StoppingCondition `type:"structure" required:"true"` + + // An array of key-value pairs. For more information, see Using Cost Allocation + // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) + // in the AWS Billing and Cost Management User Guide. + Tags []*Tag `type:"list"` + + // The name of the training job. The name must be unique within an AWS Region + // in an AWS account. It appears in the Amazon SageMaker console. 
+ // + // TrainingJobName is a required field + TrainingJobName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrainingJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrainingJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTrainingJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTrainingJobInput"} + if s.AlgorithmSpecification == nil { + invalidParams.Add(request.NewErrParamRequired("AlgorithmSpecification")) + } + if s.InputDataConfig == nil { + invalidParams.Add(request.NewErrParamRequired("InputDataConfig")) + } + if s.InputDataConfig != nil && len(s.InputDataConfig) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputDataConfig", 1)) + } + if s.OutputDataConfig == nil { + invalidParams.Add(request.NewErrParamRequired("OutputDataConfig")) + } + if s.ResourceConfig == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceConfig")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.StoppingCondition == nil { + invalidParams.Add(request.NewErrParamRequired("StoppingCondition")) + } + if s.TrainingJobName == nil { + invalidParams.Add(request.NewErrParamRequired("TrainingJobName")) + } + if s.TrainingJobName != nil && len(*s.TrainingJobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TrainingJobName", 1)) + } + if s.AlgorithmSpecification != nil { + if err := s.AlgorithmSpecification.Validate(); err != nil { + invalidParams.AddNested("AlgorithmSpecification", err.(request.ErrInvalidParams)) + } + } + if s.InputDataConfig != nil { + for i, v := range s.InputDataConfig { + if v == nil { + continue + } + if 
err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputDataConfig", i), err.(request.ErrInvalidParams)) + } + } + } + if s.OutputDataConfig != nil { + if err := s.OutputDataConfig.Validate(); err != nil { + invalidParams.AddNested("OutputDataConfig", err.(request.ErrInvalidParams)) + } + } + if s.ResourceConfig != nil { + if err := s.ResourceConfig.Validate(); err != nil { + invalidParams.AddNested("ResourceConfig", err.(request.ErrInvalidParams)) + } + } + if s.StoppingCondition != nil { + if err := s.StoppingCondition.Validate(); err != nil { + invalidParams.AddNested("StoppingCondition", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlgorithmSpecification sets the AlgorithmSpecification field's value. +func (s *CreateTrainingJobInput) SetAlgorithmSpecification(v *AlgorithmSpecification) *CreateTrainingJobInput { + s.AlgorithmSpecification = v + return s +} + +// SetHyperParameters sets the HyperParameters field's value. +func (s *CreateTrainingJobInput) SetHyperParameters(v map[string]*string) *CreateTrainingJobInput { + s.HyperParameters = v + return s +} + +// SetInputDataConfig sets the InputDataConfig field's value. +func (s *CreateTrainingJobInput) SetInputDataConfig(v []*Channel) *CreateTrainingJobInput { + s.InputDataConfig = v + return s +} + +// SetOutputDataConfig sets the OutputDataConfig field's value. +func (s *CreateTrainingJobInput) SetOutputDataConfig(v *OutputDataConfig) *CreateTrainingJobInput { + s.OutputDataConfig = v + return s +} + +// SetResourceConfig sets the ResourceConfig field's value. 
+func (s *CreateTrainingJobInput) SetResourceConfig(v *ResourceConfig) *CreateTrainingJobInput { + s.ResourceConfig = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateTrainingJobInput) SetRoleArn(v string) *CreateTrainingJobInput { + s.RoleArn = &v + return s +} + +// SetStoppingCondition sets the StoppingCondition field's value. +func (s *CreateTrainingJobInput) SetStoppingCondition(v *StoppingCondition) *CreateTrainingJobInput { + s.StoppingCondition = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateTrainingJobInput) SetTags(v []*Tag) *CreateTrainingJobInput { + s.Tags = v + return s +} + +// SetTrainingJobName sets the TrainingJobName field's value. +func (s *CreateTrainingJobInput) SetTrainingJobName(v string) *CreateTrainingJobInput { + s.TrainingJobName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateTrainingJobResponse +type CreateTrainingJobOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the training job. + // + // TrainingJobArn is a required field + TrainingJobArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrainingJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrainingJobOutput) GoString() string { + return s.String() +} + +// SetTrainingJobArn sets the TrainingJobArn field's value. +func (s *CreateTrainingJobOutput) SetTrainingJobArn(v string) *CreateTrainingJobOutput { + s.TrainingJobArn = &v + return s +} + +// Describes the location of the channel data. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DataSource +type DataSource struct { + _ struct{} `type:"structure"` + + // The S3 location of the data source that is associated with a channel. 
+ // + // S3DataSource is a required field + S3DataSource *S3DataSource `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DataSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DataSource"} + if s.S3DataSource == nil { + invalidParams.Add(request.NewErrParamRequired("S3DataSource")) + } + if s.S3DataSource != nil { + if err := s.S3DataSource.Validate(); err != nil { + invalidParams.AddNested("S3DataSource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3DataSource sets the S3DataSource field's value. +func (s *DataSource) SetS3DataSource(v *S3DataSource) *DataSource { + s.S3DataSource = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointConfigInput +type DeleteEndpointConfigInput struct { + _ struct{} `type:"structure"` + + // The name of the endpoint configuration that you want to delete. + // + // EndpointConfigName is a required field + EndpointConfigName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEndpointConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteEndpointConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteEndpointConfigInput"} + if s.EndpointConfigName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointConfigName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndpointConfigName sets the EndpointConfigName field's value. +func (s *DeleteEndpointConfigInput) SetEndpointConfigName(v string) *DeleteEndpointConfigInput { + s.EndpointConfigName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointConfigOutput +type DeleteEndpointConfigOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEndpointConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointConfigOutput) GoString() string { + return s.String() +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointInput +type DeleteEndpointInput struct { + _ struct{} `type:"structure"` + + // The name of the endpoint that you want to delete. + // + // EndpointName is a required field + EndpointName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteEndpointInput"} + if s.EndpointName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndpointName sets the EndpointName field's value. 
+func (s *DeleteEndpointInput) SetEndpointName(v string) *DeleteEndpointInput { + s.EndpointName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointOutput +type DeleteEndpointOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointOutput) GoString() string { + return s.String() +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteModelInput +type DeleteModelInput struct { + _ struct{} `type:"structure"` + + // The name of the model to delete. + // + // ModelName is a required field + ModelName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteModelInput"} + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetModelName sets the ModelName field's value. 
+func (s *DeleteModelInput) SetModelName(v string) *DeleteModelInput { + s.ModelName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteModelOutput +type DeleteModelOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteModelOutput) GoString() string { + return s.String() +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteNotebookInstanceInput +type DeleteNotebookInstanceInput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon SageMaker notebook instance to delete. + // + // NotebookInstanceName is a required field + NotebookInstanceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNotebookInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNotebookInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNotebookInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNotebookInstanceInput"} + if s.NotebookInstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotebookInstanceName sets the NotebookInstanceName field's value. 
+func (s *DeleteNotebookInstanceInput) SetNotebookInstanceName(v string) *DeleteNotebookInstanceInput { + s.NotebookInstanceName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteNotebookInstanceOutput +type DeleteNotebookInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNotebookInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNotebookInstanceOutput) GoString() string { + return s.String() +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteTagsInput +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource whose tags you want to delete. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` + + // An array or one or more tag keys to delete. + // + // TagKeys is a required field + TagKeys []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. 
+func (s *DeleteTagsInput) SetResourceArn(v string) *DeleteTagsInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *DeleteTagsInput) SetTagKeys(v []*string) *DeleteTagsInput { + s.TagKeys = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteTagsOutput +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointConfigInput +type DescribeEndpointConfigInput struct { + _ struct{} `type:"structure"` + + // The name of the endpoint configuration. + // + // EndpointConfigName is a required field + EndpointConfigName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeEndpointConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEndpointConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEndpointConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEndpointConfigInput"} + if s.EndpointConfigName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointConfigName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndpointConfigName sets the EndpointConfigName field's value. 
+func (s *DescribeEndpointConfigInput) SetEndpointConfigName(v string) *DescribeEndpointConfigInput { + s.EndpointConfigName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointConfigOutput +type DescribeEndpointConfigOutput struct { + _ struct{} `type:"structure"` + + // A timestamp that shows when the endpoint configuration was created. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The Amazon Resource Name (ARN) of the endpoint configuration. + // + // EndpointConfigArn is a required field + EndpointConfigArn *string `min:"20" type:"string" required:"true"` + + // Name of the Amazon SageMaker endpoint configuration. + // + // EndpointConfigName is a required field + EndpointConfigName *string `type:"string" required:"true"` + + // An array of ProductionVariant objects, one for each model that you want to + // host at this endpoint. + // + // ProductionVariants is a required field + ProductionVariants []*ProductionVariant `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeEndpointConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEndpointConfigOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeEndpointConfigOutput) SetCreationTime(v time.Time) *DescribeEndpointConfigOutput { + s.CreationTime = &v + return s +} + +// SetEndpointConfigArn sets the EndpointConfigArn field's value. +func (s *DescribeEndpointConfigOutput) SetEndpointConfigArn(v string) *DescribeEndpointConfigOutput { + s.EndpointConfigArn = &v + return s +} + +// SetEndpointConfigName sets the EndpointConfigName field's value. 
+func (s *DescribeEndpointConfigOutput) SetEndpointConfigName(v string) *DescribeEndpointConfigOutput { + s.EndpointConfigName = &v + return s +} + +// SetProductionVariants sets the ProductionVariants field's value. +func (s *DescribeEndpointConfigOutput) SetProductionVariants(v []*ProductionVariant) *DescribeEndpointConfigOutput { + s.ProductionVariants = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointInput +type DescribeEndpointInput struct { + _ struct{} `type:"structure"` + + // The name of the endpoint. + // + // EndpointName is a required field + EndpointName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEndpointInput"} + if s.EndpointName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndpointName sets the EndpointName field's value. +func (s *DescribeEndpointInput) SetEndpointName(v string) *DescribeEndpointInput { + s.EndpointName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointOutput +type DescribeEndpointOutput struct { + _ struct{} `type:"structure"` + + // A timestamp that shows when the endpoint was created. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The Amazon Resource Name (ARN) of the endpoint. 
+ // + // EndpointArn is a required field + EndpointArn *string `min:"20" type:"string" required:"true"` + + // The name of the endpoint configuration associated with this endpoint. + // + // EndpointConfigName is a required field + EndpointConfigName *string `type:"string" required:"true"` + + // Name of the endpoint. + // + // EndpointName is a required field + EndpointName *string `type:"string" required:"true"` + + // The status of the endpoint. + // + // EndpointStatus is a required field + EndpointStatus *string `type:"string" required:"true" enum:"EndpointStatus"` + + // If the status of the endpoint is Failed, the reason why it failed. + FailureReason *string `type:"string"` + + // A timestamp that shows when the endpoint was last modified. + // + // LastModifiedTime is a required field + LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // An array of ProductionVariant objects, one for each model hosted behind this + // endpoint. + ProductionVariants []*ProductionVariantSummary `min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEndpointOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeEndpointOutput) SetCreationTime(v time.Time) *DescribeEndpointOutput { + s.CreationTime = &v + return s +} + +// SetEndpointArn sets the EndpointArn field's value. +func (s *DescribeEndpointOutput) SetEndpointArn(v string) *DescribeEndpointOutput { + s.EndpointArn = &v + return s +} + +// SetEndpointConfigName sets the EndpointConfigName field's value. +func (s *DescribeEndpointOutput) SetEndpointConfigName(v string) *DescribeEndpointOutput { + s.EndpointConfigName = &v + return s +} + +// SetEndpointName sets the EndpointName field's value. 
+func (s *DescribeEndpointOutput) SetEndpointName(v string) *DescribeEndpointOutput { + s.EndpointName = &v + return s +} + +// SetEndpointStatus sets the EndpointStatus field's value. +func (s *DescribeEndpointOutput) SetEndpointStatus(v string) *DescribeEndpointOutput { + s.EndpointStatus = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *DescribeEndpointOutput) SetFailureReason(v string) *DescribeEndpointOutput { + s.FailureReason = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *DescribeEndpointOutput) SetLastModifiedTime(v time.Time) *DescribeEndpointOutput { + s.LastModifiedTime = &v + return s +} + +// SetProductionVariants sets the ProductionVariants field's value. +func (s *DescribeEndpointOutput) SetProductionVariants(v []*ProductionVariantSummary) *DescribeEndpointOutput { + s.ProductionVariants = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeModelInput +type DescribeModelInput struct { + _ struct{} `type:"structure"` + + // The name of the model. + // + // ModelName is a required field + ModelName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeModelInput"} + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetModelName sets the ModelName field's value. 
+func (s *DescribeModelInput) SetModelName(v string) *DescribeModelInput { + s.ModelName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeModelOutput +type DescribeModelOutput struct { + _ struct{} `type:"structure"` + + // A timestamp that shows when the model was created. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role that you specified for the + // model. + // + // ExecutionRoleArn is a required field + ExecutionRoleArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the model. + // + // ModelArn is a required field + ModelArn *string `min:"20" type:"string" required:"true"` + + // Name of the Amazon SageMaker model. + // + // ModelName is a required field + ModelName *string `type:"string" required:"true"` + + // The location of the primary inference code, associated artifacts, and custom + // environment map that the inference code uses when it is deployed in production. + // + // PrimaryContainer is a required field + PrimaryContainer *ContainerDefinition `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeModelOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeModelOutput) SetCreationTime(v time.Time) *DescribeModelOutput { + s.CreationTime = &v + return s +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *DescribeModelOutput) SetExecutionRoleArn(v string) *DescribeModelOutput { + s.ExecutionRoleArn = &v + return s +} + +// SetModelArn sets the ModelArn field's value. 
+func (s *DescribeModelOutput) SetModelArn(v string) *DescribeModelOutput { + s.ModelArn = &v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *DescribeModelOutput) SetModelName(v string) *DescribeModelOutput { + s.ModelName = &v + return s +} + +// SetPrimaryContainer sets the PrimaryContainer field's value. +func (s *DescribeModelOutput) SetPrimaryContainer(v *ContainerDefinition) *DescribeModelOutput { + s.PrimaryContainer = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeNotebookInstanceInput +type DescribeNotebookInstanceInput struct { + _ struct{} `type:"structure"` + + // The name of the notebook instance that you want information about. + // + // NotebookInstanceName is a required field + NotebookInstanceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeNotebookInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNotebookInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNotebookInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNotebookInstanceInput"} + if s.NotebookInstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotebookInstanceName sets the NotebookInstanceName field's value. +func (s *DescribeNotebookInstanceInput) SetNotebookInstanceName(v string) *DescribeNotebookInstanceInput { + s.NotebookInstanceName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeNotebookInstanceOutput +type DescribeNotebookInstanceOutput struct { + _ struct{} `type:"structure"` + + // A timestamp. 
Use this parameter to return the time when the notebook instance + // was created + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // If staus is failed, the reason it failed. + FailureReason *string `type:"string"` + + // The type of ML compute instance running on the notebook instance. + InstanceType *string `type:"string" enum:"InstanceType"` + + // AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the + // ML storage volume attached to the instance. + KmsKeyId *string `type:"string"` + + // A timestamp. Use this parameter to retrieve the time when the notebook instance + // was last modified. + LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Network interface IDs that Amazon SageMaker created at the time of creating + // the instance. + NetworkInterfaceId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the notebook instance. + NotebookInstanceArn *string `type:"string"` + + // Name of the Amazon SageMaker notebook instance. + NotebookInstanceName *string `type:"string"` + + // The status of the notebook instance. + NotebookInstanceStatus *string `type:"string" enum:"NotebookInstanceStatus"` + + // Amazon Resource Name (ARN) of the IAM role associated with the instance. + RoleArn *string `min:"20" type:"string"` + + // The IDs of the VPC security groups. + SecurityGroups []*string `type:"list"` + + // The ID of the VPC subnet. + SubnetId *string `type:"string"` + + // The URL that you use to connect to the Jupyter notebook that is running in + // your notebook instance. + Url *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNotebookInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNotebookInstanceOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. 
+func (s *DescribeNotebookInstanceOutput) SetCreationTime(v time.Time) *DescribeNotebookInstanceOutput { + s.CreationTime = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *DescribeNotebookInstanceOutput) SetFailureReason(v string) *DescribeNotebookInstanceOutput { + s.FailureReason = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *DescribeNotebookInstanceOutput) SetInstanceType(v string) *DescribeNotebookInstanceOutput { + s.InstanceType = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *DescribeNotebookInstanceOutput) SetKmsKeyId(v string) *DescribeNotebookInstanceOutput { + s.KmsKeyId = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *DescribeNotebookInstanceOutput) SetLastModifiedTime(v time.Time) *DescribeNotebookInstanceOutput { + s.LastModifiedTime = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *DescribeNotebookInstanceOutput) SetNetworkInterfaceId(v string) *DescribeNotebookInstanceOutput { + s.NetworkInterfaceId = &v + return s +} + +// SetNotebookInstanceArn sets the NotebookInstanceArn field's value. +func (s *DescribeNotebookInstanceOutput) SetNotebookInstanceArn(v string) *DescribeNotebookInstanceOutput { + s.NotebookInstanceArn = &v + return s +} + +// SetNotebookInstanceName sets the NotebookInstanceName field's value. +func (s *DescribeNotebookInstanceOutput) SetNotebookInstanceName(v string) *DescribeNotebookInstanceOutput { + s.NotebookInstanceName = &v + return s +} + +// SetNotebookInstanceStatus sets the NotebookInstanceStatus field's value. +func (s *DescribeNotebookInstanceOutput) SetNotebookInstanceStatus(v string) *DescribeNotebookInstanceOutput { + s.NotebookInstanceStatus = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. 
+func (s *DescribeNotebookInstanceOutput) SetRoleArn(v string) *DescribeNotebookInstanceOutput { + s.RoleArn = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *DescribeNotebookInstanceOutput) SetSecurityGroups(v []*string) *DescribeNotebookInstanceOutput { + s.SecurityGroups = v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *DescribeNotebookInstanceOutput) SetSubnetId(v string) *DescribeNotebookInstanceOutput { + s.SubnetId = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *DescribeNotebookInstanceOutput) SetUrl(v string) *DescribeNotebookInstanceOutput { + s.Url = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeTrainingJobRequest +type DescribeTrainingJobInput struct { + _ struct{} `type:"structure"` + + // The name of the training job. + // + // TrainingJobName is a required field + TrainingJobName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTrainingJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrainingJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTrainingJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrainingJobInput"} + if s.TrainingJobName == nil { + invalidParams.Add(request.NewErrParamRequired("TrainingJobName")) + } + if s.TrainingJobName != nil && len(*s.TrainingJobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TrainingJobName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTrainingJobName sets the TrainingJobName field's value. 
+func (s *DescribeTrainingJobInput) SetTrainingJobName(v string) *DescribeTrainingJobInput { + s.TrainingJobName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeTrainingJobResponse +type DescribeTrainingJobOutput struct { + _ struct{} `type:"structure"` + + // Information about the algorithm used for training, and algorithm metadata. + // + // AlgorithmSpecification is a required field + AlgorithmSpecification *AlgorithmSpecification `type:"structure" required:"true"` + + // A timestamp that indicates when the training job was created. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // If the training job failed, the reason it failed. + FailureReason *string `type:"string"` + + // Algorithm-specific parameters. + HyperParameters map[string]*string `type:"map"` + + // An array of Channel objects that describes each data input channel. + // + // InputDataConfig is a required field + InputDataConfig []*Channel `min:"1" type:"list" required:"true"` + + // A timestamp that indicates when the status of the training job was last modified. + LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Information about the Amazon S3 location that is configured for storing model + // artifacts. + // + // ModelArtifacts is a required field + ModelArtifacts *ModelArtifacts `type:"structure" required:"true"` + + // The S3 path where model artifacts that you configured when creating the job + // are stored. Amazon SageMaker creates subfolders for model artifacts. + OutputDataConfig *OutputDataConfig `type:"structure"` + + // Resources, including ML compute instances and ML storage volumes, that are + // configured for model training. 
+ // + // ResourceConfig is a required field + ResourceConfig *ResourceConfig `type:"structure" required:"true"` + + // The AWS Identity and Access Management (IAM) role configured for the training + // job. + RoleArn *string `min:"20" type:"string"` + + // Provides granular information about the system state. For more information, + // see TrainingJobStatus. + // + // SecondaryStatus is a required field + SecondaryStatus *string `type:"string" required:"true" enum:"SecondaryStatus"` + + // The condition under which to stop the training job. + // + // StoppingCondition is a required field + StoppingCondition *StoppingCondition `type:"structure" required:"true"` + + // A timestamp that indicates when model training ended. + TrainingEndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Amazon Resource Name (ARN) of the training job. + // + // TrainingJobArn is a required field + TrainingJobArn *string `type:"string" required:"true"` + + // Name of the model training job. + // + // TrainingJobName is a required field + TrainingJobName *string `min:"1" type:"string" required:"true"` + + // The status of the training job. + // + // For the InProgress status, Amazon SageMaker can return these secondary statuses: + // + // * Starting - Preparing for training. + // + // * Downloading - Optional stage for algorithms that support File training + // input mode. It indicates data is being downloaded to ML storage volumes. + // + // * Training - Training is in progress. + // + // * Uploading - Training is complete and model upload is in progress. + // + // For the Stopped training status, Amazon SageMaker can return these secondary + // statuses: + // + // * MaxRuntimeExceeded - Job stopped as a result of maximum allowed runtime + // exceeded. + // + // TrainingJobStatus is a required field + TrainingJobStatus *string `type:"string" required:"true" enum:"TrainingJobStatus"` + + // A timestamp that indicates when training started. 
+ TrainingStartTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s DescribeTrainingJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrainingJobOutput) GoString() string { + return s.String() +} + +// SetAlgorithmSpecification sets the AlgorithmSpecification field's value. +func (s *DescribeTrainingJobOutput) SetAlgorithmSpecification(v *AlgorithmSpecification) *DescribeTrainingJobOutput { + s.AlgorithmSpecification = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeTrainingJobOutput) SetCreationTime(v time.Time) *DescribeTrainingJobOutput { + s.CreationTime = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *DescribeTrainingJobOutput) SetFailureReason(v string) *DescribeTrainingJobOutput { + s.FailureReason = &v + return s +} + +// SetHyperParameters sets the HyperParameters field's value. +func (s *DescribeTrainingJobOutput) SetHyperParameters(v map[string]*string) *DescribeTrainingJobOutput { + s.HyperParameters = v + return s +} + +// SetInputDataConfig sets the InputDataConfig field's value. +func (s *DescribeTrainingJobOutput) SetInputDataConfig(v []*Channel) *DescribeTrainingJobOutput { + s.InputDataConfig = v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *DescribeTrainingJobOutput) SetLastModifiedTime(v time.Time) *DescribeTrainingJobOutput { + s.LastModifiedTime = &v + return s +} + +// SetModelArtifacts sets the ModelArtifacts field's value. +func (s *DescribeTrainingJobOutput) SetModelArtifacts(v *ModelArtifacts) *DescribeTrainingJobOutput { + s.ModelArtifacts = v + return s +} + +// SetOutputDataConfig sets the OutputDataConfig field's value. 
+func (s *DescribeTrainingJobOutput) SetOutputDataConfig(v *OutputDataConfig) *DescribeTrainingJobOutput { + s.OutputDataConfig = v + return s +} + +// SetResourceConfig sets the ResourceConfig field's value. +func (s *DescribeTrainingJobOutput) SetResourceConfig(v *ResourceConfig) *DescribeTrainingJobOutput { + s.ResourceConfig = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DescribeTrainingJobOutput) SetRoleArn(v string) *DescribeTrainingJobOutput { + s.RoleArn = &v + return s +} + +// SetSecondaryStatus sets the SecondaryStatus field's value. +func (s *DescribeTrainingJobOutput) SetSecondaryStatus(v string) *DescribeTrainingJobOutput { + s.SecondaryStatus = &v + return s +} + +// SetStoppingCondition sets the StoppingCondition field's value. +func (s *DescribeTrainingJobOutput) SetStoppingCondition(v *StoppingCondition) *DescribeTrainingJobOutput { + s.StoppingCondition = v + return s +} + +// SetTrainingEndTime sets the TrainingEndTime field's value. +func (s *DescribeTrainingJobOutput) SetTrainingEndTime(v time.Time) *DescribeTrainingJobOutput { + s.TrainingEndTime = &v + return s +} + +// SetTrainingJobArn sets the TrainingJobArn field's value. +func (s *DescribeTrainingJobOutput) SetTrainingJobArn(v string) *DescribeTrainingJobOutput { + s.TrainingJobArn = &v + return s +} + +// SetTrainingJobName sets the TrainingJobName field's value. +func (s *DescribeTrainingJobOutput) SetTrainingJobName(v string) *DescribeTrainingJobOutput { + s.TrainingJobName = &v + return s +} + +// SetTrainingJobStatus sets the TrainingJobStatus field's value. +func (s *DescribeTrainingJobOutput) SetTrainingJobStatus(v string) *DescribeTrainingJobOutput { + s.TrainingJobStatus = &v + return s +} + +// SetTrainingStartTime sets the TrainingStartTime field's value. 
+func (s *DescribeTrainingJobOutput) SetTrainingStartTime(v time.Time) *DescribeTrainingJobOutput { + s.TrainingStartTime = &v + return s +} + +// Specifies weight and capacity values for a production variant. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DesiredWeightAndCapacity +type DesiredWeightAndCapacity struct { + _ struct{} `type:"structure"` + + // The variant's capacity. + DesiredInstanceCount *int64 `min:"1" type:"integer"` + + // The variant's weight. + DesiredWeight *float64 `type:"float"` + + // The name of the variant to update. + // + // VariantName is a required field + VariantName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DesiredWeightAndCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DesiredWeightAndCapacity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DesiredWeightAndCapacity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DesiredWeightAndCapacity"} + if s.DesiredInstanceCount != nil && *s.DesiredInstanceCount < 1 { + invalidParams.Add(request.NewErrParamMinValue("DesiredInstanceCount", 1)) + } + if s.VariantName == nil { + invalidParams.Add(request.NewErrParamRequired("VariantName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDesiredInstanceCount sets the DesiredInstanceCount field's value. +func (s *DesiredWeightAndCapacity) SetDesiredInstanceCount(v int64) *DesiredWeightAndCapacity { + s.DesiredInstanceCount = &v + return s +} + +// SetDesiredWeight sets the DesiredWeight field's value. +func (s *DesiredWeightAndCapacity) SetDesiredWeight(v float64) *DesiredWeightAndCapacity { + s.DesiredWeight = &v + return s +} + +// SetVariantName sets the VariantName field's value. 
+func (s *DesiredWeightAndCapacity) SetVariantName(v string) *DesiredWeightAndCapacity { + s.VariantName = &v + return s +} + +// Provides summary information for an endpoint configuration. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/EndpointConfigSummary +type EndpointConfigSummary struct { + _ struct{} `type:"structure"` + + // A timestamp that shows when the endpoint configuration was created. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The Amazon Resource Name (ARN) of the endpoint configuration. + // + // EndpointConfigArn is a required field + EndpointConfigArn *string `min:"20" type:"string" required:"true"` + + // The name of the endpoint configuration. + // + // EndpointConfigName is a required field + EndpointConfigName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EndpointConfigSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EndpointConfigSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *EndpointConfigSummary) SetCreationTime(v time.Time) *EndpointConfigSummary { + s.CreationTime = &v + return s +} + +// SetEndpointConfigArn sets the EndpointConfigArn field's value. +func (s *EndpointConfigSummary) SetEndpointConfigArn(v string) *EndpointConfigSummary { + s.EndpointConfigArn = &v + return s +} + +// SetEndpointConfigName sets the EndpointConfigName field's value. +func (s *EndpointConfigSummary) SetEndpointConfigName(v string) *EndpointConfigSummary { + s.EndpointConfigName = &v + return s +} + +// Provides summary information for an endpoint. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/EndpointSummary +type EndpointSummary struct { + _ struct{} `type:"structure"` + + // A timestamp that shows when the endpoint was created. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The Amazon Resource Name (ARN) of the endpoint. + // + // EndpointArn is a required field + EndpointArn *string `min:"20" type:"string" required:"true"` + + // The name of the endpoint. + // + // EndpointName is a required field + EndpointName *string `type:"string" required:"true"` + + // The status of the endpoint. + // + // EndpointStatus is a required field + EndpointStatus *string `type:"string" required:"true" enum:"EndpointStatus"` + + // A timestamp that shows when the endpoint was last modified. + // + // LastModifiedTime is a required field + LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` +} + +// String returns the string representation +func (s EndpointSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EndpointSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *EndpointSummary) SetCreationTime(v time.Time) *EndpointSummary { + s.CreationTime = &v + return s +} + +// SetEndpointArn sets the EndpointArn field's value. +func (s *EndpointSummary) SetEndpointArn(v string) *EndpointSummary { + s.EndpointArn = &v + return s +} + +// SetEndpointName sets the EndpointName field's value. +func (s *EndpointSummary) SetEndpointName(v string) *EndpointSummary { + s.EndpointName = &v + return s +} + +// SetEndpointStatus sets the EndpointStatus field's value. +func (s *EndpointSummary) SetEndpointStatus(v string) *EndpointSummary { + s.EndpointStatus = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. 
+func (s *EndpointSummary) SetLastModifiedTime(v time.Time) *EndpointSummary { + s.LastModifiedTime = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointConfigsInput +type ListEndpointConfigsInput struct { + _ struct{} `type:"structure"` + + // A filter that returns only endpoint configurations created after the specified + // time (timestamp). + CreationTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A filter that returns only endpoint configurations created before the specified + // time (timestamp). + CreationTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The maximum number of training jobs to return in the response. + MaxResults *int64 `min:"1" type:"integer"` + + // A string in the endpoint configuration name. This filter returns only endpoint + // configurations whose name contains the specified string. + NameContains *string `type:"string"` + + // If the result of the previous ListEndpointConfig request was truncated, the + // response includes a NextToken. To retrieve the next set of endpoint configurations, + // use the token in the next request. + NextToken *string `type:"string"` + + // The field to sort results by. The default is CreationTime. + SortBy *string `type:"string" enum:"EndpointConfigSortKey"` + + // The sort order for results. The default is Ascending. + SortOrder *string `type:"string" enum:"OrderKey"` +} + +// String returns the string representation +func (s ListEndpointConfigsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEndpointConfigsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListEndpointConfigsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEndpointConfigsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreationTimeAfter sets the CreationTimeAfter field's value. +func (s *ListEndpointConfigsInput) SetCreationTimeAfter(v time.Time) *ListEndpointConfigsInput { + s.CreationTimeAfter = &v + return s +} + +// SetCreationTimeBefore sets the CreationTimeBefore field's value. +func (s *ListEndpointConfigsInput) SetCreationTimeBefore(v time.Time) *ListEndpointConfigsInput { + s.CreationTimeBefore = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListEndpointConfigsInput) SetMaxResults(v int64) *ListEndpointConfigsInput { + s.MaxResults = &v + return s +} + +// SetNameContains sets the NameContains field's value. +func (s *ListEndpointConfigsInput) SetNameContains(v string) *ListEndpointConfigsInput { + s.NameContains = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEndpointConfigsInput) SetNextToken(v string) *ListEndpointConfigsInput { + s.NextToken = &v + return s +} + +// SetSortBy sets the SortBy field's value. +func (s *ListEndpointConfigsInput) SetSortBy(v string) *ListEndpointConfigsInput { + s.SortBy = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. +func (s *ListEndpointConfigsInput) SetSortOrder(v string) *ListEndpointConfigsInput { + s.SortOrder = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointConfigsOutput +type ListEndpointConfigsOutput struct { + _ struct{} `type:"structure"` + + // An array of endpoint configurations. 
+ // + // EndpointConfigs is a required field + EndpointConfigs []*EndpointConfigSummary `type:"list" required:"true"` + + // If the response is truncated, Amazon SageMaker returns this token. To retrieve + // the next set of endpoint configurations, use it in the subsequent request + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListEndpointConfigsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEndpointConfigsOutput) GoString() string { + return s.String() +} + +// SetEndpointConfigs sets the EndpointConfigs field's value. +func (s *ListEndpointConfigsOutput) SetEndpointConfigs(v []*EndpointConfigSummary) *ListEndpointConfigsOutput { + s.EndpointConfigs = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEndpointConfigsOutput) SetNextToken(v string) *ListEndpointConfigsOutput { + s.NextToken = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointsInput +type ListEndpointsInput struct { + _ struct{} `type:"structure"` + + // A filter that returns only endpoints that were created after the specified + // time (timestamp). + CreationTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A filter that returns only endpoints that were created before the specified + // time (timestamp). + CreationTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A filter that returns only endpoints that were modified after the specified + // timestamp. + LastModifiedTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A filter that returns only endpoints that were modified before the specified + // timestamp. + LastModifiedTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The maximum number of endpoints to return in the response. + MaxResults *int64 `min:"1" type:"integer"` + + // A string in endpoint names. 
This filter returns only endpoints whose name + // contains the specified string. + NameContains *string `type:"string"` + + // If the result of a ListEndpoints request was truncated, the response includes + // a NextToken. To retrieve the next set of endpoints, use the token in the + // next request. + NextToken *string `type:"string"` + + // Sorts the list of results. The default is CreationTime. + SortBy *string `type:"string" enum:"EndpointSortKey"` + + // The sort order for results. The default is Ascending. + SortOrder *string `type:"string" enum:"OrderKey"` + + // A filter that returns only endpoints with the specified status. + StatusEquals *string `type:"string" enum:"EndpointStatus"` +} + +// String returns the string representation +func (s ListEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEndpointsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListEndpointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEndpointsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreationTimeAfter sets the CreationTimeAfter field's value. +func (s *ListEndpointsInput) SetCreationTimeAfter(v time.Time) *ListEndpointsInput { + s.CreationTimeAfter = &v + return s +} + +// SetCreationTimeBefore sets the CreationTimeBefore field's value. +func (s *ListEndpointsInput) SetCreationTimeBefore(v time.Time) *ListEndpointsInput { + s.CreationTimeBefore = &v + return s +} + +// SetLastModifiedTimeAfter sets the LastModifiedTimeAfter field's value. 
+func (s *ListEndpointsInput) SetLastModifiedTimeAfter(v time.Time) *ListEndpointsInput {
+	s.LastModifiedTimeAfter = &v
+	return s
+}
+
+// SetLastModifiedTimeBefore sets the LastModifiedTimeBefore field's value.
+func (s *ListEndpointsInput) SetLastModifiedTimeBefore(v time.Time) *ListEndpointsInput {
+	s.LastModifiedTimeBefore = &v
+	return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListEndpointsInput) SetMaxResults(v int64) *ListEndpointsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNameContains sets the NameContains field's value.
+func (s *ListEndpointsInput) SetNameContains(v string) *ListEndpointsInput {
+	s.NameContains = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListEndpointsInput) SetNextToken(v string) *ListEndpointsInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetSortBy sets the SortBy field's value.
+func (s *ListEndpointsInput) SetSortBy(v string) *ListEndpointsInput {
+	s.SortBy = &v
+	return s
+}
+
+// SetSortOrder sets the SortOrder field's value.
+func (s *ListEndpointsInput) SetSortOrder(v string) *ListEndpointsInput {
+	s.SortOrder = &v
+	return s
+}
+
+// SetStatusEquals sets the StatusEquals field's value.
+func (s *ListEndpointsInput) SetStatusEquals(v string) *ListEndpointsInput {
+	s.StatusEquals = &v
+	return s
+}
+
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointsOutput
+type ListEndpointsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of endpoint objects.
+	//
+	// Endpoints is a required field
+	Endpoints []*EndpointSummary `type:"list" required:"true"`
+
+	// If the response is truncated, Amazon SageMaker returns this token. To retrieve
+	// the next set of endpoints, use it in the subsequent request.
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEndpointsOutput) GoString() string { + return s.String() +} + +// SetEndpoints sets the Endpoints field's value. +func (s *ListEndpointsOutput) SetEndpoints(v []*EndpointSummary) *ListEndpointsOutput { + s.Endpoints = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEndpointsOutput) SetNextToken(v string) *ListEndpointsOutput { + s.NextToken = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListModelsInput +type ListModelsInput struct { + _ struct{} `type:"structure"` + + // A filter that returns only models created after the specified time (timestamp). + CreationTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A filter that returns only models created before the specified time (timestamp). + CreationTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The maximum number of models to return in the response. + MaxResults *int64 `min:"1" type:"integer"` + + // A string in the training job name. This filter returns only models in the + // training job whose name contains the specified string. + NameContains *string `type:"string"` + + // If the response to a previous ListModels request was truncated, the response + // includes a NextToken. To retrieve the next set of models, use the token in + // the next request. + NextToken *string `type:"string"` + + // Sorts the list of results. The default is CreationTime. + SortBy *string `type:"string" enum:"ModelSortKey"` + + // The sort order for results. The default is Ascending. 
+ SortOrder *string `type:"string" enum:"OrderKey"` +} + +// String returns the string representation +func (s ListModelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListModelsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListModelsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListModelsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreationTimeAfter sets the CreationTimeAfter field's value. +func (s *ListModelsInput) SetCreationTimeAfter(v time.Time) *ListModelsInput { + s.CreationTimeAfter = &v + return s +} + +// SetCreationTimeBefore sets the CreationTimeBefore field's value. +func (s *ListModelsInput) SetCreationTimeBefore(v time.Time) *ListModelsInput { + s.CreationTimeBefore = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListModelsInput) SetMaxResults(v int64) *ListModelsInput { + s.MaxResults = &v + return s +} + +// SetNameContains sets the NameContains field's value. +func (s *ListModelsInput) SetNameContains(v string) *ListModelsInput { + s.NameContains = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListModelsInput) SetNextToken(v string) *ListModelsInput { + s.NextToken = &v + return s +} + +// SetSortBy sets the SortBy field's value. +func (s *ListModelsInput) SetSortBy(v string) *ListModelsInput { + s.SortBy = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. 
+func (s *ListModelsInput) SetSortOrder(v string) *ListModelsInput { + s.SortOrder = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListModelsOutput +type ListModelsOutput struct { + _ struct{} `type:"structure"` + + // An array of ModelSummary objects, each of which lists a model. + // + // Models is a required field + Models []*ModelSummary `type:"list" required:"true"` + + // If the response is truncated, Amazon SageMaker returns this token. To retrieve + // the next set of models, use it in the subsequent request. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListModelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListModelsOutput) GoString() string { + return s.String() +} + +// SetModels sets the Models field's value. +func (s *ListModelsOutput) SetModels(v []*ModelSummary) *ListModelsOutput { + s.Models = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListModelsOutput) SetNextToken(v string) *ListModelsOutput { + s.NextToken = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListNotebookInstancesInput +type ListNotebookInstancesInput struct { + _ struct{} `type:"structure"` + + // A filter that returns only notebook instances that were created after the + // specified time (timestamp). + CreationTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A filter that returns only notebook instances that were created before the + // specified time (timestamp). + CreationTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A filter that returns only notebook instances that were modified after the + // specified time (timestamp). 
+ LastModifiedTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A filter that returns only notebook instances that were modified before the + // specified time (timestamp). + LastModifiedTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The maximum number of notebook instances to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A string in the notebook instances' name. This filter returns only notebook + // instances whose name contains the specified string. + NameContains *string `type:"string"` + + // If the previous call to the ListNotebookInstances is truncated, the response + // includes a NextToken. You can use this token in your subsequent ListNotebookInstances + // request to fetch the next set of notebook instances. + // + // You might specify a filter or a sort order in your request. When response + // is truncated, you must use the same values for the filer and sort order in + // the next request. + NextToken *string `type:"string"` + + // The field to sort results by. The default is Name. + SortBy *string `type:"string" enum:"NotebookInstanceSortKey"` + + // The sort order for results. + SortOrder *string `type:"string" enum:"NotebookInstanceSortOrder"` + + // A filter that returns only notebook instances with the specified status. + StatusEquals *string `type:"string" enum:"NotebookInstanceStatus"` +} + +// String returns the string representation +func (s ListNotebookInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListNotebookInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListNotebookInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListNotebookInstancesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreationTimeAfter sets the CreationTimeAfter field's value. +func (s *ListNotebookInstancesInput) SetCreationTimeAfter(v time.Time) *ListNotebookInstancesInput { + s.CreationTimeAfter = &v + return s +} + +// SetCreationTimeBefore sets the CreationTimeBefore field's value. +func (s *ListNotebookInstancesInput) SetCreationTimeBefore(v time.Time) *ListNotebookInstancesInput { + s.CreationTimeBefore = &v + return s +} + +// SetLastModifiedTimeAfter sets the LastModifiedTimeAfter field's value. +func (s *ListNotebookInstancesInput) SetLastModifiedTimeAfter(v time.Time) *ListNotebookInstancesInput { + s.LastModifiedTimeAfter = &v + return s +} + +// SetLastModifiedTimeBefore sets the LastModifiedTimeBefore field's value. +func (s *ListNotebookInstancesInput) SetLastModifiedTimeBefore(v time.Time) *ListNotebookInstancesInput { + s.LastModifiedTimeBefore = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListNotebookInstancesInput) SetMaxResults(v int64) *ListNotebookInstancesInput { + s.MaxResults = &v + return s +} + +// SetNameContains sets the NameContains field's value. +func (s *ListNotebookInstancesInput) SetNameContains(v string) *ListNotebookInstancesInput { + s.NameContains = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListNotebookInstancesInput) SetNextToken(v string) *ListNotebookInstancesInput { + s.NextToken = &v + return s +} + +// SetSortBy sets the SortBy field's value. +func (s *ListNotebookInstancesInput) SetSortBy(v string) *ListNotebookInstancesInput { + s.SortBy = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. 
+func (s *ListNotebookInstancesInput) SetSortOrder(v string) *ListNotebookInstancesInput { + s.SortOrder = &v + return s +} + +// SetStatusEquals sets the StatusEquals field's value. +func (s *ListNotebookInstancesInput) SetStatusEquals(v string) *ListNotebookInstancesInput { + s.StatusEquals = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListNotebookInstancesOutput +type ListNotebookInstancesOutput struct { + _ struct{} `type:"structure"` + + // If the response to the previous ListNotebookInstances request was truncated, + // Amazon SageMaker returns this token. To retrieve the next set of notebook + // instances, use the token in the next request. + NextToken *string `type:"string"` + + // An array of NotebookInstanceSummary objects, one for each notebook instance. + NotebookInstances []*NotebookInstanceSummary `type:"list"` +} + +// String returns the string representation +func (s ListNotebookInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListNotebookInstancesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListNotebookInstancesOutput) SetNextToken(v string) *ListNotebookInstancesOutput { + s.NextToken = &v + return s +} + +// SetNotebookInstances sets the NotebookInstances field's value. +func (s *ListNotebookInstancesOutput) SetNotebookInstances(v []*NotebookInstanceSummary) *ListNotebookInstancesOutput { + s.NotebookInstances = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTagsInput +type ListTagsInput struct { + _ struct{} `type:"structure"` + + // Maximum number of tags to return. + MaxResults *int64 `min:"50" type:"integer"` + + // If the response to the previous ListTags request is truncated, Amazon SageMaker + // returns this token. To retrieve the next set of tags, use it in the subsequent + // request. 
+ NextToken *string `type:"string"` + + // The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsInput"} + if s.MaxResults != nil && *s.MaxResults < 50 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 50)) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListTagsInput) SetMaxResults(v int64) *ListTagsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsInput) SetNextToken(v string) *ListTagsInput { + s.NextToken = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsInput) SetResourceArn(v string) *ListTagsInput { + s.ResourceArn = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTagsOutput +type ListTagsOutput struct { + _ struct{} `type:"structure"` + + // If response is truncated, Amazon SageMaker includes a token in the response. + // You can use this token in your subsequent request to fetch next set of tokens. + NextToken *string `type:"string"` + + // An array of Tag objects, each with a tag key and a value. 
+ Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsOutput) SetNextToken(v string) *ListTagsOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsOutput) SetTags(v []*Tag) *ListTagsOutput { + s.Tags = v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTrainingJobsRequest +type ListTrainingJobsInput struct { + _ struct{} `type:"structure"` + + // A filter that only training jobs created after the specified time (timestamp). + CreationTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A filter that returns only training jobs created before the specified time + // (timestamp). + CreationTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A filter that returns only training jobs modified after the specified time + // (timestamp). + LastModifiedTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A filter that returns only training jobs modified before the specified time + // (timestamp). + LastModifiedTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The maximum number of training jobs to return in the response. + MaxResults *int64 `min:"1" type:"integer"` + + // A string in the training job name. This filter returns only models whose + // name contains the specified string. + NameContains *string `type:"string"` + + // If the result of the previous ListTrainingJobs request was truncated, the + // response includes a NextToken. To retrieve the next set of training jobs, + // use the token in the next request. + NextToken *string `type:"string"` + + // The field to sort results by. The default is CreationTime. 
+ SortBy *string `type:"string" enum:"SortBy"` + + // The sort order for results. The default is Ascending. + SortOrder *string `type:"string" enum:"SortOrder"` + + // A filter that retrieves only training jobs with a specific status. + StatusEquals *string `type:"string" enum:"TrainingJobStatus"` +} + +// String returns the string representation +func (s ListTrainingJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrainingJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTrainingJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTrainingJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreationTimeAfter sets the CreationTimeAfter field's value. +func (s *ListTrainingJobsInput) SetCreationTimeAfter(v time.Time) *ListTrainingJobsInput { + s.CreationTimeAfter = &v + return s +} + +// SetCreationTimeBefore sets the CreationTimeBefore field's value. +func (s *ListTrainingJobsInput) SetCreationTimeBefore(v time.Time) *ListTrainingJobsInput { + s.CreationTimeBefore = &v + return s +} + +// SetLastModifiedTimeAfter sets the LastModifiedTimeAfter field's value. +func (s *ListTrainingJobsInput) SetLastModifiedTimeAfter(v time.Time) *ListTrainingJobsInput { + s.LastModifiedTimeAfter = &v + return s +} + +// SetLastModifiedTimeBefore sets the LastModifiedTimeBefore field's value. +func (s *ListTrainingJobsInput) SetLastModifiedTimeBefore(v time.Time) *ListTrainingJobsInput { + s.LastModifiedTimeBefore = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *ListTrainingJobsInput) SetMaxResults(v int64) *ListTrainingJobsInput { + s.MaxResults = &v + return s +} + +// SetNameContains sets the NameContains field's value. +func (s *ListTrainingJobsInput) SetNameContains(v string) *ListTrainingJobsInput { + s.NameContains = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTrainingJobsInput) SetNextToken(v string) *ListTrainingJobsInput { + s.NextToken = &v + return s +} + +// SetSortBy sets the SortBy field's value. +func (s *ListTrainingJobsInput) SetSortBy(v string) *ListTrainingJobsInput { + s.SortBy = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. +func (s *ListTrainingJobsInput) SetSortOrder(v string) *ListTrainingJobsInput { + s.SortOrder = &v + return s +} + +// SetStatusEquals sets the StatusEquals field's value. +func (s *ListTrainingJobsInput) SetStatusEquals(v string) *ListTrainingJobsInput { + s.StatusEquals = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTrainingJobsResponse +type ListTrainingJobsOutput struct { + _ struct{} `type:"structure"` + + // If the response is truncated, Amazon SageMaker returns this token. To retrieve + // the next set of training jobs, use it in the subsequent request. + NextToken *string `type:"string"` + + // An array of TrainingJobSummary objects, each listing a training job. + // + // TrainingJobSummaries is a required field + TrainingJobSummaries []*TrainingJobSummary `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrainingJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrainingJobsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListTrainingJobsOutput) SetNextToken(v string) *ListTrainingJobsOutput { + s.NextToken = &v + return s +} + +// SetTrainingJobSummaries sets the TrainingJobSummaries field's value. +func (s *ListTrainingJobsOutput) SetTrainingJobSummaries(v []*TrainingJobSummary) *ListTrainingJobsOutput { + s.TrainingJobSummaries = v + return s +} + +// Provides information about the location that is configured for storing model +// artifacts. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ModelArtifacts +type ModelArtifacts struct { + _ struct{} `type:"structure"` + + // The path of the S3 object that contains the model artifacts. For example, + // s3://bucket-name/keynameprefix/model.tar.gz. + // + // S3ModelArtifacts is a required field + S3ModelArtifacts *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModelArtifacts) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModelArtifacts) GoString() string { + return s.String() +} + +// SetS3ModelArtifacts sets the S3ModelArtifacts field's value. +func (s *ModelArtifacts) SetS3ModelArtifacts(v string) *ModelArtifacts { + s.S3ModelArtifacts = &v + return s +} + +// Provides summary information about a model. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ModelSummary +type ModelSummary struct { + _ struct{} `type:"structure"` + + // A timestamp that indicates when the model was created. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The Amazon Resource Name (ARN) of the model. + // + // ModelArn is a required field + ModelArn *string `min:"20" type:"string" required:"true"` + + // The name of the model that you want a summary for. 
+ // + // ModelName is a required field + ModelName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModelSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModelSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ModelSummary) SetCreationTime(v time.Time) *ModelSummary { + s.CreationTime = &v + return s +} + +// SetModelArn sets the ModelArn field's value. +func (s *ModelSummary) SetModelArn(v string) *ModelSummary { + s.ModelArn = &v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *ModelSummary) SetModelName(v string) *ModelSummary { + s.ModelName = &v + return s +} + +// Provides summary information for an Amazon SageMaker notebook instance. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/NotebookInstanceSummary +type NotebookInstanceSummary struct { + _ struct{} `type:"structure"` + + // A timestamp that shows when the notebook instance was created. + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The type of ML compute instance that the notebook instance is running on. + InstanceType *string `type:"string" enum:"InstanceType"` + + // A timestamp that shows when the notebook instance was last modified. + LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Amazon Resource Name (ARN) of the notebook instance. + // + // NotebookInstanceArn is a required field + NotebookInstanceArn *string `type:"string" required:"true"` + + // The name of the notebook instance that you want a summary for. + // + // NotebookInstanceName is a required field + NotebookInstanceName *string `type:"string" required:"true"` + + // The status of the notebook instance. 
+ NotebookInstanceStatus *string `type:"string" enum:"NotebookInstanceStatus"` + + // The URL that you use to connect to the Jupyter instance running in your notebook + // instance. + Url *string `type:"string"` +} + +// String returns the string representation +func (s NotebookInstanceSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotebookInstanceSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *NotebookInstanceSummary) SetCreationTime(v time.Time) *NotebookInstanceSummary { + s.CreationTime = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *NotebookInstanceSummary) SetInstanceType(v string) *NotebookInstanceSummary { + s.InstanceType = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *NotebookInstanceSummary) SetLastModifiedTime(v time.Time) *NotebookInstanceSummary { + s.LastModifiedTime = &v + return s +} + +// SetNotebookInstanceArn sets the NotebookInstanceArn field's value. +func (s *NotebookInstanceSummary) SetNotebookInstanceArn(v string) *NotebookInstanceSummary { + s.NotebookInstanceArn = &v + return s +} + +// SetNotebookInstanceName sets the NotebookInstanceName field's value. +func (s *NotebookInstanceSummary) SetNotebookInstanceName(v string) *NotebookInstanceSummary { + s.NotebookInstanceName = &v + return s +} + +// SetNotebookInstanceStatus sets the NotebookInstanceStatus field's value. +func (s *NotebookInstanceSummary) SetNotebookInstanceStatus(v string) *NotebookInstanceSummary { + s.NotebookInstanceStatus = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *NotebookInstanceSummary) SetUrl(v string) *NotebookInstanceSummary { + s.Url = &v + return s +} + +// Provides information about how to store model training results (model artifacts). 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/OutputDataConfig +type OutputDataConfig struct { + _ struct{} `type:"structure"` + + // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to + // encrypt the model artifacts at rest using Amazon S3 server-side encryption. + // + // If the configuration of the output S3 bucket requires server-side encryption + // for objects, and you don't provide the KMS key ID, Amazon SageMaker uses + // the default service key. For more information, see KMS-Managed Encryption + // Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) + // in Amazon Simple Storage Service developer guide. + // + // The KMS key policy must grant permission to the IAM role you specify in your + // CreateTrainingJob request. Using Key Policies in AWS KMS (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // in the AWS Key Management Service Developer Guide. + KmsKeyId *string `type:"string"` + + // Identifies the S3 path where you want Amazon SageMaker to store the model + // artifacts. For example, s3://bucket-name/key-name-prefix. + // + // S3OutputPath is a required field + S3OutputPath *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s OutputDataConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputDataConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputDataConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputDataConfig"} + if s.S3OutputPath == nil { + invalidParams.Add(request.NewErrParamRequired("S3OutputPath")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyId sets the KmsKeyId field's value. 
+func (s *OutputDataConfig) SetKmsKeyId(v string) *OutputDataConfig { + s.KmsKeyId = &v + return s +} + +// SetS3OutputPath sets the S3OutputPath field's value. +func (s *OutputDataConfig) SetS3OutputPath(v string) *OutputDataConfig { + s.S3OutputPath = &v + return s +} + +// Identifies a model that you want to host and the resources to deploy for +// hosting it. If you are deploying multiple models, tell Amazon SageMaker how +// to distribute traffic among the models by specifying variant weights. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ProductionVariant +type ProductionVariant struct { + _ struct{} `type:"structure"` + + // Number of instances to launch initially. + // + // InitialInstanceCount is a required field + InitialInstanceCount *int64 `min:"1" type:"integer" required:"true"` + + // Determines initial traffic distribution among all of the models that you + // specify in the endpoint configuration. The traffic to a production variant + // is determined by the ratio of the VariantWeight to the sum of all VariantWeight + // values across all ProductionVariants. If unspecified, it defaults to 1.0. + InitialVariantWeight *float64 `type:"float"` + + // The ML compute instance type. + // + // InstanceType is a required field + InstanceType *string `type:"string" required:"true" enum:"ProductionVariantInstanceType"` + + // The name of the model that you want to host. This is the name that you specified + // when creating the model. + // + // ModelName is a required field + ModelName *string `type:"string" required:"true"` + + // The name of the production variant. 
+ // + // VariantName is a required field + VariantName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ProductionVariant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProductionVariant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProductionVariant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProductionVariant"} + if s.InitialInstanceCount == nil { + invalidParams.Add(request.NewErrParamRequired("InitialInstanceCount")) + } + if s.InitialInstanceCount != nil && *s.InitialInstanceCount < 1 { + invalidParams.Add(request.NewErrParamMinValue("InitialInstanceCount", 1)) + } + if s.InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceType")) + } + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + if s.VariantName == nil { + invalidParams.Add(request.NewErrParamRequired("VariantName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInitialInstanceCount sets the InitialInstanceCount field's value. +func (s *ProductionVariant) SetInitialInstanceCount(v int64) *ProductionVariant { + s.InitialInstanceCount = &v + return s +} + +// SetInitialVariantWeight sets the InitialVariantWeight field's value. +func (s *ProductionVariant) SetInitialVariantWeight(v float64) *ProductionVariant { + s.InitialVariantWeight = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *ProductionVariant) SetInstanceType(v string) *ProductionVariant { + s.InstanceType = &v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *ProductionVariant) SetModelName(v string) *ProductionVariant { + s.ModelName = &v + return s +} + +// SetVariantName sets the VariantName field's value. 
+func (s *ProductionVariant) SetVariantName(v string) *ProductionVariant { + s.VariantName = &v + return s +} + +// Describes weight and capacities for a production variant associated with +// an endpoint. If you sent a request to the UpdateWeightAndCapacities API and +// the endpoint status is Updating, you get different desired and current values. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ProductionVariantSummary +type ProductionVariantSummary struct { + _ struct{} `type:"structure"` + + // The number of instances associated with the variant. + CurrentInstanceCount *int64 `min:"1" type:"integer"` + + // The weight associated with the variant. + CurrentWeight *float64 `type:"float"` + + // The number of instances requested in the UpdateWeightAndCapacities request. + DesiredInstanceCount *int64 `min:"1" type:"integer"` + + // The requested weight, as specified in the UpdateWeightAndCapacities request. + DesiredWeight *float64 `type:"float"` + + // The name of the variant. + // + // VariantName is a required field + VariantName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ProductionVariantSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProductionVariantSummary) GoString() string { + return s.String() +} + +// SetCurrentInstanceCount sets the CurrentInstanceCount field's value. +func (s *ProductionVariantSummary) SetCurrentInstanceCount(v int64) *ProductionVariantSummary { + s.CurrentInstanceCount = &v + return s +} + +// SetCurrentWeight sets the CurrentWeight field's value. +func (s *ProductionVariantSummary) SetCurrentWeight(v float64) *ProductionVariantSummary { + s.CurrentWeight = &v + return s +} + +// SetDesiredInstanceCount sets the DesiredInstanceCount field's value. 
+func (s *ProductionVariantSummary) SetDesiredInstanceCount(v int64) *ProductionVariantSummary { + s.DesiredInstanceCount = &v + return s +} + +// SetDesiredWeight sets the DesiredWeight field's value. +func (s *ProductionVariantSummary) SetDesiredWeight(v float64) *ProductionVariantSummary { + s.DesiredWeight = &v + return s +} + +// SetVariantName sets the VariantName field's value. +func (s *ProductionVariantSummary) SetVariantName(v string) *ProductionVariantSummary { + s.VariantName = &v + return s +} + +// Describes the resources, including ML compute instances and ML storage volumes, +// to use for model training. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ResourceConfig +type ResourceConfig struct { + _ struct{} `type:"structure"` + + // The number of ML compute instances to use. For distributed training, provide + // a value greater than 1. + // + // InstanceCount is a required field + InstanceCount *int64 `min:"1" type:"integer" required:"true"` + + // The ML compute instance type. + // + // InstanceType is a required field + InstanceType *string `type:"string" required:"true" enum:"TrainingInstanceType"` + + // The size of the ML storage volume that you want to provision. + // + // ML storage volumes store model artifacts and incremental states. Training + // algorithms might also use the ML storage volume for scratch space. If you + // want to store the training data in the ML storage volume, choose File as + // the TrainingInputMode in the algorithm specification. + // + // You must specify sufficient ML storage for your scenario. + // + // Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume + // type. 
+ // + // VolumeSizeInGB is a required field + VolumeSizeInGB *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ResourceConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceConfig"} + if s.InstanceCount == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceCount")) + } + if s.InstanceCount != nil && *s.InstanceCount < 1 { + invalidParams.Add(request.NewErrParamMinValue("InstanceCount", 1)) + } + if s.InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceType")) + } + if s.VolumeSizeInGB == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeSizeInGB")) + } + if s.VolumeSizeInGB != nil && *s.VolumeSizeInGB < 1 { + invalidParams.Add(request.NewErrParamMinValue("VolumeSizeInGB", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *ResourceConfig) SetInstanceCount(v int64) *ResourceConfig { + s.InstanceCount = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *ResourceConfig) SetInstanceType(v string) *ResourceConfig { + s.InstanceType = &v + return s +} + +// SetVolumeSizeInGB sets the VolumeSizeInGB field's value. +func (s *ResourceConfig) SetVolumeSizeInGB(v int64) *ResourceConfig { + s.VolumeSizeInGB = &v + return s +} + +// Describes the S3 data source. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/S3DataSource +type S3DataSource struct { + _ struct{} `type:"structure"` + + // If you want Amazon SageMaker to replicate the entire dataset on each ML compute + // instance that is launched for model training, specify FullyReplicated. + // + // If you want Amazon SageMaker to replicate a subset of data on each ML compute + // instance that is launched for model training, specify ShardedByS3Key. If + // there are n ML compute instances launched for a training job, each instance + // gets approximately 1/n of the number of S3 objects. In this case, model training + // on each machine uses only the subset of training data. + // + // Don't choose more ML compute instances for training than available S3 objects. + // If you do, some nodes won't get any data and you will pay for nodes that + // aren't getting any training data. This applies in both FILE and PIPE modes. + // Keep this in mind when developing algorithms. + // + // In distributed training, where you use multiple ML compute EC2 instances, + // you might choose ShardedByS3Key. If the algorithm requires copying training + // data to the ML storage volume (when TrainingInputMode is set to File), this + // copies 1/n of the number of objects. + S3DataDistributionType *string `type:"string" enum:"S3DataDistribution"` + + // If you choose S3Prefix, S3Uri identifies a key name prefix. Amazon SageMaker + // uses all objects with the specified key name prefix for model training. + // + // If you choose ManifestFile, S3Uri identifies an object that is a manifest + // file containing a list of object keys that you want Amazon SageMaker to use + // for model training. + // + // S3DataType is a required field + S3DataType *string `type:"string" required:"true" enum:"S3DataType"` + + // Depending on the value specified for the S3DataType, identifies either a + // key name prefix or a manifest. 
For example:
 + //
 + //    * A key name prefix might look like this: s3://bucketname/exampleprefix.
 + //
 + //
 + //    * A manifest might look like this: s3://bucketname/example.manifest
 + //
 + //       The manifest is an S3 object which is a JSON file with the following format:
 + //
 + //
 + //  [
 + //
 + // {"prefix": "s3://customer_bucket/some/prefix/"},
 + //
 + // "relative/path/to/custdata-1",
 + //
 + // "relative/path/custdata-2",
 + //
 + // ...
 + //
 + // ]
 + //
 + // The preceding JSON matches the following s3Uris:
 + //
 + // s3://customer_bucket/some/prefix/relative/path/to/custdata-1
 + //
 + // s3://customer_bucket/some/prefix/relative/path/custdata-2
 + //
 + // ...
 + //
 + // The complete set of s3uris in this manifest constitutes the input data for
 + // the channel for this datasource. The object that each s3uris points to
 + // must be readable by the IAM role that Amazon SageMaker uses to perform tasks
 + // on your behalf.
 + //
 + // S3Uri is a required field
 + S3Uri *string `type:"string" required:"true"`
 +}
 +
 +// String returns the string representation
 +func (s S3DataSource) String() string {
 + return awsutil.Prettify(s)
 +}
 +
 +// GoString returns the string representation
 +func (s S3DataSource) GoString() string {
 + return s.String()
 +}
 +
 +// Validate inspects the fields of the type to determine if they are valid.
 +func (s *S3DataSource) Validate() error {
 + invalidParams := request.ErrInvalidParams{Context: "S3DataSource"}
 + if s.S3DataType == nil {
 + invalidParams.Add(request.NewErrParamRequired("S3DataType"))
 + }
 + if s.S3Uri == nil {
 + invalidParams.Add(request.NewErrParamRequired("S3Uri"))
 + }
 +
 + if invalidParams.Len() > 0 {
 + return invalidParams
 + }
 + return nil
 +}
 +
 +// SetS3DataDistributionType sets the S3DataDistributionType field's value.
 +func (s *S3DataSource) SetS3DataDistributionType(v string) *S3DataSource {
 + s.S3DataDistributionType = &v
 + return s
 +}
 +
 +// SetS3DataType sets the S3DataType field's value. 
+func (s *S3DataSource) SetS3DataType(v string) *S3DataSource { + s.S3DataType = &v + return s +} + +// SetS3Uri sets the S3Uri field's value. +func (s *S3DataSource) SetS3Uri(v string) *S3DataSource { + s.S3Uri = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StartNotebookInstanceInput +type StartNotebookInstanceInput struct { + _ struct{} `type:"structure"` + + // The name of the notebook instance to start. + // + // NotebookInstanceName is a required field + NotebookInstanceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartNotebookInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartNotebookInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartNotebookInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartNotebookInstanceInput"} + if s.NotebookInstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotebookInstanceName sets the NotebookInstanceName field's value. 
+func (s *StartNotebookInstanceInput) SetNotebookInstanceName(v string) *StartNotebookInstanceInput { + s.NotebookInstanceName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StartNotebookInstanceOutput +type StartNotebookInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartNotebookInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartNotebookInstanceOutput) GoString() string { + return s.String() +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopNotebookInstanceInput +type StopNotebookInstanceInput struct { + _ struct{} `type:"structure"` + + // The name of the notebook instance to terminate. + // + // NotebookInstanceName is a required field + NotebookInstanceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StopNotebookInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopNotebookInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopNotebookInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopNotebookInstanceInput"} + if s.NotebookInstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotebookInstanceName sets the NotebookInstanceName field's value. 
+func (s *StopNotebookInstanceInput) SetNotebookInstanceName(v string) *StopNotebookInstanceInput { + s.NotebookInstanceName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopNotebookInstanceOutput +type StopNotebookInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopNotebookInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopNotebookInstanceOutput) GoString() string { + return s.String() +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopTrainingJobRequest +type StopTrainingJobInput struct { + _ struct{} `type:"structure"` + + // The name of the training job to stop. + // + // TrainingJobName is a required field + TrainingJobName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopTrainingJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopTrainingJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopTrainingJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopTrainingJobInput"} + if s.TrainingJobName == nil { + invalidParams.Add(request.NewErrParamRequired("TrainingJobName")) + } + if s.TrainingJobName != nil && len(*s.TrainingJobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TrainingJobName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTrainingJobName sets the TrainingJobName field's value. 
+func (s *StopTrainingJobInput) SetTrainingJobName(v string) *StopTrainingJobInput {
 + s.TrainingJobName = &v
 + return s
 +}
 +
 +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopTrainingJobOutput
 +type StopTrainingJobOutput struct {
 + _ struct{} `type:"structure"`
 +}
 +
 +// String returns the string representation
 +func (s StopTrainingJobOutput) String() string {
 + return awsutil.Prettify(s)
 +}
 +
 +// GoString returns the string representation
 +func (s StopTrainingJobOutput) GoString() string {
 + return s.String()
 +}
 +
 +// Specifies how long model training can run. When model training reaches the
 +// limit, Amazon SageMaker ends the training job. Use this API to cap model
 +// training cost.
 +//
 +// To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which
 +// delays job termination for 120 seconds. Algorithms might use this 120-second
 +// window to save the model artifacts, so the results of training are not lost.
 +//
 +// Training algorithms provided by Amazon SageMaker automatically save the
 +// intermediate results of a model training job (this is best effort, as the
 +// model might not be ready to save at some stages, for example when training
 +// has just started). This intermediate data is a valid model artifact. You can
 +// use it to create a model (CreateModel).
 +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StoppingCondition
 +type StoppingCondition struct {
 + _ struct{} `type:"structure"`
 +
 + // The maximum length of time, in seconds, that the training job can run. If
 + // model training does not complete during this time, Amazon SageMaker ends
 + // the job. If a value is not specified, the default value is 1 day. The maximum
 + // value is 5 days. 
+ MaxRuntimeInSeconds *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s StoppingCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StoppingCondition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StoppingCondition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StoppingCondition"} + if s.MaxRuntimeInSeconds != nil && *s.MaxRuntimeInSeconds < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxRuntimeInSeconds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxRuntimeInSeconds sets the MaxRuntimeInSeconds field's value. +func (s *StoppingCondition) SetMaxRuntimeInSeconds(v int64) *StoppingCondition { + s.MaxRuntimeInSeconds = &v + return s +} + +// Describes a tag. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/Tag +type Tag struct { + _ struct{} `type:"structure"` + + // The tag key. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The tag value. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Provides summary information about a training job. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/TrainingJobSummary +type TrainingJobSummary struct { + _ struct{} `type:"structure"` + + // A timestamp that shows when the training job was created. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // Timestamp when the training job was last modified. + LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A timestamp that shows when the training job ended. This field is set only + // if the training job has one of the terminal statuses (Completed, Failed, + // or Stopped). + TrainingEndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Amazon Resource Name (ARN) of the training job. + // + // TrainingJobArn is a required field + TrainingJobArn *string `type:"string" required:"true"` + + // The name of the training job that you want a summary for. + // + // TrainingJobName is a required field + TrainingJobName *string `min:"1" type:"string" required:"true"` + + // The status of the training job. 
+ // + // TrainingJobStatus is a required field + TrainingJobStatus *string `type:"string" required:"true" enum:"TrainingJobStatus"` +} + +// String returns the string representation +func (s TrainingJobSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrainingJobSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *TrainingJobSummary) SetCreationTime(v time.Time) *TrainingJobSummary { + s.CreationTime = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *TrainingJobSummary) SetLastModifiedTime(v time.Time) *TrainingJobSummary { + s.LastModifiedTime = &v + return s +} + +// SetTrainingEndTime sets the TrainingEndTime field's value. +func (s *TrainingJobSummary) SetTrainingEndTime(v time.Time) *TrainingJobSummary { + s.TrainingEndTime = &v + return s +} + +// SetTrainingJobArn sets the TrainingJobArn field's value. +func (s *TrainingJobSummary) SetTrainingJobArn(v string) *TrainingJobSummary { + s.TrainingJobArn = &v + return s +} + +// SetTrainingJobName sets the TrainingJobName field's value. +func (s *TrainingJobSummary) SetTrainingJobName(v string) *TrainingJobSummary { + s.TrainingJobName = &v + return s +} + +// SetTrainingJobStatus sets the TrainingJobStatus field's value. +func (s *TrainingJobSummary) SetTrainingJobStatus(v string) *TrainingJobSummary { + s.TrainingJobStatus = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointInput +type UpdateEndpointInput struct { + _ struct{} `type:"structure"` + + // The name of the new endpoint configuration. + // + // EndpointConfigName is a required field + EndpointConfigName *string `type:"string" required:"true"` + + // The name of the endpoint whose configuration you want to update. 
+ // + // EndpointName is a required field + EndpointName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateEndpointInput"} + if s.EndpointConfigName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointConfigName")) + } + if s.EndpointName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndpointConfigName sets the EndpointConfigName field's value. +func (s *UpdateEndpointInput) SetEndpointConfigName(v string) *UpdateEndpointInput { + s.EndpointConfigName = &v + return s +} + +// SetEndpointName sets the EndpointName field's value. +func (s *UpdateEndpointInput) SetEndpointName(v string) *UpdateEndpointInput { + s.EndpointName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointOutput +type UpdateEndpointOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the endpoint. + // + // EndpointArn is a required field + EndpointArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEndpointOutput) GoString() string { + return s.String() +} + +// SetEndpointArn sets the EndpointArn field's value. 
+func (s *UpdateEndpointOutput) SetEndpointArn(v string) *UpdateEndpointOutput { + s.EndpointArn = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointWeightsAndCapacitiesInput +type UpdateEndpointWeightsAndCapacitiesInput struct { + _ struct{} `type:"structure"` + + // An object that provides new capacity and weight values for a variant. + // + // DesiredWeightsAndCapacities is a required field + DesiredWeightsAndCapacities []*DesiredWeightAndCapacity `min:"1" type:"list" required:"true"` + + // The name of an existing Amazon SageMaker endpoint. + // + // EndpointName is a required field + EndpointName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateEndpointWeightsAndCapacitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEndpointWeightsAndCapacitiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateEndpointWeightsAndCapacitiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateEndpointWeightsAndCapacitiesInput"} + if s.DesiredWeightsAndCapacities == nil { + invalidParams.Add(request.NewErrParamRequired("DesiredWeightsAndCapacities")) + } + if s.DesiredWeightsAndCapacities != nil && len(s.DesiredWeightsAndCapacities) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DesiredWeightsAndCapacities", 1)) + } + if s.EndpointName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointName")) + } + if s.DesiredWeightsAndCapacities != nil { + for i, v := range s.DesiredWeightsAndCapacities { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DesiredWeightsAndCapacities", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDesiredWeightsAndCapacities sets the DesiredWeightsAndCapacities field's value. +func (s *UpdateEndpointWeightsAndCapacitiesInput) SetDesiredWeightsAndCapacities(v []*DesiredWeightAndCapacity) *UpdateEndpointWeightsAndCapacitiesInput { + s.DesiredWeightsAndCapacities = v + return s +} + +// SetEndpointName sets the EndpointName field's value. +func (s *UpdateEndpointWeightsAndCapacitiesInput) SetEndpointName(v string) *UpdateEndpointWeightsAndCapacitiesInput { + s.EndpointName = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointWeightsAndCapacitiesOutput +type UpdateEndpointWeightsAndCapacitiesOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the updated endpoint. 
+ // + // EndpointArn is a required field + EndpointArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateEndpointWeightsAndCapacitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEndpointWeightsAndCapacitiesOutput) GoString() string { + return s.String() +} + +// SetEndpointArn sets the EndpointArn field's value. +func (s *UpdateEndpointWeightsAndCapacitiesOutput) SetEndpointArn(v string) *UpdateEndpointWeightsAndCapacitiesOutput { + s.EndpointArn = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateNotebookInstanceInput +type UpdateNotebookInstanceInput struct { + _ struct{} `type:"structure"` + + // The Amazon ML compute instance type. + InstanceType *string `type:"string" enum:"InstanceType"` + + // The name of the notebook instance to update. + // + // NotebookInstanceName is a required field + NotebookInstanceName *string `type:"string" required:"true"` + + // Amazon Resource Name (ARN) of the IAM role to associate with the instance. + RoleArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s UpdateNotebookInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateNotebookInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateNotebookInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateNotebookInstanceInput"} + if s.NotebookInstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceType sets the InstanceType field's value. +func (s *UpdateNotebookInstanceInput) SetInstanceType(v string) *UpdateNotebookInstanceInput { + s.InstanceType = &v + return s +} + +// SetNotebookInstanceName sets the NotebookInstanceName field's value. +func (s *UpdateNotebookInstanceInput) SetNotebookInstanceName(v string) *UpdateNotebookInstanceInput { + s.NotebookInstanceName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *UpdateNotebookInstanceInput) SetRoleArn(v string) *UpdateNotebookInstanceInput { + s.RoleArn = &v + return s +} + +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateNotebookInstanceOutput +type UpdateNotebookInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateNotebookInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateNotebookInstanceOutput) GoString() string { + return s.String() +} + +const ( + // CompressionTypeNone is a CompressionType enum value + CompressionTypeNone = "None" + + // CompressionTypeGzip is a CompressionType enum value + CompressionTypeGzip = "Gzip" +) + +const ( + // EndpointConfigSortKeyName is a EndpointConfigSortKey enum value + EndpointConfigSortKeyName = "Name" + + // EndpointConfigSortKeyCreationTime is a EndpointConfigSortKey enum value + EndpointConfigSortKeyCreationTime = "CreationTime" +) + +const ( + // EndpointSortKeyName is a EndpointSortKey enum value + 
EndpointSortKeyName = "Name" + + // EndpointSortKeyCreationTime is a EndpointSortKey enum value + EndpointSortKeyCreationTime = "CreationTime" + + // EndpointSortKeyStatus is a EndpointSortKey enum value + EndpointSortKeyStatus = "Status" +) + +const ( + // EndpointStatusOutOfService is a EndpointStatus enum value + EndpointStatusOutOfService = "OutOfService" + + // EndpointStatusCreating is a EndpointStatus enum value + EndpointStatusCreating = "Creating" + + // EndpointStatusUpdating is a EndpointStatus enum value + EndpointStatusUpdating = "Updating" + + // EndpointStatusRollingBack is a EndpointStatus enum value + EndpointStatusRollingBack = "RollingBack" + + // EndpointStatusInService is a EndpointStatus enum value + EndpointStatusInService = "InService" + + // EndpointStatusDeleting is a EndpointStatus enum value + EndpointStatusDeleting = "Deleting" + + // EndpointStatusFailed is a EndpointStatus enum value + EndpointStatusFailed = "Failed" +) + +const ( + // InstanceTypeMlT2Medium is a InstanceType enum value + InstanceTypeMlT2Medium = "ml.t2.medium" + + // InstanceTypeMlM4Xlarge is a InstanceType enum value + InstanceTypeMlM4Xlarge = "ml.m4.xlarge" + + // InstanceTypeMlP2Xlarge is a InstanceType enum value + InstanceTypeMlP2Xlarge = "ml.p2.xlarge" +) + +const ( + // ModelSortKeyName is a ModelSortKey enum value + ModelSortKeyName = "Name" + + // ModelSortKeyCreationTime is a ModelSortKey enum value + ModelSortKeyCreationTime = "CreationTime" +) + +const ( + // NotebookInstanceSortKeyName is a NotebookInstanceSortKey enum value + NotebookInstanceSortKeyName = "Name" + + // NotebookInstanceSortKeyCreationTime is a NotebookInstanceSortKey enum value + NotebookInstanceSortKeyCreationTime = "CreationTime" + + // NotebookInstanceSortKeyStatus is a NotebookInstanceSortKey enum value + NotebookInstanceSortKeyStatus = "Status" +) + +const ( + // NotebookInstanceSortOrderAscending is a NotebookInstanceSortOrder enum value + NotebookInstanceSortOrderAscending = 
"Ascending" + + // NotebookInstanceSortOrderDescending is a NotebookInstanceSortOrder enum value + NotebookInstanceSortOrderDescending = "Descending" +) + +const ( + // NotebookInstanceStatusPending is a NotebookInstanceStatus enum value + NotebookInstanceStatusPending = "Pending" + + // NotebookInstanceStatusInService is a NotebookInstanceStatus enum value + NotebookInstanceStatusInService = "InService" + + // NotebookInstanceStatusStopping is a NotebookInstanceStatus enum value + NotebookInstanceStatusStopping = "Stopping" + + // NotebookInstanceStatusStopped is a NotebookInstanceStatus enum value + NotebookInstanceStatusStopped = "Stopped" + + // NotebookInstanceStatusFailed is a NotebookInstanceStatus enum value + NotebookInstanceStatusFailed = "Failed" + + // NotebookInstanceStatusDeleting is a NotebookInstanceStatus enum value + NotebookInstanceStatusDeleting = "Deleting" +) + +const ( + // OrderKeyAscending is a OrderKey enum value + OrderKeyAscending = "Ascending" + + // OrderKeyDescending is a OrderKey enum value + OrderKeyDescending = "Descending" +) + +const ( + // ProductionVariantInstanceTypeMlC42xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC42xlarge = "ml.c4.2xlarge" + + // ProductionVariantInstanceTypeMlC48xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC48xlarge = "ml.c4.8xlarge" + + // ProductionVariantInstanceTypeMlC4Xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC4Xlarge = "ml.c4.xlarge" + + // ProductionVariantInstanceTypeMlC52xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC52xlarge = "ml.c5.2xlarge" + + // ProductionVariantInstanceTypeMlC59xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC59xlarge = "ml.c5.9xlarge" + + // ProductionVariantInstanceTypeMlC5Xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5Xlarge = 
"ml.c5.xlarge" + + // ProductionVariantInstanceTypeMlM4Xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM4Xlarge = "ml.m4.xlarge" + + // ProductionVariantInstanceTypeMlP2Xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlP2Xlarge = "ml.p2.xlarge" + + // ProductionVariantInstanceTypeMlP32xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlP32xlarge = "ml.p3.2xlarge" + + // ProductionVariantInstanceTypeMlT2Medium is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlT2Medium = "ml.t2.medium" +) + +const ( + // RecordWrapperNone is a RecordWrapper enum value + RecordWrapperNone = "None" + + // RecordWrapperRecordIo is a RecordWrapper enum value + RecordWrapperRecordIo = "RecordIO" +) + +const ( + // S3DataDistributionFullyReplicated is a S3DataDistribution enum value + S3DataDistributionFullyReplicated = "FullyReplicated" + + // S3DataDistributionShardedByS3key is a S3DataDistribution enum value + S3DataDistributionShardedByS3key = "ShardedByS3Key" +) + +const ( + // S3DataTypeManifestFile is a S3DataType enum value + S3DataTypeManifestFile = "ManifestFile" + + // S3DataTypeS3prefix is a S3DataType enum value + S3DataTypeS3prefix = "S3Prefix" +) + +const ( + // SecondaryStatusStarting is a SecondaryStatus enum value + SecondaryStatusStarting = "Starting" + + // SecondaryStatusDownloading is a SecondaryStatus enum value + SecondaryStatusDownloading = "Downloading" + + // SecondaryStatusTraining is a SecondaryStatus enum value + SecondaryStatusTraining = "Training" + + // SecondaryStatusUploading is a SecondaryStatus enum value + SecondaryStatusUploading = "Uploading" + + // SecondaryStatusStopping is a SecondaryStatus enum value + SecondaryStatusStopping = "Stopping" + + // SecondaryStatusStopped is a SecondaryStatus enum value + SecondaryStatusStopped = "Stopped" + + // SecondaryStatusMaxRuntimeExceeded is a SecondaryStatus enum value + 
SecondaryStatusMaxRuntimeExceeded = "MaxRuntimeExceeded" + + // SecondaryStatusCompleted is a SecondaryStatus enum value + SecondaryStatusCompleted = "Completed" + + // SecondaryStatusFailed is a SecondaryStatus enum value + SecondaryStatusFailed = "Failed" +) + +const ( + // SortByName is a SortBy enum value + SortByName = "Name" + + // SortByCreationTime is a SortBy enum value + SortByCreationTime = "CreationTime" + + // SortByStatus is a SortBy enum value + SortByStatus = "Status" +) + +const ( + // SortOrderAscending is a SortOrder enum value + SortOrderAscending = "Ascending" + + // SortOrderDescending is a SortOrder enum value + SortOrderDescending = "Descending" +) + +const ( + // TrainingInputModePipe is a TrainingInputMode enum value + TrainingInputModePipe = "Pipe" + + // TrainingInputModeFile is a TrainingInputMode enum value + TrainingInputModeFile = "File" +) + +const ( + // TrainingInstanceTypeMlM4Xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlM4Xlarge = "ml.m4.xlarge" + + // TrainingInstanceTypeMlM44xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlM44xlarge = "ml.m4.4xlarge" + + // TrainingInstanceTypeMlM410xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlM410xlarge = "ml.m4.10xlarge" + + // TrainingInstanceTypeMlC4Xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC4Xlarge = "ml.c4.xlarge" + + // TrainingInstanceTypeMlC42xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC42xlarge = "ml.c4.2xlarge" + + // TrainingInstanceTypeMlC48xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC48xlarge = "ml.c4.8xlarge" + + // TrainingInstanceTypeMlP2Xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlP2Xlarge = "ml.p2.xlarge" + + // TrainingInstanceTypeMlP28xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlP28xlarge = "ml.p2.8xlarge" + + // TrainingInstanceTypeMlP216xlarge is a TrainingInstanceType enum value + 
TrainingInstanceTypeMlP216xlarge = "ml.p2.16xlarge" + + // TrainingInstanceTypeMlP32xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlP32xlarge = "ml.p3.2xlarge" + + // TrainingInstanceTypeMlP38xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlP38xlarge = "ml.p3.8xlarge" + + // TrainingInstanceTypeMlP316xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlP316xlarge = "ml.p3.16xlarge" + + // TrainingInstanceTypeMlC5Xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC5Xlarge = "ml.c5.xlarge" + + // TrainingInstanceTypeMlC52xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC52xlarge = "ml.c5.2xlarge" + + // TrainingInstanceTypeMlC54xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC54xlarge = "ml.c5.4xlarge" + + // TrainingInstanceTypeMlC59xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC59xlarge = "ml.c5.9xlarge" + + // TrainingInstanceTypeMlC518xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC518xlarge = "ml.c5.18xlarge" +) + +const ( + // TrainingJobStatusInProgress is a TrainingJobStatus enum value + TrainingJobStatusInProgress = "InProgress" + + // TrainingJobStatusCompleted is a TrainingJobStatus enum value + TrainingJobStatusCompleted = "Completed" + + // TrainingJobStatusFailed is a TrainingJobStatus enum value + TrainingJobStatusFailed = "Failed" + + // TrainingJobStatusStopping is a TrainingJobStatus enum value + TrainingJobStatusStopping = "Stopping" + + // TrainingJobStatusStopped is a TrainingJobStatus enum value + TrainingJobStatusStopped = "Stopped" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go new file mode 100644 index 00000000000..d74f40297cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go @@ -0,0 +1,28 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +// Package sagemaker provides the client and types for making API +// requests to Amazon SageMaker Service. +// +// Definition of the public APIs exposed by SageMaker +// +// See https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24 for more information on this service. +// +// See sagemaker package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sagemaker/ +// +// Using the Client +// +// To contact Amazon SageMaker Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon SageMaker Service client SageMaker for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sagemaker/#New +package sagemaker diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/errors.go new file mode 100644 index 00000000000..d7fd2c45754 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/errors.go @@ -0,0 +1,25 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sagemaker + +const ( + + // ErrCodeResourceInUse for service response error code + // "ResourceInUse". + // + // Resource being accessed is in use. + ErrCodeResourceInUse = "ResourceInUse" + + // ErrCodeResourceLimitExceeded for service response error code + // "ResourceLimitExceeded". + // + // You have exceeded an Amazon SageMaker resource limit. For example, you might + // have too many training jobs created. 
+ ErrCodeResourceLimitExceeded = "ResourceLimitExceeded" + + // ErrCodeResourceNotFound for service response error code + // "ResourceNotFound". + // + // Resource being accessed is not found. + ErrCodeResourceNotFound = "ResourceNotFound" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go new file mode 100644 index 00000000000..fac6d92bae3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go @@ -0,0 +1,98 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sagemaker + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// SageMaker provides the API operation methods for making requests to +// Amazon SageMaker Service. See this package's package overview docs +// for details on the service. +// +// SageMaker methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. +type SageMaker struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "sagemaker" // Service endpoint prefix API calls made to. + EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. +) + +// New creates a new instance of the SageMaker client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SageMaker client from just a session. 
+// svc := sagemaker.New(mySession) +// +// // Create a SageMaker client with additional configuration +// svc := sagemaker.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SageMaker { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SageMaker { + if len(signingName) == 0 { + signingName = "sagemaker" + } + svc := &SageMaker{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2017-07-24", + JSONVersion: "1.1", + TargetPrefix: "SageMaker", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SageMaker operation and runs any +// custom request initialization. 
+func (c *SageMaker) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/waiters.go new file mode 100644 index 00000000000..c8515cc633d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/waiters.go @@ -0,0 +1,331 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sagemaker + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilEndpointDeleted uses the SageMaker API operation +// DescribeEndpoint to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *SageMaker) WaitUntilEndpointDeleted(input *DescribeEndpointInput) error { + return c.WaitUntilEndpointDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilEndpointDeletedWithContext is an extended version of WaitUntilEndpointDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SageMaker) WaitUntilEndpointDeletedWithContext(ctx aws.Context, input *DescribeEndpointInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilEndpointDeleted", + MaxAttempts: 60, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ValidationException", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "EndpointStatus", + Expected: "Failed", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeEndpointInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeEndpointRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilEndpointInService uses the SageMaker API operation +// DescribeEndpoint to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *SageMaker) WaitUntilEndpointInService(input *DescribeEndpointInput) error { + return c.WaitUntilEndpointInServiceWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilEndpointInServiceWithContext is an extended version of WaitUntilEndpointInService. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SageMaker) WaitUntilEndpointInServiceWithContext(ctx aws.Context, input *DescribeEndpointInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilEndpointInService", + MaxAttempts: 120, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "EndpointStatus", + Expected: "InService", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "EndpointStatus", + Expected: "Failed", + }, + { + State: request.FailureWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ValidationException", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeEndpointInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeEndpointRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilNotebookInstanceDeleted uses the SageMaker API operation +// DescribeNotebookInstance to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *SageMaker) WaitUntilNotebookInstanceDeleted(input *DescribeNotebookInstanceInput) error { + return c.WaitUntilNotebookInstanceDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilNotebookInstanceDeletedWithContext is an extended version of WaitUntilNotebookInstanceDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) WaitUntilNotebookInstanceDeletedWithContext(ctx aws.Context, input *DescribeNotebookInstanceInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilNotebookInstanceDeleted", + MaxAttempts: 60, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ValidationException", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "NotebookInstanceStatus", + Expected: "Failed", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeNotebookInstanceInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNotebookInstanceRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilNotebookInstanceInService uses the SageMaker API operation +// DescribeNotebookInstance to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *SageMaker) WaitUntilNotebookInstanceInService(input *DescribeNotebookInstanceInput) error { + return c.WaitUntilNotebookInstanceInServiceWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilNotebookInstanceInServiceWithContext is an extended version of WaitUntilNotebookInstanceInService. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) WaitUntilNotebookInstanceInServiceWithContext(ctx aws.Context, input *DescribeNotebookInstanceInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilNotebookInstanceInService", + MaxAttempts: 60, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "NotebookInstanceStatus", + Expected: "InService", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "NotebookInstanceStatus", + Expected: "Failed", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeNotebookInstanceInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNotebookInstanceRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilNotebookInstanceStopped uses the SageMaker API operation +// DescribeNotebookInstance to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *SageMaker) WaitUntilNotebookInstanceStopped(input *DescribeNotebookInstanceInput) error { + return c.WaitUntilNotebookInstanceStoppedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilNotebookInstanceStoppedWithContext is an extended version of WaitUntilNotebookInstanceStopped. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) WaitUntilNotebookInstanceStoppedWithContext(ctx aws.Context, input *DescribeNotebookInstanceInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilNotebookInstanceStopped", + MaxAttempts: 60, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "NotebookInstanceStatus", + Expected: "Stopped", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "NotebookInstanceStatus", + Expected: "Failed", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeNotebookInstanceInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNotebookInstanceRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilTrainingJobCompletedOrStopped uses the SageMaker API operation +// DescribeTrainingJob to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *SageMaker) WaitUntilTrainingJobCompletedOrStopped(input *DescribeTrainingJobInput) error { + return c.WaitUntilTrainingJobCompletedOrStoppedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilTrainingJobCompletedOrStoppedWithContext is an extended version of WaitUntilTrainingJobCompletedOrStopped. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) WaitUntilTrainingJobCompletedOrStoppedWithContext(ctx aws.Context, input *DescribeTrainingJobInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilTrainingJobCompletedOrStopped", + MaxAttempts: 180, + Delay: request.ConstantWaiterDelay(120 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "TrainingJobStatus", + Expected: "Completed", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "TrainingJobStatus", + Expected: "Stopped", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "TrainingJobStatus", + Expected: "Failed", + }, + { + State: request.FailureWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ValidationException", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeTrainingJobInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTrainingJobRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) 
+ + return w.WaitWithContext(ctx) +} diff --git a/vendor/vendor.json b/vendor/vendor.json index bdb77abd1bd..eb39e73606b 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -860,6 +860,14 @@ "version": "v1.12.55", "versionExact": "v1.12.55" }, + { + "checksumSHA1": "C5Zj/tXuFQfwXkRsj6l0e6c71qE=", + "path": "github.com/aws/aws-sdk-go/service/sagemaker", + "revision": "f62f7b7c5425f2b1a630932617477bdeac6dc371", + "revisionTime": "2018-01-02T21:46:00Z", + "version": "v1.12.55", + "versionExact": "v1.12.55" + }, { "checksumSHA1": "QZU8vR9cOIenYiH+Ywl4Gzfnlp0=", "path": "github.com/aws/aws-sdk-go/service/servicecatalog", From 35d36510819be819844954ede4714529bcc580e1 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 9 Jan 2018 14:17:19 +0000 Subject: [PATCH 130/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a41c0142fe7..f5540b4da70 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ BUG FIXES: * resource/aws_db_parameter_group: Remove group from state if it's gone [GH-2868] * resource/aws_appautoscaling_target: Make `role_arn` optional & computed [GH-2889] * resource/aws_ssm_maintenance_window: Respect `enabled` during updates [GH-2818] +* resource/aws_lb_target_group: Fix max prefix length check [GH-2790] ## 1.6.0 (December 18, 2017) From 1831c1c3fedc53c44601204a5360fe280c67e3ec Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 9 Jan 2018 11:34:02 +0000 Subject: [PATCH 131/350] tests/aws_appautoscaling_*: Remove role_arn --- ...resource_aws_appautoscaling_policy_test.go | 176 +----------------- ...ws_appautoscaling_scheduled_action_test.go | 109 ----------- 2 files changed, 2 insertions(+), 283 deletions(-) diff --git a/aws/resource_aws_appautoscaling_policy_test.go b/aws/resource_aws_appautoscaling_policy_test.go index b039a0a6f75..a31e94ee5a3 100644 --- a/aws/resource_aws_appautoscaling_policy_test.go +++ b/aws/resource_aws_appautoscaling_policy_test.go @@ -202,35 
+202,6 @@ func testAccAWSAppautoscalingPolicyConfig( randClusterName string, randPolicyName string) string { return fmt.Sprintf(` -resource "aws_iam_role" "autoscale_role" { - name = "%s" - path = "/" - - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"*\"},\"Action\":[\"sts:AssumeRole\"]}]}" -} - -resource "aws_iam_role_policy" "autoscale_role_policy" { - name = "%s" - role = "${aws_iam_role.autoscale_role.id}" - - policy = < Date: Tue, 9 Jan 2018 09:23:09 -0800 Subject: [PATCH 132/350] New Resource: aws_guardduty_member --- aws/provider.go | 1 + aws/resource_aws_guardduty_member.go | 121 ++++++++++++++++ aws/resource_aws_guardduty_member_test.go | 133 ++++++++++++++++++ aws/resource_aws_guardduty_test.go | 4 + website/aws.erb | 4 + website/docs/r/guardduty_member.html.markdown | 55 ++++++++ 6 files changed, 318 insertions(+) create mode 100644 aws/resource_aws_guardduty_member.go create mode 100644 aws/resource_aws_guardduty_member_test.go create mode 100644 website/docs/r/guardduty_member.html.markdown diff --git a/aws/provider.go b/aws/provider.go index b3b3ac340c2..4c3af722c88 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -356,6 +356,7 @@ func Provider() terraform.ResourceProvider { "aws_flow_log": resourceAwsFlowLog(), "aws_glacier_vault": resourceAwsGlacierVault(), "aws_guardduty_detector": resourceAwsGuardDutyDetector(), + "aws_guardduty_member": resourceAwsGuardDutyMember(), "aws_iam_access_key": resourceAwsIamAccessKey(), "aws_iam_account_alias": resourceAwsIamAccountAlias(), "aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(), diff --git a/aws/resource_aws_guardduty_member.go b/aws/resource_aws_guardduty_member.go new file mode 100644 index 00000000000..937cce3f760 --- /dev/null +++ b/aws/resource_aws_guardduty_member.go @@ -0,0 +1,121 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + 
"github.com/aws/aws-sdk-go/service/guardduty" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsGuardDutyMember() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsGuardDutyMemberCreate, + Read: resourceAwsGuardDutyMemberRead, + Delete: resourceAwsGuardDutyMemberDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAwsAccountId, + }, + "detector_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "email": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsGuardDutyMemberCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).guarddutyconn + accountID := d.Get("account_id").(string) + detectorID := d.Get("detector_id").(string) + + input := guardduty.CreateMembersInput{ + AccountDetails: []*guardduty.AccountDetail{{ + AccountId: aws.String(accountID), + Email: aws.String(d.Get("email").(string)), + }}, + DetectorId: aws.String(detectorID), + } + + log.Printf("[DEBUG] Creating GuardDuty Member: %s", input) + _, err := conn.CreateMembers(&input) + if err != nil { + return fmt.Errorf("Creating GuardDuty Member failed: %s", err.Error()) + } + d.SetId(fmt.Sprintf("%s:%s", detectorID, accountID)) + + return resourceAwsGuardDutyMemberRead(d, meta) +} + +func resourceAwsGuardDutyMemberRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).guarddutyconn + + idParts := strings.Split(d.Id(), ":") + accountID := idParts[1] + detectorID := idParts[0] + + input := guardduty.GetMembersInput{ + AccountIds: []*string{aws.String(accountID)}, + DetectorId: aws.String(detectorID), + } + + log.Printf("[DEBUG] Reading GuardDuty Member: %s", input) + gmo, err := conn.GetMembers(&input) + if err != nil { + if isAWSErr(err, 
guardduty.ErrCodeBadRequestException, "The request is rejected because the input detectorId is not owned by the current account.") { + log.Printf("[WARN] GuardDuty detector %q not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Reading GuardDuty Member '%s' failed: %s", d.Id(), err.Error()) + } + + if gmo.Members == nil || (len(gmo.Members) < 1) { + log.Printf("[WARN] GuardDuty Member %q not found, removing from state", d.Id()) + d.SetId("") + return nil + } + member := gmo.Members[0] + d.Set("account_id", *member.AccountId) + d.Set("detector_id", detectorID) + d.Set("email", *member.Email) + + return nil +} + +func resourceAwsGuardDutyMemberDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).guarddutyconn + + idParts := strings.Split(d.Id(), ":") + accountID := idParts[1] + detectorID := idParts[0] + + input := guardduty.DeleteMembersInput{ + AccountIds: []*string{aws.String(accountID)}, + DetectorId: aws.String(detectorID), + } + + log.Printf("[DEBUG] Delete GuardDuty Member: %s", input) + _, err := conn.DeleteMembers(&input) + if err != nil { + return fmt.Errorf("Deleting GuardDuty Member '%s' failed: %s", d.Id(), err.Error()) + } + return nil +} diff --git a/aws/resource_aws_guardduty_member_test.go b/aws/resource_aws_guardduty_member_test.go new file mode 100644 index 00000000000..db674af55fb --- /dev/null +++ b/aws/resource_aws_guardduty_member_test.go @@ -0,0 +1,133 @@ +package aws + +import ( + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/guardduty" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func testAccAwsGuardDutyMember_basic(t *testing.T) { + resourceName := "aws_guardduty_member.test" + accountID := "111111111111" + email := "required@example.com" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + 
CheckDestroy: testAccCheckAwsGuardDutyMemberDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGuardDutyMemberConfig_basic(accountID, email), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsGuardDutyMemberExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "account_id", accountID), + resource.TestCheckResourceAttrSet(resourceName, "detector_id"), + resource.TestCheckResourceAttr(resourceName, "email", email), + ), + }, + }, + }) +} + +func testAccAwsGuardDutyMember_import(t *testing.T) { + resourceName := "aws_guardduty_member.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSesTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGuardDutyMemberConfig_basic("111111111111", "required@example.com"), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAwsGuardDutyMemberDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).guarddutyconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_guardduty_member" { + continue + } + + idParts := strings.Split(rs.Primary.ID, ":") + accountID := idParts[1] + detectorID := idParts[0] + + input := &guardduty.GetMembersInput{ + AccountIds: []*string{aws.String(accountID)}, + DetectorId: aws.String(detectorID), + } + + gmo, err := conn.GetMembers(input) + if err != nil { + if isAWSErr(err, guardduty.ErrCodeBadRequestException, "The request is rejected because the input detectorId is not owned by the current account.") { + return nil + } + return err + } + + if len(gmo.Members) < 1 { + continue + } + + return fmt.Errorf("Expected GuardDuty Detector to be destroyed, %s found", rs.Primary.ID) + } + + return nil +} + +func testAccCheckAwsGuardDutyMemberExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { 
+ rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + idParts := strings.Split(rs.Primary.ID, ":") + accountID := idParts[1] + detectorID := idParts[0] + + input := &guardduty.GetMembersInput{ + AccountIds: []*string{aws.String(accountID)}, + DetectorId: aws.String(detectorID), + } + + conn := testAccProvider.Meta().(*AWSClient).guarddutyconn + gmo, err := conn.GetMembers(input) + if err != nil { + return err + } + + if len(gmo.Members) < 1 { + return fmt.Errorf("Not found: %s", name) + } + + return nil + } +} + +func testAccGuardDutyMemberConfig_basic(accountID, email string) string { + return fmt.Sprintf(` +%[1]s + +resource "aws_guardduty_member" "test" { + account_id = "%[2]s" + detector_id = "${aws_guardduty_detector.test.id}" + email = "%[3]s" +} +`, testAccGuardDutyDetectorConfig_basic1, accountID, email) +} diff --git a/aws/resource_aws_guardduty_test.go b/aws/resource_aws_guardduty_test.go index 5fb8d764f34..bee271457ca 100644 --- a/aws/resource_aws_guardduty_test.go +++ b/aws/resource_aws_guardduty_test.go @@ -10,6 +10,10 @@ func TestAccAWSGuardDuty(t *testing.T) { "basic": testAccAwsGuardDutyDetector_basic, "import": testAccAwsGuardDutyDetector_import, }, + "Member": { + "basic": testAccAwsGuardDutyMember_basic, + "import": testAccAwsGuardDutyMember_import, + }, } for group, m := range testCases { diff --git a/website/aws.erb b/website/aws.erb index ee605f3fb6f..530c8d4fef8 100644 --- a/website/aws.erb +++ b/website/aws.erb @@ -925,6 +925,10 @@ > aws_guardduty_detector + + > + aws_guardduty_member + diff --git a/website/docs/r/guardduty_member.html.markdown b/website/docs/r/guardduty_member.html.markdown new file mode 100644 index 00000000000..13e8d6a0382 --- /dev/null +++ b/website/docs/r/guardduty_member.html.markdown @@ -0,0 +1,55 @@ +--- +layout: "aws" +page_title: "AWS: aws_guardduty_member" +sidebar_current: "docs-aws-resource-guardduty-member" +description: |- + Provides a resource to manage a 
GuardDuty member +--- + +# aws_guardduty_member + +Provides a resource to manage a GuardDuty member. + +~> **NOTE:** Currently after using this resource, you must manually invite and accept member account invitations before GuardDuty will begin sending cross-account events. More information for how to accomplish this via the AWS Console or API can be found in the [GuardDuty User Guide](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_accounts.html). Terraform implementation of member invitation and acceptance resources can be tracked in [Github](https://github.com/terraform-providers/terraform-provider-aws/issues/2489). + +## Example Usage + +```hcl +resource "aws_guardduty_detector" "master" { + enable = true +} + +resource "aws_guardduty_detector" "member" { + provider = "aws.dev" + + enable = true +} + +resource "aws_guardduty_member" "member" { + account_id = "${aws_guardduty_detector.member.account_id}" + detector_id = "${aws_guardduty_detector.master.id}" + email = "required@example.com" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `account_id` - (Required) AWS account ID for member account. +* `detector_id` - (Required) The detector ID of the GuardDuty account where you want to create member accounts. +* `email` - (Required) Email address for member account. + +## Attributes Reference + +The following additional attributes are exported: + +* `id` - The ID of the GuardDuty member + +## Import + +GuardDuty members can be imported using the the master GuardDuty detector ID and member AWS account ID, e.g. 
+ +``` +$ terraform import aws_guardduty_member.MyMember 00b00fd5aecc0ab60a708659477e9617:123456789012 +``` From 688157819987ede200d74430aadd2bb3c3d7fb21 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 9 Jan 2018 15:15:06 +0000 Subject: [PATCH 133/350] resource/aws_config_delivery_channel: Retry deletion --- aws/resource_aws_config_delivery_channel.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_config_delivery_channel.go b/aws/resource_aws_config_delivery_channel.go index 7d23727ac09..78501fe8c61 100644 --- a/aws/resource_aws_config_delivery_channel.go +++ b/aws/resource_aws_config_delivery_channel.go @@ -162,7 +162,18 @@ func resourceAwsConfigDeliveryChannelDelete(d *schema.ResourceData, meta interfa input := configservice.DeleteDeliveryChannelInput{ DeliveryChannelName: aws.String(d.Id()), } - _, err := conn.DeleteDeliveryChannel(&input) + + err := resource.Retry(30*time.Second, func() *resource.RetryError { + _, err := conn.DeleteDeliveryChannel(&input) + if err != nil { + if isAWSErr(err, configservice.ErrCodeLastDeliveryChannelDeleteFailedException, "there is a running configuration recorder") { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + } + return nil + }) if err != nil { return fmt.Errorf("Unable to delete delivery channel: %s", err) } From 9dda90ae1e55fd0e0b626cd0a97c98923d3047c7 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 10 Jan 2018 07:50:01 +0000 Subject: [PATCH 134/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f5540b4da70..f5b0d4c170b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ BUG FIXES: * resource/aws_appautoscaling_target: Make `role_arn` optional & computed [GH-2889] * resource/aws_ssm_maintenance_window: Respect `enabled` during updates [GH-2818] * resource/aws_lb_target_group: Fix max prefix length check [GH-2790] +* 
resource/aws_config_delivery_channel: Retry deletion [GH-2910] ## 1.6.0 (December 18, 2017) From f8444edf2b191713df30d49ccd73c745097cb4c8 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 9 Jan 2018 16:34:50 +0000 Subject: [PATCH 135/350] test/aws_ecs_service: Randomize names --- aws/resource_aws_ecs_service_test.go | 437 ++++++++++++++++----------- 1 file changed, 263 insertions(+), 174 deletions(-) diff --git a/aws/resource_aws_ecs_service_test.go b/aws/resource_aws_ecs_service_test.go index 4f754891d29..d513bbe5566 100644 --- a/aws/resource_aws_ecs_service_test.go +++ b/aws/resource_aws_ecs_service_test.go @@ -84,22 +84,27 @@ func TestParseTaskDefinition(t *testing.T) { } } -func TestAccAWSEcsServiceWithARN(t *testing.T) { - rInt := acctest.RandInt() +func TestAccAWSEcsService_withARN(t *testing.T) { + rString := acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-arn-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-arn-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-arn-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsService(rInt), + Config: testAccAWSEcsService(clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), ), }, { - Config: testAccAWSEcsServiceModified(rInt), + Config: testAccAWSEcsServiceModified(clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), ), @@ -108,15 +113,20 @@ func TestAccAWSEcsServiceWithARN(t *testing.T) { }) } -func TestAccAWSEcsServiceWithUnnormalizedPlacementStrategy(t *testing.T) { - rInt := acctest.RandInt() +func TestAccAWSEcsService_withUnnormalizedPlacementStrategy(t *testing.T) { + rString := acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-ups-%s", 
rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-ups-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-ups-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithInterchangeablePlacementStrategy(rInt), + Config: testAccAWSEcsServiceWithInterchangeablePlacementStrategy(clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), ), @@ -125,22 +135,27 @@ func TestAccAWSEcsServiceWithUnnormalizedPlacementStrategy(t *testing.T) { }) } -func TestAccAWSEcsServiceWithFamilyAndRevision(t *testing.T) { - rName := acctest.RandomWithPrefix("tf-test") +func TestAccAWSEcsService_withFamilyAndRevision(t *testing.T) { + rString := acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-far-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-far-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-far-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithFamilyAndRevision(rName), + Config: testAccAWSEcsServiceWithFamilyAndRevision(clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.jenkins"), ), }, { - Config: testAccAWSEcsServiceWithFamilyAndRevisionModified(rName), + Config: testAccAWSEcsServiceWithFamilyAndRevisionModified(clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.jenkins"), ), @@ -150,11 +165,18 @@ func TestAccAWSEcsServiceWithFamilyAndRevision(t *testing.T) { } // Regression for https://github.com/hashicorp/terraform/issues/2427 -func TestAccAWSEcsServiceWithRenamedCluster(t *testing.T) { +func 
TestAccAWSEcsService_withRenamedCluster(t *testing.T) { + rString := acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-rc-%s", rString) + uClusterName := fmt.Sprintf("tf-acc-cluster-svc-w-rc-updated-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-rc-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-rc-%s", rString) + originalRegexp := regexp.MustCompile( - "^arn:aws:ecs:[^:]+:[0-9]+:cluster/terraformecstest3$") + "^arn:aws:ecs:[^:]+:[0-9]+:cluster/" + clusterName + "$") modifiedRegexp := regexp.MustCompile( - "^arn:aws:ecs:[^:]+:[0-9]+:cluster/terraformecstest3modified$") + "^arn:aws:ecs:[^:]+:[0-9]+:cluster/" + uClusterName + "$") resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -162,7 +184,7 @@ func TestAccAWSEcsServiceWithRenamedCluster(t *testing.T) { CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithRenamedCluster, + Config: testAccAWSEcsServiceWithRenamedCluster(clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.ghost"), resource.TestMatchResourceAttr( @@ -171,7 +193,7 @@ func TestAccAWSEcsServiceWithRenamedCluster(t *testing.T) { }, { - Config: testAccAWSEcsServiceWithRenamedClusterModified, + Config: testAccAWSEcsServiceWithRenamedCluster(uClusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.ghost"), resource.TestMatchResourceAttr( @@ -183,7 +205,17 @@ func TestAccAWSEcsServiceWithRenamedCluster(t *testing.T) { } func TestAccAWSEcsService_healthCheckGracePeriodSeconds(t *testing.T) { - rName := acctest.RandomWithPrefix("tf-acc") + rString := acctest.RandString(8) + + vpcNameTag := fmt.Sprintf("tf-acc-vpc-svc-w-hcgps-%s", rString) + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-hcgps-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-hcgps-%s", rString) + roleName := 
fmt.Sprintf("tf-acc-role-svc-w-hcgps-%s", rString) + policyName := fmt.Sprintf("tf-acc-policy-svc-w-hcgps-%s", rString) + tgName := fmt.Sprintf("tf-acc-tg-svc-w-hcgps-%s", rString) + lbName := fmt.Sprintf("tf-acc-lb-svc-w-hcgps-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-hcgps-%s", rString) + resourceName := "aws_ecs_service.with_alb" resource.Test(t, resource.TestCase{ @@ -192,22 +224,26 @@ func TestAccAWSEcsService_healthCheckGracePeriodSeconds(t *testing.T) { CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(rName, t.Name(), -1), + Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(vpcNameTag, clusterName, tdName, + roleName, policyName, tgName, lbName, svcName, -1), ExpectError: regexp.MustCompile(`must be between 0 and 1800`), }, { - Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(rName, t.Name(), 1801), + Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(vpcNameTag, clusterName, tdName, + roleName, policyName, tgName, lbName, svcName, 1801), ExpectError: regexp.MustCompile(`must be between 0 and 1800`), }, { - Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(rName, t.Name(), 300), + Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(vpcNameTag, clusterName, tdName, + roleName, policyName, tgName, lbName, svcName, 300), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists(resourceName), resource.TestCheckResourceAttr(resourceName, "health_check_grace_period_seconds", "300"), ), }, { - Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(rName, t.Name(), 600), + Config: testAccAWSEcsService_healthCheckGracePeriodSeconds(vpcNameTag, clusterName, tdName, + roleName, policyName, tgName, lbName, svcName, 600), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists(resourceName), resource.TestCheckResourceAttr(resourceName, "health_check_grace_period_seconds", "600"), @@ -218,13 +254,21 
@@ func TestAccAWSEcsService_healthCheckGracePeriodSeconds(t *testing.T) { } func TestAccAWSEcsService_withIamRole(t *testing.T) { + rString := acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-iam-role-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-iam-role-%s", rString) + roleName := fmt.Sprintf("tf-acc-role-svc-w-iam-role-%s", rString) + policyName := fmt.Sprintf("tf-acc-policy-svc-w-iam-role-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-iam-role-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsService_withIamRole, + Config: testAccAWSEcsService_withIamRole(clusterName, tdName, roleName, policyName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.ghost"), ), @@ -234,14 +278,19 @@ func TestAccAWSEcsService_withIamRole(t *testing.T) { } func TestAccAWSEcsService_withDeploymentValues(t *testing.T) { - rInt := acctest.RandInt() + rString := acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-dv-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-dv-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-dv-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithDeploymentValues(rInt), + Config: testAccAWSEcsServiceWithDeploymentValues(clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), resource.TestCheckResourceAttr( @@ -256,19 +305,27 @@ func TestAccAWSEcsService_withDeploymentValues(t *testing.T) { // Regression for https://github.com/hashicorp/terraform/issues/3444 func TestAccAWSEcsService_withLbChanges(t *testing.T) { + rString := 
acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-lbc-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-lbc-%s", rString) + roleName := fmt.Sprintf("tf-acc-role-svc-w-lbc-%s", rString) + policyName := fmt.Sprintf("tf-acc-policy-svc-w-lbc-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-lbc-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsService_withLbChanges, + Config: testAccAWSEcsService_withLbChanges(clusterName, tdName, roleName, policyName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.with_lb_changes"), ), }, { - Config: testAccAWSEcsService_withLbChanges_modified, + Config: testAccAWSEcsService_withLbChanges_modified(clusterName, tdName, roleName, policyName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.with_lb_changes"), ), @@ -279,17 +336,22 @@ func TestAccAWSEcsService_withLbChanges(t *testing.T) { // Regression for https://github.com/hashicorp/terraform/issues/3361 func TestAccAWSEcsService_withEcsClusterName(t *testing.T) { - clusterName := regexp.MustCompile("^terraformecstestcluster$") + rString := acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-cluster-name-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-cluster-name-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-cluster-name-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithEcsClusterName, + Config: testAccAWSEcsServiceWithEcsClusterName(clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.jenkins"), - 
resource.TestMatchResourceAttr( + resource.TestCheckResourceAttr( "aws_ecs_service.jenkins", "cluster", clusterName), ), }, @@ -298,7 +360,15 @@ func TestAccAWSEcsService_withEcsClusterName(t *testing.T) { } func TestAccAWSEcsService_withAlb(t *testing.T) { - rName := acctest.RandomWithPrefix("tf-acc") + rString := acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-alb-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-alb-%s", rString) + roleName := fmt.Sprintf("tf-acc-role-svc-w-alb-%s", rString) + policyName := fmt.Sprintf("tf-acc-policy-svc-w-alb-%s", rString) + tgName := fmt.Sprintf("tf-acc-tg-svc-w-alb-%s", rString) + lbName := fmt.Sprintf("tf-acc-lb-svc-w-alb-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-alb-%s", rString) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -306,7 +376,7 @@ func TestAccAWSEcsService_withAlb(t *testing.T) { CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithAlb(rName), + Config: testAccAWSEcsServiceWithAlb(clusterName, tdName, roleName, policyName, tgName, lbName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.with_alb"), ), @@ -315,22 +385,27 @@ func TestAccAWSEcsService_withAlb(t *testing.T) { }) } -func TestAccAWSEcsServiceWithPlacementStrategy(t *testing.T) { - rInt := acctest.RandInt() +func TestAccAWSEcsService_withPlacementStrategy(t *testing.T) { + rString := acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-ps-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-ps-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-ps-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsService(rInt), + Config: testAccAWSEcsService(clusterName, tdName, svcName), Check: 
resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), resource.TestCheckResourceAttr("aws_ecs_service.mongo", "placement_strategy.#", "0"), ), }, { - Config: testAccAWSEcsServiceWithPlacementStrategy(rInt), + Config: testAccAWSEcsServiceWithPlacementStrategy(clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), resource.TestCheckResourceAttr("aws_ecs_service.mongo", "placement_strategy.#", "1"), @@ -340,15 +415,20 @@ func TestAccAWSEcsServiceWithPlacementStrategy(t *testing.T) { }) } -func TestAccAWSEcsServiceWithPlacementConstraints(t *testing.T) { - rInt := acctest.RandInt() +func TestAccAWSEcsService_withPlacementConstraints(t *testing.T) { + rString := acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-pc-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-pc-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-pc-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithPlacementConstraint(rInt), + Config: testAccAWSEcsServiceWithPlacementConstraint(clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), resource.TestCheckResourceAttr("aws_ecs_service.mongo", "placement_constraints.#", "1"), @@ -358,15 +438,20 @@ func TestAccAWSEcsServiceWithPlacementConstraints(t *testing.T) { }) } -func TestAccAWSEcsServiceWithPlacementConstraints_emptyExpression(t *testing.T) { - rInt := acctest.RandInt() +func TestAccAWSEcsService_withPlacementConstraints_emptyExpression(t *testing.T) { + rString := acctest.RandString(8) + + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-pc-ee-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-pc-ee-%s", rString) + svcName := 
fmt.Sprintf("tf-acc-svc-w-pc-ee-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithPlacementConstraintEmptyExpression(rInt), + Config: testAccAWSEcsServiceWithPlacementConstraintEmptyExpression(clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), resource.TestCheckResourceAttr("aws_ecs_service.mongo", "placement_constraints.#", "1"), @@ -376,15 +461,22 @@ func TestAccAWSEcsServiceWithPlacementConstraints_emptyExpression(t *testing.T) }) } -func TestAccAWSEcsServiceWithLaunchTypeFargate(t *testing.T) { - rInt := acctest.RandInt() +func TestAccAWSEcsService_withLaunchTypeFargate(t *testing.T) { + rString := acctest.RandString(8) + + sg1Name := fmt.Sprintf("tf-acc-sg-1-svc-w-ltf-%s", rString) + sg2Name := fmt.Sprintf("tf-acc-sg-2-svc-w-ltf-%s", rString) + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-ltf-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-ltf-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-ltf-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithLaunchTypeFargate(rInt), + Config: testAccAWSEcsServiceWithLaunchTypeFargate(sg1Name, sg2Name, clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.main"), resource.TestCheckResourceAttr("aws_ecs_service.main", "launch_type", "FARGATE"), @@ -394,14 +486,22 @@ func TestAccAWSEcsServiceWithLaunchTypeFargate(t *testing.T) { }) } -func TestAccAWSEcsServiceWithNetworkConfiguration(t *testing.T) { +func TestAccAWSEcsService_withNetworkConfiguration(t *testing.T) { + rString := acctest.RandString(8) + + sg1Name 
:= fmt.Sprintf("tf-acc-sg-1-svc-w-nc-%s", rString) + sg2Name := fmt.Sprintf("tf-acc-sg-2-svc-w-nc-%s", rString) + clusterName := fmt.Sprintf("tf-acc-cluster-svc-w-nc-%s", rString) + tdName := fmt.Sprintf("tf-acc-td-svc-w-nc-%s", rString) + svcName := fmt.Sprintf("tf-acc-svc-w-nc-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithNetworkConfigration(acctest.RandString(5)), + Config: testAccAWSEcsServiceWithNetworkConfigration(sg1Name, sg2Name, clusterName, tdName, svcName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.main"), ), @@ -457,14 +557,14 @@ func testAccCheckAWSEcsServiceExists(name string) resource.TestCheckFunc { } } -func testAccAWSEcsService(rInt int) string { +func testAccAWSEcsService(clusterName, tdName, svcName string) string { return fmt.Sprintf(` resource "aws_ecs_cluster" "default" { - name = "terraformecstest%d" + name = "%s" } resource "aws_ecs_task_definition" "mongo" { - family = "mongodb" + family = "%s" container_definitions = < Date: Wed, 10 Jan 2018 10:21:53 +0100 Subject: [PATCH 136/350] r/aws_elasticsearch_domain: export kibana endpoint (#2804) * r/aws_elasticsearch_domain: export kibana endpoint * add testacc to check kibana_endpoint; add documentation; extract function to get kibana endpoint --- aws/resource_aws_elasticsearch_domain.go | 10 ++++++++++ aws/resource_aws_elasticsearch_domain_test.go | 1 + website/docs/r/elasticsearch_domain.html.markdown | 1 + 3 files changed, 12 insertions(+) diff --git a/aws/resource_aws_elasticsearch_domain.go b/aws/resource_aws_elasticsearch_domain.go index 86f1e9c7b45..634c40d02da 100644 --- a/aws/resource_aws_elasticsearch_domain.go +++ b/aws/resource_aws_elasticsearch_domain.go @@ -63,6 +63,10 @@ func resourceAwsElasticSearchDomain() *schema.Resource { Type: schema.TypeString, 
Computed: true, }, + "kibana_endpoint": { + Type: schema.TypeString, + Computed: true, + }, "ebs_options": { Type: schema.TypeList, Optional: true, @@ -480,12 +484,14 @@ func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{} if err != nil { return err } + d.Set("kibana_endpoint", getKibanaEndpoint(d)) if ds.Endpoint != nil { return fmt.Errorf("%q: Elasticsearch domain in VPC expected to have null Endpoint value", d.Id()) } } else { if ds.Endpoint != nil { d.Set("endpoint", *ds.Endpoint) + d.Set("kibana_endpoint", getKibanaEndpoint(d)) } if ds.Endpoints != nil { return fmt.Errorf("%q: Elasticsearch domain not in VPC expected to have null Endpoints value", d.Id()) @@ -676,3 +682,7 @@ func resourceAwsElasticSearchDomainDelete(d *schema.ResourceData, meta interface return err } + +func getKibanaEndpoint(d *schema.ResourceData) string { + return d.Get("endpoint").(string) + "/_plugin/kibana/" +} diff --git a/aws/resource_aws_elasticsearch_domain_test.go b/aws/resource_aws_elasticsearch_domain_test.go index 5640fffb8cd..6c9ed40bc87 100644 --- a/aws/resource_aws_elasticsearch_domain_test.go +++ b/aws/resource_aws_elasticsearch_domain_test.go @@ -28,6 +28,7 @@ func TestAccAWSElasticSearchDomain_basic(t *testing.T) { testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), resource.TestCheckResourceAttr( "aws_elasticsearch_domain.example", "elasticsearch_version", "1.5"), + resource.TestMatchResourceAttr("aws_elasticsearch_domain.example", "kibana_endpoint", regexp.MustCompile(".*es.amazonaws.com/_plugin/kibana/")), ), }, }, diff --git a/website/docs/r/elasticsearch_domain.html.markdown b/website/docs/r/elasticsearch_domain.html.markdown index dd825dcfc50..bc86cf60fcf 100644 --- a/website/docs/r/elasticsearch_domain.html.markdown +++ b/website/docs/r/elasticsearch_domain.html.markdown @@ -109,6 +109,7 @@ The following attributes are exported: * `arn` - Amazon Resource Name (ARN) of the domain. 
* `domain_id` - Unique identifier for the domain. * `endpoint` - Domain-specific endpoint used to submit index, search, and data upload requests. +* `kibana_endpoint` - Domain-specific endpoint for kibana without https scheme. * `vpc_options.0.availability_zones` - If the domain was created inside a VPC, the names of the availability zones the configured `subnet_ids` were created inside. * `vpc_options.0.vpc_id` - If the domain was created inside a VPC, the ID of the VPC. From ec94bd9eb94e3f47b6df0fed27cda59bfaa775a5 Mon Sep 17 00:00:00 2001 From: Gauthier Wallet Date: Wed, 10 Jan 2018 10:22:24 +0100 Subject: [PATCH 137/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f5b0d4c170b..390abd55e67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ ENHANCEMENTS: * resource/aws_vpc_peering_connection_accepter: Add support for cross-region VPC peering [GH-2508] * data-source/aws_iam_server_certificate: Add support for retrieving public key [GH-2749] * data-source/aws_vpc_peering_connection: Add support for cross-region VPC peering [GH-2508] +* resource/aws_elasticsearch_domain: export kibana endpoint [GH-2804] BUG FIXES: From 2b5cfb089a7679a6ff3888455136ebe34a82ba81 Mon Sep 17 00:00:00 2001 From: Puneeth Nanjundaswamy Date: Wed, 10 Jan 2018 10:48:57 +0100 Subject: [PATCH 138/350] Bump aws-sdk-go to v1.12.59 --- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../service/directoryservice/api.go | 32 +- .../aws/aws-sdk-go/service/kms/api.go | 96 +- vendor/vendor.json | 846 +++++++++--------- 4 files changed, 501 insertions(+), 475 deletions(-) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index f040f8cbada..d220989d3b1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of 
this SDK -const SDKVersion = "1.12.57" +const SDKVersion = "1.12.59" diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go index 2d6395bcf57..ef2d0cad721 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go @@ -4805,6 +4805,10 @@ type CreateMicrosoftADInput struct { // console Directory Details page after the directory is created. Description *string `type:"string"` + // AWS Microsoft AD is available in two editions: Standard and Enterprise. Enterprise + // is the default. + Edition *string `type:"string" enum:"DirectoryEdition"` + // The fully qualified domain name for the directory, such as corp.example.com. // This name will resolve inside your VPC only. It does not need to be publicly // resolvable. @@ -4868,6 +4872,12 @@ func (s *CreateMicrosoftADInput) SetDescription(v string) *CreateMicrosoftADInpu return s } +// SetEdition sets the Edition field's value. +func (s *CreateMicrosoftADInput) SetEdition(v string) *CreateMicrosoftADInput { + s.Edition = &v + return s +} + // SetName sets the Name field's value. func (s *CreateMicrosoftADInput) SetName(v string) *CreateMicrosoftADInput { s.Name = &v @@ -6208,6 +6218,9 @@ type DirectoryDescription struct { // which the AD Connector is connected. DnsIpAddrs []*string `type:"list"` + // The edition associated with this directory. + Edition *string `type:"string" enum:"DirectoryEdition"` + // Specifies when the directory was created. LaunchTime *time.Time `type:"timestamp" timestampFormat:"unix"` @@ -6301,6 +6314,12 @@ func (s *DirectoryDescription) SetDnsIpAddrs(v []*string) *DirectoryDescription return s } +// SetEdition sets the Edition field's value. +func (s *DirectoryDescription) SetEdition(v string) *DirectoryDescription { + s.Edition = &v + return s +} + // SetLaunchTime sets the LaunchTime field's value. 
func (s *DirectoryDescription) SetLaunchTime(v time.Time) *DirectoryDescription { s.LaunchTime = &v @@ -6534,10 +6553,7 @@ type DirectoryVpcSettingsDescription struct { // The list of Availability Zones that the directory is in. AvailabilityZones []*string `type:"list"` - // The security group identifier for the directory. If the directory was created - // before 8/1/2014, this is the identifier of the directory members security - // group that was created when the directory was created. If the directory was - // created after this date, this value is null. + // The domain controller security group identifier for the directory. SecurityGroupId *string `type:"string"` // The identifiers of the subnets for the directory servers. @@ -8708,6 +8724,14 @@ func (s *VerifyTrustOutput) SetTrustId(v string) *VerifyTrustOutput { return s } +const ( + // DirectoryEditionEnterprise is a DirectoryEdition enum value + DirectoryEditionEnterprise = "Enterprise" + + // DirectoryEditionStandard is a DirectoryEdition enum value + DirectoryEditionStandard = "Standard" +) + const ( // DirectorySizeSmall is a DirectorySize enum value DirectorySizeSmall = "Small" diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go index 5452ee70fc1..4efc28b999e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go @@ -4596,8 +4596,8 @@ type CreateKeyInput struct { // A flag to indicate whether to bypass the key policy lockout safety check. // - // Setting this value to true increases the likelihood that the CMK becomes - // unmanageable. Do not set this value to true indiscriminately. + // Setting this value to true increases the risk that the CMK becomes unmanageable. + // Do not set this value to true indiscriminately. 
// // For more information, refer to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) // section in the AWS Key Management Service Developer Guide. @@ -4634,28 +4634,29 @@ type CreateKeyInput struct { // The key policy to attach to the CMK. // - // If you specify a policy and do not set BypassPolicyLockoutSafetyCheck to - // true, the policy must meet the following criteria: - // - // * It must allow the principal that is making the CreateKey request to - // make a subsequent PutKeyPolicy request on the CMK. This reduces the likelihood - // that the CMK becomes unmanageable. For more information, refer to the - // scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the AWS Key Management Service Developer Guide. - // - // * The principals that are specified in the key policy must exist and be - // visible to AWS KMS. When you create a new AWS principal (for example, - // an IAM user or role), you might need to enforce a delay before specifying - // the new principal in a key policy because the new principal might not - // immediately be visible to AWS KMS. For more information, see Changes that - // I make are not always immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) - // in the IAM User Guide. - // - // If you do not specify a policy, AWS KMS attaches a default key policy to - // the CMK. 
For more information, see Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + // If you provide a key policy, it must meet the following criteria: + // + // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy + // must allow the principal that is making the CreateKey request to make + // a subsequent PutKeyPolicy request on the CMK. This reduces the risk that + // the CMK becomes unmanageable. For more information, refer to the scenario + // in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section of the AWS Key Management Service Developer Guide. + // + // * Each statement in the key policy must contain one or more principals. + // The principals in the key policy must exist and be visible to AWS KMS. + // When you create a new AWS principal (for example, an IAM user or role), + // you might need to enforce a delay before including the new principal in + // a key policy because the new principal might not be immediately visible + // to AWS KMS. For more information, see Changes that I make are not always + // immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the AWS Identity and Access Management User Guide. + // + // If you do not provide a key policy, AWS KMS attaches a default key policy + // to the CMK. For more information, see Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) // in the AWS Key Management Service Developer Guide. // - // The policy size limit is 32 kilobytes (32768 bytes). + // The key policy size limit is 32 kilobytes (32768 bytes). Policy *string `min:"1" type:"string"` // One or more tags. Each tag consists of a tag key and a tag value. 
Tag keys @@ -5878,8 +5879,8 @@ type GetKeyPolicyInput struct { // KeyId is a required field KeyId *string `min:"1" type:"string" required:"true"` - // Specifies the name of the policy. The only valid name is default. To get - // the names of key policies, use ListKeyPolicies. + // Specifies the name of the key policy. The only valid name is default. To + // get the names of key policies, use ListKeyPolicies. // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -5933,7 +5934,7 @@ func (s *GetKeyPolicyInput) SetPolicyName(v string) *GetKeyPolicyInput { type GetKeyPolicyOutput struct { _ struct{} `type:"structure"` - // A policy document in JSON format. + // A key policy document in JSON format. Policy *string `min:"1" type:"string"` } @@ -6976,8 +6977,8 @@ type ListKeyPoliciesOutput struct { // use for the Marker parameter in a subsequent request. NextMarker *string `min:"1" type:"string"` - // A list of policy names. Currently, there is only one policy and it is named - // "Default". + // A list of key policy names. Currently, there is only one key policy per CMK + // and it is always named default. PolicyNames []*string `type:"list"` // A flag that indicates whether there are more items in the list. When this @@ -7337,8 +7338,8 @@ type PutKeyPolicyInput struct { // A flag to indicate whether to bypass the key policy lockout safety check. // - // Setting this value to true increases the likelihood that the CMK becomes - // unmanageable. Do not set this value to true indiscriminately. + // Setting this value to true increases the risk that the CMK becomes unmanageable. + // Do not set this value to true indiscriminately. // // For more information, refer to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) // section in the AWS Key Management Service Developer Guide. 
@@ -7366,24 +7367,25 @@ type PutKeyPolicyInput struct { // The key policy to attach to the CMK. // - // If you do not set BypassPolicyLockoutSafetyCheck to true, the policy must - // meet the following criteria: - // - // * It must allow the principal that is making the PutKeyPolicy request - // to make a subsequent PutKeyPolicy request on the CMK. This reduces the - // likelihood that the CMK becomes unmanageable. For more information, refer - // to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the AWS Key Management Service Developer Guide. - // - // * The principals that are specified in the key policy must exist and be - // visible to AWS KMS. When you create a new AWS principal (for example, - // an IAM user or role), you might need to enforce a delay before specifying - // the new principal in a key policy because the new principal might not - // immediately be visible to AWS KMS. For more information, see Changes that - // I make are not always immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) - // in the IAM User Guide. - // - // The policy size limit is 32 kilobytes (32768 bytes). + // The key policy must meet the following criteria: + // + // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy + // must allow the principal that is making the PutKeyPolicy request to make + // a subsequent PutKeyPolicy request on the CMK. This reduces the risk that + // the CMK becomes unmanageable. For more information, refer to the scenario + // in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section of the AWS Key Management Service Developer Guide. + // + // * Each statement in the key policy must contain one or more principals. 
+ // The principals in the key policy must exist and be visible to AWS KMS. + // When you create a new AWS principal (for example, an IAM user or role), + // you might need to enforce a delay before including the new principal in + // a key policy because the new principal might not be immediately visible + // to AWS KMS. For more information, see Changes that I make are not always + // immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the AWS Identity and Access Management User Guide. + // + // The key policy size limit is 32 kilobytes (32768 bytes). // // Policy is a required field Policy *string `min:"1" type:"string" required:"true"` diff --git a/vendor/vendor.json b/vendor/vendor.json index d53ba0b466f..48d90d4dc79 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -141,844 +141,844 @@ "revisionTime": "2017-07-27T15:54:43Z" }, { - "checksumSHA1": "h8863Fok+80x0JWRr78XXcGywxM=", + "checksumSHA1": "s4OiIOYhXiesEXjwmg1scSc0//o=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "DtuTqKH29YnLjrIJkRYX0HQtXY0=", "path": "github.com/aws/aws-sdk-go/aws/arn", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": 
"2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "9nE/FjZ4pYrT883KtV2/aI+Gayo=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - 
"revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "OnU/n7R33oYXiB4SAGd5pK7I0Bs=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { 
"checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "BT2+PhuOjbAuMcLpdop0FKQY5EY=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "9GvAyILJ7g+VUg8Ef5DsT5GuYsg=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "iU00ZjhAml/13g+1YXT21IqoXqg=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", 
+ "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "NStHCXEvYqG72GknZyv1jaKaeH0=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "yHfT5DTbeCLs4NE2Rgnqrhe15ls=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - 
"versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "pkeoOfZpHRvFG/AOZeTf0lwtsFg=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - 
"revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "vnYDXA1NxJ7Hu+DMfXNk1UnmkWg=", "path": "github.com/aws/aws-sdk-go/service/acm", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "DPl/OkvEUjrd+XKqX73l6nUNw3U=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": 
"X8tOI6i+RJwXIgg1qBjDNclyG/0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "aDAaH6YiA50IrJ5Smfg0fovrniA=", "path": "github.com/aws/aws-sdk-go/service/appsync", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "oBXDw1zQTfxcKsK3ZjtKcS7gBLI=", "path": "github.com/aws/aws-sdk-go/service/athena", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "ITAwWyJp4t9AGfUXm9M3pFWTHVA=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Zz8qI6RloveM1zrXAglLxJZT1ZA=", "path": "github.com/aws/aws-sdk-go/service/batch", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": 
"2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "/nO06EpnD22+Ex80gHi4UYrAvKc=", "path": "github.com/aws/aws-sdk-go/service/budgets", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "6gM3CZZgiB0JvS7EK1c31Q8L09U=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "T80IDetBz1hqJpq5Wqmx3MwCh8w=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "bYrI9mxspB0xDFZEy3OIfWuez5g=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "oB+M+kOmYG28V0PuI75IF6E+/w8=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": 
"v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Nc3vXlV7s309PprScYpRDPQWeDQ=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "bPh7NF3mLpGMV0rIakolMPHqMyw=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "P6qyaFX9X6Nnvm3avLigjmjfYds=", "path": "github.com/aws/aws-sdk-go/service/codebuild", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "7nW1Ho2X3RcUU8FaFBhJIUeuDNw=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "+petAU2sPfykSoVBAitmGxvGOlw=", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": 
"5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "LKw7fnNwq17Eqy0clzS/LK89vS4=", "path": "github.com/aws/aws-sdk-go/service/codepipeline", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "aXh1KIbNX+g+tH+lh3pk++9lm3k=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentity", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "IWi9xZz+OncotjM/vJ87Iffg2Qk=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentityprovider", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "56F6Stg8hQ1kxiAEzqB0TDctW9k=", "path": "github.com/aws/aws-sdk-go/service/configservice", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { 
"checksumSHA1": "hYCwLQdIjHj8rMHLGVyUVhecI4s=", "path": "github.com/aws/aws-sdk-go/service/databasemigrationservice", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "siWpqsOY3u69XkgPF8+F8V1K0Pc=", "path": "github.com/aws/aws-sdk-go/service/dax", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "26CWoHQP/dyL2VzE5ZNd8zNzhko=", "path": "github.com/aws/aws-sdk-go/service/devicefarm", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "6g94rUHAgjcqMMTtMqKUbLU37wY=", "path": "github.com/aws/aws-sdk-go/service/directconnect", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { - "checksumSHA1": "oFnS6I0u7KqnxK0/r1uoz8rTkxI=", + "checksumSHA1": "edM36y+5lmI7Hne0/38qapLzGO4=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + 
"revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "0TXXUPjrbOCHpX555B6suH36Nnk=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "INaeHZ2L5x6RlrcQBm4q1hFqNRM=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "uEv9kkBsVIjg7K4+Y8TVlU0Cc8o=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "3B3RtWG7IY9qhFhWGEwroeMxnPI=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "eoM9nF5iVMbuGOmkY33d19aHt8Y=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", 
- "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "dU5MPXUUOYD/E9sNncpFZ/U86Cw=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "pj8mBWT3HE0Iid6HSmhw7lmyZDU=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "VYGtTaSiajfKOVTbi9/SNmbiIac=", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "SZ7yLDZ6RvMhpWe0Goyem64kgyA=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "WYqHhdRNsiGGBLWlBLbOItZf+zA=", "path": 
"github.com/aws/aws-sdk-go/service/elb", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "ae7VWg/xuXpnSD6wGumN44qEd+Q=", "path": "github.com/aws/aws-sdk-go/service/elbv2", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "NbkH6F+792jQ7BW4lGCb+vJVw58=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "5btWHj2fZrPc/zfYdJLPaOcivxI=", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Rodm1XwZ9Ncah1NLHep0behQpXg=", "path": "github.com/aws/aws-sdk-go/service/gamelift", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": 
"v1.12.59" }, { "checksumSHA1": "oDoGvSfmO2Z099ixV2HXn+SDeHE=", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "HRmbBf3dUEBAfdC2xKaoWAGeM7Y=", "path": "github.com/aws/aws-sdk-go/service/glue", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "6JlxJoy1JCArNK2qBkaJ5IV6qBc=", "path": "github.com/aws/aws-sdk-go/service/guardduty", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "oZaxMqnwl2rA+V/W0tJ3uownORI=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "nMdRXIfhgvEKBHnLX61Ze3EUJWU=", "path": "github.com/aws/aws-sdk-go/service/inspector", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": 
"2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "pZwCI4DpP5hcMa/ItKhiwo/ukd0=", "path": "github.com/aws/aws-sdk-go/service/iot", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "IoSyRZhlL0petrB28nXk5jKM9YA=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { - "checksumSHA1": "oAFLgD0uJiVOZkFkL5dd/wUgBz4=", + "checksumSHA1": "JOfgA6YehzwZ/4Mgh+3lY/+Gz3E=", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "XDVse9fKF0RkAywzzgsO31AV4oc=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "HluEcyZNywrbKnj/aR3tXbu29d8=", "path": "github.com/aws/aws-sdk-go/service/lexmodelbuildingservice", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", 
- "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "wjs9YBsHx0YQH0zKBA7Ibd1UV5Y=", "path": "github.com/aws/aws-sdk-go/service/lightsail", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "4VfB5vMLNYs0y6K159YCBgo9T3c=", "path": "github.com/aws/aws-sdk-go/service/mediaconvert", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Ox3VWHYSQq0YKmlr0paUPdr5W/0=", "path": "github.com/aws/aws-sdk-go/service/medialive", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Rs7QtkcLl3XNPnKb8ss/AhF2X50=", "path": "github.com/aws/aws-sdk-go/service/mediapackage", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "QjiIL8LrlhwrQw8FboF+wMNvUF0=", "path": "github.com/aws/aws-sdk-go/service/mediastore", - 
"revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "ZY1SJNE03I6NL2OBJD9hlwVsqO0=", "path": "github.com/aws/aws-sdk-go/service/mediastoredata", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "ynB7Flcudp0VOqBVKZJ+23DtLHU=", "path": "github.com/aws/aws-sdk-go/service/mq", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "fpsBu+F79ktlLRwal1GugVMUDo0=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "IddJCt5BrI6zRuUpFJqqnS9qrIM=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": 
"vP1FcccUZbuUlin7ME89w1GVJtA=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "fgSXmayOZRgur/41Gp1tFvH0GGg=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "sCaHoPWsJXRHFbilUKwN71qFTOI=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "QZU8vR9cOIenYiH+Ywl4Gzfnlp0=", "path": "github.com/aws/aws-sdk-go/service/servicecatalog", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "dk6ebvA0EYgdPyc5HPKLBPEtsm4=", "path": "github.com/aws/aws-sdk-go/service/servicediscovery", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": 
"2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Ex1Ma0SFGpqeNuPbeXZtsliZ3zo=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "maVXeR3WDAkONlzf04e4mDgCYxo=", "path": "github.com/aws/aws-sdk-go/service/sfn", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "ADoR4mlCW5usH8iOa6mPNSy49LM=", "path": "github.com/aws/aws-sdk-go/service/shield", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "B3CgAFSREebpsFoFOo4vrQ6u04w=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "FfY8w4DM8XIULdRnFhd3Um8Mj8c=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": 
"3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Wx189wAbIhWChx4kVbvsyqKMF4U=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Al7CCaQRNd22FwUZXigUEWN820M=", "path": "github.com/aws/aws-sdk-go/service/ssm", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "W1oFtpaT4TWIIJrAvFcn/XdcT7g=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "Uw4pOUxSMbx4xBHUcOUkNhtnywE=", "path": "github.com/aws/aws-sdk-go/service/swf", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "on6d7Hydx2bM9jkFOf1JZcZZgeY=", "path": "github.com/aws/aws-sdk-go/service/waf", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": 
"v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "rHqjsOndIR82gX5mSKybaRWf3UY=", "path": "github.com/aws/aws-sdk-go/service/wafregional", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "y0XODBzpJjZvR1e9F6ULItV5nG4=", "path": "github.com/aws/aws-sdk-go/service/workspaces", - "revision": "5177d71d80f123f6d82aaf762387e39b88c5ba23", - "revisionTime": "2018-01-09T00:04:15Z", - "version": "v1.12.57", - "versionExact": "v1.12.57" + "revision": "3c754f1d340244540350f0a00b940781bae9c905", + "revisionTime": "2018-01-09T22:08:02Z", + "version": "v1.12.59", + "versionExact": "v1.12.59" }, { "checksumSHA1": "usT4LCSQItkFvFOQT7cBlkCuGaE=", From 81bba6b1f567aed561c6a6a30916504ee0886c68 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 10 Jan 2018 09:55:01 +0000 Subject: [PATCH 139/350] test/aws_autoscaling_group: Replace hardcoded AMIs w/ data source --- aws/resource_aws_autoscaling_group_test.go | 379 ++++++++++++++++++--- 1 file changed, 331 insertions(+), 48 deletions(-) diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index 0e4cd7770a4..ea9251289db 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -879,9 +879,23 @@ func TestAccAWSAutoScalingGroup_emptyAvailabilityZones(t *testing.T) { } const testAccAWSAutoScalingGroupConfig_autoGeneratedName = ` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = 
["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_autoscaling_group" "bar" { @@ -894,9 +908,23 @@ resource "aws_autoscaling_group" "bar" { ` const testAccAWSAutoScalingGroupConfig_namePrefix = ` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "test" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_autoscaling_group" "test" { @@ -910,9 +938,23 @@ resource "aws_autoscaling_group" "test" { ` const testAccAWSAutoScalingGroupConfig_terminationPoliciesEmpty = ` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_autoscaling_group" "bar" { @@ -926,9 +968,23 @@ resource "aws_autoscaling_group" "bar" { ` const testAccAWSAutoScalingGroupConfig_terminationPoliciesExplicitDefault = ` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_autoscaling_group" "bar" { @@ -943,9 +999,23 @@ resource "aws_autoscaling_group" "bar" { ` const 
testAccAWSAutoScalingGroupConfig_terminationPoliciesUpdate = ` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_autoscaling_group" "bar" { @@ -961,9 +1031,23 @@ resource "aws_autoscaling_group" "bar" { func testAccAWSAutoScalingGroupConfig(name string) string { return fmt.Sprintf(` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_placement_group" "test" { @@ -1006,14 +1090,28 @@ resource "aws_autoscaling_group" "bar" { func testAccAWSAutoScalingGroupConfigUpdate(name string) string { return fmt.Sprintf(` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_launch_configuration" "new" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_autoscaling_group" "bar" { @@ -1053,9 +1151,23 @@ resource "aws_autoscaling_group" "bar" { func testAccAWSAutoScalingGroupImport(name string) string { return fmt.Sprintf(` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + 
values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_placement_group" "test" { @@ -1191,8 +1303,22 @@ resource "aws_subnet" "main" { } } +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-b5b3fc85" + image_id = "${data.aws_ami.test_ami.id}" instance_type = "t2.micro" } @@ -1224,8 +1350,22 @@ resource "aws_subnet" "main" { } } +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-b5b3fc85" + image_id = "${data.aws_ami.test_ami.id}" instance_type = "t2.micro" } @@ -1242,8 +1382,22 @@ resource "aws_autoscaling_group" "bar" { func testAccAWSAutoScalingGroupConfig_withPlacementGroup(name string) string { return fmt.Sprintf(` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" + image_id = "${data.aws_ami.test_ami.id}" instance_type = "c3.large" } @@ -1276,9 +1430,23 @@ resource "aws_autoscaling_group" "bar" { } const testAccAWSAutoscalingMetricsCollectionConfig_allMetricsCollected = ` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - 
image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_autoscaling_group" "bar" { @@ -1304,9 +1472,23 @@ resource "aws_autoscaling_group" "bar" { ` const testAccAWSAutoscalingMetricsCollectionConfig_updatingMetricsCollected = ` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_autoscaling_group" "bar" { @@ -1330,10 +1512,6 @@ resource "aws_autoscaling_group" "bar" { ` const testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_pre = ` -provider "aws" { - region = "us-west-2" -} - resource "aws_vpc" "default" { cidr_block = "10.0.0.0/16" @@ -1369,9 +1547,22 @@ resource "aws_subnet" "alt" { } } +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - # Golang-base from cts-hashi aws account, shared with tf testing account - image_id = "ami-1817d178" + image_id = "${data.aws_ami.test_ami.id}" instance_type = "t2.micro" enable_monitoring = false } @@ -1412,10 +1603,6 @@ resource "aws_security_group" "tf_test_self" { ` const testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_post = ` -provider "aws" { - region = "us-west-2" -} - resource "aws_vpc" "default" { cidr_block = "10.0.0.0/16" @@ -1451,9 +1638,22 @@ resource "aws_subnet" "alt" { } } +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - # 
Golang-base from cts-hashi aws account, shared with tf testing account - image_id = "ami-1817d178" + image_id = "${data.aws_ami.test_ami.id}" instance_type = "t2.micro" enable_monitoring = false } @@ -1542,9 +1742,22 @@ resource "aws_subnet" "alt" { } } +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - # Golang-base from cts-hashi aws account, shared with tf testing account - image_id = "ami-1817d178" + image_id = "${data.aws_ami.test_ami.id}" instance_type = "t2.micro" enable_monitoring = false } @@ -1591,9 +1804,23 @@ resource "aws_security_group" "tf_test_self" { func testAccAWSAutoScalingGroupWithHookConfig(name string) string { return fmt.Sprintf(` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_autoscaling_group" "bar" { @@ -1765,9 +1992,23 @@ resource "aws_autoscaling_group" "bar" { func testAccAWSAutoScalingGroupConfigWithSuspendedProcesses(name string) string { return fmt.Sprintf(` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_placement_group" "test" { @@ -1800,9 +2041,23 @@ resource "aws_autoscaling_group" "bar" { func testAccAWSAutoScalingGroupConfigWithSuspendedProcessesUpdated(name string) string { return 
fmt.Sprintf(` +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } resource "aws_placement_group" "test" { @@ -1843,8 +2098,22 @@ resource "aws_autoscaling_group" "test" { vpc_zone_identifier = [] } +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "test" { - image_id = "ami-21f78e11" + image_id = "${data.aws_ami.test_ami.id}" instance_type = "t1.micro" } ` @@ -1868,8 +2137,22 @@ resource "aws_autoscaling_group" "test" { vpc_zone_identifier = ["${aws_subnet.test.id}"] } +data "aws_ami" "test_ami" { + most_recent = true + + filter { + name = "owner-alias" + values = ["amazon"] + } + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + resource "aws_launch_configuration" "test" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" + image_id = "${data.aws_ami.test_ami.id}" + instance_type = "t2.micro" } ` From 74c7ace7e85e70900821c39d1738f8caf85f8e1e Mon Sep 17 00:00:00 2001 From: Tom Henderson Date: Wed, 10 Jan 2018 22:59:38 +1300 Subject: [PATCH 140/350] resource/aws_ssm_association: Allow for multiple targets (#2297) * Allow up to 5 targets * Update docs * Add a test with multiple targets * Test that targets are well set in the state --- aws/resource_aws_ssm_association.go | 2 +- aws/resource_aws_ssm_association_test.go | 68 ++++++++++++++++++++ website/docs/r/ssm_association.html.markdown | 2 +- 3 files changed, 70 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_ssm_association.go b/aws/resource_aws_ssm_association.go index 
e44363cd27e..e376c715f7d 100644 --- a/aws/resource_aws_ssm_association.go +++ b/aws/resource_aws_ssm_association.go @@ -75,7 +75,7 @@ func resourceAwsSsmAssociation() *schema.Resource { Optional: true, ForceNew: true, Computed: true, - MaxItems: 1, + MaxItems: 5, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { diff --git a/aws/resource_aws_ssm_association_test.go b/aws/resource_aws_ssm_association_test.go index 791cc92b88d..4186e29e456 100644 --- a/aws/resource_aws_ssm_association_test.go +++ b/aws/resource_aws_ssm_association_test.go @@ -40,6 +40,35 @@ func TestAccAWSSSMAssociation_withTargets(t *testing.T) { Config: testAccAWSSSMAssociationBasicConfigWithTargets(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMAssociationExists("aws_ssm_association.foo"), + resource.TestCheckResourceAttr( + "aws_ssm_association.foo", "targets.0.key", "tag:Name"), + resource.TestCheckResourceAttr( + "aws_ssm_association.foo", "targets.0.values.0", "acceptanceTest"), + ), + }, + }, + }) +} + +func TestAccAWSSSMAssociation_withMultipleTargets(t *testing.T) { + name := acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSMAssociationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSMAssociationBasicConfigWithMultipleTargets(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMAssociationExists("aws_ssm_association.foo"), + resource.TestCheckResourceAttr( + "aws_ssm_association.foo", "targets.0.key", "tag:Name"), + resource.TestCheckResourceAttr( + "aws_ssm_association.foo", "targets.0.values.0", "acceptanceTest"), + resource.TestCheckResourceAttr( + "aws_ssm_association.foo", "targets.1.key", "tag:Environment"), + resource.TestCheckResourceAttr( + "aws_ssm_association.foo", "targets.1.values.0", "acceptanceTest"), ), }, }, @@ -365,6 +394,45 @@ resource "aws_ssm_association" "foo" { }`, rName) } +func 
testAccAWSSSMAssociationBasicConfigWithMultipleTargets(rName string) string { + return fmt.Sprintf(` +resource "aws_ssm_document" "foo_document" { + name = "test_document_association-%s", + document_type = "Command" + content = < Date: Wed, 10 Jan 2018 11:02:18 +0100 Subject: [PATCH 141/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 390abd55e67..fdda8b9d194 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ ENHANCEMENTS: * data-source/aws_iam_server_certificate: Add support for retrieving public key [GH-2749] * data-source/aws_vpc_peering_connection: Add support for cross-region VPC peering [GH-2508] * resource/aws_elasticsearch_domain: export kibana endpoint [GH-2804] +* resource/aws_ssm_association: Allow for multiple targets [GH-2297] BUG FIXES: From da3bce18060861a03f0e5e7974a75ee8d1f5d573 Mon Sep 17 00:00:00 2001 From: Anthony Teisseire Date: Wed, 10 Jan 2018 12:00:54 +0200 Subject: [PATCH 142/350] Added more tests to aws_vpn_connection and documentation --- aws/resource_aws_vpn_connection.go | 119 +++++++--- aws/resource_aws_vpn_connection_test.go | 228 ++++++++++++++------ website/docs/r/vpn_connection.html.markdown | 7 + 3 files changed, 253 insertions(+), 101 deletions(-) diff --git a/aws/resource_aws_vpn_connection.go b/aws/resource_aws_vpn_connection.go index ec425d7123c..21244784134 100644 --- a/aws/resource_aws_vpn_connection.go +++ b/aws/resource_aws_vpn_connection.go @@ -5,7 +5,10 @@ import ( "encoding/xml" "fmt" "log" + "net" + "regexp" "sort" + "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -95,33 +98,37 @@ func resourceAwsVpnConnection() *schema.Resource { }, "tunnel1_inside_cidr": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateVpnConnectionTunnelInsideCIDR, }, "tunnel1_preshared_key": { - Type: schema.TypeString, 
- Optional: true, - Sensitive: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateVpnConnectionTunnelPreSharedKey, }, "tunnel2_inside_cidr": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateVpnConnectionTunnelInsideCIDR, }, "tunnel2_preshared_key": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateVpnConnectionTunnelPreSharedKey, }, "tags": tagsSchema(), @@ -261,32 +268,25 @@ func resourceAwsVpnConnection() *schema.Resource { func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - // Get the optional tunnel options - tunnel1_cidr := d.Get("tunnel1_inside_cidr").(string) - tunnel2_cidr := d.Get("tunnel2_inside_cidr").(string) - - tunnel1_psk := d.Get("tunnel1_preshared_key").(string) - tunnel2_psk := d.Get("tunnel2_preshared_key").(string) - // Fill the tunnel options for the EC2 API options := []*ec2.VpnTunnelOptionsSpecification{ {}, {}, } - if tunnel1_cidr != "" { - options[0].TunnelInsideCidr = aws.String(tunnel1_cidr) + if v, ok := d.GetOk("tunnel1_inside_cidr"); ok { + options[0].TunnelInsideCidr = aws.String(v.(string)) } - if tunnel2_cidr != "" { - options[1].TunnelInsideCidr = aws.String(tunnel2_cidr) + if v, ok := d.GetOk("tunnel2_inside_cidr"); ok { + options[1].TunnelInsideCidr = aws.String(v.(string)) } - if tunnel1_psk != "" { - options[0].PreSharedKey = aws.String(tunnel1_psk) + if v, ok := d.GetOk("tunnel1_preshared_key"); ok { + options[0].PreSharedKey = aws.String(v.(string)) } - if tunnel2_psk != "" { - options[1].PreSharedKey = aws.String(tunnel2_psk) + if v, 
ok := d.GetOk("tunnel2_preshared_key"); ok { + options[1].PreSharedKey = aws.String(v.(string)) } connectOpts := &ec2.VpnConnectionOptionsSpecification{ @@ -556,3 +556,56 @@ func xmlConfigToTunnelInfo(xmlConfig string) (*TunnelInfo, error) { return &tunnelInfo, nil } + +func validateVpnConnectionTunnelPreSharedKey(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if (len(value) < 8) || (len(value) > 64) { + errors = append(errors, fmt.Errorf("%q must be between 8 and 64 characters in length", k)) + } + + if strings.HasPrefix(value, "0") { + errors = append(errors, fmt.Errorf("%q cannot start with zero character", k)) + } + + if !regexp.MustCompile(`^[0-9a-zA-Z_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf("%q can only contain alphanumeric and underscore characters", k)) + } + + return +} + +// https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VpnTunnelOptionsSpecification.html +func validateVpnConnectionTunnelInsideCIDR(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, ipnet, err := net.ParseCIDR(value) + + if err != nil { + errors = append(errors, fmt.Errorf("%q must contain a valid CIDR, got error parsing: %s", k, err)) + return + } + + if !strings.HasSuffix(ipnet.String(), "/30") { + errors = append(errors, fmt.Errorf("%q must be /30 CIDR", k)) + } + + if !strings.HasPrefix(ipnet.String(), "169.254.") { + errors = append(errors, fmt.Errorf("%q must be within 169.254.0.0/16", k)) + } else if ipnet.String() == "169.254.0.0/30" { + errors = append(errors, fmt.Errorf("%q cannot be 169.254.0.0/30", k)) + } else if ipnet.String() == "169.254.1.0/30" { + errors = append(errors, fmt.Errorf("%q cannot be 169.254.1.0/30", k)) + } else if ipnet.String() == "169.254.2.0/30" { + errors = append(errors, fmt.Errorf("%q cannot be 169.254.2.0/30", k)) + } else if ipnet.String() == "169.254.3.0/30" { + errors = append(errors, fmt.Errorf("%q cannot be 169.254.3.0/30", k)) + } else if 
ipnet.String() == "169.254.4.0/30" { + errors = append(errors, fmt.Errorf("%q cannot be 169.254.4.0/30", k)) + } else if ipnet.String() == "169.254.5.0/30" { + errors = append(errors, fmt.Errorf("%q cannot be 169.254.5.0/30", k)) + } else if ipnet.String() == "169.254.169.252/30" { + errors = append(errors, fmt.Errorf("%q cannot be 169.254.169.252/30", k)) + } + + return +} diff --git a/aws/resource_aws_vpn_connection_test.go b/aws/resource_aws_vpn_connection_test.go index c5f8c33c3d9..47c45475e6f 100644 --- a/aws/resource_aws_vpn_connection_test.go +++ b/aws/resource_aws_vpn_connection_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "regexp" "testing" "time" @@ -63,8 +64,70 @@ func TestAccAWSVpnConnection_tunnelOptions(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccAwsVpnConnectionDestroy, Steps: []resource.TestStep{ + + // Checking CIDR blocks + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "12345678", "not-a-cidr"), + ExpectError: regexp.MustCompile(`must contain a valid CIDR`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "12345678", "169.254.254.0/31"), + ExpectError: regexp.MustCompile(`must be /30 CIDR`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "12345678", "172.16.0.0/30"), + ExpectError: regexp.MustCompile(`must be within 169.254.0.0/16`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "12345678", "169.254.0.0/30"), + ExpectError: regexp.MustCompile(`cannot be 169.254.0.0/30`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "12345678", "169.254.1.0/30"), + ExpectError: regexp.MustCompile(`cannot be 169.254.1.0/30`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "12345678", "169.254.2.0/30"), + ExpectError: regexp.MustCompile(`cannot be 169.254.2.0/30`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "12345678", 
"169.254.3.0/30"), + ExpectError: regexp.MustCompile(`cannot be 169.254.3.0/30`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "12345678", "169.254.4.0/30"), + ExpectError: regexp.MustCompile(`cannot be 169.254.4.0/30`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "12345678", "169.254.5.0/30"), + ExpectError: regexp.MustCompile(`cannot be 169.254.5.0/30`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "12345678", "169.254.169.252/30"), + ExpectError: regexp.MustCompile(`cannot be 169.254.169.252/30`), + }, + + // Checking PreShared Key + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "1234567", "169.254.254.0/30"), + ExpectError: regexp.MustCompile(`must be between 8 and 64 characters in length`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, acctest.RandStringFromCharSet(65, acctest.CharSetAlpha), "169.254.254.0/30"), + ExpectError: regexp.MustCompile(`must be between 8 and 64 characters in length`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "01234567", "169.254.254.0/30"), + ExpectError: regexp.MustCompile(`cannot start with zero character`), + }, + { + Config: testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn, "1234567!", "169.254.254.0/30"), + ExpectError: regexp.MustCompile(`can only contain alphanumeric and underscore characters`), + }, + + //Try actual building { - Config: testAccAwsVpnConnectionConfigTunnelOptions(rBgpAsn), + Config: testAccAwsVpnConnectionConfigTunnelOptions(rBgpAsn, "12345678", "169.254.8.0/30", "abcdefgh", "169.254.9.0/30"), Check: resource.ComposeTestCheckFunc( testAccAwsVpnConnection( "aws_vpc.vpc", @@ -76,10 +139,10 @@ func TestAccAWSVpnConnection_tunnelOptions(t *testing.T) { resource.TestCheckResourceAttr("aws_vpn_connection.foo", "static_routes_only", "false"), resource.TestCheckResourceAttr("aws_vpn_connection.foo", "tunnel1_inside_cidr", 
"169.254.8.0/30"), - resource.TestCheckResourceAttr("aws_vpn_connection.foo", "tunnel1_preshared_key", "lookatmethisisaprivatekey1"), + resource.TestCheckResourceAttr("aws_vpn_connection.foo", "tunnel1_preshared_key", "12345678"), resource.TestCheckResourceAttr("aws_vpn_connection.foo", "tunnel2_inside_cidr", "169.254.9.0/30"), - resource.TestCheckResourceAttr("aws_vpn_connection.foo", "tunnel2_preshared_key", "lookatmethisisaprivatekey2"), + resource.TestCheckResourceAttr("aws_vpn_connection.foo", "tunnel2_preshared_key", "abcdefgh"), ), }, }, @@ -308,87 +371,116 @@ func TestAWSVpnConnection_xmlconfig(t *testing.T) { func testAccAwsVpnConnectionConfig(rBgpAsn int) string { return fmt.Sprintf(` - resource "aws_vpn_gateway" "vpn_gateway" { - tags { - Name = "vpn_gateway" - } - } +resource "aws_vpn_gateway" "vpn_gateway" { + tags { + Name = "vpn_gateway" + } +} - resource "aws_customer_gateway" "customer_gateway" { - bgp_asn = %d - ip_address = "178.0.0.1" - type = "ipsec.1" - tags { - Name = "main-customer-gateway" - } - } +resource "aws_customer_gateway" "customer_gateway" { + bgp_asn = %d + ip_address = "178.0.0.1" + type = "ipsec.1" + tags { + Name = "main-customer-gateway" + } +} - resource "aws_vpn_connection" "foo" { - vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" - customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" - type = "ipsec.1" - static_routes_only = true - } - `, rBgpAsn) +resource "aws_vpn_connection" "foo" { + vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" + customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" + type = "ipsec.1" + static_routes_only = true +} + `, rBgpAsn) } // Change static_routes_only to be false, forcing a refresh. 
func testAccAwsVpnConnectionConfigUpdate(rInt, rBgpAsn int) string { return fmt.Sprintf(` - resource "aws_vpn_gateway" "vpn_gateway" { - tags { - Name = "vpn_gateway" - } - } +resource "aws_vpn_gateway" "vpn_gateway" { + tags { + Name = "vpn_gateway" + } +} - resource "aws_customer_gateway" "customer_gateway" { - bgp_asn = %d - ip_address = "178.0.0.1" - type = "ipsec.1" - tags { - Name = "main-customer-gateway-%d" - } - } +resource "aws_customer_gateway" "customer_gateway" { + bgp_asn = %d + ip_address = "178.0.0.1" + type = "ipsec.1" + tags { + Name = "main-customer-gateway-%d" + } +} - resource "aws_vpn_connection" "foo" { - vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" - customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" - type = "ipsec.1" - static_routes_only = false - } - `, rBgpAsn, rInt) +resource "aws_vpn_connection" "foo" { + vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" + customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" + type = "ipsec.1" + static_routes_only = false +} + `, rBgpAsn, rInt) } -func testAccAwsVpnConnectionConfigTunnelOptions(rBgpAsn int) string { +func testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn int, psk string, tunnelCidr string) string { return fmt.Sprintf(` - resource "aws_vpn_gateway" "vpn_gateway" { - tags { - Name = "vpn_gateway" - } - } +resource "aws_vpn_gateway" "vpn_gateway" { + tags { + Name = "vpn_gateway" + } +} - resource "aws_customer_gateway" "customer_gateway" { - bgp_asn = %d - ip_address = "178.0.0.1" - type = "ipsec.1" - tags { - Name = "main-customer-gateway" - } - } +resource "aws_customer_gateway" "customer_gateway" { + bgp_asn = %d + ip_address = "178.0.0.1" + type = "ipsec.1" + tags { + Name = "main-customer-gateway" + } +} - resource "aws_vpn_connection" "foo" { - vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" - customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" - type = "ipsec.1" - static_routes_only = false +resource 
"aws_vpn_connection" "foo" { + vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" + customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" + type = "ipsec.1" + static_routes_only = false - tunnel1_inside_cidr = "169.254.8.0/30" - tunnel1_preshared_key = "lookatmethisisaprivatekey1" + tunnel1_inside_cidr = "%s" + tunnel1_preshared_key = "%s" +} + `, rBgpAsn, tunnelCidr, psk) +} - tunnel2_inside_cidr = "169.254.9.0/30" - tunnel2_preshared_key = "lookatmethisisaprivatekey2" - } - `, rBgpAsn) +func testAccAwsVpnConnectionConfigTunnelOptions(rBgpAsn int, psk string, tunnelCidr string, psk2 string, tunnelCidr2 string) string { + return fmt.Sprintf(` +resource "aws_vpn_gateway" "vpn_gateway" { + tags { + Name = "vpn_gateway" + } +} + +resource "aws_customer_gateway" "customer_gateway" { + bgp_asn = %d + ip_address = "178.0.0.1" + type = "ipsec.1" + tags { + Name = "main-customer-gateway" + } +} + +resource "aws_vpn_connection" "foo" { + vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" + customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" + type = "ipsec.1" + static_routes_only = false + + tunnel1_inside_cidr = "%s" + tunnel1_preshared_key = "%s" + + tunnel2_inside_cidr = "%s" + tunnel2_preshared_key = "%s" +} + `, rBgpAsn, tunnelCidr, psk, tunnelCidr2, psk2) } // Test our VPN tunnel config XML parsing diff --git a/website/docs/r/vpn_connection.html.markdown b/website/docs/r/vpn_connection.html.markdown index 93dd1c02b59..96d6ccc065e 100644 --- a/website/docs/r/vpn_connection.html.markdown +++ b/website/docs/r/vpn_connection.html.markdown @@ -14,6 +14,9 @@ Provides a VPN connection connected to a VPC. These objects can be connected to ~> **Note:** All arguments including `tunnel1_preshared_key` and `tunnel2_preshared_key` will be stored in the raw state as plain-text. [Read more about sensitive data in state](/docs/state/sensitive-data.html). 
+~> **Note:** The CIDR blocks in the arguments `tunnel1_inside_cidr` and `tunnel2_inside_cidr` must have a prefix of /30 and be a part of a specific range. +[Read more about this in the AWS documentation](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VpnTunnelOptionsSpecification.html). + ## Example Usage ```hcl @@ -48,6 +51,10 @@ The following arguments are supported: * `tags` - (Optional) Tags to apply to the connection. * `type` - (Required) The type of VPN connection. The only type AWS supports at this time is "ipsec.1". * `vpn_gateway_id` - (Required) The ID of the virtual private gateway. +* `tunnel1_inside_cidr` - (Optional) The CIDR block of the inside IP addresses for the first VPN tunnel. +* `tunnel2_inside_cidr` - (Optional) The CIDR block of the second IP addresses for the first VPN tunnel. +* `tunnel1_preshared_key` - (Optional) The preshared key of the first VPN tunnel. +* `tunnel2_preshared_key` - (Optional) The preshared key of the second VPN tunnel. ## Attribute Reference From 10cd69a93d5b5a6c5ec33f32fc13537e7f60f847 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 10 Jan 2018 10:39:07 +0000 Subject: [PATCH 143/350] test/aws_s3_bucket_notification: Randomize names --- aws/import_aws_s3_bucket_notification_test.go | 15 ++- ...esource_aws_s3_bucket_notification_test.go | 95 ++++++++++++------- 2 files changed, 72 insertions(+), 38 deletions(-) diff --git a/aws/import_aws_s3_bucket_notification_test.go b/aws/import_aws_s3_bucket_notification_test.go index 4872c7ce524..20c5fa49cee 100644 --- a/aws/import_aws_s3_bucket_notification_test.go +++ b/aws/import_aws_s3_bucket_notification_test.go @@ -1,6 +1,7 @@ package aws import ( + "fmt" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -8,6 +9,14 @@ import ( ) func TestAccAWSS3BucketNotification_importBasic(t *testing.T) { + rString := acctest.RandString(8) + + topicName := fmt.Sprintf("tf-acc-topic-s3-b-n-import-%s", rString) + bucketName := 
fmt.Sprintf("tf-acc-bucket-n-import-%s", rString) + queueName := fmt.Sprintf("tf-acc-queue-s3-b-n-import-%s", rString) + roleName := fmt.Sprintf("tf-acc-role-s3-b-n-import-%s", rString) + lambdaFuncName := fmt.Sprintf("tf-acc-lambda-func-s3-b-n-import-%s", rString) + resourceName := "aws_s3_bucket_notification.notification" resource.Test(t, resource.TestCase{ @@ -16,7 +25,7 @@ func TestAccAWSS3BucketNotification_importBasic(t *testing.T) { CheckDestroy: testAccCheckAWSS3BucketNotificationDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSS3BucketConfigWithTopicNotification(acctest.RandInt()), + Config: testAccAWSS3BucketConfigWithTopicNotification(topicName, bucketName), }, resource.TestStep{ @@ -33,7 +42,7 @@ func TestAccAWSS3BucketNotification_importBasic(t *testing.T) { CheckDestroy: testAccCheckAWSS3BucketNotificationDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSS3BucketConfigWithQueueNotification(acctest.RandInt()), + Config: testAccAWSS3BucketConfigWithQueueNotification(queueName, bucketName), }, resource.TestStep{ @@ -50,7 +59,7 @@ func TestAccAWSS3BucketNotification_importBasic(t *testing.T) { CheckDestroy: testAccCheckAWSS3BucketNotificationDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSS3BucketConfigWithLambdaNotification(acctest.RandInt()), + Config: testAccAWSS3BucketConfigWithLambdaNotification(roleName, lambdaFuncName, bucketName), }, resource.TestStep{ diff --git a/aws/resource_aws_s3_bucket_notification_test.go b/aws/resource_aws_s3_bucket_notification_test.go index 84860462677..99adf7cca49 100644 --- a/aws/resource_aws_s3_bucket_notification_test.go +++ b/aws/resource_aws_s3_bucket_notification_test.go @@ -16,15 +16,22 @@ import ( "github.com/aws/aws-sdk-go/service/s3" ) -func TestAccAWSS3Bucket_Notification(t *testing.T) { - rInt := acctest.RandInt() +func TestAccAWSS3BucketNotification_basic(t *testing.T) { + rString := acctest.RandString(8) + + topicName := 
fmt.Sprintf("tf-acc-topic-s3-b-notification-%s", rString) + bucketName := fmt.Sprintf("tf-acc-bucket-notification-%s", rString) + queueName := fmt.Sprintf("tf-acc-queue-s3-b-notification-%s", rString) + roleName := fmt.Sprintf("tf-acc-role-s3-b-notification-%s", rString) + lambdaFuncName := fmt.Sprintf("tf-acc-lambda-func-s3-b-notification-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSS3BucketNotificationDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSS3BucketConfigWithTopicNotification(rInt), + Config: testAccAWSS3BucketConfigWithTopicNotification(topicName, bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketTopicNotification( "aws_s3_bucket.bucket", @@ -35,7 +42,7 @@ func TestAccAWSS3Bucket_Notification(t *testing.T) { FilterRules: []*s3.FilterRule{ &s3.FilterRule{ Name: aws.String("Prefix"), - Value: aws.String(fmt.Sprintf("%d/", rInt)), + Value: aws.String("tf-acc-test/"), }, &s3.FilterRule{ Name: aws.String("Suffix"), @@ -61,7 +68,7 @@ func TestAccAWSS3Bucket_Notification(t *testing.T) { ), }, resource.TestStep{ - Config: testAccAWSS3BucketConfigWithQueueNotification(rInt), + Config: testAccAWSS3BucketConfigWithQueueNotification(queueName, bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketQueueNotification( "aws_s3_bucket.bucket", @@ -72,7 +79,7 @@ func TestAccAWSS3Bucket_Notification(t *testing.T) { FilterRules: []*s3.FilterRule{ &s3.FilterRule{ Name: aws.String("Prefix"), - Value: aws.String(fmt.Sprintf("%d/", rInt)), + Value: aws.String("tf-acc-test/"), }, &s3.FilterRule{ Name: aws.String("Suffix"), @@ -84,7 +91,7 @@ func TestAccAWSS3Bucket_Notification(t *testing.T) { ), }, resource.TestStep{ - Config: testAccAWSS3BucketConfigWithLambdaNotification(rInt), + Config: testAccAWSS3BucketConfigWithLambdaNotification(roleName, lambdaFuncName, bucketName), Check: 
resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketLambdaFunctionConfiguration( "aws_s3_bucket.bucket", @@ -95,7 +102,7 @@ func TestAccAWSS3Bucket_Notification(t *testing.T) { FilterRules: []*s3.FilterRule{ &s3.FilterRule{ Name: aws.String("Prefix"), - Value: aws.String(fmt.Sprintf("%d/", rInt)), + Value: aws.String("tf-acc-test/"), }, &s3.FilterRule{ Name: aws.String("Suffix"), @@ -110,15 +117,19 @@ func TestAccAWSS3Bucket_Notification(t *testing.T) { }) } -func TestAccAWSS3Bucket_NotificationWithoutFilter(t *testing.T) { - rInt := acctest.RandInt() +func TestAccAWSS3BucketNotification_withoutFilter(t *testing.T) { + rString := acctest.RandString(8) + + topicName := fmt.Sprintf("tf-acc-topic-s3-b-notification-wo-f-%s", rString) + bucketName := fmt.Sprintf("tf-acc-bucket-notification-wo-f-%s", rString) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSS3BucketNotificationDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSS3BucketConfigWithTopicNotificationWithoutFilter(rInt), + Config: testAccAWSS3BucketConfigWithTopicNotificationWithoutFilter(topicName, bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketTopicNotification( "aws_s3_bucket.bucket", @@ -341,10 +352,10 @@ func testAccCheckAWSS3BucketLambdaFunctionConfiguration(n, i, t string, events [ } } -func testAccAWSS3BucketConfigWithTopicNotification(randInt int) string { +func testAccAWSS3BucketConfigWithTopicNotification(topicName, bucketName string) string { return fmt.Sprintf(` resource "aws_sns_topic" "topic" { - name = "terraform-test-topic-%d" + name = "%s" policy = < Date: Wed, 10 Jan 2018 11:13:38 +0000 Subject: [PATCH 144/350] test/aws_db_option_group: Replace option name with a valid one --- aws/resource_aws_db_option_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_db_option_group_test.go 
b/aws/resource_aws_db_option_group_test.go index 3723c546103..da90ab21880 100644 --- a/aws/resource_aws_db_option_group_test.go +++ b/aws/resource_aws_db_option_group_test.go @@ -487,7 +487,7 @@ resource "aws_db_option_group" "bar" { major_engine_version = "11.00" option { - option_name = "Mirroring" + option_name = "TDE" } } `, r) From 9c1a55894d7bf1e4f02bf65443264faa454a9e8c Mon Sep 17 00:00:00 2001 From: Alex Rowley Date: Wed, 10 Jan 2018 13:41:08 +0000 Subject: [PATCH 145/350] Add computed field for volume_id of block device (#1489) resource/aws_instance: Add computed field for volume_id of block device --- aws/resource_aws_instance.go | 12 ++++++++++++ aws/resource_aws_instance_test.go | 6 ++++++ website/docs/r/instance.html.markdown | 2 ++ 3 files changed, 20 insertions(+) diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index 03a874c1c52..52e088e2c8e 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -325,6 +325,11 @@ func resourceAwsInstance() *schema.Resource { Computed: true, ForceNew: true, }, + + "volume_id": { + Type: schema.TypeString, + Computed: true, + }, }, }, Set: func(v interface{}) int { @@ -409,6 +414,11 @@ func resourceAwsInstance() *schema.Resource { Computed: true, ForceNew: true, }, + + "volume_id": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, @@ -1152,6 +1162,8 @@ func readBlockDevicesFromInstance(instance *ec2.Instance, conn *ec2.EC2) (map[st instanceBd := instanceBlockDevices[*vol.VolumeId] bd := make(map[string]interface{}) + bd["volume_id"] = *vol.VolumeId + if instanceBd.Ebs != nil && instanceBd.Ebs.DeleteOnTermination != nil { bd["delete_on_termination"] = *instanceBd.Ebs.DeleteOnTermination } diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 3bc8762b57d..0c9f9e52a99 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -267,6 +267,8 @@ func TestAccAWSInstance_blockDevices(t *testing.T) { 
"aws_instance.foo", &v), resource.TestCheckResourceAttr( "aws_instance.foo", "root_block_device.#", "1"), + resource.TestMatchResourceAttr( + "aws_instance.foo", "root_block_device.0.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), resource.TestCheckResourceAttr( "aws_instance.foo", "root_block_device.0.volume_size", "11"), resource.TestCheckResourceAttr( @@ -275,12 +277,16 @@ func TestAccAWSInstance_blockDevices(t *testing.T) { "aws_instance.foo", "ebs_block_device.#", "3"), resource.TestCheckResourceAttr( "aws_instance.foo", "ebs_block_device.2576023345.device_name", "/dev/sdb"), + resource.TestMatchResourceAttr( + "aws_instance.foo", "ebs_block_device.2576023345.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), resource.TestCheckResourceAttr( "aws_instance.foo", "ebs_block_device.2576023345.volume_size", "9"), resource.TestCheckResourceAttr( "aws_instance.foo", "ebs_block_device.2576023345.volume_type", "standard"), resource.TestCheckResourceAttr( "aws_instance.foo", "ebs_block_device.2554893574.device_name", "/dev/sdc"), + resource.TestMatchResourceAttr( + "aws_instance.foo", "ebs_block_device.2554893574.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), resource.TestCheckResourceAttr( "aws_instance.foo", "ebs_block_device.2554893574.volume_size", "10"), resource.TestCheckResourceAttr( diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index 2f4d9a95115..e000150c782 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -234,6 +234,8 @@ The following attributes are exported: * `vpc_security_group_ids` - The associated security groups in non-default VPC * `subnet_id` - The VPC subnet ID. +For any `root_block_device` and `ebs_block_device` the `volume_id` is exported. +e.g. 
`aws_instance.web.root_block_device.0.volume_id` ## Import From e4cb095c86cbc078d9513115252a876c07997aca Mon Sep 17 00:00:00 2001 From: Gauthier Wallet Date: Wed, 10 Jan 2018 14:41:30 +0100 Subject: [PATCH 146/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fdda8b9d194..703c6bc1aee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ ENHANCEMENTS: * data-source/aws_vpc_peering_connection: Add support for cross-region VPC peering [GH-2508] * resource/aws_elasticsearch_domain: export kibana endpoint [GH-2804] * resource/aws_ssm_association: Allow for multiple targets [GH-2297] +* resource/aws_instance: Add computed field for volume_id of block device [GH-1489] BUG FIXES: From 507d6ae47e4a0431d3e13d1a56e1436cb6da5682 Mon Sep 17 00:00:00 2001 From: pradeepbhadani Date: Wed, 10 Jan 2018 14:53:51 +0000 Subject: [PATCH 147/350] Specify RDS naming constraints for username and password (#2925) resource/rds_cluster: Specify RDS naming constraints for username and password --- website/docs/r/rds_cluster.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/rds_cluster.html.markdown b/website/docs/r/rds_cluster.html.markdown index e3feb59d4f9..9efc0609d0d 100644 --- a/website/docs/r/rds_cluster.html.markdown +++ b/website/docs/r/rds_cluster.html.markdown @@ -57,8 +57,8 @@ The following arguments are supported: * `cluster_identifier_prefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `cluster_identifer`. * `database_name` - (Optional) Name for an automatically created database on cluster creation. There are different naming restrictions per database engine: [RDS Naming Constraints][5] * `master_password` - (Required unless a `snapshot_identifier` is provided) Password for the master DB user. 
Note that this may - show up in logs, and it will be stored in the state file -* `master_username` - (Required unless a `snapshot_identifier` is provided) Username for the master DB user + show up in logs, and it will be stored in the state file. Please refer to the [RDS Naming Constraints][5] +* `master_username` - (Required unless a `snapshot_identifier` is provided) Username for the master DB user. Please refer to the [RDS Naming Constraints][5] * `final_snapshot_identifier` - (Optional) The name of your final DB snapshot when this DB cluster is deleted. If omitted, no final snapshot will be made. From 6838bdcf81a0907b89806cb9c3834033441c19d6 Mon Sep 17 00:00:00 2001 From: Matt Lavin Date: Wed, 10 Jan 2018 10:06:18 -0500 Subject: [PATCH 148/350] resource/aws_api_gateway_integration: Allow update of URI attributes (#2834) * Allow updating API Gateway integration URI attributes instead of forcing new resources * Add testcase for updating API Gateway integration URI attributes --- aws/resource_aws_api_gateway_integration.go | 12 +++- ...source_aws_api_gateway_integration_test.go | 64 +++++++++++++++++++ 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_api_gateway_integration.go b/aws/resource_aws_api_gateway_integration.go index 6e0390362ea..377fd804e2d 100644 --- a/aws/resource_aws_api_gateway_integration.go +++ b/aws/resource_aws_api_gateway_integration.go @@ -51,7 +51,6 @@ func resourceAwsApiGatewayIntegration() *schema.Resource { "uri": { Type: schema.TypeString, Optional: true, - ForceNew: true, }, "credentials": { @@ -370,6 +369,17 @@ func resourceAwsApiGatewayIntegrationUpdate(d *schema.ResourceData, meta interfa }) } + // The documentation https://docs.aws.amazon.com/apigateway/api-reference/link-relation/integration-update/ says + // that uri changes are only supported for non-mock types. 
Because the uri value is not used in mock + // resources, it means that the uri can always be updated + if d.HasChange("uri") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/uri"), + Value: aws.String(d.Get("uri").(string)), + }) + } + params := &apigateway.UpdateIntegrationInput{ HttpMethod: aws.String(d.Get("http_method").(string)), ResourceId: aws.String(d.Get("resource_id").(string)), diff --git a/aws/resource_aws_api_gateway_integration_test.go b/aws/resource_aws_api_gateway_integration_test.go index 2b72f806a43..5a7cd45dbc7 100644 --- a/aws/resource_aws_api_gateway_integration_test.go +++ b/aws/resource_aws_api_gateway_integration_test.go @@ -57,6 +57,25 @@ func TestAccAWSAPIGatewayIntegration_basic(t *testing.T) { ), }, + { + Config: testAccAWSAPIGatewayIntegrationConfigUpdateURI, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayIntegrationExists("aws_api_gateway_integration.test", &conf), + resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "type", "HTTP"), + resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "integration_http_method", "GET"), + resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "uri", "https://www.google.de/updated"), + resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "passthrough_behavior", "WHEN_NO_MATCH"), + resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "content_handling", "CONVERT_TO_TEXT"), + resource.TestCheckNoResourceAttr("aws_api_gateway_integration.test", "credentials"), + resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.%", "2"), + resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.integration.request.header.X-Authorization", "'static'"), + resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.integration.request.header.X-Foo", "'Bar'"), + 
resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.%", "2"), + resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.application/json", ""), + resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.application/xml", "#set($inputRoot = $input.path('$'))\n{ }"), + ), + }, + { Config: testAccAWSAPIGatewayIntegrationConfigUpdateNoTemplates, Check: resource.ComposeTestCheckFunc( @@ -279,6 +298,51 @@ resource "aws_api_gateway_integration" "test" { } ` +const testAccAWSAPIGatewayIntegrationConfigUpdateURI = ` +resource "aws_api_gateway_rest_api" "test" { + name = "test" +} + +resource "aws_api_gateway_resource" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" + path_part = "test" +} + +resource "aws_api_gateway_method" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "GET" + authorization = "NONE" + + request_models = { + "application/json" = "Error" + } +} + +resource "aws_api_gateway_integration" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "${aws_api_gateway_method.test.http_method}" + + request_templates = { + "application/json" = "" + "application/xml" = "#set($inputRoot = $input.path('$'))\n{ }" + } + + request_parameters = { + "integration.request.header.X-Authorization" = "'static'" + "integration.request.header.X-Foo" = "'Bar'" + } + + type = "HTTP" + uri = "https://www.google.de/updated" + integration_http_method = "GET" + passthrough_behavior = "WHEN_NO_MATCH" + content_handling = "CONVERT_TO_TEXT" +} +` + const testAccAWSAPIGatewayIntegrationConfigUpdateNoTemplates = ` resource "aws_api_gateway_rest_api" "test" { name = "test" From bba4a31c7f9b02c7a8733328ac6874f286b34cf2 Mon Sep 17 00:00:00 2001 From: Gauthier 
Wallet Date: Wed, 10 Jan 2018 16:06:38 +0100 Subject: [PATCH 149/350] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 703c6bc1aee..48a645a82a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ ENHANCEMENTS: * resource/aws_elasticsearch_domain: export kibana endpoint [GH-2804] * resource/aws_ssm_association: Allow for multiple targets [GH-2297] * resource/aws_instance: Add computed field for volume_id of block device [GH-1489] +* resource/aws_api_gateway_integration: Allow update of URI attributes [GH-2834] BUG FIXES: From ecc5649110b4b8055aeedc4eabe30f573b8928dc Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 20 Dec 2017 09:52:02 -0500 Subject: [PATCH 150/350] update terraform vendor to 0.11.2-a262a0e04 --- .../hashicorp/terraform/CHANGELOG.md | 84 ++- .../github.com/hashicorp/terraform/README.md | 1 - .../terraform/backend/local/backend_apply.go | 8 - .../terraform/backend/local/backend_local.go | 47 +- .../terraform/backend/local/backend_plan.go | 32 +- .../backend/local/backend_refresh.go | 33 +- .../backend/local/counthookaction_string.go | 4 +- .../terraform/backend/operationtype_string.go | 4 +- .../backend/remote-state/azure/backend.go | 2 +- .../backend/remote-state/gcs/backend.go | 66 ++- .../backend/remote-state/gcs/backend_state.go | 61 +-- .../backend/remote-state/s3/backend.go | 8 + .../hashicorp/terraform/backend/testing.go | 8 + .../providers/terraform/data_source_state.go | 17 +- .../salt-masterless/resource_provisioner.go | 75 ++- .../hashicorp/terraform/command/apply.go | 40 +- .../terraform/command/autocomplete.go | 12 - .../hashicorp/terraform/command/command.go | 36 +- .../hashicorp/terraform/command/console.go | 12 +- .../hashicorp/terraform/command/graph.go | 20 +- .../hashicorp/terraform/command/import.go | 70 ++- .../hashicorp/terraform/command/init.go | 18 +- .../terraform/command/internal_plugin_list.go | 2 + 
.../hashicorp/terraform/command/meta.go | 3 + .../hashicorp/terraform/command/meta_new.go | 20 +- .../hashicorp/terraform/command/plan.go | 52 +- .../hashicorp/terraform/command/providers.go | 18 +- .../hashicorp/terraform/command/push.go | 12 +- .../hashicorp/terraform/command/refresh.go | 17 +- .../hashicorp/terraform/command/validate.go | 32 +- .../hashicorp/terraform/commands.go | 13 +- .../hashicorp/terraform/config/config.go | 355 ++++++++----- .../config/configschema/nestingmode_string.go | 4 +- .../terraform/config/interpolate_funcs.go | 68 +++ .../terraform/config/module/storage.go | 24 +- .../hashicorp/terraform/config/module/tree.go | 162 ++---- .../terraform/config/resource_mode_string.go | 4 +- .../terraform/helper/resource/state.go | 2 +- .../terraform/helper/resource/testing.go | 225 ++++++-- .../helper/resource/testing_config.go | 16 +- .../helper/schema/field_writer_map.go | 32 ++ .../helper/schema/getsource_string.go | 6 +- .../terraform/helper/schema/resource_data.go | 12 +- .../terraform/helper/schema/resource_diff.go | 15 +- .../terraform/helper/schema/schema.go | 129 +++-- .../helper/schema/valuetype_string.go | 4 +- .../terraform/helper/validation/validation.go | 21 + vendor/github.com/hashicorp/terraform/main.go | 1 - .../module/registry.go => registry/client.go} | 122 +++-- .../registry/regsrc/friendly_host.go | 30 +- .../terraform/registry/regsrc/module.go | 50 +- .../hashicorp/terraform/terraform/context.go | 39 +- .../hashicorp/terraform/terraform/diff.go | 5 + .../terraform/eval_check_prevent_refresh.go | 49 ++ .../terraform/eval_context_builtin.go | 7 +- .../terraform/terraform/eval_diff.go | 6 +- .../terraform/terraform/eval_output.go | 4 +- .../terraform/terraform/eval_state.go | 31 -- .../hashicorp/terraform/terraform/features.go | 4 + .../terraform/graph_builder_apply.go | 3 + .../terraform/terraform/graph_builder_plan.go | 9 + .../terraform/terraform/graph_walk_context.go | 14 +- .../terraform/terraform/graphtype_string.go | 4 +- 
.../terraform/instancetype_string.go | 4 +- .../terraform/terraform/node_data_refresh.go | 8 +- .../terraform/node_module_removed.go | 77 +++ .../terraform/terraform/node_output_orphan.go | 5 + .../terraform/node_provider_abstract.go | 8 +- .../terraform/node_resource_abstract.go | 8 +- .../terraform/node_resource_apply.go | 6 +- .../terraform/node_resource_destroy.go | 6 +- .../terraform/node_resource_plan_instance.go | 4 +- .../terraform/node_resource_refresh.go | 4 +- .../hashicorp/terraform/terraform/resource.go | 2 +- .../terraform/resource_provider_mock.go | 5 +- .../hashicorp/terraform/terraform/state.go | 35 +- .../transform_attach_config_provider.go | 62 --- .../terraform/terraform/transform_config.go | 75 --- .../terraform/terraform/transform_deposed.go | 8 +- .../terraform/transform_import_state.go | 6 +- .../terraform/transform_orphan_output.go | 45 +- .../terraform/terraform/transform_provider.go | 316 ++++++++++-- .../terraform/transform_provider_disable.go | 50 -- .../terraform/transform_reference.go | 12 +- .../terraform/transform_removed_modules.go | 32 ++ .../terraform/walkoperation_string.go | 4 +- .../terraform/tfdiags/severity_string.go | 9 +- .../hashicorp/terraform/version/version.go | 4 +- vendor/vendor.json | 480 +++++++----------- 89 files changed, 2131 insertions(+), 1428 deletions(-) rename vendor/github.com/hashicorp/terraform/{config/module/registry.go => registry/client.go} (64%) create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_refresh.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go diff --git a/vendor/github.com/hashicorp/terraform/CHANGELOG.md b/vendor/github.com/hashicorp/terraform/CHANGELOG.md index cda55628f0a..8b98f0dd036 100644 --- 
a/vendor/github.com/hashicorp/terraform/CHANGELOG.md +++ b/vendor/github.com/hashicorp/terraform/CHANGELOG.md @@ -1,44 +1,82 @@ -## 0.11.0-beta1 (November 3, 2017) +## 0.11.2 (Unreleased) BACKWARDS INCOMPATIBILITIES / NOTES: -The following items give an overview of the incompatibilities and other noteworthy changes in this release. For more details on some of these changes, along with information on how to upgrade existing configurations where needed, see [the v0.11 upgrade guide](./website/upgrade-guides/0-11.html.markdown). - -* Output interpolation errors are now fatal. Module configs with unused outputs - which contained errors will no longer be valid. -* Module configuration blocks have 2 new reserved attribute names, "providers" - and "version". Modules using these as input variables will need to be - updated. -* The module provider inheritance system has been updated. Providers declared - with configuration will no longer be merged, and named provider - configurations can be explicitly passed between modules. See [the upgrade guide](./website/upgrade-guides/0-11.html.markdown#interactions-between-providers-and-modules) for more details. -* The command `terraform apply` with no explicit plan argument is now interactive by default. Specifically, it will show the generated plan and wait for confirmation before applying it. The behavior is unchanged when a plan file argument is provided, and the previous behavior can be obtained _without_ a plan file by using the `-auto-approve` option. +* backend/gcs: The gcs remote state backend was erroneously creating the state bucket if it didn't exist. This is not the intended behavior of backends, as Terraform cannot track or manage that resource. The target bucket must now be created separately, before using it with Terraform. 
[GH-16865] + +NEW FEATURES: + +* **[Habitat](https://www.habitat.sh/) Provisioner** allowing automatic installation of the Habitat agent [GH-16280] + +IMPROVEMENTS: + +* config: new `rsadecrypt` interpolation function allows decrypting a base64-encoded ciphertext using a given private key. This is particularly useful for decrypting the password for a Windows instance on AWS EC2, but is generic and may find other uses too. [GH-16647] +* config: new `timeadd` interpolation function allows calculating a new timestamp relative to an existing known timestamp. [GH-16644] +* cli: Module and provider installation (and some other Terraform features) now implement [RFC6555](https://tools.ietf.org/html/rfc6555) when making outgoing HTTP requests, which should improve installation reliability for dual-stack (both IPv4 and IPv6) hosts running on networks that have non-performant or broken IPv6 Internet connectivity by trying both IPv4 and IPv6 connections. [GH-16805] +* backend/s3: it is now possible to disable the region check, for improved compatibility with third-party services that attempt to mimic the S3 API. [GH-16757] +* backend/s3: it is now possible to use named credentials from the `~/.aws/credentials` file, similarly to the AWS plugin [GH-16661] +* provider/terraform: in `terraform_remote_state`, the argument `environment` is now deprecated in favor of `workspace`. The `environment` argument will be removed in a later Terraform release. [GH-16558] + +BUG FIXES: + +* config: Referencing a count attribute in an output no longer generates a warning [GH-16866] +* cli: Terraform will no longer crash when `terraform plan`, `terraform apply`, and some other commands encounter an invalid provider version constraint in configuration, generating a proper error message instead. 
[GH-16867] +* backend/gcs: The usage of the GOOGLE_CREDENTIALS environment variable now matches that of the google provider [GH-16865] +* backend/gcs: fixed the locking methodology to avoid "double-locking" issues when used with the `terraform_remote_state` data source [GH-16852] +* provisioner/salt-masterless: now waits for all of the remote operations to complete before returning [GH-16704] + +## 0.11.1 (November 30, 2017) + +IMPROVEMENTS: + +* modules: Modules can now receive a specific provider configuration in the `providers` map, even if it's only implicitly used ([#16619](https://github.com/hashicorp/terraform/issues/16619)) +* config: Terraform will now detect and warn about outputs containing potentially-problematic references to resources with `count` set where the references does not use the "splat" syntax. This identifies situations where an output may [reference a resource with `count = 0`](https://www.terraform.io/upgrade-guides/0-11.html#referencing-attributes-from-resources-with-count-0) even if the `count` expression does not _currently_ evaluate to `0`, allowing the bug to be detected and fixed _before_ the value is later changed to `0` and would thus become an error. **This usage will become a fatal error in Terraform 0.12**. ([#16735](https://github.com/hashicorp/terraform/issues/16735)) +* core: A new environment variable `TF_WARN_OUTPUT_ERRORS=1` is supported to opt out of the behavior introduced in 0.11.0 where errors in output expressions halt execution. This restores the previous behavior where such errors are ignored, allowing users to apply problematic configurations without fixing all of the errors. This opt-out will be removed in Terraform 0.12, so it is strongly recommended to use the new warning described in the previous item to detect and fix these problematic expressions. 
([#16782](https://github.com/hashicorp/terraform/issues/16782)) + +BUG FIXES: + +* cli: fix crash when subcommands with sub-subcommands are accidentally provided as a single argument, such as `terraform "workspace list"` ([#16789](https://github.com/hashicorp/terraform/issues/16789)) + +## 0.11.0 (November 16, 2017) + +The following list combines the changes from 0.11.0-beta1 and 0.11.0-rc1 to give the full set of changes since 0.10.8. For details on each of the individual pre-releases, please see [the 0.11.0-rc1 CHANGELOG](https://github.com/hashicorp/terraform/blob/v0.11.0-rc1/CHANGELOG.md). + +BACKWARDS INCOMPATIBILITIES / NOTES: + +The following items give an overview of the incompatibilities and other noteworthy changes in this release. For more details on some of these changes, along with information on how to upgrade existing configurations where needed, see [the v0.11 upgrade guide](https://www.terraform.io/upgrade-guides/0-11.html). + +* Output interpolation errors are now fatal. Module configs with unused outputs which contained errors will no longer be valid. +* Module configuration blocks have 2 new reserved attribute names, "providers" and "version". Modules using these as input variables will need to be updated. +* The module provider inheritance rules have changed. Inherited provider configurations will no longer be merged with local configurations, and additional (aliased) provider configurations must be explicitly passed between modules when shared. See [the upgrade guide](https://www.terraform.io/upgrade-guides/0-11.html) for more details. +* The command `terraform apply` with no explicit plan argument is now interactive by default. Specifically, it will show the generated plan and wait for confirmation before applying it, similar to the existing behavior of `terraform destroy`. The behavior is unchanged when a plan file argument is provided, and the previous behavior can be obtained _without_ a plan file by using the `-auto-approve` option. 
* The `terraform` provider (that is, the provider that contains the `terraform_remote_state` data source) has been re-incorporated as a built-in provider in the Terraform Core executable. In 0.10 it was split into a separate plugin along with all of the other providers, but this provider uses several internal Terraform Core APIs and so in practice it's been confusing to version that separately from Terraform Core. As a consequence, this provider no longer supports version constraints, and so `version` attributes for this provider in configuration must be removed. * When remote state is enabled, Terraform will no longer generate a local `terraform.tfstate.backup` file before updating remote state. Previously this file could potentially be used to recover a previous state to help recover after a mistake, but it also caused a potentially-sensitive state file to be generated in an unexpected location that may be inadvertently copied or checked in to version control. With this local backup now removed, we recommend instead relying on versioning or backup mechanisms provided by the backend, such as Amazon S3 versioning or Terraform Enterprise's built-in state history mechanism. (Terraform will still create the local file `errored.tfstate` in the unlikely event that there is an error when writing to the remote backend.) NEW FEATURES: -* modules: Module configuration blocks now have a "version" attribute, to set a version constraint for modules sourced from a registry. [GH-16466] -* modules: Module configuration blocks now have a "providers" attribute, to map a provider configuration from the current module into a submodule [GH-16379] +* modules: Module configuration blocks now have a "version" attribute, to set a version constraint for modules sourced from a registry. 
([#16466](https://github.com/hashicorp/terraform/issues/16466)) +* modules: Module configuration blocks now have a "providers" attribute, to map a provider configuration from the current module into a submodule ([#16379](https://github.com/hashicorp/terraform/issues/16379)) * backend/gcs: The gcs remote state backend now supports workspaces and locking. -* backend/manta: The Manta backend now supports workspaces and locking [GH-16296] +* backend/manta: The Manta backend now supports workspaces and locking ([#16296](https://github.com/hashicorp/terraform/issues/16296)) IMPROVEMENTS: -* cli: The `terraform apply` command now waits for interactive approval of the generated plan before applying it, unless an explicit plan file is provided. [GH-16502] -* cli: The `terraform version` command now prints out the version numbers of initialized plugins as well as the version of Terraform core, so that they can be more easily shared when opening GitHub Issues, etc. [GH-16439] -* cli: A new `TF_DATA_DIR` environment variable can be used to override the location where Terraform stores the files normally placed in the `.terraform` directory. [GH-16207] -* provider/terraform: now built in to Terraform Core so that it will always have the same backend functionality as the Terraform release it corresponds to. [GH-16543] +* cli: The `terraform apply` command now waits for interactive approval of the generated plan before applying it, unless an explicit plan file is provided. ([#16502](https://github.com/hashicorp/terraform/issues/16502)) +* cli: The `terraform version` command now prints out the version numbers of initialized plugins as well as the version of Terraform core, so that they can be more easily shared when opening GitHub Issues, etc. ([#16439](https://github.com/hashicorp/terraform/issues/16439)) +* cli: A new `TF_DATA_DIR` environment variable can be used to override the location where Terraform stores the files normally placed in the `.terraform` directory. 
([#16207](https://github.com/hashicorp/terraform/issues/16207)) +* provider/terraform: now built in to Terraform Core so that it will always have the same backend functionality as the Terraform release it corresponds to. ([#16543](https://github.com/hashicorp/terraform/issues/16543)) BUG FIXES: -* config: Provider config in submodules will no longer be overridden by parent providers with the same name. [GH-16379] -* core: Module outputs can now produce errors, preventing them from silently propagating through the config. [GH-16204] -* cli: When remote state is enabled, Terraform will no longer generate a local `terraform.tfstate.backup` file before updating remote state. [GH-16464] +* config: Provider config in submodules will no longer be overridden by parent providers with the same name. ([#16379](https://github.com/hashicorp/terraform/issues/16379)) +* cli: When remote state is enabled, Terraform will no longer generate a local `terraform.tfstate.backup` file before updating remote state. ([#16464](https://github.com/hashicorp/terraform/issues/16464)) +* core: state now includes a reference to the provider configuration most recently used to create or update a resource, so that the same configuration can be used to destroy that resource if its configuration (including the explicit pointer to a provider configuration) is removed ([#16586](https://github.com/hashicorp/terraform/issues/16586)) +* core: Module outputs can now produce errors, preventing them from silently propagating through the config. 
([#16204](https://github.com/hashicorp/terraform/issues/16204)) +* backend/gcs: will now automatically add a slash to the given prefix if not present, since without it the workspace enumeration does not function correctly ([#16585](https://github.com/hashicorp/terraform/issues/16585)) PROVIDER FRAMEWORK CHANGES (not user-facing): -* helper/schema: Loosen validation for 'id' field [GH-16456] +* helper/schema: Loosen validation for 'id' field ([#16456](https://github.com/hashicorp/terraform/issues/16456)) ## 0.10.8 (October 25, 2017) diff --git a/vendor/github.com/hashicorp/terraform/README.md b/vendor/github.com/hashicorp/terraform/README.md index 03f8be85dd9..eed7f74e3b0 100644 --- a/vendor/github.com/hashicorp/terraform/README.md +++ b/vendor/github.com/hashicorp/terraform/README.md @@ -3,7 +3,6 @@ Terraform - Website: https://www.terraform.io - [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform?ref=badge_shield) - Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool) Terraform diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go index 4463431815d..9789e0b7c4a 100644 --- a/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go @@ -151,14 +151,6 @@ func (b *Local) opApply( _, applyErr = tfCtx.Apply() // we always want the state, even if apply failed applyState = tfCtx.State() - - /* - // Record any shadow errors for later - if err := ctx.ShadowError(); err != nil { - shadowErr = multierror.Append(shadowErr, multierror.Prefix( - err, "apply operation:")) - } - */ }() // Wait for the 
apply to finish or for us to be interrupted so diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go index 2c121d2e6d3..aa056a1a16d 100644 --- a/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go @@ -2,12 +2,13 @@ package local import ( "errors" - "fmt" "log" - "strings" + + "github.com/hashicorp/terraform/command/format" + + "github.com/hashicorp/terraform/tfdiags" "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/state" "github.com/hashicorp/terraform/terraform" @@ -91,27 +92,31 @@ func (b *Local) context(op *backend.Operation) (*terraform.Context, state.State, // If validation is enabled, validate if b.OpValidation { - // We ignore warnings here on purpose. We expect users to be listening - // to the terraform.Hook called after a validation. - ws, es := tfCtx.Validate() - if len(ws) > 0 { - // Log just in case the CLI isn't enabled - log.Printf("[WARN] backend/local: %d warnings: %v", len(ws), ws) - - // If we have a CLI, output the warnings - if b.CLI != nil { - b.CLI.Warn(strings.TrimSpace(validateWarnHeader) + "\n") - for _, w := range ws { - b.CLI.Warn(fmt.Sprintf(" * %s", w)) - } + diags := tfCtx.Validate() + if len(diags) > 0 { + if diags.HasErrors() { + // If there are warnings _and_ errors then we'll take this + // path and return them all together in this error. + return nil, nil, diags.Err() + } - // Make a newline before continuing - b.CLI.Output("") + // For now we can't propagate warnings any further without + // printing them directly to the UI, so we'll need to + // format them here ourselves. 
+ for _, diag := range diags { + if diag.Severity() != tfdiags.Warning { + continue + } + if b.CLI != nil { + b.CLI.Warn(format.Diagnostic(diag, b.Colorize(), 72)) + } else { + desc := diag.Description() + log.Printf("[WARN] backend/local: %s", desc.Summary) + } } - } - if len(es) > 0 { - return nil, nil, multierror.Append(nil, es...) + // Make a newline before continuing + b.CLI.Output("") } } } diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go index a4e92c1c711..380ce17421a 100644 --- a/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go @@ -101,14 +101,34 @@ func (b *Local) opPlan( } } - // Perform the plan - log.Printf("[INFO] backend/local: plan calling Plan") - plan, err := tfCtx.Plan() - if err != nil { - runningOp.Err = errwrap.Wrapf("Error running plan: {{err}}", err) - return + // Perform the plan in a goroutine so we can be interrupted + var plan *terraform.Plan + var planErr error + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + log.Printf("[INFO] backend/local: plan calling Plan") + plan, planErr = tfCtx.Plan() + }() + + select { + case <-ctx.Done(): + if b.CLI != nil { + b.CLI.Output("stopping plan operation...") + } + + // Stop execution + go tfCtx.Stop() + + // Wait for completion still + <-doneCh + case <-doneCh: } + if planErr != nil { + runningOp.Err = errwrap.Wrapf("Error running plan: {{err}}", planErr) + return + } // Record state runningOp.PlanEmpty = plan.Diff.Empty() diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go index 282e63045af..0cf50b759ee 100644 --- a/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go @@ -3,6 +3,7 @@ package 
local import ( "context" "fmt" + "log" "os" "strings" @@ -12,6 +13,7 @@ import ( "github.com/hashicorp/terraform/command/clistate" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" ) func (b *Local) opRefresh( @@ -78,11 +80,34 @@ func (b *Local) opRefresh( } } - // Perform operation and write the resulting state to the running op - newState, err := tfCtx.Refresh() + // Perform the refresh in a goroutine so we can be interrupted + var newState *terraform.State + var refreshErr error + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + newState, err = tfCtx.Refresh() + log.Printf("[INFO] backend/local: plan calling Plan") + }() + + select { + case <-ctx.Done(): + if b.CLI != nil { + b.CLI.Output("stopping refresh operation...") + } + + // Stop execution + go tfCtx.Stop() + + // Wait for completion still + <-doneCh + case <-doneCh: + } + + // write the resulting state to the running op runningOp.State = newState - if err != nil { - runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", err) + if refreshErr != nil { + runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", refreshErr) return } diff --git a/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go b/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go index 92b2624a531..507bab917a4 100644 --- a/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go +++ b/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go @@ -2,7 +2,7 @@ package local -import "fmt" +import "strconv" const _countHookAction_name = "countHookActionAddcountHookActionChangecountHookActionRemove" @@ -10,7 +10,7 @@ var _countHookAction_index = [...]uint8{0, 18, 39, 60} func (i countHookAction) String() string { if i >= countHookAction(len(_countHookAction_index)-1) { - return fmt.Sprintf("countHookAction(%d)", i) + return 
"countHookAction(" + strconv.FormatInt(int64(i), 10) + ")" } return _countHookAction_name[_countHookAction_index[i]:_countHookAction_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go b/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go index 15fbba6ecce..16b7b381941 100644 --- a/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go +++ b/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go @@ -2,7 +2,7 @@ package backend -import "fmt" +import "strconv" const _OperationType_name = "OperationTypeInvalidOperationTypeRefreshOperationTypePlanOperationTypeApply" @@ -10,7 +10,7 @@ var _OperationType_index = [...]uint8{0, 20, 40, 57, 75} func (i OperationType) String() string { if i >= OperationType(len(_OperationType_index)-1) { - return fmt.Sprintf("OperationType(%d)", i) + return "OperationType(" + strconv.FormatInt(int64(i), 10) + ")" } return _OperationType_name[_OperationType_index[i]:_OperationType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go index 5c7577b3033..38e6de5daa4 100644 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go @@ -39,7 +39,7 @@ func New() backend.Backend { Type: schema.TypeString, Optional: true, Description: "The Azure cloud environment.", - Default: "", + DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", ""), }, "access_key": { diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go index 3ec322e1533..12e8d43ed7f 100644 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go @@ -3,14 +3,17 @@ 
package gcs import ( "context" + "encoding/json" "fmt" "os" "strings" "cloud.google.com/go/storage" "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/pathorcontents" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" + "golang.org/x/oauth2/jwt" "google.golang.org/api/option" ) @@ -96,6 +99,9 @@ func (b *gcsBackend) configure(ctx context.Context) error { b.bucketName = data.Get("bucket").(string) b.prefix = strings.TrimLeft(data.Get("prefix").(string), "/") + if b.prefix != "" && !strings.HasSuffix(b.prefix, "/") { + b.prefix = b.prefix + "/" + } b.defaultStateFile = strings.TrimLeft(data.Get("path").(string), "/") @@ -108,16 +114,39 @@ func (b *gcsBackend) configure(ctx context.Context) error { b.region = r } - opts := []option.ClientOption{ - option.WithScopes(storage.ScopeReadWrite), - option.WithUserAgent(terraform.UserAgentString()), + var opts []option.ClientOption + + creds := data.Get("credentials").(string) + if creds == "" { + creds = os.Getenv("GOOGLE_CREDENTIALS") } - if credentialsFile := data.Get("credentials").(string); credentialsFile != "" { - opts = append(opts, option.WithCredentialsFile(credentialsFile)) - } else if credentialsFile := os.Getenv("GOOGLE_CREDENTIALS"); credentialsFile != "" { - opts = append(opts, option.WithCredentialsFile(credentialsFile)) + + if creds != "" { + var account accountFile + + // to mirror how the provider works, we accept the file path or the contents + contents, _, err := pathorcontents.Read(creds) + if err != nil { + return fmt.Errorf("Error loading credentials: %s", err) + } + + if err := json.Unmarshal([]byte(contents), &account); err != nil { + return fmt.Errorf("Error parsing credentials '%s': %s", contents, err) + } + + conf := jwt.Config{ + Email: account.ClientEmail, + PrivateKey: []byte(account.PrivateKey), + Scopes: []string{storage.ScopeReadWrite}, + TokenURL: "https://accounts.google.com/o/oauth2/token", + } + + opts = append(opts, 
option.WithHTTPClient(conf.Client(ctx))) + } else { + opts = append(opts, option.WithScopes(storage.ScopeReadWrite)) } + opts = append(opts, option.WithUserAgent(terraform.UserAgentString())) client, err := storage.NewClient(b.storageContext, opts...) if err != nil { return fmt.Errorf("storage.NewClient() failed: %v", err) @@ -125,22 +154,13 @@ func (b *gcsBackend) configure(ctx context.Context) error { b.storageClient = client - return b.ensureBucketExists() + return nil } -func (b *gcsBackend) ensureBucketExists() error { - _, err := b.storageClient.Bucket(b.bucketName).Attrs(b.storageContext) - if err != storage.ErrBucketNotExist { - return err - } - - if b.projectID == "" { - return fmt.Errorf("bucket %q does not exist; specify the \"project\" option or create the bucket manually using `gsutil mb gs://%s`", b.bucketName, b.bucketName) - } - - attrs := &storage.BucketAttrs{ - Location: b.region, - } - - return b.storageClient.Bucket(b.bucketName).Create(b.storageContext, b.projectID, attrs) +// accountFile represents the structure of the account file JSON file. 
+type accountFile struct { + PrivateKeyId string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + ClientEmail string `json:"client_email"` + ClientId string `json:"client_id"` } diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend_state.go index 05fd1d13ac9..eddcbcbac37 100644 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend_state.go +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend_state.go @@ -91,50 +91,53 @@ func (b *gcsBackend) State(name string) (state.State, error) { } st := &remote.State{Client: c} - lockInfo := state.NewLockInfo() - lockInfo.Operation = "init" - lockID, err := st.Lock(lockInfo) - if err != nil { + + // Grab the value + if err := st.RefreshState(); err != nil { return nil, err } - // Local helper function so we can call it multiple places - unlock := func(baseErr error) error { - if err := st.Unlock(lockID); err != nil { - const unlockErrMsg = `%v -Additionally, unlocking the state file on Google Cloud Storage failed: - - Error message: %q - Lock ID (gen): %v - Lock file URL: %v + // If we have no state, we have to create an empty state + if v := st.State(); v == nil { -You may have to force-unlock this state in order to use it again. 
-The GCloud backend acquires a lock during initialization to ensure -the initial state file is created.` - return fmt.Errorf(unlockErrMsg, baseErr, err.Error(), lockID, c.lockFileURL()) + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockID, err := st.Lock(lockInfo) + if err != nil { + return nil, err } - return baseErr - } + // Local helper function so we can call it multiple places + unlock := func(baseErr error) error { + if err := st.Unlock(lockID); err != nil { + const unlockErrMsg = `%v + Additionally, unlocking the state file on Google Cloud Storage failed: - // Grab the value - if err := st.RefreshState(); err != nil { - return nil, unlock(err) - } + Error message: %q + Lock ID (gen): %v + Lock file URL: %v + + You may have to force-unlock this state in order to use it again. + The GCloud backend acquires a lock during initialization to ensure + the initial state file is created.` + return fmt.Errorf(unlockErrMsg, baseErr, err.Error(), lockID, c.lockFileURL()) + } + + return baseErr + } - // If we have no state, we have to create an empty state - if v := st.State(); v == nil { if err := st.WriteState(terraform.NewState()); err != nil { return nil, unlock(err) } if err := st.PersistState(); err != nil { return nil, unlock(err) } - } - // Unlock, the state should now be initialized - if err := unlock(nil); err != nil { - return nil, err + // Unlock, the state should now be initialized + if err := unlock(nil); err != nil { + return nil, err + } + } return st, nil diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go index a9018fd278b..f5607e62397 100644 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go @@ -136,6 +136,13 @@ func New() backend.Backend { Default: false, }, + "skip_region_validation": { + Type: schema.TypeBool, + 
Optional: true, + Description: "Skip static validation of region name.", + Default: false, + }, + "skip_requesting_account_id": { Type: schema.TypeBool, Optional: true, @@ -243,6 +250,7 @@ func (b *Backend) configure(ctx context.Context) error { Token: data.Get("token").(string), SkipCredsValidation: data.Get("skip_credentials_validation").(bool), SkipGetEC2Platforms: data.Get("skip_get_ec2_platforms").(bool), + SkipRegionValidation: data.Get("skip_region_validation").(bool), SkipRequestingAccountId: data.Get("skip_requesting_account_id").(bool), SkipMetadataApiCheck: data.Get("skip_metadata_api_check").(bool), } diff --git a/vendor/github.com/hashicorp/terraform/backend/testing.go b/vendor/github.com/hashicorp/terraform/backend/testing.go index d7cc40ca0b9..a608b0c365c 100644 --- a/vendor/github.com/hashicorp/terraform/backend/testing.go +++ b/vendor/github.com/hashicorp/terraform/backend/testing.go @@ -281,6 +281,14 @@ func testBackendStateLock(t *testing.T, b1, b2 Backend) { t.Fatal("unable to get initial lock:", err) } + // Make sure we can still get the state.State from another instance even + // when locked. This should only happen when a state is loaded via the + // backend, and as a remote state. 
+ _, err = b2.State(DefaultStateName) + if err != nil { + t.Fatalf("failed to read locked state from another backend instance: %s", err) + } + // If the lock ID is blank, assume locking is disabled if lockIDA == "" { t.Logf("TestBackend: %T: empty string returned for lock, assuming disabled", b1) diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/data_source_state.go b/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/data_source_state.go index 9cb39d0eaf8..1f0fbea4835 100644 --- a/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/data_source_state.go +++ b/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/data_source_state.go @@ -43,6 +43,13 @@ func dataSourceRemoteState() *schema.Resource { }, "environment": { + Type: schema.TypeString, + Optional: true, + Default: backend.DefaultStateName, + Deprecated: "Terraform environments are now called workspaces. Please use the workspace key instead.", + }, + + "workspace": { Type: schema.TypeString, Optional: true, Default: backend.DefaultStateName, @@ -84,9 +91,13 @@ func dataSourceRemoteStateRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error initializing backend: %s", err) } - // Get the state - env := d.Get("environment").(string) - state, err := b.State(env) + // environment is deprecated in favour of workspace. + // If both keys are set workspace should win. 
+ name := d.Get("environment").(string) + if ws, ok := d.GetOk("workspace"); ok { + name = ws.(string) + } + state, err := b.State(name) if err != nil { return fmt.Errorf("error loading the remote state: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/salt-masterless/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/salt-masterless/resource_provisioner.go index b81a3703789..70942c5d7b6 100644 --- a/vendor/github.com/hashicorp/terraform/builtin/provisioners/salt-masterless/resource_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/salt-masterless/resource_provisioner.go @@ -10,6 +10,7 @@ import ( "context" "errors" "fmt" + "io" "os" "path/filepath" @@ -17,6 +18,7 @@ import ( "github.com/hashicorp/terraform/communicator/remote" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" + linereader "github.com/mitchellh/go-linereader" ) type provisionFn func(terraform.UIOutput, communicator.Communicator) error @@ -139,14 +141,46 @@ func applyFn(ctx context.Context) error { } o.Output(fmt.Sprintf("Downloading saltstack bootstrap to /tmp/install_salt.sh")) if err = comm.Start(cmd); err != nil { - return fmt.Errorf("Unable to download Salt: %s", err) + err = fmt.Errorf("Unable to download Salt: %s", err) } + + if err == nil { + cmd.Wait() + if cmd.ExitStatus != 0 { + err = fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) + } + } + + outR, outW := io.Pipe() + errR, errW := io.Pipe() + outDoneCh := make(chan struct{}) + errDoneCh := make(chan struct{}) + go copyOutput(o, outR, outDoneCh) + go copyOutput(o, errR, errDoneCh) cmd = &remote.Cmd{ Command: fmt.Sprintf("%s /tmp/install_salt.sh %s", p.sudo("sh"), p.BootstrapArgs), + Stdout: outW, + Stderr: errW, } + o.Output(fmt.Sprintf("Installing Salt with command %s", cmd.Command)) if err = comm.Start(cmd); err != nil { - return fmt.Errorf("Unable to install Salt: %s", 
err) + err = fmt.Errorf("Unable to install Salt: %s", err) + } + + if err == nil { + cmd.Wait() + if cmd.ExitStatus != 0 { + err = fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) + } + } + // Wait for output to clean up + outW.Close() + errW.Close() + <-outDoneCh + <-errDoneCh + if err != nil { + return err } } @@ -212,17 +246,39 @@ func applyFn(ctx context.Context) error { } } + outR, outW := io.Pipe() + errR, errW := io.Pipe() + outDoneCh := make(chan struct{}) + errDoneCh := make(chan struct{}) + + go copyOutput(o, outR, outDoneCh) + go copyOutput(o, errR, errDoneCh) o.Output(fmt.Sprintf("Running: salt-call --local %s", p.CmdArgs)) - cmd := &remote.Cmd{Command: p.sudo(fmt.Sprintf("salt-call --local %s", p.CmdArgs))} + cmd := &remote.Cmd{ + Command: p.sudo(fmt.Sprintf("salt-call --local %s", p.CmdArgs)), + Stdout: outW, + Stderr: errW, + } if err = comm.Start(cmd); err != nil || cmd.ExitStatus != 0 { if err == nil { err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) } - return fmt.Errorf("Error executing salt-call: %s", err) + err = fmt.Errorf("Error executing salt-call: %s", err) + } + if err == nil { + cmd.Wait() + if cmd.ExitStatus != 0 { + err = fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) + } } + // Wait for output to clean up + outW.Close() + errW.Close() + <-outDoneCh + <-errDoneCh - return nil + return err } // Prepends sudo to supplied command if config says to @@ -466,3 +522,12 @@ func decodeConfig(d *schema.ResourceData) (*provisioner, error) { return p, nil } + +func copyOutput( + o terraform.UIOutput, r io.Reader, doneCh chan<- struct{}) { + defer close(doneCh) + lr := linereader.New(r) + for line := range lr.Ch { + o.Output(line) + } +} diff --git a/vendor/github.com/hashicorp/terraform/command/apply.go b/vendor/github.com/hashicorp/terraform/command/apply.go index 18f3f981f39..c65b2df5178 100644 --- a/vendor/github.com/hashicorp/terraform/command/apply.go +++ 
b/vendor/github.com/hashicorp/terraform/command/apply.go @@ -8,7 +8,8 @@ import ( "sort" "strings" - "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/config" @@ -24,9 +25,6 @@ type ApplyCommand struct { // If true, then this apply command will become the "destroy" // command. It is just like apply but only processes a destroy. Destroy bool - - // When this channel is closed, the apply will be cancelled. - ShutdownCh <-chan struct{} } func (c *ApplyCommand) Run(args []string) int { @@ -121,31 +119,20 @@ func (c *ApplyCommand) Run(args []string) int { configPath = "" } + var diags tfdiags.Diagnostics + // Load the module if we don't have one yet (not running from plan) var mod *module.Tree if plan == nil { - mod, err = c.Module(configPath) - if err != nil { - err = errwrap.Wrapf("Failed to load root config module: {{err}}", err) - c.showDiagnostics(err) + var modDiags tfdiags.Diagnostics + mod, modDiags = c.Module(configPath) + diags = diags.Append(modDiags) + if modDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } } - /* - terraform.SetDebugInfo(DefaultDataDir) - - // Check for the legacy graph - if experiment.Enabled(experiment.X_legacyGraph) { - c.Ui.Output(c.Colorize().Color( - "[reset][bold][yellow]" + - "Legacy graph enabled! This will use the graph from Terraform 0.7.x\n" + - "to execute this operation. 
This will be removed in the future so\n" + - "please report any issues causing you to use this to the Terraform\n" + - "project.\n\n")) - } - */ - var conf *config.Config if mod != nil { conf = mod.Config() @@ -174,6 +161,7 @@ func (c *ApplyCommand) Run(args []string) int { // Perform the operation ctx, ctxCancel := context.WithCancel(context.Background()) defer ctxCancel() + op, err := b.Operation(ctx, opReq) if err != nil { c.Ui.Error(fmt.Sprintf("Error starting operation: %s", err)) @@ -200,11 +188,15 @@ func (c *ApplyCommand) Run(args []string) int { } case <-op.Done(): if err := op.Err; err != nil { - c.showDiagnostics(err) - return 1 + diags = diags.Append(err) } } + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + if !c.Destroy { // Get the right module that we used. If we ran a plan, then use // that module. diff --git a/vendor/github.com/hashicorp/terraform/command/autocomplete.go b/vendor/github.com/hashicorp/terraform/command/autocomplete.go index cc1ad145bd3..1ce90e4c9fb 100644 --- a/vendor/github.com/hashicorp/terraform/command/autocomplete.go +++ b/vendor/github.com/hashicorp/terraform/command/autocomplete.go @@ -2,7 +2,6 @@ package command import ( "github.com/posener/complete" - "github.com/posener/complete/match" ) // This file contains some re-usable predictors for auto-complete. 
The @@ -63,17 +62,6 @@ func (m *Meta) completePredictWorkspaceName() complete.Predictor { } names, _ := b.States() - - if a.Last != "" { - // filter for names that match the prefix only - filtered := make([]string, 0, len(names)) - for _, name := range names { - if match.Prefix(name, a.Last) { - filtered = append(filtered, name) - } - } - names = filtered - } return names }) } diff --git a/vendor/github.com/hashicorp/terraform/command/command.go b/vendor/github.com/hashicorp/terraform/command/command.go index 0fbb44df36b..0cd11da0878 100644 --- a/vendor/github.com/hashicorp/terraform/command/command.go +++ b/vendor/github.com/hashicorp/terraform/command/command.go @@ -7,7 +7,6 @@ import ( "runtime" "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" ) // Set to true when we're testing @@ -82,39 +81,18 @@ func ModulePath(args []string) (string, error) { return args[0], nil } -func validateContext(ctx *terraform.Context, ui cli.Ui) bool { +func (m *Meta) validateContext(ctx *terraform.Context) bool { log.Println("[INFO] Validating the context...") - ws, es := ctx.Validate() - log.Printf("[INFO] Validation result: %d warnings, %d errors", len(ws), len(es)) + diags := ctx.Validate() + log.Printf("[INFO] Validation result: %d diagnostics", len(diags)) - if len(ws) > 0 || len(es) > 0 { - ui.Output( + if len(diags) > 0 { + m.Ui.Output( "There are warnings and/or errors related to your configuration. Please\n" + "fix these before continuing.\n") - if len(ws) > 0 { - ui.Warn("Warnings:\n") - for _, w := range ws { - ui.Warn(fmt.Sprintf(" * %s", w)) - } - - if len(es) > 0 { - ui.Output("") - } - } - - if len(es) > 0 { - ui.Error("Errors:\n") - for _, e := range es { - ui.Error(fmt.Sprintf(" * %s", e)) - } - return false - } else { - ui.Warn(fmt.Sprintf("\n"+ - "No errors found. 
Continuing with %d warning(s).\n", len(ws))) - return true - } + m.showDiagnostics(diags) } - return true + return !diags.HasErrors() } diff --git a/vendor/github.com/hashicorp/terraform/command/console.go b/vendor/github.com/hashicorp/terraform/command/console.go index 82355831129..cf7e15f61e1 100644 --- a/vendor/github.com/hashicorp/terraform/command/console.go +++ b/vendor/github.com/hashicorp/terraform/command/console.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/wrappedstreams" "github.com/hashicorp/terraform/repl" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) @@ -17,9 +18,6 @@ import ( // configuration and actually builds or changes infrastructure. type ConsoleCommand struct { Meta - - // When this channel is closed, the apply will be cancelled. - ShutdownCh <-chan struct{} } func (c *ConsoleCommand) Run(args []string) int { @@ -41,10 +39,12 @@ func (c *ConsoleCommand) Run(args []string) int { return 1 } + var diags tfdiags.Diagnostics + // Load the module - mod, err := c.Module(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + mod, diags := c.Module(configPath) + if diags.HasErrors() { + c.showDiagnostics(diags) return 1 } diff --git a/vendor/github.com/hashicorp/terraform/command/graph.go b/vendor/github.com/hashicorp/terraform/command/graph.go index 2fb29c7b0a5..7723043e8f4 100644 --- a/vendor/github.com/hashicorp/terraform/command/graph.go +++ b/vendor/github.com/hashicorp/terraform/command/graph.go @@ -5,6 +5,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" @@ -56,12 +58,16 @@ func (c *GraphCommand) Run(args []string) int { configPath = "" } + var diags tfdiags.Diagnostics + // Load the module var mod *module.Tree if plan == nil { - mod, err = c.Module(configPath) 
- if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + var modDiags tfdiags.Diagnostics + mod, modDiags = c.Module(configPath) + diags = diags.Append(modDiags) + if modDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } } @@ -143,6 +149,14 @@ func (c *GraphCommand) Run(args []string) int { return 1 } + if diags.HasErrors() { + // For this command we only show diagnostics if there are errors, + // because printing out naked warnings could upset a naive program + // consuming our dot output. + c.showDiagnostics(diags) + return 1 + } + c.Ui.Output(graphStr) return 0 diff --git a/vendor/github.com/hashicorp/terraform/command/import.go b/vendor/github.com/hashicorp/terraform/command/import.go index 0dc2003a02b..cbaeec5f492 100644 --- a/vendor/github.com/hashicorp/terraform/command/import.go +++ b/vendor/github.com/hashicorp/terraform/command/import.go @@ -6,10 +6,13 @@ import ( "os" "strings" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // ImportCommand is a cli.Command implementation that imports resources @@ -71,13 +74,29 @@ func (c *ImportCommand) Run(args []string) int { return 1 } + var diags tfdiags.Diagnostics + // Load the module var mod *module.Tree if configPath != "" { - var err error - mod, err = c.Module(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + if empty, _ := config.IsEmptyDir(configPath); empty { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "No Terraform configuration files", + Detail: fmt.Sprintf( + "The directory %s does not contain any Terraform configuration files (.tf or .tf.json). 
To specify a different configuration directory, use the -config=\"...\" command line option.", + configPath, + ), + }) + c.showDiagnostics(diags) + return 1 + } + + var modDiags tfdiags.Diagnostics + mod, modDiags = c.Module(configPath) + diags = diags.Append(modDiags) + if modDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } } @@ -89,11 +108,15 @@ func (c *ImportCommand) Run(args []string) int { targetMod := mod.Child(addr.Path) if targetMod == nil { modulePath := addr.WholeModuleAddress().String() - if modulePath == "" { - c.Ui.Error(importCommandMissingConfigMsg) - } else { - c.Ui.Error(fmt.Sprintf(importCommandMissingModuleFmt, modulePath)) - } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Import to non-existent module", + Detail: fmt.Sprintf( + "%s is not defined in the configuration. Please add configuration for this module before importing into it.", + modulePath, + ), + }) + c.showDiagnostics(diags) return 1 } rcs := targetMod.Config().Resources @@ -109,6 +132,14 @@ func (c *ImportCommand) Run(args []string) int { if modulePath == "" { modulePath = "the root module" } + + c.showDiagnostics(diags) + + // This is not a diagnostic because currently our diagnostics printer + // doesn't support having a code example in the detail, and there's + // a code example in this message. + // TODO: Improve the diagnostics printer so we can use it for this + // message. 
c.Ui.Error(fmt.Sprintf( importCommandMissingResourceFmt, addr, modulePath, addr.Type, addr.Name, @@ -166,7 +197,8 @@ func (c *ImportCommand) Run(args []string) int { }, }) if err != nil { - c.Ui.Error(fmt.Sprintf("Error importing: %s", err)) + diags = diags.Append(err) + c.showDiagnostics(diags) return 1 } @@ -187,6 +219,11 @@ func (c *ImportCommand) Run(args []string) int { c.Ui.Output(c.Colorize().Color("[reset][yellow]\n" + importCommandAllowMissingResourceMsg)) } + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + return 0 } @@ -285,18 +322,7 @@ const importCommandResourceModeMsg = `Error: resource address must refer to a ma Data resources cannot be imported. ` -const importCommandMissingConfigMsg = `Error: no configuration files in this directory. - -"terraform import" can only be run in a Terraform configuration directory. -Create one or more .tf files in this directory to import here. -` - -const importCommandMissingModuleFmt = `Error: %s does not exist in the configuration. - -Please add the configuration for the module before importing resources into it. -` - -const importCommandMissingResourceFmt = `Error: resource address %q does not exist in the configuration. +const importCommandMissingResourceFmt = `[reset][bold][red]Error:[reset][bold] resource address %q does not exist in the configuration.[reset] Before importing this resource, please create its configuration in %s. For example: diff --git a/vendor/github.com/hashicorp/terraform/command/init.go b/vendor/github.com/hashicorp/terraform/command/init.go index 1deb208896e..66b1c7028a4 100644 --- a/vendor/github.com/hashicorp/terraform/command/init.go +++ b/vendor/github.com/hashicorp/terraform/command/init.go @@ -274,19 +274,15 @@ func (c *InitCommand) Run(args []string) int { // Load the complete module tree, and fetch any missing providers. // This method outputs its own Ui. 
func (c *InitCommand) getProviders(path string, state *terraform.State, upgrade bool) error { - mod, err := c.Module(path) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error getting plugins: %s", err)) - return err - } - - if err := mod.Validate(); err != nil { - c.Ui.Error(fmt.Sprintf("Error getting plugins: %s", err)) - return err + mod, diags := c.Module(path) + if diags.HasErrors() { + c.showDiagnostics(diags) + return diags.Err() } if err := terraform.CheckRequiredVersion(mod); err != nil { - c.Ui.Error(err.Error()) + diags = diags.Append(err) + c.showDiagnostics(diags) return err } @@ -396,7 +392,7 @@ func (c *InitCommand) getProviders(path string, state *terraform.State, upgrade digests[name] = nil } } - err = c.providerPluginsLock().Write(digests) + err := c.providerPluginsLock().Write(digests) if err != nil { c.Ui.Error(fmt.Sprintf("failed to save provider manifest: %s", err)) return err diff --git a/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go b/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go index 6834bf5f7ea..7993e9a548d 100644 --- a/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go +++ b/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go @@ -6,6 +6,7 @@ package command import ( chefprovisioner "github.com/hashicorp/terraform/builtin/provisioners/chef" fileprovisioner "github.com/hashicorp/terraform/builtin/provisioners/file" + habitatprovisioner "github.com/hashicorp/terraform/builtin/provisioners/habitat" localexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/local-exec" remoteexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/remote-exec" saltmasterlessprovisioner "github.com/hashicorp/terraform/builtin/provisioners/salt-masterless" @@ -18,6 +19,7 @@ var InternalProviders = map[string]plugin.ProviderFunc{} var InternalProvisioners = map[string]plugin.ProvisionerFunc{ "chef": chefprovisioner.Provisioner, "file": 
fileprovisioner.Provisioner, + "habitat": habitatprovisioner.Provisioner, "local-exec": localexecprovisioner.Provisioner, "remote-exec": remoteexecprovisioner.Provisioner, "salt-masterless": saltmasterlessprovisioner.Provisioner, diff --git a/vendor/github.com/hashicorp/terraform/command/meta.go b/vendor/github.com/hashicorp/terraform/command/meta.go index 92ddd831541..27f7765f95d 100644 --- a/vendor/github.com/hashicorp/terraform/command/meta.go +++ b/vendor/github.com/hashicorp/terraform/command/meta.go @@ -76,6 +76,9 @@ type Meta struct { // is not suitable, e.g. because of a read-only filesystem. OverrideDataDir string + // When this channel is closed, the command will be cancelled. + ShutdownCh <-chan struct{} + //---------------------------------------------------------- // Protected: commands can set these //---------------------------------------------------------- diff --git a/vendor/github.com/hashicorp/terraform/command/meta_new.go b/vendor/github.com/hashicorp/terraform/command/meta_new.go index 5fc3cdca366..9a935a3ca00 100644 --- a/vendor/github.com/hashicorp/terraform/command/meta_new.go +++ b/vendor/github.com/hashicorp/terraform/command/meta_new.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // NOTE: Temporary file until this branch is cleaned up. @@ -34,7 +35,14 @@ func (m *Meta) Input() bool { // // It expects the modules to already be downloaded. This will never // download any modules. -func (m *Meta) Module(path string) (*module.Tree, error) { +// +// The configuration is validated before returning, so the returned diagnostics +// may contain warnings and/or errors. If the diagnostics contains only +// warnings, the caller may treat the returned module.Tree as valid after +// presenting the warnings to the user. 
+func (m *Meta) Module(path string) (*module.Tree, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + mod, err := module.NewTreeModule("", path) if err != nil { // Check for the error where we have no config files @@ -42,15 +50,19 @@ func (m *Meta) Module(path string) (*module.Tree, error) { return nil, nil } - return nil, err + diags = diags.Append(err) + return nil, diags } err = mod.Load(m.moduleStorage(m.DataDir(), module.GetModeNone)) if err != nil { - return nil, errwrap.Wrapf("Error loading modules: {{err}}", err) + diags = diags.Append(errwrap.Wrapf("Error loading modules: {{err}}", err)) + return nil, diags } - return mod, nil + diags = diags.Append(mod.Validate()) + + return mod, diags } // Config loads the root config for the path specified. Path may be a directory diff --git a/vendor/github.com/hashicorp/terraform/command/plan.go b/vendor/github.com/hashicorp/terraform/command/plan.go index 757984f8fac..ec882b63937 100644 --- a/vendor/github.com/hashicorp/terraform/command/plan.go +++ b/vendor/github.com/hashicorp/terraform/command/plan.go @@ -5,10 +5,10 @@ import ( "fmt" "strings" - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/tfdiags" ) // PlanCommand is a Command implementation that compares a Terraform @@ -69,13 +69,16 @@ func (c *PlanCommand) Run(args []string) int { configPath = "" } + var diags tfdiags.Diagnostics + // Load the module if we don't have one yet (not running from plan) var mod *module.Tree if plan == nil { - mod, err = c.Module(configPath) - if err != nil { - err = errwrap.Wrapf("Failed to load root config module: {{err}}", err) - c.showDiagnostics(err) + var modDiags tfdiags.Diagnostics + mod, modDiags = c.Module(configPath) + diags = diags.Append(modDiags) + if modDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } } @@ -104,26 +107,41 @@ func (c *PlanCommand) Run(args 
[]string) int { opReq.Type = backend.OperationTypePlan // Perform the operation - op, err := b.Operation(context.Background(), opReq) + ctx, ctxCancel := context.WithCancel(context.Background()) + defer ctxCancel() + + op, err := b.Operation(ctx, opReq) if err != nil { c.Ui.Error(fmt.Sprintf("Error starting operation: %s", err)) return 1 } - // Wait for the operation to complete - <-op.Done() - if err := op.Err; err != nil { - c.showDiagnostics(err) - return 1 - } + select { + case <-c.ShutdownCh: + // Cancel our context so we can start gracefully exiting + ctxCancel() + + // Notify the user + c.Ui.Output(outputInterrupt) - /* - err = terraform.SetDebugInfo(DefaultDataDir) - if err != nil { - c.Ui.Error(err.Error()) + // Still get the result, since there is still one + select { + case <-c.ShutdownCh: + c.Ui.Error( + "Two interrupts received. Exiting immediately") return 1 + case <-op.Done(): } - */ + case <-op.Done(): + if err := op.Err; err != nil { + diags = diags.Append(err) + } + } + + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } if detailed && !op.PlanEmpty { return 2 diff --git a/vendor/github.com/hashicorp/terraform/command/providers.go b/vendor/github.com/hashicorp/terraform/command/providers.go index 2a755f1268c..49d43962ea3 100644 --- a/vendor/github.com/hashicorp/terraform/command/providers.go +++ b/vendor/github.com/hashicorp/terraform/command/providers.go @@ -39,9 +39,9 @@ func (c *ProvidersCommand) Run(args []string) int { } // Load the config - root, err := c.Module(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + root, diags := c.Module(configPath) + if diags.HasErrors() { + c.showDiagnostics(diags) return 1 } if root == nil { @@ -52,13 +52,6 @@ func (c *ProvidersCommand) Run(args []string) int { return 1 } - // Validate the config (to ensure the version constraints are valid) - err = root.Validate() - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - // Load the 
backend b, err := c.Backend(&BackendOpts{ Config: root.Config(), @@ -90,6 +83,11 @@ func (c *ProvidersCommand) Run(args []string) int { c.Ui.Output(printRoot.String()) + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + return 0 } diff --git a/vendor/github.com/hashicorp/terraform/command/push.go b/vendor/github.com/hashicorp/terraform/command/push.go index 1cced2d91a5..039696fd38d 100644 --- a/vendor/github.com/hashicorp/terraform/command/push.go +++ b/vendor/github.com/hashicorp/terraform/command/push.go @@ -89,9 +89,9 @@ func (c *PushCommand) Run(args []string) int { } // Load the module - mod, err := c.Module(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + mod, diags := c.Module(configPath) + if diags.HasErrors() { + c.showDiagnostics(diags) return 1 } if mod == nil { @@ -347,6 +347,12 @@ func (c *PushCommand) Run(args []string) int { c.Ui.Output(c.Colorize().Color(fmt.Sprintf( "[reset][bold][green]Configuration %q uploaded! 
(v%d)", name, vsn))) + + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + return 0 } diff --git a/vendor/github.com/hashicorp/terraform/command/refresh.go b/vendor/github.com/hashicorp/terraform/command/refresh.go index f4b9921f148..eec74281ce6 100644 --- a/vendor/github.com/hashicorp/terraform/command/refresh.go +++ b/vendor/github.com/hashicorp/terraform/command/refresh.go @@ -5,10 +5,10 @@ import ( "fmt" "strings" - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // RefreshCommand is a cli.Command implementation that refreshes the state @@ -41,11 +41,12 @@ func (c *RefreshCommand) Run(args []string) int { return 1 } + var diags tfdiags.Diagnostics + // Load the module - mod, err := c.Module(configPath) - if err != nil { - err = errwrap.Wrapf("Failed to load root config module: {{err}}", err) - c.showDiagnostics(err) + mod, diags := c.Module(configPath) + if diags.HasErrors() { + c.showDiagnostics(diags) return 1 } @@ -84,7 +85,11 @@ func (c *RefreshCommand) Run(args []string) int { // Wait for the operation to complete <-op.Done() if err := op.Err; err != nil { - c.showDiagnostics(err) + diags = diags.Append(err) + } + + c.showDiagnostics(diags) + if diags.HasErrors() { return 1 } diff --git a/vendor/github.com/hashicorp/terraform/command/validate.go b/vendor/github.com/hashicorp/terraform/command/validate.go index b6aff5a95f9..f48d38e4a50 100644 --- a/vendor/github.com/hashicorp/terraform/command/validate.go +++ b/vendor/github.com/hashicorp/terraform/command/validate.go @@ -5,6 +5,8 @@ import ( "path/filepath" "strings" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/terraform" ) @@ -95,21 +97,27 @@ Options: } func (c *ValidateCommand) validate(dir string, checkVars bool) int { + var diags tfdiags.Diagnostics + cfg, err := 
config.LoadDir(dir) if err != nil { + diags = diags.Append(err) c.showDiagnostics(err) return 1 } - err = cfg.Validate() - if err != nil { - c.showDiagnostics(err) + + diags = diags.Append(cfg.Validate()) + + if diags.HasErrors() { + c.showDiagnostics(diags) return 1 } if checkVars { - mod, err := c.Module(dir) - if err != nil { - c.showDiagnostics(err) + mod, modDiags := c.Module(dir) + diags = diags.Append(modDiags) + if modDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } @@ -118,13 +126,17 @@ func (c *ValidateCommand) validate(dir string, checkVars bool) int { tfCtx, err := terraform.NewContext(opts) if err != nil { - c.showDiagnostics(err) + diags = diags.Append(err) + c.showDiagnostics(diags) return 1 } - if !validateContext(tfCtx, c.Ui) { - return 1 - } + diags = diags.Append(tfCtx.Validate()) + } + + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 } return 0 diff --git a/vendor/github.com/hashicorp/terraform/commands.go b/vendor/github.com/hashicorp/terraform/commands.go index d65437f6668..3335d2cdb59 100644 --- a/vendor/github.com/hashicorp/terraform/commands.go +++ b/vendor/github.com/hashicorp/terraform/commands.go @@ -63,6 +63,8 @@ func initCommands(config *Config) { RunningInAutomation: inAutomation, PluginCacheDir: config.PluginCacheDir, OverrideDataDir: dataDir, + + ShutdownCh: makeShutdownCh(), } // The command list is included in the terraform -help @@ -80,23 +82,20 @@ func initCommands(config *Config) { Commands = map[string]cli.CommandFactory{ "apply": func() (cli.Command, error) { return &command.ApplyCommand{ - Meta: meta, - ShutdownCh: makeShutdownCh(), + Meta: meta, }, nil }, "console": func() (cli.Command, error) { return &command.ConsoleCommand{ - Meta: meta, - ShutdownCh: makeShutdownCh(), + Meta: meta, }, nil }, "destroy": func() (cli.Command, error) { return &command.ApplyCommand{ - Meta: meta, - Destroy: true, - ShutdownCh: makeShutdownCh(), + Meta: meta, + Destroy: true, }, nil }, diff --git 
a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go index 96d63bf432a..055a7f3306f 100644 --- a/vendor/github.com/hashicorp/terraform/config/config.go +++ b/vendor/github.com/hashicorp/terraform/config/config.go @@ -8,10 +8,11 @@ import ( "strconv" "strings" - "github.com/hashicorp/go-multierror" + hcl2 "github.com/hashicorp/hcl2/hcl" "github.com/hashicorp/hil/ast" "github.com/hashicorp/terraform/helper/hilmapstructure" "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/reflectwalk" ) @@ -69,15 +70,6 @@ type ProviderConfig struct { Alias string Version string RawConfig *RawConfig - - // Path records where the Provider was declared in a module tree, so that - // it can be copied into child module providers yet still interpolated in - // the correct scope. - Path []string - - // Inherited is used to skip validation of this config, since any - // interpolated variables won't be declared at this level. - Inherited bool } // A resource represents a single Terraform resource in the configuration. @@ -270,7 +262,9 @@ func (r *Resource) ProviderFullName() string { // the provider name is inferred from the resource type name. func ResourceProviderFullName(resourceType, explicitProvider string) string { if explicitProvider != "" { - return explicitProvider + // check for an explicit provider name, or return the original + parts := strings.SplitAfter(explicitProvider, "provider.") + return parts[len(parts)-1] } idx := strings.IndexRune(resourceType, '_') @@ -285,30 +279,35 @@ func ResourceProviderFullName(resourceType, explicitProvider string) string { } // Validate does some basic semantic checking of the configuration. 
-func (c *Config) Validate() error { +func (c *Config) Validate() tfdiags.Diagnostics { if c == nil { return nil } - var errs []error + var diags tfdiags.Diagnostics for _, k := range c.unknownKeys { - errs = append(errs, fmt.Errorf( - "Unknown root level key: %s", k)) + diags = diags.Append( + fmt.Errorf("Unknown root level key: %s", k), + ) } // Validate the Terraform config if tf := c.Terraform; tf != nil { - errs = append(errs, c.Terraform.Validate()...) + errs := c.Terraform.Validate() + for _, err := range errs { + diags = diags.Append(err) + } } vars := c.InterpolatedVariables() varMap := make(map[string]*Variable) for _, v := range c.Variables { if _, ok := varMap[v.Name]; ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "Variable '%s': duplicate found. Variable names must be unique.", - v.Name)) + v.Name, + )) } varMap[v.Name] = v @@ -316,17 +315,19 @@ func (c *Config) Validate() error { for k, _ := range varMap { if !NameRegexp.MatchString(k) { - errs = append(errs, fmt.Errorf( - "variable %q: variable name must match regular expresion %s", - k, NameRegexp)) + diags = diags.Append(fmt.Errorf( + "variable %q: variable name must match regular expression %s", + k, NameRegexp, + )) } } for _, v := range c.Variables { if v.Type() == VariableTypeUnknown { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "Variable '%s': must be a string or a map", - v.Name)) + v.Name, + )) continue } @@ -347,9 +348,10 @@ func (c *Config) Validate() error { if v.Default != nil { if err := reflectwalk.Walk(v.Default, w); err == nil { if interp { - errs = append(errs, fmt.Errorf( - "Variable '%s': cannot contain interpolations", - v.Name)) + diags = diags.Append(fmt.Errorf( + "variable %q: default may not contain interpolations", + v.Name, + )) } } } @@ -365,10 +367,11 @@ func (c *Config) Validate() error { } if _, ok := varMap[uv.Name]; !ok { - errs = append(errs, fmt.Errorf( - "%s: unknown variable referenced: '%s'. 
define it with 'variable' blocks", + diags = diags.Append(fmt.Errorf( + "%s: unknown variable referenced: '%s'; define it with a 'variable' block", source, - uv.Name)) + uv.Name, + )) } } } @@ -379,17 +382,19 @@ func (c *Config) Validate() error { switch v := rawV.(type) { case *CountVariable: if v.Type == CountValueInvalid { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: invalid count variable: %s", source, - v.FullKey())) + v.FullKey(), + )) } case *PathVariable: if v.Type == PathValueInvalid { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: invalid path variable: %s", source, - v.FullKey())) + v.FullKey(), + )) } } } @@ -397,27 +402,35 @@ func (c *Config) Validate() error { // Check that providers aren't declared multiple times and that their // version constraints, where present, are syntactically valid. - providerSet := make(map[string]struct{}) + providerSet := make(map[string]bool) for _, p := range c.ProviderConfigs { name := p.FullName() if _, ok := providerSet[name]; ok { - errs = append(errs, fmt.Errorf( - "provider.%s: declared multiple times, you can only declare a provider once", - name)) + diags = diags.Append(fmt.Errorf( + "provider.%s: multiple configurations present; only one configuration is allowed per provider", + name, + )) continue } if p.Version != "" { _, err := discovery.ConstraintStr(p.Version).Parse() if err != nil { - errs = append(errs, fmt.Errorf( - "provider.%s: invalid version constraint %q: %s", - name, p.Version, err, - )) + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid provider version constraint", + Detail: fmt.Sprintf( + "The value %q given for provider.%s is not a valid version constraint.", + p.Version, name, + ), + // TODO: include a "Subject" source reference in here, + // once the config loader is able to retain source + // location information. 
+ }) } } - providerSet[name] = struct{}{} + providerSet[name] = true } // Check that all references to modules are valid @@ -429,9 +442,10 @@ func (c *Config) Validate() error { if _, ok := dupped[m.Id()]; !ok { dupped[m.Id()] = struct{}{} - errs = append(errs, fmt.Errorf( - "%s: module repeated multiple times", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module repeated multiple times", + m.Id(), + )) } // Already seen this module, just skip it @@ -445,21 +459,23 @@ func (c *Config) Validate() error { "root": m.Source, }) if err != nil { - errs = append(errs, fmt.Errorf( - "%s: module source error: %s", - m.Id(), err)) + diags = diags.Append(fmt.Errorf( + "module %q: module source error: %s", + m.Id(), err, + )) } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( - "%s: module source cannot contain interpolations", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module source cannot contain interpolations", + m.Id(), + )) } // Check that the name matches our regexp if !NameRegexp.Match([]byte(m.Name)) { - errs = append(errs, fmt.Errorf( - "%s: module name can only contain letters, numbers, "+ - "dashes, and underscores", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module name must be a letter or underscore followed by only letters, numbers, dashes, and underscores", + m.Id(), + )) } // Check that the configuration can all be strings, lists or maps @@ -483,30 +499,47 @@ func (c *Config) Validate() error { continue } - errs = append(errs, fmt.Errorf( - "%s: variable %s must be a string, list or map value", - m.Id(), k)) + diags = diags.Append(fmt.Errorf( + "module %q: argument %s must have a string, list, or map value", + m.Id(), k, + )) } // Check for invalid count variables for _, v := range m.RawConfig.Variables { switch v.(type) { case *CountVariable: - errs = append(errs, fmt.Errorf( - "%s: count variables are only valid within resources", m.Name)) + diags = diags.Append(fmt.Errorf( + "module %q: 
count variables are only valid within resources", + m.Name, + )) case *SelfVariable: - errs = append(errs, fmt.Errorf( - "%s: self variables are only valid within resources", m.Name)) + diags = diags.Append(fmt.Errorf( + "module %q: self variables are only valid within resources", + m.Name, + )) } } // Update the raw configuration to only contain the string values m.RawConfig, err = NewRawConfig(raw) if err != nil { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: can't initialize configuration: %s", - m.Id(), err)) + m.Id(), err, + )) + } + + // check that all named providers actually exist + for _, p := range m.Providers { + if !providerSet[p] { + diags = diags.Append(fmt.Errorf( + "module %q: cannot pass non-existent provider %q", + m.Name, p, + )) + } } + } dupped = nil @@ -520,10 +553,10 @@ func (c *Config) Validate() error { } if _, ok := modules[mv.Name]; !ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: unknown module referenced: %s", - source, - mv.Name)) + source, mv.Name, + )) } } } @@ -536,9 +569,10 @@ func (c *Config) Validate() error { if _, ok := dupped[r.Id()]; !ok { dupped[r.Id()] = struct{}{} - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource repeated multiple times", - r.Id())) + r.Id(), + )) } } @@ -552,15 +586,15 @@ func (c *Config) Validate() error { for _, v := range r.RawCount.Variables { switch v.(type) { case *CountVariable: - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource count can't reference count variable: %s", - n, - v.FullKey())) + n, v.FullKey(), + )) case *SimpleVariable: - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource count can't reference variable: %s", - n, - v.FullKey())) + n, v.FullKey(), + )) // Good case *ModuleVariable: @@ -570,21 +604,24 @@ func (c *Config) Validate() error { case *LocalVariable: default: - errs = append(errs, fmt.Errorf( + diags = 
diags.Append(fmt.Errorf( "Internal error. Unknown type in count var in %s: %T", - n, v)) + n, v, + )) } } if !r.RawCount.couldBeInteger() { - errs = append(errs, fmt.Errorf( - "%s: resource count must be an integer", - n)) + diags = diags.Append(fmt.Errorf( + "%s: resource count must be an integer", n, + )) } r.RawCount.init() // Validate DependsOn - errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...) + for _, err := range c.validateDependsOn(n, r.DependsOn, resources, modules) { + diags = diags.Append(err) + } // Verify provisioners for _, p := range r.Provisioners { @@ -598,9 +635,10 @@ func (c *Config) Validate() error { } if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - errs = append(errs, fmt.Errorf( - "%s: connection info cannot contain splat variable "+ - "referencing itself", n)) + diags = diags.Append(fmt.Errorf( + "%s: connection info cannot contain splat variable referencing itself", + n, + )) break } } @@ -612,9 +650,10 @@ func (c *Config) Validate() error { } if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - errs = append(errs, fmt.Errorf( - "%s: connection info cannot contain splat variable "+ - "referencing itself", n)) + diags = diags.Append(fmt.Errorf( + "%s: connection info cannot contain splat variable referencing itself", + n, + )) break } } @@ -622,21 +661,24 @@ func (c *Config) Validate() error { // Check for invalid when/onFailure values, though this should be // picked up by the loader we check here just in case. 
if p.When == ProvisionerWhenInvalid { - errs = append(errs, fmt.Errorf( - "%s: provisioner 'when' value is invalid", n)) + diags = diags.Append(fmt.Errorf( + "%s: provisioner 'when' value is invalid", n, + )) } if p.OnFailure == ProvisionerOnFailureInvalid { - errs = append(errs, fmt.Errorf( - "%s: provisioner 'on_failure' value is invalid", n)) + diags = diags.Append(fmt.Errorf( + "%s: provisioner 'on_failure' value is invalid", n, + )) } } // Verify ignore_changes contains valid entries for _, v := range r.Lifecycle.IgnoreChanges { if strings.Contains(v, "*") && v != "*" { - errs = append(errs, fmt.Errorf( - "%s: ignore_changes does not support using a partial string "+ - "together with a wildcard: %s", n, v)) + diags = diags.Append(fmt.Errorf( + "%s: ignore_changes does not support using a partial string together with a wildcard: %s", + n, v, + )) } } @@ -645,21 +687,24 @@ func (c *Config) Validate() error { "root": r.Lifecycle.IgnoreChanges, }) if err != nil { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: lifecycle ignore_changes error: %s", - n, err)) + n, err, + )) } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: lifecycle ignore_changes cannot contain interpolations", - n)) + n, + )) } // If it is a data source then it can't have provisioners if r.Mode == DataResourceMode { if _, ok := r.RawConfig.Raw["provisioner"]; ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: data sources cannot have provisioners", - n)) + n, + )) } } } @@ -673,11 +718,12 @@ func (c *Config) Validate() error { id := rv.ResourceId() if _, ok := resources[id]; !ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: unknown resource '%s' referenced in variable %s", source, id, - rv.FullKey())) + rv.FullKey(), + )) continue } } @@ -688,7 +734,7 @@ func (c *Config) Validate() error { found := make(map[string]struct{}) for _, l := range c.Locals { 
if _, ok := found[l.Name]; ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: duplicate local. local value names must be unique", l.Name, )) @@ -698,7 +744,7 @@ func (c *Config) Validate() error { for _, v := range l.RawConfig.Variables { if _, ok := v.(*CountVariable); ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "local %s: count variables are only valid within resources", l.Name, )) } @@ -712,9 +758,10 @@ func (c *Config) Validate() error { for _, o := range c.Outputs { // Verify the output is new if _, ok := found[o.Name]; ok { - errs = append(errs, fmt.Errorf( - "%s: duplicate output. output names must be unique.", - o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: an output of this name was already defined", + o.Name, + )) continue } found[o.Name] = struct{}{} @@ -734,9 +781,10 @@ func (c *Config) Validate() error { continue } - errs = append(errs, fmt.Errorf( - "%s: value for 'sensitive' must be boolean", - o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: value for 'sensitive' must be boolean", + o.Name, + )) continue } if k == "description" { @@ -745,27 +793,78 @@ func (c *Config) Validate() error { continue } - errs = append(errs, fmt.Errorf( - "%s: value for 'description' must be string", - o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: value for 'description' must be string", + o.Name, + )) continue } invalidKeys = append(invalidKeys, k) } if len(invalidKeys) > 0 { - errs = append(errs, fmt.Errorf( - "%s: output has invalid keys: %s", - o.Name, strings.Join(invalidKeys, ", "))) + diags = diags.Append(fmt.Errorf( + "output %q: invalid keys: %s", + o.Name, strings.Join(invalidKeys, ", "), + )) } if !valueKeyFound { - errs = append(errs, fmt.Errorf( - "%s: output is missing required 'value' key", o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: missing required 'value' argument", o.Name, + )) } for _, v := range o.RawConfig.Variables { if _, ok := 
v.(*CountVariable); ok { - errs = append(errs, fmt.Errorf( - "%s: count variables are only valid within resources", o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: count variables are only valid within resources", + o.Name, + )) + } + } + + // Detect a common mistake of using a "count"ed resource in + // an output value without using the splat or index form. + // Prior to 0.11 this error was silently ignored, but outputs + // now have their errors checked like all other contexts. + // + // TODO: Remove this in 0.12. + for _, v := range o.RawConfig.Variables { + rv, ok := v.(*ResourceVariable) + if !ok { + continue + } + + // If the variable seems to be treating the referenced + // resource as a singleton (no count specified) then + // we'll check to make sure it is indeed a singleton. + // It's a warning if not. + + if rv.Multi || rv.Index != 0 { + // This reference is treating the resource as a + // multi-resource, so the warning doesn't apply. + continue + } + + for _, r := range c.Resources { + if r.Id() != rv.ResourceId() { + continue + } + + // We test specifically for the raw string "1" here + // because we _do_ want to generate this warning if + // the user has provided an expression that happens + // to return 1 right now, to catch situations where + // a count might dynamically be set to something + // other than 1 and thus splat syntax is still needed + // to be safe. 
+ if r.RawCount != nil && r.RawCount.Raw != nil && r.RawCount.Raw["count"] != "1" && rv.Field != "count" { + diags = diags.Append(tfdiags.SimpleWarning(fmt.Sprintf( + "output %q: must use splat syntax to access %s attribute %q, because it has \"count\" set; use %s.*.%s to obtain a list of the attributes across all instances", + o.Name, + r.Id(), rv.Field, + r.Id(), rv.Field, + ))) + } } } } @@ -781,17 +880,15 @@ func (c *Config) Validate() error { for _, v := range rc.Variables { if _, ok := v.(*SelfVariable); ok { - errs = append(errs, fmt.Errorf( - "%s: cannot contain self-reference %s", source, v.FullKey())) + diags = diags.Append(fmt.Errorf( + "%s: cannot contain self-reference %s", + source, v.FullKey(), + )) } } } - if len(errs) > 0 { - return &multierror.Error{Errors: errs} - } - - return nil + return diags } // InterpolatedVariables is a helper that returns a mapping of all the interpolated @@ -817,10 +914,6 @@ func (c *Config) rawConfigs() map[string]*RawConfig { } for _, pc := range c.ProviderConfigs { - // this was an inherited config, so we don't validate it at this level. 
- if pc.Inherited { - continue - } source := fmt.Sprintf("provider config '%s'", pc.Name) result[source] = pc.RawConfig } diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go index d9253a20f64..6cb9313e260 100644 --- a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go +++ b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go @@ -2,7 +2,7 @@ package configschema -import "fmt" +import "strconv" const _NestingMode_name = "nestingModeInvalidNestingSingleNestingListNestingSetNestingMap" @@ -10,7 +10,7 @@ var _NestingMode_index = [...]uint8{0, 18, 31, 42, 52, 62} func (i NestingMode) String() string { if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { - return fmt.Sprintf("NestingMode(%d)", i) + return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")" } return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go index 94894ffee47..58dee5cd913 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go @@ -4,12 +4,15 @@ import ( "bytes" "compress/gzip" "crypto/md5" + "crypto/rsa" "crypto/sha1" "crypto/sha256" "crypto/sha512" + "crypto/x509" "encoding/base64" "encoding/hex" "encoding/json" + "encoding/pem" "fmt" "io/ioutil" "math" @@ -103,6 +106,7 @@ func Funcs() map[string]ast.Function { "pow": interpolationFuncPow(), "uuid": interpolationFuncUUID(), "replace": interpolationFuncReplace(), + "rsadecrypt": interpolationFuncRsaDecrypt(), "sha1": interpolationFuncSha1(), "sha256": interpolationFuncSha256(), "sha512": interpolationFuncSha512(), @@ -112,6 +116,7 @@ func Funcs() map[string]ast.Function { "split": interpolationFuncSplit(), 
"substr": interpolationFuncSubstr(), "timestamp": interpolationFuncTimestamp(), + "timeadd": interpolationFuncTimeAdd(), "title": interpolationFuncTitle(), "transpose": interpolationFuncTranspose(), "trimspace": interpolationFuncTrimSpace(), @@ -1504,6 +1509,29 @@ func interpolationFuncTimestamp() ast.Function { } } +func interpolationFuncTimeAdd() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeString, // input timestamp string in RFC3339 format + ast.TypeString, // duration to add to input timestamp that should be parsable by time.ParseDuration + }, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + + ts, err := time.Parse(time.RFC3339, args[0].(string)) + if err != nil { + return nil, err + } + duration, err := time.ParseDuration(args[1].(string)) + if err != nil { + return nil, err + } + + return ts.Add(duration).Format(time.RFC3339), nil + }, + } +} + // interpolationFuncTitle implements the "title" function that returns a copy of the // string in which first characters of all the words are capitalized. func interpolationFuncTitle() ast.Function { @@ -1657,3 +1685,43 @@ func interpolationFuncAbs() ast.Function { }, } } + +// interpolationFuncRsaDecrypt implements the "rsadecrypt" function that does +// RSA decryption. 
+func interpolationFuncRsaDecrypt() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString, ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + key := args[1].(string) + + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return "", fmt.Errorf("Failed to decode input %q: cipher text must be base64-encoded", key) + } + + block, _ := pem.Decode([]byte(key)) + if block == nil { + return "", fmt.Errorf("Failed to read key %q: no key found", key) + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return "", fmt.Errorf( + "Failed to read key %q: password protected keys are\n"+ + "not supported. Please decrypt the key prior to use.", key) + } + + x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return "", err + } + + out, err := rsa.DecryptPKCS1v15(nil, x509Key, b) + if err != nil { + return "", err + } + + return string(out), nil + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/storage.go b/vendor/github.com/hashicorp/terraform/config/module/storage.go index 05065b3c605..0660812b00f 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/storage.go +++ b/vendor/github.com/hashicorp/terraform/config/module/storage.go @@ -9,6 +9,7 @@ import ( "path/filepath" getter "github.com/hashicorp/go-getter" + "github.com/hashicorp/terraform/registry" "github.com/hashicorp/terraform/registry/regsrc" "github.com/hashicorp/terraform/svchost/auth" "github.com/hashicorp/terraform/svchost/disco" @@ -73,20 +74,17 @@ type Storage struct { Ui cli.Ui // Mode is the GetMode that will be used for various operations. 
Mode GetMode + + registry *registry.Client } func NewStorage(dir string, services *disco.Disco, creds auth.CredentialsSource) *Storage { - s := &Storage{ - StorageDir: dir, - Services: services, - Creds: creds, - } + regClient := registry.NewClient(services, creds, nil) - // make sure this isn't nil - if s.Services == nil { - s.Services = disco.NewDisco() + return &Storage{ + StorageDir: dir, + registry: regClient, } - return s } // loadManifest returns the moduleManifest file from the parent directory. @@ -318,7 +316,7 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e // we need to lookup available versions // Only on Get if it's not found, on unconditionally on Update if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) { - resp, err := s.lookupModuleVersions(mod) + resp, err := s.registry.Versions(mod) if err != nil { return rec, err } @@ -338,12 +336,14 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e rec.Version = match.Version - rec.url, err = s.lookupModuleLocation(mod, rec.Version) + rec.url, err = s.registry.Location(mod, rec.Version) if err != nil { return rec, err } - s.output(fmt.Sprintf(" Found version %s of %s on %s", rec.Version, mod.Module(), mod.RawHost.Display())) + // we've already validated this by now + host, _ := mod.SvcHost() + s.output(fmt.Sprintf(" Found version %s of %s on %s", rec.Version, mod.Module(), host.ForDisplay())) } return rec, nil diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go index c649a466300..f56d69b70de 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/tree.go +++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go @@ -9,6 +9,8 @@ import ( "strings" "sync" + "github.com/hashicorp/terraform/tfdiags" + getter "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" ) @@ -188,11 +190,6 @@ func (t *Tree) Load(s *Storage) 
error { // Set our tree up t.children = children - // if we're the root module, we can now set the provider inheritance - if len(t.path) == 0 { - t.inheritProviderConfigs(nil) - } - return nil } @@ -348,93 +345,6 @@ func (t *Tree) getChildren(s *Storage) (map[string]*Tree, error) { return children, nil } -// inheritProviderConfig resolves all provider config inheritance after the -// tree is loaded. -// -// If there is a provider block without a config, look in the parent's Module -// block for a provider, and fetch that provider's configuration. If that -// doesn't exist, assume a default empty config. Implicit providers can still -// inherit their config all the way up from the root, so walk up the tree and -// copy the first matching provider into the module. -func (t *Tree) inheritProviderConfigs(stack []*Tree) { - // the recursive calls only append, so we don't need to worry about copying - // this slice. - stack = append(stack, t) - for _, c := range t.children { - c.inheritProviderConfigs(stack) - } - - if len(stack) == 1 { - return - } - - providers := make(map[string]*config.ProviderConfig) - missingProviders := make(map[string]bool) - - for _, p := range t.config.ProviderConfigs { - providers[p.FullName()] = p - } - - for _, r := range t.config.Resources { - p := r.ProviderFullName() - if _, ok := providers[p]; !(ok || strings.Contains(p, ".")) { - missingProviders[p] = true - } - } - - // get our parent's module config block - parent := stack[len(stack)-2] - var parentModule *config.Module - for _, m := range parent.config.Modules { - if m.Name == t.name { - parentModule = m - break - } - } - - if parentModule == nil { - panic("can't be a module without a parent module config") - } - - // now look for providers that need a config - for p, pc := range providers { - if len(pc.RawConfig.RawMap()) > 0 { - log.Printf("[TRACE] provider %q has a config, continuing", p) - continue - } - - // this provider has no config yet, check for one being passed in - 
parentProviderName, ok := parentModule.Providers[p] - if !ok { - continue - } - - var parentProvider *config.ProviderConfig - // there's a config for us in the parent module - for _, pp := range parent.config.ProviderConfigs { - if pp.FullName() == parentProviderName { - parentProvider = pp - break - } - } - - if parentProvider == nil { - // no config found, assume defaults - continue - } - - // Copy it in, but set an interpolation Scope. - // An interpolation Scope always need to have "root" - pc.Path = append([]string{RootName}, parent.path...) - pc.RawConfig = parentProvider.RawConfig - log.Printf("[TRACE] provider %q inheriting config from %q", - strings.Join(append(t.Path(), pc.FullName()), "."), - strings.Join(append(parent.Path(), parentProvider.FullName()), "."), - ) - } - -} - // Path is the full path to this tree. func (t *Tree) Path() []string { return t.path @@ -475,32 +385,35 @@ func (t *Tree) String() string { // as verifying things such as parameters/outputs between the various modules. // // Load must be called prior to calling Validate or an error will be returned. -func (t *Tree) Validate() error { +func (t *Tree) Validate() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if !t.Loaded() { - return fmt.Errorf("tree must be loaded before calling Validate") + diags = diags.Append(fmt.Errorf( + "tree must be loaded before calling Validate", + )) + return diags } - // If something goes wrong, here is our error template - newErr := &treeError{Name: []string{t.Name()}} - // Terraform core does not handle root module children named "root". // We plan to fix this in the future but this bug was brought up in // the middle of a release and we don't want to introduce wide-sweeping // changes at that time. 
if len(t.path) == 1 && t.name == "root" { - return fmt.Errorf("root module cannot contain module named 'root'") + diags = diags.Append(fmt.Errorf( + "root module cannot contain module named 'root'", + )) + return diags } // Validate our configuration first. - if err := t.config.Validate(); err != nil { - newErr.Add(err) - } + diags = diags.Append(t.config.Validate()) // If we're the root, we do extra validation. This validation usually // requires the entire tree (since children don't have parent pointers). if len(t.path) == 0 { if err := t.validateProviderAlias(); err != nil { - newErr.Add(err) + diags = diags.Append(err) } } @@ -509,20 +422,11 @@ func (t *Tree) Validate() error { // Validate all our children for _, c := range children { - err := c.Validate() - if err == nil { + childDiags := c.Validate() + diags = diags.Append(childDiags) + if diags.HasErrors() { continue } - - verr, ok := err.(*treeError) - if !ok { - // Unknown error, just return... - return err - } - - // Append ourselves to the error and then return - verr.Name = append(verr.Name, t.Name()) - newErr.AddChild(verr) } // Go over all the modules and verify that any parameters are valid @@ -548,9 +452,10 @@ func (t *Tree) Validate() error { // Compare to the keys in our raw config for the module for k, _ := range m.RawConfig.Raw { if _, ok := varMap[k]; !ok { - newErr.Add(fmt.Errorf( - "module %s: %s is not a valid parameter", - m.Name, k)) + diags = diags.Append(fmt.Errorf( + "module %q: %q is not a valid argument", + m.Name, k, + )) } // Remove the required @@ -559,9 +464,10 @@ func (t *Tree) Validate() error { // If we have any required left over, they aren't set. 
for k, _ := range requiredMap { - newErr.Add(fmt.Errorf( - "module %s: required variable %q not set", - m.Name, k)) + diags = diags.Append(fmt.Errorf( + "module %q: missing required argument %q", + m.Name, k, + )) } } @@ -576,9 +482,10 @@ func (t *Tree) Validate() error { tree, ok := children[mv.Name] if !ok { - newErr.Add(fmt.Errorf( - "%s: undefined module referenced %s", - source, mv.Name)) + diags = diags.Append(fmt.Errorf( + "%s: reference to undefined module %q", + source, mv.Name, + )) continue } @@ -590,14 +497,15 @@ func (t *Tree) Validate() error { } } if !found { - newErr.Add(fmt.Errorf( - "%s: %s is not a valid output for module %s", - source, mv.Field, mv.Name)) + diags = diags.Append(fmt.Errorf( + "%s: %q is not a valid output for module %q", + source, mv.Field, mv.Name, + )) } } } - return newErr.ErrOrNil() + return diags } // versionedPathKey returns a path string with every levels full name, version diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go index ea68b4fcdb2..8a55e0603e9 100644 --- a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go +++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go @@ -2,7 +2,7 @@ package config -import "fmt" +import "strconv" const _ResourceMode_name = "ManagedResourceModeDataResourceMode" @@ -10,7 +10,7 @@ var _ResourceMode_index = [...]uint8{0, 19, 35} func (i ResourceMode) String() string { if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { - return fmt.Sprintf("ResourceMode(%d)", i) + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" } return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go index 37c586a11a9..c34e21b25c5 100644 --- 
a/vendor/github.com/hashicorp/terraform/helper/resource/state.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go @@ -46,7 +46,7 @@ type StateChangeConf struct { // If the Timeout is exceeded before reaching the Target state, return an // error. // -// Otherwise, result the result of the first call to the Refresh function to +// Otherwise, the result is the result of the first call to the Refresh function to // reach the target state. func (conf *StateChangeConf) WaitForState() (interface{}, error) { log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go index 05e7c0feb65..aaaddf5cd77 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go @@ -15,6 +15,7 @@ import ( "testing" "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/errwrap" "github.com/hashicorp/go-multierror" "github.com/hashicorp/logutils" "github.com/hashicorp/terraform/config/module" @@ -487,6 +488,15 @@ func Test(t TestT, c TestCase) { } } + // If we expected an error, but did not get one, fail + if err == nil && step.ExpectError != nil { + errored = true + t.Error(fmt.Sprintf( + "Step %d, no error received, but expected a match to:\n\n%s\n\n", + i, step.ExpectError)) + break + } + // If there was an error, exit if err != nil { // Perhaps we expected an error? 
Check if it matches @@ -662,18 +672,12 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r if err != nil { return err } - if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { - if len(es) > 0 { - estrs := make([]string, len(es)) - for i, e := range es { - estrs[i] = e.Error() - } - return fmt.Errorf( - "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v", - ws, estrs) + if diags := ctx.Validate(); len(diags) > 0 { + if diags.HasErrors() { + return errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) } - log.Printf("[WARN] Config warnings: %#v", ws) + log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error()) } // Refresh! @@ -841,12 +845,29 @@ func TestCheckResourceAttrSet(name, key string) TestCheckFunc { return err } - if val, ok := is.Attributes[key]; ok && val != "" { - return nil + return testCheckResourceAttrSet(is, name, key) + } +} + +// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with +// support for non-root modules +func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mp, name) + if err != nil { + return err } + return testCheckResourceAttrSet(is, name, key) + } +} + +func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { + if val, ok := is.Attributes[key]; !ok || val == "" { return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) } + + return nil } // TestCheckResourceAttr is a TestCheckFunc which validates @@ -858,23 +879,39 @@ func TestCheckResourceAttr(name, key, value string) TestCheckFunc { return err } - if v, ok := is.Attributes[key]; !ok || v != value { - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", name, key) - } + return testCheckResourceAttr(is, name, key, value) + } +} - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - name, - key, - value, - v) +// 
TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with +// support for non-root modules +func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mp, name) + if err != nil { + return err } - return nil + return testCheckResourceAttr(is, name, key, value) } } +func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { + if v, ok := is.Attributes[key]; !ok || v != value { + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", name, key) + } + + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + name, + key, + value, + v) + } + return nil +} + // TestCheckNoResourceAttr is a TestCheckFunc which ensures that // NO value exists in state for the given name/key combination. func TestCheckNoResourceAttr(name, key string) TestCheckFunc { @@ -884,12 +921,29 @@ func TestCheckNoResourceAttr(name, key string) TestCheckFunc { return err } - if _, ok := is.Attributes[key]; ok { - return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) + return testCheckNoResourceAttr(is, name, key) + } +} + +// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with +// support for non-root modules +func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mp, name) + if err != nil { + return err } - return nil + return testCheckNoResourceAttr(is, name, key) + } +} + +func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { + if _, ok := is.Attributes[key]; ok { + return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) } + + return nil } // TestMatchResourceAttr is a TestCheckFunc which checks that the value @@ -901,19 +955,36 @@ func TestMatchResourceAttr(name, key string, r 
*regexp.Regexp) TestCheckFunc { return err } - if !r.MatchString(is.Attributes[key]) { - return fmt.Errorf( - "%s: Attribute '%s' didn't match %q, got %#v", - name, - key, - r.String(), - is.Attributes[key]) + return testMatchResourceAttr(is, name, key, r) + } +} + +// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with +// support for non-root modules +func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mp, name) + if err != nil { + return err } - return nil + return testMatchResourceAttr(is, name, key, r) } } +func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { + if !r.MatchString(is.Attributes[key]) { + return fmt.Errorf( + "%s: Attribute '%s' didn't match %q, got %#v", + name, + key, + r.String(), + is.Attributes[key]) + } + + return nil +} + // TestCheckResourceAttrPtr is like TestCheckResourceAttr except the // value is a pointer so that it can be updated while the test is running. // It will only be dereferenced at the point this step is run. @@ -923,6 +994,14 @@ func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckF } } +// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with +// support for non-root modules +func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckModuleResourceAttr(mp, name, key, *value)(s) + } +} + // TestCheckResourceAttrPair is a TestCheckFunc which validates that the values // in state for a pair of name/key combinations are equal. 
func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { @@ -931,31 +1010,55 @@ func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string if err != nil { return err } - vFirst, ok := isFirst.Attributes[keyFirst] - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst) - } isSecond, err := primaryInstanceState(s, nameSecond) if err != nil { return err } - vSecond, ok := isSecond.Attributes[keySecond] - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond) + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + } +} + +// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with +// support for non-root modules +func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { + return func(s *terraform.State) error { + isFirst, err := modulePathPrimaryInstanceState(s, mpFirst, nameFirst) + if err != nil { + return err } - if vFirst != vSecond { - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - nameFirst, - keyFirst, - vSecond, - vFirst) + isSecond, err := modulePathPrimaryInstanceState(s, mpSecond, nameSecond) + if err != nil { + return err } - return nil + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + } +} + +func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { + vFirst, ok := isFirst.Attributes[keyFirst] + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst) + } + + vSecond, ok := isSecond.Attributes[keySecond] + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond) + } + + if vFirst != vSecond { + return fmt.Errorf( + "%s: Attribute 
'%s' expected %#v, got %#v", + nameFirst, + keyFirst, + vSecond, + vFirst) } + + return nil } // TestCheckOutput checks an output in the Terraform configuration @@ -1012,18 +1115,32 @@ type TestT interface { // This is set to true by unit tests to alter some behavior var testTesting = false -// primaryInstanceState returns the primary instance state for the given resource name. -func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { - ms := s.RootModule() +// modulePrimaryInstanceState returns the instance state for the given resource +// name in a ModuleState +func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { rs, ok := ms.Resources[name] if !ok { - return nil, fmt.Errorf("Not found: %s", name) + return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) } is := rs.Primary if is == nil { - return nil, fmt.Errorf("No primary instance: %s", name) + return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) } return is, nil } + +// modulePathPrimaryInstanceState returns the primary instance state for the +// given resource name in a given module path. +func modulePathPrimaryInstanceState(s *terraform.State, mp []string, name string) (*terraform.InstanceState, error) { + ms := s.ModuleByPath(mp) + return modulePrimaryInstanceState(s, ms, name) +} + +// primaryInstanceState returns the primary instance state for the given +// resource name in the root module. 
+func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { + ms := s.RootModule() + return modulePrimaryInstanceState(s, ms, name) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go index 537a11c34ae..300a9ea6eec 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go @@ -5,6 +5,7 @@ import ( "log" "strings" + "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/terraform" ) @@ -33,17 +34,12 @@ func testStep( if err != nil { return state, fmt.Errorf("Error initializing context: %s", err) } - if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { - if len(es) > 0 { - estrs := make([]string, len(es)) - for i, e := range es { - estrs[i] = e.Error() - } - return state, fmt.Errorf( - "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v", - ws, estrs) + if diags := ctx.Validate(); len(diags) > 0 { + if diags.HasErrors() { + return nil, errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) } - log.Printf("[WARN] Config warnings: %#v", ws) + + log.Printf("[WARN] Config warnings:\n%s", diags) } // Refresh! diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go index 689ed8d1cdf..814c7ba8e3f 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go @@ -39,6 +39,19 @@ func (w *MapFieldWriter) unsafeWriteField(addr string, value string) { w.result[addr] = value } +// clearTree clears a field and any sub-fields of the given address out of the +// map. 
This should be used to reset some kinds of complex structures (namely
+ w.clearTree(addr) + for code, elem := range value.(*Set).m { if err := w.set(append(addrCopy, code), elem); err != nil { return err diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go index 3a976293941..38cd8c70d03 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go @@ -2,7 +2,7 @@ package schema -import "fmt" +import "strconv" const ( _getSource_name_0 = "getSourceStategetSourceConfig" @@ -13,8 +13,6 @@ const ( var ( _getSource_index_0 = [...]uint8{0, 14, 29} - _getSource_index_1 = [...]uint8{0, 13} - _getSource_index_2 = [...]uint8{0, 12} _getSource_index_3 = [...]uint8{0, 18, 32} ) @@ -31,6 +29,6 @@ func (i getSource) String() string { i -= 15 return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]] default: - return fmt.Sprintf("getSource(%d)", i) + return "getSource(" + strconv.FormatInt(int64(i), 10) + ")" } } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go index 15aa0b5d49c..9ab8bccaa5b 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go @@ -35,6 +35,8 @@ type ResourceData struct { partialMap map[string]struct{} once sync.Once isNew bool + + panicOnError bool } // getResult is the internal structure that is generated when a Get @@ -184,7 +186,11 @@ func (d *ResourceData) Set(key string, value interface{}) error { } } - return d.setWriter.WriteField(strings.Split(key, "."), value) + err := d.setWriter.WriteField(strings.Split(key, "."), value) + if err != nil && d.panicOnError { + panic(err) + } + return err } // SetPartial adds the key to the final state output while @@ -439,7 +445,7 @@ func (d *ResourceData) init() { } 
func (d *ResourceData) diffChange( - k string) (interface{}, interface{}, bool, bool) { + k string) (interface{}, interface{}, bool, bool, bool) { // Get the change between the state and the config. o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) if !o.Exists { @@ -450,7 +456,7 @@ func (d *ResourceData) diffChange( } // Return the old, new, and whether there is a change - return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed + return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false } func (d *ResourceData) getChange( diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go index 4fc1dbb685a..822d0dc4db0 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go @@ -236,8 +236,8 @@ func (d *ResourceDiff) clear(key string) error { // diffChange helps to implement resourceDiffer and derives its change values // from ResourceDiff's own change data, in addition to existing diff, config, and state. -func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool) { - old, new := d.getChange(key) +func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) { + old, new, customized := d.getChange(key) if !old.Exists { old.Value = nil @@ -246,7 +246,7 @@ func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, b new.Value = nil } - return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed + return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized } // SetNew is used to set a new diff value for the mentioned key. 
The value must @@ -327,7 +327,7 @@ func (d *ResourceDiff) Get(key string) interface{} { // results from the exact levels for the new diff, then from state and diff as // per normal. func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) { - old, new := d.getChange(key) + old, new, _ := d.getChange(key) return old.Value, new.Value } @@ -387,18 +387,17 @@ func (d *ResourceDiff) Id() string { // This implementation differs from ResourceData's in the way that we first get // results from the exact levels for the new diff, then from state and diff as // per normal. -func (d *ResourceDiff) getChange(key string) (getResult, getResult) { +func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) { old := d.get(strings.Split(key, "."), "state") var new getResult for p := range d.updatedKeys { if childAddrOf(key, p) { new = d.getExact(strings.Split(key, "."), "newDiff") - goto done + return old, new, true } } new = d.get(strings.Split(key, "."), "newDiff") -done: - return old, new + return old, new, false } // get performs the appropriate multi-level reader logic for ResourceDiff, diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go index 903d71513f0..d9a7aa1a0c3 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go @@ -25,6 +25,9 @@ import ( "github.com/mitchellh/mapstructure" ) +// Name of ENV variable which (if not empty) prefers panic over error +const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR" + // type used for schema package context keys type contextKey string @@ -293,8 +296,7 @@ func (s *Schema) ZeroValue() interface{} { } } -func (s *Schema) finalizeDiff( - d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff { +func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { if d == nil { return d } @@ -334,14 
+336,21 @@ func (s *Schema) finalizeDiff( return d } - if s.Computed && !d.NewComputed { - if d.Old != "" && d.New == "" { - // This is a computed value with an old value set already, - // just let it go. - return nil + if s.Computed { + // FIXME: This is where the customized bool from getChange finally + // comes into play. It allows the previously incorrect behavior + // of an empty string being used as "unset" when the value is + // computed. This should be removed once we can properly + // represent an unset/nil value from the configuration. + if !customized { + if d.Old != "" && d.New == "" { + // This is a computed value with an old value set already, + // just let it go. + return nil + } } - if d.New == "" { + if d.New == "" && !d.NewComputed { // Computed attribute without a new value set d.NewComputed = true } @@ -358,6 +367,13 @@ func (s *Schema) finalizeDiff( // schemaMap is a wrapper that adds nice functions on top of schemas. type schemaMap map[string]*Schema +func (m schemaMap) panicOnError() bool { + if os.Getenv(PanicOnErr) != "" { + return true + } + return false +} + // Data returns a ResourceData for the given schema, state, and diff. // // The diff is optional. @@ -365,9 +381,10 @@ func (m schemaMap) Data( s *terraform.InstanceState, d *terraform.InstanceDiff) (*ResourceData, error) { return &ResourceData{ - schema: m, - state: s, - diff: d, + schema: m, + state: s, + diff: d, + panicOnError: m.panicOnError(), }, nil } @@ -397,9 +414,10 @@ func (m schemaMap) Diff( } d := &ResourceData{ - schema: m, - state: s, - config: c, + schema: m, + state: s, + config: c, + panicOnError: m.panicOnError(), } for k, schema := range m { @@ -732,7 +750,7 @@ func isValidFieldName(name string) bool { // This helps facilitate diff logic for both ResourceData and ResoureDiff with // minimal divergence in code. 
type resourceDiffer interface { - diffChange(string) (interface{}, interface{}, bool, bool) + diffChange(string) (interface{}, interface{}, bool, bool, bool) Get(string) interface{} GetChange(string) (interface{}, interface{}) GetOk(string) (interface{}, bool) @@ -785,7 +803,7 @@ func (m schemaMap) diffList( diff *terraform.InstanceDiff, d resourceDiffer, all bool) error { - o, n, _, computedList := d.diffChange(k) + o, n, _, computedList, customized := d.diffChange(k) if computedList { n = nil } @@ -852,10 +870,13 @@ func (m schemaMap) diffList( oldStr = "" } - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) } // Figure out the maximum @@ -908,7 +929,7 @@ func (m schemaMap) diffMap( // First get all the values from the state var stateMap, configMap map[string]string - o, n, _, nComputed := d.diffChange(k) + o, n, _, nComputed, customized := d.diffChange(k) if err := mapstructure.WeakDecode(o, &stateMap); err != nil { return fmt.Errorf("%s: %s", k, err) } @@ -960,6 +981,7 @@ func (m schemaMap) diffMap( Old: oldStr, New: newStr, }, + customized, ) } @@ -977,16 +999,22 @@ func (m schemaMap) diffMap( continue } - diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: old, - New: v, - }) + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: old, + New: v, + }, + customized, + ) } for k, v := range stateMap { - diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: v, - NewRemoved: true, - }) + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }, + customized, + ) } return nil @@ -999,7 +1027,7 @@ func (m schemaMap) diffSet( d resourceDiffer, all bool) error { - o, n, _, computedSet := d.diffChange(k) + o, n, _, 
computedSet, customized := d.diffChange(k) if computedSet { n = nil } @@ -1058,20 +1086,26 @@ func (m schemaMap) diffSet( countStr = "" } - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: countStr, - NewComputed: true, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: countStr, + NewComputed: true, + }, + customized, + ) return nil } // If the counts are not the same, then record that diff changed := oldLen != newLen if changed || all { - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) } // Build the list of codes that will make up our set. This is the @@ -1121,7 +1155,7 @@ func (m schemaMap) diffString( all bool) error { var originalN interface{} var os, ns string - o, n, _, computed := d.diffChange(k) + o, n, _, computed, customized := d.diffChange(k) if schema.StateFunc != nil && n != nil { originalN = n n = schema.StateFunc(n) @@ -1158,13 +1192,16 @@ func (m schemaMap) diffString( return nil } - diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: os, - New: ns, - NewExtra: originalN, - NewRemoved: removed, - NewComputed: computed, - }) + diff.Attributes[k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: os, + New: ns, + NewExtra: originalN, + NewRemoved: removed, + NewComputed: computed, + }, + customized, + ) return nil } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go index 1610cec2d32..3bc3ac455ea 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go @@ -2,7 +2,7 @@ package schema -import "fmt" +import "strconv" const 
_ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" @@ -10,7 +10,7 @@ var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} func (i ValueType) String() string { if i < 0 || i >= ValueType(len(_ValueType_index)-1) { - return fmt.Sprintf("ValueType(%d)", i) + return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")" } return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/helper/validation/validation.go b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go index 2ac1028cc10..b9291631573 100644 --- a/vendor/github.com/hashicorp/terraform/helper/validation/validation.go +++ b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go @@ -106,6 +106,27 @@ func StringLenBetween(min, max int) schema.SchemaValidateFunc { } } +// StringMatch returns a SchemaValidateFunc which tests if the provided value +// matches a given regexp. Optionally an error message can be provided to +// return something friendlier than "must match some globby regexp". +func StringMatch(r *regexp.Regexp, message string) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be string", k)} + } + + if ok := r.MatchString(v); !ok { + if message != "" { + return nil, []error{fmt.Errorf("invalid value for %s (%s)", k, message)} + + } + return nil, []error{fmt.Errorf("expected value of %s to match regular expression %q", k, r)} + } + return nil, nil + } +} + // NoZeroValues is a SchemaValidateFunc which tests if the provided value is // not a zero value. It's useful in situations where you want to catch // explicit zero values on things like required fields during validation. 
diff --git a/vendor/github.com/hashicorp/terraform/main.go b/vendor/github.com/hashicorp/terraform/main.go index 77f441d20ab..1818a91c44b 100644 --- a/vendor/github.com/hashicorp/terraform/main.go +++ b/vendor/github.com/hashicorp/terraform/main.go @@ -141,7 +141,6 @@ func wrappedMain() int { // We continue to run anyway, since Terraform has reasonable defaults. } } - log.Printf("[DEBUG] CLI config is %#v", config) // In tests, Commands may already be set to provide mock commands if Commands == nil { diff --git a/vendor/github.com/hashicorp/terraform/config/module/registry.go b/vendor/github.com/hashicorp/terraform/registry/client.go similarity index 64% rename from vendor/github.com/hashicorp/terraform/config/module/registry.go rename to vendor/github.com/hashicorp/terraform/registry/client.go index 10209c4bfa9..b4cd7989f32 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/registry.go +++ b/vendor/github.com/hashicorp/terraform/registry/client.go @@ -1,4 +1,4 @@ -package module +package registry import ( "encoding/json" @@ -12,76 +12,77 @@ import ( "time" cleanhttp "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform/registry/regsrc" "github.com/hashicorp/terraform/registry/response" "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/auth" + "github.com/hashicorp/terraform/svchost/disco" "github.com/hashicorp/terraform/version" ) const ( - defaultRegistry = "registry.terraform.io" - registryServiceID = "registry.v1" xTerraformGet = "X-Terraform-Get" xTerraformVersion = "X-Terraform-Version" requestTimeout = 10 * time.Second serviceID = "modules.v1" ) -var ( - httpClient *http.Client - tfVersion = version.String() -) +var tfVersion = version.String() -func init() { - httpClient = cleanhttp.DefaultPooledClient() - httpClient.Timeout = requestTimeout -} +// Client provides methods to query Terraform Registries. +type Client struct { + // this is the client to be used for all requests. 
+ client *http.Client -type errModuleNotFound string + // services is a required *disco.Disco, which may have services and + // credentials pre-loaded. + services *disco.Disco -func (e errModuleNotFound) Error() string { - return `module "` + string(e) + `" not found` + // Creds optionally provides credentials for communicating with service + // providers. + creds auth.CredentialsSource } -func (s *Storage) discoverRegURL(module *regsrc.Module) *url.URL { - regURL := s.Services.DiscoverServiceURL(svchost.Hostname(module.RawHost.Normalized()), serviceID) - if regURL == nil { - return nil - } - - if !strings.HasSuffix(regURL.Path, "/") { - regURL.Path += "/" +func NewClient(services *disco.Disco, creds auth.CredentialsSource, client *http.Client) *Client { + if services == nil { + services = disco.NewDisco() } - return regURL -} + services.SetCredentialsSource(creds) -func (s *Storage) addRequestCreds(host svchost.Hostname, req *http.Request) { - if s.Creds == nil { - return + if client == nil { + client = cleanhttp.DefaultPooledClient() + client.Timeout = requestTimeout } - creds, err := s.Creds.ForHost(host) - if err != nil { - log.Printf("[WARNING] Failed to get credentials for %s: %s (ignoring)", host, err) - return + services.Transport = client.Transport.(*http.Transport) + + return &Client{ + client: client, + services: services, + creds: creds, } +} - if creds != nil { - creds.PrepareRequest(req) +// Discover qeuries the host, and returns the url for the registry. +func (c *Client) Discover(host svchost.Hostname) *url.URL { + service := c.services.DiscoverServiceURL(host, serviceID) + if !strings.HasSuffix(service.Path, "/") { + service.Path += "/" } + return service } -// Lookup module versions in the registry. 
-func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) { - if module.RawHost == nil { - module.RawHost = regsrc.NewFriendlyHost(defaultRegistry) +// Versions queries the registry for a module, and returns the available versions. +func (c *Client) Versions(module *regsrc.Module) (*response.ModuleVersions, error) { + host, err := module.SvcHost() + if err != nil { + return nil, err } - service := s.discoverRegURL(module) + service := c.Discover(host) if service == nil { - return nil, fmt.Errorf("host %s does not provide Terraform modules", module.RawHost.Display()) + return nil, fmt.Errorf("host %s does not provide Terraform modules", host) } p, err := url.Parse(path.Join(module.Module(), "versions")) @@ -98,10 +99,10 @@ func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleV return nil, err } - s.addRequestCreds(svchost.Hostname(module.RawHost.Normalized()), req) + c.addRequestCreds(host, req) req.Header.Set(xTerraformVersion, tfVersion) - resp, err := httpClient.Do(req) + resp, err := c.client.Do(req) if err != nil { return nil, err } @@ -111,7 +112,7 @@ func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleV case http.StatusOK: // OK case http.StatusNotFound: - return nil, errModuleNotFound(module.String()) + return nil, fmt.Errorf("module %q not found", module.String()) default: return nil, fmt.Errorf("error looking up module versions: %s", resp.Status) } @@ -132,19 +133,36 @@ func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleV return &versions, nil } -// lookup the location of a specific module version in the registry -func (s *Storage) lookupModuleLocation(module *regsrc.Module, version string) (string, error) { - if module.RawHost == nil { - module.RawHost = regsrc.NewFriendlyHost(defaultRegistry) +func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) { + if c.creds == nil { + return + } + + creds, err := 
c.creds.ForHost(host) + if err != nil { + log.Printf("[WARNING] Failed to get credentials for %s: %s (ignoring)", host, err) + return + } + + if creds != nil { + creds.PrepareRequest(req) + } +} + +// Location find the download location for a specific version module. +// This returns a string, because the final location may contain special go-getter syntax. +func (c *Client) Location(module *regsrc.Module, version string) (string, error) { + host, err := module.SvcHost() + if err != nil { + return "", err } - service := s.discoverRegURL(module) + service := c.Discover(host) if service == nil { - return "", fmt.Errorf("host %s does not provide Terraform modules", module.RawHost.Display()) + return "", fmt.Errorf("host %s does not provide Terraform modules", host.ForDisplay()) } var p *url.URL - var err error if version == "" { p, err = url.Parse(path.Join(module.Module(), "download")) } else { @@ -162,10 +180,10 @@ func (s *Storage) lookupModuleLocation(module *regsrc.Module, version string) (s return "", err } - s.addRequestCreds(svchost.Hostname(module.RawHost.Normalized()), req) + c.addRequestCreds(host, req) req.Header.Set(xTerraformVersion, tfVersion) - resp, err := httpClient.Do(req) + resp, err := c.client.Do(req) if err != nil { return "", err } diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go index 648e2a1936b..14b4dce9ce0 100644 --- a/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go +++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go @@ -101,20 +101,16 @@ func (h *FriendlyHost) Valid() bool { // Display returns the host formatted for display to the user in CLI or web // output. 
func (h *FriendlyHost) Display() string { - hostname, err := svchost.ForComparison(h.Raw) - if err != nil { - return InvalidHostString - } - return hostname.ForDisplay() + return svchost.ForDisplay(h.Raw) } // Normalized returns the host formatted for internal reference or comparison. func (h *FriendlyHost) Normalized() string { - hostname, err := svchost.ForComparison(h.Raw) + host, err := svchost.ForComparison(h.Raw) if err != nil { return InvalidHostString } - return hostname.String() + return string(host) } // String returns the host formatted as the user originally typed it assuming it @@ -124,19 +120,21 @@ func (h *FriendlyHost) String() string { } // Equal compares the FriendlyHost against another instance taking normalization -// into account. +// into account. Invalid hosts cannot be compared and will always return false. func (h *FriendlyHost) Equal(other *FriendlyHost) bool { if other == nil { return false } - return h.Normalized() == other.Normalized() -} -func containsPuny(host string) bool { - for _, lbl := range strings.Split(host, ".") { - if strings.HasPrefix(strings.ToLower(lbl), "xn--") { - return true - } + otherHost, err := svchost.ForComparison(other.Raw) + if err != nil { + return false + } + + host, err := svchost.ForComparison(h.Raw) + if err != nil { + return false } - return false + + return otherHost == host } diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go index b6671c8a4a4..325706ec2e0 100644 --- a/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go +++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go @@ -5,6 +5,8 @@ import ( "fmt" "regexp" "strings" + + "github.com/hashicorp/terraform/svchost" ) var ( @@ -33,8 +35,16 @@ var ( fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$", nameSubRe, nameSubRe, providerSubRe)) - // disallowed is a set of hostnames that have special usage in modules and - // can't be registry 
hosts + // NameRe is a regular expression defining the format allowed for namespace + // or name fields in module registry implementations. + NameRe = regexp.MustCompile("^" + nameSubRe + "$") + + // ProviderRe is a regular expression defining the format allowed for + // provider fields in module registry implementations. + ProviderRe = regexp.MustCompile("^" + providerSubRe + "$") + + // these hostnames are not allowed as registry sources, because they are + // already special case module sources in terraform. disallowed = map[string]bool{ "github.com": true, "bitbucket.org": true, @@ -59,7 +69,7 @@ type Module struct { // NewModule construct a new module source from separate parts. Pass empty // string if host or submodule are not needed. -func NewModule(host, namespace, name, provider, submodule string) *Module { +func NewModule(host, namespace, name, provider, submodule string) (*Module, error) { m := &Module{ RawNamespace: namespace, RawName: name, @@ -67,9 +77,16 @@ func NewModule(host, namespace, name, provider, submodule string) *Module { RawSubmodule: submodule, } if host != "" { - m.RawHost = NewFriendlyHost(host) + h := NewFriendlyHost(host) + if h != nil { + fmt.Println("HOST:", h) + if !h.Valid() || disallowed[h.Display()] { + return nil, ErrInvalidModuleSource + } + } + m.RawHost = h } - return m + return m, nil } // ParseModuleSource attempts to parse source as a Terraform registry module @@ -132,12 +149,6 @@ func (m *Module) String() string { return m.formatWithPrefix(hostPrefix, true) } -// Module returns just the registry ID of the module, without a hostname or -// suffix. -func (m *Module) Module() string { - return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) -} - // Equal compares the module source against another instance taking // normalization into account. 
func (m *Module) Equal(other *Module) bool { @@ -175,3 +186,20 @@ func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string { } return str } + +// Module returns just the registry ID of the module, without a hostname or +// suffix. +func (m *Module) Module() string { + return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) +} + +// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may +// contain an invalid hostname, this also returns an error indicating if it +// could be converted to a svchost.Hostname. If no host is specified, the +// default PublicRegistryHost is returned. +func (m *Module) SvcHost() (svchost.Hostname, error) { + if m.RawHost == nil { + return svchost.ForComparison(PublicRegistryHost.Raw) + } + return svchost.ForComparison(m.RawHost.Raw) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go index ed756c88daf..cede4f817de 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context.go @@ -8,6 +8,8 @@ import ( "strings" "sync" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/hashicorp/terraform/config" @@ -671,29 +673,27 @@ func (c *Context) Stop() { } // Validate validates the configuration and returns any warnings or errors. -func (c *Context) Validate() ([]string, []error) { +func (c *Context) Validate() tfdiags.Diagnostics { defer c.acquireRun("validate")() - var errs error + var diags tfdiags.Diagnostics // Validate the configuration itself - if err := c.module.Validate(); err != nil { - errs = multierror.Append(errs, err) - } + diags = diags.Append(c.module.Validate()) // This only needs to be done for the root module, since inter-module // variables are validated in the module tree. 
if config := c.module.Config(); config != nil { // Validate the user variables - if err := smcUserVariables(config, c.variables); len(err) > 0 { - errs = multierror.Append(errs, err...) + for _, err := range smcUserVariables(config, c.variables) { + diags = diags.Append(err) } } // If we have errors at this point, the graphing has no chance, // so just bail early. - if errs != nil { - return nil, []error{errs} + if diags.HasErrors() { + return diags } // Build the graph so we can walk it and run Validate on nodes. @@ -702,24 +702,29 @@ func (c *Context) Validate() ([]string, []error) { // graph again later after Planning. graph, err := c.Graph(GraphTypeValidate, nil) if err != nil { - return nil, []error{err} + diags = diags.Append(err) + return diags } // Walk walker, err := c.walk(graph, walkValidate) if err != nil { - return nil, multierror.Append(errs, err).Errors + diags = diags.Append(err) } - // Return the result - rerrs := multierror.Append(errs, walker.ValidationErrors...) - sort.Strings(walker.ValidationWarnings) - sort.Slice(rerrs.Errors, func(i, j int) bool { - return rerrs.Errors[i].Error() < rerrs.Errors[j].Error() + sort.Slice(walker.ValidationErrors, func(i, j int) bool { + return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error() }) - return walker.ValidationWarnings, rerrs.Errors + for _, warn := range walker.ValidationWarnings { + diags = diags.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range walker.ValidationErrors { + diags = diags.Append(err) + } + + return diags } // Module returns the module tree associated with this context. 
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go index d6dc5506161..b6651c0a8bb 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go @@ -396,6 +396,11 @@ type ResourceAttrDiff struct { Type DiffAttrType } +// Modified returns the inequality of Old and New for this attr +func (d *ResourceAttrDiff) Modified() bool { + return d.Old != d.New +} + // Empty returns true if the diff for this attr is neutral func (d *ResourceAttrDiff) Empty() bool { return d.Old == d.New && !d.NewComputed && !d.NewRemoved diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_refresh.go new file mode 100644 index 00000000000..6d1a2cfc9d7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_refresh.go @@ -0,0 +1,49 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// EvalCheckDataDependsOn is an EvalNode implementation that returns an +// error if a data source has an explicit dependency that contains a diff. If +// the dependency has a diff, the data source refresh can't be completed until +// apply. 
+type EvalCheckDataDependsOn struct { + Refresh bool + Info *InstanceInfo + Config *config.Resource + Provider *ResourceProvider + State **InstanceState +} + +func (n *EvalCheckDataDependsOn) Eval(ctx EvalContext) (interface{}, error) { + if len(n.Config.DependsOn) == 0 { + return nil, nil + } + + state := *n.State + provider := *n.Provider + + // The state for the diff must never be nil + diffState := state + if diffState == nil { + diffState = new(InstanceState) + } + diffState.init() + + resourceCfg := new(ResourceConfig) + + diff, err := provider.Diff(n.Info, diffState, resourceCfg) + if err != nil { + return nil, err + } + + fmt.Printf("Refresh:%t Name:%s Modes:%s DIFF: %#v\n", n.Refresh, n.Config.Name, n.Config.Mode, diff) + if len(n.Config.DependsOn) > 0 { + return nil, EvalEarlyExitError{} + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go index 193421b78ea..1b6ee5a625e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go @@ -262,13 +262,8 @@ func (ctx *BuiltinEvalContext) InterpolateProvider( var cfg *config.RawConfig if pc != nil && pc.RawConfig != nil { - path := pc.Path - if len(path) == 0 { - path = ctx.Path() - } - scope := &InterpolationScope{ - Path: path, + Path: ctx.Path(), Resource: r, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go index bbc2b3667e0..c1def91664b 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go @@ -258,9 +258,11 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { for _, v := range containers { if v.keepDiff() { // At least one key has changes, so list all the sibling keys - // to keep in the 
diff. + // to keep in the diff if any values have changed for k := range v { - keep[k] = true + if v[k].Modified() { + keep[k] = true + } } } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go index cc83938eb6c..a8346276f35 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go @@ -68,9 +68,9 @@ func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { // handling the interpolation error if err != nil { - if n.ContinueOnErr { + if n.ContinueOnErr || flagWarnOutputErrors { log.Printf("[ERROR] Output interpolation %q failed: %s", n.Name, err) - // if we're continueing, make sure the output is included, and + // if we're continuing, make sure the output is included, and // marked as unknown mod.Outputs[n.Name] = &OutputState{ Type: "string", diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go index 1f67e3d86b4..11826907ca4 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go @@ -214,37 +214,6 @@ func writeInstanceToState( return nil, nil } -// EvalClearPrimaryState is an EvalNode implementation that clears the primary -// instance from a resource state. -type EvalClearPrimaryState struct { - Name string -} - -func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() - - // Get a read lock so we can access this instance - lock.RLock() - defer lock.RUnlock() - - // Look for the module state. If we don't have one, then it doesn't matter. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil - } - - // Look for the resource state. If we don't have one, then it is okay. 
- rs := mod.Resources[n.Name] - if rs == nil { - return nil, nil - } - - // Clear primary from the resource state - rs.Primary = nil - - return nil, nil -} - // EvalDeposeState is an EvalNode implementation that takes the primary // out of a state and makes it Deposed. This is done at the beginning of // create-before-destroy calls so that the create can create while preserving diff --git a/vendor/github.com/hashicorp/terraform/terraform/features.go b/vendor/github.com/hashicorp/terraform/terraform/features.go index 752076806f9..97c77bdbd00 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/features.go +++ b/vendor/github.com/hashicorp/terraform/terraform/features.go @@ -1,3 +1,7 @@ package terraform +import "os" + // This file holds feature flags for the next release + +var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go index 614da2c852f..1f826e1d98c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go @@ -113,6 +113,9 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { // Add module variables &ModuleVariableTransformer{Module: b.Module}, + // Remove modules no longer present in the config + &RemovedModuleTransformer{Module: b.Module, State: b.State}, + // Connect references so ordering is correct &ReferenceTransformer{}, diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go index 5d625e051e8..f8dd0fc93a9 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go @@ -84,6 +84,12 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { Module: b.Module, }, + // Create 
orphan output nodes + &OrphanOutputTransformer{ + Module: b.Module, + State: b.State, + }, + // Attach the configuration to any resources &AttachResourceConfigTransformer{Module: b.Module}, @@ -109,6 +115,9 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { Module: b.Module, }, + // Remove modules no longer present in the config + &RemovedModuleTransformer{Module: b.Module, State: b.State}, + // Connect so that the references are ready for targeting. We'll // have to connect again later for providers and so on. &ReferenceTransformer{}, diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go index c2cca149925..89f376e54f8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go @@ -66,13 +66,12 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { w.interpolaterVarLock.Unlock() ctx := &BuiltinEvalContext{ - StopContext: w.StopContext, - PathValue: path, - Hooks: w.Context.hooks, - InputValue: w.Context.uiInput, - Components: w.Context.components, - ProviderCache: w.providerCache, - //ProviderConfigCache: w.providerConfigCache, + StopContext: w.StopContext, + PathValue: path, + Hooks: w.Context.hooks, + InputValue: w.Context.uiInput, + Components: w.Context.components, + ProviderCache: w.providerCache, ProviderInputConfig: w.Context.providerInputConfig, ProviderLock: &w.providerLock, ProvisionerCache: w.provisionerCache, @@ -150,7 +149,6 @@ func (w *ContextGraphWalker) ExitEvalTree( func (w *ContextGraphWalker) init() { w.contexts = make(map[string]*BuiltinEvalContext, 5) w.providerCache = make(map[string]ResourceProvider, 5) - //w.providerConfigCache = make(map[string]*ResourceConfig, 5) w.provisionerCache = make(map[string]ResourceProvisioner, 5) w.interpolaterVars = make(map[string]map[string]interface{}, 5) } diff --git 
a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go index e97b4855a97..95ef4e94d48 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go @@ -2,7 +2,7 @@ package terraform -import "fmt" +import "strconv" const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate" @@ -10,7 +10,7 @@ var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125} func (i GraphType) String() string { if i >= GraphType(len(_GraphType_index)-1) { - return fmt.Sprintf("GraphType(%d)", i) + return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")" } return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go index f69267cd52c..b8e7d1fb910 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go @@ -2,7 +2,7 @@ package terraform -import "fmt" +import "strconv" const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" @@ -10,7 +10,7 @@ var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} func (i InstanceType) String() string { if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { - return fmt.Sprintf("InstanceType(%d)", i) + return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" } return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go index 15d9b8f9cb5..d5ca641a6fd 100644 --- 
a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go @@ -108,7 +108,9 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { // Get the state if we have it, if not we build it rs := n.ResourceState if rs == nil { - rs = &ResourceState{} + rs = &ResourceState{ + Provider: n.ResolvedProvider, + } } // If the config isn't empty we update the state @@ -146,7 +148,7 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { &EvalWriteState{ Name: stateId, ResourceType: rs.Type, - Provider: rs.Provider, + Provider: n.ResolvedProvider, Dependencies: rs.Dependencies, State: &state, // state is nil here }, @@ -208,7 +210,7 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { &EvalWriteState{ Name: stateId, ResourceType: rs.Type, - Provider: rs.Provider, + Provider: n.ResolvedProvider, Dependencies: rs.Dependencies, State: &state, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go new file mode 100644 index 00000000000..bb3e5ee1e00 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go @@ -0,0 +1,77 @@ +package terraform + +import ( + "fmt" + "log" + "reflect" +) + +// NodeModuleRemoved represents a module that is no longer in the +// config. 
+type NodeModuleRemoved struct { + PathValue []string +} + +func (n *NodeModuleRemoved) Name() string { + return fmt.Sprintf("%s (removed)", modulePrefixStr(n.PathValue)) +} + +// GraphNodeSubPath +func (n *NodeModuleRemoved) Path() []string { + return n.PathValue +} + +// GraphNodeEvalable +func (n *NodeModuleRemoved) EvalTree() EvalNode { + return &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, + Node: &EvalDeleteModule{ + PathValue: n.PathValue, + }, + } +} + +func (n *NodeModuleRemoved) ReferenceGlobal() bool { + return true +} + +func (n *NodeModuleRemoved) References() []string { + return []string{modulePrefixStr(n.PathValue)} +} + +// EvalDeleteModule is an EvalNode implementation that removes an empty module +// entry from the state. +type EvalDeleteModule struct { + PathValue []string +} + +func (n *EvalDeleteModule) Eval(ctx EvalContext) (interface{}, error) { + state, lock := ctx.State() + if state == nil { + return nil, nil + } + + // Get a write lock so we can access this instance + lock.Lock() + defer lock.Unlock() + + // Make sure we have a clean state + // Destroyed resources aren't deleted, they're written with an ID of "". + state.prune() + + // find the module and delete it + for i, m := range state.Modules { + if reflect.DeepEqual(m.Path, n.PathValue) { + if !m.Empty() { + // a targeted apply may leave module resources even without a config, + // so just log this and return. + log.Printf("[DEBUG] cannot remove module %s, not empty", modulePrefixStr(n.PathValue)) + break + } + state.Modules = append(state.Modules[:i], state.Modules[i+1:]...) 
+ break + } + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go index 636a15df11c..0fd1554a951 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go @@ -19,6 +19,11 @@ func (n *NodeOutputOrphan) Name() string { return result } +// GraphNodeReferenceable +func (n *NodeOutputOrphan) ReferenceableName() []string { + return []string{"output." + n.OutputName} +} + // GraphNodeSubPath func (n *NodeOutputOrphan) Path() []string { return n.PathValue diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go index 3230558e8c9..9e490f7b4f0 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go @@ -2,6 +2,7 @@ package terraform import ( "fmt" + "strings" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/dag" @@ -25,8 +26,13 @@ type NodeAbstractProvider struct { } func ResolveProviderName(name string, path []string) string { + if strings.Contains(name, "provider.") { + // already resolved + return name + } + name = fmt.Sprintf("provider.%s", name) - if len(path) > 1 { + if len(path) >= 1 { name = fmt.Sprintf("%s.%s", modulePrefixStr(path), name) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go index e46c3200997..73509c87f12 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go @@ -178,19 +178,19 @@ func (n *NodeAbstractResource) SetProvider(p string) { } // GraphNodeProviderConsumer -func 
(n *NodeAbstractResource) ProvidedBy() []string { +func (n *NodeAbstractResource) ProvidedBy() string { // If we have a config we prefer that above all else if n.Config != nil { - return []string{resourceProvider(n.Config.Type, n.Config.Provider)} + return resourceProvider(n.Config.Type, n.Config.Provider) } // If we have state, then we will use the provider from there if n.ResourceState != nil && n.ResourceState.Provider != "" { - return []string{n.ResourceState.Provider} + return n.ResourceState.Provider } // Use our type - return []string{resourceProvider(n.Addr.Type, "")} + return resourceProvider(n.Addr.Type, "") } // GraphNodeProvisionerConsumer diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go index 807b1f41698..9f6d69fd3c7 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go @@ -158,7 +158,7 @@ func (n *NodeApplyableResource) evalTreeDataResource( &EvalWriteState{ Name: stateId, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, @@ -308,7 +308,7 @@ func (n *NodeApplyableResource) evalTreeManagedResource( &EvalWriteState{ Name: stateId, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, @@ -332,7 +332,7 @@ func (n *NodeApplyableResource) evalTreeManagedResource( Else: &EvalWriteState{ Name: stateId, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go index cffb9ae60b1..657bbee7f50 100644 --- 
a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go @@ -149,7 +149,9 @@ func (n *NodeDestroyResource) EvalTree() EvalNode { // Get our state rs := n.ResourceState if rs == nil { - rs = &ResourceState{} + rs = &ResourceState{ + Provider: n.ResolvedProvider, + } } var diffApply *InstanceDiff @@ -273,7 +275,7 @@ func (n *NodeDestroyResource) EvalTree() EvalNode { &EvalWriteState{ Name: stateId, ResourceType: n.Addr.Type, - Provider: rs.Provider, + Provider: n.ResolvedProvider, Dependencies: rs.Dependencies, State: &state, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go index 25a76a99fc9..7d9fcddb55e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go @@ -112,7 +112,7 @@ func (n *NodePlannableResourceInstance) evalTreeDataResource( &EvalWriteState{ Name: stateId, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, @@ -177,7 +177,7 @@ func (n *NodePlannableResourceInstance) evalTreeManagedResource( &EvalWriteState{ Name: stateId, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go index d504e7de417..dbb64edae30 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go @@ -166,7 +166,7 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN &EvalWriteState{ Name: 
stateId, ResourceType: n.ResourceState.Type, - Provider: n.ResourceState.Provider, + Provider: n.ResolvedProvider, Dependencies: n.ResourceState.Dependencies, State: &state, }, @@ -251,7 +251,7 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState( &EvalWriteState{ Name: stateID, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go index a8cd8dd9f00..2f5ebb5e721 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go @@ -346,7 +346,7 @@ func (c *ResourceConfig) get( if err != nil { return nil, false } - if i >= int64(cv.Len()) { + if int(i) < 0 || int(i) >= cv.Len() { return nil, false } current = cv.Index(int(i)).Interface() diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go index 73cde0ccb71..9131f0f5fe4 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go @@ -196,13 +196,14 @@ func (p *MockResourceProvider) Diff( info *InstanceInfo, state *InstanceState, desired *ResourceConfig) (*InstanceDiff, error) { - p.Lock() - defer p.Unlock() + p.Lock() p.DiffCalled = true p.DiffInfo = info p.DiffState = state p.DiffDesired = desired + p.Unlock() + if p.DiffFn != nil { return p.DiffFn(info, state, desired) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go index 89a404847de..5bc2f8a04fc 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/state.go @@ -1089,7 +1089,7 @@ func (m 
*ModuleState) Orphans(c *config.Config) []string { defer m.Unlock() keys := make(map[string]struct{}) - for k, _ := range m.Resources { + for k := range m.Resources { keys[k] = struct{}{} } @@ -1097,7 +1097,7 @@ func (m *ModuleState) Orphans(c *config.Config) []string { for _, r := range c.Resources { delete(keys, r.Id()) - for k, _ := range keys { + for k := range keys { if strings.HasPrefix(k, r.Id()+".") { delete(keys, k) } @@ -1106,7 +1106,32 @@ func (m *ModuleState) Orphans(c *config.Config) []string { } result := make([]string, 0, len(keys)) - for k, _ := range keys { + for k := range keys { + result = append(result, k) + } + + return result +} + +// RemovedOutputs returns a list of outputs that are in the State but aren't +// present in the configuration itself. +func (m *ModuleState) RemovedOutputs(c *config.Config) []string { + m.Lock() + defer m.Unlock() + + keys := make(map[string]struct{}) + for k := range m.Outputs { + keys[k] = struct{}{} + } + + if c != nil { + for _, o := range c.Outputs { + delete(keys, o.Name) + } + } + + result := make([]string, 0, len(keys)) + for k := range keys { result = append(result, k) } @@ -1314,6 +1339,10 @@ func (m *ModuleState) String() string { return buf.String() } +func (m *ModuleState) Empty() bool { + return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0 +} + // ResourceStateKey is a structured representation of the key used for the // ModuleState.Resources mapping type ResourceStateKey struct { diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go index 10506ea0602..39cf097aecc 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go @@ -1,10 +1,7 @@ package terraform import ( - "log" - "github.com/hashicorp/terraform/config" - 
"github.com/hashicorp/terraform/config/module" ) // GraphNodeAttachProvider is an interface that must be implemented by nodes @@ -19,62 +16,3 @@ type GraphNodeAttachProvider interface { // Sets the configuration AttachProvider(*config.ProviderConfig) } - -// AttachProviderConfigTransformer goes through the graph and attaches -// provider configuration structures to nodes that implement the interfaces -// above. -// -// The attached configuration structures are directly from the configuration. -// If they're going to be modified, a copy should be made. -type AttachProviderConfigTransformer struct { - Module *module.Tree // Module is the root module for the config -} - -func (t *AttachProviderConfigTransformer) Transform(g *Graph) error { - if err := t.attachProviders(g); err != nil { - return err - } - - return nil -} - -func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error { - // Go through and find GraphNodeAttachProvider - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachProvider implementations - apn, ok := v.(GraphNodeAttachProvider) - if !ok { - continue - } - - // Determine what we're looking for - path := normalizeModulePath(apn.Path()) - path = path[1:] - name := apn.ProviderName() - log.Printf("[TRACE] Attach provider request: %#v %s", path, name) - - // Get the configuration. - tree := t.Module.Child(path) - if tree == nil { - continue - } - - // Go through the provider configs to find the matching config - for _, p := range tree.Config().ProviderConfigs { - // Build the name, which is "name.alias" if an alias exists - current := p.Name - if p.Alias != "" { - current += "." + p.Alias - } - - // If the configs match then attach! 
- if current == name { - log.Printf("[TRACE] Attaching provider config: %#v", p) - apn.AttachProvider(p) - break - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go index 7ec7744a7da..61bce8532a5 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go @@ -133,78 +133,3 @@ func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { return nil } - -type ProviderConfigTransformer struct { - Providers []string - Concrete ConcreteProviderNodeFunc - - // Module is the module to add resources from. - Module *module.Tree -} - -func (t *ProviderConfigTransformer) Transform(g *Graph) error { - // If no module is given, we don't do anything - if t.Module == nil { - return nil - } - - // If the module isn't loaded, that is simply an error - if !t.Module.Loaded() { - return errors.New("module must be loaded for ProviderConfigTransformer") - } - - // Start the transformation process - return t.transform(g, t.Module) -} - -func (t *ProviderConfigTransformer) transform(g *Graph, m *module.Tree) error { - // If no config, do nothing - if m == nil { - return nil - } - - // Add our resources - if err := t.transformSingle(g, m); err != nil { - return err - } - - // Transform all the children. - for _, c := range m.Children() { - if err := t.transform(g, c); err != nil { - return err - } - } - - return nil -} - -func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { - log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path()) - - // Get the configuration for this module - conf := m.Config() - - // Build the path we're at - path := m.Path() - if len(path) > 0 { - path = append([]string{RootModuleName}, path...) 
- } - - // Write all the resources out - for _, p := range conf.ProviderConfigs { - name := p.Name - if p.Alias != "" { - name += "." + p.Alias - } - - v := t.Concrete(&NodeAbstractProvider{ - NameValue: name, - PathValue: path, - }).(dag.Vertex) - - // Add it to the graph - g.Add(v) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go index fd920fbdaf5..87a1f9c9873 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go @@ -66,8 +66,8 @@ func (n *graphNodeDeposedResource) Name() string { return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index) } -func (n *graphNodeDeposedResource) ProvidedBy() []string { - return []string{resourceProvider(n.ResourceName, n.ProviderName)} +func (n *graphNodeDeposedResource) ProvidedBy() string { + return resourceProvider(n.ResourceName, n.ProviderName) } func (n *graphNodeDeposedResource) SetProvider(p string) { @@ -108,7 +108,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { &EvalWriteStateDeposed{ Name: n.ResourceName, ResourceType: n.ResourceType, - Provider: n.ProviderName, + Provider: n.ResolvedProvider, State: &state, Index: n.Index, }, @@ -157,7 +157,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { &EvalWriteStateDeposed{ Name: n.ResourceName, ResourceType: n.ResourceType, - Provider: n.ProviderName, + Provider: n.ResolvedProvider, State: &state, Index: n.Index, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go index 762bf1ded53..fcbff653f5e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go @@ -48,8 +48,8 @@ func (n *graphNodeImportState) Name() 
string { return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID) } -func (n *graphNodeImportState) ProvidedBy() []string { - return []string{resourceProvider(n.Addr.Type, n.ProviderName)} +func (n *graphNodeImportState) ProvidedBy() string { + return resourceProvider(n.Addr.Type, n.ProviderName) } func (n *graphNodeImportState) SetProvider(p string) { @@ -240,7 +240,7 @@ func (n *graphNodeImportStateSub) EvalTree() EvalNode { &EvalWriteState{ Name: key.String(), ResourceType: info.Type, - Provider: resourceProvider(info.Type, n.ProviderName), + Provider: n.ResolvedProvider, State: &state, }, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go index 49568d5bcde..aea2bd0ed72 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go @@ -21,43 +21,32 @@ func (t *OrphanOutputTransformer) Transform(g *Graph) error { return nil } - return t.transform(g, t.Module) -} - -func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error { - // Get our configuration, and recurse into children - var c *config.Config - if m != nil { - c = m.Config() - for _, child := range m.Children() { - if err := t.transform(g, child); err != nil { - return err - } + for _, ms := range t.State.Modules { + if err := t.transform(g, ms); err != nil { + return err } } + return nil +} - // Get the state. If there is no state, then we have no orphans! 
- path := normalizeModulePath(m.Path()) - state := t.State.ModuleByPath(path) - if state == nil { +func (t *OrphanOutputTransformer) transform(g *Graph, ms *ModuleState) error { + if ms == nil { return nil } - // Make a map of the valid outputs - valid := make(map[string]struct{}) - for _, o := range c.Outputs { - valid[o.Name] = struct{}{} - } + path := normalizeModulePath(ms.Path) - // Go through the outputs and find the ones that aren't in our config. - for n, _ := range state.Outputs { - // If it is in the valid map, then ignore - if _, ok := valid[n]; ok { - continue - } + // Get the config for this path, which is nil if the entire module has been + // removed. + var c *config.Config + if m := t.Module.Child(path[1:]); m != nil { + c = m.Config() + } - // Orphan! + // add all the orphaned outputs to the graph + for _, n := range ms.RemovedOutputs(c) { g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path}) + } return nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go index 794753f29c3..f8386efe186 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go @@ -1,16 +1,17 @@ package terraform import ( + "errors" "fmt" "log" "strings" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/dag" ) -// TODO: return the transformers and append them to the list, so we don't lose the log steps func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, mod *module.Tree) GraphTransformer { return GraphTransformMulti( // Add providers from the config @@ -26,14 +27,10 @@ func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, m }, // Connect the providers &ProviderTransformer{}, - // Disable unused providers - 
&DisableProviderTransformer{}, + // Remove unused providers and proxies + &PruneProviderTransformer{}, // Connect provider to their parent provider nodes &ParentProviderTransformer{}, - // Attach configuration to each provider instance - &AttachProviderConfigTransformer{ - Module: mod, - }, ) } @@ -55,10 +52,10 @@ type GraphNodeCloseProvider interface { // GraphNodeProviderConsumer is an interface that nodes that require // a provider must implement. ProvidedBy must return the name of the provider -// to use. +// to use. This may be a provider by type, type.alias or a fully resolved +// provider name type GraphNodeProviderConsumer interface { - // TODO: make this return s string instead of a []string - ProvidedBy() []string + ProvidedBy() string // Set the resolved provider address for this resource. SetProvider(string) } @@ -74,7 +71,7 @@ func (t *ProviderTransformer) Transform(g *Graph) error { m := providerVertexMap(g) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProviderConsumer); ok { - p := pv.ProvidedBy()[0] + p := pv.ProvidedBy() key := providerMapKey(p, pv) target := m[key] @@ -104,11 +101,20 @@ func (t *ProviderTransformer) Transform(g *Graph) error { if target == nil { err = multierror.Append(err, fmt.Errorf( - "%s: provider %s couldn't be found", - dag.VertexName(v), p)) + "%s: configuration for %s is not present; a provider configuration block is required for all operations", + dag.VertexName(v), p, + )) break } + // see if this in an inherited provider + if p, ok := target.(*graphNodeProxyProvider); ok { + g.Remove(p) + target = p.Target() + key = target.(GraphNodeProvider).Name() + } + + log.Printf("[DEBUG] resource %s using provider %s", dag.VertexName(pv), key) pv.SetProvider(key) g.Connect(dag.BasicEdge(v, target)) } @@ -123,7 +129,6 @@ func (t *ProviderTransformer) Transform(g *Graph) error { // in the graph are evaluated. 
type CloseProviderTransformer struct{} -// FIXME: this doesn't close providers if the root provider is disabled func (t *CloseProviderTransformer) Transform(g *Graph) error { pm := providerVertexMap(g) cpm := make(map[string]*graphNodeCloseProvider) @@ -187,7 +192,12 @@ func (t *MissingProviderTransformer) Transform(g *Graph) error { continue } - p := pv.ProvidedBy()[0] + p := pv.ProvidedBy() + // this may be the resolved provider from the state, so we need to get + // the base provider name. + parts := strings.SplitAfter(p, "provider.") + p = parts[len(parts)-1] + key := ResolveProviderName(p, nil) provider := m[key] @@ -196,6 +206,14 @@ func (t *MissingProviderTransformer) Transform(g *Graph) error { continue } + // we don't implicitly create aliased providers + if strings.Contains(p, ".") { + log.Println("[DEBUG] not adding missing provider alias:", p) + continue + } + + log.Println("[DEBUG] adding missing provider:", p) + // create the misisng top-level provider provider = t.Concrete(&NodeAbstractProvider{ NameValue: p, @@ -243,22 +261,29 @@ func (t *ParentProviderTransformer) Transform(g *Graph) error { return nil } -// PruneProviderTransformer is a GraphTransformer that prunes all the -// providers that aren't needed from the graph. A provider is unneeded if -// no resource or module is using that provider. +// PruneProviderTransformer removes any providers that are not actually used by +// anything, and provider proxies. This avoids the provider being initialized +// and configured. This both saves resources but also avoids errors since +// configuration may imply initialization which may require auth. 
type PruneProviderTransformer struct{} func (t *PruneProviderTransformer) Transform(g *Graph) error { for _, v := range g.Vertices() { - // We only care about the providers - if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" { + // We only care about providers + pn, ok := v.(GraphNodeProvider) + if !ok || pn.ProviderName() == "" { continue } - // Does anything depend on this? If not, then prune it. - if s := g.UpEdges(v); s.Len() == 0 { - if nv, ok := v.(dag.NamedVertex); ok { - log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name()) - } + + // ProxyProviders will have up edges, but we're now done with them in the graph + if _, ok := v.(*graphNodeProxyProvider); ok { + log.Printf("[DEBUG] pruning proxy provider %s", dag.VertexName(v)) + g.Remove(v) + } + + // Remove providers with no dependencies. + if g.UpEdges(v).Len() == 0 { + log.Printf("[DEBUG] pruning unused provider %s", dag.VertexName(v)) g.Remove(v) } } @@ -269,6 +294,11 @@ func (t *PruneProviderTransformer) Transform(g *Graph) error { // providerMapKey is a helper that gives us the key to use for the // maps returned by things such as providerVertexMap. func providerMapKey(k string, v dag.Vertex) string { + if strings.Contains(k, "provider.") { + // this is already resolved + return k + } + // we create a dummy provider to var path []string if sp, ok := v.(GraphNodeSubPath); ok { @@ -344,21 +374,233 @@ func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { return true } -// graphNodeProviderConsumerDummy is a struct that never enters the real -// graph (though it could to no ill effect). It implements -// GraphNodeProviderConsumer and GraphNodeSubpath as a way to force -// certain transformations. -type graphNodeProviderConsumerDummy struct { - ProviderValue string - PathValue []string +// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to +// store the name and value of a provider node for inheritance between modules. 
+// These nodes are only used to store the data while loading the provider +// configurations, and are removed after all the resources have been connected +// to their providers. +type graphNodeProxyProvider struct { + nameValue string + path []string + target GraphNodeProvider +} + +func (n *graphNodeProxyProvider) ProviderName() string { + return n.Target().ProviderName() } -func (n *graphNodeProviderConsumerDummy) Path() []string { - return n.PathValue +func (n *graphNodeProxyProvider) Name() string { + return ResolveProviderName(n.nameValue, n.path) } -func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string { - return []string{n.ProviderValue} +// find the concrete provider instance +func (n *graphNodeProxyProvider) Target() GraphNodeProvider { + switch t := n.target.(type) { + case *graphNodeProxyProvider: + return t.Target() + default: + return n.target + } +} + +// ProviderConfigTransformer adds all provider nodes from the configuration and +// attaches the configs. +type ProviderConfigTransformer struct { + Providers []string + Concrete ConcreteProviderNodeFunc + + // each provider node is stored here so that the proxy nodes can look up + // their targets by name. + providers map[string]GraphNodeProvider + // record providers that can be overriden with a proxy + proxiable map[string]bool + + // Module is the module to add resources from. 
+ Module *module.Tree } -func (n *graphNodeProviderConsumerDummy) SetProvider(string) {} +func (t *ProviderConfigTransformer) Transform(g *Graph) error { + // If no module is given, we don't do anything + if t.Module == nil { + return nil + } + + // If the module isn't loaded, that is simply an error + if !t.Module.Loaded() { + return errors.New("module must be loaded for ProviderConfigTransformer") + } + + t.providers = make(map[string]GraphNodeProvider) + t.proxiable = make(map[string]bool) + + // Start the transformation process + if err := t.transform(g, t.Module); err != nil { + return err + } + + // finally attach the configs to the new nodes + return t.attachProviderConfigs(g) +} + +func (t *ProviderConfigTransformer) transform(g *Graph, m *module.Tree) error { + // If no config, do nothing + if m == nil { + return nil + } + + // Add our resources + if err := t.transformSingle(g, m); err != nil { + return err + } + + // Transform all the children. + for _, c := range m.Children() { + if err := t.transform(g, c); err != nil { + return err + } + } + return nil +} + +func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { + log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path()) + + // Get the configuration for this module + conf := m.Config() + + // Build the path we're at + path := m.Path() + if len(path) > 0 { + path = append([]string{RootModuleName}, path...) + } + + // add all providers from the configuration + for _, p := range conf.ProviderConfigs { + name := p.Name + if p.Alias != "" { + name += "." 
+ p.Alias + } + + v := t.Concrete(&NodeAbstractProvider{ + NameValue: name, + PathValue: path, + }) + + // Add it to the graph + g.Add(v) + fullName := ResolveProviderName(name, path) + t.providers[fullName] = v.(GraphNodeProvider) + t.proxiable[fullName] = len(p.RawConfig.RawMap()) == 0 + } + + // Now replace the provider nodes with proxy nodes if a provider was being + // passed in, and create implicit proxies if there was no config. Any extra + // proxies will be removed in the prune step. + return t.addProxyProviders(g, m) +} + +func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) error { + path := m.Path() + + // can't add proxies at the root + if len(path) == 0 { + return nil + } + + parentPath := path[:len(path)-1] + parent := t.Module.Child(parentPath) + if parent == nil { + return nil + } + + var parentCfg *config.Module + for _, mod := range parent.Config().Modules { + if mod.Name == m.Name() { + parentCfg = mod + break + } + } + + if parentCfg == nil { + // this can't really happen during normal execution. + return fmt.Errorf("parent module config not found for %s", m.Name()) + } + + // Go through all the providers the parent is passing in, and add proxies to + // the parent provider nodes. 
+ for name, parentName := range parentCfg.Providers { + fullName := ResolveProviderName(name, path) + fullParentName := ResolveProviderName(parentName, parentPath) + + parentProvider := t.providers[fullParentName] + + if parentProvider == nil { + return fmt.Errorf("missing provider %s", fullParentName) + } + + proxy := &graphNodeProxyProvider{ + nameValue: name, + path: path, + target: parentProvider, + } + + concreteProvider := t.providers[fullName] + + // replace the concrete node with the provider passed in + if concreteProvider != nil && t.proxiable[fullName] { + g.Replace(concreteProvider, proxy) + t.providers[fullName] = proxy + continue + } + + // aliased providers can't be implicitly passed in + if strings.Contains(name, ".") { + continue + } + + // There was no concrete provider, so add this as an implicit provider. + // The extra proxy will be pruned later if it's unused. + g.Add(proxy) + t.providers[fullName] = proxy + } + return nil +} + +func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { + for _, v := range g.Vertices() { + // Only care about GraphNodeAttachProvider implementations + apn, ok := v.(GraphNodeAttachProvider) + if !ok { + continue + } + + // Determine what we're looking for + path := normalizeModulePath(apn.Path())[1:] + name := apn.ProviderName() + log.Printf("[TRACE] Attach provider request: %#v %s", path, name) + + // Get the configuration. + tree := t.Module.Child(path) + if tree == nil { + continue + } + + // Go through the provider configs to find the matching config + for _, p := range tree.Config().ProviderConfigs { + // Build the name, which is "name.alias" if an alias exists + current := p.Name + if p.Alias != "" { + current += "." + p.Alias + } + + // If the configs match then attach! 
+ if current == name { + log.Printf("[TRACE] Attaching provider config: %#v", p) + apn.AttachProvider(p) + break + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go deleted file mode 100644 index d9919f3a774..00000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go +++ /dev/null @@ -1,50 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// DisableProviderTransformer "disables" any providers that are not actually -// used by anything. This avoids the provider being initialized and configured. -// This both saves resources but also avoids errors since configuration -// may imply initialization which may require auth. -type DisableProviderTransformer struct{} - -func (t *DisableProviderTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - // We only care about providers - pn, ok := v.(GraphNodeProvider) - if !ok || pn.ProviderName() == "" { - continue - } - - // If we have dependencies, then don't disable - if g.UpEdges(v).Len() > 0 { - continue - } - - // Get the path - var path []string - if pn, ok := v.(GraphNodeSubPath); ok { - path = pn.Path() - } - - // Disable the provider by replacing it with a "disabled" provider - disabled := &NodeDisabledProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - NameValue: pn.ProviderName(), - PathValue: path, - }, - } - - if !g.Replace(v, disabled) { - panic(fmt.Sprintf( - "vertex disappeared from under us: %s", - dag.VertexName(v))) - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go index 2560e5ad64e..85a82a6517c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go +++ 
b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go @@ -127,6 +127,7 @@ func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) { var matches []dag.Vertex var missing []string prefix := m.prefix(v) + for _, ns := range rn.References() { found := false for _, n := range strings.Split(ns, "/") { @@ -139,19 +140,14 @@ func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) { // Mark that we found a match found = true - // Make sure this isn't a self reference, which isn't included - selfRef := false for _, p := range parents { + // don't include self-references if p == v { - selfRef = true - break + continue } - } - if selfRef { - continue + matches = append(matches, p) } - matches = append(matches, parents...) break } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go new file mode 100644 index 00000000000..2e05edbaaa8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go @@ -0,0 +1,32 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/config/module" +) + +// RemoveModuleTransformer implements GraphTransformer to add nodes indicating +// when a module was removed from the configuration. +type RemovedModuleTransformer struct { + Module *module.Tree // root module + State *State +} + +func (t *RemovedModuleTransformer) Transform(g *Graph) error { + // nothing to remove if there's no state! 
+ if t.State == nil { + return nil + } + + for _, m := range t.State.Modules { + c := t.Module.Child(m.Path[1:]) + if c != nil { + continue + } + + log.Printf("[DEBUG] module %s no longer in config\n", modulePrefixStr(m.Path)) + g.Add(&NodeModuleRemoved{PathValue: m.Path}) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go index cbd78dd93f0..4cfc528ef0c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go @@ -2,7 +2,7 @@ package terraform -import "fmt" +import "strconv" const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport" @@ -10,7 +10,7 @@ var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96} func (i walkOperation) String() string { if i >= walkOperation(len(_walkOperation_index)-1) { - return fmt.Sprintf("walkOperation(%d)", i) + return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" } return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go index edf9e639fdc..0b1249bbef9 100644 --- a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go +++ b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go @@ -2,18 +2,13 @@ package tfdiags -import "fmt" +import "strconv" const ( _Severity_name_0 = "Error" _Severity_name_1 = "Warning" ) -var ( - _Severity_index_0 = [...]uint8{0, 5} - _Severity_index_1 = [...]uint8{0, 7} -) - func (i Severity) String() string { switch { case i == 69: @@ -21,6 +16,6 @@ func (i Severity) String() string { case i == 87: return _Severity_name_1 default: - return fmt.Sprintf("Severity(%d)", i) + return "Severity(" + 
strconv.FormatInt(int64(i), 10) + ")" } } diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go index 2d2945376e7..a0942a608fd 100644 --- a/vendor/github.com/hashicorp/terraform/version/version.go +++ b/vendor/github.com/hashicorp/terraform/version/version.go @@ -11,12 +11,12 @@ import ( ) // The main version number that is being run at the moment. -const Version = "0.11.0" +const Version = "0.11.2" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -var Prerelease = "beta1" +var Prerelease = "dev" // SemVer is an instance of version.Version. This has the secondary // benefit of verifying during tests and init time that our version is a diff --git a/vendor/vendor.json b/vendor/vendor.json index 6f570c0f995..0e7ee7d8133 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1738,532 +1738,446 @@ "revisionTime": "2017-10-22T02:00:50Z" }, { - "checksumSHA1": "L1z+CLuSu9PI7WzMJc+2a/aJX1Y=", + "checksumSHA1": "QOrITO2Dm4mWEYA8RXP1x0UeiUw=", "path": "github.com/hashicorp/terraform", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "2kIaes8QS4QFlSx8CZLXzbdj0UM=", + "checksumSHA1": "nKKclpNMMvJrUbtMNDujRtJcR6I=", "path": "github.com/hashicorp/terraform/backend", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "ZWqZhZxaT2AMNy4dzCcvMKc46GY=", "path": 
"github.com/hashicorp/terraform/backend/atlas", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "StxVDAMzeMUdXUcRbjcuDxm8GD0=", "path": "github.com/hashicorp/terraform/backend/init", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "ISwgLoSPkcEYAcwFoYu5FNsMDD0=", "path": "github.com/hashicorp/terraform/backend/legacy", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "Ff+vwhG5hM0NzPVz1eC2qevTT9w=", + "checksumSHA1": "1bm/jLoSRCTVo1eu6c3u7dwRQ28=", "path": "github.com/hashicorp/terraform/backend/local", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "sWPPRyIsfATpFxTqRc9PgbazRAc=", + "checksumSHA1": "dL2tWGJpT3ohSID91w/6wQaFhX0=", "path": "github.com/hashicorp/terraform/backend/remote-state/azure", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "hs39fP+wdfuvpN/lsMpYwUZUV8I=", "path": 
"github.com/hashicorp/terraform/backend/remote-state/consul", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "SllujprNPMotiPKfcPsQRF/7r64=", "path": "github.com/hashicorp/terraform/backend/remote-state/etcdv3", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "E8JzDuHhPSCBlqLIbayxcTf5tWE=", + "checksumSHA1": "RolC84+BZoeoV8Nf8pdLFv2sUz8=", "path": "github.com/hashicorp/terraform/backend/remote-state/gcs", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "uJi6XL6OFIzU0r3G0YX0L3YzxRE=", "path": "github.com/hashicorp/terraform/backend/remote-state/inmem", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "xH9qq/2HWzIPk4E9AY0PY0AQf2Q=", "path": "github.com/hashicorp/terraform/backend/remote-state/manta", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "k4dUi8lAv2+RC7nDdrt4p3VThjg=", + "checksumSHA1": 
"Px1bBSMVKEsHyXL655w3LX3clRM=", "path": "github.com/hashicorp/terraform/backend/remote-state/s3", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "l0SZPCxWxxlYHOedkUCZUCWw4R0=", "path": "github.com/hashicorp/terraform/backend/remote-state/swift", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "CvUjARK6DNC/pOMkY0stqtGI1DA=", + "checksumSHA1": "AELhei0e6ZKXmc7Gl3zeOvKK6Ko=", "path": "github.com/hashicorp/terraform/builtin/providers/terraform", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "RCVWlxGP1rZsVKT8VqSgyWhAte4=", "path": "github.com/hashicorp/terraform/builtin/provisioners/chef", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "EtcHzH4aXhylC1Uu8yBQis6IzfU=", "path": "github.com/hashicorp/terraform/builtin/provisioners/file", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": 
"IWoLiBpleo7Ndc8ECqZS6p+fsGY=", "path": "github.com/hashicorp/terraform/builtin/provisioners/local-exec", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "mprmWWmGibkrqOCI66VJyVNTHaM=", "path": "github.com/hashicorp/terraform/builtin/provisioners/remote-exec", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "dzl4u25a9uL1AQaDkuj9yEYQL9U=", + "checksumSHA1": "mMT5zemooXgfhXRWSpHz6RH4BmI=", "path": "github.com/hashicorp/terraform/builtin/provisioners/salt-masterless", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "z370JbayOT8GHhzXKNxENoIStuY=", + "checksumSHA1": "z2TjmtULJ0grkdY1eQ0pGB6look=", "path": "github.com/hashicorp/terraform/command", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "HWbnuaEFdfRFeKxZdlYUWZm+DU0=", "path": "github.com/hashicorp/terraform/command/clistate", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" 
}, { "checksumSHA1": "ezmArCBoyFTTvlskRVCOlJ6dhB8=", "path": "github.com/hashicorp/terraform/command/format", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "Pg0fge6Fl6a34pYl2fH1eb6kgNE=", "path": "github.com/hashicorp/terraform/communicator", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "zCAC53a+zRYTwnfw7vUFJmvqxQc=", "path": "github.com/hashicorp/terraform/communicator/remote", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "gOdZ52GCuL8KLiqYGEVNVZyMO5U=", "path": "github.com/hashicorp/terraform/communicator/shared", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "/GfjH+TwNG39FcII4/D7K5h7yq4=", "path": "github.com/hashicorp/terraform/communicator/ssh", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "ishkSV98ykhx7ZA9Q/lZgYChZms=", "path": "github.com/hashicorp/terraform/communicator/winrm", - 
"revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "1IwTOSybgtAvcovhHpACIdRxtYA=", + "checksumSHA1": "FAaycFUNBBVYA8jbjFK7O2bnts8=", "path": "github.com/hashicorp/terraform/config", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "qzvSGXa0rLkhSSha9fZQkfk6UG4=", + "checksumSHA1": "WzQP2WfiCYlaALKZVqEFsxZsG1o=", "path": "github.com/hashicorp/terraform/config/configschema", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "3V7300kyZF+AGy/cOKV0+P6M3LY=", "path": "github.com/hashicorp/terraform/config/hcl2shim", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "H82Z/4fw0m/Lsm8y4SDPXnlWc/Q=", + "checksumSHA1": "LiHFdmGhFhdghVMMffCGkmPvV9g=", "path": "github.com/hashicorp/terraform/config/module", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "mPbjVPD2enEey45bP4M83W2AxlY=", "path": 
"github.com/hashicorp/terraform/dag", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "P8gNPDuOzmiK4Lz9xG7OBy4Rlm8=", "path": "github.com/hashicorp/terraform/flatmap", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "zx5DLo5aV0xDqxGTzSibXg7HHAA=", "path": "github.com/hashicorp/terraform/helper/acctest", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "uT6Q9RdSRAkDjyUgQlJ2XKJRab4=", "path": "github.com/hashicorp/terraform/helper/config", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "FH5eOEHfHgdxPC/JnfmCeSBk66U=", "path": "github.com/hashicorp/terraform/helper/encryption", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "Vbo55GDzPgG/L/+W2pcvDhxrPZc=", 
"path": "github.com/hashicorp/terraform/helper/experiment", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "KNvbU1r5jv0CBeQLnEtDoL3dRtc=", "path": "github.com/hashicorp/terraform/helper/hashcode", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "B267stWNQd0/pBTXHfI/tJsxzfc=", "path": "github.com/hashicorp/terraform/helper/hilmapstructure", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "BAXV9ruAyno3aFgwYI2/wWzB2Gc=", "path": "github.com/hashicorp/terraform/helper/logging", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "twkFd4x71kBnDfrdqO5nhs8dMOY=", "path": "github.com/hashicorp/terraform/helper/mutexkv", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + 
"revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "ImyqbHM/xe3eAT2moIjLI8ksuks=", "path": "github.com/hashicorp/terraform/helper/pathorcontents", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { - "checksumSHA1": "29Nl1i88iAH8AC8y+hFnrIxPa2s=", + "checksumSHA1": "9d4zouxtH24HFa6RuUdq7lG3tgQ=", "path": "github.com/hashicorp/terraform/helper/resource", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { - "checksumSHA1": "k6ZMdJmZrtip/Yym0N40uH5pHoY=", + "checksumSHA1": "5ekoQmrRbRoWLlD8wO6julSv9Lk=", "path": "github.com/hashicorp/terraform/helper/schema", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "1yCGh/Wl4H4ODBBRmIRFcV025b0=", "path": "github.com/hashicorp/terraform/helper/shadow", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "eQ6F8nDi/R+F/SX51xCEY8iPZOE=", "path": 
"github.com/hashicorp/terraform/helper/slowmessage", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "Fzbv+N7hFXOtrR6E7ZcHT3jEE9s=", "path": "github.com/hashicorp/terraform/helper/structure", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { - "checksumSHA1": "jdwWpJZbTSU87GUlwLTuf6FwpmE=", + "checksumSHA1": "6O4zxgqAD+QZm6plsIfl4MH310Q=", "path": "github.com/hashicorp/terraform/helper/validation", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "a1YCqJht+4G5O0UNTnOTD8vfXb0=", "path": "github.com/hashicorp/terraform/helper/variables", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "ExvF2RbMeCfxuq2eASmOChEcRgQ=", "path": "github.com/hashicorp/terraform/helper/wrappedreadline", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": 
"a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "q96i9foHLGSZ+9dFOV7jUseq7zs=", "path": "github.com/hashicorp/terraform/helper/wrappedstreams", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { "checksumSHA1": "yFWmdS6yEJZpRJzUqd/mULqCYGk=", "path": "github.com/hashicorp/terraform/moduledeps", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "DqaoG++NXRCfvH/OloneLWrM+3k=", "path": "github.com/hashicorp/terraform/plugin", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "tD8r8iNg//TN8c2GFuTnyHKBCPY=", "path": "github.com/hashicorp/terraform/plugin/discovery", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "sF6VAY7XsYiFnUQATFWuXUd1B3Y=", + "checksumSHA1": "OzxWbZ+8/ogP/wSfXgcSc+o+ulQ=", + "path": "github.com/hashicorp/terraform/registry", + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" + }, + { + 
"checksumSHA1": "cR87P4V5aiEfvF+1qoBi2JQyQS4=", "path": "github.com/hashicorp/terraform/registry/regsrc", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "y9IXgIJQq9XNy1zIYUV2Kc0KsnA=", "path": "github.com/hashicorp/terraform/registry/response", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "vW75JRFcEDJNxNCB2mrlFeYOyX4=", "path": "github.com/hashicorp/terraform/repl", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "3vkhsjnBn8rOoO5bW1R4lPtckVE=", "path": "github.com/hashicorp/terraform/state", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "xe9XpHd/H/N6fkZ4iAL8MiHFnKs=", "path": "github.com/hashicorp/terraform/state/remote", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "VXlzRRDVOqeMvnnrbUcR9H64OA4=", "path": "github.com/hashicorp/terraform/svchost", - "revision": 
"23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "GzcKNlFL0N77JVjU8qbltXE4R3k=", "path": "github.com/hashicorp/terraform/svchost/auth", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "iGPn4dJF6fT/b+PFSWuimW3GiX8=", "path": "github.com/hashicorp/terraform/svchost/disco", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "YV5MKswo26T8PLJxlQXcv4R8Q9U=", + "checksumSHA1": "BmMTGwYif0bQC4OPok8crtYoSdM=", "path": "github.com/hashicorp/terraform/terraform", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z", + "version": "master", + "versionExact": "master" }, { - "checksumSHA1": "C3c1+sTF/97mT6N+15bVSq4Ryr8=", + "checksumSHA1": "+K+oz9mMTmQMxIA3KVkGRfjvm9I=", "path": "github.com/hashicorp/terraform/tfdiags", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { - "checksumSHA1": "ZVt5KFukReFv2R7t2zTRr1pGPC8=", + "checksumSHA1": "PAaFo8y+78joOavbeHEV0lzLuKU=", "path": 
"github.com/hashicorp/terraform/version", - "revision": "23caaf9feb50d566329f5b4aa07a57da8cd6f448", - "revisionTime": "2017-11-03T23:17:07Z", - "version": "v0.11.0-beta1", - "versionExact": "v0.11.0-beta1" + "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", + "revisionTime": "2017-12-20T14:20:43Z" }, { "checksumSHA1": "ft77GtqeZEeCXioGpF/s6DlGm/U=", From ebb361ff1ac00b2d8af8da2c09a7e03f1fde65ab Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 20 Dec 2017 09:55:46 -0500 Subject: [PATCH 151/350] govendor remove unused --- vendor/cloud.google.com/go/LICENSE | 202 - .../go/compute/metadata/metadata.go | 437 - vendor/cloud.google.com/go/iam/iam.go | 256 - .../cloud.google.com/go/internal/annotate.go | 54 - .../go/internal/optional/optional.go | 108 - vendor/cloud.google.com/go/internal/retry.go | 55 - .../go/internal/version/update_version.sh | 6 - .../go/internal/version/version.go | 71 - vendor/cloud.google.com/go/storage/acl.go | 235 - vendor/cloud.google.com/go/storage/bucket.go | 767 - vendor/cloud.google.com/go/storage/copy.go | 201 - vendor/cloud.google.com/go/storage/doc.go | 168 - vendor/cloud.google.com/go/storage/go110.go | 30 - vendor/cloud.google.com/go/storage/go17.go | 26 - vendor/cloud.google.com/go/storage/iam.go | 121 - vendor/cloud.google.com/go/storage/invoke.go | 36 - .../cloud.google.com/go/storage/not_go110.go | 40 - .../cloud.google.com/go/storage/not_go17.go | 26 - .../go/storage/notifications.go | 179 - vendor/cloud.google.com/go/storage/reader.go | 80 - vendor/cloud.google.com/go/storage/storage.go | 1143 - vendor/cloud.google.com/go/storage/writer.go | 201 - .../github.com/Azure/azure-sdk-for-go/LICENSE | 202 - .../github.com/Azure/azure-sdk-for-go/NOTICE | 5 - .../azure-sdk-for-go/arm/storage/accounts.go | 931 - .../azure-sdk-for-go/arm/storage/client.go | 51 - .../azure-sdk-for-go/arm/storage/models.go | 602 - .../arm/storage/operations.go | 96 - .../azure-sdk-for-go/arm/storage/skus.go | 100 - 
.../azure-sdk-for-go/arm/storage/usage.go | 100 - .../azure-sdk-for-go/arm/storage/version.go | 28 - .../Azure/azure-sdk-for-go/storage/README.md | 73 - .../azure-sdk-for-go/storage/appendblob.go | 91 - .../azure-sdk-for-go/storage/authorization.go | 246 - .../Azure/azure-sdk-for-go/storage/blob.go | 652 - .../azure-sdk-for-go/storage/blobsasuri.go | 170 - .../storage/blobserviceclient.go | 126 - .../azure-sdk-for-go/storage/blockblob.go | 270 - .../Azure/azure-sdk-for-go/storage/client.go | 878 - .../azure-sdk-for-go/storage/commonsasuri.go | 38 - .../azure-sdk-for-go/storage/container.go | 545 - .../azure-sdk-for-go/storage/copyblob.go | 237 - .../azure-sdk-for-go/storage/directory.go | 238 - .../Azure/azure-sdk-for-go/storage/entity.go | 453 - .../Azure/azure-sdk-for-go/storage/file.go | 476 - .../storage/fileserviceclient.go | 338 - .../azure-sdk-for-go/storage/leaseblob.go | 201 - .../Azure/azure-sdk-for-go/storage/message.go | 167 - .../Azure/azure-sdk-for-go/storage/odata.go | 47 - .../azure-sdk-for-go/storage/pageblob.go | 204 - .../Azure/azure-sdk-for-go/storage/queue.go | 441 - .../azure-sdk-for-go/storage/queuesasuri.go | 146 - .../storage/queueserviceclient.go | 42 - .../Azure/azure-sdk-for-go/storage/share.go | 216 - .../azure-sdk-for-go/storage/storagepolicy.go | 61 - .../storage/storageservice.go | 131 - .../Azure/azure-sdk-for-go/storage/table.go | 419 - .../azure-sdk-for-go/storage/table_batch.go | 316 - .../storage/tableserviceclient.go | 204 - .../Azure/azure-sdk-for-go/storage/util.go | 235 - .../azure-sdk-for-go/storage/util_1.7.go | 26 - .../azure-sdk-for-go/storage/util_1.8.go | 32 - .../Azure/azure-sdk-for-go/storage/version.go | 19 - vendor/github.com/Azure/go-autorest/LICENSE | 191 - .../Azure/go-autorest/autorest/adal/README.md | 253 - .../Azure/go-autorest/autorest/adal/config.go | 65 - .../go-autorest/autorest/adal/devicetoken.go | 242 - .../Azure/go-autorest/autorest/adal/msi.go | 20 - .../go-autorest/autorest/adal/msi_windows.go | 25 
- .../go-autorest/autorest/adal/persist.go | 73 - .../Azure/go-autorest/autorest/adal/sender.go | 60 - .../Azure/go-autorest/autorest/adal/token.go | 427 - .../go-autorest/autorest/authorization.go | 181 - .../Azure/go-autorest/autorest/autorest.go | 129 - .../Azure/go-autorest/autorest/azure/async.go | 316 - .../Azure/go-autorest/autorest/azure/azure.go | 200 - .../autorest/azure/environments.go | 144 - .../Azure/go-autorest/autorest/azure/rp.go | 202 - .../Azure/go-autorest/autorest/client.go | 251 - .../Azure/go-autorest/autorest/date/date.go | 96 - .../Azure/go-autorest/autorest/date/time.go | 103 - .../go-autorest/autorest/date/timerfc1123.go | 100 - .../go-autorest/autorest/date/unixtime.go | 123 - .../go-autorest/autorest/date/utility.go | 25 - .../Azure/go-autorest/autorest/error.go | 98 - .../Azure/go-autorest/autorest/preparer.go | 442 - .../Azure/go-autorest/autorest/responder.go | 250 - .../go-autorest/autorest/retriablerequest.go | 52 - .../autorest/retriablerequest_1.7.go | 54 - .../autorest/retriablerequest_1.8.go | 66 - .../Azure/go-autorest/autorest/sender.go | 307 - .../Azure/go-autorest/autorest/utility.go | 192 - .../autorest/validation/validation.go | 395 - .../Azure/go-autorest/autorest/version.go | 49 - vendor/github.com/Unknwon/com/LICENSE | 191 - vendor/github.com/Unknwon/com/README.md | 20 - vendor/github.com/Unknwon/com/cmd.go | 161 - vendor/github.com/Unknwon/com/convert.go | 167 - vendor/github.com/Unknwon/com/dir.go | 173 - vendor/github.com/Unknwon/com/file.go | 145 - vendor/github.com/Unknwon/com/html.go | 60 - vendor/github.com/Unknwon/com/http.go | 201 - vendor/github.com/Unknwon/com/math.go | 29 - vendor/github.com/Unknwon/com/path.go | 80 - vendor/github.com/Unknwon/com/regex.go | 56 - vendor/github.com/Unknwon/com/slice.go | 87 - vendor/github.com/Unknwon/com/string.go | 253 - vendor/github.com/Unknwon/com/time.go | 115 - vendor/github.com/Unknwon/com/url.go | 41 - vendor/github.com/coreos/etcd/LICENSE | 202 - 
vendor/github.com/coreos/etcd/NOTICE | 5 - .../coreos/etcd/auth/authpb/auth.pb.go | 824 - .../coreos/etcd/auth/authpb/auth.proto | 37 - .../github.com/coreos/etcd/client/README.md | 117 - .../coreos/etcd/client/auth_role.go | 236 - .../coreos/etcd/client/auth_user.go | 319 - .../coreos/etcd/client/cancelreq.go | 18 - .../github.com/coreos/etcd/client/client.go | 710 - .../coreos/etcd/client/cluster_error.go | 37 - vendor/github.com/coreos/etcd/client/curl.go | 70 - .../github.com/coreos/etcd/client/discover.go | 40 - vendor/github.com/coreos/etcd/client/doc.go | 73 - .../coreos/etcd/client/keys.generated.go | 5218 --- vendor/github.com/coreos/etcd/client/keys.go | 682 - .../github.com/coreos/etcd/client/members.go | 303 - vendor/github.com/coreos/etcd/client/util.go | 53 - .../github.com/coreos/etcd/clientv3/README.md | 85 - .../github.com/coreos/etcd/clientv3/auth.go | 223 - .../coreos/etcd/clientv3/balancer.go | 438 - .../github.com/coreos/etcd/clientv3/client.go | 528 - .../coreos/etcd/clientv3/cluster.go | 92 - .../coreos/etcd/clientv3/compact_op.go | 51 - .../coreos/etcd/clientv3/compare.go | 140 - .../coreos/etcd/clientv3/concurrency/doc.go | 17 - .../etcd/clientv3/concurrency/election.go | 245 - .../coreos/etcd/clientv3/concurrency/key.go | 65 - .../coreos/etcd/clientv3/concurrency/mutex.go | 118 - .../etcd/clientv3/concurrency/session.go | 141 - .../coreos/etcd/clientv3/concurrency/stm.go | 387 - .../github.com/coreos/etcd/clientv3/config.go | 62 - vendor/github.com/coreos/etcd/clientv3/doc.go | 64 - .../coreos/etcd/clientv3/health_balancer.go | 249 - vendor/github.com/coreos/etcd/clientv3/kv.go | 165 - .../github.com/coreos/etcd/clientv3/lease.go | 560 - .../github.com/coreos/etcd/clientv3/logger.go | 95 - .../coreos/etcd/clientv3/maintenance.go | 205 - vendor/github.com/coreos/etcd/clientv3/op.go | 513 - .../coreos/etcd/clientv3/ready_wait.go | 30 - .../github.com/coreos/etcd/clientv3/retry.go | 495 - .../github.com/coreos/etcd/clientv3/sort.go | 37 - 
vendor/github.com/coreos/etcd/clientv3/txn.go | 147 - .../github.com/coreos/etcd/clientv3/watch.go | 806 - .../etcd/etcdserver/api/v3rpc/rpctypes/doc.go | 16 - .../etcdserver/api/v3rpc/rpctypes/error.go | 212 - .../etcd/etcdserver/api/v3rpc/rpctypes/md.go | 20 - .../etcdserver/etcdserverpb/etcdserver.pb.go | 1052 - .../etcdserver/etcdserverpb/etcdserver.proto | 34 - .../etcdserverpb/raft_internal.pb.go | 2094 - .../etcdserverpb/raft_internal.proto | 74 - .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 18682 --------- .../etcd/etcdserver/etcdserverpb/rpc.proto | 1053 - .../coreos/etcd/mvcc/mvccpb/kv.pb.go | 735 - .../coreos/etcd/mvcc/mvccpb/kv.proto | 49 - .../coreos/etcd/pkg/pathutil/path.go | 31 - vendor/github.com/coreos/etcd/pkg/srv/srv.go | 141 - .../github.com/coreos/etcd/pkg/tlsutil/doc.go | 16 - .../coreos/etcd/pkg/tlsutil/tlsutil.go | 72 - .../coreos/etcd/pkg/transport/doc.go | 17 - .../etcd/pkg/transport/keepalive_listener.go | 94 - .../coreos/etcd/pkg/transport/limit_listen.go | 80 - .../coreos/etcd/pkg/transport/listener.go | 281 - .../coreos/etcd/pkg/transport/listener_tls.go | 272 - .../coreos/etcd/pkg/transport/timeout_conn.go | 44 - .../etcd/pkg/transport/timeout_dialer.go | 36 - .../etcd/pkg/transport/timeout_listener.go | 57 - .../etcd/pkg/transport/timeout_transport.go | 51 - .../coreos/etcd/pkg/transport/tls.go | 49 - .../coreos/etcd/pkg/transport/transport.go | 71 - .../etcd/pkg/transport/unix_listener.go | 40 - .../github.com/coreos/etcd/pkg/types/doc.go | 17 - vendor/github.com/coreos/etcd/pkg/types/id.go | 41 - .../github.com/coreos/etcd/pkg/types/set.go | 178 - .../github.com/coreos/etcd/pkg/types/slice.go | 22 - .../github.com/coreos/etcd/pkg/types/urls.go | 82 - .../coreos/etcd/pkg/types/urlsmap.go | 107 - .../github.com/coreos/etcd/version/version.go | 56 - vendor/github.com/coreos/go-semver/LICENSE | 202 - .../coreos/go-semver/semver/semver.go | 296 - .../coreos/go-semver/semver/sort.go | 38 - vendor/github.com/dgrijalva/jwt-go/LICENSE | 8 
- .../dgrijalva/jwt-go/MIGRATION_GUIDE.md | 97 - vendor/github.com/dgrijalva/jwt-go/README.md | 85 - .../dgrijalva/jwt-go/VERSION_HISTORY.md | 111 - vendor/github.com/dgrijalva/jwt-go/claims.go | 134 - vendor/github.com/dgrijalva/jwt-go/doc.go | 4 - vendor/github.com/dgrijalva/jwt-go/ecdsa.go | 147 - .../dgrijalva/jwt-go/ecdsa_utils.go | 67 - vendor/github.com/dgrijalva/jwt-go/errors.go | 59 - vendor/github.com/dgrijalva/jwt-go/hmac.go | 94 - .../github.com/dgrijalva/jwt-go/map_claims.go | 94 - vendor/github.com/dgrijalva/jwt-go/none.go | 52 - vendor/github.com/dgrijalva/jwt-go/parser.go | 131 - vendor/github.com/dgrijalva/jwt-go/rsa.go | 100 - vendor/github.com/dgrijalva/jwt-go/rsa_pss.go | 126 - .../github.com/dgrijalva/jwt-go/rsa_utils.go | 69 - .../dgrijalva/jwt-go/signing_method.go | 35 - vendor/github.com/dgrijalva/jwt-go/token.go | 108 - .../protoc-gen-go/descriptor/Makefile | 37 - .../protoc-gen-go/descriptor/descriptor.pb.go | 2215 -- .../protoc-gen-go/descriptor/descriptor.proto | 849 - .../googleapis/gax-go/CONTRIBUTING.md | 27 - vendor/github.com/googleapis/gax-go/LICENSE | 27 - vendor/github.com/googleapis/gax-go/README.md | 24 - .../googleapis/gax-go/call_option.go | 157 - vendor/github.com/googleapis/gax-go/gax.go | 40 - vendor/github.com/googleapis/gax-go/header.go | 24 - vendor/github.com/googleapis/gax-go/invoke.go | 90 - .../gophercloud/gophercloud/CHANGELOG.md | 0 .../github.com/gophercloud/gophercloud/FAQ.md | 148 - .../gophercloud/gophercloud/LICENSE | 191 - .../gophercloud/gophercloud/MIGRATING.md | 32 - .../gophercloud/gophercloud/README.md | 143 - .../gophercloud/gophercloud/STYLEGUIDE.md | 74 - .../gophercloud/gophercloud/auth_options.go | 354 - .../github.com/gophercloud/gophercloud/doc.go | 93 - .../gophercloud/endpoint_search.go | 76 - .../gophercloud/gophercloud/errors.go | 401 - .../gophercloud/gophercloud/internal/pkg.go | 1 - .../gophercloud/gophercloud/internal/util.go | 34 - .../gophercloud/openstack/auth_env.go | 64 - 
.../extensions/volumeactions/doc.go | 86 - .../extensions/volumeactions/requests.go | 263 - .../extensions/volumeactions/results.go | 186 - .../extensions/volumeactions/urls.go | 39 - .../openstack/blockstorage/v1/volumes/doc.go | 5 - .../blockstorage/v1/volumes/requests.go | 167 - .../blockstorage/v1/volumes/results.go | 109 - .../openstack/blockstorage/v1/volumes/urls.go | 23 - .../openstack/blockstorage/v1/volumes/util.go | 22 - .../openstack/blockstorage/v2/volumes/doc.go | 5 - .../blockstorage/v2/volumes/requests.go | 182 - .../blockstorage/v2/volumes/results.go | 154 - .../openstack/blockstorage/v2/volumes/urls.go | 23 - .../openstack/blockstorage/v2/volumes/util.go | 22 - .../gophercloud/openstack/client.go | 348 - .../v2/extensions/availabilityzones/doc.go | 26 - .../extensions/availabilityzones/results.go | 8 - .../v2/extensions/bootfromvolume/doc.go | 152 - .../v2/extensions/bootfromvolume/requests.go | 120 - .../v2/extensions/bootfromvolume/results.go | 12 - .../v2/extensions/bootfromvolume/urls.go | 7 - .../compute/v2/extensions/floatingips/doc.go | 68 - .../v2/extensions/floatingips/requests.go | 114 - .../v2/extensions/floatingips/results.go | 115 - .../compute/v2/extensions/floatingips/urls.go | 37 - .../compute/v2/extensions/keypairs/doc.go | 71 - .../v2/extensions/keypairs/requests.go | 86 - .../compute/v2/extensions/keypairs/results.go | 91 - .../compute/v2/extensions/keypairs/urls.go | 25 - .../v2/extensions/schedulerhints/doc.go | 76 - .../v2/extensions/schedulerhints/requests.go | 164 - .../compute/v2/extensions/secgroups/doc.go | 112 - .../v2/extensions/secgroups/requests.go | 183 - .../v2/extensions/secgroups/results.go | 214 - .../compute/v2/extensions/secgroups/urls.go | 32 - .../compute/v2/extensions/servergroups/doc.go | 40 - .../v2/extensions/servergroups/requests.go | 59 - .../v2/extensions/servergroups/results.go | 87 - .../v2/extensions/servergroups/urls.go | 25 - .../compute/v2/extensions/startstop/doc.go | 19 - 
.../v2/extensions/startstop/requests.go | 19 - .../v2/extensions/startstop/results.go | 15 - .../v2/extensions/tenantnetworks/doc.go | 26 - .../v2/extensions/tenantnetworks/requests.go | 19 - .../v2/extensions/tenantnetworks/results.go | 58 - .../v2/extensions/tenantnetworks/urls.go | 17 - .../compute/v2/extensions/volumeattach/doc.go | 30 - .../v2/extensions/volumeattach/requests.go | 60 - .../v2/extensions/volumeattach/results.go | 77 - .../v2/extensions/volumeattach/urls.go | 25 - .../openstack/compute/v2/flavors/doc.go | 45 - .../openstack/compute/v2/flavors/requests.go | 194 - .../openstack/compute/v2/flavors/results.go | 133 - .../openstack/compute/v2/flavors/urls.go | 21 - .../openstack/compute/v2/images/doc.go | 32 - .../openstack/compute/v2/images/requests.go | 109 - .../openstack/compute/v2/images/results.go | 95 - .../openstack/compute/v2/images/urls.go | 15 - .../openstack/compute/v2/servers/doc.go | 115 - .../openstack/compute/v2/servers/errors.go | 71 - .../openstack/compute/v2/servers/requests.go | 791 - .../openstack/compute/v2/servers/results.go | 404 - .../openstack/compute/v2/servers/urls.go | 51 - .../openstack/compute/v2/servers/util.go | 21 - .../openstack/dns/v2/recordsets/doc.go | 54 - .../openstack/dns/v2/recordsets/requests.go | 166 - .../openstack/dns/v2/recordsets/results.go | 147 - .../openstack/dns/v2/recordsets/urls.go | 11 - .../gophercloud/openstack/dns/v2/zones/doc.go | 48 - .../openstack/dns/v2/zones/requests.go | 174 - .../openstack/dns/v2/zones/results.go | 166 - .../openstack/dns/v2/zones/urls.go | 11 - .../gophercloud/gophercloud/openstack/doc.go | 14 - .../openstack/endpoint_location.go | 107 - .../gophercloud/openstack/errors.go | 71 - .../openstack/identity/v2/tenants/doc.go | 65 - .../openstack/identity/v2/tenants/requests.go | 116 - .../openstack/identity/v2/tenants/results.go | 91 - .../openstack/identity/v2/tenants/urls.go | 23 - .../openstack/identity/v2/tokens/doc.go | 46 - .../openstack/identity/v2/tokens/requests.go 
| 103 - .../openstack/identity/v2/tokens/results.go | 159 - .../openstack/identity/v2/tokens/urls.go | 13 - .../openstack/identity/v3/groups/doc.go | 60 - .../openstack/identity/v3/groups/requests.go | 158 - .../openstack/identity/v3/groups/results.go | 132 - .../openstack/identity/v3/groups/urls.go | 23 - .../openstack/identity/v3/projects/doc.go | 58 - .../identity/v3/projects/requests.go | 152 - .../openstack/identity/v3/projects/results.go | 103 - .../openstack/identity/v3/projects/urls.go | 23 - .../openstack/identity/v3/tokens/doc.go | 108 - .../openstack/identity/v3/tokens/requests.go | 210 - .../openstack/identity/v3/tokens/results.go | 170 - .../openstack/identity/v3/tokens/urls.go | 7 - .../openstack/identity/v3/users/doc.go | 123 - .../openstack/identity/v3/users/requests.go | 242 - .../openstack/identity/v3/users/results.go | 149 - .../openstack/identity/v3/users/urls.go | 35 - .../imageservice/v2/imagedata/doc.go | 33 - .../imageservice/v2/imagedata/requests.go | 28 - .../imageservice/v2/imagedata/results.go | 28 - .../imageservice/v2/imagedata/urls.go | 13 - .../openstack/imageservice/v2/images/doc.go | 60 - .../imageservice/v2/images/requests.go | 258 - .../imageservice/v2/images/results.go | 200 - .../openstack/imageservice/v2/images/types.go | 79 - .../openstack/imageservice/v2/images/urls.go | 51 - .../v2/extensions/fwaas/firewalls/doc.go | 60 - .../v2/extensions/fwaas/firewalls/errors.go | 11 - .../v2/extensions/fwaas/firewalls/requests.go | 137 - .../v2/extensions/fwaas/firewalls/results.go | 95 - .../v2/extensions/fwaas/firewalls/urls.go | 16 - .../v2/extensions/fwaas/policies/doc.go | 84 - .../v2/extensions/fwaas/policies/requests.go | 177 - .../v2/extensions/fwaas/policies/results.go | 103 - .../v2/extensions/fwaas/policies/urls.go | 26 - .../extensions/fwaas/routerinsertion/doc.go | 68 - .../fwaas/routerinsertion/requests.go | 43 - .../fwaas/routerinsertion/results.go | 7 - .../v2/extensions/fwaas/rules/doc.go | 64 - 
.../v2/extensions/fwaas/rules/errors.go | 12 - .../v2/extensions/fwaas/rules/requests.go | 188 - .../v2/extensions/fwaas/rules/results.go | 99 - .../v2/extensions/fwaas/rules/urls.go | 16 - .../v2/extensions/layer3/floatingips/doc.go | 71 - .../extensions/layer3/floatingips/requests.go | 147 - .../extensions/layer3/floatingips/results.go | 116 - .../v2/extensions/layer3/floatingips/urls.go | 13 - .../v2/extensions/layer3/routers/doc.go | 108 - .../v2/extensions/layer3/routers/requests.go | 223 - .../v2/extensions/layer3/routers/results.go | 167 - .../v2/extensions/layer3/routers/urls.go | 21 - .../v2/extensions/lbaas/members/doc.go | 59 - .../v2/extensions/lbaas/members/requests.go | 124 - .../v2/extensions/lbaas/members/results.go | 109 - .../v2/extensions/lbaas/members/urls.go | 16 - .../v2/extensions/lbaas/monitors/doc.go | 63 - .../v2/extensions/lbaas/monitors/requests.go | 227 - .../v2/extensions/lbaas/monitors/results.go | 141 - .../v2/extensions/lbaas/monitors/urls.go | 16 - .../v2/extensions/lbaas/pools/doc.go | 81 - .../v2/extensions/lbaas/pools/requests.go | 175 - .../v2/extensions/lbaas/pools/results.go | 137 - .../v2/extensions/lbaas/pools/urls.go | 25 - .../v2/extensions/lbaas/vips/doc.go | 65 - .../v2/extensions/lbaas/vips/requests.go | 180 - .../v2/extensions/lbaas/vips/results.go | 156 - .../v2/extensions/lbaas/vips/urls.go | 16 - .../v2/extensions/lbaas_v2/listeners/doc.go | 63 - .../extensions/lbaas_v2/listeners/requests.go | 194 - .../extensions/lbaas_v2/listeners/results.go | 131 - .../v2/extensions/lbaas_v2/listeners/urls.go | 16 - .../extensions/lbaas_v2/loadbalancers/doc.go | 71 - .../lbaas_v2/loadbalancers/requests.go | 177 - .../lbaas_v2/loadbalancers/results.go | 149 - .../extensions/lbaas_v2/loadbalancers/urls.go | 21 - .../v2/extensions/lbaas_v2/monitors/doc.go | 69 - .../extensions/lbaas_v2/monitors/requests.go | 252 - .../extensions/lbaas_v2/monitors/results.go | 149 - .../v2/extensions/lbaas_v2/monitors/urls.go | 16 - 
.../v2/extensions/lbaas_v2/pools/doc.go | 124 - .../v2/extensions/lbaas_v2/pools/requests.go | 347 - .../v2/extensions/lbaas_v2/pools/results.go | 273 - .../v2/extensions/lbaas_v2/pools/urls.go | 25 - .../networking/v2/extensions/provider/doc.go | 73 - .../v2/extensions/provider/requests.go | 28 - .../v2/extensions/provider/results.go | 62 - .../v2/extensions/security/groups/doc.go | 58 - .../v2/extensions/security/groups/requests.go | 151 - .../v2/extensions/security/groups/results.go | 102 - .../v2/extensions/security/groups/urls.go | 13 - .../v2/extensions/security/rules/doc.go | 50 - .../v2/extensions/security/rules/requests.go | 154 - .../v2/extensions/security/rules/results.go | 121 - .../v2/extensions/security/rules/urls.go | 13 - .../openstack/networking/v2/networks/doc.go | 65 - .../networking/v2/networks/requests.go | 165 - .../networking/v2/networks/results.go | 111 - .../openstack/networking/v2/networks/urls.go | 31 - .../openstack/networking/v2/ports/doc.go | 73 - .../openstack/networking/v2/ports/requests.go | 177 - .../openstack/networking/v2/ports/results.go | 136 - .../openstack/networking/v2/ports/urls.go | 31 - .../openstack/networking/v2/subnets/doc.go | 133 - .../networking/v2/subnets/requests.go | 231 - .../networking/v2/subnets/results.go | 133 - .../openstack/networking/v2/subnets/urls.go | 31 - .../objectstorage/v1/accounts/doc.go | 29 - .../objectstorage/v1/accounts/requests.go | 100 - .../objectstorage/v1/accounts/results.go | 167 - .../objectstorage/v1/accounts/urls.go | 11 - .../objectstorage/v1/containers/doc.go | 88 - .../objectstorage/v1/containers/requests.go | 191 - .../objectstorage/v1/containers/results.go | 342 - .../objectstorage/v1/containers/urls.go | 23 - .../openstack/objectstorage/v1/objects/doc.go | 102 - .../objectstorage/v1/objects/errors.go | 13 - .../objectstorage/v1/objects/requests.go | 461 - .../objectstorage/v1/objects/results.go | 496 - .../objectstorage/v1/objects/urls.go | 33 - 
.../openstack/objectstorage/v1/swauth/doc.go | 16 - .../objectstorage/v1/swauth/requests.go | 70 - .../objectstorage/v1/swauth/results.go | 27 - .../openstack/objectstorage/v1/swauth/urls.go | 7 - .../openstack/utils/choose_version.go | 111 - .../gophercloud/pagination/http.go | 60 - .../gophercloud/pagination/linked.go | 92 - .../gophercloud/pagination/marker.go | 58 - .../gophercloud/pagination/pager.go | 238 - .../gophercloud/gophercloud/pagination/pkg.go | 4 - .../gophercloud/pagination/single.go | 33 - .../gophercloud/gophercloud/params.go | 463 - .../gophercloud/provider_client.go | 307 - .../gophercloud/gophercloud/results.go | 382 - .../gophercloud/gophercloud/service_client.go | 122 - .../gophercloud/gophercloud/util.go | 102 - vendor/github.com/hashicorp/consul/LICENSE | 354 - .../github.com/hashicorp/consul/api/README.md | 43 - vendor/github.com/hashicorp/consul/api/acl.go | 193 - .../github.com/hashicorp/consul/api/agent.go | 621 - vendor/github.com/hashicorp/consul/api/api.go | 781 - .../hashicorp/consul/api/catalog.go | 198 - .../hashicorp/consul/api/coordinate.go | 68 - .../github.com/hashicorp/consul/api/event.go | 104 - .../github.com/hashicorp/consul/api/health.go | 200 - vendor/github.com/hashicorp/consul/api/kv.go | 420 - .../github.com/hashicorp/consul/api/lock.go | 385 - .../hashicorp/consul/api/operator.go | 11 - .../hashicorp/consul/api/operator_area.go | 193 - .../consul/api/operator_autopilot.go | 219 - .../hashicorp/consul/api/operator_keyring.go | 86 - .../hashicorp/consul/api/operator_raft.go | 89 - .../hashicorp/consul/api/operator_segment.go | 11 - .../hashicorp/consul/api/prepared_query.go | 198 - vendor/github.com/hashicorp/consul/api/raw.go | 24 - .../hashicorp/consul/api/semaphore.go | 513 - .../hashicorp/consul/api/session.go | 224 - .../hashicorp/consul/api/snapshot.go | 47 - .../github.com/hashicorp/consul/api/status.go | 43 - .../hashicorp/go-retryablehttp/LICENSE | 363 - .../hashicorp/go-retryablehttp/Makefile | 11 - 
.../hashicorp/go-retryablehttp/README.md | 43 - .../hashicorp/go-retryablehttp/client.go | 311 - vendor/github.com/hashicorp/serf/LICENSE | 354 - .../hashicorp/serf/coordinate/client.go | 232 - .../hashicorp/serf/coordinate/config.go | 70 - .../hashicorp/serf/coordinate/coordinate.go | 203 - .../hashicorp/serf/coordinate/phantom.go | 187 - .../terraform/backend/atlas/backend.go | 163 - .../hashicorp/terraform/backend/atlas/cli.go | 13 - .../terraform/backend/atlas/state_client.go | 319 - .../hashicorp/terraform/backend/backend.go | 169 - .../hashicorp/terraform/backend/cli.go | 83 - .../hashicorp/terraform/backend/init/init.go | 124 - .../terraform/backend/legacy/backend.go | 75 - .../terraform/backend/legacy/legacy.go | 28 - .../terraform/backend/local/backend.go | 421 - .../terraform/backend/local/backend_apply.go | 327 - .../terraform/backend/local/backend_local.go | 133 - .../terraform/backend/local/backend_plan.go | 264 - .../backend/local/backend_refresh.go | 131 - .../hashicorp/terraform/backend/local/cli.go | 24 - .../backend/local/counthookaction_string.go | 16 - .../terraform/backend/local/hook_count.go | 115 - .../backend/local/hook_count_action.go | 11 - .../terraform/backend/local/hook_state.go | 33 - .../terraform/backend/local/testing.go | 100 - .../hashicorp/terraform/backend/nil.go | 39 - .../terraform/backend/operation_type.go | 14 - .../terraform/backend/operationtype_string.go | 16 - .../backend/remote-state/azure/backend.go | 225 - .../remote-state/azure/backend_state.go | 141 - .../backend/remote-state/azure/client.go | 271 - .../backend/remote-state/consul/backend.go | 180 - .../remote-state/consul/backend_state.go | 155 - .../backend/remote-state/consul/client.go | 468 - .../backend/remote-state/etcdv3/backend.go | 157 - .../remote-state/etcdv3/backend_state.go | 103 - .../backend/remote-state/etcdv3/client.go | 211 - .../backend/remote-state/gcs/backend.go | 166 - .../backend/remote-state/gcs/backend_state.go | 158 - 
.../backend/remote-state/gcs/client.go | 168 - .../backend/remote-state/inmem/backend.go | 208 - .../backend/remote-state/inmem/client.go | 47 - .../backend/remote-state/manta/backend.go | 177 - .../remote-state/manta/backend_state.go | 144 - .../backend/remote-state/manta/client.go | 201 - .../backend/remote-state/s3/backend.go | 267 - .../backend/remote-state/s3/backend_state.go | 189 - .../backend/remote-state/s3/client.go | 416 - .../backend/remote-state/swift/backend.go | 325 - .../remote-state/swift/backend_state.go | 31 - .../backend/remote-state/swift/client.go | 115 - .../hashicorp/terraform/backend/testing.go | 321 - .../hashicorp/terraform/command/apply.go | 398 - .../terraform/command/autocomplete.go | 67 - .../hashicorp/terraform/command/cli_ui.go | 51 - .../hashicorp/terraform/command/command.go | 98 - .../hashicorp/terraform/command/console.go | 159 - .../terraform/command/console_interactive.go | 61 - .../command/console_interactive_solaris.go | 18 - .../terraform/command/debug_command.go | 30 - .../terraform/command/debug_json2dot.go | 66 - .../hashicorp/terraform/command/flag_kv.go | 43 - .../hashicorp/terraform/command/fmt.go | 126 - .../hashicorp/terraform/command/get.go | 90 - .../hashicorp/terraform/command/graph.go | 199 - .../terraform/command/hcl_printer.go | 196 - .../hashicorp/terraform/command/hook_ui.go | 414 - .../hashicorp/terraform/command/import.go | 347 - .../hashicorp/terraform/command/init.go | 676 - .../terraform/command/internal_plugin.go | 90 - .../terraform/command/internal_plugin_list.go | 26 - .../hashicorp/terraform/command/meta.go | 643 - .../terraform/command/meta_backend.go | 1740 - .../terraform/command/meta_backend_migrate.go | 520 - .../hashicorp/terraform/command/meta_new.go | 175 - .../hashicorp/terraform/command/output.go | 292 - .../hashicorp/terraform/command/plan.go | 217 - .../hashicorp/terraform/command/plugins.go | 381 - .../terraform/command/plugins_lock.go | 86 - 
.../hashicorp/terraform/command/providers.go | 133 - .../hashicorp/terraform/command/push.go | 553 - .../hashicorp/terraform/command/refresh.go | 152 - .../hashicorp/terraform/command/show.go | 146 - .../terraform/command/state_command.go | 40 - .../hashicorp/terraform/command/state_list.go | 119 - .../hashicorp/terraform/command/state_meta.go | 104 - .../hashicorp/terraform/command/state_mv.go | 242 - .../hashicorp/terraform/command/state_pull.go | 83 - .../hashicorp/terraform/command/state_push.go | 167 - .../hashicorp/terraform/command/state_rm.go | 113 - .../hashicorp/terraform/command/state_show.go | 125 - .../hashicorp/terraform/command/taint.go | 227 - .../hashicorp/terraform/command/ui_input.go | 159 - .../hashicorp/terraform/command/unlock.go | 144 - .../hashicorp/terraform/command/untaint.go | 215 - .../hashicorp/terraform/command/validate.go | 143 - .../hashicorp/terraform/command/version.go | 123 - .../terraform/command/workspace_command.go | 148 - .../terraform/command/workspace_delete.go | 191 - .../terraform/command/workspace_list.go | 99 - .../terraform/command/workspace_new.go | 190 - .../terraform/command/workspace_select.go | 130 - .../terraform/command/workspace_show.go | 50 - .../terraform/helper/shadow/closer.go | 83 - .../terraform/helper/shadow/compared_value.go | 128 - .../terraform/helper/shadow/keyed_value.go | 151 - .../terraform/helper/shadow/ordered_value.go | 66 - .../terraform/helper/shadow/value.go | 87 - .../terraform/state/remote/artifactory.go | 117 - .../hashicorp/terraform/state/remote/etcd.go | 78 - .../hashicorp/terraform/state/remote/file.go | 64 - .../hashicorp/terraform/state/remote/gcs.go | 176 - .../hashicorp/terraform/state/remote/http.go | 338 - .../terraform/state/remote/remote.go | 52 - .../hashicorp/terraform/state/remote/state.go | 139 - .../terraform/state/remote/testing.go | 97 - vendor/github.com/joyent/gocommon/LICENSE | 373 - vendor/github.com/joyent/gocommon/README.md | 98 - 
.../joyent/gocommon/client/client.go | 110 - .../joyent/gocommon/errors/errors.go | 292 - vendor/github.com/joyent/gocommon/gocommon.go | 21 - .../github.com/joyent/gocommon/http/client.go | 427 - vendor/github.com/joyent/gocommon/jpc/jpc.go | 113 - vendor/github.com/joyent/gocommon/version.go | 37 - vendor/github.com/joyent/gocommon/wercker.yml | 40 - vendor/github.com/joyent/gomanta/LICENSE | 373 - .../github.com/joyent/gomanta/manta/manta.go | 459 - vendor/github.com/joyent/gosign/LICENSE | 373 - vendor/github.com/joyent/gosign/auth/auth.go | 132 - .../github.com/joyent/triton-go/CHANGELOG.md | 9 - .../github.com/joyent/triton-go/GNUmakefile | 47 - vendor/github.com/joyent/triton-go/Gopkg.lock | 39 - vendor/github.com/joyent/triton-go/Gopkg.toml | 42 - vendor/github.com/joyent/triton-go/LICENSE | 373 - vendor/github.com/joyent/triton-go/README.md | 215 - .../joyent/triton-go/authentication/dummy.go | 72 - .../authentication/ecdsa_signature.go | 66 - .../authentication/private_key_signer.go | 106 - .../triton-go/authentication/rsa_signature.go | 25 - .../triton-go/authentication/signature.go | 27 - .../joyent/triton-go/authentication/signer.go | 10 - .../authentication/ssh_agent_signer.go | 170 - .../triton-go/authentication/test_signer.go | 27 - .../joyent/triton-go/authentication/util.go | 29 - .../joyent/triton-go/client/client.go | 413 - .../joyent/triton-go/client/errors.go | 190 - .../joyent/triton-go/storage/client.go | 51 - .../joyent/triton-go/storage/directory.go | 144 - .../joyent/triton-go/storage/job.go | 440 - .../joyent/triton-go/storage/objects.go | 208 - .../joyent/triton-go/storage/signing.go | 81 - .../joyent/triton-go/storage/snaplink.go | 46 - vendor/github.com/joyent/triton-go/triton.go | 18 - .../github.com/lusis/go-artifactory/LICENSE | 15 - .../src/artifactory.v401/api.go | 1 - .../src/artifactory.v401/archive.go | 1 - .../src/artifactory.v401/artifact.go | 67 - .../src/artifactory.v401/bintray.go | 1 - .../src/artifactory.v401/build.go | 
1 - .../src/artifactory.v401/client.go | 80 - .../src/artifactory.v401/compliance.go | 1 - .../src/artifactory.v401/errors.go | 10 - .../src/artifactory.v401/groups.go | 61 - .../src/artifactory.v401/http.go | 103 - .../src/artifactory.v401/license.go | 23 - .../src/artifactory.v401/mimetypes.go | 11 - .../artifactory.v401/permissions_targets.go | 53 - .../src/artifactory.v401/repos.go | 151 - .../src/artifactory.v401/responses.go | 9 - .../src/artifactory.v401/search.go | 47 - .../src/artifactory.v401/security.go | 6 - .../src/artifactory.v401/storage.go | 18 - .../src/artifactory.v401/system.go | 6 - .../src/artifactory.v401/users.go | 83 - .../src/artifactory.v401/version.go | 3 - .../terraform-provider-openstack/LICENSE | 373 - .../compute_instance_v2_networking.go | 488 - .../openstack/config.go | 223 - .../data_source_openstack_images_image_v2.go | 283 - ..._source_openstack_networking_network_v2.go | 125 - ...source_openstack_networking_secgroup_v2.go | 76 - .../openstack/lb_v2_shared.go | 243 - .../openstack/provider.go | 261 - ...openstack_blockstorage_volume_attach_v2.go | 414 - ...source_openstack_blockstorage_volume_v1.go | 340 - ...source_openstack_blockstorage_volume_v2.go | 351 - .../resource_openstack_compute_flavor_v2.go | 143 - ...enstack_compute_floatingip_associate_v2.go | 235 - ...esource_openstack_compute_floatingip_v2.go | 111 - .../resource_openstack_compute_instance_v2.go | 1082 - .../resource_openstack_compute_keypair_v2.go | 105 - .../resource_openstack_compute_secgroup_v2.go | 398 - ...source_openstack_compute_servergroup_v2.go | 138 - ...urce_openstack_compute_volume_attach_v2.go | 222 - .../resource_openstack_dns_recordset_v2.go | 276 - .../resource_openstack_dns_zone_v2.go | 276 - .../resource_openstack_fw_firewall_v1.go | 323 - .../resource_openstack_fw_policy_v1.go | 231 - .../resource_openstack_fw_rule_v1.go | 258 - .../resource_openstack_identity_project_v3.go | 184 - .../resource_openstack_identity_user_v3.go | 311 - 
.../resource_openstack_images_image_v2.go | 526 - .../resource_openstack_lb_listener_v2.go | 314 - .../resource_openstack_lb_loadbalancer_v2.go | 287 - .../resource_openstack_lb_member_v1.go | 236 - .../resource_openstack_lb_member_v2.go | 256 - .../resource_openstack_lb_monitor_v1.go | 310 - .../resource_openstack_lb_monitor_v2.go | 280 - .../resource_openstack_lb_pool_v1.go | 344 - .../resource_openstack_lb_pool_v2.go | 350 - .../openstack/resource_openstack_lb_vip_v1.go | 401 - ...urce_openstack_networking_floatingip_v2.go | 298 - ...esource_openstack_networking_network_v2.go | 326 - .../resource_openstack_networking_port_v2.go | 467 - ...penstack_networking_router_interface_v2.go | 208 - ...ce_openstack_networking_router_route_v2.go | 225 - ...resource_openstack_networking_router_v2.go | 262 - ...e_openstack_networking_secgroup_rule_v2.go | 316 - ...source_openstack_networking_secgroup_v2.go | 215 - ...resource_openstack_networking_subnet_v2.go | 423 - ...ce_openstack_objectstorage_container_v1.go | 151 - .../openstack/types.go | 353 - .../openstack/util.go | 114 - vendor/github.com/ugorji/go/LICENSE | 22 - vendor/github.com/ugorji/go/codec/0doc.go | 206 - vendor/github.com/ugorji/go/codec/README.md | 187 - vendor/github.com/ugorji/go/codec/binc.go | 946 - vendor/github.com/ugorji/go/codec/cbor.go | 662 - vendor/github.com/ugorji/go/codec/decode.go | 2469 -- vendor/github.com/ugorji/go/codec/encode.go | 1384 - .../ugorji/go/codec/fast-path.generated.go | 32728 ---------------- .../ugorji/go/codec/fast-path.go.tmpl | 465 - .../ugorji/go/codec/fast-path.not.go | 35 - .../ugorji/go/codec/gen-dec-array.go.tmpl | 77 - .../ugorji/go/codec/gen-dec-map.go.tmpl | 42 - .../ugorji/go/codec/gen-helper.generated.go | 245 - .../ugorji/go/codec/gen-helper.go.tmpl | 220 - .../ugorji/go/codec/gen.generated.go | 132 - vendor/github.com/ugorji/go/codec/gen.go | 2030 - .../go/codec/goversion_arrayof_gte_go15.go | 14 - .../go/codec/goversion_arrayof_lt_go15.go | 14 - 
.../go/codec/goversion_makemap_gte_go19.go | 15 - .../go/codec/goversion_makemap_lt_go19.go | 12 - .../go/codec/goversion_unsupported_lt_go14.go | 17 - .../go/codec/goversion_vendor_eq_go15.go | 10 - .../go/codec/goversion_vendor_eq_go16.go | 10 - .../go/codec/goversion_vendor_gte_go17.go | 8 - .../go/codec/goversion_vendor_lt_go15.go | 8 - vendor/github.com/ugorji/go/codec/helper.go | 1963 - .../ugorji/go/codec/helper_internal.go | 221 - .../ugorji/go/codec/helper_not_unsafe.go | 160 - .../ugorji/go/codec/helper_unsafe.go | 431 - vendor/github.com/ugorji/go/codec/json.go | 1167 - .../ugorji/go/codec/mammoth-test.go.tmpl | 100 - vendor/github.com/ugorji/go/codec/msgpack.go | 899 - vendor/github.com/ugorji/go/codec/rpc.go | 172 - vendor/github.com/ugorji/go/codec/simple.go | 541 - .../ugorji/go/codec/test-cbor-goldens.json | 639 - vendor/github.com/ugorji/go/codec/test.py | 126 - vendor/github.com/ugorji/go/codec/time.go | 220 - vendor/github.com/ugorji/go/codec/z.go | 23 - .../x/net/context/ctxhttp/ctxhttp.go | 74 - .../x/net/context/ctxhttp/ctxhttp_pre17.go | 147 - vendor/golang.org/x/oauth2/AUTHORS | 3 - vendor/golang.org/x/oauth2/CONTRIBUTING.md | 31 - vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 - vendor/golang.org/x/oauth2/LICENSE | 27 - vendor/golang.org/x/oauth2/README.md | 77 - .../golang.org/x/oauth2/google/appengine.go | 89 - .../x/oauth2/google/appengine_hook.go | 14 - .../x/oauth2/google/appengineflex_hook.go | 11 - vendor/golang.org/x/oauth2/google/default.go | 137 - vendor/golang.org/x/oauth2/google/google.go | 202 - vendor/golang.org/x/oauth2/google/jwt.go | 74 - vendor/golang.org/x/oauth2/google/sdk.go | 172 - vendor/golang.org/x/oauth2/internal/doc.go | 6 - vendor/golang.org/x/oauth2/internal/oauth2.go | 75 - vendor/golang.org/x/oauth2/internal/token.go | 250 - .../golang.org/x/oauth2/internal/transport.go | 68 - vendor/golang.org/x/oauth2/jws/jws.go | 182 - vendor/golang.org/x/oauth2/jwt/jwt.go | 159 - vendor/golang.org/x/oauth2/oauth2.go | 344 - 
vendor/golang.org/x/oauth2/token.go | 158 - vendor/golang.org/x/oauth2/transport.go | 132 - vendor/google.golang.org/api/LICENSE | 27 - .../api/gensupport/backoff.go | 46 - .../api/gensupport/buffer.go | 77 - .../google.golang.org/api/gensupport/doc.go | 10 - .../api/gensupport/header.go | 22 - .../google.golang.org/api/gensupport/json.go | 211 - .../api/gensupport/jsonfloat.go | 57 - .../google.golang.org/api/gensupport/media.go | 299 - .../api/gensupport/params.go | 50 - .../api/gensupport/resumable.go | 217 - .../google.golang.org/api/gensupport/retry.go | 85 - .../google.golang.org/api/gensupport/send.go | 61 - .../api/googleapi/googleapi.go | 406 - .../googleapi/internal/uritemplates/LICENSE | 18 - .../internal/uritemplates/uritemplates.go | 248 - .../googleapi/internal/uritemplates/utils.go | 17 - .../api/googleapi/transport/apikey.go | 38 - .../google.golang.org/api/googleapi/types.go | 202 - .../google.golang.org/api/internal/creds.go | 104 - vendor/google.golang.org/api/internal/pool.go | 59 - .../api/internal/service-account.json | 12 - .../api/internal/settings.go | 54 - .../api/iterator/iterator.go | 231 - vendor/google.golang.org/api/option/option.go | 175 - .../api/storage/v1/storage-api.json | 3711 -- .../api/storage/v1/storage-gen.go | 10976 ------ .../api/transport/http/dial.go | 112 - .../appengine/CONTRIBUTING.md | 90 - vendor/google.golang.org/appengine/LICENSE | 202 - vendor/google.golang.org/appengine/README.md | 73 - .../google.golang.org/appengine/appengine.go | 113 - .../appengine/appengine_vm.go | 20 - vendor/google.golang.org/appengine/errors.go | 46 - .../google.golang.org/appengine/identity.go | 142 - .../appengine/internal/api.go | 677 - .../appengine/internal/api_common.go | 123 - .../appengine/internal/app_id.go | 28 - .../app_identity/app_identity_service.pb.go | 296 - .../app_identity/app_identity_service.proto | 64 - .../appengine/internal/base/api_base.pb.go | 133 - .../appengine/internal/base/api_base.proto | 33 - 
.../internal/datastore/datastore_v3.pb.go | 2778 -- .../internal/datastore/datastore_v3.proto | 541 - .../appengine/internal/identity.go | 14 - .../appengine/internal/identity_vm.go | 101 - .../appengine/internal/internal.go | 110 - .../appengine/internal/log/log_service.pb.go | 899 - .../appengine/internal/log/log_service.proto | 150 - .../appengine/internal/main_vm.go | 48 - .../appengine/internal/metadata.go | 61 - .../internal/modules/modules_service.pb.go | 375 - .../internal/modules/modules_service.proto | 80 - .../appengine/internal/net.go | 56 - .../appengine/internal/regen.sh | 40 - .../internal/remote_api/remote_api.pb.go | 231 - .../internal/remote_api/remote_api.proto | 44 - .../appengine/internal/transaction.go | 107 - .../google.golang.org/appengine/namespace.go | 25 - vendor/google.golang.org/appengine/timeout.go | 20 - .../api/annotations/annotations.pb.go | 64 - .../googleapis/api/annotations/http.pb.go | 566 - .../googleapis/iam/v1/iam_policy.pb.go | 337 - .../genproto/googleapis/iam/v1/policy.pb.go | 269 - vendor/vendor.json | 866 - 821 files changed, 217101 deletions(-) delete mode 100644 vendor/cloud.google.com/go/LICENSE delete mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go delete mode 100644 vendor/cloud.google.com/go/iam/iam.go delete mode 100644 vendor/cloud.google.com/go/internal/annotate.go delete mode 100644 vendor/cloud.google.com/go/internal/optional/optional.go delete mode 100644 vendor/cloud.google.com/go/internal/retry.go delete mode 100755 vendor/cloud.google.com/go/internal/version/update_version.sh delete mode 100644 vendor/cloud.google.com/go/internal/version/version.go delete mode 100644 vendor/cloud.google.com/go/storage/acl.go delete mode 100644 vendor/cloud.google.com/go/storage/bucket.go delete mode 100644 vendor/cloud.google.com/go/storage/copy.go delete mode 100644 vendor/cloud.google.com/go/storage/doc.go delete mode 100644 vendor/cloud.google.com/go/storage/go110.go delete mode 100644 
vendor/cloud.google.com/go/storage/go17.go delete mode 100644 vendor/cloud.google.com/go/storage/iam.go delete mode 100644 vendor/cloud.google.com/go/storage/invoke.go delete mode 100644 vendor/cloud.google.com/go/storage/not_go110.go delete mode 100644 vendor/cloud.google.com/go/storage/not_go17.go delete mode 100644 vendor/cloud.google.com/go/storage/notifications.go delete mode 100644 vendor/cloud.google.com/go/storage/reader.go delete mode 100644 vendor/cloud.google.com/go/storage/storage.go delete mode 100644 vendor/cloud.google.com/go/storage/writer.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/LICENSE delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/NOTICE delete mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go delete mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go delete mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go delete mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go delete mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/README.md delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/client.go delete mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/container.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/file.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/message.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/share.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/table.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/version.go delete mode 100644 vendor/github.com/Azure/go-autorest/LICENSE delete 
mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/README.md delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/msi.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/persist.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/autorest.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/async.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/client.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/date.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/time.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/utility.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/error.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/preparer.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/responder.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go delete mode 100644 
vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/sender.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/validation.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/version.go delete mode 100644 vendor/github.com/Unknwon/com/LICENSE delete mode 100644 vendor/github.com/Unknwon/com/README.md delete mode 100644 vendor/github.com/Unknwon/com/cmd.go delete mode 100644 vendor/github.com/Unknwon/com/convert.go delete mode 100644 vendor/github.com/Unknwon/com/dir.go delete mode 100644 vendor/github.com/Unknwon/com/file.go delete mode 100644 vendor/github.com/Unknwon/com/html.go delete mode 100644 vendor/github.com/Unknwon/com/http.go delete mode 100644 vendor/github.com/Unknwon/com/math.go delete mode 100644 vendor/github.com/Unknwon/com/path.go delete mode 100644 vendor/github.com/Unknwon/com/regex.go delete mode 100644 vendor/github.com/Unknwon/com/slice.go delete mode 100644 vendor/github.com/Unknwon/com/string.go delete mode 100644 vendor/github.com/Unknwon/com/time.go delete mode 100644 vendor/github.com/Unknwon/com/url.go delete mode 100644 vendor/github.com/coreos/etcd/LICENSE delete mode 100644 vendor/github.com/coreos/etcd/NOTICE delete mode 100644 vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go delete mode 100644 vendor/github.com/coreos/etcd/auth/authpb/auth.proto delete mode 100644 vendor/github.com/coreos/etcd/client/README.md delete mode 100644 vendor/github.com/coreos/etcd/client/auth_role.go delete mode 100644 vendor/github.com/coreos/etcd/client/auth_user.go delete mode 100644 vendor/github.com/coreos/etcd/client/cancelreq.go delete mode 100644 vendor/github.com/coreos/etcd/client/client.go delete mode 100644 vendor/github.com/coreos/etcd/client/cluster_error.go delete mode 100644 vendor/github.com/coreos/etcd/client/curl.go delete mode 100644 
vendor/github.com/coreos/etcd/client/discover.go delete mode 100644 vendor/github.com/coreos/etcd/client/doc.go delete mode 100644 vendor/github.com/coreos/etcd/client/keys.generated.go delete mode 100644 vendor/github.com/coreos/etcd/client/keys.go delete mode 100644 vendor/github.com/coreos/etcd/client/members.go delete mode 100644 vendor/github.com/coreos/etcd/client/util.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/README.md delete mode 100644 vendor/github.com/coreos/etcd/clientv3/auth.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/balancer.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/client.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/cluster.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/compact_op.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/compare.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/election.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/key.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/session.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/config.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/doc.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/health_balancer.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/kv.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/lease.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/logger.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/maintenance.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/op.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/ready_wait.go delete mode 100644 
vendor/github.com/coreos/etcd/clientv3/retry.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/sort.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/txn.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/watch.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto delete mode 100644 vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto delete mode 100644 vendor/github.com/coreos/etcd/pkg/pathutil/path.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/srv/srv.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/doc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/listener.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go 
delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/tls.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/transport.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/doc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/id.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/set.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/slice.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/urls.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/urlsmap.go delete mode 100644 vendor/github.com/coreos/etcd/version/version.go delete mode 100644 vendor/github.com/coreos/go-semver/LICENSE delete mode 100644 vendor/github.com/coreos/go-semver/semver/semver.go delete mode 100644 vendor/github.com/coreos/go-semver/semver/sort.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/LICENSE delete mode 100644 vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/README.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/claims.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/doc.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/ecdsa.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/errors.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/hmac.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/map_claims.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/none.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/parser.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa.go delete mode 100644 
vendor/github.com/dgrijalva/jwt-go/rsa_pss.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa_utils.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/signing_method.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/token.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto delete mode 100644 vendor/github.com/googleapis/gax-go/CONTRIBUTING.md delete mode 100644 vendor/github.com/googleapis/gax-go/LICENSE delete mode 100644 vendor/github.com/googleapis/gax-go/README.md delete mode 100644 vendor/github.com/googleapis/gax-go/call_option.go delete mode 100644 vendor/github.com/googleapis/gax-go/gax.go delete mode 100644 vendor/github.com/googleapis/gax-go/header.go delete mode 100644 vendor/github.com/googleapis/gax-go/invoke.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/CHANGELOG.md delete mode 100644 vendor/github.com/gophercloud/gophercloud/FAQ.md delete mode 100644 vendor/github.com/gophercloud/gophercloud/LICENSE delete mode 100644 vendor/github.com/gophercloud/gophercloud/MIGRATING.md delete mode 100644 vendor/github.com/gophercloud/gophercloud/README.md delete mode 100644 vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md delete mode 100644 vendor/github.com/gophercloud/gophercloud/auth_options.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/endpoint_search.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/errors.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/internal/pkg.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/internal/util.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/client.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/errors.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go delete mode 
100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/http.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/linked.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/marker.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/pager.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/pkg.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/single.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/params.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/provider_client.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/results.go delete mode 100644 
vendor/github.com/gophercloud/gophercloud/service_client.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/util.go delete mode 100644 vendor/github.com/hashicorp/consul/LICENSE delete mode 100644 vendor/github.com/hashicorp/consul/api/README.md delete mode 100644 vendor/github.com/hashicorp/consul/api/acl.go delete mode 100644 vendor/github.com/hashicorp/consul/api/agent.go delete mode 100644 vendor/github.com/hashicorp/consul/api/api.go delete mode 100644 vendor/github.com/hashicorp/consul/api/catalog.go delete mode 100644 vendor/github.com/hashicorp/consul/api/coordinate.go delete mode 100644 vendor/github.com/hashicorp/consul/api/event.go delete mode 100644 vendor/github.com/hashicorp/consul/api/health.go delete mode 100644 vendor/github.com/hashicorp/consul/api/kv.go delete mode 100644 vendor/github.com/hashicorp/consul/api/lock.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_area.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_autopilot.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_keyring.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_raft.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_segment.go delete mode 100644 vendor/github.com/hashicorp/consul/api/prepared_query.go delete mode 100644 vendor/github.com/hashicorp/consul/api/raw.go delete mode 100644 vendor/github.com/hashicorp/consul/api/semaphore.go delete mode 100644 vendor/github.com/hashicorp/consul/api/session.go delete mode 100644 vendor/github.com/hashicorp/consul/api/snapshot.go delete mode 100644 vendor/github.com/hashicorp/consul/api/status.go delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/Makefile delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/README.md delete mode 100644 
vendor/github.com/hashicorp/go-retryablehttp/client.go delete mode 100644 vendor/github.com/hashicorp/serf/LICENSE delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/client.go delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/config.go delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate.go delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/phantom.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/atlas/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/atlas/cli.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/atlas/state_client.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/cli.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/init/init.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/legacy/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/legacy/legacy.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/local/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/local/backend_local.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/local/cli.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/local/hook_count.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/local/hook_count_action.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/local/hook_state.go delete mode 100644 
vendor/github.com/hashicorp/terraform/backend/local/testing.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/nil.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/operation_type.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/operationtype_string.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/azure/client.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/consul/client.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/client.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/client.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/client.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/manta/client.go delete mode 100644 
vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/s3/client.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/remote-state/swift/client.go delete mode 100644 vendor/github.com/hashicorp/terraform/backend/testing.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/apply.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/autocomplete.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/cli_ui.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/command.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/console.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/console_interactive.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/console_interactive_solaris.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/debug_command.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/debug_json2dot.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/flag_kv.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/fmt.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/get.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/graph.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/hcl_printer.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/hook_ui.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/import.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/init.go delete mode 100644 
vendor/github.com/hashicorp/terraform/command/internal_plugin.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/meta.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/meta_backend.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/meta_backend_migrate.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/meta_new.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/output.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/plan.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/plugins.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/plugins_lock.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/providers.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/push.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/refresh.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/show.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/state_command.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/state_list.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/state_meta.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/state_mv.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/state_pull.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/state_push.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/state_rm.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/state_show.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/taint.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/ui_input.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/unlock.go delete mode 100644 
vendor/github.com/hashicorp/terraform/command/untaint.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/validate.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/version.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/workspace_command.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/workspace_delete.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/workspace_list.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/workspace_new.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/workspace_select.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/workspace_show.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/closer.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/value.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/remote/artifactory.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/remote/etcd.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/remote/file.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/remote/gcs.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/remote/http.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/remote/remote.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/remote/state.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/remote/testing.go delete mode 100644 vendor/github.com/joyent/gocommon/LICENSE delete mode 100644 vendor/github.com/joyent/gocommon/README.md delete mode 100644 vendor/github.com/joyent/gocommon/client/client.go delete mode 100644 
vendor/github.com/joyent/gocommon/errors/errors.go delete mode 100644 vendor/github.com/joyent/gocommon/gocommon.go delete mode 100644 vendor/github.com/joyent/gocommon/http/client.go delete mode 100644 vendor/github.com/joyent/gocommon/jpc/jpc.go delete mode 100644 vendor/github.com/joyent/gocommon/version.go delete mode 100644 vendor/github.com/joyent/gocommon/wercker.yml delete mode 100644 vendor/github.com/joyent/gomanta/LICENSE delete mode 100644 vendor/github.com/joyent/gomanta/manta/manta.go delete mode 100755 vendor/github.com/joyent/gosign/LICENSE delete mode 100644 vendor/github.com/joyent/gosign/auth/auth.go delete mode 100644 vendor/github.com/joyent/triton-go/CHANGELOG.md delete mode 100644 vendor/github.com/joyent/triton-go/GNUmakefile delete mode 100644 vendor/github.com/joyent/triton-go/Gopkg.lock delete mode 100644 vendor/github.com/joyent/triton-go/Gopkg.toml delete mode 100644 vendor/github.com/joyent/triton-go/LICENSE delete mode 100644 vendor/github.com/joyent/triton-go/README.md delete mode 100644 vendor/github.com/joyent/triton-go/authentication/dummy.go delete mode 100644 vendor/github.com/joyent/triton-go/authentication/ecdsa_signature.go delete mode 100644 vendor/github.com/joyent/triton-go/authentication/private_key_signer.go delete mode 100644 vendor/github.com/joyent/triton-go/authentication/rsa_signature.go delete mode 100644 vendor/github.com/joyent/triton-go/authentication/signature.go delete mode 100644 vendor/github.com/joyent/triton-go/authentication/signer.go delete mode 100644 vendor/github.com/joyent/triton-go/authentication/ssh_agent_signer.go delete mode 100644 vendor/github.com/joyent/triton-go/authentication/test_signer.go delete mode 100644 vendor/github.com/joyent/triton-go/authentication/util.go delete mode 100644 vendor/github.com/joyent/triton-go/client/client.go delete mode 100644 vendor/github.com/joyent/triton-go/client/errors.go delete mode 100644 vendor/github.com/joyent/triton-go/storage/client.go delete mode 
100644 vendor/github.com/joyent/triton-go/storage/directory.go delete mode 100644 vendor/github.com/joyent/triton-go/storage/job.go delete mode 100644 vendor/github.com/joyent/triton-go/storage/objects.go delete mode 100644 vendor/github.com/joyent/triton-go/storage/signing.go delete mode 100644 vendor/github.com/joyent/triton-go/storage/snaplink.go delete mode 100644 vendor/github.com/joyent/triton-go/triton.go delete mode 100644 vendor/github.com/lusis/go-artifactory/LICENSE delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/errors.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/license.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search.go delete mode 100644 
vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users.go delete mode 100644 vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/LICENSE delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/compute_instance_v2_networking.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/config.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_images_image_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_network_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_secgroup_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/lb_v2_shared.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/provider.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_attach_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v1.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_flavor_v2.go delete mode 100644 
vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_associate_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_instance_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_keypair_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_secgroup_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_servergroup_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_volume_attach_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_recordset_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_zone_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_firewall_v1.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_policy_v1.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_rule_v1.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_project_v3.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_user_v3.go delete mode 100644 
vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_images_image_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_listener_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_loadbalancer_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v1.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v1.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v1.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_vip_v1.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_floatingip_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_network_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_port_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_interface_v2.go delete mode 100644 
vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_route_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_rule_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_subnet_v2.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_objectstorage_container_v1.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/types.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/util.go delete mode 100644 vendor/github.com/ugorji/go/LICENSE delete mode 100644 vendor/github.com/ugorji/go/codec/0doc.go delete mode 100644 vendor/github.com/ugorji/go/codec/README.md delete mode 100644 vendor/github.com/ugorji/go/codec/binc.go delete mode 100644 vendor/github.com/ugorji/go/codec/cbor.go delete mode 100644 vendor/github.com/ugorji/go/codec/decode.go delete mode 100644 vendor/github.com/ugorji/go/codec/encode.go delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.not.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl delete mode 
100644 vendor/github.com/ugorji/go/codec/gen.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen.go delete mode 100644 vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go delete mode 100644 vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go delete mode 100644 vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go delete mode 100644 vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go delete mode 100644 vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go delete mode 100644 vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go delete mode 100644 vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go delete mode 100644 vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go delete mode 100644 vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_internal.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_not_unsafe.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_unsafe.go delete mode 100644 vendor/github.com/ugorji/go/codec/json.go delete mode 100644 vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/msgpack.go delete mode 100644 vendor/github.com/ugorji/go/codec/rpc.go delete mode 100644 vendor/github.com/ugorji/go/codec/simple.go delete mode 100644 vendor/github.com/ugorji/go/codec/test-cbor-goldens.json delete mode 100755 vendor/github.com/ugorji/go/codec/test.py delete mode 100644 vendor/github.com/ugorji/go/codec/time.go delete mode 100644 vendor/github.com/ugorji/go/codec/z.go delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go delete mode 100644 vendor/golang.org/x/oauth2/AUTHORS delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md delete mode 100644 
vendor/golang.org/x/oauth2/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/oauth2/LICENSE delete mode 100644 vendor/golang.org/x/oauth2/README.md delete mode 100644 vendor/golang.org/x/oauth2/google/appengine.go delete mode 100644 vendor/golang.org/x/oauth2/google/appengine_hook.go delete mode 100644 vendor/golang.org/x/oauth2/google/appengineflex_hook.go delete mode 100644 vendor/golang.org/x/oauth2/google/default.go delete mode 100644 vendor/golang.org/x/oauth2/google/google.go delete mode 100644 vendor/golang.org/x/oauth2/google/jwt.go delete mode 100644 vendor/golang.org/x/oauth2/google/sdk.go delete mode 100644 vendor/golang.org/x/oauth2/internal/doc.go delete mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go delete mode 100644 vendor/golang.org/x/oauth2/internal/token.go delete mode 100644 vendor/golang.org/x/oauth2/internal/transport.go delete mode 100644 vendor/golang.org/x/oauth2/jws/jws.go delete mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go delete mode 100644 vendor/golang.org/x/oauth2/oauth2.go delete mode 100644 vendor/golang.org/x/oauth2/token.go delete mode 100644 vendor/golang.org/x/oauth2/transport.go delete mode 100644 vendor/google.golang.org/api/LICENSE delete mode 100644 vendor/google.golang.org/api/gensupport/backoff.go delete mode 100644 vendor/google.golang.org/api/gensupport/buffer.go delete mode 100644 vendor/google.golang.org/api/gensupport/doc.go delete mode 100644 vendor/google.golang.org/api/gensupport/header.go delete mode 100644 vendor/google.golang.org/api/gensupport/json.go delete mode 100644 vendor/google.golang.org/api/gensupport/jsonfloat.go delete mode 100644 vendor/google.golang.org/api/gensupport/media.go delete mode 100644 vendor/google.golang.org/api/gensupport/params.go delete mode 100644 vendor/google.golang.org/api/gensupport/resumable.go delete mode 100644 vendor/google.golang.org/api/gensupport/retry.go delete mode 100644 vendor/google.golang.org/api/gensupport/send.go delete mode 100644 
vendor/google.golang.org/api/googleapi/googleapi.go delete mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE delete mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go delete mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go delete mode 100644 vendor/google.golang.org/api/googleapi/transport/apikey.go delete mode 100644 vendor/google.golang.org/api/googleapi/types.go delete mode 100644 vendor/google.golang.org/api/internal/creds.go delete mode 100644 vendor/google.golang.org/api/internal/pool.go delete mode 100644 vendor/google.golang.org/api/internal/service-account.json delete mode 100644 vendor/google.golang.org/api/internal/settings.go delete mode 100644 vendor/google.golang.org/api/iterator/iterator.go delete mode 100644 vendor/google.golang.org/api/option/option.go delete mode 100644 vendor/google.golang.org/api/storage/v1/storage-api.json delete mode 100644 vendor/google.golang.org/api/storage/v1/storage-gen.go delete mode 100644 vendor/google.golang.org/api/transport/http/dial.go delete mode 100644 vendor/google.golang.org/appengine/CONTRIBUTING.md delete mode 100644 vendor/google.golang.org/appengine/LICENSE delete mode 100644 vendor/google.golang.org/appengine/README.md delete mode 100644 vendor/google.golang.org/appengine/appengine.go delete mode 100644 vendor/google.golang.org/appengine/appengine_vm.go delete mode 100644 vendor/google.golang.org/appengine/errors.go delete mode 100644 vendor/google.golang.org/appengine/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/api.go delete mode 100644 vendor/google.golang.org/appengine/internal/api_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_id.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto delete 
mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go delete mode 100755 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto delete mode 100644 vendor/google.golang.org/appengine/internal/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/internal.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/metadata.go delete mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/net.go delete mode 100755 vendor/google.golang.org/appengine/internal/regen.sh delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto delete mode 100644 vendor/google.golang.org/appengine/internal/transaction.go delete mode 100644 vendor/google.golang.org/appengine/namespace.go delete mode 100644 vendor/google.golang.org/appengine/timeout.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go diff --git 
a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE deleted file mode 100644 index a4c5efd822f..00000000000 --- a/vendor/cloud.google.com/go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2014 Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go deleted file mode 100644 index e708c031b95..00000000000 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ /dev/null @@ -1,437 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. -// -// This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. -package metadata // import "cloud.google.com/go/compute/metadata" - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" -) - -const ( - // metadataIP is the documented metadata server IP address. - metadataIP = "169.254.169.254" - - // metadataHostEnv is the environment variable specifying the - // GCE metadata hostname. If empty, the default value of - // metadataIP ("169.254.169.254") is used instead. - // This is variable name is not defined by any spec, as far as - // I know; it was made up for the Go package. - metadataHostEnv = "GCE_METADATA_HOST" - - userAgent = "gcloud-golang/0.1" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var ( - metaClient = &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, - } - subscribeClient = &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, - } -) - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. 
-type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func Get(suffix string) (string, error) { - val, _, err := getETag(metaClient, suffix) - return val, err -} - -// getETag returns a value from the metadata service as well as the associated -// ETag using the provided client. This func is otherwise equivalent to Get. -func getETag(client *http.Client, suffix string) (value, etag string, err error) { - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. 
- host = metadataIP - } - url := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", url, nil) - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - res, err := client.Do(req) - if err != nil { - return "", "", err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - if res.StatusCode != 200 { - return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - return string(all), res.Header.Get("Etag"), nil -} - -func getTrimmed(suffix string) (s string, err error) { - s, err = Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *cachedValue) get() (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = getTrimmed(c.k) - } else { - v, err = Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var ( - onGCEOnce sync.Once - onGCE bool -) - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE -} - -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. - if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. 
- // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 - go func() { - req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) - req.Header.Set("User-Agent", userAgent) - res, err := ctxhttp.Do(ctx, metaClient, req) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - addrs, err := net.LookupHost("metadata.google.internal") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it. - return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. Worst case this should return when the - // metaClient's Transport.ResponseHeaderTimeout or - // Transport.Dial.Timeout fires (in two seconds). - return <-resc -} - -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. 
- return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 - - // First check to see if the metadata value exists at all. - val, lastETag, err := getETag(subscribeClient, suffix) - if err != nil { - return err - } - - if err := fn(val, true); err != nil { - return err - } - - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" - } - for { - val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag - - if err := fn(val, ok); err != nil || !ok { - return err - } - } -} - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return projID.get() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return projNum.get() } - -// InternalIP returns the instance's primary internal IP address. 
-func InternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/ip") -} - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will be of the form -// ".c..internal". -func Hostname() (string, error) { - return getTrimmed("instance/hostname") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { - var s []string - j, err := Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { - return instID.get() -} - -// InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { - host, err := Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil -} - -// Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { - zone, err := getTrimmed("instance/zone") - // zone is of the form "projects//zones/". - if err != nil { - return "", err - } - return zone[strings.LastIndex(zone, "/")+1:], nil -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. The value of an -// attribute can be obtained with InstanceAttributeValue. -func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. 
-func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } - -func lines(suffix string) ([]string, error) { - j, err := Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func InstanceAttributeValue(attr string) (string, error) { - return Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func ProjectAttributeValue(attr string) (string, error) { - return Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account. -func Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return lines("instance/service-accounts/" + serviceAccount + "/scopes") -} - -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go deleted file mode 100644 index 8722ee8838f..00000000000 --- a/vendor/cloud.google.com/go/iam/iam.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package iam supports the resource-specific operations of Google Cloud -// IAM (Identity and Access Management) for the Google Cloud Libraries. -// See https://cloud.google.com/iam for more about IAM. -// -// Users of the Google Cloud Libraries will typically not use this package -// directly. Instead they will begin with some resource that supports IAM, like -// a pubsub topic, and call its IAM method to get a Handle for that resource. -package iam - -import ( - "golang.org/x/net/context" - pb "google.golang.org/genproto/googleapis/iam/v1" - "google.golang.org/grpc" -) - -// client abstracts the IAMPolicy API to allow multiple implementations. -type client interface { - Get(ctx context.Context, resource string) (*pb.Policy, error) - Set(ctx context.Context, resource string, p *pb.Policy) error - Test(ctx context.Context, resource string, perms []string) ([]string, error) -} - -// grpcClient implements client for the standard gRPC-based IAMPolicy service. 
-type grpcClient struct { - c pb.IAMPolicyClient -} - -func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { - proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) - if err != nil { - return nil, err - } - return proto, nil -} -func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { - _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ - Resource: resource, - Policy: p, - }) - return err -} - -func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { - res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ - Resource: resource, - Permissions: perms, - }) - if err != nil { - return nil, err - } - return res.Permissions, nil -} - -// A Handle provides IAM operations for a resource. -type Handle struct { - c client - resource string -} - -// InternalNewHandle is for use by the Google Cloud Libraries only. -// -// InternalNewHandle returns a Handle for resource. -// The conn parameter refers to a server that must support the IAMPolicy service. -func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle { - return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource) -} - -// InternalNewHandleClient is for use by the Google Cloud Libraries only. -// -// InternalNewHandleClient returns a Handle for resource using the given -// client implementation. -func InternalNewHandleClient(c client, resource string) *Handle { - return &Handle{ - c: c, - resource: resource, - } -} - -// Policy retrieves the IAM policy for the resource. -func (h *Handle) Policy(ctx context.Context) (*Policy, error) { - proto, err := h.c.Get(ctx, h.resource) - if err != nil { - return nil, err - } - return &Policy{InternalProto: proto}, nil -} - -// SetPolicy replaces the resource's current policy with the supplied Policy. 
-// -// If policy was created from a prior call to Get, then the modification will -// only succeed if the policy has not changed since the Get. -func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { - return h.c.Set(ctx, h.resource, policy.InternalProto) -} - -// TestPermissions returns the subset of permissions that the caller has on the resource. -func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { - return h.c.Test(ctx, h.resource, permissions) -} - -// A RoleName is a name representing a collection of permissions. -type RoleName string - -// Common role names. -const ( - Owner RoleName = "roles/owner" - Editor RoleName = "roles/editor" - Viewer RoleName = "roles/viewer" -) - -const ( - // AllUsers is a special member that denotes all users, even unauthenticated ones. - AllUsers = "allUsers" - - // AllAuthenticatedUsers is a special member that denotes all authenticated users. - AllAuthenticatedUsers = "allAuthenticatedUsers" -) - -// A Policy is a list of Bindings representing roles -// granted to members. -// -// The zero Policy is a valid policy with no bindings. -type Policy struct { - // TODO(jba): when type aliases are available, put Policy into an internal package - // and provide an exported alias here. - - // This field is exported for use by the Google Cloud Libraries only. - // It may become unexported in a future release. - InternalProto *pb.Policy -} - -// Members returns the list of members with the supplied role. -// The return value should not be modified. Use Add and Remove -// to modify the members of a role. -func (p *Policy) Members(r RoleName) []string { - b := p.binding(r) - if b == nil { - return nil - } - return b.Members -} - -// HasRole reports whether member has role r. -func (p *Policy) HasRole(member string, r RoleName) bool { - return memberIndex(member, p.binding(r)) >= 0 -} - -// Add adds member member to role r if it is not already present. 
-// A new binding is created if there is no binding for the role. -func (p *Policy) Add(member string, r RoleName) { - b := p.binding(r) - if b == nil { - if p.InternalProto == nil { - p.InternalProto = &pb.Policy{} - } - p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ - Role: string(r), - Members: []string{member}, - }) - return - } - if memberIndex(member, b) < 0 { - b.Members = append(b.Members, member) - return - } -} - -// Remove removes member from role r if it is present. -func (p *Policy) Remove(member string, r RoleName) { - bi := p.bindingIndex(r) - if bi < 0 { - return - } - bindings := p.InternalProto.Bindings - b := bindings[bi] - mi := memberIndex(member, b) - if mi < 0 { - return - } - // Order doesn't matter for bindings or members, so to remove, move the last item - // into the removed spot and shrink the slice. - if len(b.Members) == 1 { - // Remove binding. - last := len(bindings) - 1 - bindings[bi] = bindings[last] - bindings[last] = nil - p.InternalProto.Bindings = bindings[:last] - return - } - // Remove member. - // TODO(jba): worry about multiple copies of m? - last := len(b.Members) - 1 - b.Members[mi] = b.Members[last] - b.Members[last] = "" - b.Members = b.Members[:last] -} - -// Roles returns the names of all the roles that appear in the Policy. -func (p *Policy) Roles() []RoleName { - if p.InternalProto == nil { - return nil - } - var rns []RoleName - for _, b := range p.InternalProto.Bindings { - rns = append(rns, RoleName(b.Role)) - } - return rns -} - -// binding returns the Binding for the suppied role, or nil if there isn't one. 
-func (p *Policy) binding(r RoleName) *pb.Binding { - i := p.bindingIndex(r) - if i < 0 { - return nil - } - return p.InternalProto.Bindings[i] -} - -func (p *Policy) bindingIndex(r RoleName) int { - if p.InternalProto == nil { - return -1 - } - for i, b := range p.InternalProto.Bindings { - if b.Role == string(r) { - return i - } - } - return -1 -} - -// memberIndex returns the index of m in b's Members, or -1 if not found. -func memberIndex(m string, b *pb.Binding) int { - if b == nil { - return -1 - } - for i, mm := range b.Members { - if mm == m { - return i - } - } - return -1 -} diff --git a/vendor/cloud.google.com/go/internal/annotate.go b/vendor/cloud.google.com/go/internal/annotate.go deleted file mode 100644 index 797809aedab..00000000000 --- a/vendor/cloud.google.com/go/internal/annotate.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "fmt" - - "google.golang.org/api/googleapi" - "google.golang.org/grpc/status" -) - -// Annotate prepends msg to the error message in err, attempting -// to preserve other information in err, like an error code. -// -// Annotate panics if err is nil. 
-// -// Annotate knows about these error types: -// - "google.golang.org/grpc/status".Status -// - "google.golang.org/api/googleapi".Error -// If the error is not one of these types, Annotate behaves -// like -// fmt.Errorf("%s: %v", msg, err) -func Annotate(err error, msg string) error { - if err == nil { - panic("Annotate called with nil") - } - if s, ok := status.FromError(err); ok { - p := s.Proto() - p.Message = msg + ": " + p.Message - return status.ErrorProto(p) - } - if g, ok := err.(*googleapi.Error); ok { - g.Message = msg + ": " + g.Message - return g - } - return fmt.Errorf("%s: %v", msg, err) -} - -// Annotatef uses format and args to format a string, then calls Annotate. -func Annotatef(err error, format string, args ...interface{}) error { - return Annotate(err, fmt.Sprintf(format, args...)) -} diff --git a/vendor/cloud.google.com/go/internal/optional/optional.go b/vendor/cloud.google.com/go/internal/optional/optional.go deleted file mode 100644 index 4c15410aa04..00000000000 --- a/vendor/cloud.google.com/go/internal/optional/optional.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package optional provides versions of primitive types that can -// be nil. These are useful in methods that update some of an API object's -// fields. -package optional - -import ( - "fmt" - "strings" - "time" -) - -type ( - // Bool is either a bool or nil. 
- Bool interface{} - - // String is either a string or nil. - String interface{} - - // Int is either an int or nil. - Int interface{} - - // Uint is either a uint or nil. - Uint interface{} - - // Float64 is either a float64 or nil. - Float64 interface{} - - // Duration is either a time.Duration or nil. - Duration interface{} -) - -// ToBool returns its argument as a bool. -// It panics if its argument is nil or not a bool. -func ToBool(v Bool) bool { - x, ok := v.(bool) - if !ok { - doPanic("Bool", v) - } - return x -} - -// ToString returns its argument as a string. -// It panics if its argument is nil or not a string. -func ToString(v String) string { - x, ok := v.(string) - if !ok { - doPanic("String", v) - } - return x -} - -// ToInt returns its argument as an int. -// It panics if its argument is nil or not an int. -func ToInt(v Int) int { - x, ok := v.(int) - if !ok { - doPanic("Int", v) - } - return x -} - -// ToUint returns its argument as a uint. -// It panics if its argument is nil or not a uint. -func ToUint(v Uint) uint { - x, ok := v.(uint) - if !ok { - doPanic("Uint", v) - } - return x -} - -// ToFloat64 returns its argument as a float64. -// It panics if its argument is nil or not a float64. -func ToFloat64(v Float64) float64 { - x, ok := v.(float64) - if !ok { - doPanic("Float64", v) - } - return x -} - -// ToDuration returns its argument as a time.Duration. -// It panics if its argument is nil or not a time.Duration. 
-func ToDuration(v Duration) time.Duration { - x, ok := v.(time.Duration) - if !ok { - doPanic("Duration", v) - } - return x -} - -func doPanic(capType string, v interface{}) { - panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v)) -} diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go deleted file mode 100644 index e1f9aaad6f5..00000000000 --- a/vendor/cloud.google.com/go/internal/retry.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "time" - - gax "github.com/googleapis/gax-go" - - "golang.org/x/net/context" -) - -// Retry calls the supplied function f repeatedly according to the provided -// backoff parameters. It returns when one of the following occurs: -// When f's first return value is true, Retry immediately returns with f's second -// return value. -// When the provided context is done, Retry returns with an error that -// includes both ctx.Error() and the last error returned by f. 
-func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { - return retry(ctx, bo, f, gax.Sleep) -} - -func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), - sleep func(context.Context, time.Duration) error) error { - var lastErr error - for { - stop, err := f() - if stop { - return err - } - // Remember the last "real" error from f. - if err != nil && err != context.Canceled && err != context.DeadlineExceeded { - lastErr = err - } - p := bo.Pause() - if cerr := sleep(ctx, p); cerr != nil { - if lastErr != nil { - return Annotatef(lastErr, "retry failed with %v; last error", cerr) - } - return cerr - } - } -} diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh deleted file mode 100755 index fecf1f03fde..00000000000 --- a/vendor/cloud.google.com/go/internal/version/update_version.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -today=$(date +%Y%m%d) - -sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE - diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go deleted file mode 100644 index 513afa46079..00000000000 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:generate ./update_version.sh - -// Package version contains version information for Google Cloud Client -// Libraries for Go, as reported in request headers. -package version - -import ( - "runtime" - "strings" - "unicode" -) - -// Repo is the current version of the client libraries in this -// repo. It should be a date in YYYYMMDD format. -const Repo = "20170928" - -// Go returns the Go runtime version. The returned string -// has no whitespace. -func Go() string { - return goVersion -} - -var goVersion = goVer(runtime.Version()) - -const develPrefix = "devel +" - -func goVer(s string) string { - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "" -} - -func notSemverRune(r rune) bool { - return strings.IndexRune("0123456789.", r) < 0 -} diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go deleted file mode 100644 index 24f90c924a3..00000000000 --- a/vendor/cloud.google.com/go/storage/acl.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "net/http" - "reflect" - - "golang.org/x/net/context" - "google.golang.org/api/googleapi" - raw "google.golang.org/api/storage/v1" -) - -// ACLRole is the level of access to grant. -type ACLRole string - -const ( - RoleOwner ACLRole = "OWNER" - RoleReader ACLRole = "READER" - RoleWriter ACLRole = "WRITER" -) - -// ACLEntity refers to a user or group. -// They are sometimes referred to as grantees. -// -// It could be in the form of: -// "user-", "user-", "group-", "group-", -// "domain-" and "project-team-". -// -// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. -type ACLEntity string - -const ( - AllUsers ACLEntity = "allUsers" - AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" -) - -// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket. -type ACLRule struct { - Entity ACLEntity - Role ACLRole -} - -// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. -type ACLHandle struct { - c *Client - bucket string - object string - isDefault bool - userProject string // for requester-pays buckets -} - -// Delete permanently deletes the ACL entry for the given entity. -func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error { - if a.object != "" { - return a.objectDelete(ctx, entity) - } - if a.isDefault { - return a.bucketDefaultDelete(ctx, entity) - } - return a.bucketDelete(ctx, entity) -} - -// Set sets the permission level for the given entity. -func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error { - if a.object != "" { - return a.objectSet(ctx, entity, role, false) - } - if a.isDefault { - return a.objectSet(ctx, entity, role, true) - } - return a.bucketSet(ctx, entity, role) -} - -// List retrieves ACL entries. 
-func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) { - if a.object != "" { - return a.objectList(ctx) - } - if a.isDefault { - return a.bucketDefaultList(ctx) - } - return a.bucketList(ctx) -} - -func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.ObjectAccessControls - var err error - err = runWithRetry(ctx, func() error { - req := a.c.raw.DefaultObjectAccessControls.List(a.bucket) - a.configureCall(req, ctx) - acls, err = req.Do() - return err - }) - if err != nil { - return nil, err - } - return toACLRules(acls.Items), nil -} - -func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { - return runWithRetry(ctx, func() error { - req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)) - a.configureCall(req, ctx) - return req.Do() - }) -} - -func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.BucketAccessControls - var err error - err = runWithRetry(ctx, func() error { - req := a.c.raw.BucketAccessControls.List(a.bucket) - a.configureCall(req, ctx) - acls, err = req.Do() - return err - }) - if err != nil { - return nil, err - } - r := make([]ACLRule, len(acls.Items)) - for i, v := range acls.Items { - r[i].Entity = ACLEntity(v.Entity) - r[i].Role = ACLRole(v.Role) - } - return r, nil -} - -func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { - acl := &raw.BucketAccessControl{ - Bucket: a.bucket, - Entity: string(entity), - Role: string(role), - } - err := runWithRetry(ctx, func() error { - req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl) - a.configureCall(req, ctx) - _, err := req.Do() - return err - }) - if err != nil { - return err - } - return nil -} - -func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { - err := runWithRetry(ctx, func() error { - req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)) - 
a.configureCall(req, ctx) - return req.Do() - }) - if err != nil { - return err - } - return nil -} - -func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.ObjectAccessControls - var err error - err = runWithRetry(ctx, func() error { - req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object) - a.configureCall(req, ctx) - acls, err = req.Do() - return err - }) - if err != nil { - return nil, err - } - return toACLRules(acls.Items), nil -} - -func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { - type setRequest interface { - Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) - Header() http.Header - } - - acl := &raw.ObjectAccessControl{ - Bucket: a.bucket, - Entity: string(entity), - Role: string(role), - } - var req setRequest - if isBucketDefault { - req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl) - } else { - req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl) - } - a.configureCall(req, ctx) - return runWithRetry(ctx, func() error { - _, err := req.Do() - return err - }) -} - -func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { - return runWithRetry(ctx, func() error { - req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)) - a.configureCall(req, ctx) - return req.Do() - }) -} - -func (a *ACLHandle) configureCall(call interface { - Header() http.Header -}, ctx context.Context) { - vc := reflect.ValueOf(call) - vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) - if a.userProject != "" { - vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)}) - } - setClientHeader(call.Header()) -} - -func toACLRules(items []*raw.ObjectAccessControl) []ACLRule { - r := make([]ACLRule, 0, len(items)) - for _, item := range items { - r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: 
ACLRole(item.Role)}) - } - return r -} diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go deleted file mode 100644 index fcaa59db08f..00000000000 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ /dev/null @@ -1,767 +0,0 @@ -// Copyright 2014 Google Inc. LiveAndArchived Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "fmt" - "net/http" - "reflect" - "time" - - "cloud.google.com/go/internal/optional" - "golang.org/x/net/context" - "google.golang.org/api/googleapi" - "google.golang.org/api/iterator" - raw "google.golang.org/api/storage/v1" -) - -// BucketHandle provides operations on a Google Cloud Storage bucket. -// Use Client.Bucket to get a handle. -type BucketHandle struct { - c *Client - name string - acl ACLHandle - defaultObjectACL ACLHandle - conds *BucketConditions - userProject string // project for Requester Pays buckets -} - -// Bucket returns a BucketHandle, which provides operations on the named bucket. -// This call does not perform any network operations. -// -// The supplied name must contain only lowercase letters, numbers, dashes, -// underscores, and dots. 
The full specification for valid bucket names can be -// found at: -// https://cloud.google.com/storage/docs/bucket-naming -func (c *Client) Bucket(name string) *BucketHandle { - return &BucketHandle{ - c: c, - name: name, - acl: ACLHandle{ - c: c, - bucket: name, - }, - defaultObjectACL: ACLHandle{ - c: c, - bucket: name, - isDefault: true, - }, - } -} - -// Create creates the Bucket in the project. -// If attrs is nil the API defaults will be used. -func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error { - var bkt *raw.Bucket - if attrs != nil { - bkt = attrs.toRawBucket() - } else { - bkt = &raw.Bucket{} - } - bkt.Name = b.name - // If there is lifecycle information but no location, explicitly set - // the location. This is a GCS quirk/bug. - if bkt.Location == "" && bkt.Lifecycle != nil { - bkt.Location = "US" - } - req := b.c.raw.Buckets.Insert(projectID, bkt) - setClientHeader(req.Header()) - return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err }) -} - -// Delete deletes the Bucket. -func (b *BucketHandle) Delete(ctx context.Context) error { - req, err := b.newDeleteCall() - if err != nil { - return err - } - return runWithRetry(ctx, func() error { return req.Context(ctx).Do() }) -} - -func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) { - req := b.c.raw.Buckets.Delete(b.name) - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil { - return nil, err - } - if b.userProject != "" { - req.UserProject(b.userProject) - } - return req, nil -} - -// ACL returns an ACLHandle, which provides access to the bucket's access control list. -// This controls who can list, create or overwrite the objects in a bucket. -// This call does not perform any network operations. 
-func (b *BucketHandle) ACL() *ACLHandle { - return &b.acl -} - -// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. -// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. -// This call does not perform any network operations. -func (b *BucketHandle) DefaultObjectACL() *ACLHandle { - return &b.defaultObjectACL -} - -// Object returns an ObjectHandle, which provides operations on the named object. -// This call does not perform any network operations. -// -// name must consist entirely of valid UTF-8-encoded runes. The full specification -// for valid object names can be found at: -// https://cloud.google.com/storage/docs/bucket-naming -func (b *BucketHandle) Object(name string) *ObjectHandle { - return &ObjectHandle{ - c: b.c, - bucket: b.name, - object: name, - acl: ACLHandle{ - c: b.c, - bucket: b.name, - object: name, - userProject: b.userProject, - }, - gen: -1, - userProject: b.userProject, - } -} - -// Attrs returns the metadata for the bucket. 
-func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) { - req, err := b.newGetCall() - if err != nil { - return nil, err - } - var resp *raw.Bucket - err = runWithRetry(ctx, func() error { - resp, err = req.Context(ctx).Do() - return err - }) - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrBucketNotExist - } - if err != nil { - return nil, err - } - return newBucket(resp), nil -} - -func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) { - req := b.c.raw.Buckets.Get(b.name).Projection("full") - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil { - return nil, err - } - if b.userProject != "" { - req.UserProject(b.userProject) - } - return req, nil -} - -func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) { - req, err := b.newPatchCall(&uattrs) - if err != nil { - return nil, err - } - // TODO(jba): retry iff metagen is set? - rb, err := req.Context(ctx).Do() - if err != nil { - return nil, err - } - return newBucket(rb), nil -} - -func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) { - rb := uattrs.toRawBucket() - req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full") - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil { - return nil, err - } - if b.userProject != "" { - req.UserProject(b.userProject) - } - return req, nil -} - -// BucketAttrs represents the metadata for a Google Cloud Storage bucket. -// Read-only fields are ignored by BucketHandle.Create. -type BucketAttrs struct { - // Name is the name of the bucket. - // This field is read-only. - Name string - - // ACL is the list of access control rules on the bucket. - ACL []ACLRule - - // DefaultObjectACL is the list of access controls to - // apply to new objects when no object ACL is provided. 
- DefaultObjectACL []ACLRule - - // Location is the location of the bucket. It defaults to "US". - Location string - - // MetaGeneration is the metadata generation of the bucket. - // This field is read-only. - MetaGeneration int64 - - // StorageClass is the default storage class of the bucket. This defines - // how objects in the bucket are stored and determines the SLA - // and the cost of storage. Typical values are "MULTI_REGIONAL", - // "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and - // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which - // is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on - // the bucket's location settings. - StorageClass string - - // Created is the creation time of the bucket. - // This field is read-only. - Created time.Time - - // VersioningEnabled reports whether this bucket has versioning enabled. - VersioningEnabled bool - - // Labels are the bucket's labels. - Labels map[string]string - - // RequesterPays reports whether the bucket is a Requester Pays bucket. - // Clients performing operations on Requester Pays buckets must provide - // a user project (see BucketHandle.UserProject), which will be billed - // for the operations. - RequesterPays bool - // Lifecycle is the lifecycle configuration for objects in the bucket. - Lifecycle Lifecycle -} - -// Lifecycle is the lifecycle configuration for objects in the bucket. -type Lifecycle struct { - Rules []LifecycleRule -} - -const ( - // RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule. - rfc3339Date = "2006-01-02" - - // DeleteAction is a lifecycle action that deletes a live and/or archived - // objects. Takes precendence over SetStorageClass actions. - DeleteAction = "Delete" - - // SetStorageClassAction changes the storage class of live and/or archived - // objects. - SetStorageClassAction = "SetStorageClass" -) - -// LifecycleRule is a lifecycle configuration rule. 
-// -// When all the configured conditions are met by an object in the bucket, the -// configured action will automatically be taken on that object. -type LifecycleRule struct { - // Action is the action to take when all of the associated conditions are - // met. - Action LifecycleAction - - // Condition is the set of conditions that must be met for the associated - // action to be taken. - Condition LifecycleCondition -} - -// LifecycleAction is a lifecycle configuration action. -type LifecycleAction struct { - // Type is the type of action to take on matching objects. - // - // Acceptable values are "Delete" to delete matching objects and - // "SetStorageClass" to set the storage class defined in StorageClass on - // matching objects. - Type string - - // StorageClass is the storage class to set on matching objects if the Action - // is "SetStorageClass". - StorageClass string -} - -// Liveness specifies whether the object is live or not. -type Liveness int - -const ( - // LiveAndArchived includes both live and archived objects. - LiveAndArchived Liveness = iota - // Live specifies that the object is still live. - Live - // Archived specifies that the object is archived. - Archived -) - -// LifecycleCondition is a set of conditions used to match objects and take an -// action automatically. -// -// All configured conditions must be met for the associated action to be taken. -type LifecycleCondition struct { - // AgeInDays is the age of the object in days. - AgeInDays int64 - - // CreatedBefore is the time the object was created. - // - // This condition is satisfied when an object is created before midnight of - // the specified date in UTC. - CreatedBefore time.Time - - // Liveness specifies the object's liveness. Relevant only for versioned objects - Liveness Liveness - - // MatchesStorageClasses is the condition matching the object's storage - // class. 
- // - // Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", - // "STANDARD", and "DURABLE_REDUCED_AVAILABILITY". - MatchesStorageClasses []string - - // NumNewerVersions is the condition matching objects with a number of newer versions. - // - // If the value is N, this condition is satisfied when there are at least N - // versions (including the live version) newer than this version of the - // object. - NumNewerVersions int64 -} - -func newBucket(b *raw.Bucket) *BucketAttrs { - if b == nil { - return nil - } - bucket := &BucketAttrs{ - Name: b.Name, - Location: b.Location, - MetaGeneration: b.Metageneration, - StorageClass: b.StorageClass, - Created: convertTime(b.TimeCreated), - VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, - Labels: b.Labels, - RequesterPays: b.Billing != nil && b.Billing.RequesterPays, - Lifecycle: toLifecycle(b.Lifecycle), - } - acl := make([]ACLRule, len(b.Acl)) - for i, rule := range b.Acl { - acl[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - bucket.ACL = acl - objACL := make([]ACLRule, len(b.DefaultObjectAcl)) - for i, rule := range b.DefaultObjectAcl { - objACL[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - bucket.DefaultObjectACL = objACL - return bucket -} - -// toRawBucket copies the editable attribute from b to the raw library's Bucket type. -func (b *BucketAttrs) toRawBucket() *raw.Bucket { - var acl []*raw.BucketAccessControl - if len(b.ACL) > 0 { - acl = make([]*raw.BucketAccessControl, len(b.ACL)) - for i, rule := range b.ACL { - acl[i] = &raw.BucketAccessControl{ - Entity: string(rule.Entity), - Role: string(rule.Role), - } - } - } - dACL := toRawObjectACL(b.DefaultObjectACL) - // Copy label map. - var labels map[string]string - if len(b.Labels) > 0 { - labels = make(map[string]string, len(b.Labels)) - for k, v := range b.Labels { - labels[k] = v - } - } - // Ignore VersioningEnabled if it is false. 
This is OK because - // we only call this method when creating a bucket, and by default - // new buckets have versioning off. - var v *raw.BucketVersioning - if b.VersioningEnabled { - v = &raw.BucketVersioning{Enabled: true} - } - var bb *raw.BucketBilling - if b.RequesterPays { - bb = &raw.BucketBilling{RequesterPays: true} - } - return &raw.Bucket{ - Name: b.Name, - DefaultObjectAcl: dACL, - Location: b.Location, - StorageClass: b.StorageClass, - Acl: acl, - Versioning: v, - Labels: labels, - Billing: bb, - Lifecycle: toRawLifecycle(b.Lifecycle), - } -} - -type BucketAttrsToUpdate struct { - // VersioningEnabled, if set, updates whether the bucket uses versioning. - VersioningEnabled optional.Bool - - // RequesterPays, if set, updates whether the bucket is a Requester Pays bucket. - RequesterPays optional.Bool - - setLabels map[string]string - deleteLabels map[string]bool -} - -// SetLabel causes a label to be added or modified when ua is used -// in a call to Bucket.Update. -func (ua *BucketAttrsToUpdate) SetLabel(name, value string) { - if ua.setLabels == nil { - ua.setLabels = map[string]string{} - } - ua.setLabels[name] = value -} - -// DeleteLabel causes a label to be deleted when ua is used in a -// call to Bucket.Update. 
-func (ua *BucketAttrsToUpdate) DeleteLabel(name string) { - if ua.deleteLabels == nil { - ua.deleteLabels = map[string]bool{} - } - ua.deleteLabels[name] = true -} - -func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { - rb := &raw.Bucket{} - if ua.VersioningEnabled != nil { - rb.Versioning = &raw.BucketVersioning{ - Enabled: optional.ToBool(ua.VersioningEnabled), - ForceSendFields: []string{"Enabled"}, - } - } - if ua.RequesterPays != nil { - rb.Billing = &raw.BucketBilling{ - RequesterPays: optional.ToBool(ua.RequesterPays), - ForceSendFields: []string{"RequesterPays"}, - } - } - if ua.setLabels != nil || ua.deleteLabels != nil { - rb.Labels = map[string]string{} - for k, v := range ua.setLabels { - rb.Labels[k] = v - } - if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 { - rb.ForceSendFields = append(rb.ForceSendFields, "Labels") - } - for l := range ua.deleteLabels { - rb.NullFields = append(rb.NullFields, "Labels."+l) - } - } - return rb -} - -// If returns a new BucketHandle that applies a set of preconditions. -// Preconditions already set on the BucketHandle are ignored. -// Operations on the new handle will only occur if the preconditions are -// satisfied. The only valid preconditions for buckets are MetagenerationMatch -// and MetagenerationNotMatch. -func (b *BucketHandle) If(conds BucketConditions) *BucketHandle { - b2 := *b - b2.conds = &conds - return &b2 -} - -// BucketConditions constrain bucket methods to act on specific metagenerations. -// -// The zero value is an empty set of constraints. -type BucketConditions struct { - // MetagenerationMatch specifies that the bucket must have the given - // metageneration for the operation to occur. - // If MetagenerationMatch is zero, it has no effect. - MetagenerationMatch int64 - - // MetagenerationNotMatch specifies that the bucket must not have the given - // metageneration for the operation to occur. - // If MetagenerationNotMatch is zero, it has no effect. 
- MetagenerationNotMatch int64 -} - -func (c *BucketConditions) validate(method string) error { - if *c == (BucketConditions{}) { - return fmt.Errorf("storage: %s: empty conditions", method) - } - if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 { - return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) - } - return nil -} - -// UserProject returns a new BucketHandle that passes the project ID as the user -// project for all subsequent calls. Calls with a user project will be billed to that -// project rather than to the bucket's owning project. -// -// A user project is required for all operations on Requester Pays buckets. -func (b *BucketHandle) UserProject(projectID string) *BucketHandle { - b2 := *b - b2.userProject = projectID - b2.acl.userProject = projectID - b2.defaultObjectACL.userProject = projectID - return &b2 -} - -// applyBucketConds modifies the provided call using the conditions in conds. -// call is something that quacks like a *raw.WhateverCall. 
-func applyBucketConds(method string, conds *BucketConditions, call interface{}) error { - if conds == nil { - return nil - } - if err := conds.validate(method); err != nil { - return err - } - cval := reflect.ValueOf(call) - switch { - case conds.MetagenerationMatch != 0: - if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) - } - case conds.MetagenerationNotMatch != 0: - if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) - } - } - return nil -} - -func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { - var rl raw.BucketLifecycle - if len(l.Rules) == 0 { - return nil - } - for _, r := range l.Rules { - rr := &raw.BucketLifecycleRule{ - Action: &raw.BucketLifecycleRuleAction{ - Type: r.Action.Type, - StorageClass: r.Action.StorageClass, - }, - Condition: &raw.BucketLifecycleRuleCondition{ - Age: r.Condition.AgeInDays, - MatchesStorageClass: r.Condition.MatchesStorageClasses, - NumNewerVersions: r.Condition.NumNewerVersions, - }, - } - - switch r.Condition.Liveness { - case LiveAndArchived: - rr.Condition.IsLive = nil - case Live: - rr.Condition.IsLive = googleapi.Bool(true) - case Archived: - rr.Condition.IsLive = googleapi.Bool(false) - } - - if !r.Condition.CreatedBefore.IsZero() { - rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) - } - rl.Rule = append(rl.Rule, rr) - } - return &rl -} - -func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { - var l Lifecycle - if rl == nil { - return l - } - for _, rr := range rl.Rule { - r := LifecycleRule{ - Action: LifecycleAction{ - Type: rr.Action.Type, - StorageClass: rr.Action.StorageClass, - }, - Condition: LifecycleCondition{ - AgeInDays: rr.Condition.Age, - MatchesStorageClasses: rr.Condition.MatchesStorageClass, - NumNewerVersions: rr.Condition.NumNewerVersions, - 
}, - } - - switch { - case rr.Condition.IsLive == nil: - r.Condition.Liveness = LiveAndArchived - case *rr.Condition.IsLive == true: - r.Condition.Liveness = Live - case *rr.Condition.IsLive == false: - r.Condition.Liveness = Archived - } - - if rr.Condition.CreatedBefore != "" { - r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) - } - l.Rules = append(l.Rules, r) - } - return l -} - -// Objects returns an iterator over the objects in the bucket that match the Query q. -// If q is nil, no filtering is done. -func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { - it := &ObjectIterator{ - ctx: ctx, - bucket: b, - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - it.fetch, - func() int { return len(it.items) }, - func() interface{} { b := it.items; it.items = nil; return b }) - if q != nil { - it.query = *q - } - return it -} - -// An ObjectIterator is an iterator over ObjectAttrs. -type ObjectIterator struct { - ctx context.Context - bucket *BucketHandle - query Query - pageInfo *iterator.PageInfo - nextFunc func() error - items []*ObjectAttrs -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } - -// Next returns the next result. Its second return value is iterator.Done if -// there are no more results. Once Next returns iterator.Done, all subsequent -// calls will return iterator.Done. -// -// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will -// have a non-empty Prefix field, and a zero value for all other fields. These -// represent prefixes. 
-func (it *ObjectIterator) Next() (*ObjectAttrs, error) { - if err := it.nextFunc(); err != nil { - return nil, err - } - item := it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { - req := it.bucket.c.raw.Objects.List(it.bucket.name) - setClientHeader(req.Header()) - req.Projection("full") - req.Delimiter(it.query.Delimiter) - req.Prefix(it.query.Prefix) - req.Versions(it.query.Versions) - req.PageToken(pageToken) - if it.bucket.userProject != "" { - req.UserProject(it.bucket.userProject) - } - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Objects - var err error - err = runWithRetry(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() - return err - }) - if err != nil { - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - err = ErrBucketNotExist - } - return "", err - } - for _, item := range resp.Items { - it.items = append(it.items, newObject(item)) - } - for _, prefix := range resp.Prefixes { - it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) - } - return resp.NextPageToken, nil -} - -// TODO(jbd): Add storage.buckets.update. - -// Buckets returns an iterator over the buckets in the project. You may -// optionally set the iterator's Prefix field to restrict the list to buckets -// whose names begin with the prefix. By default, all buckets in the project -// are returned. -func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { - it := &BucketIterator{ - ctx: ctx, - client: c, - projectID: projectID, - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - it.fetch, - func() int { return len(it.buckets) }, - func() interface{} { b := it.buckets; it.buckets = nil; return b }) - return it -} - -// A BucketIterator is an iterator over BucketAttrs. -type BucketIterator struct { - // Prefix restricts the iterator to buckets whose names begin with it. 
- Prefix string - - ctx context.Context - client *Client - projectID string - buckets []*BucketAttrs - pageInfo *iterator.PageInfo - nextFunc func() error -} - -// Next returns the next result. Its second return value is iterator.Done if -// there are no more results. Once Next returns iterator.Done, all subsequent -// calls will return iterator.Done. -func (it *BucketIterator) Next() (*BucketAttrs, error) { - if err := it.nextFunc(); err != nil { - return nil, err - } - b := it.buckets[0] - it.buckets = it.buckets[1:] - return b, nil -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } - -func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) { - req := it.client.raw.Buckets.List(it.projectID) - setClientHeader(req.Header()) - req.Projection("full") - req.Prefix(it.Prefix) - req.PageToken(pageToken) - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Buckets - var err error - err = runWithRetry(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() - return err - }) - if err != nil { - return "", err - } - for _, item := range resp.Items { - it.buckets = append(it.buckets, newBucket(item)) - } - return resp.NextPageToken, nil -} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go deleted file mode 100644 index d0a999c1b2f..00000000000 --- a/vendor/cloud.google.com/go/storage/copy.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "errors" - "fmt" - - "golang.org/x/net/context" - raw "google.golang.org/api/storage/v1" -) - -// CopierFrom creates a Copier that can copy src to dst. -// You can immediately call Run on the returned Copier, or -// you can configure it first. -// -// For Requester Pays buckets, the user project of dst is billed, unless it is empty, -// in which case the user project of src is billed. -func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { - return &Copier{dst: dst, src: src} -} - -// A Copier copies a source object to a destination. -type Copier struct { - // ObjectAttrs are optional attributes to set on the destination object. - // Any attributes must be initialized before any calls on the Copier. Nil - // or zero-valued attributes are ignored. - ObjectAttrs - - // RewriteToken can be set before calling Run to resume a copy - // operation. After Run returns a non-nil error, RewriteToken will - // have been updated to contain the value needed to resume the copy. - RewriteToken string - - // ProgressFunc can be used to monitor the progress of a multi-RPC copy - // operation. If ProgressFunc is not nil and copying requires multiple - // calls to the underlying service (see - // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then - // ProgressFunc will be invoked after each call with the number of bytes of - // content copied so far and the total size in bytes of the source object. - // - // ProgressFunc is intended to make upload progress available to the - // application. 
For example, the implementation of ProgressFunc may update - // a progress bar in the application's UI, or log the result of - // float64(copiedBytes)/float64(totalBytes). - // - // ProgressFunc should return quickly without blocking. - ProgressFunc func(copiedBytes, totalBytes uint64) - - dst, src *ObjectHandle -} - -// Run performs the copy. -func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) { - if err := c.src.validate(); err != nil { - return nil, err - } - if err := c.dst.validate(); err != nil { - return nil, err - } - // Convert destination attributes to raw form, omitting the bucket. - // If the bucket is included but name or content-type aren't, the service - // returns a 400 with "Required" as the only message. Omitting the bucket - // does not cause any problems. - rawObject := c.ObjectAttrs.toRawObject("") - for { - res, err := c.callRewrite(ctx, rawObject) - if err != nil { - return nil, err - } - if c.ProgressFunc != nil { - c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize)) - } - if res.Done { // Finished successfully. 
- return newObject(res.Resource), nil - } - } -} - -func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) { - call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj) - - call.Context(ctx).Projection("full") - if c.RewriteToken != "" { - call.RewriteToken(c.RewriteToken) - } - if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { - return nil, err - } - if c.dst.userProject != "" { - call.UserProject(c.dst.userProject) - } else if c.src.userProject != "" { - call.UserProject(c.src.userProject) - } - if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { - return nil, err - } - if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { - return nil, err - } - if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { - return nil, err - } - var res *raw.RewriteResponse - var err error - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { res, err = call.Do(); return err }) - if err != nil { - return nil, err - } - c.RewriteToken = res.RewriteToken - return res, nil -} - -// ComposerFrom creates a Composer that can compose srcs into dst. -// You can immediately call Run on the returned Composer, or you can -// configure it first. -// -// The encryption key for the destination object will be used to decrypt all -// source objects and encrypt the destination object. It is an error -// to specify an encryption key for any of the source objects. -func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { - return &Composer{dst: dst, srcs: srcs} -} - -// A Composer composes source objects into a destination object. -// -// For Requester Pays buckets, the user project of dst is billed. -type Composer struct { - // ObjectAttrs are optional attributes to set on the destination object. 
- // Any attributes must be initialized before any calls on the Composer. Nil - // or zero-valued attributes are ignored. - ObjectAttrs - - dst *ObjectHandle - srcs []*ObjectHandle -} - -// Run performs the compose operation. -func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) { - if err := c.dst.validate(); err != nil { - return nil, err - } - if len(c.srcs) == 0 { - return nil, errors.New("storage: at least one source object must be specified") - } - - req := &raw.ComposeRequest{} - // Compose requires a non-empty Destination, so we always set it, - // even if the caller-provided ObjectAttrs is the zero value. - req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) - for _, src := range c.srcs { - if err := src.validate(); err != nil { - return nil, err - } - if src.bucket != c.dst.bucket { - return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) - } - if src.encryptionKey != nil { - return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) - } - srcObj := &raw.ComposeRequestSourceObjects{ - Name: src.object, - } - if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { - return nil, err - } - req.SourceObjects = append(req.SourceObjects, srcObj) - } - - call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) - if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { - return nil, err - } - if c.dst.userProject != "" { - call.UserProject(c.dst.userProject) - } - if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - var err error - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) - if err != nil { - return nil, err - } - return newObject(obj), nil -} diff --git 
a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go deleted file mode 100644 index 06961b6f8cc..00000000000 --- a/vendor/cloud.google.com/go/storage/doc.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package storage provides an easy way to work with Google Cloud Storage. -Google Cloud Storage stores data in named objects, which are grouped into buckets. - -More information about Google Cloud Storage is available at -https://cloud.google.com/storage/docs. - -All of the methods of this package use exponential backoff to retry calls -that fail with certain errors, as described in -https://cloud.google.com/storage/docs/exponential-backoff. - -Note: This package is in beta. Some backwards-incompatible changes may occur. - - -Creating a Client - -To start working with this package, create a client: - - ctx := context.Background() - client, err := storage.NewClient(ctx) - if err != nil { - // TODO: Handle error. - } - -The client will use your default application credentials. - -If you only wish to access public data, you can create -an unauthenticated client with - - client, err := storage.NewClient(ctx, option.WithoutAuthentication()) - -Buckets - -A Google Cloud Storage bucket is a collection of objects. To work with a -bucket, make a bucket handle: - - bkt := client.Bucket(bucketName) - -A handle is a reference to a bucket. 
You can have a handle even if the -bucket doesn't exist yet. To create a bucket in Google Cloud Storage, -call Create on the handle: - - if err := bkt.Create(ctx, projectID, nil); err != nil { - // TODO: Handle error. - } - -Note that although buckets are associated with projects, bucket names are -global across all projects. - -Each bucket has associated metadata, represented in this package by -BucketAttrs. The third argument to BucketHandle.Create allows you to set -the intial BucketAttrs of a bucket. To retrieve a bucket's attributes, use -Attrs: - - attrs, err := bkt.Attrs(ctx) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", - attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) - -Objects - -An object holds arbitrary data as a sequence of bytes, like a file. You -refer to objects using a handle, just as with buckets. You can use the -standard Go io.Reader and io.Writer interfaces to read and write -object data: - - obj := bkt.Object("data") - // Write something to obj. - // w implements io.Writer. - w := obj.NewWriter(ctx) - // Write some text to obj. This will overwrite whatever is there. - if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { - // TODO: Handle error. - } - // Close, just like writing a file. - if err := w.Close(); err != nil { - // TODO: Handle error. - } - - // Read it back. - r, err := obj.NewReader(ctx) - if err != nil { - // TODO: Handle error. - } - defer r.Close() - if _, err := io.Copy(os.Stdout, r); err != nil { - // TODO: Handle error. - } - // Prints "This object contains text." - -Objects also have attributes, which you can fetch with Attrs: - - objAttrs, err := obj.Attrs(ctx) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("object %s has size %d and can be read using %s\n", - objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) - -ACLs - -Both objects and buckets have ACLs (Access Control Lists). 
An ACL is a list of -ACLRules, each of which specifies the role of a user, group or project. ACLs -are suitable for fine-grained control, but you may prefer using IAM to control -access at the project level (see -https://cloud.google.com/storage/docs/access-control/iam). - -To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method: - - acls, err := obj.ACL().List(ctx) - if err != nil { - // TODO: Handle error. - } - for _, rule := range acls { - fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) - } - -You can also set and delete ACLs. - -Conditions - -Every object has a generation and a metageneration. The generation changes -whenever the content changes, and the metageneration changes whenever the -metadata changes. Conditions let you check these values before an operation; -the operation only executes if the conditions match. You can use conditions to -prevent race conditions in read-modify-write operations. - -For example, say you've read an object's metadata into objAttrs. Now -you want to write to that object, but only if its contents haven't changed -since you read it. Here is how to express that: - - w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) - // Proceed with writing as above. - -Signed URLs - -You can obtain a URL that lets anyone read or write an object for a limited time. -You don't need to create a client to do this. See the documentation of -SignedURL for details. - - url, err := storage.SignedURL(bucketName, "shared-object", opts) - if err != nil { - // TODO: Handle error. - } - fmt.Println(url) - -Authentication - -See examples of authorization and authentication at -https://godoc.org/cloud.google.com/go#pkg-examples. 
-*/ -package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/go110.go b/vendor/cloud.google.com/go/storage/go110.go deleted file mode 100644 index b85e8c3b9c5..00000000000 --- a/vendor/cloud.google.com/go/storage/go110.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.10 - -package storage - -import "google.golang.org/api/googleapi" - -func shouldRetry(err error) bool { - switch e := err.(type) { - case *googleapi.Error: - // Retry on 429 and 5xx, according to - // https://cloud.google.com/storage/docs/exponential-backoff. - return e.Code == 429 || (e.Code >= 500 && e.Code < 600) - default: - return false - } -} diff --git a/vendor/cloud.google.com/go/storage/go17.go b/vendor/cloud.google.com/go/storage/go17.go deleted file mode 100644 index 982db4e1a16..00000000000 --- a/vendor/cloud.google.com/go/storage/go17.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.7 - -package storage - -import ( - "context" - "net/http" -) - -func withContext(r *http.Request, ctx context.Context) *http.Request { - return r.WithContext(ctx) -} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go deleted file mode 100644 index 9365509ed74..00000000000 --- a/vendor/cloud.google.com/go/storage/iam.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "cloud.google.com/go/iam" - "golang.org/x/net/context" - raw "google.golang.org/api/storage/v1" - iampb "google.golang.org/genproto/googleapis/iam/v1" -) - -// IAM provides access to IAM access control for the bucket. -func (b *BucketHandle) IAM() *iam.Handle { - return iam.InternalNewHandleClient(&iamClient{ - raw: b.c.raw, - userProject: b.userProject, - }, b.name) -} - -// iamClient implements the iam.client interface. 
-type iamClient struct { - raw *raw.Service - userProject string -} - -func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) { - call := c.raw.Buckets.GetIamPolicy(resource) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } - var rp *raw.Policy - var err error - err = runWithRetry(ctx, func() error { - rp, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } - return iamFromStoragePolicy(rp), nil -} - -func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error { - rp := iamToStoragePolicy(p) - call := c.raw.Buckets.SetIamPolicy(resource, rp) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } - return runWithRetry(ctx, func() error { - _, err := call.Context(ctx).Do() - return err - }) -} - -func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { - call := c.raw.Buckets.TestIamPermissions(resource, perms) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } - var res *raw.TestIamPermissionsResponse - var err error - err = runWithRetry(ctx, func() error { - res, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } - return res.Permissions, nil -} - -func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { - return &raw.Policy{ - Bindings: iamToStorageBindings(ip.Bindings), - Etag: string(ip.Etag), - } -} - -func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { - var rbs []*raw.PolicyBindings - for _, ib := range ibs { - rbs = append(rbs, &raw.PolicyBindings{ - Role: ib.Role, - Members: ib.Members, - }) - } - return rbs -} - -func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { - return &iampb.Policy{ - Bindings: iamFromStorageBindings(rp.Bindings), - Etag: []byte(rp.Etag), - } -} - -func iamFromStorageBindings(rbs []*raw.PolicyBindings) 
[]*iampb.Binding { - var ibs []*iampb.Binding - for _, rb := range rbs { - ibs = append(ibs, &iampb.Binding{ - Role: rb.Role, - Members: rb.Members, - }) - } - return ibs -} diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go deleted file mode 100644 index 46423a8b253..00000000000 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "cloud.google.com/go/internal" - gax "github.com/googleapis/gax-go" - "golang.org/x/net/context" -) - -// runWithRetry calls the function until it returns nil or a non-retryable error, or -// the context is done. -func runWithRetry(ctx context.Context, call func() error) error { - return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { - err = call() - if err == nil { - return true, nil - } - if shouldRetry(err) { - return false, nil - } - return true, err - }) -} diff --git a/vendor/cloud.google.com/go/storage/not_go110.go b/vendor/cloud.google.com/go/storage/not_go110.go deleted file mode 100644 index c354e74bf1b..00000000000 --- a/vendor/cloud.google.com/go/storage/not_go110.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.10 - -package storage - -import ( - "net/url" - "strings" - - "google.golang.org/api/googleapi" -) - -func shouldRetry(err error) bool { - switch e := err.(type) { - case *googleapi.Error: - // Retry on 429 and 5xx, according to - // https://cloud.google.com/storage/docs/exponential-backoff. - return e.Code == 429 || (e.Code >= 500 && e.Code < 600) - case *url.Error: - // Retry on REFUSED_STREAM. - // Unfortunately the error type is unexported, so we resort to string - // matching. - return strings.Contains(e.Error(), "REFUSED_STREAM") - default: - return false - } -} diff --git a/vendor/cloud.google.com/go/storage/not_go17.go b/vendor/cloud.google.com/go/storage/not_go17.go deleted file mode 100644 index 1f6f7ae95cf..00000000000 --- a/vendor/cloud.google.com/go/storage/not_go17.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !go1.7 - -package storage - -import ( - "net/http" -) - -func withContext(r *http.Request, _ interface{}) *http.Request { - // In Go 1.6 and below, ignore the context. - return r -} diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go deleted file mode 100644 index b95dd453a56..00000000000 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "errors" - "fmt" - "regexp" - - "golang.org/x/net/context" - raw "google.golang.org/api/storage/v1" -) - -// A Notification describes how to send Cloud PubSub messages when certain -// events occur in a bucket. -type Notification struct { - //The ID of the notification. - ID string - - // The ID of the topic to which this subscription publishes. - TopicID string - - // The ID of the project to which the topic belongs. - TopicProjectID string - - // Only send notifications about listed event types. If empty, send notifications - // for all event types. - // See https://cloud.google.com/storage/docs/pubsub-notifications#events. - EventTypes []string - - // If present, only apply this notification configuration to object names that - // begin with this prefix. 
- ObjectNamePrefix string - - // An optional list of additional attributes to attach to each Cloud PubSub - // message published for this notification subscription. - CustomAttributes map[string]string - - // The contents of the message payload. - // See https://cloud.google.com/storage/docs/pubsub-notifications#payload. - PayloadFormat string -} - -// Values for Notification.PayloadFormat. -const ( - // Send no payload with notification messages. - NoPayload = "NONE" - - // Send object metadata as JSON with notification messages. - JSONPayload = "JSON_API_V1" -) - -// Values for Notification.EventTypes. -const ( - // Event that occurs when an object is successfully created. - ObjectFinalizeEvent = "OBJECT_FINALIZE" - - // Event that occurs when the metadata of an existing object changes. - ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE" - - // Event that occurs when an object is permanently deleted. - ObjectDeleteEvent = "OBJECT_DELETE" - - // Event that occurs when the live version of an object becomes an - // archived version. - ObjectArchiveEvent = "OBJECT_ARCHIVE" -) - -func toNotification(rn *raw.Notification) *Notification { - n := &Notification{ - ID: rn.Id, - EventTypes: rn.EventTypes, - ObjectNamePrefix: rn.ObjectNamePrefix, - CustomAttributes: rn.CustomAttributes, - PayloadFormat: rn.PayloadFormat, - } - n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic) - return n -} - -var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)") - -// parseNotificationTopic extracts the project and topic IDs from from the full -// resource name returned by the service. If the name is malformed, it returns -// "?" for both IDs. -func parseNotificationTopic(nt string) (projectID, topicID string) { - matches := topicRE.FindStringSubmatch(nt) - if matches == nil { - return "?", "?" 
- } - return matches[1], matches[2] -} - -func toRawNotification(n *Notification) *raw.Notification { - return &raw.Notification{ - Id: n.ID, - Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", - n.TopicProjectID, n.TopicID), - EventTypes: n.EventTypes, - ObjectNamePrefix: n.ObjectNamePrefix, - CustomAttributes: n.CustomAttributes, - PayloadFormat: string(n.PayloadFormat), - } -} - -// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID -// and PayloadFormat, and must not set its ID. The other fields are all optional. The -// returned Notification's ID can be used to refer to it. -func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (*Notification, error) { - if n.ID != "" { - return nil, errors.New("storage: AddNotification: ID must not be set") - } - if n.TopicProjectID == "" { - return nil, errors.New("storage: AddNotification: missing TopicProjectID") - } - if n.TopicID == "" { - return nil, errors.New("storage: AddNotification: missing TopicID") - } - call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n)) - setClientHeader(call.Header()) - if b.userProject != "" { - call.UserProject(b.userProject) - } - rn, err := call.Context(ctx).Do() - if err != nil { - return nil, err - } - return toNotification(rn), nil -} - -// Notifications returns all the Notifications configured for this bucket, as a map -// indexed by notification ID. 
-func (b *BucketHandle) Notifications(ctx context.Context) (map[string]*Notification, error) { - call := b.c.raw.Notifications.List(b.name) - setClientHeader(call.Header()) - if b.userProject != "" { - call.UserProject(b.userProject) - } - var res *raw.Notifications - var err error - err = runWithRetry(ctx, func() error { - res, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } - return notificationsToMap(res.Items), nil -} - -func notificationsToMap(rns []*raw.Notification) map[string]*Notification { - m := map[string]*Notification{} - for _, rn := range rns { - m[rn.Id] = toNotification(rn) - } - return m -} - -// DeleteNotification deletes the notification with the given ID. -func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) error { - call := b.c.raw.Notifications.Delete(b.name, id) - setClientHeader(call.Header()) - if b.userProject != "" { - call.UserProject(b.userProject) - } - return call.Context(ctx).Do() -} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go deleted file mode 100644 index c96ca8ae4fd..00000000000 --- a/vendor/cloud.google.com/go/storage/reader.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package storage - -import ( - "fmt" - "hash/crc32" - "io" -) - -var crc32cTable = crc32.MakeTable(crc32.Castagnoli) - -// Reader reads a Cloud Storage object. -// It implements io.Reader. -type Reader struct { - body io.ReadCloser - remain, size int64 - contentType string - cacheControl string - checkCRC bool // should we check the CRC? - wantCRC uint32 // the CRC32c value the server sent in the header - gotCRC uint32 // running crc -} - -// Close closes the Reader. It must be called when done reading. -func (r *Reader) Close() error { - return r.body.Close() -} - -func (r *Reader) Read(p []byte) (int, error) { - n, err := r.body.Read(p) - if r.remain != -1 { - r.remain -= int64(n) - } - if r.checkCRC { - r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) - // Check CRC here. It would be natural to check it in Close, but - // everybody defers Close on the assumption that it doesn't return - // anything worth looking at. - if r.remain == 0 && r.gotCRC != r.wantCRC { - return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", - r.gotCRC, r.wantCRC) - } - } - return n, err -} - -// Size returns the size of the object in bytes. -// The returned value is always the same and is not affected by -// calls to Read or Close. -func (r *Reader) Size() int64 { - return r.size -} - -// Remain returns the number of bytes left to read, or -1 if unknown. -func (r *Reader) Remain() int64 { - return r.remain -} - -// ContentType returns the content type of the object. -func (r *Reader) ContentType() string { - return r.contentType -} - -// CacheControl returns the cache control of the object. -func (r *Reader) CacheControl() string { - return r.cacheControl -} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go deleted file mode 100644 index a1b63dd41b3..00000000000 --- a/vendor/cloud.google.com/go/storage/storage.go +++ /dev/null @@ -1,1143 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/api/option" - htransport "google.golang.org/api/transport/http" - - "cloud.google.com/go/internal/optional" - "cloud.google.com/go/internal/version" - "golang.org/x/net/context" - "google.golang.org/api/googleapi" - raw "google.golang.org/api/storage/v1" -) - -var ( - ErrBucketNotExist = errors.New("storage: bucket doesn't exist") - ErrObjectNotExist = errors.New("storage: object doesn't exist") -) - -const userAgent = "gcloud-golang-storage/20151204" - -const ( - // ScopeFullControl grants permissions to manage your - // data and permissions in Google Cloud Storage. - ScopeFullControl = raw.DevstorageFullControlScope - - // ScopeReadOnly grants permissions to - // view your data in Google Cloud Storage. - ScopeReadOnly = raw.DevstorageReadOnlyScope - - // ScopeReadWrite grants permissions to manage your - // data in Google Cloud Storage. 
- ScopeReadWrite = raw.DevstorageReadWriteScope -) - -var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) - -func setClientHeader(headers http.Header) { - headers.Set("x-goog-api-client", xGoogHeader) -} - -// Client is a client for interacting with Google Cloud Storage. -// -// Clients should be reused instead of created as needed. -// The methods of Client are safe for concurrent use by multiple goroutines. -type Client struct { - hc *http.Client - raw *raw.Service -} - -// NewClient creates a new Google Cloud Storage client. -// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. -func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - o := []option.ClientOption{ - option.WithScopes(ScopeFullControl), - option.WithUserAgent(userAgent), - } - opts = append(o, opts...) - hc, ep, err := htransport.NewClient(ctx, opts...) - if err != nil { - return nil, fmt.Errorf("dialing: %v", err) - } - rawService, err := raw.New(hc) - if err != nil { - return nil, fmt.Errorf("storage client: %v", err) - } - if ep != "" { - rawService.BasePath = ep - } - return &Client{ - hc: hc, - raw: rawService, - }, nil -} - -// Close closes the Client. -// -// Close need not be called at program exit. -func (c *Client) Close() error { - // Set fields to nil so that subsequent uses - // will panic. - c.hc = nil - c.raw = nil - return nil -} - -// SignedURLOptions allows you to restrict the access to the signed URL. -type SignedURLOptions struct { - // GoogleAccessID represents the authorizer of the signed URL generation. - // It is typically the Google service account client email address from - // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". - // Required. - GoogleAccessID string - - // PrivateKey is the Google service account private key. It is obtainable - // from the Google Developers Console. 
- // At https://console.developers.google.com/project//apiui/credential, - // create a service account client ID or reuse one of your existing service account - // credentials. Click on the "Generate new P12 key" to generate and download - // a new private key. Once you download the P12 file, use the following command - // to convert it into a PEM file. - // - // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes - // - // Provide the contents of the PEM file as a byte slice. - // Exactly one of PrivateKey or SignBytes must be non-nil. - PrivateKey []byte - - // SignBytes is a function for implementing custom signing. - // If your application is running on Google App Engine, you can use appengine's internal signing function: - // ctx := appengine.NewContext(request) - // acc, _ := appengine.ServiceAccount(ctx) - // url, err := SignedURL("bucket", "object", &SignedURLOptions{ - // GoogleAccessID: acc, - // SignBytes: func(b []byte) ([]byte, error) { - // _, signedBytes, err := appengine.SignBytes(ctx, b) - // return signedBytes, err - // }, - // // etc. - // }) - // - // Exactly one of PrivateKey or SignBytes must be non-nil. - SignBytes func([]byte) ([]byte, error) - - // Method is the HTTP method to be used with the signed URL. - // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. - // Required. - Method string - - // Expires is the expiration time on the signed URL. It must be - // a datetime in the future. - // Required. - Expires time.Time - - // ContentType is the content type header the client must provide - // to use the generated signed URL. - // Optional. - ContentType string - - // Headers is a list of extension headers the client must provide - // in order to use the generated signed URL. - // Optional. - Headers []string - - // MD5 is the base64 encoded MD5 checksum of the file. - // If provided, the client should provide the exact value on the request - // header in order to use the signed URL. - // Optional. 
- MD5 string -} - -// SignedURL returns a URL for the specified object. Signed URLs allow -// the users access to a restricted resource for a limited time without having a -// Google account or signing in. For more information about the signed -// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs. -func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { - if opts == nil { - return "", errors.New("storage: missing required SignedURLOptions") - } - if opts.GoogleAccessID == "" { - return "", errors.New("storage: missing required GoogleAccessID") - } - if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { - return "", errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") - } - if opts.Method == "" { - return "", errors.New("storage: missing required method option") - } - if opts.Expires.IsZero() { - return "", errors.New("storage: missing required expires option") - } - if opts.MD5 != "" { - md5, err := base64.StdEncoding.DecodeString(opts.MD5) - if err != nil || len(md5) != 16 { - return "", errors.New("storage: invalid MD5 checksum") - } - } - - signBytes := opts.SignBytes - if opts.PrivateKey != nil { - key, err := parseKey(opts.PrivateKey) - if err != nil { - return "", err - } - signBytes = func(b []byte) ([]byte, error) { - sum := sha256.Sum256(b) - return rsa.SignPKCS1v15( - rand.Reader, - key, - crypto.SHA256, - sum[:], - ) - } - } - - u := &url.URL{ - Path: fmt.Sprintf("/%s/%s", bucket, name), - } - - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%s\n", opts.Method) - fmt.Fprintf(buf, "%s\n", opts.MD5) - fmt.Fprintf(buf, "%s\n", opts.ContentType) - fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) - if len(opts.Headers) > 0 { - fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n")) - } - fmt.Fprintf(buf, "%s", u.String()) - - b, err := signBytes(buf.Bytes()) - if err != nil { - return "", err - } - encoded := base64.StdEncoding.EncodeToString(b) - u.Scheme = "https" - u.Host = 
"storage.googleapis.com" - q := u.Query() - q.Set("GoogleAccessId", opts.GoogleAccessID) - q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) - q.Set("Signature", string(encoded)) - u.RawQuery = q.Encode() - return u.String(), nil -} - -// ObjectHandle provides operations on an object in a Google Cloud Storage bucket. -// Use BucketHandle.Object to get a handle. -type ObjectHandle struct { - c *Client - bucket string - object string - acl ACLHandle - gen int64 // a negative value indicates latest - conds *Conditions - encryptionKey []byte // AES-256 key - userProject string // for requester-pays buckets - readCompressed bool // Accept-Encoding: gzip -} - -// ACL provides access to the object's access control list. -// This controls who can read and write this object. -// This call does not perform any network operations. -func (o *ObjectHandle) ACL() *ACLHandle { - return &o.acl -} - -// Generation returns a new ObjectHandle that operates on a specific generation -// of the object. -// By default, the handle operates on the latest generation. Not -// all operations work when given a specific generation; check the API -// endpoints at https://cloud.google.com/storage/docs/json_api/ for details. -func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { - o2 := *o - o2.gen = gen - return &o2 -} - -// If returns a new ObjectHandle that applies a set of preconditions. -// Preconditions already set on the ObjectHandle are ignored. -// Operations on the new handle will only occur if the preconditions are -// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions -// for more details. -func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { - o2 := *o - o2.conds = &conds - return &o2 -} - -// Key returns a new ObjectHandle that uses the supplied encryption -// key to encrypt and decrypt the object's contents. -// -// Encryption key must be a 32-byte AES-256 key. -// See https://cloud.google.com/storage/docs/encryption for details. 
-func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { - o2 := *o - o2.encryptionKey = encryptionKey - return &o2 -} - -// Attrs returns meta information about the object. -// ErrObjectNotExist will be returned if the object is not found. -func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) { - if err := o.validate(); err != nil { - return nil, err - } - call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) - if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { - return nil, err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - var err error - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil -} - -// Update updates an object with the provided attributes. -// All zero-value attributes are ignored. -// ErrObjectNotExist will be returned if the object is not found. -func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) { - if err := o.validate(); err != nil { - return nil, err - } - var attrs ObjectAttrs - // Lists of fields to send, and set to null, in the JSON. - var forceSendFields, nullFields []string - if uattrs.ContentType != nil { - attrs.ContentType = optional.ToString(uattrs.ContentType) - // For ContentType, sending the empty string is a no-op. - // Instead we send a null. 
- if attrs.ContentType == "" { - nullFields = append(nullFields, "ContentType") - } else { - forceSendFields = append(forceSendFields, "ContentType") - } - } - if uattrs.ContentLanguage != nil { - attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) - // For ContentLanguage it's an error to send the empty string. - // Instead we send a null. - if attrs.ContentLanguage == "" { - nullFields = append(nullFields, "ContentLanguage") - } else { - forceSendFields = append(forceSendFields, "ContentLanguage") - } - } - if uattrs.ContentEncoding != nil { - attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) - forceSendFields = append(forceSendFields, "ContentEncoding") - } - if uattrs.ContentDisposition != nil { - attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) - forceSendFields = append(forceSendFields, "ContentDisposition") - } - if uattrs.CacheControl != nil { - attrs.CacheControl = optional.ToString(uattrs.CacheControl) - forceSendFields = append(forceSendFields, "CacheControl") - } - if uattrs.Metadata != nil { - attrs.Metadata = uattrs.Metadata - if len(attrs.Metadata) == 0 { - // Sending the empty map is a no-op. We send null instead. - nullFields = append(nullFields, "Metadata") - } else { - forceSendFields = append(forceSendFields, "Metadata") - } - } - if uattrs.ACL != nil { - attrs.ACL = uattrs.ACL - // It's an error to attempt to delete the ACL, so - // we don't append to nullFields here. 
- forceSendFields = append(forceSendFields, "Acl") - } - rawObj := attrs.toRawObject(o.bucket) - rawObj.ForceSendFields = forceSendFields - rawObj.NullFields = nullFields - call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) - if err := applyConds("Update", o.gen, o.conds, call); err != nil { - return nil, err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - var err error - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil -} - -// ObjectAttrsToUpdate is used to update the attributes of an object. -// Only fields set to non-nil values will be updated. -// Set a field to its zero value to delete it. -// -// For example, to change ContentType and delete ContentEncoding and -// Metadata, use -// ObjectAttrsToUpdate{ -// ContentType: "text/html", -// ContentEncoding: "", -// Metadata: map[string]string{}, -// } -type ObjectAttrsToUpdate struct { - ContentType optional.String - ContentLanguage optional.String - ContentEncoding optional.String - ContentDisposition optional.String - CacheControl optional.String - Metadata map[string]string // set to map[string]string{} to delete - ACL []ACLRule -} - -// Delete deletes the single specified object. -func (o *ObjectHandle) Delete(ctx context.Context) error { - if err := o.validate(); err != nil { - return err - } - call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) - if err := applyConds("Delete", o.gen, o.conds, call); err != nil { - return err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - // Encryption doesn't apply to Delete. 
- setClientHeader(call.Header()) - err := runWithRetry(ctx, func() error { return call.Do() }) - switch e := err.(type) { - case nil: - return nil - case *googleapi.Error: - if e.Code == http.StatusNotFound { - return ErrObjectNotExist - } - } - return err -} - -// ReadCompressed when true causes the read to happen without decompressing. -func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { - o2 := *o - o2.readCompressed = compressed - return &o2 -} - -// NewReader creates a new Reader to read the contents of the -// object. -// ErrObjectNotExist will be returned if the object is not found. -// -// The caller must call Close on the returned Reader when done reading. -func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { - return o.NewRangeReader(ctx, 0, -1) -} - -// NewRangeReader reads part of an object, reading at most length bytes -// starting at the given offset. If length is negative, the object is read -// until the end. -func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) { - if err := o.validate(); err != nil { - return nil, err - } - if offset < 0 { - return nil, fmt.Errorf("storage: invalid offset %d < 0", offset) - } - if o.conds != nil { - if err := o.conds.validate("NewRangeReader"); err != nil { - return nil, err - } - } - u := &url.URL{ - Scheme: "https", - Host: "storage.googleapis.com", - Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), - RawQuery: conditionsQuery(o.gen, o.conds), - } - verb := "GET" - if length == 0 { - verb = "HEAD" - } - req, err := http.NewRequest(verb, u.String(), nil) - if err != nil { - return nil, err - } - req = withContext(req, ctx) - if length < 0 && offset > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) - } else if length > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) - } - if o.userProject != "" { - req.Header.Set("X-Goog-User-Project", o.userProject) - } - if o.readCompressed { - 
req.Header.Set("Accept-Encoding", "gzip") - } - if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { - return nil, err - } - var res *http.Response - err = runWithRetry(ctx, func() error { - res, err = o.c.hc.Do(req) - if err != nil { - return err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - body, _ := ioutil.ReadAll(res.Body) - res.Body.Close() - return &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - Body: string(body), - } - } - if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { - res.Body.Close() - return errors.New("storage: partial request not satisfied") - } - return nil - }) - if err != nil { - return nil, err - } - - var size int64 // total size of object, even if a range was requested. - if res.StatusCode == http.StatusPartialContent { - cr := strings.TrimSpace(res.Header.Get("Content-Range")) - if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - } else { - size = res.ContentLength - } - - remain := res.ContentLength - body := res.Body - if length == 0 { - remain = 0 - body.Close() - body = emptyBody - } - var ( - checkCRC bool - crc uint32 - ) - // Even if there is a CRC header, we can't compute the hash on partial data. 
- if remain == size { - crc, checkCRC = parseCRC32c(res) - } - return &Reader{ - body: body, - size: size, - remain: remain, - contentType: res.Header.Get("Content-Type"), - cacheControl: res.Header.Get("Cache-Control"), - wantCRC: crc, - checkCRC: checkCRC, - }, nil -} - -func parseCRC32c(res *http.Response) (uint32, bool) { - const prefix = "crc32c=" - for _, spec := range res.Header["X-Goog-Hash"] { - if strings.HasPrefix(spec, prefix) { - c, err := decodeUint32(spec[len(prefix):]) - if err == nil { - return c, true - } - } - } - return 0, false -} - -var emptyBody = ioutil.NopCloser(strings.NewReader("")) - -// NewWriter returns a storage Writer that writes to the GCS object -// associated with this ObjectHandle. -// -// A new object will be created unless an object with this name already exists. -// Otherwise any previous object with the same name will be replaced. -// The object will not be available (and any previous object will remain) -// until Close has been called. -// -// Attributes can be set on the object by modifying the returned Writer's -// ObjectAttrs field before the first call to Write. If no ContentType -// attribute is specified, the content type will be automatically sniffed -// using net/http.DetectContentType. -// -// It is the caller's responsibility to call Close when writing is done. -func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { - return &Writer{ - ctx: ctx, - o: o, - donec: make(chan struct{}), - ObjectAttrs: ObjectAttrs{Name: o.object}, - ChunkSize: googleapi.DefaultUploadChunkSize, - } -} - -func (o *ObjectHandle) validate() error { - if o.bucket == "" { - return errors.New("storage: bucket name is empty") - } - if o.object == "" { - return errors.New("storage: object name is empty") - } - if !utf8.ValidString(o.object) { - return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) - } - return nil -} - -// parseKey converts the binary contents of a private key file to an -// *rsa.PrivateKey. 
It detects whether the private key is in a PEM container or -// not. If so, it extracts the private key from PEM container before -// conversion. It only supports PEM containers with no passphrase. -func parseKey(key []byte) (*rsa.PrivateKey, error) { - if block, _ := pem.Decode(key); block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, err - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("oauth2: private key is invalid") - } - return parsed, nil -} - -func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl { - var acl []*raw.ObjectAccessControl - if len(oldACL) > 0 { - acl = make([]*raw.ObjectAccessControl, len(oldACL)) - for i, rule := range oldACL { - acl[i] = &raw.ObjectAccessControl{ - Entity: string(rule.Entity), - Role: string(rule.Role), - } - } - } - return acl -} - -// toRawObject copies the editable attributes from o to the raw library's Object type. -func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { - acl := toRawObjectACL(o.ACL) - return &raw.Object{ - Bucket: bucket, - Name: o.Name, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - ContentDisposition: o.ContentDisposition, - StorageClass: o.StorageClass, - Acl: acl, - Metadata: o.Metadata, - } -} - -// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object. -type ObjectAttrs struct { - // Bucket is the name of the bucket containing this GCS object. - // This field is read-only. - Bucket string - - // Name is the name of the object within the bucket. - // This field is read-only. - Name string - - // ContentType is the MIME type of the object's content. - ContentType string - - // ContentLanguage is the content language of the object's content. 
- ContentLanguage string - - // CacheControl is the Cache-Control header to be sent in the response - // headers when serving the object data. - CacheControl string - - // ACL is the list of access control rules for the object. - ACL []ACLRule - - // Owner is the owner of the object. This field is read-only. - // - // If non-zero, it is in the form of "user-". - Owner string - - // Size is the length of the object's content. This field is read-only. - Size int64 - - // ContentEncoding is the encoding of the object's content. - ContentEncoding string - - // ContentDisposition is the optional Content-Disposition header of the object - // sent in the response headers. - ContentDisposition string - - // MD5 is the MD5 hash of the object's content. This field is read-only, - // except when used from a Writer. If set on a Writer, the uploaded - // data is rejected if its MD5 hash does not match this field. - MD5 []byte - - // CRC32C is the CRC32 checksum of the object's content using - // the Castagnoli93 polynomial. This field is read-only, except when - // used from a Writer. If set on a Writer and Writer.SendCRC32C - // is true, the uploaded data is rejected if its CRC32c hash does not - // match this field. - CRC32C uint32 - - // MediaLink is an URL to the object's content. This field is read-only. - MediaLink string - - // Metadata represents user-provided metadata, in key/value pairs. - // It can be nil if no metadata is provided. - Metadata map[string]string - - // Generation is the generation number of the object's content. - // This field is read-only. - Generation int64 - - // Metageneration is the version of the metadata for this - // object at this generation. This field is used for preconditions - // and for detecting changes in metadata. A metageneration number - // is only meaningful in the context of a particular generation - // of a particular object. This field is read-only. - Metageneration int64 - - // StorageClass is the storage class of the object. 
- // This value defines how objects in the bucket are stored and - // determines the SLA and the cost of storage. Typical values are - // "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" - // and "DURABLE_REDUCED_AVAILABILITY". - // It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL" - // or "REGIONAL" depending on the bucket's location settings. - StorageClass string - - // Created is the time the object was created. This field is read-only. - Created time.Time - - // Deleted is the time the object was deleted. - // If not deleted, it is the zero value. This field is read-only. - Deleted time.Time - - // Updated is the creation or modification time of the object. - // For buckets with versioning enabled, changing an object's - // metadata does not change this property. This field is read-only. - Updated time.Time - - // CustomerKeySHA256 is the base64-encoded SHA-256 hash of the - // customer-supplied encryption key for the object. It is empty if there is - // no customer-supplied encryption key. - // See // https://cloud.google.com/storage/docs/encryption for more about - // encryption in Google Cloud Storage. - CustomerKeySHA256 string - - // Prefix is set only for ObjectAttrs which represent synthetic "directory - // entries" when iterating over buckets using Query.Delimiter. See - // ObjectIterator.Next. When set, no other fields in ObjectAttrs will be - // populated. - Prefix string -} - -// convertTime converts a time in RFC3339 format to time.Time. -// If any error occurs in parsing, the zero-value time.Time is silently returned. 
-func convertTime(t string) time.Time { - var r time.Time - if t != "" { - r, _ = time.Parse(time.RFC3339, t) - } - return r -} - -func newObject(o *raw.Object) *ObjectAttrs { - if o == nil { - return nil - } - acl := make([]ACLRule, len(o.Acl)) - for i, rule := range o.Acl { - acl[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - owner := "" - if o.Owner != nil { - owner = o.Owner.Entity - } - md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) - crc32c, _ := decodeUint32(o.Crc32c) - var sha256 string - if o.CustomerEncryption != nil { - sha256 = o.CustomerEncryption.KeySha256 - } - return &ObjectAttrs{ - Bucket: o.Bucket, - Name: o.Name, - ContentType: o.ContentType, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - ACL: acl, - Owner: owner, - ContentEncoding: o.ContentEncoding, - ContentDisposition: o.ContentDisposition, - Size: int64(o.Size), - MD5: md5, - CRC32C: crc32c, - MediaLink: o.MediaLink, - Metadata: o.Metadata, - Generation: o.Generation, - Metageneration: o.Metageneration, - StorageClass: o.StorageClass, - CustomerKeySHA256: sha256, - Created: convertTime(o.TimeCreated), - Deleted: convertTime(o.TimeDeleted), - Updated: convertTime(o.Updated), - } -} - -// Decode a uint32 encoded in Base64 in big-endian byte order. -func decodeUint32(b64 string) (uint32, error) { - d, err := base64.StdEncoding.DecodeString(b64) - if err != nil { - return 0, err - } - if len(d) != 4 { - return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d) - } - return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil -} - -// Encode a uint32 as Base64 in big-endian byte order. -func encodeUint32(u uint32) string { - b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)} - return base64.StdEncoding.EncodeToString(b) -} - -// Query represents a query to filter objects from a bucket. -type Query struct { - // Delimiter returns results in a directory-like fashion. 
- // Results will contain only objects whose names, aside from the - // prefix, do not contain delimiter. Objects whose names, - // aside from the prefix, contain delimiter will have their name, - // truncated after the delimiter, returned in prefixes. - // Duplicate prefixes are omitted. - // Optional. - Delimiter string - - // Prefix is the prefix filter to query objects - // whose names begin with this prefix. - // Optional. - Prefix string - - // Versions indicates whether multiple versions of the same - // object will be included in the results. - Versions bool -} - -// contentTyper implements ContentTyper to enable an -// io.ReadCloser to specify its MIME type. -type contentTyper struct { - io.Reader - t string -} - -func (c *contentTyper) ContentType() string { - return c.t -} - -// Conditions constrain methods to act on specific generations of -// objects. -// -// The zero value is an empty set of constraints. Not all conditions or -// combinations of conditions are applicable to all methods. -// See https://cloud.google.com/storage/docs/generations-preconditions -// for details on how these operate. -type Conditions struct { - // Generation constraints. - // At most one of the following can be set to a non-zero value. - - // GenerationMatch specifies that the object must have the given generation - // for the operation to occur. - // If GenerationMatch is zero, it has no effect. - // Use DoesNotExist to specify that the object does not exist in the bucket. - GenerationMatch int64 - - // GenerationNotMatch specifies that the object must not have the given - // generation for the operation to occur. - // If GenerationNotMatch is zero, it has no effect. - GenerationNotMatch int64 - - // DoesNotExist specifies that the object must not exist in the bucket for - // the operation to occur. - // If DoesNotExist is false, it has no effect. - DoesNotExist bool - - // Metadata generation constraints. - // At most one of the following can be set to a non-zero value. 
- - // MetagenerationMatch specifies that the object must have the given - // metageneration for the operation to occur. - // If MetagenerationMatch is zero, it has no effect. - MetagenerationMatch int64 - - // MetagenerationNotMatch specifies that the object must not have the given - // metageneration for the operation to occur. - // If MetagenerationNotMatch is zero, it has no effect. - MetagenerationNotMatch int64 -} - -func (c *Conditions) validate(method string) error { - if *c == (Conditions{}) { - return fmt.Errorf("storage: %s: empty conditions", method) - } - if !c.isGenerationValid() { - return fmt.Errorf("storage: %s: multiple conditions specified for generation", method) - } - if !c.isMetagenerationValid() { - return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) - } - return nil -} - -func (c *Conditions) isGenerationValid() bool { - n := 0 - if c.GenerationMatch != 0 { - n++ - } - if c.GenerationNotMatch != 0 { - n++ - } - if c.DoesNotExist { - n++ - } - return n <= 1 -} - -func (c *Conditions) isMetagenerationValid() bool { - return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0 -} - -// applyConds modifies the provided call using the conditions in conds. -// call is something that quacks like a *raw.WhateverCall. 
-func applyConds(method string, gen int64, conds *Conditions, call interface{}) error { - cval := reflect.ValueOf(call) - if gen >= 0 { - if !setConditionField(cval, "Generation", gen) { - return fmt.Errorf("storage: %s: generation not supported", method) - } - } - if conds == nil { - return nil - } - if err := conds.validate(method); err != nil { - return err - } - switch { - case conds.GenerationMatch != 0: - if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) { - return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) - } - case conds.GenerationNotMatch != 0: - if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) { - return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) - } - case conds.DoesNotExist: - if !setConditionField(cval, "IfGenerationMatch", int64(0)) { - return fmt.Errorf("storage: %s: DoesNotExist not supported", method) - } - } - switch { - case conds.MetagenerationMatch != 0: - if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) - } - case conds.MetagenerationNotMatch != 0: - if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) - } - } - return nil -} - -func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { - if gen >= 0 { - call.SourceGeneration(gen) - } - if conds == nil { - return nil - } - if err := conds.validate("CopyTo source"); err != nil { - return err - } - switch { - case conds.GenerationMatch != 0: - call.IfSourceGenerationMatch(conds.GenerationMatch) - case conds.GenerationNotMatch != 0: - call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) - case conds.DoesNotExist: - call.IfSourceGenerationMatch(0) - } - switch { - case conds.MetagenerationMatch != 0: - 
call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) - case conds.MetagenerationNotMatch != 0: - call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) - } - return nil -} - -// setConditionField sets a field on a *raw.WhateverCall. -// We can't use anonymous interfaces because the return type is -// different, since the field setters are builders. -func setConditionField(call reflect.Value, name string, value interface{}) bool { - m := call.MethodByName(name) - if !m.IsValid() { - return false - } - m.Call([]reflect.Value{reflect.ValueOf(value)}) - return true -} - -// conditionsQuery returns the generation and conditions as a URL query -// string suitable for URL.RawQuery. It assumes that the conditions -// have been validated. -func conditionsQuery(gen int64, conds *Conditions) string { - // URL escapes are elided because integer strings are URL-safe. - var buf []byte - - appendParam := func(s string, n int64) { - if len(buf) > 0 { - buf = append(buf, '&') - } - buf = append(buf, s...) - buf = strconv.AppendInt(buf, n, 10) - } - - if gen >= 0 { - appendParam("generation=", gen) - } - if conds == nil { - return string(buf) - } - switch { - case conds.GenerationMatch != 0: - appendParam("ifGenerationMatch=", conds.GenerationMatch) - case conds.GenerationNotMatch != 0: - appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch) - case conds.DoesNotExist: - appendParam("ifGenerationMatch=", 0) - } - switch { - case conds.MetagenerationMatch != 0: - appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch) - case conds.MetagenerationNotMatch != 0: - appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch) - } - return string(buf) -} - -// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods -// that modifyCall searches for by name. 
-type composeSourceObj struct { - src *raw.ComposeRequestSourceObjects -} - -func (c composeSourceObj) Generation(gen int64) { - c.src.Generation = gen -} - -func (c composeSourceObj) IfGenerationMatch(gen int64) { - // It's safe to overwrite ObjectPreconditions, since its only field is - // IfGenerationMatch. - c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{ - IfGenerationMatch: gen, - } -} - -func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error { - if key == nil { - return nil - } - // TODO(jbd): Ask the API team to return a more user-friendly error - // and avoid doing this check at the client level. - if len(key) != 32 { - return errors.New("storage: not a 32-byte AES-256 key") - } - var cs string - if copySource { - cs = "copy-source-" - } - headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256") - headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) - keyHash := sha256.Sum256(key) - headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) - return nil -} - -// TODO(jbd): Add storage.objects.watch. diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go deleted file mode 100644 index 28eb74afd2e..00000000000 --- a/vendor/cloud.google.com/go/storage/writer.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "encoding/base64" - "errors" - "fmt" - "io" - "unicode/utf8" - - "golang.org/x/net/context" - "google.golang.org/api/googleapi" - raw "google.golang.org/api/storage/v1" -) - -// A Writer writes a Cloud Storage object. -type Writer struct { - // ObjectAttrs are optional attributes to set on the object. Any attributes - // must be initialized before the first Write call. Nil or zero-valued - // attributes are ignored. - ObjectAttrs - - // SendCRC specifies whether to transmit a CRC32C field. It should be set - // to true in addition to setting the Writer's CRC32C field, because zero - // is a valid CRC and normally a zero would not be transmitted. - // If a CRC32C is sent, and the data written does not match the checksum, - // the write will be rejected. - SendCRC32C bool - - // ChunkSize controls the maximum number of bytes of the object that the - // Writer will attempt to send to the server in a single request. Objects - // smaller than the size will be sent in a single request, while larger - // objects will be split over multiple requests. The size will be rounded up - // to the nearest multiple of 256K. If zero, chunking will be disabled and - // the object will be uploaded in a single request. - // - // ChunkSize will default to a reasonable value. Any custom configuration - // must be done before the first Write call. - ChunkSize int - - // ProgressFunc can be used to monitor the progress of a large write. - // operation. If ProgressFunc is not nil and writing requires multiple - // calls to the underlying service (see - // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload), - // then ProgressFunc will be invoked after each call with the number of bytes of - // content copied so far. - // - // ProgressFunc should return quickly without blocking. 
- ProgressFunc func(int64) - - ctx context.Context - o *ObjectHandle - - opened bool - pw *io.PipeWriter - - donec chan struct{} // closed after err and obj are set. - err error - obj *ObjectAttrs -} - -func (w *Writer) open() error { - attrs := w.ObjectAttrs - // Check the developer didn't change the object Name (this is unfortunate, but - // we don't want to store an object under the wrong name). - if attrs.Name != w.o.object { - return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) - } - if !utf8.ValidString(attrs.Name) { - return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) - } - pr, pw := io.Pipe() - w.pw = pw - w.opened = true - - if w.ChunkSize < 0 { - return errors.New("storage: Writer.ChunkSize must be non-negative") - } - mediaOpts := []googleapi.MediaOption{ - googleapi.ChunkSize(w.ChunkSize), - } - if c := attrs.ContentType; c != "" { - mediaOpts = append(mediaOpts, googleapi.ContentType(c)) - } - - go func() { - defer close(w.donec) - - rawObj := attrs.toRawObject(w.o.bucket) - if w.SendCRC32C { - rawObj.Crc32c = encodeUint32(attrs.CRC32C) - } - if w.MD5 != nil { - rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5) - } - call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj). - Media(pr, mediaOpts...). - Projection("full"). - Context(w.ctx) - if w.ProgressFunc != nil { - call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) }) - } - if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { - w.err = err - pr.CloseWithError(w.err) - return - } - var resp *raw.Object - err := applyConds("NewWriter", w.o.gen, w.o.conds, call) - if err == nil { - if w.o.userProject != "" { - call.UserProject(w.o.userProject) - } - setClientHeader(call.Header()) - // If the chunk size is zero, then no chunking is done on the Reader, - // which means we cannot retry: the first call will read the data, and if - // it fails, there is no way to re-read. 
- if w.ChunkSize == 0 { - resp, err = call.Do() - } else { - // We will only retry here if the initial POST, which obtains a URI for - // the resumable upload, fails with a retryable error. The upload itself - // has its own retry logic. - err = runWithRetry(w.ctx, func() error { - var err2 error - resp, err2 = call.Do() - return err2 - }) - } - } - if err != nil { - w.err = err - pr.CloseWithError(w.err) - return - } - w.obj = newObject(resp) - }() - return nil -} - -// Write appends to w. It implements the io.Writer interface. -// -// Since writes happen asynchronously, Write may return a nil -// error even though the write failed (or will fail). Always -// use the error returned from Writer.Close to determine if -// the upload was successful. -func (w *Writer) Write(p []byte) (n int, err error) { - if w.err != nil { - return 0, w.err - } - if !w.opened { - if err := w.open(); err != nil { - return 0, err - } - } - return w.pw.Write(p) -} - -// Close completes the write operation and flushes any buffered data. -// If Close doesn't return an error, metadata about the written object -// can be retrieved by calling Attrs. -func (w *Writer) Close() error { - if !w.opened { - if err := w.open(); err != nil { - return err - } - } - if err := w.pw.Close(); err != nil { - return err - } - <-w.donec - return w.err -} - -// CloseWithError aborts the write operation with the provided error. -// CloseWithError always returns nil. -func (w *Writer) CloseWithError(err error) error { - if !w.opened { - return nil - } - return w.pw.CloseWithError(err) -} - -// Attrs returns metadata about a successfully-written object. -// It's only valid to call it after Close returns nil. 
-func (w *Writer) Attrs() *ObjectAttrs { - return w.obj -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE deleted file mode 100644 index af39a91e703..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE deleted file mode 100644 index 2d1d72608c2..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Microsoft Azure-SDK-for-Go -Copyright 2014-2017 Microsoft - -This product includes software developed at -the Microsoft Corporation (https://www.microsoft.com). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go deleted file mode 100755 index 0870a03ded6..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go +++ /dev/null @@ -1,931 +0,0 @@ -package storage - -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "net/http" -) - -// AccountsClient is the the Azure Storage Management API. -type AccountsClient struct { - ManagementClient -} - -// NewAccountsClient creates an instance of the AccountsClient client. -func NewAccountsClient(subscriptionID string) AccountsClient { - return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client. -func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { - return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CheckNameAvailability checks that the storage account name is valid and is not already in use. -// -// accountName is the name of the storage account within the specified resource group. Storage account names must be -// between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client AccountsClient) CheckNameAvailability(accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName.Name", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "accountName.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "CheckNameAvailability") - } - - req, err := client.CheckNameAvailabilityPreparer(accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request") - return - } - - resp, err := client.CheckNameAvailabilitySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request") - return - } - - result, err = client.CheckNameAvailabilityResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request") - } - - return -} - -// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
-func (client AccountsClient) CheckNameAvailabilityPreparer(accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability", pathParameters), - autorest.WithJSON(accountName), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always -// closes the http.Response Body. -func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Create asynchronously creates a new storage account with the specified parameters. If an account is already created -// and a subsequent create request is issued with different properties, the account properties will be updated. If an -// account is already created and a subsequent create or update request is issued with the exact same set of -// properties, the request will succeed. 
This method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. -// -// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. -// accountName is the name of the storage account within the specified resource group. Storage account names must be -// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to -// provide for the created account. -func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (<-chan Account, <-chan error) { - resultChan := make(chan Account, 1) - errChan := make(chan error, 1) - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Identity", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Identity.Type", Name: validation.Null, Rule: true, Chain: nil}}}, - {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: 
"parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}}, - }}}}}); err != nil { - errChan <- validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Create") - close(errChan) - close(resultChan) - return resultChan, errChan - } - - go func() { - var err error - var result Account - defer func() { - if err != nil { - errChan <- err - } - resultChan <- result - close(resultChan) - close(errChan) - }() - req, err := client.CreatePreparer(resourceGroupName, accountName, parameters, cancel) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request") - return - } - - resp, err := client.CreateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure sending request") - return - } - - result, err = client.CreateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure responding to request") - } - }() - return resultChan, errChan -} - -// CreatePreparer prepares the Create request. 
-func (client AccountsClient) CreatePreparer(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{Cancel: cancel}) -} - -// CreateSender sends the Create request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) CreateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, - req, - azure.DoPollForAsynchronous(client.PollingDelay)) -} - -// CreateResponder handles the response to the Create request. The method always -// closes the http.Response Body. -func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes a storage account in Microsoft Azure. -// -// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. 
-// accountName is the name of the storage account within the specified resource group. Storage account names must be -// between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client AccountsClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Delete") - } - - req, err := client.DeletePreparer(resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure responding to request") - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client AccountsClient) DeletePreparer(resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// GetProperties returns the properties for the specified storage account including but not limited to name, SKU name, -// location, and account status. The ListKeys operation should be used to retrieve storage keys. -// -// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. -// accountName is the name of the storage account within the specified resource group. 
Storage account names must be -// between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client AccountsClient) GetProperties(resourceGroupName string, accountName string) (result Account, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "GetProperties") - } - - req, err := client.GetPropertiesPreparer(resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request") - return - } - - resp, err := client.GetPropertiesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request") - return - } - - result, err = client.GetPropertiesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure responding to request") - } - - return -} - -// GetPropertiesPreparer prepares the GetProperties request. 
-func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// GetPropertiesSender sends the GetProperties request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// GetPropertiesResponder handles the response to the GetProperties request. The method always -// closes the http.Response Body. -func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List lists all the storage accounts available under the subscription. Note that storage keys are not returned; use -// the ListKeys operation for this. 
-func (client AccountsClient) List() (result AccountListResult, err error) { - req, err := client.ListPreparer() - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client AccountsClient) ListPreparer() (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListAccountSAS list SAS credentials of a storage account. -// -// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. -// accountName is the name of the storage account within the specified resource group. Storage account names must be -// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to -// provide to list SAS credentials for the storage account. -func (client AccountsClient) ListAccountSAS(resourceGroupName string, accountName string, parameters AccountSasParameters) (result ListAccountSasResponse, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.SharedAccessExpiryTime", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListAccountSAS") - } - - req, err := client.ListAccountSASPreparer(resourceGroupName, accountName, parameters) - if err != 
nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", nil, "Failure preparing request") - return - } - - resp, err := client.ListAccountSASSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure sending request") - return - } - - result, err = client.ListAccountSASResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure responding to request") - } - - return -} - -// ListAccountSASPreparer prepares the ListAccountSAS request. -func (client AccountsClient) ListAccountSASPreparer(resourceGroupName string, accountName string, parameters AccountSasParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// ListAccountSASSender sends the ListAccountSAS request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) ListAccountSASSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// ListAccountSASResponder handles the response to the ListAccountSAS request. 
The method always -// closes the http.Response Body. -func (client AccountsClient) ListAccountSASResponder(resp *http.Response) (result ListAccountSasResponse, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByResourceGroup lists all the storage accounts available under the given resource group. Note that storage keys -// are not returned; use the ListKeys operation for this. -// -// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. -func (client AccountsClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListByResourceGroup") - } - - req, err := client.ListByResourceGroupPreparer(resourceGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", 
"ListByResourceGroup", resp, "Failure responding to request") - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. -func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListKeys lists the access keys for the specified storage account. -// -// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. 
-// accountName is the name of the storage account within the specified resource group. Storage account names must be -// between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client AccountsClient) ListKeys(resourceGroupName string, accountName string) (result AccountListKeysResult, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListKeys") - } - - req, err := client.ListKeysPreparer(resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request") - return - } - - resp, err := client.ListKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request") - return - } - - result, err = client.ListKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure responding to request") - } - - return -} - -// ListKeysPreparer prepares the ListKeys request. 
-func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// ListKeysSender sends the ListKeys request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// ListKeysResponder handles the response to the ListKeys request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountListKeysResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListServiceSAS list service SAS credentials of a specific resource. -// -// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. -// accountName is the name of the storage account within the specified resource group. Storage account names must be -// between 3 and 24 characters in length and use numbers and lower-case letters only. 
parameters is the parameters to -// provide to list service SAS credentials. -func (client AccountsClient) ListServiceSAS(resourceGroupName string, accountName string, parameters ServiceSasParameters) (result ListServiceSasResponse, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CanonicalizedResource", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Identifier", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Identifier", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListServiceSAS") - } - - req, err := client.ListServiceSASPreparer(resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", nil, "Failure preparing request") - return - } - - resp, err := client.ListServiceSASSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure sending request") - return - } - - result, err = client.ListServiceSASResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure 
responding to request") - } - - return -} - -// ListServiceSASPreparer prepares the ListServiceSAS request. -func (client AccountsClient) ListServiceSASPreparer(resourceGroupName string, accountName string, parameters ServiceSasParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// ListServiceSASSender sends the ListServiceSAS request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) ListServiceSASSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// ListServiceSASResponder handles the response to the ListServiceSAS request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListServiceSASResponder(resp *http.Response) (result ListServiceSasResponse, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RegenerateKey regenerates one of the access keys for the specified storage account. 
-// -// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. -// accountName is the name of the storage account within the specified resource group. Storage account names must be -// between 3 and 24 characters in length and use numbers and lower-case letters only. regenerateKey is specifies name -// of the key which should be regenerated -- key1 or key2. -func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: regenerateKey, - Constraints: []validation.Constraint{{Target: "regenerateKey.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "RegenerateKey") - } - - req, err := client.RegenerateKeyPreparer(resourceGroupName, accountName, regenerateKey) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request") - return - } - - resp, err := client.RegenerateKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request") - return - } - - result, err = 
client.RegenerateKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure responding to request") - } - - return -} - -// RegenerateKeyPreparer prepares the RegenerateKey request. -func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey", pathParameters), - autorest.WithJSON(regenerateKey), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// RegenerateKeySender sends the RegenerateKey request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always -// closes the http.Response Body. 
-func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountListKeysResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Update the update operation can be used to update the SKU, encryption, access tier, or tags for a storage account. -// It can also be used to map the account to a custom domain. Only one custom domain is supported per storage account; -// the replacement/change of custom domain is not supported. In order to replace an old custom domain, the old value -// must be cleared/unregistered before a new value can be set. The update of multiple properties is supported. This -// call does not change the storage keys for the account. If you want to change the storage account keys, use the -// regenerate keys operation. The location and name of the storage account cannot be changed after creation. -// -// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. -// accountName is the name of the storage account within the specified resource group. Storage account names must be -// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to -// provide for the updated account. 
-func (client AccountsClient) Update(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Update") - } - - req, err := client.UpdatePreparer(resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure responding to request") - } - - return -} - -// UpdatePreparer prepares the Update request. 
-func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsJSON(), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. 
-func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go deleted file mode 100755 index 133386ddbeb..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go +++ /dev/null @@ -1,51 +0,0 @@ -// Package storage implements the Azure ARM Storage service API version 2017-06-01. -// -// The Azure Storage Management API. -package storage - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" -) - -const ( - // DefaultBaseURI is the default URI used for the service Storage - DefaultBaseURI = "https://management.azure.com" -) - -// ManagementClient is the base client for Storage. -type ManagementClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the ManagementClient client. 
-func New(subscriptionID string) ManagementClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the ManagementClient client. -func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { - return ManagementClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go deleted file mode 100755 index 37c1679ee84..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go +++ /dev/null @@ -1,602 +0,0 @@ -package storage - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" -) - -// AccessTier enumerates the values for access tier. -type AccessTier string - -const ( - // Cool specifies the cool state for access tier. - Cool AccessTier = "Cool" - // Hot specifies the hot state for access tier. - Hot AccessTier = "Hot" -) - -// AccountStatus enumerates the values for account status. 
-type AccountStatus string - -const ( - // Available specifies the available state for account status. - Available AccountStatus = "available" - // Unavailable specifies the unavailable state for account status. - Unavailable AccountStatus = "unavailable" -) - -// Action enumerates the values for action. -type Action string - -const ( - // Allow specifies the allow state for action. - Allow Action = "Allow" -) - -// Bypass enumerates the values for bypass. -type Bypass string - -const ( - // AzureServices specifies the azure services state for bypass. - AzureServices Bypass = "AzureServices" - // Logging specifies the logging state for bypass. - Logging Bypass = "Logging" - // Metrics specifies the metrics state for bypass. - Metrics Bypass = "Metrics" - // None specifies the none state for bypass. - None Bypass = "None" -) - -// DefaultAction enumerates the values for default action. -type DefaultAction string - -const ( - // DefaultActionAllow specifies the default action allow state for default action. - DefaultActionAllow DefaultAction = "Allow" - // DefaultActionDeny specifies the default action deny state for default action. - DefaultActionDeny DefaultAction = "Deny" -) - -// HTTPProtocol enumerates the values for http protocol. -type HTTPProtocol string - -const ( - // HTTPS specifies the https state for http protocol. - HTTPS HTTPProtocol = "https" - // Httpshttp specifies the httpshttp state for http protocol. - Httpshttp HTTPProtocol = "https,http" -) - -// KeyPermission enumerates the values for key permission. -type KeyPermission string - -const ( - // Full specifies the full state for key permission. - Full KeyPermission = "Full" - // Read specifies the read state for key permission. - Read KeyPermission = "Read" -) - -// KeySource enumerates the values for key source. -type KeySource string - -const ( - // MicrosoftKeyvault specifies the microsoft keyvault state for key source. 
- MicrosoftKeyvault KeySource = "Microsoft.Keyvault" - // MicrosoftStorage specifies the microsoft storage state for key source. - MicrosoftStorage KeySource = "Microsoft.Storage" -) - -// Kind enumerates the values for kind. -type Kind string - -const ( - // BlobStorage specifies the blob storage state for kind. - BlobStorage Kind = "BlobStorage" - // Storage specifies the storage state for kind. - Storage Kind = "Storage" -) - -// Permissions enumerates the values for permissions. -type Permissions string - -const ( - // A specifies the a state for permissions. - A Permissions = "a" - // C specifies the c state for permissions. - C Permissions = "c" - // D specifies the d state for permissions. - D Permissions = "d" - // L specifies the l state for permissions. - L Permissions = "l" - // P specifies the p state for permissions. - P Permissions = "p" - // R specifies the r state for permissions. - R Permissions = "r" - // U specifies the u state for permissions. - U Permissions = "u" - // W specifies the w state for permissions. - W Permissions = "w" -) - -// ProvisioningState enumerates the values for provisioning state. -type ProvisioningState string - -const ( - // Creating specifies the creating state for provisioning state. - Creating ProvisioningState = "Creating" - // ResolvingDNS specifies the resolving dns state for provisioning state. - ResolvingDNS ProvisioningState = "ResolvingDNS" - // Succeeded specifies the succeeded state for provisioning state. - Succeeded ProvisioningState = "Succeeded" -) - -// Reason enumerates the values for reason. -type Reason string - -const ( - // AccountNameInvalid specifies the account name invalid state for reason. - AccountNameInvalid Reason = "AccountNameInvalid" - // AlreadyExists specifies the already exists state for reason. - AlreadyExists Reason = "AlreadyExists" -) - -// ReasonCode enumerates the values for reason code. 
-type ReasonCode string - -const ( - // NotAvailableForSubscription specifies the not available for subscription state for reason code. - NotAvailableForSubscription ReasonCode = "NotAvailableForSubscription" - // QuotaID specifies the quota id state for reason code. - QuotaID ReasonCode = "QuotaId" -) - -// Services enumerates the values for services. -type Services string - -const ( - // B specifies the b state for services. - B Services = "b" - // F specifies the f state for services. - F Services = "f" - // Q specifies the q state for services. - Q Services = "q" - // T specifies the t state for services. - T Services = "t" -) - -// SignedResource enumerates the values for signed resource. -type SignedResource string - -const ( - // SignedResourceB specifies the signed resource b state for signed resource. - SignedResourceB SignedResource = "b" - // SignedResourceC specifies the signed resource c state for signed resource. - SignedResourceC SignedResource = "c" - // SignedResourceF specifies the signed resource f state for signed resource. - SignedResourceF SignedResource = "f" - // SignedResourceS specifies the signed resource s state for signed resource. - SignedResourceS SignedResource = "s" -) - -// SignedResourceTypes enumerates the values for signed resource types. -type SignedResourceTypes string - -const ( - // SignedResourceTypesC specifies the signed resource types c state for signed resource types. - SignedResourceTypesC SignedResourceTypes = "c" - // SignedResourceTypesO specifies the signed resource types o state for signed resource types. - SignedResourceTypesO SignedResourceTypes = "o" - // SignedResourceTypesS specifies the signed resource types s state for signed resource types. - SignedResourceTypesS SignedResourceTypes = "s" -) - -// SkuName enumerates the values for sku name. -type SkuName string - -const ( - // PremiumLRS specifies the premium lrs state for sku name. 
- PremiumLRS SkuName = "Premium_LRS" - // StandardGRS specifies the standard grs state for sku name. - StandardGRS SkuName = "Standard_GRS" - // StandardLRS specifies the standard lrs state for sku name. - StandardLRS SkuName = "Standard_LRS" - // StandardRAGRS specifies the standard ragrs state for sku name. - StandardRAGRS SkuName = "Standard_RAGRS" - // StandardZRS specifies the standard zrs state for sku name. - StandardZRS SkuName = "Standard_ZRS" -) - -// SkuTier enumerates the values for sku tier. -type SkuTier string - -const ( - // Premium specifies the premium state for sku tier. - Premium SkuTier = "Premium" - // Standard specifies the standard state for sku tier. - Standard SkuTier = "Standard" -) - -// State enumerates the values for state. -type State string - -const ( - // StateDeprovisioning specifies the state deprovisioning state for state. - StateDeprovisioning State = "deprovisioning" - // StateFailed specifies the state failed state for state. - StateFailed State = "failed" - // StateNetworkSourceDeleted specifies the state network source deleted state for state. - StateNetworkSourceDeleted State = "networkSourceDeleted" - // StateProvisioning specifies the state provisioning state for state. - StateProvisioning State = "provisioning" - // StateSucceeded specifies the state succeeded state for state. - StateSucceeded State = "succeeded" -) - -// UsageUnit enumerates the values for usage unit. -type UsageUnit string - -const ( - // Bytes specifies the bytes state for usage unit. - Bytes UsageUnit = "Bytes" - // BytesPerSecond specifies the bytes per second state for usage unit. - BytesPerSecond UsageUnit = "BytesPerSecond" - // Count specifies the count state for usage unit. - Count UsageUnit = "Count" - // CountsPerSecond specifies the counts per second state for usage unit. - CountsPerSecond UsageUnit = "CountsPerSecond" - // Percent specifies the percent state for usage unit. 
- Percent UsageUnit = "Percent" - // Seconds specifies the seconds state for usage unit. - Seconds UsageUnit = "Seconds" -) - -// Account is the storage account. -type Account struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` - Sku *Sku `json:"sku,omitempty"` - Kind Kind `json:"kind,omitempty"` - Identity *Identity `json:"identity,omitempty"` - *AccountProperties `json:"properties,omitempty"` -} - -// AccountCheckNameAvailabilityParameters is the parameters used to check the availabity of the storage account name. -type AccountCheckNameAvailabilityParameters struct { - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` -} - -// AccountCreateParameters is the parameters used when creating a storage account. -type AccountCreateParameters struct { - Sku *Sku `json:"sku,omitempty"` - Kind Kind `json:"kind,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` - Identity *Identity `json:"identity,omitempty"` - *AccountPropertiesCreateParameters `json:"properties,omitempty"` -} - -// AccountKey is an access key for the storage account. -type AccountKey struct { - KeyName *string `json:"keyName,omitempty"` - Value *string `json:"value,omitempty"` - Permissions KeyPermission `json:"permissions,omitempty"` -} - -// AccountListKeysResult is the response from the ListKeys operation. -type AccountListKeysResult struct { - autorest.Response `json:"-"` - Keys *[]AccountKey `json:"keys,omitempty"` -} - -// AccountListResult is the response from the List Storage Accounts operation. -type AccountListResult struct { - autorest.Response `json:"-"` - Value *[]Account `json:"value,omitempty"` -} - -// AccountProperties is properties of the storage account. 
-type AccountProperties struct { - ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` - PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"` - PrimaryLocation *string `json:"primaryLocation,omitempty"` - StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"` - LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"` - SecondaryLocation *string `json:"secondaryLocation,omitempty"` - StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"` - CreationTime *date.Time `json:"creationTime,omitempty"` - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"` - Encryption *Encryption `json:"encryption,omitempty"` - AccessTier AccessTier `json:"accessTier,omitempty"` - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` - NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` -} - -// AccountPropertiesCreateParameters is the parameters used to create the storage account. -type AccountPropertiesCreateParameters struct { - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - Encryption *Encryption `json:"encryption,omitempty"` - NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` - AccessTier AccessTier `json:"accessTier,omitempty"` - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` -} - -// AccountPropertiesUpdateParameters is the parameters used when updating a storage account. -type AccountPropertiesUpdateParameters struct { - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - Encryption *Encryption `json:"encryption,omitempty"` - AccessTier AccessTier `json:"accessTier,omitempty"` - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` - NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` -} - -// AccountRegenerateKeyParameters is the parameters used to regenerate the storage account key. 
-type AccountRegenerateKeyParameters struct { - KeyName *string `json:"keyName,omitempty"` -} - -// AccountSasParameters is the parameters to list SAS credentials of a storage account. -type AccountSasParameters struct { - Services Services `json:"signedServices,omitempty"` - ResourceTypes SignedResourceTypes `json:"signedResourceTypes,omitempty"` - Permissions Permissions `json:"signedPermission,omitempty"` - IPAddressOrRange *string `json:"signedIp,omitempty"` - Protocols HTTPProtocol `json:"signedProtocol,omitempty"` - SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` - SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` - KeyToSign *string `json:"keyToSign,omitempty"` -} - -// AccountUpdateParameters is the parameters that can be provided when updating the storage account properties. -type AccountUpdateParameters struct { - Sku *Sku `json:"sku,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` - Identity *Identity `json:"identity,omitempty"` - *AccountPropertiesUpdateParameters `json:"properties,omitempty"` -} - -// CheckNameAvailabilityResult is the CheckNameAvailability operation response. -type CheckNameAvailabilityResult struct { - autorest.Response `json:"-"` - NameAvailable *bool `json:"nameAvailable,omitempty"` - Reason Reason `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` -} - -// CustomDomain is the custom domain assigned to this storage account. This can be set via Update. -type CustomDomain struct { - Name *string `json:"name,omitempty"` - UseSubDomain *bool `json:"useSubDomain,omitempty"` -} - -// Dimension is dimension of blobs, possiblly be blob type or access tier. -type Dimension struct { - Name *string `json:"name,omitempty"` - DisplayName *string `json:"displayName,omitempty"` -} - -// Encryption is the encryption settings on the storage account. 
-type Encryption struct { - Services *EncryptionServices `json:"services,omitempty"` - KeySource KeySource `json:"keySource,omitempty"` - KeyVaultProperties *KeyVaultProperties `json:"keyvaultproperties,omitempty"` -} - -// EncryptionService is a service that allows server-side encryption to be used. -type EncryptionService struct { - Enabled *bool `json:"enabled,omitempty"` - LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"` -} - -// EncryptionServices is a list of services that support encryption. -type EncryptionServices struct { - Blob *EncryptionService `json:"blob,omitempty"` - File *EncryptionService `json:"file,omitempty"` - Table *EncryptionService `json:"table,omitempty"` - Queue *EncryptionService `json:"queue,omitempty"` -} - -// Endpoints is the URIs that are used to perform a retrieval of a public blob, queue, or table object. -type Endpoints struct { - Blob *string `json:"blob,omitempty"` - Queue *string `json:"queue,omitempty"` - Table *string `json:"table,omitempty"` - File *string `json:"file,omitempty"` -} - -// Identity is identity for the resource. -type Identity struct { - PrincipalID *string `json:"principalId,omitempty"` - TenantID *string `json:"tenantId,omitempty"` - Type *string `json:"type,omitempty"` -} - -// IPRule is IP rule with specific IP or IP range in CIDR format. -type IPRule struct { - IPAddressOrRange *string `json:"value,omitempty"` - Action Action `json:"action,omitempty"` -} - -// KeyVaultProperties is properties of key vault. -type KeyVaultProperties struct { - KeyName *string `json:"keyname,omitempty"` - KeyVersion *string `json:"keyversion,omitempty"` - KeyVaultURI *string `json:"keyvaulturi,omitempty"` -} - -// ListAccountSasResponse is the List SAS credentials operation response. -type ListAccountSasResponse struct { - autorest.Response `json:"-"` - AccountSasToken *string `json:"accountSasToken,omitempty"` -} - -// ListServiceSasResponse is the List service SAS credentials operation response. 
-type ListServiceSasResponse struct { - autorest.Response `json:"-"` - ServiceSasToken *string `json:"serviceSasToken,omitempty"` -} - -// MetricSpecification is metric specification of operation. -type MetricSpecification struct { - Name *string `json:"name,omitempty"` - DisplayName *string `json:"displayName,omitempty"` - DisplayDescription *string `json:"displayDescription,omitempty"` - Unit *string `json:"unit,omitempty"` - Dimensions *[]Dimension `json:"dimensions,omitempty"` - AggregationType *string `json:"aggregationType,omitempty"` - FillGapWithZero *bool `json:"fillGapWithZero,omitempty"` - Category *string `json:"category,omitempty"` - ResourceIDDimensionNameOverride *string `json:"resourceIdDimensionNameOverride,omitempty"` -} - -// NetworkRuleSet is network rule set -type NetworkRuleSet struct { - Bypass Bypass `json:"bypass,omitempty"` - VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"` - IPRules *[]IPRule `json:"ipRules,omitempty"` - DefaultAction DefaultAction `json:"defaultAction,omitempty"` -} - -// Operation is storage REST API operation definition. -type Operation struct { - Name *string `json:"name,omitempty"` - Display *OperationDisplay `json:"display,omitempty"` - Origin *string `json:"origin,omitempty"` - *OperationProperties `json:"properties,omitempty"` -} - -// OperationDisplay is display metadata associated with the operation. -type OperationDisplay struct { - Provider *string `json:"provider,omitempty"` - Resource *string `json:"resource,omitempty"` - Operation *string `json:"operation,omitempty"` -} - -// OperationListResult is result of the request to list Storage operations. It contains a list of operations and a URL -// link to get the next set of results. -type OperationListResult struct { - autorest.Response `json:"-"` - Value *[]Operation `json:"value,omitempty"` -} - -// OperationProperties is properties of operation, include metric specifications. 
-type OperationProperties struct { - ServiceSpecification *ServiceSpecification `json:"serviceSpecification,omitempty"` -} - -// Resource is describes a storage resource. -type Resource struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` -} - -// Restriction is the restriction because of which SKU cannot be used. -type Restriction struct { - Type *string `json:"type,omitempty"` - Values *[]string `json:"values,omitempty"` - ReasonCode ReasonCode `json:"reasonCode,omitempty"` -} - -// ServiceSasParameters is the parameters to list service SAS credentials of a speicific resource. -type ServiceSasParameters struct { - CanonicalizedResource *string `json:"canonicalizedResource,omitempty"` - Resource SignedResource `json:"signedResource,omitempty"` - Permissions Permissions `json:"signedPermission,omitempty"` - IPAddressOrRange *string `json:"signedIp,omitempty"` - Protocols HTTPProtocol `json:"signedProtocol,omitempty"` - SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` - SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` - Identifier *string `json:"signedIdentifier,omitempty"` - PartitionKeyStart *string `json:"startPk,omitempty"` - PartitionKeyEnd *string `json:"endPk,omitempty"` - RowKeyStart *string `json:"startRk,omitempty"` - RowKeyEnd *string `json:"endRk,omitempty"` - KeyToSign *string `json:"keyToSign,omitempty"` - CacheControl *string `json:"rscc,omitempty"` - ContentDisposition *string `json:"rscd,omitempty"` - ContentEncoding *string `json:"rsce,omitempty"` - ContentLanguage *string `json:"rscl,omitempty"` - ContentType *string `json:"rsct,omitempty"` -} - -// ServiceSpecification is one property of operation, include metric specifications. 
-type ServiceSpecification struct { - MetricSpecifications *[]MetricSpecification `json:"metricSpecifications,omitempty"` -} - -// Sku is the SKU of the storage account. -type Sku struct { - Name SkuName `json:"name,omitempty"` - Tier SkuTier `json:"tier,omitempty"` - ResourceType *string `json:"resourceType,omitempty"` - Kind Kind `json:"kind,omitempty"` - Locations *[]string `json:"locations,omitempty"` - Capabilities *[]SKUCapability `json:"capabilities,omitempty"` - Restrictions *[]Restriction `json:"restrictions,omitempty"` -} - -// SKUCapability is the capability information in the specified sku, including file encryption, network acls, change -// notification, etc. -type SKUCapability struct { - Name *string `json:"name,omitempty"` - Value *string `json:"value,omitempty"` -} - -// SkuListResult is the response from the List Storage SKUs operation. -type SkuListResult struct { - autorest.Response `json:"-"` - Value *[]Sku `json:"value,omitempty"` -} - -// Usage is describes Storage Resource Usage. -type Usage struct { - Unit UsageUnit `json:"unit,omitempty"` - CurrentValue *int32 `json:"currentValue,omitempty"` - Limit *int32 `json:"limit,omitempty"` - Name *UsageName `json:"name,omitempty"` -} - -// UsageListResult is the response from the List Usages operation. -type UsageListResult struct { - autorest.Response `json:"-"` - Value *[]Usage `json:"value,omitempty"` -} - -// UsageName is the usage names that can be used; currently limited to StorageAccount. -type UsageName struct { - Value *string `json:"value,omitempty"` - LocalizedValue *string `json:"localizedValue,omitempty"` -} - -// VirtualNetworkRule is virtual Network rule. 
-type VirtualNetworkRule struct { - VirtualNetworkResourceID *string `json:"id,omitempty"` - Action Action `json:"action,omitempty"` - State State `json:"state,omitempty"` -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go deleted file mode 100644 index cc46c699792..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go +++ /dev/null @@ -1,96 +0,0 @@ -package storage - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "net/http" -) - -// OperationsClient is the the Azure Storage Management API. -type OperationsClient struct { - ManagementClient -} - -// NewOperationsClient creates an instance of the OperationsClient client. -func NewOperationsClient(subscriptionID string) OperationsClient { - return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. 
-func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { - return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List lists all of the available Storage Rest API operations. -func (client OperationsClient) List() (result OperationListResult, err error) { - req, err := client.ListPreparer() - if err != nil { - err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client OperationsClient) ListPreparer() (*http.Request, error) { - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/Microsoft.Storage/operations"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go deleted file mode 100644 index 94d4d6f83ec..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go +++ /dev/null @@ -1,100 +0,0 @@ -package storage - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "net/http" -) - -// SkusClient is the the Azure Storage Management API. -type SkusClient struct { - ManagementClient -} - -// NewSkusClient creates an instance of the SkusClient client. -func NewSkusClient(subscriptionID string) SkusClient { - return NewSkusClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewSkusClientWithBaseURI creates an instance of the SkusClient client. 
-func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient { - return SkusClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List lists the available SKUs supported by Microsoft.Storage for given subscription. -func (client SkusClient) List() (result SkuListResult, err error) { - req, err := client.ListPreparer() - if err != nil { - err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client SkusClient) ListPreparer() (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client SkusClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client SkusClient) ListResponder(resp *http.Response) (result SkuListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go deleted file mode 100755 index 682e5c16c36..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go +++ /dev/null @@ -1,100 +0,0 @@ -package storage - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "net/http" -) - -// UsageClient is the the Azure Storage Management API. -type UsageClient struct { - ManagementClient -} - -// NewUsageClient creates an instance of the UsageClient client. -func NewUsageClient(subscriptionID string) UsageClient { - return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewUsageClientWithBaseURI creates an instance of the UsageClient client. 
-func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient { - return UsageClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List gets the current usage count and the limit for the resources under the subscription. -func (client UsageClient) List() (result UsageListResult, err error) { - req, err := client.ListPreparer() - if err != nil { - err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client UsageClient) ListPreparer() (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-06-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client UsageClient) ListResponder(resp *http.Response) (result UsageListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go deleted file mode 100755 index 467102d5973..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go +++ /dev/null @@ -1,28 +0,0 @@ -package storage - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/v11.0.0-beta arm-storage/2017-06-01" -} - -// Version returns the semantic version (see http://semver.org) of the client. 
-func Version() string { - return "v11.0.0-beta" -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md deleted file mode 100644 index 6dc348e02af..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Azure Storage SDK for Go - -The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform REST operations against the [Azure Storage Service](https://docs.microsoft.com/en-us/azure/storage/). To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](https://github.com/Azure/azure-sdk-for-go/tree/master/management/storageservice) package. - -This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/). - -# Getting Started - - 1. Go get the SDK `go get -u github.com/Azure/azure-sdk-for=go/storage` - 1. If you don't already have one, [create a Storage Account](https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account). - - Take note of your Azure Storage Account Name and Azure Storage Account Key. They'll both be necessary for using this library. - - This option is production ready, but can also be used for development. - 1. (Optional, Windows only) Download and start the [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/). - 1. Checkout our existing [samples](https://github.com/Azure-Samples?q=Storage&language=go). - -# Contributing - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - -When contributing, please conform to the following practices: - - Run [gofmt](https://golang.org/cmd/gofmt/) to use standard go formatting. - - Run [golint](https://github.com/golang/lint) to conform to standard naming conventions. - - Run [go vet](https://golang.org/cmd/vet/) to catch common Go mistakes. - - Use [GoASTScanner/gas](https://github.com/GoASTScanner/gas) to ensure there are no common security violations in your contribution. - - Run [go test](https://golang.org/cmd/go/#hdr-Test_packages) to catch possible bugs in the code: `go test ./storage/...`. - - This project uses HTTP recordings for testing. - - The recorder should be attached to the client before calling the functions to test and later stopped. - - If you updated an existing test, its recording might need to be updated. Run `go test ./storage/... -ow -check.f TestName` to rerecord the test. - - Important note: all HTTP requests in the recording must be unique: different bodies, headers (`User-Agent`, `Authorization` and `Date` or `x-ms-date` headers are ignored), URLs and methods. 
As opposed to the example above, the following test is not suitable for recording: - -``` go -func (s *StorageQueueSuite) TestQueueExists(c *chk.C) { -cli := getQueueClient(c) -rec := cli.client.appendRecorder(c) -defer rec.Stop() - -queue := cli.GetQueueReference(queueName(c)) -ok, err := queue.Exists() -c.Assert(err, chk.IsNil) -c.Assert(ok, chk.Equals, false) - -c.Assert(queue.Create(nil), chk.IsNil) -defer queue.Delete(nil) - -ok, err = queue.Exists() // This is the very same request as the one 5 lines above -// The test replayer gets confused and the test fails in the last line -c.Assert(err, chk.IsNil) -c.Assert(ok, chk.Equals, true) -} -``` - - - On the other side, this test does not repeat requests: the URLs are different. - -``` go -func (s *StorageQueueSuite) TestQueueExists(c *chk.C) { -cli := getQueueClient(c) -rec := cli.client.appendRecorder(c) -defer rec.Stop() - -queue1 := cli.GetQueueReference(queueName(c, "nonexistent")) -ok, err := queue1.Exists() -c.Assert(err, chk.IsNil) -c.Assert(ok, chk.Equals, false) - -queue2 := cli.GetQueueReference(queueName(c, "exisiting")) -c.Assert(queue2.Create(nil), chk.IsNil) -defer queue2.Delete(nil) - -ok, err = queue2.Exists() -c.Assert(err, chk.IsNil) -c.Assert(ok, chk.Equals, true) -} -``` \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go deleted file mode 100644 index 8b5b96d4884..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go +++ /dev/null @@ -1,91 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "time" -) - -// PutAppendBlob initializes an empty append blob with specified name. An -// append blob must be created using this method before appending blocks. -// -// See CreateBlockBlobFromReader for more info on creating blobs. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) PutAppendBlob(options *PutBlobOptions) error { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeAppend) -} - -// AppendBlockOptions includes the options for an append block operation -type AppendBlockOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - MaxSize *uint `header:"x-ms-blob-condition-maxsize"` - AppendPosition *uint `header:"x-ms-blob-condition-appendpos"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time 
`header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` - ContentMD5 bool -} - -// AppendBlock appends a block to an append blob. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block -func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { - params := url.Values{"comp": {"appendblock"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - if options.ContentMD5 { - md5sum := md5.Sum(chunk) - headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:]) - } - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeAppend) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go deleted file mode 100644 index 76794c30518..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go +++ /dev/null @@ -1,246 +0,0 @@ -// Package storage provides clients for Microsoft Azure Storage Services. -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "net/url" - "sort" - "strings" -) - -// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services - -type authentication string - -const ( - sharedKey authentication = "sharedKey" - sharedKeyForTable authentication = "sharedKeyTable" - sharedKeyLite authentication = "sharedKeyLite" - sharedKeyLiteForTable authentication = "sharedKeyLiteTable" - - // headers - headerAcceptCharset = "Accept-Charset" - headerAuthorization = "Authorization" - headerContentLength = "Content-Length" - headerDate = "Date" - headerXmsDate = "x-ms-date" - headerXmsVersion = "x-ms-version" - headerContentEncoding = "Content-Encoding" - headerContentLanguage = "Content-Language" - headerContentType = "Content-Type" - headerContentMD5 = "Content-MD5" - headerIfModifiedSince = "If-Modified-Since" - headerIfMatch = "If-Match" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerRange = "Range" - headerDataServiceVersion = "DataServiceVersion" - headerMaxDataServiceVersion = "MaxDataServiceVersion" - headerContentTransferEncoding = "Content-Transfer-Encoding" -) - -func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { - if !c.sasClient { - authHeader, err := c.getSharedKey(verb, url, headers, auth) - if err != nil { - return nil, err - } - headers[headerAuthorization] = authHeader - } - return headers, nil -} - -func (c *Client) getSharedKey(verb, url string, headers 
map[string]string, auth authentication) (string, error) { - canRes, err := c.buildCanonicalizedResource(url, auth, false) - if err != nil { - return "", err - } - - canString, err := buildCanonicalizedString(verb, headers, canRes, auth) - if err != nil { - return "", err - } - return c.createAuthorizationHeader(canString, auth), nil -} - -func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) { - errMsg := "buildCanonicalizedResource error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := bytes.NewBufferString("") - if c.accountName != StorageEmulatorAccountName || !sas { - cr.WriteString("/") - cr.WriteString(c.getCanonicalizedAccountName()) - } - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. - // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr.WriteString(u.EscapedPath()) - } - - params, err := url.ParseQuery(u.RawQuery) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 - if auth == sharedKey { - if len(params) > 0 { - cr.WriteString("\n") - - keys := []string{} - for key := range params { - keys = append(keys, key) - } - sort.Strings(keys) - - completeParams := []string{} - for _, key := range keys { - if len(params[key]) > 1 { - sort.Strings(params[key]) - } - - completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) - } - cr.WriteString(strings.Join(completeParams, "\n")) - } - } else { - // search for "comp" parameter, if exists then add it to canonicalizedresource - if v, ok := params["comp"]; ok { - cr.WriteString("?comp=" + v[0]) - } - } - - return string(cr.Bytes()), nil -} - -func (c *Client) getCanonicalizedAccountName() string { - // 
since we may be trying to access a secondary storage account, we need to - // remove the -secondary part of the storage name - return strings.TrimSuffix(c.accountName, "-secondary") -} - -func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) { - contentLength := headers[headerContentLength] - if contentLength == "0" { - contentLength = "" - } - date := headers[headerDate] - if v, ok := headers[headerXmsDate]; ok { - if auth == sharedKey || auth == sharedKeyLite { - date = "" - } else { - date = v - } - } - var canString string - switch auth { - case sharedKey: - canString = strings.Join([]string{ - verb, - headers[headerContentEncoding], - headers[headerContentLanguage], - contentLength, - headers[headerContentMD5], - headers[headerContentType], - date, - headers[headerIfModifiedSince], - headers[headerIfMatch], - headers[headerIfNoneMatch], - headers[headerIfUnmodifiedSince], - headers[headerRange], - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case sharedKeyForTable: - canString = strings.Join([]string{ - verb, - headers[headerContentMD5], - headers[headerContentType], - date, - canonicalizedResource, - }, "\n") - case sharedKeyLite: - canString = strings.Join([]string{ - verb, - headers[headerContentMD5], - headers[headerContentType], - date, - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case sharedKeyLiteForTable: - canString = strings.Join([]string{ - date, - canonicalizedResource, - }, "\n") - default: - return "", fmt.Errorf("%s authentication is not supported yet", auth) - } - return canString, nil -} - -func buildCanonicalizedHeader(headers map[string]string) string { - cm := make(map[string]string) - - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - if strings.HasPrefix(headerName, "x-ms-") { - cm[headerName] = v - } - } - - if len(cm) == 0 { - return "" - } - - keys := []string{} 
- for key := range cm { - keys = append(keys, key) - } - - sort.Strings(keys) - - ch := bytes.NewBufferString("") - - for _, key := range keys { - ch.WriteString(key) - ch.WriteRune(':') - ch.WriteString(cm[key]) - ch.WriteRune('\n') - } - - return strings.TrimSuffix(string(ch.Bytes()), "\n") -} - -func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string { - signature := c.computeHmac256(canonicalizedString) - var key string - switch auth { - case sharedKey, sharedKeyForTable: - key = "SharedKey" - case sharedKeyLite, sharedKeyLiteForTable: - key = "SharedKeyLite" - } - return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go deleted file mode 100644 index a9d3cfccb68..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ /dev/null @@ -1,652 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// A Blob is an entry in BlobListResponse. 
-type Blob struct { - Container *Container - Name string `xml:"Name"` - Snapshot time.Time `xml:"Snapshot"` - Properties BlobProperties `xml:"Properties"` - Metadata BlobMetadata `xml:"Metadata"` -} - -// PutBlobOptions includes the options any put blob operation -// (page, block, append) -type PutBlobOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - Origin string `header:"Origin"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// BlobMetadata is a set of custom name/value pairs. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx -type BlobMetadata map[string]string - -type blobMetadataEntries struct { - Entries []blobMetadataEntry `xml:",any"` -} -type blobMetadataEntry struct { - XMLName xml.Name - Value string `xml:",chardata"` -} - -// UnmarshalXML converts the xml:Metadata into Metadata map -func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var entries blobMetadataEntries - if err := d.DecodeElement(&entries, &start); err != nil { - return err - } - for _, entry := range entries.Entries { - if *bm == nil { - *bm = make(BlobMetadata) - } - (*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value - } - return nil -} - -// MarshalXML implements the xml.Marshaler interface. It encodes -// metadata name/value pairs as they would appear in an Azure -// ListBlobs response. 
-func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - entries := make([]blobMetadataEntry, 0, len(bm)) - for k, v := range bm { - entries = append(entries, blobMetadataEntry{ - XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)}, - Value: v, - }) - } - return enc.EncodeElement(blobMetadataEntries{ - Entries: entries, - }, start) -} - -// BlobProperties contains various properties of a blob -// returned in various endpoints like ListBlobs or GetBlobProperties. -type BlobProperties struct { - LastModified TimeRFC1123 `xml:"Last-Modified"` - Etag string `xml:"Etag"` - ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"` - ContentLength int64 `xml:"Content-Length"` - ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"` - ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"` - CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"` - ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"` - ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"` - BlobType BlobType `xml:"BlobType"` - SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` - CopyID string `xml:"CopyId"` - CopyStatus string `xml:"CopyStatus"` - CopySource string `xml:"CopySource"` - CopyProgress string `xml:"CopyProgress"` - CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"` - CopyStatusDescription string `xml:"CopyStatusDescription"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` - ServerEncrypted bool `xml:"ServerEncrypted"` - IncrementalCopy bool `xml:"IncrementalCopy"` -} - -// BlobType defines the type of the Azure Blob. 
-type BlobType string - -// Types of page blobs -const ( - BlobTypeBlock BlobType = "BlockBlob" - BlobTypePage BlobType = "PageBlob" - BlobTypeAppend BlobType = "AppendBlob" -) - -func (b *Blob) buildPath() string { - return b.Container.buildPath() + "/" + b.Name -} - -// Exists returns true if a blob with given name exists on the specified -// container of the storage account. -func (b *Blob) Exists() (bool, error) { - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil) - headers := b.Container.bsc.client.getStandardHeaders() - resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// GetURL gets the canonical URL to the blob with the specified name in the -// specified container. -// This method does not create a publicly accessible URL if the blob or container -// is private and this method does not check if the blob exists. 
-func (b *Blob) GetURL() string { - container := b.Container.Name - if container == "" { - container = "$root" - } - return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil) -} - -// GetBlobRangeOptions includes the options for a get blob range operation -type GetBlobRangeOptions struct { - Range *BlobRange - GetRangeContentMD5 bool - *GetBlobOptions -} - -// GetBlobOptions includes the options for a get blob operation -type GetBlobOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - Origin string `header:"Origin"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// BlobRange represents the bytes range to be get -type BlobRange struct { - Start uint64 - End uint64 -} - -func (br BlobRange) String() string { - if br.End == 0 { - return fmt.Sprintf("bytes=%d-", br.Start) - } - return fmt.Sprintf("bytes=%d-%d", br.Start, br.End) -} - -// Get returns a stream to read the blob. Caller must call both Read and Close() -// to correctly close the underlying connection. -// -// See the GetRange method for use with a Range header. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob -func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) { - rangeOptions := GetBlobRangeOptions{ - GetBlobOptions: options, - } - resp, err := b.getRange(&rangeOptions) - if err != nil { - return nil, err - } - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - if err := b.writeProperties(resp.headers, true); err != nil { - return resp.body, err - } - return resp.body, nil -} - -// GetRange reads the specified range of a blob to a stream. 
The bytesRange -// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. -// Caller must call both Read and Close()// to correctly close the underlying -// connection. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob -func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) { - resp, err := b.getRange(options) - if err != nil { - return nil, err - } - - if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil { - return nil, err - } - // Content-Length header should not be updated, as the service returns the range length - // (which is not alwys the full blob length) - if err := b.writeProperties(resp.headers, false); err != nil { - return resp.body, err - } - return resp.body, nil -} - -func (b *Blob) getRange(options *GetBlobRangeOptions) (*storageResponse, error) { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - if options.Range != nil { - headers["Range"] = options.Range.String() - if options.GetRangeContentMD5 { - headers["x-ms-range-get-content-md5"] = "true" - } - } - if options.GetBlobOptions != nil { - headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions)) - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - } - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return nil, err - } - return resp, err -} - -// SnapshotOptions includes the options for a snapshot blob operation -type SnapshotOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - 
RequestID string `header:"x-ms-client-request-id"` -} - -// CreateSnapshot creates a snapshot for a blob -// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx -func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) { - params := url.Values{"comp": {"snapshot"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil || resp == nil { - return nil, err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { - return nil, err - } - - snapshotResponse := resp.headers.Get(http.CanonicalHeaderKey("x-ms-snapshot")) - if snapshotResponse != "" { - snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse) - if err != nil { - return nil, err - } - return &snapshotTimestamp, nil - } - - return nil, errors.New("Snapshot not created") -} - -// GetBlobPropertiesOptions includes the options for a get blob properties operation -type GetBlobPropertiesOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetProperties provides various information about the specified blob. 
-// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx -func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return err - } - return b.writeProperties(resp.headers, true) -} - -func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error { - var err error - - contentLength := b.Properties.ContentLength - if includeContentLen { - contentLengthStr := h.Get("Content-Length") - if contentLengthStr != "" { - contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) - if err != nil { - return err - } - } - } - - var sequenceNum int64 - sequenceNumStr := h.Get("x-ms-blob-sequence-number") - if sequenceNumStr != "" { - sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) - if err != nil { - return err - } - } - - lastModified, err := getTimeFromHeaders(h, "Last-Modified") - if err != nil { - return err - } - - copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time") - if err != nil { - return err - } - - b.Properties = BlobProperties{ - LastModified: TimeRFC1123(*lastModified), - Etag: h.Get("Etag"), - ContentMD5: h.Get("Content-MD5"), - ContentLength: contentLength, - ContentEncoding: h.Get("Content-Encoding"), - ContentType: h.Get("Content-Type"), - ContentDisposition: h.Get("Content-Disposition"), - CacheControl: h.Get("Cache-Control"), - ContentLanguage: h.Get("Content-Language"), - 
SequenceNumber: sequenceNum, - CopyCompletionTime: TimeRFC1123(*copyCompletionTime), - CopyStatusDescription: h.Get("x-ms-copy-status-description"), - CopyID: h.Get("x-ms-copy-id"), - CopyProgress: h.Get("x-ms-copy-progress"), - CopySource: h.Get("x-ms-copy-source"), - CopyStatus: h.Get("x-ms-copy-status"), - BlobType: BlobType(h.Get("x-ms-blob-type")), - LeaseStatus: h.Get("x-ms-lease-status"), - LeaseState: h.Get("x-ms-lease-state"), - } - b.writeMetadata(h) - return nil -} - -// SetBlobPropertiesOptions contains various properties of a blob and is an entry -// in SetProperties -type SetBlobPropertiesOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - Origin string `header:"Origin"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - SequenceNumberAction *SequenceNumberAction - RequestID string `header:"x-ms-client-request-id"` -} - -// SequenceNumberAction defines how the blob's sequence number should be modified -type SequenceNumberAction string - -// Options for sequence number action -const ( - SequenceNumberActionMax SequenceNumberAction = "max" - SequenceNumberActionUpdate SequenceNumberAction = "update" - SequenceNumberActionIncrement SequenceNumberAction = "increment" -) - -// SetProperties replaces the BlobHeaders for the specified blob. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetBlobProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties -func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error { - params := url.Values{"comp": {"properties"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - if b.Properties.BlobType == BlobTypePage { - headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength)) - if options != nil && options.SequenceNumberAction != nil { - headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction)) - if *options.SequenceNumberAction != SequenceNumberActionIncrement { - headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber)) - } - } - } - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// SetBlobMetadataOptions includes the options for a set blob metadata operation -type SetBlobMetadataOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// SetMetadata replaces the metadata for the specified blob. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetBlobMetadata. 
HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error { - params := url.Values{"comp": {"metadata"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// GetBlobMetadataOptions includes the options for a get blob metadata operation -type GetBlobMetadataOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetMetadata returns all user-defined metadata for the specified blob. -// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error { - params := url.Values{"comp": {"metadata"}} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return err - } - - b.writeMetadata(resp.headers) - return nil -} - -func (b *Blob) writeMetadata(h http.Header) { - metadata := make(map[string]string) - for k, v := range h { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". 
- k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["lol"] = content of the last X-Ms-Meta-Lol header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - - b.Metadata = BlobMetadata(metadata) -} - -// DeleteBlobOptions includes the options for a delete blob operation -type DeleteBlobOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - DeleteSnapshots *bool - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// Delete deletes the given blob from the specified container. -// If the blob does not exists at the time of the Delete Blob operation, it -// returns error. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob -func (b *Blob) Delete(options *DeleteBlobOptions) error { - resp, err := b.delete(options) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// DeleteIfExists deletes the given blob from the specified container If the -// blob is deleted with this call, returns true. Otherwise returns false. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob -func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) { - resp, err := b.delete(options) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (b *Blob) delete(options *DeleteBlobOptions) (*storageResponse, error) { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - if options.DeleteSnapshots != nil { - if *options.DeleteSnapshots { - headers["x-ms-delete-snapshots"] = "include" - } else { - headers["x-ms-delete-snapshots"] = "only" - } - } - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth) -} - -// helper method to construct the path to either a blob or container -func pathForResource(container, name string) string { - if name != "" { - return fmt.Sprintf("/%s/%s", container, name) - } - return fmt.Sprintf("/%s", container) -} - -func (b *Blob) respondCreation(resp *storageResponse, bt BlobType) error { - readAndCloseBody(resp.body) - err := checkRespCode(resp.statusCode, []int{http.StatusCreated}) - if err != nil { - return err - } - b.Properties.BlobType = bt - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go deleted file mode 100644 index e11af77441e..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go +++ /dev/null @@ -1,170 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// 
-// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/url" - "strings" - "time" -) - -// OverrideHeaders defines overridable response heaedrs in -// a request using a SAS URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type OverrideHeaders struct { - CacheControl string - ContentDisposition string - ContentEncoding string - ContentLanguage string - ContentType string -} - -// BlobSASOptions are options to construct a blob SAS -// URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type BlobSASOptions struct { - BlobServiceSASPermissions - OverrideHeaders - SASOptions -} - -// BlobServiceSASPermissions includes the available permissions for -// blob service SAS URI. -type BlobServiceSASPermissions struct { - Read bool - Add bool - Create bool - Write bool - Delete bool -} - -func (p BlobServiceSASPermissions) buildString() string { - permissions := "" - if p.Read { - permissions += "r" - } - if p.Add { - permissions += "a" - } - if p.Create { - permissions += "c" - } - if p.Write { - permissions += "w" - } - if p.Delete { - permissions += "d" - } - return permissions -} - -// GetSASURI creates an URL to the blob which contains the Shared -// Access Signature with the specified options. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) { - uri := b.GetURL() - signedResource := "b" - canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true) - if err != nil { - return "", err - } - - permissions := options.BlobServiceSASPermissions.buildString() - return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) -} - -func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) { - start := "" - if options.Start != (time.Time{}) { - start = options.Start.UTC().Format(time.RFC3339) - } - - expiry := options.Expiry.UTC().Format(time.RFC3339) - - // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). 
- canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err := url.QueryUnescape(canonicalizedResource) - if err != nil { - return "", err - } - - protocols := "" - if options.UseHTTPS { - protocols = "https" - } - stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, headers) - if err != nil { - return "", err - } - - sig := c.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {c.apiVersion}, - "se": {expiry}, - "sr": {signedResource}, - "sp": {permissions}, - "sig": {sig}, - } - - if c.apiVersion >= "2015-04-05" { - if protocols != "" { - sasParams.Add("spr", protocols) - } - if options.IP != "" { - sasParams.Add("sip", options.IP) - } - } - - // Add override response hedaers - addQueryParameter(sasParams, "rscc", headers.CacheControl) - addQueryParameter(sasParams, "rscd", headers.ContentDisposition) - addQueryParameter(sasParams, "rsce", headers.ContentEncoding) - addQueryParameter(sasParams, "rscl", headers.ContentLanguage) - addQueryParameter(sasParams, "rsct", headers.ContentType) - - sasURL, err := url.Parse(uri) - if err != nil { - return "", err - } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion string, headers OverrideHeaders) (string, error) { - rscc := headers.CacheControl - rscd := headers.ContentDisposition - rsce := headers.ContentEncoding - rscl := headers.ContentLanguage - rsct := headers.ContentType - - if signedVersion >= "2015-02-21" { - canonicalizedResource = "/blob" + canonicalizedResource - } - - // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 - if signedVersion >= "2015-04-05" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, 
signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - if signedVersion >= "2013-08-15" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go deleted file mode 100644 index 8fe21b0cfd9..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go +++ /dev/null @@ -1,126 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" - "net/url" - "strconv" - "strings" -) - -// BlobStorageClient contains operations for Microsoft Azure Blob Storage -// Service. -type BlobStorageClient struct { - client Client - auth authentication -} - -// GetServiceProperties gets the properties of your storage account's blob service. 
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties -func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) { - return b.client.getServiceProperties(blobServiceName, b.auth) -} - -// SetServiceProperties sets the properties of your storage account's blob service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties -func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error { - return b.client.setServiceProperties(props, blobServiceName, b.auth) -} - -// ListContainersParameters defines the set of customizable parameters to make a -// List Containers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ListContainersParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -// GetContainerReference returns a Container object for the specified container name. -func (b *BlobStorageClient) GetContainerReference(name string) *Container { - return &Container{ - bsc: b, - Name: name, - } -} - -// GetContainerReferenceFromSASURI returns a Container object for the specified -// container SASURI -func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) { - path := strings.Split(sasuri.Path, "/") - if len(path) <= 1 { - return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String()) - } - cli := newSASClient().GetBlobService() - return &Container{ - bsc: &cli, - Name: path[1], - sasuri: sasuri, - }, nil -} - -// ListContainers returns the list of containers in a storage account along with -// pagination token and other response details. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, "", q) - headers := b.client.getStandardHeaders() - - var out ContainerListResponse - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) - if err != nil { - return nil, err - } - - // assign our client to the newly created Container objects - for i := range out.Containers { - out.Containers[i].bsc = &b - } - return &out, err -} - -func (p ListContainersParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - if p.Timeout != 0 { - out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) - } - - return out -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go deleted file mode 100644 index e0176d664ad..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go +++ /dev/null @@ -1,270 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// BlockListType is used to filter out types of blocks in a Get Blocks List call -// for a block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all -// block types. -type BlockListType string - -// Filters for listing blocks in block blobs -const ( - BlockListTypeAll BlockListType = "all" - BlockListTypeCommitted BlockListType = "committed" - BlockListTypeUncommitted BlockListType = "uncommitted" -) - -// Maximum sizes (per REST API) for various concepts -const ( - MaxBlobBlockSize = 100 * 1024 * 1024 - MaxBlobPageSize = 4 * 1024 * 1024 -) - -// BlockStatus defines states a block for a block blob can -// be in. -type BlockStatus string - -// List of statuses that can be used to refer to a block in a block list -const ( - BlockStatusUncommitted BlockStatus = "Uncommitted" - BlockStatusCommitted BlockStatus = "Committed" - BlockStatusLatest BlockStatus = "Latest" -) - -// Block is used to create Block entities for Put Block List -// call. -type Block struct { - ID string - Status BlockStatus -} - -// BlockListResponse contains the response fields from Get Block List call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -type BlockListResponse struct { - XMLName xml.Name `xml:"BlockList"` - CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` - UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` -} - -// BlockResponse contains the block information returned -// in the GetBlockListCall. -type BlockResponse struct { - Name string `xml:"Name"` - Size int64 `xml:"Size"` -} - -// CreateBlockBlob initializes an empty block blob with no blocks. -// -// See CreateBlockBlobFromReader for more info on creating blobs. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error { - return b.CreateBlockBlobFromReader(nil, options) -} - -// CreateBlockBlobFromReader initializes a block blob using data from -// reader. Size must be the number of bytes read from reader. To -// create an empty blob, use size==0 and reader==nil. -// -// Any headers set in blob.Properties or metadata in blob.Metadata -// will be set on the blob. -// -// The API rejects requests with size > 256 MiB (but this limit is not -// checked by the SDK). To write a larger blob, use CreateBlockBlob, -// PutBlock, and PutBlockList. -// -// To create a blob from scratch, call container.GetBlobReference() to -// get an empty blob, fill in blob.Properties and blob.Metadata as -// appropriate then call this method. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - - headers["Content-Length"] = "0" - var n int64 - var err error - if blob != nil { - type lener interface { - Len() int - } - // TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc. 
- if l, ok := blob.(lener); ok { - n = int64(l.Len()) - } else { - var buf bytes.Buffer - n, err = io.Copy(&buf, blob) - if err != nil { - return err - } - blob = &buf - } - - headers["Content-Length"] = strconv.FormatInt(n, 10) - } - b.Properties.ContentLength = n - - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeBlock) -} - -// PutBlockOptions includes the options for a put block operation -type PutBlockOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - ContentMD5 string `header:"Content-MD5"` - RequestID string `header:"x-ms-client-request-id"` -} - -// PutBlock saves the given data chunk to the specified block blob with -// given ID. -// -// The API rejects chunks larger than 100 MiB (but this limit is not -// checked by the SDK). -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block -func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error { - return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options) -} - -// PutBlockWithLength saves the given data stream of exactly specified size to -// the block blob with given ID. It is an alternative to PutBlocks where data -// comes as stream but the length is known in advance. -// -// The API rejects requests with size > 100 MiB (but this limit is not -// checked by the SDK). 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block -func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error { - query := url.Values{ - "comp": {"block"}, - "blockid": {blockID}, - } - headers := b.Container.bsc.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", size) - - if options != nil { - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeBlock) -} - -// PutBlockListOptions includes the options for a put block list operation -type PutBlockListOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// PutBlockList saves list of blocks to the specified block blob. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List -func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error { - params := url.Values{"comp": {"blocklist"}} - blockListXML := prepareBlockListRequest(blocks) - headers := b.Container.bsc.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetBlockListOptions includes the options for a get block list operation -type GetBlockListOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetBlockList retrieves list of blocks in the specified block blob. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List -func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) { - params := url.Values{ - "comp": {"blocklist"}, - "blocklisttype": {string(blockType)}, - } - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - var out BlockListResponse - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go deleted file mode 100644 index 8f6cd95da71..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ /dev/null @@ -1,878 +0,0 @@ -// Package storage provides clients for Microsoft Azure Storage Services. -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "io" - "io/ioutil" - "mime" - "mime/multipart" - "net/http" - "net/url" - "regexp" - "runtime" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" -) - -const ( - // DefaultBaseURL is the domain name used for storage requests in the - // public cloud when a default client is created. - DefaultBaseURL = "core.windows.net" - - // DefaultAPIVersion is the Azure Storage API version string used when a - // basic client is created. - DefaultAPIVersion = "2016-05-31" - - defaultUseHTTPS = true - defaultRetryAttempts = 5 - defaultRetryDuration = time.Second * 5 - - // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator - StorageEmulatorAccountName = "devstoreaccount1" - - // StorageEmulatorAccountKey is the the fixed storage account used by Azure Storage Emulator - StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" - - blobServiceName = "blob" - tableServiceName = "table" - queueServiceName = "queue" - fileServiceName = "file" - - storageEmulatorBlob = "127.0.0.1:10000" - storageEmulatorTable = "127.0.0.1:10002" - storageEmulatorQueue = "127.0.0.1:10001" - - userAgentHeader = "User-Agent" - - userDefinedMetadataHeaderPrefix = "x-ms-meta-" -) - -var ( - validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$") - defaultValidStatusCodes = []int{ - http.StatusRequestTimeout, // 408 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } -) - -// Sender sends a request -type Sender interface { - Send(*Client, *http.Request) (*http.Response, error) -} - -// DefaultSender is the default sender for the client. It implements -// an automatic retry strategy. 
-type DefaultSender struct { - RetryAttempts int - RetryDuration time.Duration - ValidStatusCodes []int - attempts int // used for testing -} - -// Send is the default retry strategy in the client -func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) { - rr := autorest.NewRetriableRequest(req) - for attempts := 0; attempts < ds.RetryAttempts; attempts++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = c.HTTPClient.Do(rr.Request()) - if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) { - return resp, err - } - autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel) - ds.attempts = attempts - } - ds.attempts++ - return resp, err -} - -// Client is the object that needs to be constructed to perform -// operations on the storage account. -type Client struct { - // HTTPClient is the http.Client used to initiate API - // requests. http.DefaultClient is used when creating a - // client. - HTTPClient *http.Client - - // Sender is an interface that sends the request. Clients are - // created with a DefaultSender. The DefaultSender has an - // automatic retry strategy built in. The Sender can be customized. - Sender Sender - - accountName string - accountKey []byte - useHTTPS bool - UseSharedKeyLite bool - baseURL string - apiVersion string - userAgent string - sasClient bool - accountSASToken url.Values -} - -type storageResponse struct { - statusCode int - headers http.Header - body io.ReadCloser -} - -type odataResponse struct { - storageResponse - odata odataErrorWrapper -} - -// AzureStorageServiceError contains fields of the error response from -// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx -// Some fields might be specific to certain calls. 
-type AzureStorageServiceError struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` - QueryParameterName string `xml:"QueryParameterName"` - QueryParameterValue string `xml:"QueryParameterValue"` - Reason string `xml:"Reason"` - Lang string - StatusCode int - RequestID string - Date string - APIVersion string -} - -type odataErrorMessage struct { - Lang string `json:"lang"` - Value string `json:"value"` -} - -type odataError struct { - Code string `json:"code"` - Message odataErrorMessage `json:"message"` -} - -type odataErrorWrapper struct { - Err odataError `json:"odata.error"` -} - -// UnexpectedStatusCodeError is returned when a storage service responds with neither an error -// nor with an HTTP status code indicating success. -type UnexpectedStatusCodeError struct { - allowed []int - got int -} - -func (e UnexpectedStatusCodeError) Error() string { - s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } - - got := s(e.got) - expected := []string{} - for _, v := range e.allowed { - expected = append(expected, s(v)) - } - return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or ")) -} - -// Got is the actual status code returned by Azure. -func (e UnexpectedStatusCodeError) Got() int { - return e.got -} - -// NewBasicClient constructs a Client with given storage service name and -// key. -func NewBasicClient(accountName, accountKey string) (Client, error) { - if accountName == StorageEmulatorAccountName { - return NewEmulatorClient() - } - return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) -} - -// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and -// key in the referenced cloud. 
-func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) { - if accountName == StorageEmulatorAccountName { - return NewEmulatorClient() - } - return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS) -} - -//NewEmulatorClient contructs a Client intended to only work with Azure -//Storage Emulator -func NewEmulatorClient() (Client, error) { - return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false) -} - -// NewClient constructs a Client. This should be used if the caller wants -// to specify whether to use HTTPS, a specific REST API version or a custom -// storage endpoint than Azure Public Cloud. -func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { - var c Client - if !IsValidStorageAccount(accountName) { - return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName) - } else if accountKey == "" { - return c, fmt.Errorf("azure: account key required") - } else if serviceBaseURL == "" { - return c, fmt.Errorf("azure: base storage service url required") - } - - key, err := base64.StdEncoding.DecodeString(accountKey) - if err != nil { - return c, fmt.Errorf("azure: malformed storage account key: %v", err) - } - - c = Client{ - HTTPClient: http.DefaultClient, - accountName: accountName, - accountKey: key, - useHTTPS: useHTTPS, - baseURL: serviceBaseURL, - apiVersion: apiVersion, - sasClient: false, - UseSharedKeyLite: false, - Sender: &DefaultSender{ - RetryAttempts: defaultRetryAttempts, - ValidStatusCodes: defaultValidStatusCodes, - RetryDuration: defaultRetryDuration, - }, - } - c.userAgent = c.getDefaultUserAgent() - return c, nil -} - -// IsValidStorageAccount checks if the storage account name is valid. 
-// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account -func IsValidStorageAccount(account string) bool { - return validStorageAccount.MatchString(account) -} - -// NewAccountSASClient contructs a client that uses accountSAS authorization -// for its operations. -func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client { - c := newSASClient() - c.accountSASToken = token - c.accountName = account - c.baseURL = env.StorageEndpointSuffix - - // Get API version and protocol from token - c.apiVersion = token.Get("sv") - c.useHTTPS = token.Get("spr") == "https" - return c -} - -func newSASClient() Client { - c := Client{ - HTTPClient: http.DefaultClient, - apiVersion: DefaultAPIVersion, - sasClient: true, - Sender: &DefaultSender{ - RetryAttempts: defaultRetryAttempts, - ValidStatusCodes: defaultValidStatusCodes, - RetryDuration: defaultRetryDuration, - }, - } - c.userAgent = c.getDefaultUserAgent() - return c -} - -func (c Client) isServiceSASClient() bool { - return c.sasClient && c.accountSASToken == nil -} - -func (c Client) isAccountSASClient() bool { - return c.sasClient && c.accountSASToken != nil -} - -func (c Client) getDefaultUserAgent() string { - return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - sdkVersion, - c.apiVersion, - ) -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent) -} - -// protectUserAgent is used in funcs that include extraheaders as a parameter. -// It prevents the User-Agent header to be overwritten, instead if it happens to -// be present, it gets added to the current User-Agent. 
Use it before getStandardHeaders -func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string { - if v, ok := extraheaders[userAgentHeader]; ok { - c.AddToUserAgent(v) - delete(extraheaders, userAgentHeader) - } - return extraheaders -} - -func (c Client) getBaseURL(service string) *url.URL { - scheme := "http" - if c.useHTTPS { - scheme = "https" - } - host := "" - if c.accountName == StorageEmulatorAccountName { - switch service { - case blobServiceName: - host = storageEmulatorBlob - case tableServiceName: - host = storageEmulatorTable - case queueServiceName: - host = storageEmulatorQueue - } - } else { - host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) - } - - return &url.URL{ - Scheme: scheme, - Host: host, - } -} - -func (c Client) getEndpoint(service, path string, params url.Values) string { - u := c.getBaseURL(service) - - // API doesn't accept path segments not starting with '/' - if !strings.HasPrefix(path, "/") { - path = fmt.Sprintf("/%v", path) - } - - if c.accountName == StorageEmulatorAccountName { - path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path) - } - - u.Path = path - u.RawQuery = params.Encode() - return u.String() -} - -// AccountSASTokenOptions includes options for constructing -// an account SAS token. -// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas -type AccountSASTokenOptions struct { - APIVersion string - Services Services - ResourceTypes ResourceTypes - Permissions Permissions - Start time.Time - Expiry time.Time - IP string - UseHTTPS bool -} - -// Services specify services accessible with an account SAS. -type Services struct { - Blob bool - Queue bool - Table bool - File bool -} - -// ResourceTypes specify the resources accesible with an -// account SAS. -type ResourceTypes struct { - Service bool - Container bool - Object bool -} - -// Permissions specifies permissions for an accountSAS. 
-type Permissions struct { - Read bool - Write bool - Delete bool - List bool - Add bool - Create bool - Update bool - Process bool -} - -// GetAccountSASToken creates an account SAS token -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas -func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) { - if options.APIVersion == "" { - options.APIVersion = c.apiVersion - } - - if options.APIVersion < "2015-04-05" { - return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion) - } - - // build services string - services := "" - if options.Services.Blob { - services += "b" - } - if options.Services.Queue { - services += "q" - } - if options.Services.Table { - services += "t" - } - if options.Services.File { - services += "f" - } - - // build resources string - resources := "" - if options.ResourceTypes.Service { - resources += "s" - } - if options.ResourceTypes.Container { - resources += "c" - } - if options.ResourceTypes.Object { - resources += "o" - } - - // build permissions string - permissions := "" - if options.Permissions.Read { - permissions += "r" - } - if options.Permissions.Write { - permissions += "w" - } - if options.Permissions.Delete { - permissions += "d" - } - if options.Permissions.List { - permissions += "l" - } - if options.Permissions.Add { - permissions += "a" - } - if options.Permissions.Create { - permissions += "c" - } - if options.Permissions.Update { - permissions += "u" - } - if options.Permissions.Process { - permissions += "p" - } - - // build start time, if exists - start := "" - if options.Start != (time.Time{}) { - start = options.Start.Format(time.RFC3339) - // For some reason I don't understand, it fails when the rest of the string is included - start = start[:10] - } - - // build expiry time - expiry := options.Expiry.Format(time.RFC3339) - // For some reason I don't understand, it fails 
when the rest of the string is included - expiry = expiry[:10] - - protocol := "https,http" - if options.UseHTTPS { - protocol = "https" - } - - stringToSign := strings.Join([]string{ - c.accountName, - permissions, - services, - resources, - start, - expiry, - options.IP, - protocol, - options.APIVersion, - "", - }, "\n") - signature := c.computeHmac256(stringToSign) - - sasParams := url.Values{ - "sv": {options.APIVersion}, - "ss": {services}, - "srt": {resources}, - "sp": {permissions}, - "se": {expiry}, - "spr": {protocol}, - "sig": {signature}, - } - if start != "" { - sasParams.Add("st", start) - } - if options.IP != "" { - sasParams.Add("sip", options.IP) - } - - return sasParams, nil -} - -// GetBlobService returns a BlobStorageClient which can operate on the blob -// service of the storage account. -func (c Client) GetBlobService() BlobStorageClient { - b := BlobStorageClient{ - client: c, - } - b.client.AddToUserAgent(blobServiceName) - b.auth = sharedKey - if c.UseSharedKeyLite { - b.auth = sharedKeyLite - } - return b -} - -// GetQueueService returns a QueueServiceClient which can operate on the queue -// service of the storage account. -func (c Client) GetQueueService() QueueServiceClient { - q := QueueServiceClient{ - client: c, - } - q.client.AddToUserAgent(queueServiceName) - q.auth = sharedKey - if c.UseSharedKeyLite { - q.auth = sharedKeyLite - } - return q -} - -// GetTableService returns a TableServiceClient which can operate on the table -// service of the storage account. -func (c Client) GetTableService() TableServiceClient { - t := TableServiceClient{ - client: c, - } - t.client.AddToUserAgent(tableServiceName) - t.auth = sharedKeyForTable - if c.UseSharedKeyLite { - t.auth = sharedKeyLiteForTable - } - return t -} - -// GetFileService returns a FileServiceClient which can operate on the file -// service of the storage account. 
-func (c Client) GetFileService() FileServiceClient { - f := FileServiceClient{ - client: c, - } - f.client.AddToUserAgent(fileServiceName) - f.auth = sharedKey - if c.UseSharedKeyLite { - f.auth = sharedKeyLite - } - return f -} - -func (c Client) getStandardHeaders() map[string]string { - return map[string]string{ - userAgentHeader: c.userAgent, - "x-ms-version": c.apiVersion, - "x-ms-date": currentTimeRfc1123Formatted(), - } -} - -func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) { - headers, err := c.addAuthorizationHeader(verb, url, headers, auth) - if err != nil { - return nil, err - } - - req, err := http.NewRequest(verb, url, body) - if err != nil { - return nil, errors.New("azure/storage: error creating request: " + err.Error()) - } - - // if a body was provided ensure that the content length was set. - // http.NewRequest() will automatically do this for a handful of types - // and for those that it doesn't we will handle here. - if body != nil && req.ContentLength < 1 { - if lr, ok := body.(*io.LimitedReader); ok { - setContentLengthFromLimitedReader(req, lr) - } - } - - for k, v := range headers { - req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. 
See https://github.com/Azure/azure-sdk-for-go/issues/645 - } - - resp, err := c.Sender.Send(&c, req) - if err != nil { - return nil, err - } - - if resp.StatusCode >= 400 && resp.StatusCode <= 505 { - var respBody []byte - respBody, err = readAndCloseBody(resp.Body) - if err != nil { - return nil, err - } - - requestID, date, version := getDebugHeaders(resp.Header) - if len(respBody) == 0 { - // no error in response body, might happen in HEAD requests - err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) - } else { - storageErr := AzureStorageServiceError{ - StatusCode: resp.StatusCode, - RequestID: requestID, - Date: date, - APIVersion: version, - } - // response contains storage service error object, unmarshal - if resp.Header.Get("Content-Type") == "application/xml" { - errIn := serviceErrFromXML(respBody, &storageErr) - if err != nil { // error unmarshaling the error response - err = errIn - } - } else { - errIn := serviceErrFromJSON(respBody, &storageErr) - if err != nil { // error unmarshaling the error response - err = errIn - } - } - err = storageErr - } - return &storageResponse{ - statusCode: resp.StatusCode, - headers: resp.Header, - body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */ - }, err - } - - return &storageResponse{ - statusCode: resp.StatusCode, - headers: resp.Header, - body: resp.Body}, nil -} - -func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) { - headers, err := c.addAuthorizationHeader(verb, url, headers, auth) - if err != nil { - return nil, nil, nil, err - } - - req, err := http.NewRequest(verb, url, body) - for k, v := range headers { - req.Header.Add(k, v) - } - - resp, err := c.Sender.Send(&c, req) - if err != nil { - return nil, nil, nil, err - } - - respToRet := &odataResponse{} - respToRet.body = resp.Body - respToRet.statusCode = resp.StatusCode - 
respToRet.headers = resp.Header - - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { - var respBody []byte - respBody, err = readAndCloseBody(resp.Body) - if err != nil { - return nil, nil, nil, err - } - - requestID, date, version := getDebugHeaders(resp.Header) - if len(respBody) == 0 { - // no error in response body, might happen in HEAD requests - err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) - return respToRet, req, resp, err - } - // try unmarshal as odata.error json - err = json.Unmarshal(respBody, &respToRet.odata) - } - - return respToRet, req, resp, err -} - -func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { - respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth) - return respToRet, err -} - -func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { - // execute common query, get back generated request, response etc... for more processing. - respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth) - if err != nil { - return nil, err - } - - // return the OData in the case of executing batch commands. - // In this case we need to read the outer batch boundary and contents. - // Then we read the changeset information within the batch - var respBody []byte - respBody, err = readAndCloseBody(resp.Body) - if err != nil { - return nil, err - } - - // outer multipart body - _, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0]) - if err != nil { - return nil, err - } - - // batch details. - batchBoundary := batchHeader["boundary"] - batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody) - if err != nil { - return nil, err - } - - // changeset details. 
- err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary) - if err != nil { - return nil, err - } - - return respToRet, nil -} - -func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error { - changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary) - changesetPart, err := changesetMultiReader.NextPart() - if err != nil { - return err - } - - changesetPartBufioReader := bufio.NewReader(changesetPart) - changesetResp, err := http.ReadResponse(changesetPartBufioReader, req) - if err != nil { - return err - } - - if changesetResp.StatusCode != http.StatusNoContent { - changesetBody, err := readAndCloseBody(changesetResp.Body) - err = json.Unmarshal(changesetBody, &respToRet.odata) - if err != nil { - return err - } - respToRet.statusCode = changesetResp.StatusCode - } - - return nil -} - -func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) { - respBodyString := string(respBody) - respBodyReader := strings.NewReader(respBodyString) - - // reading batchresponse - batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary) - batchPart, err := batchMultiReader.NextPart() - if err != nil { - return nil, "", err - } - batchPartBufioReader := bufio.NewReader(batchPart) - - _, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type")) - if err != nil { - return nil, "", err - } - changesetBoundary := changesetHeader["boundary"] - return batchPartBufioReader, changesetBoundary, nil -} - -func readAndCloseBody(body io.ReadCloser) ([]byte, error) { - defer body.Close() - out, err := ioutil.ReadAll(body) - if err == io.EOF { - err = nil - } - return out, err -} - -func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error { - if err := xml.Unmarshal(body, storageErr); err != nil { - storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. 
Body: %v.", err, string(body)) - return err - } - return nil -} - -func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error { - odataError := odataErrorWrapper{} - if err := json.Unmarshal(body, &odataError); err != nil { - storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body)) - return err - } - storageErr.Code = odataError.Err.Code - storageErr.Message = odataError.Err.Message.Value - storageErr.Lang = odataError.Err.Message.Lang - return nil -} - -func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError { - return AzureStorageServiceError{ - StatusCode: code, - Code: status, - RequestID: requestID, - Date: date, - APIVersion: version, - Message: "no response body was available for error status code", - } -} - -func (e AzureStorageServiceError) Error() string { - return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s", - e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue) -} - -// checkRespCode returns UnexpectedStatusError if the given response code is not -// one of the allowed status codes; otherwise nil. 
-func checkRespCode(respCode int, allowed []int) error { - for _, v := range allowed { - if respCode == v { - return nil - } - } - return UnexpectedStatusCodeError{allowed, respCode} -} - -func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string { - metadata = c.protectUserAgent(metadata) - for k, v := range metadata { - h[userDefinedMetadataHeaderPrefix+k] = v - } - return h -} - -func getDebugHeaders(h http.Header) (requestID, date, version string) { - requestID = h.Get("x-ms-request-id") - version = h.Get("x-ms-version") - date = h.Get("Date") - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go deleted file mode 100644 index e898e9bfaf9..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go +++ /dev/null @@ -1,38 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "net/url" - "time" -) - -// SASOptions includes options used by SAS URIs for different -// services and resources. 
-type SASOptions struct { - APIVersion string - Start time.Time - Expiry time.Time - IP string - UseHTTPS bool - Identifier string -} - -func addQueryParameter(query url.Values, key, value string) url.Values { - if value != "" { - query.Add(key, value) - } - return query -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go deleted file mode 100644 index 8963c7a89b3..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go +++ /dev/null @@ -1,545 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// Container represents an Azure container. -type Container struct { - bsc *BlobStorageClient - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` - Metadata map[string]string - sasuri url.URL -} - -// Client returns the HTTP client used by the Container reference. -func (c *Container) Client() *Client { - return &c.bsc.client -} - -func (c *Container) buildPath() string { - return fmt.Sprintf("/%s", c.Name) -} - -// GetURL gets the canonical URL to the container. -// This method does not create a publicly accessible URL if the container -// is private and this method does not check if the blob exists. 
-func (c *Container) GetURL() string { - container := c.Name - if container == "" { - container = "$root" - } - return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil) -} - -// ContainerSASOptions are options to construct a container SAS -// URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type ContainerSASOptions struct { - ContainerSASPermissions - OverrideHeaders - SASOptions -} - -// ContainerSASPermissions includes the available permissions for -// a container SAS URI. -type ContainerSASPermissions struct { - BlobServiceSASPermissions - List bool -} - -// GetSASURI creates an URL to the container which contains the Shared -// Access Signature with the specified options. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) { - uri := c.GetURL() - signedResource := "c" - canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true) - if err != nil { - return "", err - } - - // build permissions string - permissions := options.BlobServiceSASPermissions.buildString() - if options.List { - permissions += "l" - } - - return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) -} - -// ContainerProperties contains various properties of a container returned from -// various endpoints like ListContainers. -type ContainerProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` -} - -// ContainerListResponse contains the response fields from -// ListContainers call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ContainerListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Containers []Container `xml:"Containers>Container"` -} - -// BlobListResponse contains the response fields from ListBlobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type BlobListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Blobs []Blob `xml:"Blobs>Blob"` - - // BlobPrefix is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - // The list here can be thought of as "folders" that may contain - // other folders or blobs. - BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` - - // Delimiter is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - Delimiter string `xml:"Delimiter"` -} - -// IncludeBlobDataset has options to include in a list blobs operation -type IncludeBlobDataset struct { - Snapshots bool - Metadata bool - UncommittedBlobs bool - Copy bool -} - -// ListBlobsParameters defines the set of customizable -// parameters to make a List Blobs call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type ListBlobsParameters struct { - Prefix string - Delimiter string - Marker string - Include *IncludeBlobDataset - MaxResults uint - Timeout uint - RequestID string -} - -func (p ListBlobsParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Delimiter != "" { - out.Set("delimiter", p.Delimiter) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != nil { - include := []string{} - include = addString(include, p.Include.Snapshots, "snapshots") - include = addString(include, p.Include.Metadata, "metadata") - include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs") - include = addString(include, p.Include.Copy, "copy") - fullInclude := strings.Join(include, ",") - out.Set("include", fullInclude) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - if p.Timeout != 0 { - out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) - } - - return out -} - -func addString(datasets []string, include bool, text string) []string { - if include { - datasets = append(datasets, text) - } - return datasets -} - -// ContainerAccessType defines the access level to the container from a public -// request. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- -// blob-public-access" header. -type ContainerAccessType string - -// Access options for containers -const ( - ContainerAccessTypePrivate ContainerAccessType = "" - ContainerAccessTypeBlob ContainerAccessType = "blob" - ContainerAccessTypeContainer ContainerAccessType = "container" -) - -// ContainerAccessPolicy represents each access policy in the container ACL. 
-type ContainerAccessPolicy struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanWrite bool - CanDelete bool -} - -// ContainerPermissions represents the container ACLs. -type ContainerPermissions struct { - AccessType ContainerAccessType - AccessPolicies []ContainerAccessPolicy -} - -// ContainerAccessHeader references header used when setting/getting container ACL -const ( - ContainerAccessHeader string = "x-ms-blob-public-access" -) - -// GetBlobReference returns a Blob object for the specified blob name. -func (c *Container) GetBlobReference(name string) *Blob { - return &Blob{ - Container: c, - Name: name, - } -} - -// CreateContainerOptions includes the options for a create container operation -type CreateContainerOptions struct { - Timeout uint - Access ContainerAccessType `header:"x-ms-blob-public-access"` - RequestID string `header:"x-ms-client-request-id"` -} - -// Create creates a blob container within the storage account -// with given name and access level. Returns error if container already exists. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container -func (c *Container) Create(options *CreateContainerOptions) error { - resp, err := c.create(options) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CreateIfNotExists creates a blob container if it does not exist. Returns -// true if container is newly created or false if container already exists. 
-func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) { - resp, err := c.create(options) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } - } - return false, err -} - -func (c *Container) create(options *CreateContainerOptions) (*storageResponse, error) { - query := url.Values{"restype": {"container"}} - headers := c.bsc.client.getStandardHeaders() - headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) - - if options != nil { - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) - - return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) -} - -// Exists returns true if a container with given name exists -// on the storage account, otherwise returns false. 
-func (c *Container) Exists() (bool, error) { - q := url.Values{"restype": {"container"}} - var uri string - if c.bsc.client.isServiceSASClient() { - q = mergeParams(q, c.sasuri.Query()) - newURI := c.sasuri - newURI.RawQuery = q.Encode() - uri = newURI.String() - - } else { - if c.bsc.client.isAccountSASClient() { - q = mergeParams(q, c.bsc.client.accountSASToken) - } - uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) - } - headers := c.bsc.client.getStandardHeaders() - - resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// SetContainerPermissionOptions includes options for a set container permissions operation -type SetContainerPermissionOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - RequestID string `header:"x-ms-client-request-id"` -} - -// SetPermissions sets up container permissions -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL -func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error { - body, length, err := generateContainerACLpayload(permissions.AccessPolicies) - if err != nil { - return err - } - params := url.Values{ - "restype": {"container"}, - "comp": {"acl"}, - } - headers := c.bsc.client.getStandardHeaders() - headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType)) - headers["Content-Length"] = strconv.Itoa(length) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, 
c.buildPath(), params) - - resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return errors.New("Unable to set permissions") - } - - return nil -} - -// GetContainerPermissionOptions includes options for a get container permissions operation -type GetContainerPermissionOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx -// If timeout is 0 then it will not be passed to Azure -// leaseID will only be passed to Azure if populated -func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) { - params := url.Values{ - "restype": {"container"}, - "comp": {"acl"}, - } - headers := c.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) - - resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - var ap AccessPolicy - err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) - if err != nil { - return nil, err - } - return buildAccessPolicy(ap, &resp.headers), nil -} - -func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions { - // containerAccess. 
Blob, Container, empty - containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) - permissions := ContainerPermissions{ - AccessType: ContainerAccessType(containerAccess), - AccessPolicies: []ContainerAccessPolicy{}, - } - - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - capd := ContainerAccessPolicy{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w") - capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") - - permissions.AccessPolicies = append(permissions.AccessPolicies, capd) - } - return &permissions -} - -// DeleteContainerOptions includes options for a delete container operation -type DeleteContainerOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - RequestID string `header:"x-ms-client-request-id"` -} - -// Delete deletes the container with given name on the storage -// account. If the container does not exist returns error. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container -func (c *Container) Delete(options *DeleteContainerOptions) error { - resp, err := c.delete(options) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// DeleteIfExists deletes the container with given name on the storage -// account if it exists. Returns true if container is deleted with this call, or -// false if the container did not exist at the time of the Delete Container -// operation. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container -func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) { - resp, err := c.delete(options) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (c *Container) delete(options *DeleteContainerOptions) (*storageResponse, error) { - query := url.Values{"restype": {"container"}} - headers := c.bsc.client.getStandardHeaders() - - if options != nil { - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) - - return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth) -} - -// ListBlobs returns an object that contains list of blobs in the container, -// pagination token and other information in the response of List Blobs call. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs -func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{ - "restype": {"container"}, - "comp": {"list"}, - }) - var uri string - if c.bsc.client.isServiceSASClient() { - q = mergeParams(q, c.sasuri.Query()) - newURI := c.sasuri - newURI.RawQuery = q.Encode() - uri = newURI.String() - } else { - if c.bsc.client.isAccountSASClient() { - q = mergeParams(q, c.bsc.client.accountSASToken) - } - uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) - } - - headers := c.bsc.client.getStandardHeaders() - headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID) - - var out BlobListResponse - resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - for i := range out.Blobs { - out.Blobs[i].Container = c - } - return out, err -} - -func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) { - sil := SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{}, - } - for _, capd := range policies { - permission := capd.generateContainerPermissions() - signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission) - sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) - } - return xmlMarshal(sil) -} - -func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) { - // generate the permissions string (rwd). - // still want the end user API to have bool flags. 
- permissions = "" - - if capd.CanRead { - permissions += "r" - } - - if capd.CanWrite { - permissions += "w" - } - - if capd.CanDelete { - permissions += "d" - } - - return permissions -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go deleted file mode 100644 index a4cc2527b67..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go +++ /dev/null @@ -1,237 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" -) - -const ( - blobCopyStatusPending = "pending" - blobCopyStatusSuccess = "success" - blobCopyStatusAborted = "aborted" - blobCopyStatusFailed = "failed" -) - -// CopyOptions includes the options for a copy blob operation -type CopyOptions struct { - Timeout uint - Source CopyOptionsConditions - Destiny CopyOptionsConditions - RequestID string -} - -// IncrementalCopyOptions includes the options for an incremental copy blob operation -type IncrementalCopyOptions struct { - Timeout uint - Destination IncrementalCopyOptionsConditions - RequestID string -} - -// CopyOptionsConditions includes some conditional options in a copy blob operation -type CopyOptionsConditions struct { - LeaseID string - IfModifiedSince *time.Time - IfUnmodifiedSince *time.Time - IfMatch string - IfNoneMatch string -} - -// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation -type IncrementalCopyOptionsConditions struct { - IfModifiedSince *time.Time - IfUnmodifiedSince *time.Time - IfMatch string - IfNoneMatch string -} - -// Copy starts a blob copy operation and waits for the operation to -// complete. sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using the GetURL method.) There is no SLA on blob copy and therefore -// this helper method works faster on smaller files. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob -func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error { - copyID, err := b.StartCopy(sourceBlob, options) - if err != nil { - return err - } - - return b.WaitForCopy(copyID) -} - -// StartCopy starts a blob copy operation. -// sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using the GetURL method.) 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob -func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-copy-source"] = sourceBlob - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) - // source - headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID) - headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince) - headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince) - headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch) - headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch) - //destiny - headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID) - headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince) - headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince) - headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch) - headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return "", err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { - return "", err - } - - copyID := resp.headers.Get("x-ms-copy-id") - if copyID == "" { - return "", errors.New("Got empty copy id header") - } - return copyID, nil -} - -// 
AbortCopyOptions includes the options for an abort blob operation -type AbortCopyOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. -// copyID is generated from StartBlobCopy function. -// currentLeaseID is required IF the destination blob has an active lease on it. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob -func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error { - params := url.Values{ - "comp": {"copy"}, - "copyid": {copyID}, - } - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-copy-action"] = "abort" - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// WaitForCopy loops until a BlobCopy operation is completed (or fails with error) -func (b *Blob) WaitForCopy(copyID string) error { - for { - err := b.GetProperties(nil) - if err != nil { - return err - } - - if b.Properties.CopyID != copyID { - return errBlobCopyIDMismatch - } - - switch b.Properties.CopyStatus { - case blobCopyStatusSuccess: - return nil - case blobCopyStatusPending: - continue - case blobCopyStatusAborted: - return errBlobCopyAborted - case blobCopyStatusFailed: - return fmt.Errorf("storage: blob copy failed. 
Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription) - default: - return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus) - } - } -} - -// IncrementalCopyBlob copies a snapshot of a source blob and copies to referring blob -// sourceBlob parameter must be a valid snapshot URL of the original blob. -// THe original blob mut be public, or use a Shared Access Signature. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob . -func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) { - params := url.Values{"comp": {"incrementalcopy"}} - - // need formatting to 7 decimal places so it's friendly to Windows and *nix - snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z") - u, err := url.Parse(sourceBlobURL) - if err != nil { - return "", err - } - query := u.Query() - query.Add("snapshot", snapshotTimeFormatted) - encodedQuery := query.Encode() - encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1) - u.RawQuery = encodedQuery - snapshotURL := u.String() - - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-copy-source"] = snapshotURL - - if options != nil { - addTimeout(params, options.Timeout) - headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) - headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince) - headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince) - headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch) - headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch) - } - - // get URI of destination blob - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, 
b.Container.bsc.auth) - if err != nil { - return "", err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil { - return "", err - } - - copyID := resp.headers.Get("x-ms-copy-id") - if copyID == "" { - return "", errors.New("Got empty copy id header") - } - return copyID, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go deleted file mode 100644 index 189e0380244..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go +++ /dev/null @@ -1,238 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "net/http" - "net/url" - "sync" -) - -// Directory represents a directory on a share. -type Directory struct { - fsc *FileServiceClient - Metadata map[string]string - Name string `xml:"Name"` - parent *Directory - Properties DirectoryProperties - share *Share -} - -// DirectoryProperties contains various properties of a directory. -type DirectoryProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` -} - -// ListDirsAndFilesParameters defines the set of customizable parameters to -// make a List Files and Directories call. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files -type ListDirsAndFilesParameters struct { - Prefix string - Marker string - MaxResults uint - Timeout uint -} - -// DirsAndFilesListResponse contains the response fields from -// a List Files and Directories call. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files -type DirsAndFilesListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Marker string `xml:"Marker"` - MaxResults int64 `xml:"MaxResults"` - Directories []Directory `xml:"Entries>Directory"` - Files []File `xml:"Entries>File"` - NextMarker string `xml:"NextMarker"` -} - -// builds the complete directory path for this directory object. -func (d *Directory) buildPath() string { - path := "" - current := d - for current.Name != "" { - path = "/" + current.Name + path - current = current.parent - } - return d.share.buildPath() + path -} - -// Create this directory in the associated share. -// If a directory with the same name already exists, the operation fails. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory -func (d *Directory) Create(options *FileRequestOptions) error { - // if this is the root directory exit early - if d.parent == nil { - return nil - } - - params := prepareOptions(options) - headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated}) - if err != nil { - return err - } - - d.updateEtagAndLastModified(headers) - return nil -} - -// CreateIfNotExists creates this directory under the associated share if the -// directory does not exists. Returns true if the directory is newly created or -// false if the directory already exists. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory -func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) { - // if this is the root directory exit early - if d.parent == nil { - return false, nil - } - - params := prepareOptions(options) - resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - if resp.statusCode == http.StatusCreated { - d.updateEtagAndLastModified(resp.headers) - return true, nil - } - - return false, d.FetchAttributes(nil) - } - } - - return false, err -} - -// Delete removes this directory. It must be empty in order to be deleted. -// If the directory does not exist the operation fails. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory -func (d *Directory) Delete(options *FileRequestOptions) error { - return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options) -} - -// DeleteIfExists removes this directory if it exists. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory -func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) { - resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// Exists returns true if this directory exists. -func (d *Directory) Exists() (bool, error) { - exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory) - if exists { - d.updateEtagAndLastModified(headers) - } - return exists, err -} - -// FetchAttributes retrieves metadata for this directory. 
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties -func (d *Directory) FetchAttributes(options *FileRequestOptions) error { - params := prepareOptions(options) - headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead) - if err != nil { - return err - } - - d.updateEtagAndLastModified(headers) - d.Metadata = getMetadataFromHeaders(headers) - - return nil -} - -// GetDirectoryReference returns a child Directory object for this directory. -func (d *Directory) GetDirectoryReference(name string) *Directory { - return &Directory{ - fsc: d.fsc, - Name: name, - parent: d, - share: d.share, - } -} - -// GetFileReference returns a child File object for this directory. -func (d *Directory) GetFileReference(name string) *File { - return &File{ - fsc: d.fsc, - Name: name, - parent: d, - share: d.share, - mutex: &sync.Mutex{}, - } -} - -// ListDirsAndFiles returns a list of files and directories under this directory. -// It also contains a pagination token and other response details. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files -func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) { - q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) - - resp, err := d.fsc.listContent(d.buildPath(), q, nil) - if err != nil { - return nil, err - } - - defer resp.body.Close() - var out DirsAndFilesListResponse - err = xmlUnmarshal(resp.body, &out) - return &out, err -} - -// SetMetadata replaces the metadata for this directory. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetDirectoryMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata -func (d *Directory) SetMetadata(options *FileRequestOptions) error { - headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options) - if err != nil { - return err - } - - d.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (d *Directory) updateEtagAndLastModified(headers http.Header) { - d.Properties.Etag = headers.Get("Etag") - d.Properties.LastModified = headers.Get("Last-Modified") -} - -// URL gets the canonical URL to this directory. -// This method does not create a publicly accessible URL if the directory -// is private and this method does not check if the directory exists. -func (d *Directory) URL() string { - return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{}) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go deleted file mode 100644 index 9668ea66949..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go +++ /dev/null @@ -1,453 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/satori/uuid" -) - -// Annotating as secure for gas scanning -/* #nosec */ -const ( - partitionKeyNode = "PartitionKey" - rowKeyNode = "RowKey" - etagErrorTemplate = "Etag didn't match: %v" -) - -var ( - errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation") - errNilPreviousResult = errors.New("The previous results page is nil") - errNilNextLink = errors.New("There are no more pages in this query results") -) - -// Entity represents an entity inside an Azure table. -type Entity struct { - Table *Table - PartitionKey string - RowKey string - TimeStamp time.Time - OdataMetadata string - OdataType string - OdataID string - OdataEtag string - OdataEditLink string - Properties map[string]interface{} -} - -// GetEntityReference returns an Entity object with the specified -// partition key and row key. -func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity { - return &Entity{ - PartitionKey: partitionKey, - RowKey: rowKey, - Table: t, - } -} - -// EntityOptions includes options for entity operations. -type EntityOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// GetEntityOptions includes options for a get entity operation -type GetEntityOptions struct { - Select []string - RequestID string `header:"x-ms-client-request-id"` -} - -// Get gets the referenced entity. Which properties to get can be -// specified using the select option. 
-// See: -// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities -// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities -func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error { - if ml == EmptyPayload { - return errEmptyPayload - } - // RowKey and PartitionKey could be lost if not included in the query - // As those are the entity identifiers, it is best if they are not lost - rk := e.RowKey - pk := e.PartitionKey - - query := url.Values{ - "timeout": {strconv.FormatUint(uint64(timeout), 10)}, - } - headers := e.Table.tsc.client.getStandardHeaders() - headers[headerAccept] = string(ml) - - if options != nil { - if len(options.Select) > 0 { - query.Add("$select", strings.Join(options.Select, ",")) - } - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) - resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return err - } - - respBody, err := ioutil.ReadAll(resp.body) - if err != nil { - return err - } - err = json.Unmarshal(respBody, e) - if err != nil { - return err - } - e.PartitionKey = pk - e.RowKey = rk - - return nil -} - -// Insert inserts the referenced entity in its table. -// The function fails if there is an entity with the same -// PartitionKey and RowKey in the table. -// ml determines the level of detail of metadata in the operation response, -// or no data at all. 
-// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity -func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error { - query, headers := options.getParameters() - headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) - - body, err := json.Marshal(e) - if err != nil { - return err - } - headers = addBodyRelatedHeaders(headers, len(body)) - headers = addReturnContentHeaders(headers, ml) - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query) - resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) - if err != nil { - return err - } - defer resp.body.Close() - - data, err := ioutil.ReadAll(resp.body) - if err != nil { - return err - } - - if ml != EmptyPayload { - if err = checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { - return err - } - if err = e.UnmarshalJSON(data); err != nil { - return err - } - } else { - if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - } - - return nil -} - -// Update updates the contents of an entity. The function fails if there is no entity -// with the same PartitionKey and RowKey in the table or if the ETag is different -// than the one in Azure. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2 -func (e *Entity) Update(force bool, options *EntityOptions) error { - return e.updateMerge(force, http.MethodPut, options) -} - -// Merge merges the contents of entity specified with PartitionKey and RowKey -// with the content specified in Properties. -// The function fails if there is no entity with the same PartitionKey and -// RowKey in the table or if the ETag is different than the one in Azure. 
-// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity -func (e *Entity) Merge(force bool, options *EntityOptions) error { - return e.updateMerge(force, "MERGE", options) -} - -// Delete deletes the entity. -// The function fails if there is no entity with the same PartitionKey and -// RowKey in the table or if the ETag is different than the one in Azure. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1 -func (e *Entity) Delete(force bool, options *EntityOptions) error { - query, headers := options.getParameters() - headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) - - headers = addIfMatchHeader(headers, force, e.OdataEtag) - headers = addReturnContentHeaders(headers, EmptyPayload) - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) - resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth) - if err != nil { - if resp.statusCode == http.StatusPreconditionFailed { - return fmt.Errorf(etagErrorTemplate, err) - } - return err - } - defer readAndCloseBody(resp.body) - - if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - - return e.updateTimestamp(resp.headers) -} - -// InsertOrReplace inserts an entity or replaces the existing one. -// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity -func (e *Entity) InsertOrReplace(options *EntityOptions) error { - return e.insertOr(http.MethodPut, options) -} - -// InsertOrMerge inserts an entity or merges the existing one. 
-// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity -func (e *Entity) InsertOrMerge(options *EntityOptions) error { - return e.insertOr("MERGE", options) -} - -func (e *Entity) buildPath() string { - return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey) -} - -// MarshalJSON is a custom marshaller for entity -func (e *Entity) MarshalJSON() ([]byte, error) { - completeMap := map[string]interface{}{} - completeMap[partitionKeyNode] = e.PartitionKey - completeMap[rowKeyNode] = e.RowKey - for k, v := range e.Properties { - typeKey := strings.Join([]string{k, OdataTypeSuffix}, "") - switch t := v.(type) { - case []byte: - completeMap[typeKey] = OdataBinary - completeMap[k] = string(t) - case time.Time: - completeMap[typeKey] = OdataDateTime - completeMap[k] = t.Format(time.RFC3339Nano) - case uuid.UUID: - completeMap[typeKey] = OdataGUID - completeMap[k] = t.String() - case int64: - completeMap[typeKey] = OdataInt64 - completeMap[k] = fmt.Sprintf("%v", v) - default: - completeMap[k] = v - } - if strings.HasSuffix(k, OdataTypeSuffix) { - if !(completeMap[k] == OdataBinary || - completeMap[k] == OdataDateTime || - completeMap[k] == OdataGUID || - completeMap[k] == OdataInt64) { - return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k) - } - valueKey := strings.TrimSuffix(k, OdataTypeSuffix) - if _, ok := completeMap[valueKey]; !ok { - return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k) - } - } - } - return json.Marshal(completeMap) -} - -// UnmarshalJSON is a custom unmarshaller for entities -func (e *Entity) UnmarshalJSON(data []byte) error { - errorTemplate := "Deserializing error: %v" - - props := map[string]interface{}{} - err := json.Unmarshal(data, &props) - if err != nil { - return err - } - - // deselialize metadata - e.OdataMetadata = stringFromMap(props, "odata.metadata") - e.OdataType = stringFromMap(props, 
"odata.type") - e.OdataID = stringFromMap(props, "odata.id") - e.OdataEtag = stringFromMap(props, "odata.etag") - e.OdataEditLink = stringFromMap(props, "odata.editLink") - e.PartitionKey = stringFromMap(props, partitionKeyNode) - e.RowKey = stringFromMap(props, rowKeyNode) - - // deserialize timestamp - timeStamp, ok := props["Timestamp"] - if ok { - str, ok := timeStamp.(string) - if !ok { - return fmt.Errorf(errorTemplate, "Timestamp casting error") - } - t, err := time.Parse(time.RFC3339Nano, str) - if err != nil { - return fmt.Errorf(errorTemplate, err) - } - e.TimeStamp = t - } - delete(props, "Timestamp") - delete(props, "Timestamp@odata.type") - - // deserialize entity (user defined fields) - for k, v := range props { - if strings.HasSuffix(k, OdataTypeSuffix) { - valueKey := strings.TrimSuffix(k, OdataTypeSuffix) - str, ok := props[valueKey].(string) - if !ok { - return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v)) - } - switch v { - case OdataBinary: - props[valueKey] = []byte(str) - case OdataDateTime: - t, err := time.Parse("2006-01-02T15:04:05Z", str) - if err != nil { - return fmt.Errorf(errorTemplate, err) - } - props[valueKey] = t - case OdataGUID: - props[valueKey] = uuid.FromStringOrNil(str) - case OdataInt64: - i, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return fmt.Errorf(errorTemplate, err) - } - props[valueKey] = i - default: - return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v)) - } - delete(props, k) - } - } - - e.Properties = props - return nil -} - -func getAndDelete(props map[string]interface{}, key string) interface{} { - if value, ok := props[key]; ok { - delete(props, key) - return value - } - return nil -} - -func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string { - if force { - h[headerIfMatch] = "*" - } else { - h[headerIfMatch] = etag - } - return h -} - -// updates Etag and timestamp -func (e *Entity) updateEtagAndTimestamp(headers http.Header) error 
{ - e.OdataEtag = headers.Get(headerEtag) - return e.updateTimestamp(headers) -} - -func (e *Entity) updateTimestamp(headers http.Header) error { - str := headers.Get(headerDate) - t, err := time.Parse(time.RFC1123, str) - if err != nil { - return fmt.Errorf("Update timestamp error: %v", err) - } - e.TimeStamp = t - return nil -} - -func (e *Entity) insertOr(verb string, options *EntityOptions) error { - query, headers := options.getParameters() - headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) - - body, err := json.Marshal(e) - if err != nil { - return err - } - headers = addBodyRelatedHeaders(headers, len(body)) - headers = addReturnContentHeaders(headers, EmptyPayload) - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) - resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - - return e.updateEtagAndTimestamp(resp.headers) -} - -func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error { - query, headers := options.getParameters() - headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) - - body, err := json.Marshal(e) - if err != nil { - return err - } - headers = addBodyRelatedHeaders(headers, len(body)) - headers = addIfMatchHeader(headers, force, e.OdataEtag) - headers = addReturnContentHeaders(headers, EmptyPayload) - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) - resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) - if err != nil { - if resp.statusCode == http.StatusPreconditionFailed { - return fmt.Errorf(etagErrorTemplate, err) - } - return err - } - defer readAndCloseBody(resp.body) - - if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err 
!= nil { - return err - } - - return e.updateEtagAndTimestamp(resp.headers) -} - -func stringFromMap(props map[string]interface{}, key string) string { - value := getAndDelete(props, key) - if value != nil { - return value.(string) - } - return "" -} - -func (options *EntityOptions) getParameters() (url.Values, map[string]string) { - query := url.Values{} - headers := map[string]string{} - if options != nil { - query = addTimeout(query, options.Timeout) - headers = headersFromStruct(*options) - } - return query, headers -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go deleted file mode 100644 index 5fb516c55fd..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ /dev/null @@ -1,476 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "sync" -) - -const fourMB = uint64(4194304) -const oneTB = uint64(1099511627776) - -// File represents a file on a share. -type File struct { - fsc *FileServiceClient - Metadata map[string]string - Name string `xml:"Name"` - parent *Directory - Properties FileProperties `xml:"Properties"` - share *Share - FileCopyProperties FileCopyState - mutex *sync.Mutex -} - -// FileProperties contains various properties of a file. 
-type FileProperties struct { - CacheControl string `header:"x-ms-cache-control"` - Disposition string `header:"x-ms-content-disposition"` - Encoding string `header:"x-ms-content-encoding"` - Etag string - Language string `header:"x-ms-content-language"` - LastModified string - Length uint64 `xml:"Content-Length" header:"x-ms-content-length"` - MD5 string `header:"x-ms-content-md5"` - Type string `header:"x-ms-content-type"` -} - -// FileCopyState contains various properties of a file copy operation. -type FileCopyState struct { - CompletionTime string - ID string `header:"x-ms-copy-id"` - Progress string - Source string - Status string `header:"x-ms-copy-status"` - StatusDesc string -} - -// FileStream contains file data returned from a call to GetFile. -type FileStream struct { - Body io.ReadCloser - ContentMD5 string -} - -// FileRequestOptions will be passed to misc file operations. -// Currently just Timeout (in seconds) but could expand. -type FileRequestOptions struct { - Timeout uint // timeout duration in seconds. -} - -func prepareOptions(options *FileRequestOptions) url.Values { - params := url.Values{} - if options != nil { - params = addTimeout(params, options.Timeout) - } - return params -} - -// FileRanges contains a list of file range information for a file. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges -type FileRanges struct { - ContentLength uint64 - LastModified string - ETag string - FileRanges []FileRange `xml:"Range"` -} - -// FileRange contains range information for a file. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges -type FileRange struct { - Start uint64 `xml:"Start"` - End uint64 `xml:"End"` -} - -func (fr FileRange) String() string { - return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End) -} - -// builds the complete file path for this file object -func (f *File) buildPath() string { - return f.parent.buildPath() + "/" + f.Name -} - -// ClearRange releases the specified range of space in a file. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range -func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error { - var timeout *uint - if options != nil { - timeout = &options.Timeout - } - headers, err := f.modifyRange(nil, fileRange, timeout, nil) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - return nil -} - -// Create creates a new file or replaces an existing one. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File -func (f *File) Create(maxSize uint64, options *FileRequestOptions) error { - if maxSize > oneTB { - return fmt.Errorf("max file size is 1TB") - } - params := prepareOptions(options) - headers := headersFromStruct(f.Properties) - headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10) - headers["x-ms-type"] = "file" - - outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated}) - if err != nil { - return err - } - - f.Properties.Length = maxSize - f.updateEtagAndLastModified(outputHeaders) - return nil -} - -// CopyFile operation copied a file/blob from the sourceURL to the path provided. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file -func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error { - extraHeaders := map[string]string{ - "x-ms-type": "file", - "x-ms-copy-source": sourceURL, - } - params := prepareOptions(options) - - headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted}) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id") - f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status") - return nil -} - -// Delete immediately removes this file from the storage account. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 -func (f *File) Delete(options *FileRequestOptions) error { - return f.fsc.deleteResource(f.buildPath(), resourceFile, options) -} - -// DeleteIfExists removes this file if it exists. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 -func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) { - resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// GetFileOptions includes options for a get file operation -type GetFileOptions struct { - Timeout uint - GetContentMD5 bool -} - -// DownloadToStream operation downloads the file. 
-// -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file -func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) { - params := prepareOptions(options) - resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil) - if err != nil { - return nil, err - } - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - readAndCloseBody(resp.body) - return nil, err - } - return resp.body, nil -} - -// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file -func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) { - extraHeaders := map[string]string{ - "Range": fileRange.String(), - } - params := url.Values{} - if options != nil { - if options.GetContentMD5 { - if isRangeTooBig(fileRange) { - return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") - } - extraHeaders["x-ms-range-get-content-md5"] = "true" - } - params = addTimeout(params, options.Timeout) - } - - resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders) - if err != nil { - return fs, err - } - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil { - readAndCloseBody(resp.body) - return fs, err - } - - fs.Body = resp.body - if options != nil && options.GetContentMD5 { - fs.ContentMD5 = resp.headers.Get("Content-MD5") - } - return fs, nil -} - -// Exists returns true if this file exists. 
-func (f *File) Exists() (bool, error) { - exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile) - if exists { - f.updateEtagAndLastModified(headers) - f.updateProperties(headers) - } - return exists, err -} - -// FetchAttributes updates metadata and properties for this file. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties -func (f *File) FetchAttributes(options *FileRequestOptions) error { - params := prepareOptions(options) - headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - f.updateProperties(headers) - f.Metadata = getMetadataFromHeaders(headers) - return nil -} - -// returns true if the range is larger than 4MB -func isRangeTooBig(fileRange FileRange) bool { - if fileRange.End-fileRange.Start > fourMB { - return true - } - - return false -} - -// ListRangesOptions includes options for a list file ranges operation -type ListRangesOptions struct { - Timeout uint - ListRange *FileRange -} - -// ListRanges returns the list of valid ranges for this file. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges -func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) { - params := url.Values{"comp": {"rangelist"}} - - // add optional range to list - var headers map[string]string - if options != nil { - params = addTimeout(params, options.Timeout) - if options.ListRange != nil { - headers = make(map[string]string) - headers["Range"] = options.ListRange.String() - } - } - - resp, err := f.fsc.listContent(f.buildPath(), params, headers) - if err != nil { - return nil, err - } - - defer resp.body.Close() - var cl uint64 - cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64) - if err != nil { - ioutil.ReadAll(resp.body) - return nil, err - } - - var out FileRanges - out.ContentLength = cl - out.ETag = resp.headers.Get("ETag") - out.LastModified = resp.headers.Get("Last-Modified") - - err = xmlUnmarshal(resp.body, &out) - return &out, err -} - -// modifies a range of bytes in this file -func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) { - if err := f.fsc.checkForStorageEmulator(); err != nil { - return nil, err - } - if fileRange.End < fileRange.Start { - return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart") - } - if bytes != nil && isRangeTooBig(fileRange) { - return nil, errors.New("range cannot exceed 4MB in size") - } - - params := url.Values{"comp": {"range"}} - if timeout != nil { - params = addTimeout(params, *timeout) - } - - uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params) - - // default to clear - write := "clear" - cl := uint64(0) - - // if bytes is not nil then this is an update operation - if bytes != nil { - write = "update" - cl = (fileRange.End - fileRange.Start) + 1 - } - - extraHeaders := map[string]string{ - "Content-Length": strconv.FormatUint(cl, 10), - "Range": fileRange.String(), - 
"x-ms-write": write, - } - - if contentMD5 != nil { - extraHeaders["Content-MD5"] = *contentMD5 - } - - headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders) - resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// SetMetadata replaces the metadata for this file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetFileMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata -func (f *File) SetMetadata(options *FileRequestOptions) error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - return nil -} - -// SetProperties sets system properties on this file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetFileProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties -func (f *File) SetProperties(options *FileRequestOptions) error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (f *File) updateEtagAndLastModified(headers http.Header) { - f.Properties.Etag = headers.Get("Etag") - f.Properties.LastModified = headers.Get("Last-Modified") -} - -// updates file properties from the specified HTTP header -func (f *File) updateProperties(header http.Header) { - size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) - if err == nil { - f.Properties.Length = size - } - - f.updateEtagAndLastModified(header) - f.Properties.CacheControl = header.Get("Cache-Control") - f.Properties.Disposition = header.Get("Content-Disposition") - f.Properties.Encoding = header.Get("Content-Encoding") - f.Properties.Language = header.Get("Content-Language") - f.Properties.MD5 = header.Get("Content-MD5") - f.Properties.Type = header.Get("Content-Type") -} - -// URL gets the canonical URL to this file. -// This method does not create a publicly accessible URL if the file -// is private and this method does not check if the file exists. -func (f *File) URL() string { - return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil) -} - -// WriteRangeOptions includes options for a write file range operation -type WriteRangeOptions struct { - Timeout uint - ContentMD5 string -} - -// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside -// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with -// a maximum size of 4MB. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range -func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error { - if bytes == nil { - return errors.New("bytes cannot be nil") - } - var timeout *uint - var md5 *string - if options != nil { - timeout = &options.Timeout - md5 = &options.ContentMD5 - } - - headers, err := f.modifyRange(bytes, fileRange, timeout, md5) - if err != nil { - return err - } - // it's perfectly legal for multiple go routines to call WriteRange - // on the same *File (e.g. concurrently writing non-overlapping ranges) - // so we must take the file mutex before updating our properties. - f.mutex.Lock() - f.updateEtagAndLastModified(headers) - f.mutex.Unlock() - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go deleted file mode 100644 index 295e3d3e25c..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go +++ /dev/null @@ -1,338 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strconv" -) - -// FileServiceClient contains operations for Microsoft Azure File Service. 
-type FileServiceClient struct { - client Client - auth authentication -} - -// ListSharesParameters defines the set of customizable parameters to make a -// List Shares call. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares -type ListSharesParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -// ShareListResponse contains the response fields from -// ListShares call. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares -type ShareListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Shares []Share `xml:"Shares>Share"` -} - -type compType string - -const ( - compNone compType = "" - compList compType = "list" - compMetadata compType = "metadata" - compProperties compType = "properties" - compRangeList compType = "rangelist" -) - -func (ct compType) String() string { - return string(ct) -} - -type resourceType string - -const ( - resourceDirectory resourceType = "directory" - resourceFile resourceType = "" - resourceShare resourceType = "share" -) - -func (rt resourceType) String() string { - return string(rt) -} - -func (p ListSharesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - if p.Timeout != 0 { - out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) - } - - return out -} - -func (p ListDirsAndFilesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - 
out.Set("marker", p.Marker) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - out = addTimeout(out, p.Timeout) - - return out -} - -// returns url.Values for the specified types -func getURLInitValues(comp compType, res resourceType) url.Values { - values := url.Values{} - if comp != compNone { - values.Set("comp", comp.String()) - } - if res != resourceFile { - values.Set("restype", res.String()) - } - return values -} - -// GetShareReference returns a Share object for the specified share name. -func (f *FileServiceClient) GetShareReference(name string) *Share { - return &Share{ - fsc: f, - Name: name, - Properties: ShareProperties{ - Quota: -1, - }, - } -} - -// ListShares returns the list of shares in a storage account along with -// pagination token and other response details. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares -func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - - var out ShareListResponse - resp, err := f.listContent("", q, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) - - // assign our client to the newly created Share objects - for i := range out.Shares { - out.Shares[i].fsc = &f - } - return &out, err -} - -// GetServiceProperties gets the properties of your storage account's file service. -// File service does not support logging -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties -func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return f.client.getServiceProperties(fileServiceName, f.auth) -} - -// SetServiceProperties sets the properties of your storage account's file service. 
-// File service does not support logging -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties -func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error { - return f.client.setServiceProperties(props, fileServiceName, f.auth) -} - -// retrieves directory or share content -func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - uri := f.client.getEndpoint(fileServiceName, path, params) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth) - if err != nil { - return nil, err - } - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - readAndCloseBody(resp.body) - return nil, err - } - - return resp, nil -} - -// returns true if the specified resource exists -func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) { - if err := f.checkForStorageEmulator(); err != nil { - return false, nil, err - } - - uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) - headers := f.client.getStandardHeaders() - - resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, resp.headers, nil - } - } - return false, nil, err -} - -// creates a resource depending on the specified resource type -func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) { - resp, err := f.createResourceNoClose(path, res, 
urlParams, extraHeaders) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes) -} - -// creates a resource depending on the specified resource type, doesn't close the response body -func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - values := getURLInitValues(compNone, res) - combinedParams := mergeParams(values, urlParams) - uri := f.client.getEndpoint(fileServiceName, path, combinedParams) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return f.client.exec(http.MethodPut, uri, headers, nil, f.auth) -} - -// returns HTTP header data for the specified directory or share -func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) { - resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - return resp.headers, nil -} - -// gets the specified resource, doesn't close the response body -func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - params = mergeParams(params, getURLInitValues(comp, res)) - uri := f.client.getEndpoint(fileServiceName, path, params) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return f.client.exec(verb, uri, headers, nil, f.auth) -} - -// deletes the resource and 
returns the response -func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error { - resp, err := f.deleteResourceNoClose(path, res, options) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// deletes the resource and returns the response, doesn't close the response body -func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options)) - uri := f.client.getEndpoint(fileServiceName, path, values) - return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth) -} - -// merges metadata into extraHeaders and returns extraHeaders -func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { - if metadata == nil && extraHeaders == nil { - return nil - } - if extraHeaders == nil { - extraHeaders = make(map[string]string) - } - for k, v := range metadata { - extraHeaders[userDefinedMetadataHeaderPrefix+k] = v - } - return extraHeaders -} - -// sets extra header data for the specified resource -func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - params := mergeParams(getURLInitValues(comp, res), prepareOptions(options)) - uri := f.client.getEndpoint(fileServiceName, path, params) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth) - if err != nil { - return nil, err - } - defer 
readAndCloseBody(resp.body) - - return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -//checkForStorageEmulator determines if the client is setup for use with -//Azure Storage Emulator, and returns a relevant error -func (f FileServiceClient) checkForStorageEmulator() error { - if f.client.accountName == StorageEmulatorAccountName { - return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go deleted file mode 100644 index 3d9d52d8e35..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go +++ /dev/null @@ -1,201 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "net/http" - "net/url" - "strconv" - "time" -) - -// lease constants. -const ( - leaseHeaderPrefix = "x-ms-lease-" - headerLeaseID = "x-ms-lease-id" - leaseAction = "x-ms-lease-action" - leaseBreakPeriod = "x-ms-lease-break-period" - leaseDuration = "x-ms-lease-duration" - leaseProposedID = "x-ms-proposed-lease-id" - leaseTime = "x-ms-lease-time" - - acquireLease = "acquire" - renewLease = "renew" - changeLease = "change" - releaseLease = "release" - breakLease = "break" -) - -// leasePut is common PUT code for the various acquire/release/break etc functions. 
-func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) { - params := url.Values{"comp": {"lease"}} - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { - return nil, err - } - - return resp.headers, nil -} - -// LeaseOptions includes options for all operations regarding leasing blobs -type LeaseOptions struct { - Timeout uint - Origin string `header:"Origin"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - RequestID string `header:"x-ms-client-request-id"` -} - -// AcquireLease creates a lease for a blob -// returns leaseID acquired -// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum -// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob -func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = acquireLease - - if leaseTimeInSeconds == -1 { - // Do nothing, but don't trigger the following clauses. 
- } else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" { - leaseTimeInSeconds = 60 - } else if leaseTimeInSeconds < 15 { - leaseTimeInSeconds = 15 - } - - headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) - - if proposedLeaseID != "" { - headers[leaseProposedID] = proposedLeaseID - } - - respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options) - if err != nil { - return "", err - } - - returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) - - if returnedLeaseID != "" { - return returnedLeaseID, nil - } - - return "", errors.New("LeaseID not returned") -} - -// BreakLease breaks the lease for a blob -// Returns the timeout remaining in the lease in seconds -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob -func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = breakLease - return b.breakLeaseCommon(headers, options) -} - -// BreakLeaseWithBreakPeriod breaks the lease for a blob -// breakPeriodInSeconds is used to determine how long until new lease can be created. 
-// Returns the timeout remaining in the lease in seconds -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob -func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = breakLease - headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) - return b.breakLeaseCommon(headers, options) -} - -// breakLeaseCommon is common code for both version of BreakLease (with and without break period) -func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) { - - respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options) - if err != nil { - return 0, err - } - - breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) - if breakTimeoutStr != "" { - breakTimeout, err = strconv.Atoi(breakTimeoutStr) - if err != nil { - return 0, err - } - } - - return breakTimeout, nil -} - -// ChangeLease changes a lease ID for a blob -// Returns the new LeaseID acquired -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob -func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = changeLease - headers[headerLeaseID] = currentLeaseID - headers[leaseProposedID] = proposedLeaseID - - respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options) - if err != nil { - return "", err - } - - newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) - if newLeaseID != "" { - return newLeaseID, nil - } - - return "", errors.New("LeaseID not returned") -} - -// ReleaseLease releases the lease for a blob -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob -func (b *Blob) 
ReleaseLease(currentLeaseID string, options *LeaseOptions) error { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = releaseLease - headers[headerLeaseID] = currentLeaseID - - _, err := b.leaseCommonPut(headers, http.StatusOK, options) - if err != nil { - return err - } - - return nil -} - -// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = renewLease - headers[headerLeaseID] = currentLeaseID - - _, err := b.leaseCommonPut(headers, http.StatusOK, options) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go deleted file mode 100644 index 7d9038a5f71..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go +++ /dev/null @@ -1,167 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strconv" - "time" -) - -// Message represents an Azure message. 
-type Message struct { - Queue *Queue - Text string `xml:"MessageText"` - ID string `xml:"MessageId"` - Insertion TimeRFC1123 `xml:"InsertionTime"` - Expiration TimeRFC1123 `xml:"ExpirationTime"` - PopReceipt string `xml:"PopReceipt"` - NextVisible TimeRFC1123 `xml:"TimeNextVisible"` - DequeueCount int `xml:"DequeueCount"` -} - -func (m *Message) buildPath() string { - return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID) -} - -// PutMessageOptions is the set of options can be specified for Put Messsage -// operation. A zero struct does not use any preferences for the request. -type PutMessageOptions struct { - Timeout uint - VisibilityTimeout int - MessageTTL int - RequestID string `header:"x-ms-client-request-id"` -} - -// Put operation adds a new message to the back of the message queue. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message -func (m *Message) Put(options *PutMessageOptions) error { - query := url.Values{} - headers := m.Queue.qsc.client.getStandardHeaders() - - req := putMessageRequest{MessageText: m.Text} - body, nn, err := xmlMarshal(req) - if err != nil { - return err - } - headers["Content-Length"] = strconv.Itoa(nn) - - if options != nil { - if options.VisibilityTimeout != 0 { - query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) - } - if options.MessageTTL != 0 { - query.Set("messagettl", strconv.Itoa(options.MessageTTL)) - } - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - - uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query) - resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - err = xmlUnmarshal(resp.body, m) - if err != nil { - return err - } - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// UpdateMessageOptions is the set of 
options can be specified for Update Messsage -// operation. A zero struct does not use any preferences for the request. -type UpdateMessageOptions struct { - Timeout uint - VisibilityTimeout int - RequestID string `header:"x-ms-client-request-id"` -} - -// Update operation updates the specified message. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message -func (m *Message) Update(options *UpdateMessageOptions) error { - query := url.Values{} - if m.PopReceipt != "" { - query.Set("popreceipt", m.PopReceipt) - } - - headers := m.Queue.qsc.client.getStandardHeaders() - req := putMessageRequest{MessageText: m.Text} - body, nn, err := xmlMarshal(req) - if err != nil { - return err - } - headers["Content-Length"] = strconv.Itoa(nn) - - if options != nil { - if options.VisibilityTimeout != 0 { - query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) - } - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query) - - resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - m.PopReceipt = resp.headers.Get("x-ms-popreceipt") - nextTimeStr := resp.headers.Get("x-ms-time-next-visible") - if nextTimeStr != "" { - nextTime, err := time.Parse(time.RFC1123, nextTimeStr) - if err != nil { - return err - } - m.NextVisible = TimeRFC1123(nextTime) - } - - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// Delete operation deletes the specified message. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx -func (m *Message) Delete(options *QueueServiceOptions) error { - params := url.Values{"popreceipt": {m.PopReceipt}} - headers := m.Queue.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params) - - resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -type putMessageRequest struct { - XMLName xml.Name `xml:"QueueMessage"` - MessageText string `xml:"MessageText"` -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go deleted file mode 100644 index 800adf129dc..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go +++ /dev/null @@ -1,47 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// MetadataLevel determines if operations should return a paylod, -// and it level of detail. 
-type MetadataLevel string - -// This consts are meant to help with Odata supported operations -const ( - OdataTypeSuffix = "@odata.type" - - // Types - - OdataBinary = "Edm.Binary" - OdataDateTime = "Edm.DateTime" - OdataGUID = "Edm.Guid" - OdataInt64 = "Edm.Int64" - - // Query options - - OdataFilter = "$filter" - OdataOrderBy = "$orderby" - OdataTop = "$top" - OdataSkip = "$skip" - OdataCount = "$count" - OdataExpand = "$expand" - OdataSelect = "$select" - OdataSearch = "$search" - - EmptyPayload MetadataLevel = "" - NoMetadata MetadataLevel = "application/json;odata=nometadata" - MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata" - FullMetadata MetadataLevel = "application/json;odata=fullmetadata" -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go deleted file mode 100644 index c59fd4b50b9..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go +++ /dev/null @@ -1,204 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "time" -) - -// GetPageRangesResponse contains the response fields from -// Get Page Ranges call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type GetPageRangesResponse struct { - XMLName xml.Name `xml:"PageList"` - PageList []PageRange `xml:"PageRange"` -} - -// PageRange contains information about a page of a page blob from -// Get Pages Range call. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type PageRange struct { - Start int64 `xml:"Start"` - End int64 `xml:"End"` -} - -var ( - errBlobCopyAborted = errors.New("storage: blob copy is aborted") - errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") -) - -// PutPageOptions includes the options for a put page operation -type PutPageOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfSequenceNumberLessThanOrEqualTo *int `header:"x-ms-if-sequence-number-le"` - IfSequenceNumberLessThan *int `header:"x-ms-if-sequence-number-lt"` - IfSequenceNumberEqualTo *int `header:"x-ms-if-sequence-number-eq"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// WriteRange writes a range of pages to a page blob. -// Ranges must be aligned with 512-byte boundaries and chunk must be of size -// multiplies by 512. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page -func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { - if bytes == nil { - return errors.New("bytes cannot be nil") - } - return b.modifyRange(blobRange, bytes, options) -} - -// ClearRange clears the given range in a page blob. -// Ranges must be aligned with 512-byte boundaries and chunk must be of size -// multiplies by 512. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page -func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error { - return b.modifyRange(blobRange, nil, options) -} - -func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { - if blobRange.End < blobRange.Start { - return errors.New("the value for rangeEnd must be greater than or equal to rangeStart") - } - if blobRange.Start%512 != 0 { - return errors.New("the value for rangeStart must be a modulus of 512") - } - if blobRange.End%512 != 511 { - return errors.New("the value for rangeEnd must be a modulus of 511") - } - - params := url.Values{"comp": {"page"}} - - // default to clear - write := "clear" - var cl uint64 - - // if bytes is not nil then this is an update operation - if bytes != nil { - write = "update" - cl = (blobRange.End - blobRange.Start) + 1 - } - - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-page-write"] = write - headers["x-ms-range"] = blobRange.String() - headers["Content-Length"] = fmt.Sprintf("%v", cl) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth) - if err != nil { - return err - } - readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetPageRangesOptions includes the options for a get page ranges operation -type GetPageRangesOptions struct { - Timeout uint - Snapshot *time.Time - PreviousSnapshot *time.Time - Range *BlobRange - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetPageRanges returns the list of valid page ranges for a page 
blob. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges -func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) { - params := url.Values{"comp": {"pagelist"}} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - if options.PreviousSnapshot != nil { - params.Add("prevsnapshot", timeRfc1123Formatted(*options.PreviousSnapshot)) - } - if options.Range != nil { - headers["Range"] = options.Range.String() - } - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - var out GetPageRangesResponse - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return out, err - } - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// PutPageBlob initializes an empty page blob with specified name and maximum -// size in bytes (size must be aligned to a 512-byte boundary). A page blob must -// be created using this method before writing pages. -// -// See CreateBlockBlobFromReader for more info on creating blobs. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) PutPageBlob(options *PutBlobOptions) error { - if b.Properties.ContentLength%512 != 0 { - return errors.New("Content length must be aligned to a 512-byte boundary") - } - - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength) - headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber) - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypePage) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go deleted file mode 100644 index 499592ebd12..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ /dev/null @@ -1,441 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "time" -) - -const ( - // casing is per Golang's http.Header canonicalizing the header names. - approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" -) - -// QueueAccessPolicy represents each access policy in the queue ACL. -type QueueAccessPolicy struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanAdd bool - CanUpdate bool - CanProcess bool -} - -// QueuePermissions represents the queue ACLs. -type QueuePermissions struct { - AccessPolicies []QueueAccessPolicy -} - -// SetQueuePermissionOptions includes options for a set queue permissions operation -type SetQueuePermissionOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// Queue represents an Azure queue. -type Queue struct { - qsc *QueueServiceClient - Name string - Metadata map[string]string - AproxMessageCount uint64 -} - -func (q *Queue) buildPath() string { - return fmt.Sprintf("/%s", q.Name) -} - -func (q *Queue) buildPathMessages() string { - return fmt.Sprintf("%s/messages", q.buildPath()) -} - -// QueueServiceOptions includes options for some queue service operations -type QueueServiceOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// Create operation creates a queue under the given account. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4 -func (q *Queue) Create(options *QueueServiceOptions) error { - params := url.Values{} - headers := q.qsc.client.getStandardHeaders() - headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - - resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// Delete operation permanently deletes the specified queue. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3 -func (q *Queue) Delete(options *QueueServiceOptions) error { - params := url.Values{} - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// Exists returns true if a queue with given name exists. 
-func (q *Queue) Exists() (bool, error) { - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}}) - resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// SetMetadata operation sets user-defined metadata on the specified queue. -// Metadata is associated with the queue as name-value pairs. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata -func (q *Queue) SetMetadata(options *QueueServiceOptions) error { - params := url.Values{"comp": {"metadata"}} - headers := q.qsc.client.getStandardHeaders() - headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - - resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// GetMetadata operation retrieves user-defined metadata and queue -// properties on the specified queue. Metadata is associated with -// the queue as name-values pairs. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata -// -// Because the way Golang's http client (and http.Header in particular) -// canonicalize header names, the returned metadata names would always -// be all lower case. 
-func (q *Queue) GetMetadata(options *QueueServiceOptions) error { - params := url.Values{"comp": {"metadata"}} - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}}) - - resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return err - } - - aproxMessagesStr := resp.headers.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader)) - if aproxMessagesStr != "" { - aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64) - if err != nil { - return err - } - q.AproxMessageCount = aproxMessages - } - - q.Metadata = getMetadataFromHeaders(resp.headers) - return nil -} - -// GetMessageReference returns a message object with the specified text. -func (q *Queue) GetMessageReference(text string) *Message { - return &Message{ - Queue: q, - Text: text, - } -} - -// GetMessagesOptions is the set of options can be specified for Get -// Messsages operation. A zero struct does not use any preferences for the -// request. -type GetMessagesOptions struct { - Timeout uint - NumOfMessages int - VisibilityTimeout int - RequestID string `header:"x-ms-client-request-id"` -} - -type messages struct { - XMLName xml.Name `xml:"QueueMessagesList"` - Messages []Message `xml:"QueueMessage"` -} - -// GetMessages operation retrieves one or more messages from the front of the -// queue. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages -func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) { - query := url.Values{} - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - if options.NumOfMessages != 0 { - query.Set("numofmessages", strconv.Itoa(options.NumOfMessages)) - } - if options.VisibilityTimeout != 0 { - query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) - } - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query) - - resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) - if err != nil { - return []Message{}, err - } - defer readAndCloseBody(resp.body) - - var out messages - err = xmlUnmarshal(resp.body, &out) - if err != nil { - return []Message{}, err - } - for i := range out.Messages { - out.Messages[i].Queue = q - } - return out.Messages, err -} - -// PeekMessagesOptions is the set of options can be specified for Peek -// Messsage operation. A zero struct does not use any preferences for the -// request. -type PeekMessagesOptions struct { - Timeout uint - NumOfMessages int - RequestID string `header:"x-ms-client-request-id"` -} - -// PeekMessages retrieves one or more messages from the front of the queue, but -// does not alter the visibility of the message. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages -func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) { - query := url.Values{"peekonly": {"true"}} // Required for peek operation - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - if options.NumOfMessages != 0 { - query.Set("numofmessages", strconv.Itoa(options.NumOfMessages)) - } - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query) - - resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) - if err != nil { - return []Message{}, err - } - defer readAndCloseBody(resp.body) - - var out messages - err = xmlUnmarshal(resp.body, &out) - if err != nil { - return []Message{}, err - } - for i := range out.Messages { - out.Messages[i].Queue = q - } - return out.Messages, err -} - -// ClearMessages operation deletes all messages from the specified queue. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages -func (q *Queue) ClearMessages(options *QueueServiceOptions) error { - params := url.Values{} - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params) - - resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// SetPermissions sets up queue permissions -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl -func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error { - body, length, err := generateQueueACLpayload(permissions.AccessPolicies) - if err != nil { - return err - } - - params := url.Values{ - "comp": {"acl"}, - } - headers := q.qsc.client.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(length) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return errors.New("Unable to set permissions") - } - - return nil -} - -func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) { - sil := SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{}, - } - for _, qapd := range policies { - permission := qapd.generateQueuePermissions() - signedIdentifier := 
convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission) - sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) - } - return xmlMarshal(sil) -} - -func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) { - // generate the permissions string (raup). - // still want the end user API to have bool flags. - permissions = "" - - if qapd.CanRead { - permissions += "r" - } - - if qapd.CanAdd { - permissions += "a" - } - - if qapd.CanUpdate { - permissions += "u" - } - - if qapd.CanProcess { - permissions += "p" - } - - return permissions -} - -// GetQueuePermissionOptions includes options for a get queue permissions operation -type GetQueuePermissionOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl -// If timeout is 0 then it will not be passed to Azure -func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) { - params := url.Values{ - "comp": {"acl"}, - } - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - var ap AccessPolicy - err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) - if err != nil { - return nil, err - } - return buildQueueAccessPolicy(ap, &resp.headers), nil -} - -func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions { - permissions := QueuePermissions{ - AccessPolicies: []QueueAccessPolicy{}, - } - - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - qapd := 
QueueAccessPolicy{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a") - qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") - qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p") - - permissions.AccessPolicies = append(permissions.AccessPolicies, qapd) - } - return &permissions -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go deleted file mode 100644 index 28d9ab937e2..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go +++ /dev/null @@ -1,146 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/url" - "strings" - "time" -) - -// QueueSASOptions are options to construct a blob SAS -// URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type QueueSASOptions struct { - QueueSASPermissions - SASOptions -} - -// QueueSASPermissions includes the available permissions for -// a queue SAS URI. 
-type QueueSASPermissions struct { - Read bool - Add bool - Update bool - Process bool -} - -func (q QueueSASPermissions) buildString() string { - permissions := "" - - if q.Read { - permissions += "r" - } - if q.Add { - permissions += "a" - } - if q.Update { - permissions += "u" - } - if q.Process { - permissions += "p" - } - return permissions -} - -// GetSASURI creates an URL to the specified queue which contains the Shared -// Access Signature with specified permissions and expiration time. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) { - canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true) - if err != nil { - return "", err - } - - // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. - // It must include the service name (blob, table, queue or file) for version 2015-02-21 or - // later, the storage account name, and the resource name, and must be URL-decoded. - // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). 
- canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) - if err != nil { - return "", err - } - - signedStart := "" - if options.Start != (time.Time{}) { - signedStart = options.Start.UTC().Format(time.RFC3339) - } - signedExpiry := options.Expiry.UTC().Format(time.RFC3339) - - protocols := "https,http" - if options.UseHTTPS { - protocols = "https" - } - - permissions := options.QueueSASPermissions.buildString() - stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier) - if err != nil { - return "", err - } - - sig := q.qsc.client.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {q.qsc.client.apiVersion}, - "se": {signedExpiry}, - "sp": {permissions}, - "sig": {sig}, - } - - if q.qsc.client.apiVersion >= "2015-04-05" { - sasParams.Add("spr", protocols) - addQueryParameter(sasParams, "sip", options.IP) - } - - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil) - sasURL, err := url.Parse(uri) - if err != nil { - return "", err - } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) { - - if signedVersion >= "2015-02-21" { - canonicalizedResource = "/queue" + canonicalizedResource - } - - // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 - if signedVersion >= "2015-04-05" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", - signedPermissions, - signedStart, - signedExpiry, - canonicalizedResource, - signedIdentifier, - signedIP, - protocols, - signedVersion), nil - - } - - // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - if signedVersion >= "2013-08-15" { - return 
fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil - } - - return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go deleted file mode 100644 index 29febe146f6..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go +++ /dev/null @@ -1,42 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// QueueServiceClient contains operations for Microsoft Azure Queue Storage -// Service. -type QueueServiceClient struct { - client Client - auth authentication -} - -// GetServiceProperties gets the properties of your storage account's queue service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties -func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return q.client.getServiceProperties(queueServiceName, q.auth) -} - -// SetServiceProperties sets the properties of your storage account's queue service. 
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties -func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error { - return q.client.setServiceProperties(props, queueServiceName, q.auth) -} - -// GetQueueReference returns a Container object for the specified queue name. -func (q *QueueServiceClient) GetQueueReference(name string) *Queue { - return &Queue{ - qsc: q, - Name: name, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go deleted file mode 100644 index a14d9d32447..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go +++ /dev/null @@ -1,216 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" - "net/url" - "strconv" -) - -// Share represents an Azure file share. -type Share struct { - fsc *FileServiceClient - Name string `xml:"Name"` - Properties ShareProperties `xml:"Properties"` - Metadata map[string]string -} - -// ShareProperties contains various properties of a share. -type ShareProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - Quota int `xml:"Quota"` -} - -// builds the complete path for this share object. 
-func (s *Share) buildPath() string { - return fmt.Sprintf("/%s", s.Name) -} - -// Create this share under the associated account. -// If a share with the same name already exists, the operation fails. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share -func (s *Share) Create(options *FileRequestOptions) error { - extraheaders := map[string]string{} - if s.Properties.Quota > 0 { - extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) - } - - params := prepareOptions(options) - headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated}) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// CreateIfNotExists creates this share under the associated account if -// it does not exist. Returns true if the share is newly created or false if -// the share already exists. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share -func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) { - extraheaders := map[string]string{} - if s.Properties.Quota > 0 { - extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) - } - - params := prepareOptions(options) - resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - if resp.statusCode == http.StatusCreated { - s.updateEtagAndLastModified(resp.headers) - return true, nil - } - return false, s.FetchAttributes(nil) - } - } - - return false, err -} - -// Delete marks this share for deletion. The share along with any files -// and directories contained within it are later deleted during garbage -// collection. 
If the share does not exist the operation fails -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share -func (s *Share) Delete(options *FileRequestOptions) error { - return s.fsc.deleteResource(s.buildPath(), resourceShare, options) -} - -// DeleteIfExists operation marks this share for deletion if it exists. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share -func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) { - resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// Exists returns true if this share already exists -// on the storage account, otherwise returns false. -func (s *Share) Exists() (bool, error) { - exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare) - if exists { - s.updateEtagAndLastModified(headers) - s.updateQuota(headers) - } - return exists, err -} - -// FetchAttributes retrieves metadata and properties for this share. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties -func (s *Share) FetchAttributes(options *FileRequestOptions) error { - params := prepareOptions(options) - headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - s.updateQuota(headers) - s.Metadata = getMetadataFromHeaders(headers) - - return nil -} - -// GetRootDirectoryReference returns a Directory object at the root of this share. -func (s *Share) GetRootDirectoryReference() *Directory { - return &Directory{ - fsc: s.fsc, - share: s, - } -} - -// ServiceClient returns the FileServiceClient associated with this share. 
-func (s *Share) ServiceClient() *FileServiceClient { - return s.fsc -} - -// SetMetadata replaces the metadata for this share. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetShareMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata -func (s *Share) SetMetadata(options *FileRequestOptions) error { - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// SetProperties sets system properties for this share. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetShareProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties -func (s *Share) SetProperties(options *FileRequestOptions) error { - extraheaders := map[string]string{} - if s.Properties.Quota > 0 { - if s.Properties.Quota > 5120 { - return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) - } - extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) - } - - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (s *Share) updateEtagAndLastModified(headers http.Header) { - s.Properties.Etag = headers.Get("Etag") - s.Properties.LastModified = headers.Get("Last-Modified") -} - -// updates quota value -func (s *Share) updateQuota(headers http.Header) { - quota, err := strconv.Atoi(headers.Get("x-ms-share-quota")) - if err == nil { - s.Properties.Quota = quota - } -} - -// URL gets the canonical URL to this share. This method does not create a publicly accessible -// URL if the share is private and this method does not check if the share exists. -func (s *Share) URL() string { - return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{}) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go deleted file mode 100644 index 056ab398a81..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go +++ /dev/null @@ -1,61 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "strings" - "time" -) - -// AccessPolicyDetailsXML has specifics about an access policy -// annotated with XML details. -type AccessPolicyDetailsXML struct { - StartTime time.Time `xml:"Start"` - ExpiryTime time.Time `xml:"Expiry"` - Permission string `xml:"Permission"` -} - -// SignedIdentifier is a wrapper for a specific policy -type SignedIdentifier struct { - ID string `xml:"Id"` - AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"` -} - -// SignedIdentifiers part of the response from GetPermissions call. -type SignedIdentifiers struct { - SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` -} - -// AccessPolicy is the response type from the GetPermissions call. -type AccessPolicy struct { - SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"` -} - -// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the -// AccessPolicy struct which will get converted to XML. 
-func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier { - return SignedIdentifier{ - ID: id, - AccessPolicy: AccessPolicyDetailsXML{ - StartTime: startTime.UTC().Round(time.Second), - ExpiryTime: expiryTime.UTC().Round(time.Second), - Permission: permissions, - }, - } -} - -func updatePermissions(permissions, permission string) bool { - return strings.Contains(permissions, permission) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go deleted file mode 100644 index c102619c987..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go +++ /dev/null @@ -1,131 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "net/http" - "net/url" - "strconv" -) - -// ServiceProperties represents the storage account service properties -type ServiceProperties struct { - Logging *Logging - HourMetrics *Metrics - MinuteMetrics *Metrics - Cors *Cors -} - -// Logging represents the Azure Analytics Logging settings -type Logging struct { - Version string - Delete bool - Read bool - Write bool - RetentionPolicy *RetentionPolicy -} - -// RetentionPolicy indicates if retention is enabled and for how many days -type RetentionPolicy struct { - Enabled bool - Days *int -} - -// Metrics provide request statistics. -type Metrics struct { - Version string - Enabled bool - IncludeAPIs *bool - RetentionPolicy *RetentionPolicy -} - -// Cors includes all the CORS rules -type Cors struct { - CorsRule []CorsRule -} - -// CorsRule includes all settings for a Cors rule -type CorsRule struct { - AllowedOrigins string - AllowedMethods string - MaxAgeInSeconds int - ExposedHeaders string - AllowedHeaders string -} - -func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) { - query := url.Values{ - "restype": {"service"}, - "comp": {"properties"}, - } - uri := c.getEndpoint(service, "", query) - headers := c.getStandardHeaders() - - resp, err := c.exec(http.MethodGet, uri, headers, nil, auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - var out ServiceProperties - err = xmlUnmarshal(resp.body, &out) - if err != nil { - return nil, err - } - - return &out, nil -} - -func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error { - query := url.Values{ - "restype": {"service"}, - "comp": {"properties"}, - } - uri := c.getEndpoint(service, "", query) - - // Ideally, StorageServiceProperties would be the output struct - // This is to avoid golint stuttering, while generating the correct 
XML - type StorageServiceProperties struct { - Logging *Logging - HourMetrics *Metrics - MinuteMetrics *Metrics - Cors *Cors - } - input := StorageServiceProperties{ - Logging: props.Logging, - HourMetrics: props.HourMetrics, - MinuteMetrics: props.MinuteMetrics, - Cors: props.Cors, - } - - body, length, err := xmlMarshal(input) - if err != nil { - return err - } - - headers := c.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(length) - - resp, err := c.exec(http.MethodPut, uri, headers, body, auth) - if err != nil { - return err - } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go deleted file mode 100644 index 6c01d32ee13..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ /dev/null @@ -1,419 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -const ( - tablesURIPath = "/Tables" - nextTableQueryParameter = "NextTableName" - headerNextPartitionKey = "x-ms-continuation-NextPartitionKey" - headerNextRowKey = "x-ms-continuation-NextRowKey" - nextPartitionKeyQueryParameter = "NextPartitionKey" - nextRowKeyQueryParameter = "NextRowKey" -) - -// TableAccessPolicy are used for SETTING table policies -type TableAccessPolicy struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanAppend bool - CanUpdate bool - CanDelete bool -} - -// Table represents an Azure table. -type Table struct { - tsc *TableServiceClient - Name string `json:"TableName"` - OdataEditLink string `json:"odata.editLink"` - OdataID string `json:"odata.id"` - OdataMetadata string `json:"odata.metadata"` - OdataType string `json:"odata.type"` -} - -// EntityQueryResult contains the response from -// ExecuteQuery and ExecuteQueryNextResults functions. -type EntityQueryResult struct { - OdataMetadata string `json:"odata.metadata"` - Entities []*Entity `json:"value"` - QueryNextLink - table *Table -} - -type continuationToken struct { - NextPartitionKey string - NextRowKey string -} - -func (t *Table) buildPath() string { - return fmt.Sprintf("/%s", t.Name) -} - -func (t *Table) buildSpecificPath() string { - return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name) -} - -// Get gets the referenced table. 
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities -func (t *Table) Get(timeout uint, ml MetadataLevel) error { - if ml == EmptyPayload { - return errEmptyPayload - } - - query := url.Values{ - "timeout": {strconv.FormatUint(uint64(timeout), 10)}, - } - headers := t.tsc.client.getStandardHeaders() - headers[headerAccept] = string(ml) - - uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query) - resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return err - } - - respBody, err := ioutil.ReadAll(resp.body) - if err != nil { - return err - } - err = json.Unmarshal(respBody, t) - if err != nil { - return err - } - return nil -} - -// Create creates the referenced table. -// This function fails if the name is not compliant -// with the specification or the tables already exists. -// ml determines the level of detail of metadata in the operation response, -// or no data at all. 
-// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table -func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error { - uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{ - "timeout": {strconv.FormatUint(uint64(timeout), 10)}, - }) - - type createTableRequest struct { - TableName string `json:"TableName"` - } - req := createTableRequest{TableName: t.Name} - buf := new(bytes.Buffer) - if err := json.NewEncoder(buf).Encode(req); err != nil { - return err - } - - headers := t.tsc.client.getStandardHeaders() - headers = addReturnContentHeaders(headers, ml) - headers = addBodyRelatedHeaders(headers, buf.Len()) - headers = options.addToHeaders(headers) - - resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if ml == EmptyPayload { - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - } else { - if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { - return err - } - } - - if ml != EmptyPayload { - data, err := ioutil.ReadAll(resp.body) - if err != nil { - return err - } - err = json.Unmarshal(data, t) - if err != nil { - return err - } - } - - return nil -} - -// Delete deletes the referenced table. -// This function fails if the table is not present. -// Be advised: Delete deletes all the entries that may be present. 
-// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table -func (t *Table) Delete(timeout uint, options *TableOptions) error { - uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{ - "timeout": {strconv.Itoa(int(timeout))}, - }) - - headers := t.tsc.client.getStandardHeaders() - headers = addReturnContentHeaders(headers, EmptyPayload) - headers = options.addToHeaders(headers) - - resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// QueryOptions includes options for a query entities operation. -// Top, filter and select are OData query options. -type QueryOptions struct { - Top uint - Filter string - Select []string - RequestID string -} - -func (options *QueryOptions) getParameters() (url.Values, map[string]string) { - query := url.Values{} - headers := map[string]string{} - if options != nil { - if options.Top > 0 { - query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) - } - if options.Filter != "" { - query.Add(OdataFilter, options.Filter) - } - if len(options.Select) > 0 { - query.Add(OdataSelect, strings.Join(options.Select, ",")) - } - headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) - } - return query, headers -} - -// QueryEntities returns the entities in the table. -// You can use query options defined by the OData Protocol specification. 
-// -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities -func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) { - if ml == EmptyPayload { - return nil, errEmptyPayload - } - query, headers := options.getParameters() - query = addTimeout(query, timeout) - uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query) - return t.queryEntities(uri, headers, ml) -} - -// NextResults returns the next page of results -// from a QueryEntities or NextResults operation. -// -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination -func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) { - if eqr == nil { - return nil, errNilPreviousResult - } - if eqr.NextLink == nil { - return nil, errNilNextLink - } - headers := options.addToHeaders(map[string]string{}) - return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml) -} - -// SetPermissions sets up table ACL permissions -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL -func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error { - params := url.Values{"comp": {"acl"}, - "timeout": {strconv.Itoa(int(timeout))}, - } - - uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) - headers := t.tsc.client.getStandardHeaders() - headers = options.addToHeaders(headers) - - body, length, err := generateTableACLPayload(tap) - if err != nil { - return err - } - headers["Content-Length"] = strconv.Itoa(length) - - resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -func 
generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { - sil := SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{}, - } - for _, tap := range policies { - permission := generateTablePermissions(&tap) - signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission) - sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) - } - return xmlMarshal(sil) -} - -// GetPermissions gets the table ACL permissions -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl -func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) { - params := url.Values{"comp": {"acl"}, - "timeout": {strconv.Itoa(int(timeout))}, - } - - uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) - headers := t.tsc.client.getStandardHeaders() - headers = options.addToHeaders(headers) - - resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - var ap AccessPolicy - err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) - if err != nil { - return nil, err - } - return updateTableAccessPolicy(ap), nil -} - -func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) { - headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders()) - if ml != EmptyPayload { - headers[headerAccept] = string(ml) - } - - resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - data, err := ioutil.ReadAll(resp.body) - if err != nil { - return nil, err - } - var entities EntityQueryResult - err = json.Unmarshal(data, 
&entities) - if err != nil { - return nil, err - } - - for i := range entities.Entities { - entities.Entities[i].Table = t - } - entities.table = t - - contToken := extractContinuationTokenFromHeaders(resp.headers) - if contToken == nil { - entities.NextLink = nil - } else { - originalURI, err := url.Parse(uri) - if err != nil { - return nil, err - } - v := originalURI.Query() - v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey) - v.Set(nextRowKeyQueryParameter, contToken.NextRowKey) - newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v) - entities.NextLink = &newURI - entities.ml = ml - } - - return &entities, nil -} - -func extractContinuationTokenFromHeaders(h http.Header) *continuationToken { - ct := continuationToken{ - NextPartitionKey: h.Get(headerNextPartitionKey), - NextRowKey: h.Get(headerNextRowKey), - } - - if ct.NextPartitionKey != "" && ct.NextRowKey != "" { - return &ct - } - return nil -} - -func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { - taps := []TableAccessPolicy{} - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - tap := TableAccessPolicy{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a") - tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") - tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") - - taps = append(taps, tap) - } - return taps -} - -func generateTablePermissions(tap *TableAccessPolicy) (permissions string) { - // generate the permissions string (raud). - // still want the end user API to have bool flags. 
- permissions = "" - - if tap.CanRead { - permissions += "r" - } - - if tap.CanAppend { - permissions += "a" - } - - if tap.CanUpdate { - permissions += "u" - } - - if tap.CanDelete { - permissions += "d" - } - return permissions -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go deleted file mode 100644 index 3f882417c65..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go +++ /dev/null @@ -1,316 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "mime/multipart" - "net/http" - "net/textproto" - "sort" - "strings" - - "github.com/satori/uuid" -) - -// Operation type. Insert, Delete, Replace etc. -type Operation int - -// consts for batch operations. -const ( - InsertOp = Operation(1) - DeleteOp = Operation(2) - ReplaceOp = Operation(3) - MergeOp = Operation(4) - InsertOrReplaceOp = Operation(5) - InsertOrMergeOp = Operation(6) -) - -// BatchEntity used for tracking Entities to operate on and -// whether operations (replace/merge etc) should be forced. -// Wrapper for regular Entity with additional data specific for the entity. -type BatchEntity struct { - *Entity - Force bool - Op Operation -} - -// TableBatch stores all the entities that will be operated on during a batch process. 
-// Entities can be inserted, replaced or deleted. -type TableBatch struct { - BatchEntitySlice []BatchEntity - - // reference to table we're operating on. - Table *Table -} - -// defaultChangesetHeaders for changeSets -var defaultChangesetHeaders = map[string]string{ - "Accept": "application/json;odata=minimalmetadata", - "Content-Type": "application/json", - "Prefer": "return-no-content", -} - -// NewBatch return new TableBatch for populating. -func (t *Table) NewBatch() *TableBatch { - return &TableBatch{ - Table: t, - } -} - -// InsertEntity adds an entity in preparation for a batch insert. -func (t *TableBatch) InsertEntity(entity *Entity) { - be := BatchEntity{Entity: entity, Force: false, Op: InsertOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace. -func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) { - be := BatchEntity{Entity: entity, Force: false, Op: InsertOrReplaceOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag -func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) { - t.InsertOrReplaceEntity(entity, true) -} - -// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge. -func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) { - be := BatchEntity{Entity: entity, Force: false, Op: InsertOrMergeOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag -func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) { - t.InsertOrMergeEntity(entity, true) -} - -// ReplaceEntity adds an entity in preparation for a batch replace. 
-func (t *TableBatch) ReplaceEntity(entity *Entity) { - be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// DeleteEntity adds an entity in preparation for a batch delete -func (t *TableBatch) DeleteEntity(entity *Entity, force bool) { - be := BatchEntity{Entity: entity, Force: false, Op: DeleteOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag -func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) { - t.DeleteEntity(entity, true) -} - -// MergeEntity adds an entity in preparation for a batch merge -func (t *TableBatch) MergeEntity(entity *Entity) { - be := BatchEntity{Entity: entity, Force: false, Op: MergeOp} - t.BatchEntitySlice = append(t.BatchEntitySlice, be) -} - -// ExecuteBatch executes many table operations in one request to Azure. -// The operations can be combinations of Insert, Delete, Replace and Merge -// Creates the inner changeset body (various operations, Insert, Delete etc) then creates the outer request packet that encompasses -// the changesets. 
-// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions -func (t *TableBatch) ExecuteBatch() error { - changesetBoundary := fmt.Sprintf("changeset_%s", uuid.NewV1()) - uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil) - changesetBody, err := t.generateChangesetBody(changesetBoundary) - if err != nil { - return err - } - - boundary := fmt.Sprintf("batch_%s", uuid.NewV1()) - body, err := generateBody(changesetBody, changesetBoundary, boundary) - if err != nil { - return err - } - - headers := t.Table.tsc.client.getStandardHeaders() - headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary) - - resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth) - if err != nil { - return err - } - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil { - - // check which batch failed. - operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value) - requestID, date, version := getDebugHeaders(resp.headers) - return AzureStorageServiceError{ - StatusCode: resp.statusCode, - Code: resp.odata.Err.Code, - RequestID: requestID, - Date: date, - APIVersion: version, - Message: operationFailedMessage, - } - } - - return nil -} - -// getFailedOperation parses the original Azure error string and determines which operation failed -// and generates appropriate message. -func (t *TableBatch) getFailedOperation(errorMessage string) string { - // errorMessage consists of "number:string" we just need the number. 
- sp := strings.Split(errorMessage, ":") - if len(sp) > 1 { - msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage) - return msg - } - - // cant parse the message, just return the original message to client - return errorMessage -} - -// generateBody generates the complete body for the batch request. -func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) { - - body := new(bytes.Buffer) - writer := multipart.NewWriter(body) - writer.SetBoundary(boundary) - h := make(textproto.MIMEHeader) - h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary)) - batchWriter, err := writer.CreatePart(h) - if err != nil { - return nil, err - } - batchWriter.Write(changeSetBody.Bytes()) - writer.Close() - return body, nil -} - -// generateChangesetBody generates the individual changesets for the various operations within the batch request. -// There is a changeset for Insert, Delete, Merge etc. -func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) { - - body := new(bytes.Buffer) - writer := multipart.NewWriter(body) - writer.SetBoundary(changesetBoundary) - - for _, be := range t.BatchEntitySlice { - t.generateEntitySubset(&be, writer) - } - - writer.Close() - return body, nil -} - -// generateVerb generates the HTTP request VERB required for each changeset. 
-func generateVerb(op Operation) (string, error) { - switch op { - case InsertOp: - return http.MethodPost, nil - case DeleteOp: - return http.MethodDelete, nil - case ReplaceOp, InsertOrReplaceOp: - return http.MethodPut, nil - case MergeOp, InsertOrMergeOp: - return "MERGE", nil - default: - return "", errors.New("Unable to detect operation") - } -} - -// generateQueryPath generates the query path for within the changesets -// For inserts it will just be a table query path (table name) -// but for other operations (modifying an existing entity) then -// the partition/row keys need to be generated. -func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string { - if op == InsertOp { - return entity.Table.buildPath() - } - return entity.buildPath() -} - -// generateGenericOperationHeaders generates common headers for a given operation. -func generateGenericOperationHeaders(be *BatchEntity) map[string]string { - retval := map[string]string{} - - for k, v := range defaultChangesetHeaders { - retval[k] = v - } - - if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp { - if be.Force || be.Entity.OdataEtag == "" { - retval["If-Match"] = "*" - } else { - retval["If-Match"] = be.Entity.OdataEtag - } - } - - return retval -} - -// generateEntitySubset generates body payload for particular batch entity -func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error { - - h := make(textproto.MIMEHeader) - h.Set(headerContentType, "application/http") - h.Set(headerContentTransferEncoding, "binary") - - verb, err := generateVerb(batchEntity.Op) - if err != nil { - return err - } - - genericOpHeadersMap := generateGenericOperationHeaders(batchEntity) - queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity) - uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil) - - operationWriter, err := writer.CreatePart(h) - if err != nil { - return err - } - - urlAndVerb := fmt.Sprintf("%s %s 
HTTP/1.1\r\n", verb, uri) - operationWriter.Write([]byte(urlAndVerb)) - writeHeaders(genericOpHeadersMap, &operationWriter) - operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body. - - // delete operation doesn't need a body. - if batchEntity.Op != DeleteOp { - //var e Entity = batchEntity.Entity - body, err := json.Marshal(batchEntity.Entity) - if err != nil { - return err - } - operationWriter.Write(body) - } - - return nil -} - -func writeHeaders(h map[string]string, writer *io.Writer) { - // This way it is guaranteed the headers will be written in a sorted order - var keys []string - for k := range h { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - (*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k]))) - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go deleted file mode 100644 index 456bee7733a..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go +++ /dev/null @@ -1,204 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -const ( - headerAccept = "Accept" - headerEtag = "Etag" - headerPrefer = "Prefer" - headerXmsContinuation = "x-ms-Continuation-NextTableName" -) - -// TableServiceClient contains operations for Microsoft Azure Table Storage -// Service. -type TableServiceClient struct { - client Client - auth authentication -} - -// TableOptions includes options for some table operations -type TableOptions struct { - RequestID string -} - -func (options *TableOptions) addToHeaders(h map[string]string) map[string]string { - if options != nil { - h = addToHeaders(h, "x-ms-client-request-id", options.RequestID) - } - return h -} - -// QueryNextLink includes information for getting the next page of -// results in query operations -type QueryNextLink struct { - NextLink *string - ml MetadataLevel -} - -// GetServiceProperties gets the properties of your storage account's table service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties -func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return t.client.getServiceProperties(tableServiceName, t.auth) -} - -// SetServiceProperties sets the properties of your storage account's table service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties -func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error { - return t.client.setServiceProperties(props, tableServiceName, t.auth) -} - -// GetTableReference returns a Table object for the specified table name. 
-func (t *TableServiceClient) GetTableReference(name string) *Table { - return &Table{ - tsc: t, - Name: name, - } -} - -// QueryTablesOptions includes options for some table operations -type QueryTablesOptions struct { - Top uint - Filter string - RequestID string -} - -func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) { - query := url.Values{} - headers := map[string]string{} - if options != nil { - if options.Top > 0 { - query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) - } - if options.Filter != "" { - query.Add(OdataFilter, options.Filter) - } - headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) - } - return query, headers -} - -// QueryTables returns the tables in the storage account. -// You can use query options defined by the OData Protocol specification. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables -func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) { - query, headers := options.getParameters() - uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query) - return t.queryTables(uri, headers, ml) -} - -// NextResults returns the next page of results -// from a QueryTables or a NextResults operation. -// -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination -func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) { - if tqr == nil { - return nil, errNilPreviousResult - } - if tqr.NextLink == nil { - return nil, errNilNextLink - } - headers := options.addToHeaders(map[string]string{}) - - return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml) -} - -// TableQueryResult contains the response from -// QueryTables and QueryTablesNextResults functions. 
-type TableQueryResult struct { - OdataMetadata string `json:"odata.metadata"` - Tables []Table `json:"value"` - QueryNextLink - tsc *TableServiceClient -} - -func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) { - if ml == EmptyPayload { - return nil, errEmptyPayload - } - headers = mergeHeaders(headers, t.client.getStandardHeaders()) - headers[headerAccept] = string(ml) - - resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - respBody, err := ioutil.ReadAll(resp.body) - if err != nil { - return nil, err - } - var out TableQueryResult - err = json.Unmarshal(respBody, &out) - if err != nil { - return nil, err - } - - for i := range out.Tables { - out.Tables[i].tsc = t - } - out.tsc = t - - nextLink := resp.headers.Get(http.CanonicalHeaderKey(headerXmsContinuation)) - if nextLink == "" { - out.NextLink = nil - } else { - originalURI, err := url.Parse(uri) - if err != nil { - return nil, err - } - v := originalURI.Query() - v.Set(nextTableQueryParameter, nextLink) - newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v) - out.NextLink = &newURI - out.ml = ml - } - - return &out, nil -} - -func addBodyRelatedHeaders(h map[string]string, length int) map[string]string { - h[headerContentType] = "application/json" - h[headerContentLength] = fmt.Sprintf("%v", length) - h[headerAcceptCharset] = "UTF-8" - return h -} - -func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string { - if ml != EmptyPayload { - h[headerPrefer] = "return-content" - h[headerAccept] = string(ml) - } else { - h[headerPrefer] = "return-no-content" - // From API version 2015-12-11 onwards, Accept header is required - h[headerAccept] = string(NoMetadata) - } - return h -} diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go deleted file mode 100644 index 7734b8f886f..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ /dev/null @@ -1,235 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -var ( - fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)) - accountSASOptions = AccountSASTokenOptions{ - Services: Services{ - Blob: true, - }, - ResourceTypes: ResourceTypes{ - Service: true, - Container: true, - Object: true, - }, - Permissions: Permissions{ - Read: true, - Write: true, - Delete: true, - List: true, - Add: true, - Create: true, - Update: true, - Process: true, - }, - Expiry: fixedTime, - UseHTTPS: true, - } -) - -func (c Client) computeHmac256(message string) string { - h := hmac.New(sha256.New, c.accountKey) - h.Write([]byte(message)) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func currentTimeRfc1123Formatted() string { - return timeRfc1123Formatted(time.Now().UTC()) -} - -func timeRfc1123Formatted(t time.Time) string { - return t.Format(http.TimeFormat) -} - -func mergeParams(v1, v2 url.Values) url.Values { - out := 
url.Values{} - for k, v := range v1 { - out[k] = v - } - for k, v := range v2 { - vals, ok := out[k] - if ok { - vals = append(vals, v...) - out[k] = vals - } else { - out[k] = v - } - } - return out -} - -func prepareBlockListRequest(blocks []Block) string { - s := `` - for _, v := range blocks { - s += fmt.Sprintf("<%s>%s", v.Status, v.ID, v.Status) - } - s += `` - return s -} - -func xmlUnmarshal(body io.Reader, v interface{}) error { - data, err := ioutil.ReadAll(body) - if err != nil { - return err - } - return xml.Unmarshal(data, v) -} - -func xmlMarshal(v interface{}) (io.Reader, int, error) { - b, err := xml.Marshal(v) - if err != nil { - return nil, 0, err - } - return bytes.NewReader(b), len(b), nil -} - -func headersFromStruct(v interface{}) map[string]string { - headers := make(map[string]string) - value := reflect.ValueOf(v) - for i := 0; i < value.NumField(); i++ { - key := value.Type().Field(i).Tag.Get("header") - if key != "" { - reflectedValue := reflect.Indirect(value.Field(i)) - var val string - if reflectedValue.IsValid() { - switch reflectedValue.Type() { - case reflect.TypeOf(fixedTime): - val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time)) - case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)): - val = strconv.FormatUint(reflectedValue.Uint(), 10) - case reflect.TypeOf(int(0)): - val = strconv.FormatInt(reflectedValue.Int(), 10) - default: - val = reflectedValue.String() - } - } - if val != "" { - headers[key] = val - } - } - } - return headers -} - -// merges extraHeaders into headers and returns headers -func mergeHeaders(headers, extraHeaders map[string]string) map[string]string { - for k, v := range extraHeaders { - headers[k] = v - } - return headers -} - -func addToHeaders(h map[string]string, key, value string) map[string]string { - if value != "" { - h[key] = value - } - return h -} - -func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string { - if value != nil { - h = 
addToHeaders(h, key, timeRfc1123Formatted(*value)) - } - return h -} - -func addTimeout(params url.Values, timeout uint) url.Values { - if timeout > 0 { - params.Add("timeout", fmt.Sprintf("%v", timeout)) - } - return params -} - -func addSnapshot(params url.Values, snapshot *time.Time) url.Values { - if snapshot != nil { - params.Add("snapshot", snapshot.Format("2006-01-02T15:04:05.0000000Z")) - } - return params -} - -func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) { - var out time.Time - var err error - outStr := h.Get(key) - if outStr != "" { - out, err = time.Parse(time.RFC1123, outStr) - if err != nil { - return nil, err - } - } - return &out, nil -} - -// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling -type TimeRFC1123 time.Time - -// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout. -func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var value string - d.DecodeElement(&value, &start) - parse, err := time.Parse(time.RFC1123, value) - if err != nil { - return err - } - *t = TimeRFC1123(parse) - return nil -} - -// returns a map of custom metadata values from the specified HTTP header -func getMetadataFromHeaders(header http.Header) map[string]string { - metadata := make(map[string]string) - for k, v := range header { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". 
- k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["lol"] = content of the last X-Ms-Meta-Lol header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - - if len(metadata) == 0 { - return nil - } - - return metadata -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go deleted file mode 100644 index 67ff6ca03fe..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "io" - "net/http" -) - -func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { - req.ContentLength = lr.N -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go deleted file mode 100644 index eada102c0cf..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "io" - "io/ioutil" - "net/http" -) - -func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { - req.ContentLength = lr.N - snapshot := *lr - req.GetBody = func() (io.ReadCloser, error) { - r := snapshot - return ioutil.NopCloser(&r), nil - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go deleted file mode 100644 index 1cd3e03d12a..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -var ( - sdkVersion = "10.0.2" -) diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE deleted file mode 100644 index b9d6a27ea92..00000000000 --- a/vendor/github.com/Azure/go-autorest/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md deleted file mode 100644 index a17cf98c621..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ /dev/null @@ -1,253 +0,0 @@ -# Azure Active Directory library for Go - -This project provides a stand alone Azure Active Directory library for Go. The code was extracted -from [go-autorest](https://github.com/Azure/go-autorest/) project, which is used as a base for -[azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go). - - -## Installation - -``` -go get -u github.com/Azure/go-autorest/autorest/adal -``` - -## Usage - -An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) follow these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). - -### Register an Azure AD Application with secret - - -1. Register a new application with a `secret` credential - - ``` - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --password secret - ``` - -2. 
Create a service principal using the `Application ID` from previous step - - ``` - az ad sp create --id "Application ID" - ``` - - * Replace `Application ID` with `appId` from step 1. - -### Register an Azure AD Application with certificate - -1. Create a private key - - ``` - openssl genrsa -out "example-app.key" 2048 - ``` - -2. Create the certificate - - ``` - openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" - openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 - ``` - -3. Create the PKCS12 version of the certificate containing also the private key - - ``` - openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: - - ``` - -4. Register a new application with the certificate content form `example-app.crt` - - ``` - certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" - - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --key-usage Verify --end-date 2018-01-01 \ - --key-value "${certificateContents}" - ``` - -5. Create a service principal using the `Application ID` from previous step - - ``` - az ad sp create --id "APPLICATION_ID" - ``` - - * Replace `APPLICATION_ID` with `appId` from step 4. - - -### Grant the necessary permissions - -Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained -level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) -which can be assigned to a service principal of an Azure AD application depending of your needs. - -``` -az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" -``` - -* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step. -* Replace the `ROLE_NAME` with a role name of your choice. 
- -It is also possible to define custom role definitions. - -``` -az role definition create --role-definition role-definition.json -``` - -* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. - - -### Acquire Access Token - -The common configuration used by all flows: - -```Go -const activeDirectoryEndpoint = "https://login.microsoftonline.com/" -tenantID := "TENANT_ID" -oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) - -applicationID := "APPLICATION_ID" - -callback := func(token adal.Token) error { - // This is called after the token is acquired -} - -// The resource for which the token is acquired -resource := "https://management.core.windows.net/" -``` - -* Replace the `TENANT_ID` with your tenant ID. -* Replace the `APPLICATION_ID` with the value from previous section. - -#### Client Credentials - -```Go -applicationSecret := "APPLICATION_SECRET" - -spt, err := adal.NewServicePrincipalToken( - oauthConfig, - appliationID, - applicationSecret, - resource, - callbacks...) -if err != nil { - return nil, err -} - -// Acquire a new access token -err = spt.Refresh() -if (err == nil) { - token := spt.Token -} -``` - -* Replace the `APPLICATION_SECRET` with the `password` value from previous section. 
- -#### Client Certificate - -```Go -certificatePath := "./example-app.pfx" - -certData, err := ioutil.ReadFile(certificatePath) -if err != nil { - return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) -} - -// Get the certificate and private key from pfx file -certificate, rsaPrivateKey, err := decodePkcs12(certData, "") -if err != nil { - return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) -} - -spt, err := adal.NewServicePrincipalTokenFromCertificate( - oauthConfig, - applicationID, - certificate, - rsaPrivateKey, - resource, - callbacks...) - -// Acquire a new access token -err = spt.Refresh() -if (err == nil) { - token := spt.Token -} -``` - -* Update the certificate path to point to the example-app.pfx file which was created in previous section. - - -#### Device Code - -```Go -oauthClient := &http.Client{} - -// Acquire the device code -deviceCode, err := adal.InitiateDeviceAuth( - oauthClient, - oauthConfig, - applicationID, - resource) -if err != nil { - return nil, fmt.Errorf("Failed to start device auth flow: %s", err) -} - -// Display the authentication message -fmt.Println(*deviceCode.Message) - -// Wait here until the user is authenticated -token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) -if err != nil { - return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) -} - -spt, err := adal.NewServicePrincipalTokenFromManualToken( - oauthConfig, - applicationID, - resource, - *token, - callbacks...) - -if (err == nil) { - token := spt.Token -} -``` - -### Command Line Tool - -A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. 
- -``` -adal -h - -Usage of ./adal: - -applicationId string - application id - -certificatePath string - path to pk12/PFC application certificate - -mode string - authentication mode (device, secret, cert, refresh) (default "device") - -resource string - resource for which the token is requested - -secret string - application secret - -tenantId string - tenant id - -tokenCachePath string - location of oath token cache (default "/home/cgc/.adal/accessToken.json") -``` - -Example acquire a token for `https://management.core.windows.net/` using device code flow: - -``` -adal -mode device \ - -applicationId "APPLICATION_ID" \ - -tenantId "TENANT_ID" \ - -resource https://management.core.windows.net/ - -``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go deleted file mode 100644 index 49e9214d598..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ /dev/null @@ -1,65 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "fmt" - "net/url" -) - -const ( - activeDirectoryAPIVersion = "1.0" -) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorityEndpoint url.URL - AuthorizeEndpoint url.URL - TokenEndpoint url.URL - DeviceCodeEndpoint url.URL -} - -// NewOAuthConfig returns an OAuthConfig with tenant specific urls -func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" - u, err := url.Parse(activeDirectoryEndpoint) - if err != nil { - return nil, err - } - authorityURL, err := u.Parse(tenantID) - if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - - return &OAuthConfig{ - AuthorityEndpoint: *authorityURL, - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go deleted file mode 100644 index b38f4c24589..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ /dev/null @@ -1,242 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* - This file is largely based on rjw57/oauth2device's code, with the follow differences: - * scope -> resource, and only allow a single one - * receive "Message" in the DeviceCode struct and show it to users as the prompt - * azure-xplat-cli has the following behavior that this emulates: - - does not send client_secret during the token exchange - - sends resource again in the token exchange request -*/ - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" -) - -const ( - logPrefix = "autorest/adal/devicetoken:" -) - -var ( - // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow - ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) - - // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow - ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) - - // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow - ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) - - // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow - ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) - - // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow - ErrDeviceSlowDown = fmt.Errorf("%s Error while 
retrieving OAuth token: Slow Down", logPrefix) - - // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow - ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) - - // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow - ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) - - errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" - errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" - errTokenSendingFails = "Error occurred while sending request with device code for a token" - errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" - errStatusNotOK = "Error HTTP status != 200" -) - -// DeviceCode is the object returned by the device auth endpoint -// It contains information to instruct the user to complete the auth flow -type DeviceCode struct { - DeviceCode *string `json:"device_code,omitempty"` - UserCode *string `json:"user_code,omitempty"` - VerificationURL *string `json:"verification_url,omitempty"` - ExpiresIn *int64 `json:"expires_in,string,omitempty"` - Interval *int64 `json:"interval,string,omitempty"` - - Message *string `json:"message"` // Azure specific - Resource string // store the following, stored when initiating, used when exchanging - OAuthConfig OAuthConfig - ClientID string -} - -// TokenError is the object returned by the token exchange endpoint -// when something is amiss -type TokenError struct { - Error *string `json:"error,omitempty"` - ErrorCodes []int `json:"error_codes,omitempty"` - ErrorDescription *string `json:"error_description,omitempty"` - Timestamp *string `json:"timestamp,omitempty"` - TraceID *string `json:"trace_id,omitempty"` -} - -// DeviceToken is the object return by the token exchange endpoint -// It can either 
look like a Token or an ErrorToken, so put both here -// and check for presence of "Error" to know if we are in error state -type deviceToken struct { - Token - TokenError -} - -// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - v := url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) - } - - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrDeviceCodeEmpty - } - - var code DeviceCode - err = json.Unmarshal(rb, &code) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - code.ClientID = clientID - code.Resource = resource - code.OAuthConfig = oauthConfig - - return &code, nil -} - -// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has: been completed, timed out, or otherwise failed -func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - v := url.Values{ - "client_id": 
[]string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrOAuthTokenEmpty - } - - var token deviceToken - err = json.Unmarshal(rb, &token) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if token.Error == nil { - return &token.Token, nil - } - - switch *token.Error { - case "authorization_pending": - return nil, ErrDeviceAuthorizationPending - case "slow_down": - return nil, ErrDeviceSlowDown - case "access_denied": - return nil, ErrDeviceAccessDenied - case "code_expired": - return nil, ErrDeviceCodeExpired - default: - return nil, ErrDeviceGeneric - } -} - -// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. -// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. 
-func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - intervalDuration := time.Duration(*code.Interval) * time.Second - waitDuration := intervalDuration - - for { - token, err := CheckForUserCompletion(sender, code) - - if err == nil { - return token, nil - } - - switch err { - case ErrDeviceSlowDown: - waitDuration += waitDuration - case ErrDeviceAuthorizationPending: - // noop - default: // everything else is "fatal" to us - return nil, err - } - - if waitDuration > (intervalDuration * 3) { - return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) - } - - time.Sleep(waitDuration) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go deleted file mode 100644 index 5e02d52ac27..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows - -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// msiPath is the path to the MSI Extension settings file (to discover the endpoint) -var msiPath = "/var/lib/waagent/ManagedIdentity-Settings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go deleted file mode 100644 index 261b568829c..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "os" - "strings" -) - -// msiPath is the path to the MSI Extension settings file (to discover the endpoint) -var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), "WindowsAzure/Config/ManagedIdentity-Settings"}, "/") diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go deleted file mode 100644 index 9e15f2751f2..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ /dev/null @@ -1,73 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// LoadToken restores a Token object from a file located at 'path'. -func LoadToken(path string) (*Token, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - } - defer file.Close() - - var token Token - - dec := json.NewDecoder(file) - if err = dec.Decode(&token); err != nil { - return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) - } - return &token, nil -} - -// SaveToken persists an oauth token at the given location on disk. -// It moves the new file into place so it can safely be used to replace an existing file -// that maybe accessed by multiple processes. 
-func SaveToken(path string, mode os.FileMode, token Token) error { - dir := filepath.Dir(path) - err := os.MkdirAll(dir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) - } - - newFile, err := ioutil.TempFile(dir, "token") - if err != nil { - return fmt.Errorf("failed to create the temp file to write the token: %v", err) - } - tempPath := newFile.Name() - - if err := json.NewEncoder(newFile).Encode(token); err != nil { - return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) - } - if err := newFile.Close(); err != nil { - return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) - } - - // Atomic replace to avoid multi-writer file corruptions - if err := os.Rename(tempPath, path); err != nil { - return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) - } - if err := os.Chmod(path, mode); err != nil { - return fmt.Errorf("failed to chmod the token file %s: %v", path, err) - } - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go deleted file mode 100644 index 0e5ad14d396..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ /dev/null @@ -1,60 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "net/http" -) - -const ( - contentType = "Content-Type" - mimeTypeFormPost = "application/x-www-form-urlencoded" -) - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to -// the Sender. Decorators are applied in the order received, but their affect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). 
-func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go deleted file mode 100644 index 67dd97a18c1..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ /dev/null @@ -1,427 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest/date" - "github.com/dgrijalva/jwt-go" -) - -const ( - defaultRefresh = 5 * time.Minute - - // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow - OAuthGrantTypeDeviceCode = "device_code" - - // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows - OAuthGrantTypeClientCredentials = "client_credentials" - - // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows - OAuthGrantTypeRefreshToken = "refresh_token" - - // metadataHeader is the header required by MSI extension - metadataHeader = "Metadata" -) - -// OAuthTokenProvider is an interface which should be implemented by an access token retriever -type OAuthTokenProvider interface { - OAuthToken() string -} - -// Refresher is an interface for token refresh functionality -type Refresher interface { - Refresh() error - RefreshExchange(resource string) error - EnsureFresh() error -} - -// TokenRefreshCallback is the type representing callbacks that will be called after -// a successful token refresh -type TokenRefreshCallback func(Token) error - -// Token encapsulates the access token used to authorize Azure requests. -type Token struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` -} - -// Expires returns the time.Time when the Token expires. 
-func (t Token) Expires() time.Time { - s, err := strconv.Atoi(t.ExpiresOn) - if err != nil { - s = -3600 - } - - expiration := date.NewUnixTimeFromSeconds(float64(s)) - - return time.Time(expiration).UTC() -} - -// IsExpired returns true if the Token is expired, false otherwise. -func (t Token) IsExpired() bool { - return t.WillExpireIn(0) -} - -// WillExpireIn returns true if the Token will expire after the passed time.Duration interval -// from now, false otherwise. -func (t Token) WillExpireIn(d time.Duration) bool { - return !t.Expires().After(time.Now().Add(d)) -} - -//OAuthToken return the current access token -func (t *Token) OAuthToken() string { - return t.AccessToken -} - -// ServicePrincipalNoSecret represents a secret type that contains no secret -// meaning it is not valid for fetching a fresh token. This is used by Manual -type ServicePrincipalNoSecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret -// It only returns an error for the ServicePrincipalNoSecret type -func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") -} - -// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error -} - -// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. -type ServicePrincipalTokenSecret struct { - ClientSecret string -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using the client_secret. 
-func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("client_secret", tokenSecret.ClientSecret) - return nil -} - -// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. -type ServicePrincipalCertificateSecret struct { - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. -type ServicePrincipalMSISecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return nil -} - -// SignJwt returns the JWT signed with the certificate's private key. -func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { - hasher := sha1.New() - _, err := hasher.Write(secret.Certificate.Raw) - if err != nil { - return "", err - } - - thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - - // The jti (JWT ID) claim provides a unique identifier for the JWT. - jti := make([]byte, 20) - _, err = rand.Read(jti) - if err != nil { - return "", err - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Header["x5t"] = thumbprint - token.Claims = jwt.MapClaims{ - "aud": spt.oauthConfig.TokenEndpoint.String(), - "iss": spt.clientID, - "sub": spt.clientID, - "jti": base64.URLEncoding.EncodeToString(jti), - "nbf": time.Now().Unix(), - "exp": time.Now().Add(time.Hour * 24).Unix(), - } - - signedString, err := token.SignedString(secret.PrivateKey) - return signedString, err -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. 
-func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.SignJwt(spt) - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// ServicePrincipalToken encapsulates a Token created for a Service Principal. -type ServicePrincipalToken struct { - Token - - secret ServicePrincipalSecret - oauthConfig OAuthConfig - clientID string - resource string - autoRefresh bool - refreshWithin time.Duration - sender Sender - - refreshCallbacks []TokenRefreshCallback -} - -// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. -func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - spt := &ServicePrincipalToken{ - oauthConfig: oauthConfig, - secret: secret, - clientID: id, - resource: resource, - autoRefresh: true, - refreshWithin: defaultRefresh, - sender: &http.Client{}, - refreshCallbacks: callbacks, - } - return spt, nil -} - -// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token -func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalNoSecret{}, - callbacks...) - if err != nil { - return nil, err - } - - spt.Token = token - - return spt, nil -} - -// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal -// credentials scoped to the named resource. 
-func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalTokenSecret{ - ClientSecret: secret, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. -func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - callbacks..., - ) -} - -// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. -func GetMSIVMEndpoint() (string, error) { - return getMSIVMEndpoint(msiPath) -} - -func getMSIVMEndpoint(path string) (string, error) { - // Read MSI settings - bytes, err := ioutil.ReadFile(path) - if err != nil { - return "", err - } - msiSettings := struct { - URL string `json:"url"` - }{} - err = json.Unmarshal(bytes, &msiSettings) - if err != nil { - return "", err - } - - return msiSettings.URL, nil -} - -// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. 
-func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - // We set the oauth config token endpoint to be MSI's endpoint - msiEndpointURL, err := url.Parse(msiEndpoint) - if err != nil { - return nil, err - } - - oauthConfig, err := NewOAuthConfig(msiEndpointURL.String(), "") - if err != nil { - return nil, err - } - - spt := &ServicePrincipalToken{ - oauthConfig: *oauthConfig, - secret: &ServicePrincipalMSISecret{}, - resource: resource, - autoRefresh: true, - refreshWithin: defaultRefresh, - sender: &http.Client{}, - refreshCallbacks: callbacks, - } - - return spt, nil -} - -// EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. -func (spt *ServicePrincipalToken) EnsureFresh() error { - if spt.autoRefresh && spt.WillExpireIn(spt.refreshWithin) { - return spt.Refresh() - } - return nil -} - -// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization -func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { - if spt.refreshCallbacks != nil { - for _, callback := range spt.refreshCallbacks { - err := callback(spt.Token) - if err != nil { - return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) - } - } - } - return nil -} - -// Refresh obtains a fresh token for the Service Principal. -func (spt *ServicePrincipalToken) Refresh() error { - return spt.refreshInternal(spt.resource) -} - -// RefreshExchange refreshes the token, but for a different resource. 
-func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { - return spt.refreshInternal(resource) -} - -func (spt *ServicePrincipalToken) refreshInternal(resource string) error { - v := url.Values{} - v.Set("client_id", spt.clientID) - v.Set("resource", resource) - - if spt.RefreshToken != "" { - v.Set("grant_type", OAuthGrantTypeRefreshToken) - v.Set("refresh_token", spt.RefreshToken) - } else { - v.Set("grant_type", OAuthGrantTypeClientCredentials) - err := spt.secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), body) - if err != nil { - return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok { - req.Header.Set(metadataHeader, "true") - } - resp, err := spt.sender.Do(req) - if err != nil { - return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) - } - - defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != http.StatusOK { - if err != nil { - return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode) - } - return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)) - } - - if err != nil { - return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return fmt.Errorf("adal: Empty service principal token received during refresh") - } - var token Token - err = json.Unmarshal(rb, &token) - if err != nil { - return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. 
Error = '%v' JSON = '%s'", err, string(rb)) - } - - spt.Token = token - - return spt.InvokeRefreshCallbacks(token) -} - -// SetAutoRefresh enables or disables automatic refreshing of stale tokens. -func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.autoRefresh = autoRefresh -} - -// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will -// refresh the token. -func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.refreshWithin = d - return -} - -// SetSender sets the http.Client used when obtaining the Service Principal token. An -// undecorated http.Client is used by default. -func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go deleted file mode 100644 index 71e3ced2d6a..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ /dev/null @@ -1,181 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" -) - -const ( - bearerChallengeHeader = "Www-Authenticate" - bearer = "Bearer" - tenantID = "tenantID" -) - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. 
Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. -type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. -func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} - -// BearerAuthorizer implements the bearer authorization -type BearerAuthorizer struct { - tokenProvider adal.OAuthTokenProvider -} - -// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider -func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { - return &BearerAuthorizer{tokenProvider: tp} -} - -func (ba *BearerAuthorizer) withBearerAuthorization() PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())) -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the token. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - refresher, ok := ba.tokenProvider.(adal.Refresher) - if ok { - err := refresher.EnsureFresh() - if err != nil { - return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", nil, - "Failed to refresh the Token for request to %s", r.URL) - } - } - return (ba.withBearerAuthorization()(p)).Prepare(r) - }) - } -} - -// BearerAuthorizerCallbackFunc is the authentication callback signature. -type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) - -// BearerAuthorizerCallback implements bearer authorization via a callback. 
-type BearerAuthorizerCallback struct { - sender Sender - callback BearerAuthorizerCallbackFunc -} - -// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback -// is invoked when the HTTP request is submitted. -func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { - if sender == nil { - sender = &http.Client{} - } - return &BearerAuthorizerCallback{sender: sender, callback: callback} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value -// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - // make a copy of the request and remove the body as it's not - // required and avoids us having to create a copy of it. 
- rCopy := *r - removeRequestBody(&rCopy) - - resp, err := bacb.sender.Do(&rCopy) - if err == nil && resp.StatusCode == 401 { - defer resp.Body.Close() - if hasBearerChallenge(resp) { - bc, err := newBearerChallenge(resp) - if err != nil { - return r, err - } - if bacb.callback != nil { - ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) - if err != nil { - return r, err - } - return ba.WithAuthorization()(p).Prepare(r) - } - } - } - return r, err - }) - } -} - -// returns true if the HTTP response contains a bearer challenge -func hasBearerChallenge(resp *http.Response) bool { - authHeader := resp.Header.Get(bearerChallengeHeader) - if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { - return false - } - return true -} - -type bearerChallenge struct { - values map[string]string -} - -func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { - challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) - trimmedChallenge := challenge[len(bearer)+1:] - - // challenge is a set of key=value pairs that are comma delimited - pairs := strings.Split(trimmedChallenge, ",") - if len(pairs) < 1 { - err = fmt.Errorf("challenge '%s' contains no pairs", challenge) - return bc, err - } - - bc.values = make(map[string]string) - for i := range pairs { - trimmedPair := strings.TrimSpace(pairs[i]) - pair := strings.Split(trimmedPair, "=") - if len(pair) == 2 { - // remove the enclosing quotes - key := strings.Trim(pair[0], "\"") - value := strings.Trim(pair[1], "\"") - - switch key { - case "authorization", "authorization_uri": - // strip the tenant ID from the authorization URL - asURL, err := url.Parse(value) - if err != nil { - return bc, err - } - bc.values[tenantID] = asURL.Path[1:] - default: - bc.values[key] = value - } - } - } - - return bc, err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go deleted file mode 100644 index 
37b907c77f5..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines -and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) -generated Go code. - -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. A typical pattern is: - - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) - -Each phase relies on decorators to modify and / or manage processing. Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. For -example, the following: - - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) - -will set the URL to: - - https://microsoft.com/a/b/c - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. - -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. 
Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., ByUnmarshallingJson) is likely incorrect. - -Lastly, the Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure -correct parsing and formatting. - -Errors raised by autorest objects and methods will conform to the autorest.Error interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. -*/ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "net/http" - "time" -) - -const ( - // HeaderLocation specifies the HTTP Location header. - HeaderLocation = "Location" - - // HeaderRetryAfter specifies the HTTP Retry-After header. - HeaderRetryAfter = "Retry-After" -) - -// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set -// and false otherwise. -func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { - return containsInt(codes, resp.StatusCode) -} - -// GetLocation retrieves the URL from the Location header of the passed response. 
-func GetLocation(resp *http.Response) string { - return resp.Header.Get(HeaderLocation) -} - -// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If -// the header is absent or is malformed, it will return the supplied default delay time.Duration. -func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { - retry := resp.Header.Get(HeaderRetryAfter) - if retry == "" { - return defaultDelay - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - return defaultDelay - } - - return d -} - -// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. -func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare(&http.Request{Cancel: cancel}, - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go deleted file mode 100644 index ffbc8da28e5..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ /dev/null @@ -1,316 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" -) - -const ( - headerAsyncOperation = "Azure-AsyncOperation" -) - -const ( - operationInProgress string = "InProgress" - operationCanceled string = "Canceled" - operationFailed string = "Failed" - operationSucceeded string = "Succeeded" -) - -// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure -// long-running operation. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request. -func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - if err != nil { - return resp, err - } - pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK} - if !autorest.ResponseHasStatusCode(resp, pollingCodes...) 
{ - return resp, nil - } - - ps := pollingState{} - for err == nil { - err = updatePollingState(resp, &ps) - if err != nil { - break - } - if ps.hasTerminated() { - if !ps.hasSucceeded() { - err = ps - } - break - } - - r, err = newPollingRequest(resp, ps) - if err != nil { - return resp, err - } - - delay = autorest.GetRetryAfter(resp, delay) - resp, err = autorest.SendWithSender(s, r, - autorest.AfterDelay(delay)) - } - - return resp, err - }) - } -} - -func getAsyncOperation(resp *http.Response) string { - return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) -} - -func hasSucceeded(state string) bool { - return state == operationSucceeded -} - -func hasTerminated(state string) bool { - switch state { - case operationCanceled, operationFailed, operationSucceeded: - return true - default: - return false - } -} - -func hasFailed(state string) bool { - return state == operationFailed -} - -type provisioningTracker interface { - state() string - hasSucceeded() bool - hasTerminated() bool -} - -type operationResource struct { - // Note: - // The specification states services should return the "id" field. However some return it as - // "operationId". 
- ID string `json:"id"` - OperationID string `json:"operationId"` - Name string `json:"name"` - Status string `json:"status"` - Properties map[string]interface{} `json:"properties"` - OperationError ServiceError `json:"error"` - StartTime date.Time `json:"startTime"` - EndTime date.Time `json:"endTime"` - PercentComplete float64 `json:"percentComplete"` -} - -func (or operationResource) state() string { - return or.Status -} - -func (or operationResource) hasSucceeded() bool { - return hasSucceeded(or.state()) -} - -func (or operationResource) hasTerminated() bool { - return hasTerminated(or.state()) -} - -type provisioningProperties struct { - ProvisioningState string `json:"provisioningState"` -} - -type provisioningStatus struct { - Properties provisioningProperties `json:"properties,omitempty"` - ProvisioningError ServiceError `json:"error,omitempty"` -} - -func (ps provisioningStatus) state() string { - return ps.Properties.ProvisioningState -} - -func (ps provisioningStatus) hasSucceeded() bool { - return hasSucceeded(ps.state()) -} - -func (ps provisioningStatus) hasTerminated() bool { - return hasTerminated(ps.state()) -} - -func (ps provisioningStatus) hasProvisioningError() bool { - return ps.ProvisioningError != ServiceError{} -} - -type pollingResponseFormat string - -const ( - usesOperationResponse pollingResponseFormat = "OperationResponse" - usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus" - formatIsUnknown pollingResponseFormat = "" -) - -type pollingState struct { - responseFormat pollingResponseFormat - uri string - state string - code string - message string -} - -func (ps pollingState) hasSucceeded() bool { - return hasSucceeded(ps.state) -} - -func (ps pollingState) hasTerminated() bool { - return hasTerminated(ps.state) -} - -func (ps pollingState) hasFailed() bool { - return hasFailed(ps.state) -} - -func (ps pollingState) Error() string { - return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q 
Message=%q", ps.state, ps.code, ps.message) -} - -// updatePollingState maps the operation status -- retrieved from either a provisioningState -// field, the status field of an OperationResource, or inferred from the HTTP status code -- -// into a well-known states. Since the process begins from the initial request, the state -// always comes from either a the provisioningState returned or is inferred from the HTTP -// status code. Subsequent requests will read an Azure OperationResource object if the -// service initially returned the Azure-AsyncOperation header. The responseFormat field notes -// the expected response format. -func updatePollingState(resp *http.Response, ps *pollingState) error { - // Determine the response shape - // -- The first response will always be a provisioningStatus response; only the polling requests, - // depending on the header returned, may be something otherwise. - var pt provisioningTracker - if ps.responseFormat == usesOperationResponse { - pt = &operationResource{} - } else { - pt = &provisioningStatus{} - } - - // If this is the first request (that is, the polling response shape is unknown), determine how - // to poll and what to expect - if ps.responseFormat == formatIsUnknown { - req := resp.Request - if req == nil { - return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing") - } - - // Prefer the Azure-AsyncOperation header - ps.uri = getAsyncOperation(resp) - if ps.uri != "" { - ps.responseFormat = usesOperationResponse - } else { - ps.responseFormat = usesProvisioningStatus - } - - // Else, use the Location header - if ps.uri == "" { - ps.uri = autorest.GetLocation(resp) - } - - // Lastly, requests against an existing resource, use the last request URI - if ps.uri == "" { - m := strings.ToUpper(req.Method) - if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet { - ps.uri = req.URL.String() - } - } - } - - // Read and interpret the response (saving the 
Body in case no polling is necessary) - b := &bytes.Buffer{} - err := autorest.Respond(resp, - autorest.ByCopying(b), - autorest.ByUnmarshallingJSON(pt), - autorest.ByClosing()) - resp.Body = ioutil.NopCloser(b) - if err != nil { - return err - } - - // Interpret the results - // -- Terminal states apply regardless - // -- Unknown states are per-service inprogress states - // -- Otherwise, infer state from HTTP status code - if pt.hasTerminated() { - ps.state = pt.state() - } else if pt.state() != "" { - ps.state = operationInProgress - } else { - switch resp.StatusCode { - case http.StatusAccepted: - ps.state = operationInProgress - - case http.StatusNoContent, http.StatusCreated, http.StatusOK: - ps.state = operationSucceeded - - default: - ps.state = operationFailed - } - } - - if ps.state == operationInProgress && ps.uri == "" { - return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL) - } - - // For failed operation, check for error code and message in - // -- Operation resource - // -- Response - // -- Otherwise, Unknown - if ps.hasFailed() { - if ps.responseFormat == usesOperationResponse { - or := pt.(*operationResource) - ps.code = or.OperationError.Code - ps.message = or.OperationError.Message - } else { - p := pt.(*provisioningStatus) - if p.hasProvisioningError() { - ps.code = p.ProvisioningError.Code - ps.message = p.ProvisioningError.Message - } else { - ps.code = "Unknown" - ps.message = "None" - } - } - } - return nil -} - -func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) { - req := resp.Request - if req == nil { - return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing") - } - - reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel}, - autorest.AsGet(), - autorest.WithBaseURL(ps.uri)) - if err != nil { - return nil, 
autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri) - } - - return reqPoll, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go deleted file mode 100644 index fa18356476b..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Package azure provides Azure-specific implementations used with AutoRest. - -See the included examples for more detail. -*/ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strconv" - - "github.com/Azure/go-autorest/autorest" -) - -const ( - // HeaderClientID is the Azure extension header to set a user-specified request ID. - HeaderClientID = "x-ms-client-request-id" - - // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID - // should be included in the response. - HeaderReturnClientID = "x-ms-return-client-request-id" - - // HeaderRequestID is the Azure extension header of the service generated request ID returned - // in the response. - HeaderRequestID = "x-ms-request-id" -) - -// ServiceError encapsulates the error response from an Azure service. 
-type ServiceError struct { - Code string `json:"code"` - Message string `json:"message"` - Details *[]interface{} `json:"details"` -} - -func (se ServiceError) Error() string { - if se.Details != nil { - d, err := json.Marshal(*(se.Details)) - if err != nil { - return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details) - } - return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d)) - } - return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) -} - -// RequestError describes an error response returned by Azure service. -type RequestError struct { - autorest.DetailedError - - // The error returned by the Azure service. - ServiceError *ServiceError `json:"error"` - - // The request id (from the x-ms-request-id-header) of the request. - RequestID string -} - -// Error returns a human-friendly error message from service error. -func (e RequestError) Error() string { - return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", - e.StatusCode, e.ServiceError) -} - -// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. -func IsAzureError(e error) bool { - _, ok := e.(*RequestError) - return ok -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. 
-func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { - if v, ok := original.(*RequestError); ok { - return *v - } - - statusCode := autorest.UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - return RequestError{ - DetailedError: autorest.DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - }, - } -} - -// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id -// header to true such that UUID accompanies the http.Response. -func WithReturningClientID(uuid string) autorest.PrepareDecorator { - preparer := autorest.CreatePreparer( - WithClientID(uuid), - WithReturnClientID(true)) - - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - return preparer.Prepare(r) - }) - } -} - -// WithClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). -func WithClientID(uuid string) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderClientID, uuid) -} - -// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-return-client-request-id whose boolean value indicates if the value of the -// x-ms-client-request-id header should be included in the http.Response. 
-func WithReturnClientID(b bool) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) -} - -// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the -// http.Request sent to the service (and returned in the http.Response) -func ExtractClientID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderClientID, resp) -} - -// ExtractRequestID extracts the Azure server generated request identifier from the -// x-ms-request-id header. -func ExtractRequestID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderRequestID, resp) -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an -// azure.RequestError by reading the response body unless the response HTTP status code -// is among the set passed. -// -// If there is a chance service may return responses other than the Azure error -// format and the response cannot be parsed into an error, a decoding error will -// be returned containing the response body. In any case, the Responder will -// return an error if the status code is not satisfied. -// -// If this Responder returns an error, the response body will be replaced with -// an in-memory reader, which needs no further closing. -func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { - return func(r autorest.Responder) autorest.Responder { - return autorest.ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { - var e RequestError - defer resp.Body.Close() - - // Copy and replace the Body in case it does not contain an error object. - // This will leave the Body available to the caller. 
- b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) - resp.Body = ioutil.NopCloser(&b) - if decodeErr != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) - } else if e.ServiceError == nil { - // Check if error is unwrapped ServiceError - if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" { - e.ServiceError = &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - } - } - } - - e.RequestID = ExtractRequestID(resp) - if e.StatusCode == nil { - e.StatusCode = resp.StatusCode - } - err = &e - } - return err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go deleted file mode 100644 index 30c4351a576..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ /dev/null @@ -1,144 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "strings" -) - -var environments = map[string]Environment{ - "AZURECHINACLOUD": ChinaCloud, - "AZUREGERMANCLOUD": GermanCloud, - "AZUREPUBLICCLOUD": PublicCloud, - "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, -} - -// Environment represents a set of endpoints for each of Azure's Clouds. 
-type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` -} - -var ( - // PublicCloud is the default public Azure cloud environment - PublicCloud = Environment{ - Name: "AzurePublicCloud", - ManagementPortalURL: "https://manage.windowsazure.com/", - PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.windows.net/", - ResourceManagerEndpoint: "https://management.azure.com/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net/", - GraphEndpoint: "https://graph.windows.net/", - StorageEndpointSuffix: "core.windows.net", - SQLDatabaseDNSSuffix: "database.windows.net", - TrafficManagerDNSSuffix: "trafficmanager.net", - KeyVaultDNSSuffix: "vault.azure.net", - ServiceBusEndpointSuffix: "servicebus.azure.com", - ServiceManagementVMDNSSuffix: "cloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.azure.com", - 
ContainerRegistryDNSSuffix: "azurecr.io", - } - - // USGovernmentCloud is the cloud environment for the US Government - USGovernmentCloud = Environment{ - Name: "AzureUSGovernmentCloud", - ManagementPortalURL: "https://manage.windowsazure.us/", - PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", - ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.usgovcloudapi.net/", - KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - GraphEndpoint: "https://graph.usgovcloudapi.net/", - StorageEndpointSuffix: "core.usgovcloudapi.net", - SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", - TrafficManagerDNSSuffix: "usgovtrafficmanager.net", - KeyVaultDNSSuffix: "vault.usgovcloudapi.net", - ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", - ServiceManagementVMDNSSuffix: "usgovcloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", - ContainerRegistryDNSSuffix: "azurecr.io", - } - - // ChinaCloud is the cloud environment operated in China - ChinaCloud = Environment{ - Name: "AzureChinaCloud", - ManagementPortalURL: "https://manage.chinacloudapi.com/", - PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", - ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", - ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", - GalleryEndpoint: "https://gallery.chinacloudapi.cn/", - KeyVaultEndpoint: "https://vault.azure.cn/", - GraphEndpoint: "https://graph.chinacloudapi.cn/", - StorageEndpointSuffix: "core.chinacloudapi.cn", - SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", - TrafficManagerDNSSuffix: "trafficmanager.cn", - KeyVaultDNSSuffix: "vault.azure.cn", - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", - 
ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", - ContainerRegistryDNSSuffix: "azurecr.io", - } - - // GermanCloud is the cloud environment operated in Germany - GermanCloud = Environment{ - Name: "AzureGermanCloud", - ManagementPortalURL: "http://portal.microsoftazure.de/", - PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.cloudapi.de/", - ResourceManagerEndpoint: "https://management.microsoftazure.de/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", - GalleryEndpoint: "https://gallery.cloudapi.de/", - KeyVaultEndpoint: "https://vault.microsoftazure.de/", - GraphEndpoint: "https://graph.cloudapi.de/", - StorageEndpointSuffix: "core.cloudapi.de", - SQLDatabaseDNSSuffix: "database.cloudapi.de", - TrafficManagerDNSSuffix: "azuretrafficmanager.de", - KeyVaultDNSSuffix: "vault.microsoftazure.de", - ServiceBusEndpointSuffix: "servicebus.cloudapi.de", - ServiceManagementVMDNSSuffix: "azurecloudapp.de", - ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - ContainerRegistryDNSSuffix: "azurecr.io", - } -) - -// EnvironmentFromName returns an Environment based on the common name specified -func EnvironmentFromName(name string) (Environment, error) { - name = strings.ToUpper(name) - env, ok := environments[name] - if !ok { - return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) - } - return env, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go deleted file mode 100644 index 6036d069a12..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package azure - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" -) - -// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. -// It also handles request retries -func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := autorest.NewRetriableRequest(r) - for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - - resp, err = autorest.SendWithSender(s, rr.Request(), - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return resp, err - } - - if resp.StatusCode != http.StatusConflict { - return resp, err - } - var re RequestError - err = autorest.Respond( - resp, - autorest.ByUnmarshallingJSON(&re), - ) - if err != nil { - return resp, err - } - - if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { - err = register(client, r, re) - if err != nil { - return resp, fmt.Errorf("failed auto registering Resource Provider: %s", err) - } - } - } - return resp, errors.New("failed request and resource provider registration") - }) - } -} - -func getProvider(re RequestError) (string, error) { - if re.ServiceError != nil { - if re.ServiceError.Details != nil && 
len(*re.ServiceError.Details) > 0 { - detail := (*re.ServiceError.Details)[0].(map[string]interface{}) - return detail["target"].(string), nil - } - } - return "", errors.New("provider was not found in the response") -} - -func register(client autorest.Client, originalReq *http.Request, re RequestError) error { - subID := getSubscription(originalReq.URL.Path) - if subID == "" { - return errors.New("missing parameter subscriptionID to register resource provider") - } - providerName, err := getProvider(re) - if err != nil { - return fmt.Errorf("missing parameter provider to register resource provider: %s", err) - } - newURL := url.URL{ - Scheme: originalReq.URL.Scheme, - Host: originalReq.URL.Host, - } - - // taken from the resources SDK - // with almost identical code, this sections are easier to mantain - // It is also not a good idea to import the SDK here - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": autorest.Encode("path", providerName), - "subscriptionId": autorest.Encode("path", subID), - } - - const APIVersion = "2016-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - - req, err := preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req.Cancel = originalReq.Cancel - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - type Provider struct { - RegistrationState *string `json:"registrationState,omitempty"` - } - var 
provider Provider - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - // poll for registered provisioning state - now := time.Now() - for err == nil && time.Since(now) < client.PollingDuration { - // taken from the resources SDK - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - req, err = preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req.Cancel = originalReq.Cancel - - resp, err := autorest.SendWithSender(client.Sender, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - if provider.RegistrationState != nil && - *provider.RegistrationState == "Registered" { - break - } - - delayed := autorest.DelayWithRetryAfter(resp, originalReq.Cancel) - if !delayed { - autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Cancel) - } - } - if !(time.Since(now) < client.PollingDuration) { - return errors.New("polling for resource provider registration has exceeded the polling duration") - } - return err -} - -func getSubscription(path string) string { - parts := strings.Split(path, "/") - for i, v := range parts { - if v == "subscriptions" && (i+1) < len(parts) { - return parts[i+1] - } - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go 
b/vendor/github.com/Azure/go-autorest/autorest/client.go deleted file mode 100644 index ce7a605f896..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ /dev/null @@ -1,251 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "net/http/cookiejar" - "runtime" - "time" -) - -const ( - // DefaultPollingDelay is a reasonable delay between polling requests. - DefaultPollingDelay = 60 * time.Second - - // DefaultPollingDuration is a reasonable total polling duration. - DefaultPollingDuration = 15 * time.Minute - - // DefaultRetryAttempts is number of attempts for retry status codes (5xx). - DefaultRetryAttempts = 3 -) - -var ( - // defaultUserAgent builds a string containing the Go version, system archityecture and OS, - // and the go-autorest version. 
- defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - Version(), - ) - - // StatusCodesForRetry are a defined group of status code for which the client will retry - StatusCodesForRetry = []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } -) - -const ( - requestFormat = `HTTP Request Begin =================================================== -%s -===================================================== HTTP Request End -` - responseFormat = `HTTP Response Begin =================================================== -%s -===================================================== HTTP Response End -` -) - -// Response serves as the base for all responses from generated clients. It provides access to the -// last http.Response. -type Response struct { - *http.Response `json:"-"` -} - -// LoggingInspector implements request and response inspectors that log the full request and -// response to a supplied log. -type LoggingInspector struct { - Logger *log.Logger -} - -// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. 
-func (li LoggingInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - var body, b bytes.Buffer - - defer r.Body.Close() - - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) - if err := r.Write(&b); err != nil { - return nil, fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(requestFormat, b.String()) - - r.Body = ioutil.NopCloser(&body) - return p.Prepare(r) - }) - } -} - -// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - var body, b bytes.Buffer - defer resp.Body.Close() - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) - if err := resp.Write(&b); err != nil { - return fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(responseFormat, b.String()) - - resp.Body = ioutil.NopCloser(&body) - return r.Respond(resp) - }) - } -} - -// Client is the base for autorest generated clients. It provides default, "do nothing" -// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the -// standard, undecorated http.Client as a default Sender. -// -// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and -// return responses that compose with Response. -// -// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom -// RequestInspector, and / or custom ResponseInspector. 
Users may log requests, implement circuit -// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence -// sending the request by providing a decorated Sender. -type Client struct { - Authorizer Authorizer - Sender Sender - RequestInspector PrepareDecorator - ResponseInspector RespondDecorator - - // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header - PollingDelay time.Duration - - // PollingDuration sets the maximum polling time after which an error is returned. - PollingDuration time.Duration - - // RetryAttempts sets the default number of retry attempts for client. - RetryAttempts int - - // RetryDuration sets the delay duration for retries. - RetryDuration time.Duration - - // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent - // through the Do method. - UserAgent string - - Jar http.CookieJar -} - -// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed -// string. -func NewClientWithUserAgent(ua string) Client { - c := Client{ - PollingDelay: DefaultPollingDelay, - PollingDuration: DefaultPollingDuration, - RetryAttempts: DefaultRetryAttempts, - RetryDuration: 30 * time.Second, - UserAgent: defaultUserAgent, - } - c.Sender = c.sender() - c.AddToUserAgent(ua) - return c -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) -} - -// Do implements the Sender interface by invoking the active Sender after applying authorization. -// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent -// is set, apply set the User-Agent header. 
-func (c Client) Do(r *http.Request) (*http.Response, error) { - if r.UserAgent() == "" { - r, _ = Prepare(r, - WithUserAgent(c.UserAgent)) - } - r, err := Prepare(r, - c.WithInspection(), - c.WithAuthorization()) - if err != nil { - return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") - } - - resp, err := SendWithSender(c.sender(), r) - Respond(resp, c.ByInspecting()) - return resp, err -} - -// sender returns the Sender to which to send requests. -func (c Client) sender() Sender { - if c.Sender == nil { - j, _ := cookiejar.New(nil) - return &http.Client{Jar: j} - } - return c.Sender -} - -// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator -// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. -func (c Client) WithAuthorization() PrepareDecorator { - return c.authorizer().WithAuthorization() -} - -// authorizer returns the Authorizer to use. -func (c Client) authorizer() Authorizer { - if c.Authorizer == nil { - return NullAuthorizer{} - } - return c.Authorizer -} - -// WithInspection is a convenience method that passes the request to the supplied RequestInspector, -// if present, or returns the WithNothing PrepareDecorator otherwise. -func (c Client) WithInspection() PrepareDecorator { - if c.RequestInspector == nil { - return WithNothing() - } - return c.RequestInspector -} - -// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, -// if present, or returns the ByIgnoring RespondDecorator otherwise. 
-func (c Client) ByInspecting() RespondDecorator { - if c.ResponseInspector == nil { - return ByIgnoring() - } - return c.ResponseInspector -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go deleted file mode 100644 index c4571065685..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) -defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of -time.Time types. And both convert to time.Time through a ToTime method. -*/ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "time" -) - -const ( - fullDate = "2006-01-02" - fullDateJSON = `"2006-01-02"` - dateFormat = "%04d-%02d-%02d" - jsonFormat = `"%04d-%02d-%02d"` -) - -// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., -// 2006-01-02). -type Date struct { - time.Time -} - -// ParseDate create a new Date from the passed string. 
-func ParseDate(date string) (d Date, err error) { - return parseDate(date, fullDate) -} - -func parseDate(date string, format string) (Date, error) { - d, err := time.Parse(format, date) - return Date{Time: d}, err -} - -// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalBinary() ([]byte, error) { - return d.MarshalText() -} - -// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalBinary(data []byte) error { - return d.UnmarshalText(data) -} - -// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalJSON() (json []byte, err error) { - return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalJSON(data []byte) (err error) { - d.Time, err = time.Parse(fullDateJSON, string(data)) - return err -} - -// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalText() (text []byte, err error) { - return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalText(data []byte) (err error) { - d.Time, err = time.Parse(fullDate, string(data)) - return err -} - -// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). 
-func (d Date) String() string { - return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) -} - -// ToTime returns a Date as a time.Time -func (d Date) ToTime() time.Time { - return d.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go deleted file mode 100644 index b453fad0491..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ /dev/null @@ -1,103 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "regexp" - "time" -) - -// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -const ( - azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` - azureUtcFormat = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` - rfc3339 = time.RFC3339Nano - tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` -) - -// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -type Time struct { - time.Time -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalBinary() ([]byte, error) { - return t.Time.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). 
-func (t *Time) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalJSON() (json []byte, err error) { - return t.Time.MarshalJSON() -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalJSON(data []byte) (err error) { - timeFormat := azureUtcFormatJSON - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339JSON - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalText() (text []byte, err error) { - return t.Time.MarshalText() -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalText(data []byte) (err error) { - timeFormat := azureUtcFormat - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339 - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// String returns the Time formatted as an RFC3339 date-time string (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) String() string { - // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. 
- b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} - -// ToTime returns a Time as a time.Time -func (t Time) ToTime() time.Time { - return t.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go deleted file mode 100644 index 48fb39ba9b9..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go +++ /dev/null @@ -1,100 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "time" -) - -const ( - rfc1123JSON = `"` + time.RFC1123 + `"` - rfc1123 = time.RFC1123 -) - -// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -type TimeRFC1123 struct { - time.Time -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123JSON, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). 
-func (t TimeRFC1123) MarshalJSON() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") - } - b := []byte(t.Format(rfc1123JSON)) - return b, nil -} - -// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalText() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") - } - - b := []byte(t.Format(rfc1123)) - return b, nil -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalBinary() ([]byte, error) { - return t.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// ToTime returns a Time as a time.Time -func (t TimeRFC1123) ToTime() time.Time { - return t.Time -} - -// String returns the Time formatted as an RFC1123 date-time string (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) String() string { - // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. 
- b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go deleted file mode 100644 index 7073959b2a9..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go +++ /dev/null @@ -1,123 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "time" -) - -// unixEpoch is the moment in time that should be treated as timestamp 0. -var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) - -// UnixTime marshals and unmarshals a time that is represented as the number -// of seconds (ignoring skip-seconds) since the Unix Epoch. -type UnixTime time.Time - -// Duration returns the time as a Duration since the UnixEpoch. -func (t UnixTime) Duration() time.Duration { - return time.Time(t).Sub(unixEpoch) -} - -// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. -func NewUnixTimeFromSeconds(seconds float64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) -} - -// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. 
-func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(nanoseconds)) -} - -// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. -func NewUnixTimeFromDuration(dur time.Duration) UnixTime { - return UnixTime(unixEpoch.Add(dur)) -} - -// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' -func UnixEpoch() time.Time { - return unixEpoch -} - -// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. -// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) -func (t UnixTime) MarshalJSON() ([]byte, error) { - buffer := &bytes.Buffer{} - enc := json.NewEncoder(buffer) - err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) - if err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since -// midnight January 1st, 1970. -func (t *UnixTime) UnmarshalJSON(text []byte) error { - dec := json.NewDecoder(bytes.NewReader(text)) - - var secondsSinceEpoch float64 - if err := dec.Decode(&secondsSinceEpoch); err != nil { - return err - } - - *t = NewUnixTimeFromSeconds(secondsSinceEpoch) - - return nil -} - -// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. -func (t UnixTime) MarshalText() ([]byte, error) { - cast := time.Time(t) - return cast.MarshalText() -} - -// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. -func (t *UnixTime) UnmarshalText(raw []byte) error { - var unmarshaled time.Time - - if err := unmarshaled.UnmarshalText(raw); err != nil { - return err - } - - *t = UnixTime(unmarshaled) - return nil -} - -// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. 
-func (t UnixTime) MarshalBinary() ([]byte, error) { - buf := &bytes.Buffer{} - - payload := int64(t.Duration()) - - if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. -func (t *UnixTime) UnmarshalBinary(raw []byte) error { - var nanosecondsSinceEpoch int64 - - if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { - return err - } - *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go deleted file mode 100644 index 12addf0ebb4..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go +++ /dev/null @@ -1,25 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "strings" - "time" -) - -// ParseTime to parse Time string to specified format. 
-func ParseTime(format string, t string) (d time.Time, err error) { - return time.Parse(format, strings.ToUpper(t)) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go deleted file mode 100644 index f724f33327e..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ /dev/null @@ -1,98 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" -) - -const ( - // UndefinedStatusCode is used when HTTP status code is not available for an error. - UndefinedStatusCode = 0 -) - -// DetailedError encloses a error with details of the package, method, and associated HTTP -// status code (if any). -type DetailedError struct { - Original error - - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced the the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. - StatusCode interface{} - - // Message is the error message. 
- Message string - - // Service Error is the response body of failed API in bytes - ServiceError []byte - - // Response is the response object that was returned during failure if applicable. - Response *http.Response -} - -// NewError creates a new Error conforming object from the passed packageType, method, and -// message. message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, nil, message, args...) -} - -// NewErrorWithResponse creates a new Error conforming object from the passed -// packageType, method, statusCode of the given resp (UndefinedStatusCode if -// resp is nil), and message. message is treated as a format string to which the -// optional args apply. -func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, resp, message, args...) -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - if v, ok := original.(DetailedError); ok { - return v - } - - statusCode := UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - - return DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - Response: resp, - } -} - -// Error returns a formatted containing all available details (i.e., PackageType, Method, -// StatusCode, Message, and original error (if any)). 
-func (e DetailedError) Error() string { - if e.Original == nil { - return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) - } - return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go deleted file mode 100644 index 2290c401003..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ /dev/null @@ -1,442 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/url" - "strings" -) - -const ( - mimeTypeJSON = "application/json" - mimeTypeFormPost = "application/x-www-form-urlencoded" - - headerAuthorization = "Authorization" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" -) - -// Preparer is the interface that wraps the Prepare method. -// -// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations -// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. -type Preparer interface { - Prepare(*http.Request) (*http.Request, error) -} - -// PreparerFunc is a method that implements the Preparer interface. 
-type PreparerFunc func(*http.Request) (*http.Request, error) - -// Prepare implements the Preparer interface on PreparerFunc. -func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { - return pf(r) -} - -// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then affect the result. -type PrepareDecorator func(Preparer) Preparer - -// CreatePreparer creates, decorates, and returns a Preparer. -// Without decorators, the returned Preparer returns the passed http.Request unmodified. -// Preparers are safe to share and re-use. -func CreatePreparer(decorators ...PrepareDecorator) Preparer { - return DecoratePreparer( - Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), - decorators...) -} - -// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it -// applies to the Preparer. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (change the http.Request and then pass it -// along) or a post-decorator (pass the http.Request along and alter it on return). -func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { - for _, decorate := range decorators { - p = decorate(p) - } - return p -} - -// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. -// It creates a Preparer from the decorators which it then applies to the passed http.Request. -func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { - if r == nil { - return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") - } - return CreatePreparer(decorators...).Prepare(r) -} - -// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed -// http.Request. 
-func WithNothing() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return p.Prepare(r) - }) - } -} - -// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to -// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before -// adding the header. -func WithHeader(header string, value string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(header), value) - } - return r, err - }) - } -} - -// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the supplied token. -func WithBearerAuthorization(token string) PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) -} - -// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value -// is the passed contentType. -func AsContentType(contentType string) PrepareDecorator { - return WithHeader(headerContentType, contentType) -} - -// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the -// passed string. -func WithUserAgent(ua string) PrepareDecorator { - return WithHeader(headerUserAgent, ua) -} - -// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/x-www-form-urlencoded". -func AsFormURLEncoded() PrepareDecorator { - return AsContentType(mimeTypeFormPost) -} - -// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/json". 
-func AsJSON() PrepareDecorator { - return AsContentType(mimeTypeJSON) -} - -// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The -// decorator does not validate that the passed method string is a known HTTP method. -func WithMethod(method string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r.Method = method - return p.Prepare(r) - }) - } -} - -// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. -func AsDelete() PrepareDecorator { return WithMethod("DELETE") } - -// AsGet returns a PrepareDecorator that sets the HTTP method to GET. -func AsGet() PrepareDecorator { return WithMethod("GET") } - -// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. -func AsHead() PrepareDecorator { return WithMethod("HEAD") } - -// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. -func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } - -// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. -func AsPatch() PrepareDecorator { return WithMethod("PATCH") } - -// AsPost returns a PrepareDecorator that sets the HTTP method to POST. -func AsPost() PrepareDecorator { return WithMethod("POST") } - -// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. -func AsPut() PrepareDecorator { return WithMethod("PUT") } - -// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed -// from the supplied baseUrl. 
-func WithBaseURL(baseURL string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var u *url.URL - if u, err = url.Parse(baseURL); err != nil { - return r, err - } - if u.Scheme == "" { - err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) - } - if err == nil { - r.URL = u - } - } - return r, err - }) - } -} - -// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the -// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. -func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(urlParameters) - for key, value := range parameters { - baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) - } - return WithBaseURL(baseURL) -} - -// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the -// http.Request body. -func WithFormData(v url.Values) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - s := v.Encode() - r.ContentLength = int64(len(s)) - r.Body = ioutil.NopCloser(strings.NewReader(s)) - } - return r, err - }) - } -} - -// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters -// into the http.Request body. 
-func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var body bytes.Buffer - writer := multipart.NewWriter(&body) - for key, value := range formDataParameters { - if rc, ok := value.(io.ReadCloser); ok { - var fd io.Writer - if fd, err = writer.CreateFormFile(key, key); err != nil { - return r, err - } - if _, err = io.Copy(fd, rc); err != nil { - return r, err - } - } else { - if err = writer.WriteField(key, ensureValueString(value)); err != nil { - return r, err - } - } - } - if err = writer.Close(); err != nil { - return r, err - } - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) - r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - r.ContentLength = int64(body.Len()) - return r, err - } - return r, err - }) - } -} - -// WithFile returns a PrepareDecorator that sends file in request body. -func WithFile(f io.ReadCloser) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := ioutil.ReadAll(f) - if err != nil { - return r, err - } - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - r.ContentLength = int64(len(b)) - } - return r, err - }) - } -} - -// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request -// and sets the Content-Length header. -func WithBool(v bool) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the -// request and sets the Content-Length header. 
-func WithFloat32(v float32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the -// request and sets the Content-Length header. -func WithFloat64(v float64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request -// and sets the Content-Length header. -func WithInt32(v int32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request -// and sets the Content-Length header. -func WithInt64(v int64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithString returns a PrepareDecorator that encodes the passed string into the body of the request -// and sets the Content-Length header. -func WithString(v string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - r.ContentLength = int64(len(v)) - r.Body = ioutil.NopCloser(strings.NewReader(v)) - } - return r, err - }) - } -} - -// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the -// request and sets the Content-Length header. -func WithJSON(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := json.Marshal(v) - if err == nil { - r.ContentLength = int64(len(b)) - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - } - return r, err - }) - } -} - -// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path -// is absolute (that is, it begins with a "/"), it replaces the existing path. 
-func WithPath(path string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPath", "Invoked with a nil URL") - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The -// values will be escaped (aka URL encoded) before insertion into the path. -func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := escapeValueStrings(ensureValueStrings(pathParameters)) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
-func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(pathParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -func parseURL(u *url.URL, path string) (*url.URL, error) { - p := strings.TrimRight(u.String(), "/") - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return url.Parse(p + path) -} - -// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters -// given in the supplied map (i.e., key=value). -func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(queryParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") - } - v := r.URL.Query() - for key, value := range parameters { - v.Add(key, value) - } - r.URL.RawQuery = createQuery(v) - } - return r, err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go deleted file mode 100644 index a908a0adb70..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ /dev/null @@ -1,250 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// Responder is the interface that wraps the Respond method. -// -// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold -// state since Responders may be shared and re-used. -type Responder interface { - Respond(*http.Response) error -} - -// ResponderFunc is a method that implements the Responder interface. -type ResponderFunc func(*http.Response) error - -// Respond implements the Responder interface on ResponderFunc. -func (rf ResponderFunc) Respond(r *http.Response) error { - return rf(r) -} - -// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to -// the http.Response and pass it along or, first, pass the http.Response along then react. -type RespondDecorator func(Responder) Responder - -// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned -// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share -// and re-used: It depends on the applied decorators. For example, a standard decorator that closes -// the response body is fine to share whereas a decorator that reads the body into a passed struct -// is not. -// -// To prevent memory leaks, ensure that at least one Responder closes the response body. 
-func CreateResponder(decorators ...RespondDecorator) Responder { - return DecorateResponder( - Responder(ResponderFunc(func(r *http.Response) error { return nil })), - decorators...) -} - -// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it -// applies to the Responder. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (react to the http.Response and then pass it -// along) or a post-decorator (pass the http.Response along and then react). -func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { - for _, decorate := range decorators { - r = decorate(r) - } - return r -} - -// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. -// It creates a Responder from the decorators it then applies to the passed http.Response. -func Respond(r *http.Response, decorators ...RespondDecorator) error { - if r == nil { - return nil - } - return CreateResponder(decorators...).Respond(r) -} - -// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined -// to the next RespondDecorator. -func ByIgnoring() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - return r.Respond(resp) - }) - } -} - -// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as -// the Body is read. -func ByCopying(b *bytes.Buffer) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - resp.Body = TeeReadCloser(resp.Body, b) - } - return err - }) - } -} - -// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which -// it copies the remaining bytes (if any) in the response body to ioutil.Discard. 
Since the passed -// Responder is invoked prior to discarding the response body, the decorator may occur anywhere -// within the set. -func ByDiscardingBody() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - return fmt.Errorf("Error discarding the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it -// closes the response body. Since the passed Responder is invoked prior to closing the response -// body, the decorator may occur anywhere within the set. -func ByClosing() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which -// it closes the response if the passed Responder returns an error and the response body exists. -func ByClosingIfError() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil && resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the -// response Body into the value pointed to by v. 
-func ByUnmarshallingJSON(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - // Some responses might include a BOM, remove for successful unmarshalling - b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else if len(strings.Trim(string(b), " ")) > 0 { - errInner = json.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingXML(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - errInner = xml.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response -// StatusCode is among the set passed. On error, response body is fully read into a buffer and -// presented in the returned error, as well as in the response body. -func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !ResponseHasStatusCode(resp, codes...) 
{ - derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - if resp.Body != nil { - defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) - derr.ServiceError = b - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - err = derr - } - return err - }) - } -} - -// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is -// anything other than HTTP 200. -func WithErrorUnlessOK() RespondDecorator { - return WithErrorUnlessStatusCode(http.StatusOK) -} - -// ExtractHeader extracts all values of the specified header from the http.Response. It returns an -// empty string slice if the passed http.Response is nil or the header does not exist. -func ExtractHeader(header string, resp *http.Response) []string { - if resp != nil && resp.Header != nil { - return resp.Header[http.CanonicalHeaderKey(header)] - } - return nil -} - -// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It -// returns an empty string if the passed http.Response is nil or the header does not exist. -func ExtractHeaderValue(header string, resp *http.Response) string { - h := ExtractHeader(header, resp) - if len(h) > 0 { - return h[0] - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go deleted file mode 100644 index fa11dbed79b..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go +++ /dev/null @@ -1,52 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. -func NewRetriableRequest(req *http.Request) *RetriableRequest { - return &RetriableRequest{req: req} -} - -// Request returns the wrapped HTTP request. -func (rr *RetriableRequest) Request() *http.Request { - return rr.req -} - -func (rr *RetriableRequest) prepareFromByteReader() (err error) { - // fall back to making a copy (only do this once) - b := []byte{} - if rr.req.ContentLength > 0 { - b = make([]byte, rr.req.ContentLength) - _, err = io.ReadFull(rr.req.Body, b) - if err != nil { - return err - } - } else { - b, err = ioutil.ReadAll(rr.req.Body) - if err != nil { - return err - } - } - rr.br = bytes.NewReader(b) - rr.req.Body = ioutil.NopCloser(rr.br) - return err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go deleted file mode 100644 index 7143cc61b58..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build !go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. -type RetriableRequest struct { - req *http.Request - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.br != nil { - _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go deleted file mode 100644 index ae15c6bf962..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go +++ /dev/null @@ -1,66 +0,0 @@ -// +build go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. -type RetriableRequest struct { - req *http.Request - rc io.ReadCloser - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.rc != nil { - rr.req.Body = rr.rc - } else if rr.br != nil { - _, err = rr.br.Seek(0, io.SeekStart) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.req.GetBody != nil { - // this will allow us to preserve the body without having to - // make a copy. 
note we need to do this on each iteration - rr.rc, err = rr.req.GetBody() - if err != nil { - return err - } - } else if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.GetBody = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go deleted file mode 100644 index 7264c32f27d..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ /dev/null @@ -1,307 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "log" - "math" - "net/http" - "strconv" - "time" -) - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibily decorates, by wrapping, a Sender. 
Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to -// the Sender. Decorators are applied in the order received, but their affect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -// Send sends, by means of the default http.Client, the passed http.Request, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// Send is a convenience method and not recommended for production. Advanced users should use -// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). -// -// Send will not poll or retry requests. -func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(&http.Client{}, r, decorators...) -} - -// SendWithSender sends the passed http.Request, through the provided Sender, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// SendWithSender will not poll or retry requests. 
-func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return DecorateSender(s, decorators...).Do(r) -} - -// AfterDelay returns a SendDecorator that delays for the passed time.Duration before -// invoking the Sender. The delay may be terminated by closing the optional channel on the -// http.Request. If canceled, no further Senders are invoked. -func AfterDelay(d time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if !DelayForBackoff(d, 0, r.Cancel) { - return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") - } - return s.Do(r) - }) - } -} - -// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. -func AsIs() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return s.Do(r) - }) - } -} - -// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which -// it closes the response if the passed Sender returns an error and the response body exists. -func DoCloseIfError() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - Respond(resp, ByDiscardingBody(), ByClosing()) - } - return resp, err - }) - } -} - -// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is -// among the set passed. Since these are artificial errors, the response body may still require -// closing. -func DoErrorIfStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && ResponseHasStatusCode(resp, codes...) 
{ - err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. -func DoErrorUnlessStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the -// passed status codes. It expects the http.Response to contain a Location header providing the -// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than -// the supplied duration. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request. -func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - - if err == nil && ResponseHasStatusCode(resp, codes...) { - r, err = NewPollingRequest(resp, r.Cancel) - - for err == nil && ResponseHasStatusCode(resp, codes...) 
{ - Respond(resp, - ByDiscardingBody(), - ByClosing()) - resp, err = SendWithSender(s, r, - AfterDelay(GetRetryAfter(resp, delay))) - } - } - - return resp, err - }) - } -} - -// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - for attempt := 0; attempt < attempts; attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - DelayForBackoff(backoff, attempt, r.Cancel) - } - return resp, err - }) - } -} - -// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - // Increment to add the first call (attempts denotes number of retries) - attempts++ - for attempt := 0; attempt < attempts; attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err != nil || !ResponseHasStatusCode(resp, codes...) 
{ - return resp, err - } - delayed := DelayWithRetryAfter(resp, r.Cancel) - if !delayed { - DelayForBackoff(backoff, attempt, r.Cancel) - } - } - return resp, err - }) - } -} - -// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in -// responses with status code 429 -func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { - retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) - if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { - select { - case <-time.After(time.Duration(retryAfter) * time.Second): - return true - case <-cancel: - return false - } - } - return false -} - -// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal -// to or greater than the specified duration, exponentially backing off between requests using the -// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the -// optional channel on the http.Request. -func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - end := time.Now().Add(d) - for attempt := 0; time.Now().Before(end); attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - DelayForBackoff(backoff, attempt, r.Cancel) - } - return resp, err - }) - } -} - -// WithLogging returns a SendDecorator that implements simple before and after logging of the -// request. 
-func WithLogging(logger *log.Logger) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - logger.Printf("Sending %s %s", r.Method, r.URL) - resp, err := s.Do(r) - if err != nil { - logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) - } else { - logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) - } - return resp, err - }) - } -} - -// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set -// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, -// returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { - select { - case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): - return true - case <-cancel: - return false - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go deleted file mode 100644 index dfdc6efdff0..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ /dev/null @@ -1,192 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "net/url" - "reflect" - "sort" - "strings" -) - -// EncodedAs is a series of constants specifying various data encodings -type EncodedAs string - -const ( - // EncodedAsJSON states that data is encoded as JSON - EncodedAsJSON EncodedAs = "JSON" - - // EncodedAsXML states that data is encoded as Xml - EncodedAsXML EncodedAs = "XML" -) - -// Decoder defines the decoding method json.Decoder and xml.Decoder share -type Decoder interface { - Decode(v interface{}) error -} - -// NewDecoder creates a new decoder appropriate to the passed encoding. -// encodedAs specifies the type of encoding and r supplies the io.Reader containing the -// encoded data. -func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { - if encodedAs == EncodedAsJSON { - return json.NewDecoder(r) - } else if encodedAs == EncodedAsXML { - return xml.NewDecoder(r) - } - return nil -} - -// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy -// is especially useful if there is a chance the data will fail to decode. -// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v -// is the decoding destination. -func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { - b := bytes.Buffer{} - return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) -} - -// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. -// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. -// Further, when it is closed, it ensures that rc is closed as well. 
-func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { - return &teeReadCloser{rc, io.TeeReader(rc, w)} -} - -type teeReadCloser struct { - rc io.ReadCloser - r io.Reader -} - -func (t *teeReadCloser) Read(p []byte) (int, error) { - return t.r.Read(p) -} - -func (t *teeReadCloser) Close() error { - return t.rc.Close() -} - -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -func escapeValueStrings(m map[string]string) map[string]string { - for key, value := range m { - m[key] = url.QueryEscape(value) - } - return m -} - -func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { - mapOfStrings := make(map[string]string) - for key, value := range mapOfInterface { - mapOfStrings[key] = ensureValueString(value) - } - return mapOfStrings -} - -func ensureValueString(value interface{}) string { - if value == nil { - return "" - } - switch v := value.(type) { - case string: - return v - case []byte: - return string(v) - default: - return fmt.Sprintf("%v", v) - } -} - -// MapToValues method converts map[string]interface{} to url.Values. -func MapToValues(m map[string]interface{}) url.Values { - v := url.Values{} - for key, value := range m { - x := reflect.ValueOf(value) - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - for i := 0; i < x.Len(); i++ { - v.Add(key, ensureValueString(x.Index(i))) - } - } else { - v.Add(key, ensureValueString(value)) - } - } - return v -} - -// String method converts interface v to string. If interface is a list, it -// joins list elements using separator. -func String(v interface{}, sep ...string) string { - if len(sep) > 0 { - return ensureValueString(strings.Join(v.([]string), sep[0])) - } - return ensureValueString(v) -} - -// Encode method encodes url path and query parameters. -func Encode(location string, v interface{}, sep ...string) string { - s := String(v, sep...) 
- switch strings.ToLower(location) { - case "path": - return pathEscape(s) - case "query": - return queryEscape(s) - default: - return s - } -} - -func pathEscape(s string) string { - return strings.Replace(url.QueryEscape(s), "+", "%20", -1) -} - -func queryEscape(s string) string { - return url.QueryEscape(s) -} - -// This method is same as Encode() method of "net/url" go package, -// except it does not encode the query parameters because they -// already come encoded. It formats values map in query format (bar=foo&a=b). -func createQuery(v url.Values) string { - var buf bytes.Buffer - keys := make([]string, 0, len(v)) - for k := range v { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - vs := v[k] - prefix := url.QueryEscape(k) + "=" - for _, v := range vs { - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - buf.WriteString(v) - } - } - return buf.String() -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go deleted file mode 100644 index 3fe62c93056..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go +++ /dev/null @@ -1,395 +0,0 @@ -/* -Package validation provides methods for validating parameter value using reflection. -*/ -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "fmt" - "reflect" - "regexp" - "strings" -) - -// Constraint stores constraint name, target field name -// Rule and chain validations. -type Constraint struct { - - // Target field name for validation. - Target string - - // Constraint name e.g. minLength, MaxLength, Pattern, etc. - Name string - - // Rule for constraint e.g. greater than 10, less than 5 etc. - Rule interface{} - - // Chain Validations for struct type - Chain []Constraint -} - -// Validation stores parameter-wise validation. -type Validation struct { - TargetValue interface{} - Constraints []Constraint -} - -// Constraint list -const ( - Empty = "Empty" - Null = "Null" - ReadOnly = "ReadOnly" - Pattern = "Pattern" - MaxLength = "MaxLength" - MinLength = "MinLength" - MaxItems = "MaxItems" - MinItems = "MinItems" - MultipleOf = "MultipleOf" - UniqueItems = "UniqueItems" - InclusiveMaximum = "InclusiveMaximum" - ExclusiveMaximum = "ExclusiveMaximum" - ExclusiveMinimum = "ExclusiveMinimum" - InclusiveMinimum = "InclusiveMinimum" -) - -// Validate method validates constraints on parameter -// passed in validation array. 
-func Validate(m []Validation) error { - for _, item := range m { - v := reflect.ValueOf(item.TargetValue) - for _, constraint := range item.Constraints { - var err error - switch v.Kind() { - case reflect.Ptr: - err = validatePtr(v, constraint) - case reflect.String: - err = validateString(v, constraint) - case reflect.Struct: - err = validateStruct(v, constraint) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - err = validateInt(v, constraint) - case reflect.Float32, reflect.Float64: - err = validateFloat(v, constraint) - case reflect.Array, reflect.Slice, reflect.Map: - err = validateArrayMap(v, constraint) - default: - err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) - } - - if err != nil { - return err - } - } - } - return nil -} - -func validateStruct(x reflect.Value, v Constraint, name ...string) error { - //Get field name from target name which is in format a.b.c - s := strings.Split(v.Target, ".") - f := x.FieldByName(s[len(s)-1]) - if isZero(f) { - return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) - } - - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(f), - Constraints: []Constraint{v}, - }, - }) -} - -func validatePtr(x reflect.Value, v Constraint) error { - if v.Name == ReadOnly { - if !x.IsNil() { - return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") - } - return nil - } - if x.IsNil() { - return checkNil(x, v) - } - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x.Elem()), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func validateInt(x reflect.Value, v Constraint) error { - i := x.Int() - r, ok := v.Rule.(int) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - switch v.Name { - case MultipleOf: - if i%int64(r) != 0 { - return createError(x, v, fmt.Sprintf("value must be a multiple 
of %v", r)) - } - case ExclusiveMinimum: - if i <= int64(r) { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case ExclusiveMaximum: - if i >= int64(r) { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case InclusiveMinimum: - if i < int64(r) { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case InclusiveMaximum: - if i > int64(r) { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) - } - return nil -} - -func validateFloat(x reflect.Value, v Constraint) error { - f := x.Float() - r, ok := v.Rule.(float64) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) - } - switch v.Name { - case ExclusiveMinimum: - if f <= r { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case ExclusiveMaximum: - if f >= r { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case InclusiveMinimum: - if f < r { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case InclusiveMaximum: - if f > r { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) - } - return nil -} - -func validateString(x reflect.Value, v Constraint) error { - s := x.String() - switch v.Name { - case Empty: - if len(s) == 0 { - return checkEmpty(x, v) - } - case Pattern: - reg, err := regexp.Compile(v.Rule.(string)) - if err != nil { - return createError(x, v, err.Error()) - } - if !reg.MatchString(s) { - return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) - } - case MaxLength: - if _, ok := v.Rule.(int); !ok { - return 
createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - if len(s) > v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) - } - case MinLength: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - if len(s) < v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) - } - case ReadOnly: - if len(s) > 0 { - return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) - } - - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func validateArrayMap(x reflect.Value, v Constraint) error { - switch v.Name { - case Null: - if x.IsNil() { - return checkNil(x, v) - } - case Empty: - if x.IsNil() || x.Len() == 0 { - return checkEmpty(x, v) - } - case MaxItems: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) - } - if x.Len() > v.Rule.(int) { - return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) - } - case MinItems: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) - } - if x.Len() < v.Rule.(int) { - return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) - } - case UniqueItems: - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - if !checkForUniqueInArray(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) - } - } else if x.Kind() == 
reflect.Map { - if !checkForUniqueInMap(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) - } - } else { - return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) - } - case ReadOnly: - if x.Len() != 0 { - return createError(x, v, "readonly parameter; must send as nil or empty in request") - } - case Pattern: - reg, err := regexp.Compile(v.Rule.(string)) - if err != nil { - return createError(x, v, err.Error()) - } - keys := x.MapKeys() - for _, k := range keys { - if !reg.MatchString(k.String()) { - return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) - } - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) - } - - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func checkNil(x reflect.Value, v Constraint) error { - if _, ok := v.Rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) - } - if v.Rule.(bool) { - return createError(x, v, "value can not be null; required parameter") - } - return nil -} - -func checkEmpty(x reflect.Value, v Constraint) error { - if _, ok := v.Rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) - } - - if v.Rule.(bool) { - return createError(x, v, "value can not be null or empty; required parameter") - } - return nil -} - -func checkForUniqueInArray(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - arrOfInterface := make([]interface{}, x.Len()) - - for i := 0; i < x.Len(); i++ { - arrOfInterface[i] = x.Index(i).Interface() - } - - m := make(map[interface{}]bool) - for _, val := range arrOfInterface { - if m[val] 
{ - return false - } - m[val] = true - } - return true -} - -func checkForUniqueInMap(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - mapOfInterface := make(map[interface{}]interface{}, x.Len()) - - keys := x.MapKeys() - for _, k := range keys { - mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() - } - - m := make(map[interface{}]bool) - for _, val := range mapOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func getInterfaceValue(x reflect.Value) interface{} { - if x.Kind() == reflect.Invalid { - return nil - } - return x.Interface() -} - -func isZero(x interface{}) bool { - return x == reflect.Zero(reflect.TypeOf(x)).Interface() -} - -func createError(x reflect.Value, v Constraint, err string) error { - return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", - v.Target, v.Name, getInterfaceValue(x), err) -} - -// NewErrorWithValidationError appends package type and method name in -// validation error. -func NewErrorWithValidationError(err error, packageType, method string) error { - return fmt.Errorf("%s#%s: Invalid input: %v", packageType, method, err) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go deleted file mode 100644 index f588807dbb9..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ /dev/null @@ -1,49 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "strings" - "sync" -) - -const ( - major = 8 - minor = 0 - patch = 0 - tag = "" -) - -var once sync.Once -var version string - -// Version returns the semantic version (see http://semver.org). -func Version() string { - once.Do(func() { - semver := fmt.Sprintf("%d.%d.%d", major, minor, patch) - verBuilder := bytes.NewBufferString(semver) - if tag != "" && tag != "-" { - updated := strings.TrimPrefix(tag, "-") - _, err := verBuilder.WriteString("-" + updated) - if err == nil { - verBuilder = bytes.NewBufferString(semver) - } - } - version = verBuilder.String() - }) - return version -} diff --git a/vendor/github.com/Unknwon/com/LICENSE b/vendor/github.com/Unknwon/com/LICENSE deleted file mode 100644 index 8405e89a0b1..00000000000 --- a/vendor/github.com/Unknwon/com/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. 
-For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. 
For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. 
If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. 
You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/Unknwon/com/README.md b/vendor/github.com/Unknwon/com/README.md deleted file mode 100644 index 8d821abd65e..00000000000 --- a/vendor/github.com/Unknwon/com/README.md +++ /dev/null @@ -1,20 +0,0 @@ -Common Functions -================ - -[![Build Status](https://travis-ci.org/Unknwon/com.svg)](https://travis-ci.org/Unknwon/com) [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/com) - -This is an open source project for commonly used functions for the Go programming language. - -This package need >= **go 1.2** - -Code Convention: based on [Go Code Convention](https://github.com/Unknwon/go-code-convention). - -## Contribute - -Your contribute is welcome, but you have to check following steps after you added some functions and commit them: - -1. Make sure you wrote user-friendly comments for **all functions** . -2. Make sure you wrote test cases with any possible condition for **all functions** in file `*_test.go`. -3. Make sure you wrote benchmarks for **all functions** in file `*_test.go`. -4. Make sure you wrote useful examples for **all functions** in file `example_test.go`. -5. Make sure you ran `go test` and got **PASS** . 
diff --git a/vendor/github.com/Unknwon/com/cmd.go b/vendor/github.com/Unknwon/com/cmd.go deleted file mode 100644 index dc7086d8f71..00000000000 --- a/vendor/github.com/Unknwon/com/cmd.go +++ /dev/null @@ -1,161 +0,0 @@ -// +build go1.2 - -// Copyright 2013 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package com is an open source project for commonly used functions for the Go programming language. -package com - -import ( - "bytes" - "fmt" - "os/exec" - "runtime" - "strings" -) - -// ExecCmdDirBytes executes system command in given directory -// and return stdout, stderr in bytes type, along with possible error. -func ExecCmdDirBytes(dir, cmdName string, args ...string) ([]byte, []byte, error) { - bufOut := new(bytes.Buffer) - bufErr := new(bytes.Buffer) - - cmd := exec.Command(cmdName, args...) - cmd.Dir = dir - cmd.Stdout = bufOut - cmd.Stderr = bufErr - - err := cmd.Run() - return bufOut.Bytes(), bufErr.Bytes(), err -} - -// ExecCmdBytes executes system command -// and return stdout, stderr in bytes type, along with possible error. -func ExecCmdBytes(cmdName string, args ...string) ([]byte, []byte, error) { - return ExecCmdDirBytes("", cmdName, args...) -} - -// ExecCmdDir executes system command in given directory -// and return stdout, stderr in string type, along with possible error. -func ExecCmdDir(dir, cmdName string, args ...string) (string, string, error) { - bufOut, bufErr, err := ExecCmdDirBytes(dir, cmdName, args...) 
- return string(bufOut), string(bufErr), err -} - -// ExecCmd executes system command -// and return stdout, stderr in string type, along with possible error. -func ExecCmd(cmdName string, args ...string) (string, string, error) { - return ExecCmdDir("", cmdName, args...) -} - -// _________ .__ .____ -// \_ ___ \ ____ | | ___________ | | ____ ____ -// / \ \/ / _ \| | / _ \_ __ \ | | / _ \ / ___\ -// \ \___( <_> ) |_( <_> ) | \/ | |__( <_> ) /_/ > -// \______ /\____/|____/\____/|__| |_______ \____/\___ / -// \/ \/ /_____/ - -// Color number constants. -const ( - Gray = uint8(iota + 90) - Red - Green - Yellow - Blue - Magenta - //NRed = uint8(31) // Normal - EndColor = "\033[0m" -) - -// getColorLevel returns colored level string by given level. -func getColorLevel(level string) string { - level = strings.ToUpper(level) - switch level { - case "TRAC": - return fmt.Sprintf("\033[%dm%s\033[0m", Blue, level) - case "ERRO": - return fmt.Sprintf("\033[%dm%s\033[0m", Red, level) - case "WARN": - return fmt.Sprintf("\033[%dm%s\033[0m", Magenta, level) - case "SUCC": - return fmt.Sprintf("\033[%dm%s\033[0m", Green, level) - default: - return level - } -} - -// ColorLogS colors log and return colored content. -// Log format: [ error ]. -// Level: TRAC -> blue; ERRO -> red; WARN -> Magenta; SUCC -> green; others -> default. -// Content: default; path: yellow; error -> red. -// Level has to be surrounded by "[" and "]". -// Highlights have to be surrounded by "# " and " #"(space), "#" will be deleted. -// Paths have to be surrounded by "( " and " )"(space). -// Errors have to be surrounded by "[ " and " ]"(space). -// Note: it hasn't support windows yet, contribute is welcome. -func ColorLogS(format string, a ...interface{}) string { - log := fmt.Sprintf(format, a...) - - var clog string - - if runtime.GOOS != "windows" { - // Level. - i := strings.Index(log, "]") - if log[0] == '[' && i > -1 { - clog += "[" + getColorLevel(log[1:i]) + "]" - } - - log = log[i+1:] - - // Error. 
- log = strings.Replace(log, "[ ", fmt.Sprintf("[\033[%dm", Red), -1) - log = strings.Replace(log, " ]", EndColor+"]", -1) - - // Path. - log = strings.Replace(log, "( ", fmt.Sprintf("(\033[%dm", Yellow), -1) - log = strings.Replace(log, " )", EndColor+")", -1) - - // Highlights. - log = strings.Replace(log, "# ", fmt.Sprintf("\033[%dm", Gray), -1) - log = strings.Replace(log, " #", EndColor, -1) - - } else { - // Level. - i := strings.Index(log, "]") - if log[0] == '[' && i > -1 { - clog += "[" + log[1:i] + "]" - } - - log = log[i+1:] - - // Error. - log = strings.Replace(log, "[ ", "[", -1) - log = strings.Replace(log, " ]", "]", -1) - - // Path. - log = strings.Replace(log, "( ", "(", -1) - log = strings.Replace(log, " )", ")", -1) - - // Highlights. - log = strings.Replace(log, "# ", "", -1) - log = strings.Replace(log, " #", "", -1) - } - return clog + log -} - -// ColorLog prints colored log to stdout. -// See color rules in function 'ColorLogS'. -func ColorLog(format string, a ...interface{}) { - fmt.Print(ColorLogS(format, a...)) -} diff --git a/vendor/github.com/Unknwon/com/convert.go b/vendor/github.com/Unknwon/com/convert.go deleted file mode 100644 index bf24aa8bc30..00000000000 --- a/vendor/github.com/Unknwon/com/convert.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2014 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package com - -import ( - "fmt" - "strconv" -) - -// Convert string to specify type. 
-type StrTo string - -func (f StrTo) Exist() bool { - return string(f) != string(0x1E) -} - -func (f StrTo) Uint8() (uint8, error) { - v, err := strconv.ParseUint(f.String(), 10, 8) - return uint8(v), err -} - -func (f StrTo) Int() (int, error) { - v, err := strconv.ParseInt(f.String(), 10, 0) - return int(v), err -} - -func (f StrTo) Int64() (int64, error) { - v, err := strconv.ParseInt(f.String(), 10, 64) - return int64(v), err -} - -func (f StrTo) Float64() (float64, error) { - v, err := strconv.ParseFloat(f.String(), 64) - return float64(v), err -} - -func (f StrTo) MustUint8() uint8 { - v, _ := f.Uint8() - return v -} - -func (f StrTo) MustInt() int { - v, _ := f.Int() - return v -} - -func (f StrTo) MustInt64() int64 { - v, _ := f.Int64() - return v -} - -func (f StrTo) MustFloat64() float64 { - v, _ := f.Float64() - return v -} - -func (f StrTo) String() string { - if f.Exist() { - return string(f) - } - return "" -} - -// Convert any type to string. -func ToStr(value interface{}, args ...int) (s string) { - switch v := value.(type) { - case bool: - s = strconv.FormatBool(v) - case float32: - s = strconv.FormatFloat(float64(v), 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 32)) - case float64: - s = strconv.FormatFloat(v, 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 64)) - case int: - s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) - case int8: - s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) - case int16: - s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) - case int32: - s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) - case int64: - s = strconv.FormatInt(v, argInt(args).Get(0, 10)) - case uint: - s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) - case uint8: - s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) - case uint16: - s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) - case uint32: - s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) - case uint64: - s = 
strconv.FormatUint(v, argInt(args).Get(0, 10)) - case string: - s = v - case []byte: - s = string(v) - default: - s = fmt.Sprintf("%v", v) - } - return s -} - -type argInt []int - -func (a argInt) Get(i int, args ...int) (r int) { - if i >= 0 && i < len(a) { - r = a[i] - } else if len(args) > 0 { - r = args[0] - } - return -} - -// HexStr2int converts hex format string to decimal number. -func HexStr2int(hexStr string) (int, error) { - num := 0 - length := len(hexStr) - for i := 0; i < length; i++ { - char := hexStr[length-i-1] - factor := -1 - - switch { - case char >= '0' && char <= '9': - factor = int(char) - '0' - case char >= 'a' && char <= 'f': - factor = int(char) - 'a' + 10 - default: - return -1, fmt.Errorf("invalid hex: %s", string(char)) - } - - num += factor * PowInt(16, i) - } - return num, nil -} - -// Int2HexStr converts decimal number to hex format string. -func Int2HexStr(num int) (hex string) { - if num == 0 { - return "0" - } - - for num > 0 { - r := num % 16 - - c := "?" - if r >= 0 && r <= 9 { - c = string(r + '0') - } else { - c = string(r + 'a' - 10) - } - hex = c + hex - num = num / 16 - } - return hex -} diff --git a/vendor/github.com/Unknwon/com/dir.go b/vendor/github.com/Unknwon/com/dir.go deleted file mode 100644 index c126d79da86..00000000000 --- a/vendor/github.com/Unknwon/com/dir.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2013 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package com - -import ( - "errors" - "fmt" - "os" - "path" - "strings" -) - -// IsDir returns true if given path is a directory, -// or returns false when it's a file or does not exist. -func IsDir(dir string) bool { - f, e := os.Stat(dir) - if e != nil { - return false - } - return f.IsDir() -} - -func statDir(dirPath, recPath string, includeDir, isDirOnly bool) ([]string, error) { - dir, err := os.Open(dirPath) - if err != nil { - return nil, err - } - defer dir.Close() - - fis, err := dir.Readdir(0) - if err != nil { - return nil, err - } - - statList := make([]string, 0) - for _, fi := range fis { - if strings.Contains(fi.Name(), ".DS_Store") { - continue - } - - relPath := path.Join(recPath, fi.Name()) - curPath := path.Join(dirPath, fi.Name()) - if fi.IsDir() { - if includeDir { - statList = append(statList, relPath+"/") - } - s, err := statDir(curPath, relPath, includeDir, isDirOnly) - if err != nil { - return nil, err - } - statList = append(statList, s...) - } else if !isDirOnly { - statList = append(statList, relPath) - } - } - return statList, nil -} - -// StatDir gathers information of given directory by depth-first. -// It returns slice of file list and includes subdirectories if enabled; -// it returns error and nil slice when error occurs in underlying functions, -// or given path is not a directory or does not exist. -// -// Slice does not include given path itself. -// If subdirectories is enabled, they will have suffix '/'. -func StatDir(rootPath string, includeDir ...bool) ([]string, error) { - if !IsDir(rootPath) { - return nil, errors.New("not a directory or does not exist: " + rootPath) - } - - isIncludeDir := false - if len(includeDir) >= 1 { - isIncludeDir = includeDir[0] - } - return statDir(rootPath, "", isIncludeDir, false) -} - -// GetAllSubDirs returns all subdirectories of given root path. -// Slice does not include given path itself. 
-func GetAllSubDirs(rootPath string) ([]string, error) { - if !IsDir(rootPath) { - return nil, errors.New("not a directory or does not exist: " + rootPath) - } - return statDir(rootPath, "", true, true) -} - -// GetFileListBySuffix returns an ordered list of file paths. -// It recognize if given path is a file, and don't do recursive find. -func GetFileListBySuffix(dirPath, suffix string) ([]string, error) { - if !IsExist(dirPath) { - return nil, fmt.Errorf("given path does not exist: %s", dirPath) - } else if IsFile(dirPath) { - return []string{dirPath}, nil - } - - // Given path is a directory. - dir, err := os.Open(dirPath) - if err != nil { - return nil, err - } - - fis, err := dir.Readdir(0) - if err != nil { - return nil, err - } - - files := make([]string, 0, len(fis)) - for _, fi := range fis { - if strings.HasSuffix(fi.Name(), suffix) { - files = append(files, path.Join(dirPath, fi.Name())) - } - } - - return files, nil -} - -// CopyDir copy files recursively from source to target directory. -// -// The filter accepts a function that process the path info. -// and should return true for need to filter. -// -// It returns error when error occurs in underlying functions. -func CopyDir(srcPath, destPath string, filters ...func(filePath string) bool) error { - // Check if target directory exists. - if IsExist(destPath) { - return errors.New("file or directory alreay exists: " + destPath) - } - - err := os.MkdirAll(destPath, os.ModePerm) - if err != nil { - return err - } - - // Gather directory info. 
- infos, err := StatDir(srcPath, true) - if err != nil { - return err - } - - var filter func(filePath string) bool - if len(filters) > 0 { - filter = filters[0] - } - - for _, info := range infos { - if filter != nil && filter(info) { - continue - } - - curPath := path.Join(destPath, info) - if strings.HasSuffix(info, "/") { - err = os.MkdirAll(curPath, os.ModePerm) - } else { - err = Copy(path.Join(srcPath, info), curPath) - } - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/Unknwon/com/file.go b/vendor/github.com/Unknwon/com/file.go deleted file mode 100644 index b51502c9176..00000000000 --- a/vendor/github.com/Unknwon/com/file.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2013 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package com - -import ( - "fmt" - "io" - "io/ioutil" - "math" - "os" - "path" -) - -// Storage unit constants. 
-const ( - Byte = 1 - KByte = Byte * 1024 - MByte = KByte * 1024 - GByte = MByte * 1024 - TByte = GByte * 1024 - PByte = TByte * 1024 - EByte = PByte * 1024 -) - -func logn(n, b float64) float64 { - return math.Log(n) / math.Log(b) -} - -func humanateBytes(s uint64, base float64, sizes []string) string { - if s < 10 { - return fmt.Sprintf("%dB", s) - } - e := math.Floor(logn(float64(s), base)) - suffix := sizes[int(e)] - val := float64(s) / math.Pow(base, math.Floor(e)) - f := "%.0f" - if val < 10 { - f = "%.1f" - } - - return fmt.Sprintf(f+"%s", val, suffix) -} - -// HumaneFileSize calculates the file size and generate user-friendly string. -func HumaneFileSize(s uint64) string { - sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB"} - return humanateBytes(s, 1024, sizes) -} - -// FileMTime returns file modified time and possible error. -func FileMTime(file string) (int64, error) { - f, err := os.Stat(file) - if err != nil { - return 0, err - } - return f.ModTime().Unix(), nil -} - -// FileSize returns file size in bytes and possible error. -func FileSize(file string) (int64, error) { - f, err := os.Stat(file) - if err != nil { - return 0, err - } - return f.Size(), nil -} - -// Copy copies file from source to target path. -func Copy(src, dest string) error { - // Gather file information to set back later. - si, err := os.Lstat(src) - if err != nil { - return err - } - - // Handle symbolic link. - if si.Mode()&os.ModeSymlink != 0 { - target, err := os.Readlink(src) - if err != nil { - return err - } - // NOTE: os.Chmod and os.Chtimes don't recoganize symbolic link, - // which will lead "no such file or directory" error. - return os.Symlink(target, dest) - } - - sr, err := os.Open(src) - if err != nil { - return err - } - defer sr.Close() - - dw, err := os.Create(dest) - if err != nil { - return err - } - defer dw.Close() - - if _, err = io.Copy(dw, sr); err != nil { - return err - } - - // Set back file information. 
- if err = os.Chtimes(dest, si.ModTime(), si.ModTime()); err != nil { - return err - } - return os.Chmod(dest, si.Mode()) -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it -// and its upper level paths. -func WriteFile(filename string, data []byte) error { - os.MkdirAll(path.Dir(filename), os.ModePerm) - return ioutil.WriteFile(filename, data, 0655) -} - -// IsFile returns true if given path is a file, -// or returns false when it's a directory or does not exist. -func IsFile(filePath string) bool { - f, e := os.Stat(filePath) - if e != nil { - return false - } - return !f.IsDir() -} - -// IsExist checks whether a file or directory exists. -// It returns false when the file or directory does not exist. -func IsExist(path string) bool { - _, err := os.Stat(path) - return err == nil || os.IsExist(err) -} diff --git a/vendor/github.com/Unknwon/com/html.go b/vendor/github.com/Unknwon/com/html.go deleted file mode 100644 index 762d94b050d..00000000000 --- a/vendor/github.com/Unknwon/com/html.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2013 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package com - -import ( - "html" - "regexp" - "strings" -) - -// Html2JS converts []byte type of HTML content into JS format. 
-func Html2JS(data []byte) []byte { - s := string(data) - s = strings.Replace(s, `\`, `\\`, -1) - s = strings.Replace(s, "\n", `\n`, -1) - s = strings.Replace(s, "\r", "", -1) - s = strings.Replace(s, "\"", `\"`, -1) - s = strings.Replace(s, "", "<table>", -1) - return []byte(s) -} - -// encode html chars to string -func HtmlEncode(str string) string { - return html.EscapeString(str) -} - -// decode string to html chars -func HtmlDecode(str string) string { - return html.UnescapeString(str) -} - -// strip tags in html string -func StripTags(src string) string { - //去除style,script,html tag - re := regexp.MustCompile(`(?s)<(?:style|script)[^<>]*>.*?|]*>|`) - src = re.ReplaceAllString(src, "") - - //trim all spaces(2+) into \n - re = regexp.MustCompile(`\s{2,}`) - src = re.ReplaceAllString(src, "\n") - - return strings.TrimSpace(src) -} - -// change \n to
-func Nl2br(str string) string { - return strings.Replace(str, "\n", "
", -1) -} diff --git a/vendor/github.com/Unknwon/com/http.go b/vendor/github.com/Unknwon/com/http.go deleted file mode 100644 index 3415059ae90..00000000000 --- a/vendor/github.com/Unknwon/com/http.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2013 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package com - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "path" -) - -type NotFoundError struct { - Message string -} - -func (e NotFoundError) Error() string { - return e.Message -} - -type RemoteError struct { - Host string - Err error -} - -func (e *RemoteError) Error() string { - return e.Err.Error() -} - -var UserAgent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1541.0 Safari/537.36" - -// HttpCall makes HTTP method call. -func HttpCall(client *http.Client, method, url string, header http.Header, body io.Reader) (io.ReadCloser, error) { - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", UserAgent) - for k, vs := range header { - req.Header[k] = vs - } - resp, err := client.Do(req) - if err != nil { - return nil, err - } - if resp.StatusCode == 200 { - return resp.Body, nil - } - resp.Body.Close() - if resp.StatusCode == 404 { // 403 can be rate limit error. 
|| resp.StatusCode == 403 { - err = fmt.Errorf("resource not found: %s", url) - } else { - err = fmt.Errorf("%s %s -> %d", method, url, resp.StatusCode) - } - return nil, err -} - -// HttpGet gets the specified resource. -// ErrNotFound is returned if the server responds with status 404. -func HttpGet(client *http.Client, url string, header http.Header) (io.ReadCloser, error) { - return HttpCall(client, "GET", url, header, nil) -} - -// HttpPost posts the specified resource. -// ErrNotFound is returned if the server responds with status 404. -func HttpPost(client *http.Client, url string, header http.Header, body []byte) (io.ReadCloser, error) { - return HttpCall(client, "POST", url, header, bytes.NewBuffer(body)) -} - -// HttpGetToFile gets the specified resource and writes to file. -// ErrNotFound is returned if the server responds with status 404. -func HttpGetToFile(client *http.Client, url string, header http.Header, fileName string) error { - rc, err := HttpGet(client, url, header) - if err != nil { - return err - } - defer rc.Close() - - os.MkdirAll(path.Dir(fileName), os.ModePerm) - f, err := os.Create(fileName) - if err != nil { - return err - } - defer f.Close() - _, err = io.Copy(f, rc) - return err -} - -// HttpGetBytes gets the specified resource. ErrNotFound is returned if the server -// responds with status 404. -func HttpGetBytes(client *http.Client, url string, header http.Header) ([]byte, error) { - rc, err := HttpGet(client, url, header) - if err != nil { - return nil, err - } - defer rc.Close() - return ioutil.ReadAll(rc) -} - -// HttpGetJSON gets the specified resource and mapping to struct. -// ErrNotFound is returned if the server responds with status 404. 
-func HttpGetJSON(client *http.Client, url string, v interface{}) error { - rc, err := HttpGet(client, url, nil) - if err != nil { - return err - } - defer rc.Close() - err = json.NewDecoder(rc).Decode(v) - if _, ok := err.(*json.SyntaxError); ok { - return fmt.Errorf("JSON syntax error at %s", url) - } - return nil -} - -// HttpPostJSON posts the specified resource with struct values, -// and maps results to struct. -// ErrNotFound is returned if the server responds with status 404. -func HttpPostJSON(client *http.Client, url string, body, v interface{}) error { - data, err := json.Marshal(body) - if err != nil { - return err - } - rc, err := HttpPost(client, url, http.Header{"content-type": []string{"application/json"}}, data) - if err != nil { - return err - } - defer rc.Close() - err = json.NewDecoder(rc).Decode(v) - if _, ok := err.(*json.SyntaxError); ok { - return fmt.Errorf("JSON syntax error at %s", url) - } - return nil -} - -// A RawFile describes a file that can be downloaded. -type RawFile interface { - Name() string - RawUrl() string - Data() []byte - SetData([]byte) -} - -// FetchFiles fetches files specified by the rawURL field in parallel. -func FetchFiles(client *http.Client, files []RawFile, header http.Header) error { - ch := make(chan error, len(files)) - for i := range files { - go func(i int) { - p, err := HttpGetBytes(client, files[i].RawUrl(), nil) - if err != nil { - ch <- err - return - } - files[i].SetData(p) - ch <- nil - }(i) - } - for _ = range files { - if err := <-ch; err != nil { - return err - } - } - return nil -} - -// FetchFiles uses command `curl` to fetch files specified by the rawURL field in parallel. -func FetchFilesCurl(files []RawFile, curlOptions ...string) error { - ch := make(chan error, len(files)) - for i := range files { - go func(i int) { - stdout, _, err := ExecCmd("curl", append(curlOptions, files[i].RawUrl())...) 
- if err != nil { - ch <- err - return - } - - files[i].SetData([]byte(stdout)) - ch <- nil - }(i) - } - for _ = range files { - if err := <-ch; err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/Unknwon/com/math.go b/vendor/github.com/Unknwon/com/math.go deleted file mode 100644 index 99c56b65947..00000000000 --- a/vendor/github.com/Unknwon/com/math.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2014 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package com - -// PowInt is int type of math.Pow function. -func PowInt(x int, y int) int { - if y <= 0 { - return 1 - } else { - if y % 2 == 0 { - sqrt := PowInt(x, y/2) - return sqrt * sqrt - } else { - return PowInt(x, y-1) * x - } - } -} diff --git a/vendor/github.com/Unknwon/com/path.go b/vendor/github.com/Unknwon/com/path.go deleted file mode 100644 index b1e860def42..00000000000 --- a/vendor/github.com/Unknwon/com/path.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2013 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -// License for the specific language governing permissions and limitations -// under the License. - -package com - -import ( - "errors" - "os" - "path/filepath" - "runtime" - "strings" -) - -// GetGOPATHs returns all paths in GOPATH variable. -func GetGOPATHs() []string { - gopath := os.Getenv("GOPATH") - var paths []string - if runtime.GOOS == "windows" { - gopath = strings.Replace(gopath, "\\", "/", -1) - paths = strings.Split(gopath, ";") - } else { - paths = strings.Split(gopath, ":") - } - return paths -} - -// GetSrcPath returns app. source code path. -// It only works when you have src. folder in GOPATH, -// it returns error not able to locate source folder path. -func GetSrcPath(importPath string) (appPath string, err error) { - paths := GetGOPATHs() - for _, p := range paths { - if IsExist(p + "/src/" + importPath + "/") { - appPath = p + "/src/" + importPath + "/" - break - } - } - - if len(appPath) == 0 { - return "", errors.New("Unable to locate source folder path") - } - - appPath = filepath.Dir(appPath) + "/" - if runtime.GOOS == "windows" { - // Replace all '\' to '/'. - appPath = strings.Replace(appPath, "\\", "/", -1) - } - - return appPath, nil -} - -// HomeDir returns path of '~'(in Linux) on Windows, -// it returns error when the variable does not exist. 
-func HomeDir() (home string, err error) { - if runtime.GOOS == "windows" { - home = os.Getenv("USERPROFILE") - if len(home) == 0 { - home = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") - } - } else { - home = os.Getenv("HOME") - } - - if len(home) == 0 { - return "", errors.New("Cannot specify home directory because it's empty") - } - - return home, nil -} diff --git a/vendor/github.com/Unknwon/com/regex.go b/vendor/github.com/Unknwon/com/regex.go deleted file mode 100644 index 765bfc43113..00000000000 --- a/vendor/github.com/Unknwon/com/regex.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2013 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package com - -import "regexp" - -const ( - regex_email_pattern = `(?i)[A-Z0-9._%+-]+@(?:[A-Z0-9-]+\.)+[A-Z]{2,6}` - regex_strict_email_pattern = `(?i)[A-Z0-9!#$%&'*+/=?^_{|}~-]+` + - `(?:\.[A-Z0-9!#$%&'*+/=?^_{|}~-]+)*` + - `@(?:[A-Z0-9](?:[A-Z0-9-]*[A-Z0-9])?\.)+` + - `[A-Z0-9](?:[A-Z0-9-]*[A-Z0-9])?` - regex_url_pattern = `(ftp|http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?` -) - -var ( - regex_email *regexp.Regexp - regex_strict_email *regexp.Regexp - regex_url *regexp.Regexp -) - -func init() { - regex_email = regexp.MustCompile(regex_email_pattern) - regex_strict_email = regexp.MustCompile(regex_strict_email_pattern) - regex_url = regexp.MustCompile(regex_url_pattern) -} - -// validate string is an email address, if not return false -// basically validation can match 99% cases -func IsEmail(email string) bool { - return regex_email.MatchString(email) -} - -// validate string is an email address, if not return false -// this validation omits RFC 2822 -func IsEmailRFC(email string) bool { - return regex_strict_email.MatchString(email) -} - -// validate string is a url link, if not return false -// simple validation can match 99% cases -func IsUrl(url string) bool { - return regex_url.MatchString(url) -} diff --git a/vendor/github.com/Unknwon/com/slice.go b/vendor/github.com/Unknwon/com/slice.go deleted file mode 100644 index 27801a4d7df..00000000000 --- a/vendor/github.com/Unknwon/com/slice.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2013 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -// License for the specific language governing permissions and limitations -// under the License. - -package com - -import ( - "strings" -) - -// AppendStr appends string to slice with no duplicates. -func AppendStr(strs []string, str string) []string { - for _, s := range strs { - if s == str { - return strs - } - } - return append(strs, str) -} - -// CompareSliceStr compares two 'string' type slices. -// It returns true if elements and order are both the same. -func CompareSliceStr(s1, s2 []string) bool { - if len(s1) != len(s2) { - return false - } - - for i := range s1 { - if s1[i] != s2[i] { - return false - } - } - - return true -} - -// CompareSliceStr compares two 'string' type slices. -// It returns true if elements are the same, and ignores the order. -func CompareSliceStrU(s1, s2 []string) bool { - if len(s1) != len(s2) { - return false - } - - for i := range s1 { - for j := len(s2) - 1; j >= 0; j-- { - if s1[i] == s2[j] { - s2 = append(s2[:j], s2[j+1:]...) - break - } - } - } - if len(s2) > 0 { - return false - } - return true -} - -// IsSliceContainsStr returns true if the string exists in given slice, ignore case. -func IsSliceContainsStr(sl []string, str string) bool { - str = strings.ToLower(str) - for _, s := range sl { - if strings.ToLower(s) == str { - return true - } - } - return false -} - -// IsSliceContainsInt64 returns true if the int64 exists in given slice. -func IsSliceContainsInt64(sl []int64, i int64) bool { - for _, s := range sl { - if s == i { - return true - } - } - return false -} diff --git a/vendor/github.com/Unknwon/com/string.go b/vendor/github.com/Unknwon/com/string.go deleted file mode 100644 index 7080d174a81..00000000000 --- a/vendor/github.com/Unknwon/com/string.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2013 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package com - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "errors" - r "math/rand" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -// AESGCMEncrypt encrypts plaintext with the given key using AES in GCM mode. -func AESGCMEncrypt(key, plaintext []byte) ([]byte, error) { - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - gcm, err := cipher.NewGCM(block) - if err != nil { - return nil, err - } - - nonce := make([]byte, gcm.NonceSize()) - if _, err := rand.Read(nonce); err != nil { - return nil, err - } - - ciphertext := gcm.Seal(nil, nonce, plaintext, nil) - return append(nonce, ciphertext...), nil -} - -// AESGCMDecrypt decrypts ciphertext with the given key using AES in GCM mode. -func AESGCMDecrypt(key, ciphertext []byte) ([]byte, error) { - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - gcm, err := cipher.NewGCM(block) - if err != nil { - return nil, err - } - - size := gcm.NonceSize() - if len(ciphertext)-size <= 0 { - return nil, errors.New("Ciphertext is empty") - } - - nonce := ciphertext[:size] - ciphertext = ciphertext[size:] - - plainText, err := gcm.Open(nil, nonce, ciphertext, nil) - if err != nil { - return nil, err - } - - return plainText, nil -} - -// IsLetter returns true if the 'l' is an English letter. -func IsLetter(l uint8) bool { - n := (l | 0x20) - 'a' - if n >= 0 && n < 26 { - return true - } - return false -} - -// Expand replaces {k} in template with match[k] or subs[atoi(k)] if k is not in match. 
-func Expand(template string, match map[string]string, subs ...string) string { - var p []byte - var i int - for { - i = strings.Index(template, "{") - if i < 0 { - break - } - p = append(p, template[:i]...) - template = template[i+1:] - i = strings.Index(template, "}") - if s, ok := match[template[:i]]; ok { - p = append(p, s...) - } else { - j, _ := strconv.Atoi(template[:i]) - if j >= len(subs) { - p = append(p, []byte("Missing")...) - } else { - p = append(p, subs[j]...) - } - } - template = template[i+1:] - } - p = append(p, template...) - return string(p) -} - -// Reverse s string, support unicode -func Reverse(s string) string { - n := len(s) - runes := make([]rune, n) - for _, rune := range s { - n-- - runes[n] = rune - } - return string(runes[n:]) -} - -// RandomCreateBytes generate random []byte by specify chars. -func RandomCreateBytes(n int, alphabets ...byte) []byte { - const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - var bytes = make([]byte, n) - var randby bool - if num, err := rand.Read(bytes); num != n || err != nil { - r.Seed(time.Now().UnixNano()) - randby = true - } - for i, b := range bytes { - if len(alphabets) == 0 { - if randby { - bytes[i] = alphanum[r.Intn(len(alphanum))] - } else { - bytes[i] = alphanum[b%byte(len(alphanum))] - } - } else { - if randby { - bytes[i] = alphabets[r.Intn(len(alphabets))] - } else { - bytes[i] = alphabets[b%byte(len(alphabets))] - } - } - } - return bytes -} - -// ToSnakeCase can convert all upper case characters in a string to -// underscore format. -// -// Some samples. -// "FirstName" => "first_name" -// "HTTPServer" => "http_server" -// "NoHTTPS" => "no_https" -// "GO_PATH" => "go_path" -// "GO PATH" => "go_path" // space is converted to underscore. -// "GO-PATH" => "go_path" // hyphen is converted to underscore. 
-// -// From https://github.com/huandu/xstrings -func ToSnakeCase(str string) string { - if len(str) == 0 { - return "" - } - - buf := &bytes.Buffer{} - var prev, r0, r1 rune - var size int - - r0 = '_' - - for len(str) > 0 { - prev = r0 - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - switch { - case r0 == utf8.RuneError: - buf.WriteByte(byte(str[0])) - - case unicode.IsUpper(r0): - if prev != '_' { - buf.WriteRune('_') - } - - buf.WriteRune(unicode.ToLower(r0)) - - if len(str) == 0 { - break - } - - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if !unicode.IsUpper(r0) { - buf.WriteRune(r0) - break - } - - // find next non-upper-case character and insert `_` properly. - // it's designed to convert `HTTPServer` to `http_server`. - // if there are more than 2 adjacent upper case characters in a word, - // treat them as an abbreviation plus a normal word. - for len(str) > 0 { - r1 = r0 - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if r0 == utf8.RuneError { - buf.WriteRune(unicode.ToLower(r1)) - buf.WriteByte(byte(str[0])) - break - } - - if !unicode.IsUpper(r0) { - if r0 == '_' || r0 == ' ' || r0 == '-' { - r0 = '_' - - buf.WriteRune(unicode.ToLower(r1)) - } else { - buf.WriteRune('_') - buf.WriteRune(unicode.ToLower(r1)) - buf.WriteRune(r0) - } - - break - } - - buf.WriteRune(unicode.ToLower(r1)) - } - - if len(str) == 0 || r0 == '_' { - buf.WriteRune(unicode.ToLower(r0)) - break - } - - default: - if r0 == ' ' || r0 == '-' { - r0 = '_' - } - - buf.WriteRune(r0) - } - } - - return buf.String() -} diff --git a/vendor/github.com/Unknwon/com/time.go b/vendor/github.com/Unknwon/com/time.go deleted file mode 100644 index dd1cdbcf2a0..00000000000 --- a/vendor/github.com/Unknwon/com/time.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2013 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package com - -import ( - "fmt" - "strconv" - "strings" - "time" -) - -// Format unix time int64 to string -func Date(ti int64, format string) string { - t := time.Unix(int64(ti), 0) - return DateT(t, format) -} - -// Format unix time string to string -func DateS(ts string, format string) string { - i, _ := strconv.ParseInt(ts, 10, 64) - return Date(i, format) -} - -// Format time.Time struct to string -// MM - month - 01 -// M - month - 1, single bit -// DD - day - 02 -// D - day 2 -// YYYY - year - 2006 -// YY - year - 06 -// HH - 24 hours - 03 -// H - 24 hours - 3 -// hh - 12 hours - 03 -// h - 12 hours - 3 -// mm - minute - 04 -// m - minute - 4 -// ss - second - 05 -// s - second = 5 -func DateT(t time.Time, format string) string { - res := strings.Replace(format, "MM", t.Format("01"), -1) - res = strings.Replace(res, "M", t.Format("1"), -1) - res = strings.Replace(res, "DD", t.Format("02"), -1) - res = strings.Replace(res, "D", t.Format("2"), -1) - res = strings.Replace(res, "YYYY", t.Format("2006"), -1) - res = strings.Replace(res, "YY", t.Format("06"), -1) - res = strings.Replace(res, "HH", fmt.Sprintf("%02d", t.Hour()), -1) - res = strings.Replace(res, "H", fmt.Sprintf("%d", t.Hour()), -1) - res = strings.Replace(res, "hh", t.Format("03"), -1) - res = strings.Replace(res, "h", t.Format("3"), -1) - res = strings.Replace(res, "mm", t.Format("04"), -1) - res = strings.Replace(res, "m", t.Format("4"), -1) - res = strings.Replace(res, "ss", t.Format("05"), -1) - res = strings.Replace(res, "s", t.Format("5"), -1) - return res -} - -// 
DateFormat pattern rules. -var datePatterns = []string{ - // year - "Y", "2006", // A full numeric representation of a year, 4 digits Examples: 1999 or 2003 - "y", "06", //A two digit representation of a year Examples: 99 or 03 - - // month - "m", "01", // Numeric representation of a month, with leading zeros 01 through 12 - "n", "1", // Numeric representation of a month, without leading zeros 1 through 12 - "M", "Jan", // A short textual representation of a month, three letters Jan through Dec - "F", "January", // A full textual representation of a month, such as January or March January through December - - // day - "d", "02", // Day of the month, 2 digits with leading zeros 01 to 31 - "j", "2", // Day of the month without leading zeros 1 to 31 - - // week - "D", "Mon", // A textual representation of a day, three letters Mon through Sun - "l", "Monday", // A full textual representation of the day of the week Sunday through Saturday - - // time - "g", "3", // 12-hour format of an hour without leading zeros 1 through 12 - "G", "15", // 24-hour format of an hour without leading zeros 0 through 23 - "h", "03", // 12-hour format of an hour with leading zeros 01 through 12 - "H", "15", // 24-hour format of an hour with leading zeros 00 through 23 - - "a", "pm", // Lowercase Ante meridiem and Post meridiem am or pm - "A", "PM", // Uppercase Ante meridiem and Post meridiem AM or PM - - "i", "04", // Minutes with leading zeros 00 to 59 - "s", "05", // Seconds, with leading zeros 00 through 59 - - // time zone - "T", "MST", - "P", "-07:00", - "O", "-0700", - - // RFC 2822 - "r", time.RFC1123Z, -} - -// Parse Date use PHP time format. -func DateParse(dateString, format string) (time.Time, error) { - replacer := strings.NewReplacer(datePatterns...) 
- format = replacer.Replace(format) - return time.ParseInLocation(format, dateString, time.Local) -} diff --git a/vendor/github.com/Unknwon/com/url.go b/vendor/github.com/Unknwon/com/url.go deleted file mode 100644 index b0b7c0e12b2..00000000000 --- a/vendor/github.com/Unknwon/com/url.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2013 com authors -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package com - -import ( - "encoding/base64" - "net/url" -) - -// url encode string, is + not %20 -func UrlEncode(str string) string { - return url.QueryEscape(str) -} - -// url decode string -func UrlDecode(str string) (string, error) { - return url.QueryUnescape(str) -} - -// base64 encode -func Base64Encode(str string) string { - return base64.StdEncoding.EncodeToString([]byte(str)) -} - -// base64 decode -func Base64Decode(str string) (string, error) { - s, e := base64.StdEncoding.DecodeString(str) - return string(s), e -} diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/github.com/coreos/etcd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE deleted file mode 100644 index b39ddfa5cbd..00000000000 --- a/vendor/github.com/coreos/etcd/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2014 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go deleted file mode 100644 index 009ebda70ca..00000000000 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go +++ /dev/null @@ -1,824 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: auth.proto -// DO NOT EDIT! - -/* - Package authpb is a generated protocol buffer package. 
- - It is generated from these files: - auth.proto - - It has these top-level messages: - User - Permission - Role -*/ -package authpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Permission_Type int32 - -const ( - READ Permission_Type = 0 - WRITE Permission_Type = 1 - READWRITE Permission_Type = 2 -) - -var Permission_Type_name = map[int32]string{ - 0: "READ", - 1: "WRITE", - 2: "READWRITE", -} -var Permission_Type_value = map[string]int32{ - "READ": 0, - "WRITE": 1, - "READWRITE": 2, -} - -func (x Permission_Type) String() string { - return proto.EnumName(Permission_Type_name, int32(x)) -} -func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} } - -// User is a single entry in the bucket authUsers -type User struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} } - -// Permission is a single entity -type Permission struct { - PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` - Key []byte 
`protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` -} - -func (m *Permission) Reset() { *m = Permission{} } -func (m *Permission) String() string { return proto.CompactTextString(m) } -func (*Permission) ProtoMessage() {} -func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} } - -// Role is a single entry in the bucket authRoles -type Role struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"` -} - -func (m *Role) Reset() { *m = Role{} } -func (m *Role) String() string { return proto.CompactTextString(m) } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} } - -func init() { - proto.RegisterType((*User)(nil), "authpb.User") - proto.RegisterType((*Permission)(nil), "authpb.Permission") - proto.RegisterType((*Role)(nil), "authpb.Role") - proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) -} -func (m *User) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *User) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, 
nil -} - -func (m *Permission) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Permission) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PermType != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) - } - if len(m.Key) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - return i, nil -} - -func (m *Role) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.KeyPermission) > 0 { - for _, msg := range m.KeyPermission { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintAuth(dAtA []byte, 
offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *User) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovAuth(uint64(l)) - } - } - return n -} - -func (m *Permission) Size() (n int) { - var l int - _ = l - if m.PermType != 0 { - n += 1 + sovAuth(uint64(m.PermType)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - return n -} - -func (m *Role) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if len(m.KeyPermission) > 0 { - for _, e := range m.KeyPermission { - l = e.Size() - n += 1 + l + sovAuth(uint64(l)) - } - } - return n -} - -func sovAuth(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozAuth(x uint64) (n int) { - return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *User) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: User: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var 
byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) - if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...) 
- if m.Password == nil { - m.Password = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Permission) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Permission: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType) - } - m.PermType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
m.PermType |= (Permission_Type(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
- if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Role) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Role: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
- if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KeyPermission = append(m.KeyPermission, &Permission{}) - if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAuth(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) 
<< shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthAuth - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipAuth(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } - -var fileDescriptorAuth = []byte{ - // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, - 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, - 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, - 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, - 0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d, - 0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd, - 0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51, - 0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef, - 0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 
0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00, - 0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc, - 0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70, - 0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41, - 0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc, - 0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b, - 0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1, - 0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee, - 0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4, - 0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto deleted file mode 100644 index 001d3343548..00000000000 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; -package authpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_enum_prefix_all) = false; - -// User is a single entry in the bucket authUsers -message User { - bytes name = 1; - bytes password = 2; - repeated string roles = 3; -} - -// Permission is a single entity -message Permission { - enum Type { - READ = 0; - WRITE = 1; - READWRITE = 2; - } - Type permType = 1; - - bytes key = 2; - bytes range_end = 3; -} - -// Role is a single entry in the bucket authRoles -message Role { - bytes name = 1; - - repeated Permission keyPermission = 2; -} diff --git 
a/vendor/github.com/coreos/etcd/client/README.md b/vendor/github.com/coreos/etcd/client/README.md deleted file mode 100644 index 2be731ede0b..00000000000 --- a/vendor/github.com/coreos/etcd/client/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# etcd/client - -etcd/client is the Go client library for etcd. - -[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client) - -etcd uses `cmd/vendor` directory to store external dependencies, which are -to be compiled into etcd release binaries. `client` can be imported without -vendoring. For full compatibility, it is recommended to vendor builds using -etcd's vendored packages, using tools like godep, as in -[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). -For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). - -## Install - -```bash -go get github.com/coreos/etcd/client -``` - -## Usage - -```go -package main - -import ( - "log" - "time" - "context" - - "github.com/coreos/etcd/client" -) - -func main() { - cfg := client.Config{ - Endpoints: []string{"http://127.0.0.1:2379"}, - Transport: client.DefaultTransport, - // set timeout per request to fail fast when the target endpoint is unavailable - HeaderTimeoutPerRequest: time.Second, - } - c, err := client.New(cfg) - if err != nil { - log.Fatal(err) - } - kapi := client.NewKeysAPI(c) - // set "/foo" key with "bar" value - log.Print("Setting '/foo' key with 'bar' value") - resp, err := kapi.Set(context.Background(), "/foo", "bar", nil) - if err != nil { - log.Fatal(err) - } else { - // print common key info - log.Printf("Set is done. Metadata is %q\n", resp) - } - // get "/foo" key's value - log.Print("Getting '/foo' key value") - resp, err = kapi.Get(context.Background(), "/foo", nil) - if err != nil { - log.Fatal(err) - } else { - // print common key info - log.Printf("Get is done. 
Metadata is %q\n", resp) - // print value - log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value) - } -} -``` - -## Error Handling - -etcd client might return three types of errors. - -- context error - -Each API call has its first parameter as `context`. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the responding context error will be returned no matter what internal errors the API call has already encountered. - -- cluster error - -Each API call tries to send request to the cluster endpoints one by one until it successfully gets a response. If a requests to an endpoint fails, due to exceeding per request timeout or connection issues, the error will be added into a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned. - -- response error - -If the response gets from the cluster is invalid, a plain string error will be returned. For example, it might be a invalid JSON error. - -Here is the example code to handle client errors: - -```go -cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}} -c, err := client.New(cfg) -if err != nil { - log.Fatal(err) -} - -kapi := client.NewKeysAPI(c) -resp, err := kapi.Set(ctx, "test", "bar", nil) -if err != nil { - if err == context.Canceled { - // ctx is canceled by another routine - } else if err == context.DeadlineExceeded { - // ctx is attached with a deadline and it exceeded - } else if cerr, ok := err.(*client.ClusterError); ok { - // process (cerr.Errors) - } else { - // bad cluster endpoints, which are not etcd servers - } -} -``` - - -## Caveat - -1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket resources, and improves efficiency for both client and server side. 
This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process. - -2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all errors happened. - -3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention. - -4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information. diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go deleted file mode 100644 index b6ba7e150dc..00000000000 --- a/vendor/github.com/coreos/etcd/client/auth_role.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "context" - "encoding/json" - "net/http" - "net/url" -) - -type Role struct { - Role string `json:"role"` - Permissions Permissions `json:"permissions"` - Grant *Permissions `json:"grant,omitempty"` - Revoke *Permissions `json:"revoke,omitempty"` -} - -type Permissions struct { - KV rwPermission `json:"kv"` -} - -type rwPermission struct { - Read []string `json:"read"` - Write []string `json:"write"` -} - -type PermissionType int - -const ( - ReadPermission PermissionType = iota - WritePermission - ReadWritePermission -) - -// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to -// interact with etcd's role creation and modification features. -func NewAuthRoleAPI(c Client) AuthRoleAPI { - return &httpAuthRoleAPI{ - client: c, - } -} - -type AuthRoleAPI interface { - // AddRole adds a role. - AddRole(ctx context.Context, role string) error - - // RemoveRole removes a role. - RemoveRole(ctx context.Context, role string) error - - // GetRole retrieves role details. - GetRole(ctx context.Context, role string) (*Role, error) - - // GrantRoleKV grants a role some permission prefixes for the KV store. - GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) - - // RevokeRoleKV revokes some permission prefixes for a role on the KV store. - RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) - - // ListRoles lists roles. 
- ListRoles(ctx context.Context) ([]string, error) -} - -type httpAuthRoleAPI struct { - client httpClient -} - -type authRoleAPIAction struct { - verb string - name string - role *Role -} - -type authRoleAPIList struct{} - -func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", l.name) - if l.role == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.role) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { - resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - var roleList struct { - Roles []Role `json:"roles"` - } - if err = json.Unmarshal(body, &roleList); err != nil { - return nil, err - } - ret := make([]string, 0, len(roleList.Roles)) - for _, r := range roleList.Roles { - ret = append(ret, r.Role) - } - return ret, nil -} - -func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { - role := &Role{ - Role: rolename, - } - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "DELETE", - name: rolename, - }) -} - -func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { - resp, body, err := r.client.Do(ctx, req) - if err != nil { - return err - } 
- if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err := json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { - return r.modRole(ctx, &authRoleAPIAction{ - verb: "GET", - name: rolename, - }) -} - -func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { - var out rwPermission - switch permType { - case ReadPermission: - out.Read = prefixes - case WritePermission: - out.Write = prefixes - case ReadWritePermission: - out.Read = prefixes - out.Write = prefixes - } - return out -} - -func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Grant: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Revoke: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { - resp, body, err := r.client.Do(ctx, req) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var role Role - if err = json.Unmarshal(body, &role); err != nil { - return nil, err - } - return &role, nil -} diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go 
b/vendor/github.com/coreos/etcd/client/auth_user.go deleted file mode 100644 index 8e7e2efe833..00000000000 --- a/vendor/github.com/coreos/etcd/client/auth_user.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "context" - "encoding/json" - "net/http" - "net/url" - "path" -) - -var ( - defaultV2AuthPrefix = "/v2/auth" -) - -type User struct { - User string `json:"user"` - Password string `json:"password,omitempty"` - Roles []string `json:"roles"` - Grant []string `json:"grant,omitempty"` - Revoke []string `json:"revoke,omitempty"` -} - -// userListEntry is the user representation given by the server for ListUsers -type userListEntry struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -type UserRoles struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -func v2AuthURL(ep url.URL, action string, name string) *url.URL { - if name != "" { - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) - return &ep - } - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) - return &ep -} - -// NewAuthAPI constructs a new AuthAPI that uses HTTP to -// interact with etcd's general auth features. -func NewAuthAPI(c Client) AuthAPI { - return &httpAuthAPI{ - client: c, - } -} - -type AuthAPI interface { - // Enable auth. - Enable(ctx context.Context) error - - // Disable auth. 
- Disable(ctx context.Context) error -} - -type httpAuthAPI struct { - client httpClient -} - -func (s *httpAuthAPI) Enable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"PUT"}) -} - -func (s *httpAuthAPI) Disable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"DELETE"}) -} - -func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { - resp, body, err := s.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -type authAPIAction struct { - verb string -} - -func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "enable", "") - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req -} - -type authError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e authError) Error() string { - return e.Message -} - -// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to -// interact with etcd's user creation and modification features. -func NewAuthUserAPI(c Client) AuthUserAPI { - return &httpAuthUserAPI{ - client: c, - } -} - -type AuthUserAPI interface { - // AddUser adds a user. - AddUser(ctx context.Context, username string, password string) error - - // RemoveUser removes a user. - RemoveUser(ctx context.Context, username string) error - - // GetUser retrieves user details. - GetUser(ctx context.Context, username string) (*User, error) - - // GrantUser grants a user some permission roles. - GrantUser(ctx context.Context, username string, roles []string) (*User, error) - - // RevokeUser revokes some permission roles from a user. - RevokeUser(ctx context.Context, username string, roles []string) (*User, error) - - // ChangePassword changes the user's password. 
- ChangePassword(ctx context.Context, username string, password string) (*User, error) - - // ListUsers lists the users. - ListUsers(ctx context.Context) ([]string, error) -} - -type httpAuthUserAPI struct { - client httpClient -} - -type authUserAPIAction struct { - verb string - username string - user *User -} - -type authUserAPIList struct{} - -func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", l.username) - if l.user == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.user) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { - resp, body, err := u.client.Do(ctx, &authUserAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - - var userList struct { - Users []userListEntry `json:"users"` - } - - if err = json.Unmarshal(body, &userList); err != nil { - return nil, err - } - - ret := make([]string, 0, len(userList.Users)) - for _, u := range userList.Users { - ret = append(ret, u.User) - } - return ret, nil -} - -func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { - user := &User{ - User: username, - Password: password, - } - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username 
string) error { - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "DELETE", - username: username, - }) -} - -func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { - return u.modUser(ctx, &authUserAPIAction{ - verb: "GET", - username: username, - }) -} - -func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Grant: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Revoke: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { - user := &User{ - User: username, - Password: password, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var user User - if err = json.Unmarshal(body, &user); err != nil { - var userR UserRoles - if urerr := 
json.Unmarshal(body, &userR); urerr != nil { - return nil, err - } - user.User = userR.User - for _, r := range userR.Roles { - user.Roles = append(user.Roles, r.Role) - } - } - return &user, nil -} diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go deleted file mode 100644 index 76d1f040198..00000000000 --- a/vendor/github.com/coreos/etcd/client/cancelreq.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// borrowed from golang/net/context/ctxhttp/cancelreq.go - -package client - -import "net/http" - -func requestCanceler(tr CancelableTransport, req *http.Request) func() { - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go deleted file mode 100644 index e6874505666..00000000000 --- a/vendor/github.com/coreos/etcd/client/client.go +++ /dev/null @@ -1,710 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "sync" - "time" - - "github.com/coreos/etcd/version" -) - -var ( - ErrNoEndpoints = errors.New("client: no endpoints available") - ErrTooManyRedirects = errors.New("client: too many redirects") - ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") - ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") - errTooManyRedirectChecks = errors.New("client: too many redirect checks") - - // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so - // that Do() will not retry a request - oneShotCtxValue interface{} -) - -var DefaultRequestTimeout = 5 * time.Second - -var DefaultTransport CancelableTransport = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, -} - -type EndpointSelectionMode int - -const ( - // EndpointSelectionRandom is the default value of the 'SelectionMode'. - // As the name implies, the client object will pick a node from the members - // of the cluster in a random fashion. If the cluster has three members, A, B, - // and C, the client picks any node from its three members as its request - // destination. - EndpointSelectionRandom EndpointSelectionMode = iota - - // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', - // requests are sent directly to the cluster leader. This reduces - // forwarding roundtrips compared to making requests to etcd followers - // who then forward them to the cluster leader. In the event of a leader - // failure, however, clients configured this way cannot prioritize among - // the remaining etcd followers. 
Therefore, when a client sets 'SelectionMode' - // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to - // maintain its knowledge of current cluster state. - // - // This mode should be used with Client.AutoSync(). - EndpointSelectionPrioritizeLeader -) - -type Config struct { - // Endpoints defines a set of URLs (schemes, hosts and ports only) - // that can be used to communicate with a logical etcd cluster. For - // example, a three-node cluster could be provided like so: - // - // Endpoints: []string{ - // "http://node1.example.com:2379", - // "http://node2.example.com:2379", - // "http://node3.example.com:2379", - // } - // - // If multiple endpoints are provided, the Client will attempt to - // use them all in the event that one or more of them are unusable. - // - // If Client.Sync is ever called, the Client may cache an alternate - // set of endpoints to continue operation. - Endpoints []string - - // Transport is used by the Client to drive HTTP requests. If not - // provided, DefaultTransport will be used. - Transport CancelableTransport - - // CheckRedirect specifies the policy for handling HTTP redirects. - // If CheckRedirect is not nil, the Client calls it before - // following an HTTP redirect. The sole argument is the number of - // requests that have already been made. If CheckRedirect returns - // an error, Client.Do will not make any further requests and return - // the error back it to the caller. - // - // If CheckRedirect is nil, the Client uses its default policy, - // which is to stop after 10 consecutive requests. - CheckRedirect CheckRedirectFunc - - // Username specifies the user credential to add as an authorization header - Username string - - // Password is the password for the specified user to add as an authorization header - // to the request. - Password string - - // HeaderTimeoutPerRequest specifies the time limit to wait for response - // header in a single request made by the Client. 
The timeout includes - // connection time, any redirects, and header wait time. - // - // For non-watch GET request, server returns the response body immediately. - // For PUT/POST/DELETE request, server will attempt to commit request - // before responding, which is expected to take `100ms + 2 * RTT`. - // For watch request, server returns the header immediately to notify Client - // watch start. But if server is behind some kind of proxy, the response - // header may be cached at proxy, and Client cannot rely on this behavior. - // - // Especially, wait request will ignore this timeout. - // - // One API call may send multiple requests to different etcd servers until it - // succeeds. Use context of the API to specify the overall timeout. - // - // A HeaderTimeoutPerRequest of zero means no timeout. - HeaderTimeoutPerRequest time.Duration - - // SelectionMode is an EndpointSelectionMode enum that specifies the - // policy for choosing the etcd cluster node to which requests are sent. - SelectionMode EndpointSelectionMode -} - -func (cfg *Config) transport() CancelableTransport { - if cfg.Transport == nil { - return DefaultTransport - } - return cfg.Transport -} - -func (cfg *Config) checkRedirect() CheckRedirectFunc { - if cfg.CheckRedirect == nil { - return DefaultCheckRedirect - } - return cfg.CheckRedirect -} - -// CancelableTransport mimics net/http.Transport, but requires that -// the object also support request cancellation. -type CancelableTransport interface { - http.RoundTripper - CancelRequest(req *http.Request) -} - -type CheckRedirectFunc func(via int) error - -// DefaultCheckRedirect follows up to 10 redirects, but no more. -var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { - if via > 10 { - return ErrTooManyRedirects - } - return nil -} - -type Client interface { - // Sync updates the internal cache of the etcd cluster's membership. - Sync(context.Context) error - - // AutoSync periodically calls Sync() every given interval. 
- // The recommended sync interval is 10 seconds to 1 minute, which does - // not bring too much overhead to server and makes client catch up the - // cluster change in time. - // - // The example to use it: - // - // for { - // err := client.AutoSync(ctx, 10*time.Second) - // if err == context.DeadlineExceeded || err == context.Canceled { - // break - // } - // log.Print(err) - // } - AutoSync(context.Context, time.Duration) error - - // Endpoints returns a copy of the current set of API endpoints used - // by Client to resolve HTTP requests. If Sync has ever been called, - // this may differ from the initial Endpoints provided in the Config. - Endpoints() []string - - // SetEndpoints sets the set of API endpoints used by Client to resolve - // HTTP requests. If the given endpoints are not valid, an error will be - // returned - SetEndpoints(eps []string) error - - // GetVersion retrieves the current etcd server and cluster version - GetVersion(ctx context.Context) (*version.Versions, error) - - httpClient -} - -func New(cfg Config) (Client, error) { - c := &httpClusterClient{ - clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), - rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - selectionMode: cfg.SelectionMode, - } - if cfg.Username != "" { - c.credentials = &credentials{ - username: cfg.Username, - password: cfg.Password, - } - } - if err := c.SetEndpoints(cfg.Endpoints); err != nil { - return nil, err - } - return c, nil -} - -type httpClient interface { - Do(context.Context, httpAction) (*http.Response, []byte, error) -} - -func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { - return func(ep url.URL) httpClient { - return &redirectFollowingHTTPClient{ - checkRedirect: cr, - client: &simpleHTTPClient{ - transport: tr, - endpoint: ep, - headerTimeout: headerTimeout, - }, - } - } -} - -type credentials struct { - username 
string - password string -} - -type httpClientFactory func(url.URL) httpClient - -type httpAction interface { - HTTPRequest(url.URL) *http.Request -} - -type httpClusterClient struct { - clientFactory httpClientFactory - endpoints []url.URL - pinned int - credentials *credentials - sync.RWMutex - rand *rand.Rand - selectionMode EndpointSelectionMode -} - -func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) { - ceps := make([]url.URL, len(eps)) - copy(ceps, eps) - - // To perform a lookup on the new endpoint list without using the current - // client, we'll copy it - clientCopy := &httpClusterClient{ - clientFactory: c.clientFactory, - credentials: c.credentials, - rand: c.rand, - - pinned: 0, - endpoints: ceps, - } - - mAPI := NewMembersAPI(clientCopy) - leader, err := mAPI.Leader(ctx) - if err != nil { - return "", err - } - if len(leader.ClientURLs) == 0 { - return "", ErrNoLeaderEndpoint - } - - return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? -} - -func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) { - if len(eps) == 0 { - return []url.URL{}, ErrNoEndpoints - } - - neps := make([]url.URL, len(eps)) - for i, ep := range eps { - u, err := url.Parse(ep) - if err != nil { - return []url.URL{}, err - } - neps[i] = *u - } - return neps, nil -} - -func (c *httpClusterClient) SetEndpoints(eps []string) error { - neps, err := c.parseEndpoints(eps) - if err != nil { - return err - } - - c.Lock() - defer c.Unlock() - - c.endpoints = shuffleEndpoints(c.rand, neps) - // We're not doing anything for PrioritizeLeader here. This is - // due to not having a context meaning we can't call getLeaderEndpoint - // However, if you're using PrioritizeLeader, you've already been told - // to regularly call sync, where we do have a ctx, and can figure the - // leader. 
PrioritizeLeader is also quite a loose guarantee, so deal - // with it - c.pinned = 0 - - return nil -} - -func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - action := act - c.RLock() - leps := len(c.endpoints) - eps := make([]url.URL, leps) - n := copy(eps, c.endpoints) - pinned := c.pinned - - if c.credentials != nil { - action = &authedAction{ - act: act, - credentials: *c.credentials, - } - } - c.RUnlock() - - if leps == 0 { - return nil, nil, ErrNoEndpoints - } - - if leps != n { - return nil, nil, errors.New("unable to pick endpoint: copy failed") - } - - var resp *http.Response - var body []byte - var err error - cerr := &ClusterError{} - isOneShot := ctx.Value(&oneShotCtxValue) != nil - - for i := pinned; i < leps+pinned; i++ { - k := i % leps - hc := c.clientFactory(eps[k]) - resp, body, err = hc.Do(ctx, action) - if err != nil { - cerr.Errors = append(cerr.Errors, err) - if err == ctx.Err() { - return nil, nil, ctx.Err() - } - if err == context.Canceled || err == context.DeadlineExceeded { - return nil, nil, err - } - } else if resp.StatusCode/100 == 5 { - switch resp.StatusCode { - case http.StatusInternalServerError, http.StatusServiceUnavailable: - // TODO: make sure this is a no leader response - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) - default: - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) - } - err = cerr.Errors[0] - } - if err != nil { - if !isOneShot { - continue - } - c.Lock() - c.pinned = (k + 1) % leps - c.Unlock() - return nil, nil, err - } - if k != pinned { - c.Lock() - c.pinned = k - c.Unlock() - } - return resp, body, nil - } - - return nil, nil, cerr -} - -func (c *httpClusterClient) Endpoints() []string { - c.RLock() - defer c.RUnlock() - - eps := make([]string, len(c.endpoints)) - for i, ep := range c.endpoints { - 
eps[i] = ep.String() - } - - return eps -} - -func (c *httpClusterClient) Sync(ctx context.Context) error { - mAPI := NewMembersAPI(c) - ms, err := mAPI.List(ctx) - if err != nil { - return err - } - - var eps []string - for _, m := range ms { - eps = append(eps, m.ClientURLs...) - } - - neps, err := c.parseEndpoints(eps) - if err != nil { - return err - } - - npin := 0 - - switch c.selectionMode { - case EndpointSelectionRandom: - c.RLock() - eq := endpointsEqual(c.endpoints, neps) - c.RUnlock() - - if eq { - return nil - } - // When items in the endpoint list changes, we choose a new pin - neps = shuffleEndpoints(c.rand, neps) - case EndpointSelectionPrioritizeLeader: - nle, err := c.getLeaderEndpoint(ctx, neps) - if err != nil { - return ErrNoLeaderEndpoint - } - - for i, n := range neps { - if n.String() == nle { - npin = i - break - } - } - default: - return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) - } - - c.Lock() - defer c.Unlock() - c.endpoints = neps - c.pinned = npin - - return nil -} - -func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - err := c.Sync(ctx) - if err != nil { - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - } - } -} - -func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { - act := &getAction{Prefix: "/version"} - - resp, body, err := c.Do(ctx, act) - if err != nil { - return nil, err - } - - switch resp.StatusCode { - case http.StatusOK: - if len(body) == 0 { - return nil, ErrEmptyBody - } - var vresp version.Versions - if err := json.Unmarshal(body, &vresp); err != nil { - return nil, ErrInvalidJSON - } - return &vresp, nil - default: - var etcdErr Error - if err := json.Unmarshal(body, &etcdErr); err != nil { - return nil, ErrInvalidJSON - } - return nil, etcdErr - } -} - -type roundTripResponse struct { - resp *http.Response - err 
error -} - -type simpleHTTPClient struct { - transport CancelableTransport - endpoint url.URL - headerTimeout time.Duration -} - -func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - req := act.HTTPRequest(c.endpoint) - - if err := printcURL(req); err != nil { - return nil, nil, err - } - - isWait := false - if req != nil && req.URL != nil { - ws := req.URL.Query().Get("wait") - if len(ws) != 0 { - var err error - isWait, err = strconv.ParseBool(ws) - if err != nil { - return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) - } - } - } - - var hctx context.Context - var hcancel context.CancelFunc - if !isWait && c.headerTimeout > 0 { - hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) - } else { - hctx, hcancel = context.WithCancel(ctx) - } - defer hcancel() - - reqcancel := requestCanceler(c.transport, req) - - rtchan := make(chan roundTripResponse, 1) - go func() { - resp, err := c.transport.RoundTrip(req) - rtchan <- roundTripResponse{resp: resp, err: err} - close(rtchan) - }() - - var resp *http.Response - var err error - - select { - case rtresp := <-rtchan: - resp, err = rtresp.resp, rtresp.err - case <-hctx.Done(): - // cancel and wait for request to actually exit before continuing - reqcancel() - rtresp := <-rtchan - resp = rtresp.resp - switch { - case ctx.Err() != nil: - err = ctx.Err() - case hctx.Err() != nil: - err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) - default: - panic("failed to get error from context") - } - } - - // always check for resp nil-ness to deal with possible - // race conditions between channels above - defer func() { - if resp != nil { - resp.Body.Close() - } - }() - - if err != nil { - return nil, nil, err - } - - var body []byte - done := make(chan struct{}) - go func() { - body, err = ioutil.ReadAll(resp.Body) - done <- struct{}{} - }() - - select { - case <-ctx.Done(): - resp.Body.Close() - <-done - return nil, 
nil, ctx.Err() - case <-done: - } - - return resp, body, err -} - -type authedAction struct { - act httpAction - credentials credentials -} - -func (a *authedAction) HTTPRequest(url url.URL) *http.Request { - r := a.act.HTTPRequest(url) - r.SetBasicAuth(a.credentials.username, a.credentials.password) - return r -} - -type redirectFollowingHTTPClient struct { - client httpClient - checkRedirect CheckRedirectFunc -} - -func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - next := act - for i := 0; i < 100; i++ { - if i > 0 { - if err := r.checkRedirect(i); err != nil { - return nil, nil, err - } - } - resp, body, err := r.client.Do(ctx, next) - if err != nil { - return nil, nil, err - } - if resp.StatusCode/100 == 3 { - hdr := resp.Header.Get("Location") - if hdr == "" { - return nil, nil, fmt.Errorf("Location header not set") - } - loc, err := url.Parse(hdr) - if err != nil { - return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) - } - next = &redirectedHTTPAction{ - action: act, - location: *loc, - } - continue - } - return resp, body, nil - } - - return nil, nil, errTooManyRedirectChecks -} - -type redirectedHTTPAction struct { - action httpAction - location url.URL -} - -func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { - orig := r.action.HTTPRequest(ep) - orig.URL = &r.location - return orig -} - -func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { - // copied from Go 1.9<= rand.Rand.Perm - n := len(eps) - p := make([]int, n) - for i := 0; i < n; i++ { - j := r.Intn(i + 1) - p[i] = p[j] - p[j] = i - } - neps := make([]url.URL, n) - for i, k := range p { - neps[i] = eps[k] - } - return neps -} - -func endpointsEqual(left, right []url.URL) bool { - if len(left) != len(right) { - return false - } - - sLeft := make([]string, len(left)) - sRight := make([]string, len(right)) - for i, l := range left { - sLeft[i] = l.String() - } - for i, r := range right { - 
sRight[i] = r.String() - } - - sort.Strings(sLeft) - sort.Strings(sRight) - for i := range sLeft { - if sLeft[i] != sRight[i] { - return false - } - } - return true -} diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go deleted file mode 100644 index 34618cdbd9e..00000000000 --- a/vendor/github.com/coreos/etcd/client/cluster_error.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import "fmt" - -type ClusterError struct { - Errors []error -} - -func (ce *ClusterError) Error() string { - s := ErrClusterUnavailable.Error() - for i, e := range ce.Errors { - s += fmt.Sprintf("; error #%d: %s\n", i, e) - } - return s -} - -func (ce *ClusterError) Detail() string { - s := "" - for i, e := range ce.Errors { - s += fmt.Sprintf("error #%d: %s\n", i, e) - } - return s -} diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go deleted file mode 100644 index c8bc9fba20e..00000000000 --- a/vendor/github.com/coreos/etcd/client/curl.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "os" -) - -var ( - cURLDebug = false -) - -func EnablecURLDebug() { - cURLDebug = true -} - -func DisablecURLDebug() { - cURLDebug = false -} - -// printcURL prints the cURL equivalent request to stderr. -// It returns an error if the body of the request cannot -// be read. -// The caller MUST cancel the request if there is an error. -func printcURL(req *http.Request) error { - if !cURLDebug { - return nil - } - var ( - command string - b []byte - err error - ) - - if req.URL != nil { - command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) - } - - if req.Body != nil { - b, err = ioutil.ReadAll(req.Body) - if err != nil { - return err - } - command += fmt.Sprintf(" -d %q", string(b)) - } - - fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) - - // reset body - body := bytes.NewBuffer(b) - req.Body = ioutil.NopCloser(body) - - return nil -} diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go deleted file mode 100644 index 442e35fe543..00000000000 --- a/vendor/github.com/coreos/etcd/client/discover.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "github.com/coreos/etcd/pkg/srv" -) - -// Discoverer is an interface that wraps the Discover method. -type Discoverer interface { - // Discover looks up the etcd servers for the domain. - Discover(domain string) ([]string, error) -} - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. -func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -func (d *srvDiscover) Discover(domain string) ([]string, error) { - srvs, err := srv.GetClient("etcd-client", domain) - if err != nil { - return nil, err - } - return srvs.Endpoints, nil -} diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go deleted file mode 100644 index ad4eca4e163..00000000000 --- a/vendor/github.com/coreos/etcd/client/doc.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package client provides bindings for the etcd APIs. 
- -Create a Config and exchange it for a Client: - - import ( - "net/http" - "context" - - "github.com/coreos/etcd/client" - ) - - cfg := client.Config{ - Endpoints: []string{"http://127.0.0.1:2379"}, - Transport: DefaultTransport, - } - - c, err := client.New(cfg) - if err != nil { - // handle error - } - -Clients are safe for concurrent use by multiple goroutines. - -Create a KeysAPI using the Client, then use it to interact with etcd: - - kAPI := client.NewKeysAPI(c) - - // create a new key /foo with the value "bar" - _, err = kAPI.Create(context.Background(), "/foo", "bar") - if err != nil { - // handle error - } - - // delete the newly created key only if the value is still "bar" - _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) - if err != nil { - // handle error - } - -Use a custom context to set timeouts on your operations: - - import "time" - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // set a new key, ignoring its previous state - _, err := kAPI.Set(ctx, "/ping", "pong", nil) - if err != nil { - if err == context.DeadlineExceeded { - // request took longer than 5s - } else { - // handle error - } - } - -*/ -package client diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go deleted file mode 100644 index 237fdbe8ffd..00000000000 --- a/vendor/github.com/coreos/etcd/client/keys.generated.go +++ /dev/null @@ -1,5218 +0,0 @@ -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package client - -import ( - "errors" - "fmt" - "reflect" - "runtime" - time "time" - - codec1978 "github.com/ugorji/go/codec" -) - -const ( - // ----- content types ---- - codecSelferC_UTF87612 = 1 - codecSelferC_RAW7612 = 0 - // ----- value types used ---- - codecSelferValueTypeArray7612 = 10 - codecSelferValueTypeMap7612 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey7612 = 2 - codecSelfer_containerMapValue7612 = 3 - codecSelfer_containerMapEnd7612 = 4 - codecSelfer_containerArrayElem7612 = 6 - codecSelfer_containerArrayEnd7612 = 7 -) - -var ( - codecSelferBitsize7612 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr7612 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer7612 struct{} - -func init() { - if codec1978.GenVersion != 8 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 8, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 time.Duration - _ = v0 - } -} - -func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(4) - } else { - r.WriteMapStart(4) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Code)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("errorCode")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Code)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Message)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("message")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Message)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("cause")) - r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeUint(uint64(x.Index)) - } - } else 
{ - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("index")) - r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeUint(uint64(x.Index)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *Error) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *Error) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "errorCode": - if r.TryDecodeAsNil() { - x.Code = 0 - } else { - yyv4 := &x.Code - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int)(yyv4)) = int(r.DecodeInt(codecSelferBitsize7612)) - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv6 := &x.Message - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "cause": - if r.TryDecodeAsNil() { - x.Cause = "" - } else { - yyv8 := &x.Cause - yym9 := 
z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "index": - if r.TryDecodeAsNil() { - x.Index = 0 - } else { - yyv10 := &x.Index - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *Error) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Code = 0 - } else { - yyv13 := &x.Code - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int)(yyv13)) = int(r.DecodeInt(codecSelferBitsize7612)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv15 := &x.Message - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Cause = "" - } else { - yyv17 := &x.Cause - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Index = 0 - } else { - yyv19 := &x.Index - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*uint64)(yyv19)) = uint64(r.DecodeUint(64)) - } - } - for { - 
yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj12-1, "") - } - r.ReadArrayEnd() -} - -func (x PrevExistType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x)) - } -} - -func (x *PrevExistType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *WatcherOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(2) - } else { - r.WriteMapStart(2) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeUint(uint64(x.AfterIndex)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("AfterIndex")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeUint(uint64(x.AfterIndex)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { 
- r.EncodeBool(bool(x.Recursive)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *WatcherOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *WatcherOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "AfterIndex": - if r.TryDecodeAsNil() { - x.AfterIndex = 0 - } else { - yyv4 := &x.AfterIndex - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*uint64)(yyv4)) = uint64(r.DecodeUint(64)) - } - } - case "Recursive": - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv6 := &x.Recursive - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(yyv6)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.AfterIndex = 0 - } else { - yyv9 := &x.AfterIndex - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*uint64)(yyv9)) = uint64(r.DecodeUint(64)) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv11 := &x.Recursive - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(yyv11)) = r.DecodeBool() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj8-1, "") - } - r.ReadArrayEnd() -} - -func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(1) - } else { - r.WriteMapStart(1) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { - } else { - r.EncodeInt(int64(x.TTL)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("TTL")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { - } else { - r.EncodeInt(int64(x.TTL)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - 
r.WriteMapEnd() - } - } - } -} - -func (x *CreateInOrderOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *CreateInOrderOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "TTL": - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv4 := &x.TTL - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - *((*int64)(yyv4)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *CreateInOrderOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if 
r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv7 := &x.TTL - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else { - *((*int64)(yyv7)) = int64(r.DecodeInt(64)) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj6-1, "") - } - r.ReadArrayEnd() -} - -func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(7) - } else { - r.WriteMapStart(7) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeUint(uint64(x.PrevIndex)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeUint(uint64(x.PrevIndex)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - x.PrevExist.CodecEncodeSelf(e) - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) - r.WriteMapElemValue() - x.PrevExist.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym13 := 
z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { - } else { - r.EncodeInt(int64(x.TTL)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("TTL")) - r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { - } else { - r.EncodeInt(int64(x.TTL)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(x.Refresh)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Refresh")) - r.WriteMapElemValue() - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(x.Refresh)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Dir")) - r.WriteMapElemValue() - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeBool(bool(x.NoValueOnSuccess)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) - r.WriteMapElemValue() - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeBool(bool(x.NoValueOnSuccess)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *SetOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - 
x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *SetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "PrevValue": - if r.TryDecodeAsNil() { - x.PrevValue = "" - } else { - yyv4 := &x.PrevValue - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "PrevIndex": - if r.TryDecodeAsNil() { - x.PrevIndex = 0 - } else { - yyv6 := &x.PrevIndex - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) - } - } - case "PrevExist": - if r.TryDecodeAsNil() { - x.PrevExist = "" - } else { - yyv8 := &x.PrevExist - yyv8.CodecDecodeSelf(d) - } - case "TTL": - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv9 := &x.TTL - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else { - *((*int64)(yyv9)) = int64(r.DecodeInt(64)) - } - } - case "Refresh": - if r.TryDecodeAsNil() { - x.Refresh = false - } else { - yyv11 := &x.Refresh - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(yyv11)) = r.DecodeBool() - } - } - case "Dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv13 := &x.Dir - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*bool)(yyv13)) = r.DecodeBool() - } - } - case 
"NoValueOnSuccess": - if r.TryDecodeAsNil() { - x.NoValueOnSuccess = false - } else { - yyv15 := &x.NoValueOnSuccess - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(yyv15)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *SetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj17 int - var yyb17 bool - var yyhl17 bool = l >= 0 - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevValue = "" - } else { - yyv18 := &x.PrevValue - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*string)(yyv18)) = r.DecodeString() - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevIndex = 0 - } else { - yyv20 := &x.PrevIndex - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*uint64)(yyv20)) = uint64(r.DecodeUint(64)) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevExist = "" - } else { - yyv22 := &x.PrevExist - yyv22.CodecDecodeSelf(d) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv23 := &x.TTL - yym24 := z.DecBinary() - _ = yym24 - if false { - } else if z.HasExtensions() && z.DecExt(yyv23) { - } else { - *((*int64)(yyv23)) = int64(r.DecodeInt(64)) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - 
r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Refresh = false - } else { - yyv25 := &x.Refresh - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*bool)(yyv25)) = r.DecodeBool() - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv27 := &x.Dir - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*bool)(yyv27)) = r.DecodeBool() - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.NoValueOnSuccess = false - } else { - yyv29 := &x.NoValueOnSuccess - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*bool)(yyv29)) = r.DecodeBool() - } - } - for { - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj17-1, "") - } - r.ReadArrayEnd() -} - -func (x *GetOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(3) - } else { - r.WriteMapStart(3) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } - if yyr2 || yy2arr2 
{ - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Sort)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Sort")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Sort)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Quorum)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Quorum")) - r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Quorum)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *GetOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *GetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "Recursive": - if 
r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv4 := &x.Recursive - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*bool)(yyv4)) = r.DecodeBool() - } - } - case "Sort": - if r.TryDecodeAsNil() { - x.Sort = false - } else { - yyv6 := &x.Sort - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(yyv6)) = r.DecodeBool() - } - } - case "Quorum": - if r.TryDecodeAsNil() { - x.Quorum = false - } else { - yyv8 := &x.Quorum - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *GetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv11 := &x.Recursive - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(yyv11)) = r.DecodeBool() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Sort = false - } else { - yyv13 := &x.Sort - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*bool)(yyv13)) = r.DecodeBool() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Quorum = false - } else { - yyv15 := &x.Quorum - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(yyv15)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if 
yyb10 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj10-1, "") - } - r.ReadArrayEnd() -} - -func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(4) - } else { - r.WriteMapStart(4) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeUint(uint64(x.PrevIndex)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeUint(uint64(x.PrevIndex)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) - r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Dir")) - 
r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "PrevValue": - if r.TryDecodeAsNil() { - x.PrevValue = "" - } else { - yyv4 := &x.PrevValue - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "PrevIndex": - if r.TryDecodeAsNil() { - x.PrevIndex = 0 - } else { - yyv6 := &x.PrevIndex - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) - } - } - case "Recursive": - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv8 := &x.Recursive - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - 
*((*bool)(yyv8)) = r.DecodeBool() - } - } - case "Dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv10 := &x.Dir - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevValue = "" - } else { - yyv13 := &x.PrevValue - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevIndex = 0 - } else { - yyv15 := &x.PrevIndex - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*uint64)(yyv15)) = uint64(r.DecodeUint(64)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv17 := &x.Recursive - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*bool)(yyv17)) = r.DecodeBool() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv19 := &x.Dir - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = 
r.CheckBreak() - } - if yyb12 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj12-1, "") - } - r.ReadArrayEnd() -} - -func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(3) - } else { - r.WriteMapStart(3) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Action)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("action")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Action)) - } - } - var yyn6 bool - if x.Node == nil { - yyn6 = true - goto LABEL6 - } - LABEL6: - if yyr2 || yy2arr2 { - if yyn6 { - r.WriteArrayElem() - r.EncodeNil() - } else { - r.WriteArrayElem() - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("node")) - r.WriteMapElemValue() - if yyn6 { - r.EncodeNil() - } else { - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } - } - var yyn9 bool - if x.PrevNode == nil { - yyn9 = true - goto LABEL9 - } - LABEL9: - if yyr2 || yy2arr2 { - if yyn9 { - r.WriteArrayElem() - r.EncodeNil() - } else { - r.WriteArrayElem() - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("prevNode")) - r.WriteMapElemValue() - if yyn9 { - r.EncodeNil() - } else { - if x.PrevNode == 
nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "action": - if r.TryDecodeAsNil() { - x.Action = "" - } else { - yyv4 := &x.Action - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "node": - if x.Node == nil { - x.Node = new(Node) - } - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - case "prevNode": - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - 
x.PrevNode.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Action = "" - } else { - yyv9 := &x.Action - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - if x.Node == nil { - x.Node = new(Node) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj8-1, "") - } - r.ReadArrayEnd() -} - -func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _ = yyq2 - _, 
_ = yysep2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Dir != false - yyq2[6] = x.Expiration != nil - yyq2[7] = x.TTL != 0 - if yyr2 || yy2arr2 { - r.WriteArrayStart(8) - } else { - var yynn2 = 5 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.WriteMapStart(yynn2) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("key")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("dir")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Value)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("value")) - r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Value)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("nodes")) - r.WriteMapElemValue() - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } else { - 
r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("createdIndex")) - r.WriteMapElemValue() - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("modifiedIndex")) - r.WriteMapElemValue() - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } - var yyn21 bool - if x.Expiration == nil { - yyn21 = true - goto LABEL21 - } - LABEL21: - if yyr2 || yy2arr2 { - if yyn21 { - r.WriteArrayElem() - r.EncodeNil() - } else { - r.WriteArrayElem() - if yyq2[6] { - if x.Expiration == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { - r.EncodeBuiltin(yym23, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym22 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[6] { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("expiration")) - r.WriteMapElemValue() - if yyn21 { - r.EncodeNil() - } else { - if x.Expiration == nil { - r.EncodeNil() - } else { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { - r.EncodeBuiltin(yym25, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym24 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - if yyq2[7] { - yym27 := z.EncBinary() - _ = yym27 - if false 
{ - } else { - r.EncodeInt(int64(x.TTL)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[7] { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("ttl")) - r.WriteMapElemValue() - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv4 := &x.Key - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv6 := &x.Dir - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(yyv6)) = r.DecodeBool() - } - } - case "value": - if r.TryDecodeAsNil() { - 
x.Value = "" - } else { - yyv8 := &x.Value - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "nodes": - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv10 := &x.Nodes - yyv10.CodecDecodeSelf(d) - } - case "createdIndex": - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - yyv11 := &x.CreatedIndex - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) - } - } - case "modifiedIndex": - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - yyv13 := &x.ModifiedIndex - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) - } - } - case "expiration": - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 { - r.DecodeBuiltin(yym17, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym16 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - case "ttl": - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv18 := &x.TTL - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*int64)(yyv18)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj20 int - var yyb20 bool - var yyhl20 bool = l >= 0 - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - r.ReadArrayEnd() - 
return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv21 := &x.Key - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv23 := &x.Dir - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(yyv23)) = r.DecodeBool() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv25 := &x.Value - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv27 := &x.Nodes - yyv27.CodecDecodeSelf(d) - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - yyv28 := &x.CreatedIndex - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*uint64)(yyv28)) = uint64(r.DecodeUint(64)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - yyv30 := &x.ModifiedIndex - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) - } - } - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - 
r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym33 := z.DecBinary() - _ = yym33 - if false { - } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 { - r.DecodeBuiltin(yym34, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym33 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym33 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv35 := &x.TTL - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*int64)(yyv35)) = int64(r.DecodeInt(64)) - } - } - for { - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj20-1, "") - } - r.ReadArrayEnd() -} - -func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encNodes((Nodes)(x), e) - } - } -} - -func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decNodes((*Nodes)(x), d) - } -} - -func (x *httpKeysAPI) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if 
z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(0) - } else { - r.WriteMapStart(0) - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *httpKeysAPI) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *httpKeysAPI) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *httpKeysAPI) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4 int - var yyb4 bool - var yyhl4 bool = l >= 0 - for { - yyj4++ - if yyhl4 { - yyb4 = yyj4 > l - } else { - yyb4 = r.CheckBreak() - } - if yyb4 { - break - } - 
r.ReadArrayElem() - z.DecStructFieldNotFound(yyj4-1, "") - } - r.ReadArrayEnd() -} - -func (x *httpWatcher) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(0) - } else { - r.WriteMapStart(0) - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *httpWatcher) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *httpWatcher) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x 
*httpWatcher) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj4 int - var yyb4 bool - var yyhl4 bool = l >= 0 - for { - yyj4++ - if yyhl4 { - yyb4 = yyj4 > l - } else { - yyb4 = r.CheckBreak() - } - if yyb4 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj4-1, "") - } - r.ReadArrayEnd() -} - -func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(5) - } else { - r.WriteMapStart(5) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Prefix")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Key")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) - r.WriteMapElemValue() - yym11 := z.EncBinary() - _ 
= yym11 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.Sorted)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Sorted")) - r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.Sorted)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(x.Quorum)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Quorum")) - r.WriteMapElemValue() - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(x.Quorum)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *getAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *getAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = 
r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "Prefix": - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv4 := &x.Prefix - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "Key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv6 := &x.Key - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "Recursive": - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv8 := &x.Recursive - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - case "Sorted": - if r.TryDecodeAsNil() { - x.Sorted = false - } else { - yyv10 := &x.Sorted - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - case "Quorum": - if r.TryDecodeAsNil() { - x.Quorum = false - } else { - yyv12 := &x.Quorum - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(yyv12)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv15 := &x.Prefix - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv17 := &x.Key - yym18 := z.DecBinary() - _ = yym18 
- if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv19 := &x.Recursive - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Sorted = false - } else { - yyv21 := &x.Sorted - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*bool)(yyv21)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Quorum = false - } else { - yyv23 := &x.Quorum - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(yyv23)) = r.DecodeBool() - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj14-1, "") - } - r.ReadArrayEnd() -} - -func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(4) - } else { - r.WriteMapStart(4) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) - } - } else { - r.WriteMapElemKey() - 
r.EncodeString(codecSelferC_UTF87612, string("Prefix")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Key")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeUint(uint64(x.WaitIndex)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("WaitIndex")) - r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeUint(uint64(x.WaitIndex)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) - r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *waitAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *waitAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "Prefix": - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv4 := &x.Prefix - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "Key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv6 := &x.Key - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "WaitIndex": - if r.TryDecodeAsNil() { - x.WaitIndex = 0 - } else { - yyv8 := &x.WaitIndex - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*uint64)(yyv8)) = uint64(r.DecodeUint(64)) - } - } - case "Recursive": - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv10 := &x.Recursive - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv13 := &x.Prefix - 
yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv15 := &x.Key - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.WaitIndex = 0 - } else { - yyv17 := &x.WaitIndex - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*uint64)(yyv17)) = uint64(r.DecodeUint(64)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv19 := &x.Recursive - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj12-1, "") - } - r.ReadArrayEnd() -} - -func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(10) - } else { - r.WriteMapStart(10) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) - } - } else 
{ - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Prefix")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Key")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Value)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Value")) - r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Value)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) - r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeUint(uint64(x.PrevIndex)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) - r.WriteMapElemValue() - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeUint(uint64(x.PrevIndex)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - x.PrevExist.CodecEncodeSelf(e) - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) - 
r.WriteMapElemValue() - x.PrevExist.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { - } else { - r.EncodeInt(int64(x.TTL)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("TTL")) - r.WriteMapElemValue() - yym23 := z.EncBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { - } else { - r.EncodeInt(int64(x.TTL)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(x.Refresh)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Refresh")) - r.WriteMapElemValue() - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeBool(bool(x.Refresh)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Dir")) - r.WriteMapElemValue() - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeBool(bool(x.NoValueOnSuccess)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) - r.WriteMapElemValue() - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeBool(bool(x.NoValueOnSuccess)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *setAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == 
codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *setAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "Prefix": - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv4 := &x.Prefix - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "Key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv6 := &x.Key - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "Value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv8 := &x.Value - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "PrevValue": - if r.TryDecodeAsNil() { - x.PrevValue = "" - } else { - yyv10 := &x.PrevValue - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "PrevIndex": - if r.TryDecodeAsNil() { - x.PrevIndex = 0 - } else { - yyv12 := &x.PrevIndex - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*uint64)(yyv12)) = uint64(r.DecodeUint(64)) - } - } - case "PrevExist": - if r.TryDecodeAsNil() { - x.PrevExist = "" - } else { - yyv14 := 
&x.PrevExist - yyv14.CodecDecodeSelf(d) - } - case "TTL": - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv15 := &x.TTL - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else { - *((*int64)(yyv15)) = int64(r.DecodeInt(64)) - } - } - case "Refresh": - if r.TryDecodeAsNil() { - x.Refresh = false - } else { - yyv17 := &x.Refresh - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*bool)(yyv17)) = r.DecodeBool() - } - } - case "Dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv19 := &x.Dir - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } - } - case "NoValueOnSuccess": - if r.TryDecodeAsNil() { - x.NoValueOnSuccess = false - } else { - yyv21 := &x.NoValueOnSuccess - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*bool)(yyv21)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj23 int - var yyb23 bool - var yyhl23 bool = l >= 0 - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv24 := &x.Prefix - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - *((*string)(yyv24)) = r.DecodeString() - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv26 := &x.Key - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - *((*string)(yyv26)) = r.DecodeString() - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if 
yyb23 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv28 := &x.Value - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*string)(yyv28)) = r.DecodeString() - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevValue = "" - } else { - yyv30 := &x.PrevValue - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*string)(yyv30)) = r.DecodeString() - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevIndex = 0 - } else { - yyv32 := &x.PrevIndex - yym33 := z.DecBinary() - _ = yym33 - if false { - } else { - *((*uint64)(yyv32)) = uint64(r.DecodeUint(64)) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevExist = "" - } else { - yyv34 := &x.PrevExist - yyv34.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv35 := &x.TTL - yym36 := z.DecBinary() - _ = yym36 - if false { - } else if z.HasExtensions() && z.DecExt(yyv35) { - } else { - *((*int64)(yyv35)) = int64(r.DecodeInt(64)) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Refresh = false - } else { - yyv37 := &x.Refresh - yym38 := z.DecBinary() - _ = yym38 - if false { - } else { - *((*bool)(yyv37)) = r.DecodeBool() - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { 
- r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv39 := &x.Dir - yym40 := z.DecBinary() - _ = yym40 - if false { - } else { - *((*bool)(yyv39)) = r.DecodeBool() - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.NoValueOnSuccess = false - } else { - yyv41 := &x.NoValueOnSuccess - yym42 := z.DecBinary() - _ = yym42 - if false { - } else { - *((*bool)(yyv41)) = r.DecodeBool() - } - } - for { - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj23-1, "") - } - r.ReadArrayEnd() -} - -func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(6) - } else { - r.WriteMapStart(6) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Prefix")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Key")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = 
yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) - r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeUint(uint64(x.PrevIndex)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) - r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeUint(uint64(x.PrevIndex)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Dir")) - r.WriteMapElemValue() - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) - r.WriteMapElemValue() - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *deleteAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == 
codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *deleteAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "Prefix": - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv4 := &x.Prefix - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "Key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv6 := &x.Key - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "PrevValue": - if r.TryDecodeAsNil() { - x.PrevValue = "" - } else { - yyv8 := &x.PrevValue - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "PrevIndex": - if r.TryDecodeAsNil() { - x.PrevIndex = 0 - } else { - yyv10 := &x.PrevIndex - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) - } - } - case "Dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv12 := &x.Dir - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(yyv12)) = r.DecodeBool() - } - } - case "Recursive": - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv14 := 
&x.Recursive - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*bool)(yyv14)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv17 := &x.Prefix - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv19 := &x.Key - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevValue = "" - } else { - yyv21 := &x.PrevValue - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevIndex = 0 - } else { - yyv23 := &x.PrevIndex - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*uint64)(yyv23)) = uint64(r.DecodeUint(64)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Dir = false - } 
else { - yyv25 := &x.Dir - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*bool)(yyv25)) = r.DecodeBool() - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv27 := &x.Recursive - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*bool)(yyv27)) = r.DecodeBool() - } - } - for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj16-1, "") - } - r.ReadArrayEnd() -} - -func (x *createInOrderAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(4) - } else { - r.WriteMapStart(4) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Prefix")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Dir")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) - } - } - if yyr2 || 
yy2arr2 { - r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Value)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Value")) - r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Value)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { - } else { - r.EncodeInt(int64(x.TTL)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("TTL")) - r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { - } else { - r.EncodeInt(int64(x.TTL)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *createInOrderAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *createInOrderAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if 
r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "Prefix": - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv4 := &x.Prefix - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "Dir": - if r.TryDecodeAsNil() { - x.Dir = "" - } else { - yyv6 := &x.Dir - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "Value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv8 := &x.Value - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "TTL": - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv10 := &x.TTL - yym11 := z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(yyv10) { - } else { - *((*int64)(yyv10)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv13 := &x.Prefix - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Dir = "" - } else { - yyv15 := &x.Dir - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if 
yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv17 := &x.Value - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv19 := &x.TTL - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else { - *((*int64)(yyv19)) = int64(r.DecodeInt(64)) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj12-1, "") - } - r.ReadArrayEnd() -} - -func (x codecSelfer7612) encNodes(v Nodes, e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.WriteArrayStart(len(v)) - for _, yyv1 := range v { - r.WriteArrayElem() - if yyv1 == nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - r.WriteArrayEnd() -} - -func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []*Node{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else { - yyhl1 := yyl1 > 0 - var yyrl1 int - _ = yyrl1 - if yyhl1 { - if yyl1 > cap(yyv1) { - yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]*Node, yyrl1) - } - yyc1 = true - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - } - var yyj1 int - // var yydn1 bool - for ; (yyhl1 && 
yyj1 < yyl1) || !(yyhl1 || r.CheckBreak()); yyj1++ { - if yyj1 == 0 && len(yyv1) == 0 { - if yyhl1 { - yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - } else { - yyrl1 = 8 - } - yyv1 = make([]*Node, yyrl1) - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - // yydn1 = r.TryDecodeAsNil() - - // if indefinite, etc, then expand the slice if necessary - var yydb1 bool - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, nil) - yyc1 = true - - } - if yydb1 { - z.DecSwallow() - } else { - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw2 := yyv1[yyj1] - yyw2.CodecDecodeSelf(d) - } - - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = make([]*Node, 0) - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } - -} diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go deleted file mode 100644 index e8373b94507..00000000000 --- a/vendor/github.com/coreos/etcd/client/keys.go +++ /dev/null @@ -1,682 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/coreos/etcd/pkg/pathutil" - "github.com/ugorji/go/codec" -) - -const ( - ErrorCodeKeyNotFound = 100 - ErrorCodeTestFailed = 101 - ErrorCodeNotFile = 102 - ErrorCodeNotDir = 104 - ErrorCodeNodeExist = 105 - ErrorCodeRootROnly = 107 - ErrorCodeDirNotEmpty = 108 - ErrorCodeUnauthorized = 110 - - ErrorCodePrevValueRequired = 201 - ErrorCodeTTLNaN = 202 - ErrorCodeIndexNaN = 203 - ErrorCodeInvalidField = 209 - ErrorCodeInvalidForm = 210 - - ErrorCodeRaftInternal = 300 - ErrorCodeLeaderElect = 301 - - ErrorCodeWatcherCleared = 400 - ErrorCodeEventIndexCleared = 401 -) - -type Error struct { - Code int `json:"errorCode"` - Message string `json:"message"` - Cause string `json:"cause"` - Index uint64 `json:"index"` -} - -func (e Error) Error() string { - return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) -} - -var ( - ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") - ErrEmptyBody = errors.New("client: response body is empty") -) - -// PrevExistType is used to define an existence condition when setting -// or deleting Nodes. -type PrevExistType string - -const ( - PrevIgnore = PrevExistType("") - PrevExist = PrevExistType("true") - PrevNoExist = PrevExistType("false") -) - -var ( - defaultV2KeysPrefix = "/v2/keys" -) - -// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value -// API over HTTP. -func NewKeysAPI(c Client) KeysAPI { - return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) -} - -// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller -// to provide a custom base URL path. This should only be used in -// very rare cases. 
-func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { - return &httpKeysAPI{ - client: c, - prefix: p, - } -} - -type KeysAPI interface { - // Get retrieves a set of Nodes from etcd - Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) - - // Set assigns a new value to a Node identified by a given key. The caller - // may define a set of conditions in the SetOptions. If SetOptions.Dir=true - // then value is ignored. - Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) - - // Delete removes a Node identified by the given key, optionally destroying - // all of its children as well. The caller may define a set of required - // conditions in an DeleteOptions object. - Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) - - // Create is an alias for Set w/ PrevExist=false - Create(ctx context.Context, key, value string) (*Response, error) - - // CreateInOrder is used to atomically create in-order keys within the given directory. - CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) - - // Update is an alias for Set w/ PrevExist=true - Update(ctx context.Context, key, value string) (*Response, error) - - // Watcher builds a new Watcher targeted at a specific Node identified - // by the given key. The Watcher may be configured at creation time - // through a WatcherOptions object. The returned Watcher is designed - // to emit events that happen to a Node, and optionally to its children. - Watcher(key string, opts *WatcherOptions) Watcher -} - -type WatcherOptions struct { - // AfterIndex defines the index after-which the Watcher should - // start emitting events. For example, if a value of 5 is - // provided, the first event will have an index >= 6. - // - // Setting AfterIndex to 0 (default) means that the Watcher - // should start watching for events starting at the current - // index, whatever that may be. 
- AfterIndex uint64 - - // Recursive specifies whether or not the Watcher should emit - // events that occur in children of the given keyspace. If set - // to false (default), events will be limited to those that - // occur for the exact key. - Recursive bool -} - -type CreateInOrderOptions struct { - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration -} - -type SetOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Set operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. - // - // PrevValue is ignored if Dir=true - PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Set operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // PrevExist specifies whether the Node must currently exist - // (PrevExist) or not (PrevNoExist). If the caller does not - // care about existence, set PrevExist to PrevIgnore, or simply - // leave it unset. - PrevExist PrevExistType - - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration - - // Refresh set to true means a TTL value can be updated - // without firing a watch or changing the node value. A - // value must not be provided when refreshing a key. - Refresh bool - - // Dir specifies whether or not this Node should be created as a directory. - Dir bool - - // NoValueOnSuccess specifies whether the response contains the current value of the Node. 
- // If set, the response will only contain the current value when the request fails. - NoValueOnSuccess bool -} - -type GetOptions struct { - // Recursive defines whether or not all children of the Node - // should be returned. - Recursive bool - - // Sort instructs the server whether or not to sort the Nodes. - // If true, the Nodes are sorted alphabetically by key in - // ascending order (A to z). If false (default), the Nodes will - // not be sorted and the ordering used should not be considered - // predictable. - Sort bool - - // Quorum specifies whether it gets the latest committed value that - // has been applied in quorum of members, which ensures external - // consistency (or linearizability). - Quorum bool -} - -type DeleteOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Delete operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. - PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Delete operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // Recursive defines whether or not all children of the Node - // should be deleted. If set to true, all children of the Node - // identified by the given key will be deleted. If left unset - // or explicitly set to false, only a single Node will be - // deleted. - Recursive bool - - // Dir specifies whether or not this Node should be removed as a directory. - Dir bool -} - -type Watcher interface { - // Next blocks until an etcd event occurs, then returns a Response - // representing that event. The behavior of Next depends on the - // WatcherOptions used to construct the Watcher. 
Next is designed to - // be called repeatedly, each time blocking until a subsequent event - // is available. - // - // If the provided context is cancelled, Next will return a non-nil - // error. Any other failures encountered while waiting for the next - // event (connection issues, deserialization failures, etc) will - // also result in a non-nil error. - Next(context.Context) (*Response, error) -} - -type Response struct { - // Action is the name of the operation that occurred. Possible values - // include get, set, delete, update, create, compareAndSwap, - // compareAndDelete and expire. - Action string `json:"action"` - - // Node represents the state of the relevant etcd Node. - Node *Node `json:"node"` - - // PrevNode represents the previous state of the Node. PrevNode is non-nil - // only if the Node existed before the action occurred and the action - // caused a change to the Node. - PrevNode *Node `json:"prevNode"` - - // Index holds the cluster-level index at the time the Response was generated. - // This index is not tied to the Node(s) contained in this Response. - Index uint64 `json:"-"` - - // ClusterID holds the cluster-level ID reported by the server. This - // should be different for different etcd clusters. - ClusterID string `json:"-"` -} - -type Node struct { - // Key represents the unique location of this Node (e.g. "/foo/bar"). - Key string `json:"key"` - - // Dir reports whether node describes a directory. - Dir bool `json:"dir,omitempty"` - - // Value is the current data stored on this Node. If this Node - // is a directory, Value will be empty. - Value string `json:"value"` - - // Nodes holds the children of this Node, only if this Node is a directory. - // This slice of will be arbitrarily deep (children, grandchildren, great- - // grandchildren, etc.) if a recursive Get or Watch request were made. - Nodes Nodes `json:"nodes"` - - // CreatedIndex is the etcd index at-which this Node was created. 
- CreatedIndex uint64 `json:"createdIndex"` - - // ModifiedIndex is the etcd index at-which this Node was last modified. - ModifiedIndex uint64 `json:"modifiedIndex"` - - // Expiration is the server side expiration time of the key. - Expiration *time.Time `json:"expiration,omitempty"` - - // TTL is the time to live of the key in second. - TTL int64 `json:"ttl,omitempty"` -} - -func (n *Node) String() string { - return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) -} - -// TTLDuration returns the Node's TTL as a time.Duration object -func (n *Node) TTLDuration() time.Duration { - return time.Duration(n.TTL) * time.Second -} - -type Nodes []*Node - -// interfaces for sorting - -func (ns Nodes) Len() int { return len(ns) } -func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } -func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } - -type httpKeysAPI struct { - client httpClient - prefix string -} - -func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { - act := &setAction{ - Prefix: k.prefix, - Key: key, - Value: val, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.PrevExist = opts.PrevExist - act.TTL = opts.TTL - act.Refresh = opts.Refresh - act.Dir = opts.Dir - act.NoValueOnSuccess = opts.NoValueOnSuccess - } - - doCtx := ctx - if act.PrevExist == PrevNoExist { - doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue) - } - resp, body, err := k.client.Do(doCtx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) -} - -func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { - 
act := &createInOrderAction{ - Prefix: k.prefix, - Dir: dir, - Value: val, - } - - if opts != nil { - act.TTL = opts.TTL - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) -} - -func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { - act := &deleteAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.Dir = opts.Dir - act.Recursive = opts.Recursive - } - - doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue) - resp, body, err := k.client.Do(doCtx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { - act := &getAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - act.Sorted = opts.Sort - act.Quorum = opts.Quorum - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { - act := waitAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - if opts.AfterIndex > 0 { - act.WaitIndex = opts.AfterIndex + 1 - } - } - - return &httpWatcher{ - client: k.client, - nextWait: act, - } -} - -type httpWatcher struct { - client httpClient - nextWait waitAction -} - -func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { - for { - httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) - if err != nil { - return nil, err - } - - resp, 
err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) - if err != nil { - if err == ErrEmptyBody { - continue - } - return nil, err - } - - hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 - return resp, nil - } -} - -// v2KeysURL forms a URL representing the location of a key. -// The endpoint argument represents the base URL of an etcd -// server. The prefix is the path needed to route from the -// provided endpoint's path to the root of the keys API -// (typically "/v2/keys"). -func v2KeysURL(ep url.URL, prefix, key string) *url.URL { - // We concatenate all parts together manually. We cannot use - // path.Join because it does not reserve trailing slash. - // We call CanonicalURLPath to further cleanup the path. - if prefix != "" && prefix[0] != '/' { - prefix = "/" + prefix - } - if key != "" && key[0] != '/' { - key = "/" + key - } - ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) - return &ep -} - -type getAction struct { - Prefix string - Key string - Recursive bool - Sorted bool - Quorum bool -} - -func (g *getAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, g.Prefix, g.Key) - - params := u.Query() - params.Set("recursive", strconv.FormatBool(g.Recursive)) - params.Set("sorted", strconv.FormatBool(g.Sorted)) - params.Set("quorum", strconv.FormatBool(g.Quorum)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type waitAction struct { - Prefix string - Key string - WaitIndex uint64 - Recursive bool -} - -func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, w.Prefix, w.Key) - - params := u.Query() - params.Set("wait", "true") - params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) - params.Set("recursive", strconv.FormatBool(w.Recursive)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type setAction struct { - Prefix string - Key string - Value string - 
PrevValue string - PrevIndex uint64 - PrevExist PrevExistType - TTL time.Duration - Refresh bool - Dir bool - NoValueOnSuccess bool -} - -func (a *setAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - form := url.Values{} - - // we're either creating a directory or setting a key - if a.Dir { - params.Set("dir", strconv.FormatBool(a.Dir)) - } else { - // These options are only valid for setting a key - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - form.Add("value", a.Value) - } - - // Options which apply to both setting a key and creating a dir - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.PrevExist != PrevIgnore { - params.Set("prevExist", string(a.PrevExist)) - } - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - - if a.Refresh { - form.Add("refresh", "true") - } - if a.NoValueOnSuccess { - params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess)) - } - - u.RawQuery = params.Encode() - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("PUT", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type deleteAction struct { - Prefix string - Key string - PrevValue string - PrevIndex uint64 - Dir bool - Recursive bool -} - -func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.Dir { - params.Set("dir", "true") - } - if a.Recursive { - params.Set("recursive", "true") - } - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("DELETE", u.String(), nil) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type createInOrderAction struct 
{ - Prefix string - Dir string - Value string - TTL time.Duration -} - -func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Dir) - - form := url.Values{} - form.Add("value", a.Value) - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("POST", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - return req -} - -func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { - switch code { - case http.StatusOK, http.StatusCreated: - if len(body) == 0 { - return nil, ErrEmptyBody - } - res, err = unmarshalSuccessfulKeysResponse(header, body) - default: - err = unmarshalFailedKeysResponse(body) - } - - return -} - -func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { - var res Response - err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) - if err != nil { - return nil, ErrInvalidJSON - } - if header.Get("X-Etcd-Index") != "" { - res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) - if err != nil { - return nil, err - } - } - res.ClusterID = header.Get("X-Etcd-Cluster-ID") - return &res, nil -} - -func unmarshalFailedKeysResponse(body []byte) error { - var etcdErr Error - if err := json.Unmarshal(body, &etcdErr); err != nil { - return ErrInvalidJSON - } - return etcdErr -} diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go deleted file mode 100644 index aafa3d1b870..00000000000 --- a/vendor/github.com/coreos/etcd/client/members.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "path" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - defaultV2MembersPrefix = "/v2/members" - defaultLeaderSuffix = "/leader" -) - -type Member struct { - // ID is the unique identifier of this Member. - ID string `json:"id"` - - // Name is a human-readable, non-unique identifier of this Member. - Name string `json:"name"` - - // PeerURLs represents the HTTP(S) endpoints this Member uses to - // participate in etcd's consensus protocol. - PeerURLs []string `json:"peerURLs"` - - // ClientURLs represents the HTTP(S) endpoints on which this Member - // serves its client-facing APIs. - ClientURLs []string `json:"clientURLs"` -} - -type memberCollection []Member - -func (c *memberCollection) UnmarshalJSON(data []byte) error { - d := struct { - Members []Member - }{} - - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - if d.Members == nil { - *c = make([]Member, 0) - return nil - } - - *c = d.Members - return nil -} - -type memberCreateOrUpdateRequest struct { - PeerURLs types.URLs -} - -func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { - s := struct { - PeerURLs []string `json:"peerURLs"` - }{ - PeerURLs: make([]string, len(m.PeerURLs)), - } - - for i, u := range m.PeerURLs { - s.PeerURLs[i] = u.String() - } - - return json.Marshal(&s) -} - -// NewMembersAPI constructs a new MembersAPI that uses HTTP to -// interact with etcd's membership API. 
-func NewMembersAPI(c Client) MembersAPI { - return &httpMembersAPI{ - client: c, - } -} - -type MembersAPI interface { - // List enumerates the current cluster membership. - List(ctx context.Context) ([]Member, error) - - // Add instructs etcd to accept a new Member into the cluster. - Add(ctx context.Context, peerURL string) (*Member, error) - - // Remove demotes an existing Member out of the cluster. - Remove(ctx context.Context, mID string) error - - // Update instructs etcd to update an existing Member in the cluster. - Update(ctx context.Context, mID string, peerURLs []string) error - - // Leader gets current leader of the cluster - Leader(ctx context.Context) (*Member, error) -} - -type httpMembersAPI struct { - client httpClient -} - -func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { - req := &membersAPIActionList{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var mCollection memberCollection - if err := json.Unmarshal(body, &mCollection); err != nil { - return nil, err - } - - return []Member(mCollection), nil -} - -func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { - urls, err := types.NewURLs([]string{peerURL}) - if err != nil { - return nil, err - } - - req := &membersAPIActionAdd{peerURLs: urls} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusCreated { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return nil, err - } - return nil, merr - } - - var memb Member - if err := json.Unmarshal(body, &memb); err != nil { - return nil, err - } - - return &memb, nil -} - -func (m *httpMembersAPI) Update(ctx context.Context, memberID string, 
peerURLs []string) error { - urls, err := types.NewURLs(peerURLs) - if err != nil { - return err - } - - req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { - return err - } - - if resp.StatusCode != http.StatusNoContent { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return err - } - return merr - } - - return nil -} - -func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error { - req := &membersAPIActionRemove{memberID: memberID} - resp, _, err := m.client.Do(ctx, req) - if err != nil { - return err - } - - return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) -} - -func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { - req := &membersAPIActionLeader{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var leader Member - if err := json.Unmarshal(body, &leader); err != nil { - return nil, err - } - - return &leader, nil -} - -type membersAPIActionList struct{} - -func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type membersAPIActionRemove struct { - memberID string -} - -func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, d.memberID) - req, _ := http.NewRequest("DELETE", u.String(), nil) - return req -} - -type membersAPIActionAdd struct { - peerURLs types.URLs -} - -func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - b, _ := 
json.Marshal(&m) - req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -type membersAPIActionUpdate struct { - memberID string - peerURLs types.URLs -} - -func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - u.Path = path.Join(u.Path, a.memberID) - b, _ := json.Marshal(&m) - req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -func assertStatusCode(got int, want ...int) (err error) { - for _, w := range want { - if w == got { - return nil - } - } - return fmt.Errorf("unexpected status code %d", got) -} - -type membersAPIActionLeader struct{} - -func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, defaultLeaderSuffix) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -// v2MembersURL add the necessary path to the provided endpoint -// to route requests to the default v2 members API. -func v2MembersURL(ep url.URL) *url.URL { - ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) - return &ep -} - -type membersError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e membersError) Error() string { - return e.Message -} diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go deleted file mode 100644 index 15a8babff4d..00000000000 --- a/vendor/github.com/coreos/etcd/client/util.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "regexp" -) - -var ( - roleNotFoundRegExp *regexp.Regexp - userNotFoundRegExp *regexp.Regexp -) - -func init() { - roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") - userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") -} - -// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. -func IsKeyNotFound(err error) bool { - if cErr, ok := err.(Error); ok { - return cErr.Code == ErrorCodeKeyNotFound - } - return false -} - -// IsRoleNotFound returns true if the error means role not found of v2 API. -func IsRoleNotFound(err error) bool { - if ae, ok := err.(authError); ok { - return roleNotFoundRegExp.MatchString(ae.Message) - } - return false -} - -// IsUserNotFound returns true if the error means user not found of v2 API. -func IsUserNotFound(err error) bool { - if ae, ok := err.(authError); ok { - return userNotFoundRegExp.MatchString(ae.Message) - } - return false -} diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md deleted file mode 100644 index 376bfba7614..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# etcd/clientv3 - -[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) - -`etcd/clientv3` is the official Go etcd client for v3. 
- -## Install - -```bash -go get github.com/coreos/etcd/clientv3 -``` - -## Get started - -Create client using `clientv3.New`: - -```go -cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, - DialTimeout: 5 * time.Second, -}) -if err != nil { - // handle error! -} -defer cli.Close() -``` - -etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls. And `clientv3` uses -[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it. -If the client is not closed, the connection will have leaky goroutines. To specify client request timeout, -pass `context.WithTimeout` to APIs: - -```go -ctx, cancel := context.WithTimeout(context.Background(), timeout) -resp, err := cli.Put(ctx, "sample_key", "sample_value") -cancel() -if err != nil { - // handle error! -} -// use the response -``` - -etcd uses `cmd/vendor` directory to store external dependencies, which are -to be compiled into etcd release binaries. `client` can be imported without -vendoring. For full compatibility, it is recommended to vendor builds using -etcd's vendored packages, using tools like godep, as in -[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). -For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). - -## Error Handling - -etcd client returns 2 types of errors: - -1. context error: canceled or deadline exceeded. -2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes). 
- -Here is the example code to handle client errors: - -```go -resp, err := cli.Put(ctx, "", "") -if err != nil { - switch err { - case context.Canceled: - log.Fatalf("ctx is canceled by another routine: %v", err) - case context.DeadlineExceeded: - log.Fatalf("ctx is attached with a deadline is exceeded: %v", err) - case rpctypes.ErrEmptyKey: - log.Fatalf("client-side error: %v", err) - default: - log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) - } -} -``` - -## Metrics - -The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go). - -## Namespacing - -The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. - -## Examples - -More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3). diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go deleted file mode 100644 index 8df670f163a..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "context" - "fmt" - "strings" - - "github.com/coreos/etcd/auth/authpb" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" -) - -type ( - AuthEnableResponse pb.AuthEnableResponse - AuthDisableResponse pb.AuthDisableResponse - AuthenticateResponse pb.AuthenticateResponse - AuthUserAddResponse pb.AuthUserAddResponse - AuthUserDeleteResponse pb.AuthUserDeleteResponse - AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse - AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse - AuthUserGetResponse pb.AuthUserGetResponse - AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse - AuthRoleAddResponse pb.AuthRoleAddResponse - AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse - AuthRoleGetResponse pb.AuthRoleGetResponse - AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse - AuthRoleDeleteResponse pb.AuthRoleDeleteResponse - AuthUserListResponse pb.AuthUserListResponse - AuthRoleListResponse pb.AuthRoleListResponse - - PermissionType authpb.Permission_Type - Permission authpb.Permission -) - -const ( - PermRead = authpb.READ - PermWrite = authpb.WRITE - PermReadWrite = authpb.READWRITE -) - -type Auth interface { - // AuthEnable enables auth of an etcd cluster. - AuthEnable(ctx context.Context) (*AuthEnableResponse, error) - - // AuthDisable disables auth of an etcd cluster. - AuthDisable(ctx context.Context) (*AuthDisableResponse, error) - - // UserAdd adds a new user to an etcd cluster. - UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) - - // UserDelete deletes a user from an etcd cluster. - UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) - - // UserChangePassword changes a password of a user. - UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) - - // UserGrantRole grants a role to a user. 
- UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) - - // UserGet gets a detailed information of a user. - UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) - - // UserList gets a list of all users. - UserList(ctx context.Context) (*AuthUserListResponse, error) - - // UserRevokeRole revokes a role of a user. - UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) - - // RoleAdd adds a new role to an etcd cluster. - RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) - - // RoleGrantPermission grants a permission to a role. - RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) - - // RoleGet gets a detailed information of a role. - RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) - - // RoleList gets a list of all roles. - RoleList(ctx context.Context) (*AuthRoleListResponse, error) - - // RoleRevokePermission revokes a permission from a role. - RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) - - // RoleDelete deletes a role. 
- RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) -} - -type auth struct { - remote pb.AuthClient -} - -func NewAuth(c *Client) Auth { - return &auth{remote: RetryAuthClient(c)} -} - -func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { - resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}) - return (*AuthEnableResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { - resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}) - return (*AuthDisableResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { - resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { - resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}) - return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { - resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}) - return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { - resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}) - return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { - resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}) - return (*AuthUserGetResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) 
UserList(ctx context.Context) (*AuthUserListResponse, error) { - resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}) - return (*AuthUserListResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { - resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}) - return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { - resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}) - return (*AuthRoleAddResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { - perm := &authpb.Permission{ - Key: []byte(key), - RangeEnd: []byte(rangeEnd), - PermType: authpb.Permission_Type(permType), - } - resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}) - return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { - resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}) - return (*AuthRoleGetResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { - resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}) - return (*AuthRoleListResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { - resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}) - return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) 
RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { - resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}) - return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) -} - -func StrToPermissionType(s string) (PermissionType, error) { - val, ok := authpb.Permission_Type_value[strings.ToUpper(s)] - if ok { - return PermissionType(val), nil - } - return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s) -} - -type authenticator struct { - conn *grpc.ClientConn // conn in-use - remote pb.AuthClient -} - -func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { - resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}) - return (*AuthenticateResponse)(resp), toErr(ctx, err) -} - -func (auth *authenticator) close() { - auth.conn.Close() -} - -func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return nil, err - } - - return &authenticator{ - conn: conn, - remote: pb.NewAuthClient(conn), - }, nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer.go deleted file mode 100644 index 2c8c2981d30..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/balancer.go +++ /dev/null @@ -1,438 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "net/url" - "strings" - "sync" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -// ErrNoAddrAvilable is returned by Get() when the balancer does not have -// any active connection to endpoints at the time. -// This error is returned only when opts.BlockingWait is true. -var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available") - -type notifyMsg int - -const ( - notifyReset notifyMsg = iota - notifyNext -) - -// simpleBalancer does the bare minimum to expose multiple eps -// to the grpc reconnection code path -type simpleBalancer struct { - // addrs are the client's endpoint addresses for grpc - addrs []grpc.Address - - // eps holds the raw endpoints from the client - eps []string - - // notifyCh notifies grpc of the set of addresses for connecting - notifyCh chan []grpc.Address - - // readyc closes once the first connection is up - readyc chan struct{} - readyOnce sync.Once - - // mu protects all fields below. - mu sync.RWMutex - - // upc closes when pinAddr transitions from empty to non-empty or the balancer closes. - upc chan struct{} - - // downc closes when grpc calls down() on pinAddr - downc chan struct{} - - // stopc is closed to signal updateNotifyLoop should stop. - stopc chan struct{} - - // donec closes when all goroutines are exited - donec chan struct{} - - // updateAddrsC notifies updateNotifyLoop to update addrs. - updateAddrsC chan notifyMsg - - // grpc issues TLS cert checks using the string passed into dial so - // that string must be the host. To recover the full scheme://host URL, - // have a map from hosts to the original endpoint. - hostPort2ep map[string]string - - // pinAddr is the currently pinned address; set to the empty string on - // initialization and shutdown. 
- pinAddr string - - closed bool -} - -func newSimpleBalancer(eps []string) *simpleBalancer { - notifyCh := make(chan []grpc.Address) - addrs := eps2addrs(eps) - sb := &simpleBalancer{ - addrs: addrs, - eps: eps, - notifyCh: notifyCh, - readyc: make(chan struct{}), - upc: make(chan struct{}), - stopc: make(chan struct{}), - downc: make(chan struct{}), - donec: make(chan struct{}), - updateAddrsC: make(chan notifyMsg), - hostPort2ep: getHostPort2ep(eps), - } - close(sb.downc) - go sb.updateNotifyLoop() - return sb -} - -func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } - -func (b *simpleBalancer) ConnectNotify() <-chan struct{} { - b.mu.Lock() - defer b.mu.Unlock() - return b.upc -} - -func (b *simpleBalancer) ready() <-chan struct{} { return b.readyc } - -func (b *simpleBalancer) endpoint(hostPort string) string { - b.mu.Lock() - defer b.mu.Unlock() - return b.hostPort2ep[hostPort] -} - -func (b *simpleBalancer) endpoints() []string { - b.mu.RLock() - defer b.mu.RUnlock() - return b.eps -} - -func (b *simpleBalancer) pinned() string { - b.mu.RLock() - defer b.mu.RUnlock() - return b.pinAddr -} - -func getHostPort2ep(eps []string) map[string]string { - hm := make(map[string]string, len(eps)) - for i := range eps { - _, host, _ := parseEndpoint(eps[i]) - hm[host] = eps[i] - } - return hm -} - -func (b *simpleBalancer) updateAddrs(eps ...string) { - np := getHostPort2ep(eps) - - b.mu.Lock() - - match := len(np) == len(b.hostPort2ep) - for k, v := range np { - if b.hostPort2ep[k] != v { - match = false - break - } - } - if match { - // same endpoints, so no need to update address - b.mu.Unlock() - return - } - - b.hostPort2ep = np - b.addrs, b.eps = eps2addrs(eps), eps - - // updating notifyCh can trigger new connections, - // only update addrs if all connections are down - // or addrs does not include pinAddr. 
- update := !hasAddr(b.addrs, b.pinAddr) - b.mu.Unlock() - - if update { - select { - case b.updateAddrsC <- notifyNext: - case <-b.stopc: - } - } -} - -func (b *simpleBalancer) next() { - b.mu.RLock() - downc := b.downc - b.mu.RUnlock() - select { - case b.updateAddrsC <- notifyNext: - case <-b.stopc: - } - // wait until disconnect so new RPCs are not issued on old connection - select { - case <-downc: - case <-b.stopc: - } -} - -func hasAddr(addrs []grpc.Address, targetAddr string) bool { - for _, addr := range addrs { - if targetAddr == addr.Addr { - return true - } - } - return false -} - -func (b *simpleBalancer) updateNotifyLoop() { - defer close(b.donec) - - for { - b.mu.RLock() - upc, downc, addr := b.upc, b.downc, b.pinAddr - b.mu.RUnlock() - // downc or upc should be closed - select { - case <-downc: - downc = nil - default: - } - select { - case <-upc: - upc = nil - default: - } - switch { - case downc == nil && upc == nil: - // stale - select { - case <-b.stopc: - return - default: - } - case downc == nil: - b.notifyAddrs(notifyReset) - select { - case <-upc: - case msg := <-b.updateAddrsC: - b.notifyAddrs(msg) - case <-b.stopc: - return - } - case upc == nil: - select { - // close connections that are not the pinned address - case b.notifyCh <- []grpc.Address{{Addr: addr}}: - case <-downc: - case <-b.stopc: - return - } - select { - case <-downc: - b.notifyAddrs(notifyReset) - case msg := <-b.updateAddrsC: - b.notifyAddrs(msg) - case <-b.stopc: - return - } - } - } -} - -func (b *simpleBalancer) notifyAddrs(msg notifyMsg) { - if msg == notifyNext { - select { - case b.notifyCh <- []grpc.Address{}: - case <-b.stopc: - return - } - } - b.mu.RLock() - addrs := b.addrs - pinAddr := b.pinAddr - downc := b.downc - b.mu.RUnlock() - - var waitDown bool - if pinAddr != "" { - waitDown = true - for _, a := range addrs { - if a.Addr == pinAddr { - waitDown = false - } - } - } - - select { - case b.notifyCh <- addrs: - if waitDown { - select { - case <-downc: - 
case <-b.stopc: - } - } - case <-b.stopc: - } -} - -func (b *simpleBalancer) Up(addr grpc.Address) func(error) { - f, _ := b.up(addr) - return f -} - -func (b *simpleBalancer) up(addr grpc.Address) (func(error), bool) { - b.mu.Lock() - defer b.mu.Unlock() - - // gRPC might call Up after it called Close. We add this check - // to "fix" it up at application layer. Otherwise, will panic - // if b.upc is already closed. - if b.closed { - return func(err error) {}, false - } - // gRPC might call Up on a stale address. - // Prevent updating pinAddr with a stale address. - if !hasAddr(b.addrs, addr.Addr) { - return func(err error) {}, false - } - if b.pinAddr != "" { - if logger.V(4) { - logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr) - } - return func(err error) {}, false - } - // notify waiting Get()s and pin first connected address - close(b.upc) - b.downc = make(chan struct{}) - b.pinAddr = addr.Addr - if logger.V(4) { - logger.Infof("clientv3/balancer: pin %q", addr.Addr) - } - // notify client that a connection is up - b.readyOnce.Do(func() { close(b.readyc) }) - return func(err error) { - b.mu.Lock() - b.upc = make(chan struct{}) - close(b.downc) - b.pinAddr = "" - b.mu.Unlock() - if logger.V(4) { - logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error()) - } - }, true -} - -func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { - var ( - addr string - closed bool - ) - - // If opts.BlockingWait is false (for fail-fast RPCs), it should return - // an address it has notified via Notify immediately instead of blocking. 
- if !opts.BlockingWait { - b.mu.RLock() - closed = b.closed - addr = b.pinAddr - b.mu.RUnlock() - if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if addr == "" { - return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable - } - return grpc.Address{Addr: addr}, func() {}, nil - } - - for { - b.mu.RLock() - ch := b.upc - b.mu.RUnlock() - select { - case <-ch: - case <-b.donec: - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - case <-ctx.Done(): - return grpc.Address{Addr: ""}, nil, ctx.Err() - } - b.mu.RLock() - closed = b.closed - addr = b.pinAddr - b.mu.RUnlock() - // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed. - if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if addr != "" { - break - } - } - return grpc.Address{Addr: addr}, func() {}, nil -} - -func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } - -func (b *simpleBalancer) Close() error { - b.mu.Lock() - // In case gRPC calls close twice. TODO: remove the checking - // when we are sure that gRPC wont call close twice. - if b.closed { - b.mu.Unlock() - <-b.donec - return nil - } - b.closed = true - close(b.stopc) - b.pinAddr = "" - - // In the case of following scenario: - // 1. upc is not closed; no pinned address - // 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks - // 3. client.conn.Close() calls balancer.Close(); closed = true - // 4. 
for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled - // we must close upc so Get() exits from blocking on upc - select { - case <-b.upc: - default: - // terminate all waiting Get()s - close(b.upc) - } - - b.mu.Unlock() - - // wait for updateNotifyLoop to finish - <-b.donec - close(b.notifyCh) - - return nil -} - -func getHost(ep string) string { - url, uerr := url.Parse(ep) - if uerr != nil || !strings.Contains(ep, "://") { - return ep - } - return url.Host -} - -func eps2addrs(eps []string) []grpc.Address { - addrs := make([]grpc.Address, len(eps)) - for i := range eps { - addrs[i].Addr = getHost(eps[i]) - } - return addrs -} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go deleted file mode 100644 index bff7d7cc638..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ /dev/null @@ -1,528 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "net" - "net/url" - "strconv" - "strings" - "sync" - "time" - - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -var ( - ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") - ErrOldCluster = errors.New("etcdclient: old cluster version") -) - -// Client provides and manages an etcd v3 client session. -type Client struct { - Cluster - KV - Lease - Watcher - Auth - Maintenance - - conn *grpc.ClientConn - dialerrc chan error - - cfg Config - creds *credentials.TransportCredentials - balancer *healthBalancer - mu sync.Mutex - - ctx context.Context - cancel context.CancelFunc - - // Username is a user name for authentication. - Username string - // Password is a password for authentication. - Password string - // tokenCred is an instance of WithPerRPCCredentials()'s argument - tokenCred *authTokenCredential -} - -// New creates a new etcdv3 client from a given configuration. -func New(cfg Config) (*Client, error) { - if len(cfg.Endpoints) == 0 { - return nil, ErrNoAvailableEndpoints - } - - return newClient(&cfg) -} - -// NewCtxClient creates a client with a context but no underlying grpc -// connection. This is useful for embedded cases that override the -// service interface implementations and do not need connection management. -func NewCtxClient(ctx context.Context) *Client { - cctx, cancel := context.WithCancel(ctx) - return &Client{ctx: cctx, cancel: cancel} -} - -// NewFromURL creates a new etcdv3 client from a URL. -func NewFromURL(url string) (*Client, error) { - return New(Config{Endpoints: []string{url}}) -} - -// Close shuts down the client's etcd connections. 
-func (c *Client) Close() error { - c.cancel() - c.Watcher.Close() - c.Lease.Close() - if c.conn != nil { - return toErr(c.ctx, c.conn.Close()) - } - return c.ctx.Err() -} - -// Ctx is a context for "out of band" messages (e.g., for sending -// "clean up" message when another context is canceled). It is -// canceled on client Close(). -func (c *Client) Ctx() context.Context { return c.ctx } - -// Endpoints lists the registered endpoints for the client. -func (c *Client) Endpoints() (eps []string) { - // copy the slice; protect original endpoints from being changed - eps = make([]string, len(c.cfg.Endpoints)) - copy(eps, c.cfg.Endpoints) - return -} - -// SetEndpoints updates client's endpoints. -func (c *Client) SetEndpoints(eps ...string) { - c.mu.Lock() - c.cfg.Endpoints = eps - c.mu.Unlock() - c.balancer.updateAddrs(eps...) -} - -// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. -func (c *Client) Sync(ctx context.Context) error { - mresp, err := c.MemberList(ctx) - if err != nil { - return err - } - var eps []string - for _, m := range mresp.Members { - eps = append(eps, m.ClientURLs...) - } - c.SetEndpoints(eps...) 
- return nil -} - -func (c *Client) autoSync() { - if c.cfg.AutoSyncInterval == time.Duration(0) { - return - } - - for { - select { - case <-c.ctx.Done(): - return - case <-time.After(c.cfg.AutoSyncInterval): - ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) - err := c.Sync(ctx) - cancel() - if err != nil && err != c.ctx.Err() { - logger.Println("Auto sync endpoints failed:", err) - } - } - } -} - -type authTokenCredential struct { - token string - tokenMu *sync.RWMutex -} - -func (cred authTokenCredential) RequireTransportSecurity() bool { - return false -} - -func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { - cred.tokenMu.RLock() - defer cred.tokenMu.RUnlock() - return map[string]string{ - "token": cred.token, - }, nil -} - -func parseEndpoint(endpoint string) (proto string, host string, scheme string) { - proto = "tcp" - host = endpoint - url, uerr := url.Parse(endpoint) - if uerr != nil || !strings.Contains(endpoint, "://") { - return - } - scheme = url.Scheme - - // strip scheme:// prefix since grpc dials by host - host = url.Host - switch url.Scheme { - case "http", "https": - case "unix", "unixs": - proto = "unix" - host = url.Host + url.Path - default: - proto, host = "", "" - } - return -} - -func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { - creds = c.creds - switch scheme { - case "unix": - case "http": - creds = nil - case "https", "unixs": - if creds != nil { - break - } - tlsconfig := &tls.Config{} - emptyCreds := credentials.NewTLS(tlsconfig) - creds = &emptyCreds - default: - creds = nil - } - return -} - -// dialSetupOpts gives the dial opts prior to any authentication -func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) { - if c.cfg.DialTimeout > 0 { - opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} - } - if c.cfg.DialKeepAliveTime > 0 { - params := 
keepalive.ClientParameters{ - Time: c.cfg.DialKeepAliveTime, - Timeout: c.cfg.DialKeepAliveTimeout, - } - opts = append(opts, grpc.WithKeepaliveParams(params)) - } - opts = append(opts, dopts...) - - f := func(host string, t time.Duration) (net.Conn, error) { - proto, host, _ := parseEndpoint(c.balancer.endpoint(host)) - if host == "" && endpoint != "" { - // dialing an endpoint not in the balancer; use - // endpoint passed into dial - proto, host, _ = parseEndpoint(endpoint) - } - if proto == "" { - return nil, fmt.Errorf("unknown scheme for %q", host) - } - select { - case <-c.ctx.Done(): - return nil, c.ctx.Err() - default: - } - dialer := &net.Dialer{Timeout: t} - conn, err := dialer.DialContext(c.ctx, proto, host) - if err != nil { - select { - case c.dialerrc <- err: - default: - } - } - return conn, err - } - opts = append(opts, grpc.WithDialer(f)) - - creds := c.creds - if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 { - creds = c.processCreds(scheme) - } - if creds != nil { - opts = append(opts, grpc.WithTransportCredentials(*creds)) - } else { - opts = append(opts, grpc.WithInsecure()) - } - - return opts -} - -// Dial connects to a single endpoint using the client's config. 
-func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) { - return c.dial(endpoint) -} - -func (c *Client) getToken(ctx context.Context) error { - var err error // return last error in a case of fail - var auth *authenticator - - for i := 0; i < len(c.cfg.Endpoints); i++ { - endpoint := c.cfg.Endpoints[i] - host := getHost(endpoint) - // use dial options without dopts to avoid reusing the client balancer - auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint)) - if err != nil { - continue - } - defer auth.close() - - var resp *AuthenticateResponse - resp, err = auth.authenticate(ctx, c.Username, c.Password) - if err != nil { - continue - } - - c.tokenCred.tokenMu.Lock() - c.tokenCred.token = resp.Token - c.tokenCred.tokenMu.Unlock() - - return nil - } - - return err -} - -func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { - opts := c.dialSetupOpts(endpoint, dopts...) - host := getHost(endpoint) - if c.Username != "" && c.Password != "" { - c.tokenCred = &authTokenCredential{ - tokenMu: &sync.RWMutex{}, - } - - ctx := c.ctx - if c.cfg.DialTimeout > 0 { - cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout) - defer cancel() - ctx = cctx - } - - err := c.getToken(ctx) - if err != nil { - if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { - if err == ctx.Err() && ctx.Err() != c.ctx.Err() { - err = context.DeadlineExceeded - } - return nil, err - } - } else { - opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) - } - } - - opts = append(opts, c.cfg.DialOptions...) - - conn, err := grpc.DialContext(c.ctx, host, opts...) - if err != nil { - return nil, err - } - return conn, nil -} - -// WithRequireLeader requires client requests to only succeed -// when the cluster has a leader. 
-func WithRequireLeader(ctx context.Context) context.Context { - md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewOutgoingContext(ctx, md) -} - -func newClient(cfg *Config) (*Client, error) { - if cfg == nil { - cfg = &Config{} - } - var creds *credentials.TransportCredentials - if cfg.TLS != nil { - c := credentials.NewTLS(cfg.TLS) - creds = &c - } - - // use a temporary skeleton client to bootstrap first connection - baseCtx := context.TODO() - if cfg.Context != nil { - baseCtx = cfg.Context - } - - ctx, cancel := context.WithCancel(baseCtx) - client := &Client{ - conn: nil, - dialerrc: make(chan error, 1), - cfg: *cfg, - creds: creds, - ctx: ctx, - cancel: cancel, - } - if cfg.Username != "" && cfg.Password != "" { - client.Username = cfg.Username - client.Password = cfg.Password - } - - sb := newSimpleBalancer(cfg.Endpoints) - hc := func(ep string) (bool, error) { return grpcHealthCheck(client, ep) } - client.balancer = newHealthBalancer(sb, cfg.DialTimeout, hc) - - // use Endpoints[0] so that for https:// without any tls config given, then - // grpc will assume the certificate server name is the endpoint host. 
- conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) - if err != nil { - client.cancel() - client.balancer.Close() - return nil, err - } - client.conn = conn - - // wait for a connection - if cfg.DialTimeout > 0 { - hasConn := false - waitc := time.After(cfg.DialTimeout) - select { - case <-client.balancer.ready(): - hasConn = true - case <-ctx.Done(): - case <-waitc: - } - if !hasConn { - err := context.DeadlineExceeded - select { - case err = <-client.dialerrc: - default: - } - client.cancel() - client.balancer.Close() - conn.Close() - return nil, err - } - } - - client.Cluster = NewCluster(client) - client.KV = NewKV(client) - client.Lease = NewLease(client) - client.Watcher = NewWatcher(client) - client.Auth = NewAuth(client) - client.Maintenance = NewMaintenance(client) - - if cfg.RejectOldCluster { - if err := client.checkVersion(); err != nil { - client.Close() - return nil, err - } - } - - go client.autoSync() - return client, nil -} - -func (c *Client) checkVersion() (err error) { - var wg sync.WaitGroup - errc := make(chan error, len(c.cfg.Endpoints)) - ctx, cancel := context.WithCancel(c.ctx) - if c.cfg.DialTimeout > 0 { - ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) - } - wg.Add(len(c.cfg.Endpoints)) - for _, ep := range c.cfg.Endpoints { - // if cluster is current, any endpoint gives a recent version - go func(e string) { - defer wg.Done() - resp, rerr := c.Status(ctx, e) - if rerr != nil { - errc <- rerr - return - } - vs := strings.Split(resp.Version, ".") - maj, min := 0, 0 - if len(vs) >= 2 { - maj, _ = strconv.Atoi(vs[0]) - min, rerr = strconv.Atoi(vs[1]) - } - if maj < 3 || (maj == 3 && min < 2) { - rerr = ErrOldCluster - } - errc <- rerr - }(ep) - } - // wait for success - for i := 0; i < len(c.cfg.Endpoints); i++ { - if err = <-errc; err == nil { - break - } - } - cancel() - wg.Wait() - return err -} - -// ActiveConnection returns the current in-use connection -func (c *Client) ActiveConnection() 
*grpc.ClientConn { return c.conn } - -// isHaltErr returns true if the given error and context indicate no forward -// progress can be made, even after reconnecting. -func isHaltErr(ctx context.Context, err error) bool { - if ctx != nil && ctx.Err() != nil { - return true - } - if err == nil { - return false - } - ev, _ := status.FromError(err) - // Unavailable codes mean the system will be right back. - // (e.g., can't connect, lost leader) - // Treat Internal codes as if something failed, leaving the - // system in an inconsistent state, but retrying could make progress. - // (e.g., failed in middle of send, corrupted frame) - // TODO: are permanent Internal errors possible from grpc? - return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal -} - -func toErr(ctx context.Context, err error) error { - if err == nil { - return nil - } - err = rpctypes.Error(err) - if _, ok := err.(rpctypes.EtcdError); ok { - return err - } - ev, _ := status.FromError(err) - code := ev.Code() - switch code { - case codes.DeadlineExceeded: - fallthrough - case codes.Canceled: - if ctx.Err() != nil { - err = ctx.Err() - } - case codes.Unavailable: - case codes.FailedPrecondition: - err = grpc.ErrClientConnClosing - } - return err -} - -func canceledByCaller(stopCtx context.Context, err error) bool { - if stopCtx.Err() == nil || err == nil { - return false - } - - return err == context.Canceled || err == context.DeadlineExceeded -} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go deleted file mode 100644 index 8beba58a67b..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type ( - Member pb.Member - MemberListResponse pb.MemberListResponse - MemberAddResponse pb.MemberAddResponse - MemberRemoveResponse pb.MemberRemoveResponse - MemberUpdateResponse pb.MemberUpdateResponse -) - -type Cluster interface { - // MemberList lists the current cluster membership. - MemberList(ctx context.Context) (*MemberListResponse, error) - - // MemberAdd adds a new member into the cluster. - MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) - - // MemberRemove removes an existing member from the cluster. - MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) - - // MemberUpdate updates the peer addresses of the member. 
- MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) -} - -type cluster struct { - remote pb.ClusterClient -} - -func NewCluster(c *Client) Cluster { - return &cluster{remote: RetryClusterClient(c)} -} - -func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster { - return &cluster{remote: remote} -} - -func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { - r := &pb.MemberAddRequest{PeerURLs: peerAddrs} - resp, err := c.remote.MemberAdd(ctx, r) - if err != nil { - return nil, toErr(ctx, err) - } - return (*MemberAddResponse)(resp), nil -} - -func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { - r := &pb.MemberRemoveRequest{ID: id} - resp, err := c.remote.MemberRemove(ctx, r) - if err != nil { - return nil, toErr(ctx, err) - } - return (*MemberRemoveResponse)(resp), nil -} - -func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { - // it is safe to retry on update. - r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r) - if err == nil { - return (*MemberUpdateResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { - // it is safe to retry on list. 
- resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}) - if err == nil { - return (*MemberListResponse)(resp), nil - } - return nil, toErr(ctx, err) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go deleted file mode 100644 index 41e80c1da5d..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -// CompactOp represents a compact operation. -type CompactOp struct { - revision int64 - physical bool -} - -// CompactOption configures compact operation. -type CompactOption func(*CompactOp) - -func (op *CompactOp) applyCompactOpts(opts []CompactOption) { - for _, opt := range opts { - opt(op) - } -} - -// OpCompact wraps slice CompactOption to create a CompactOp. -func OpCompact(rev int64, opts ...CompactOption) CompactOp { - ret := CompactOp{revision: rev} - ret.applyCompactOpts(opts) - return ret -} - -func (op CompactOp) toRequest() *pb.CompactionRequest { - return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} -} - -// WithCompactPhysical makes Compact wait until all compacted entries are -// removed from the etcd server's storage. 
-func WithCompactPhysical() CompactOption { - return func(op *CompactOp) { op.physical = true } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go deleted file mode 100644 index b5f0a255279..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/compare.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type CompareTarget int -type CompareResult int - -const ( - CompareVersion CompareTarget = iota - CompareCreated - CompareModified - CompareValue -) - -type Cmp pb.Compare - -func Compare(cmp Cmp, result string, v interface{}) Cmp { - var r pb.Compare_CompareResult - - switch result { - case "=": - r = pb.Compare_EQUAL - case "!=": - r = pb.Compare_NOT_EQUAL - case ">": - r = pb.Compare_GREATER - case "<": - r = pb.Compare_LESS - default: - panic("Unknown result op") - } - - cmp.Result = r - switch cmp.Target { - case pb.Compare_VALUE: - val, ok := v.(string) - if !ok { - panic("bad compare value") - } - cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)} - case pb.Compare_VERSION: - cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)} - case pb.Compare_CREATE: - cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} - case pb.Compare_MOD: - cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: 
mustInt64(v)} - case pb.Compare_LEASE: - cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)} - default: - panic("Unknown compare type") - } - return cmp -} - -func Value(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_VALUE} -} - -func Version(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_VERSION} -} - -func CreateRevision(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_CREATE} -} - -func ModRevision(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_MOD} -} - -// LeaseValue compares a key's LeaseID to a value of your choosing. The empty -// LeaseID is 0, otherwise known as `NoLease`. -func LeaseValue(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_LEASE} -} - -// KeyBytes returns the byte slice holding with the comparison key. -func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } - -// WithKeyBytes sets the byte slice for the comparison key. -func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } - -// ValueBytes returns the byte slice holding the comparison value, if any. -func (cmp *Cmp) ValueBytes() []byte { - if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { - return tu.Value - } - return nil -} - -// WithValueBytes sets the byte slice for the comparison's value. -func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } - -// WithRange sets the comparison to scan the range [key, end). -func (cmp Cmp) WithRange(end string) Cmp { - cmp.RangeEnd = []byte(end) - return cmp -} - -// WithPrefix sets the comparison to scan all keys prefixed by the key. -func (cmp Cmp) WithPrefix() Cmp { - cmp.RangeEnd = getPrefix(cmp.Key) - return cmp -} - -// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. 
-func mustInt64(val interface{}) int64 { - if v, ok := val.(int64); ok { - return v - } - if v, ok := val.(int); ok { - return int64(v) - } - panic("bad value") -} - -// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an -// int64 otherwise. -func mustInt64orLeaseID(val interface{}) int64 { - if v, ok := val.(LeaseID); ok { - return int64(v) - } - return mustInt64(val) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go deleted file mode 100644 index dcdbf511d1b..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package concurrency implements concurrency operations on top of -// etcd such as distributed locks, barriers, and elections. -package concurrency diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go deleted file mode 100644 index e18a0ed4ad9..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "context" - "errors" - "fmt" - - v3 "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -var ( - ErrElectionNotLeader = errors.New("election: not leader") - ErrElectionNoLeader = errors.New("election: no leader") -) - -type Election struct { - session *Session - - keyPrefix string - - leaderKey string - leaderRev int64 - leaderSession *Session - hdr *pb.ResponseHeader -} - -// NewElection returns a new election on a given key prefix. -func NewElection(s *Session, pfx string) *Election { - return &Election{session: s, keyPrefix: pfx + "/"} -} - -// ResumeElection initializes an election with a known leader. -func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election { - return &Election{ - session: s, - leaderKey: leaderKey, - leaderRev: leaderRev, - leaderSession: s, - } -} - -// Campaign puts a value as eligible for the election. It blocks until -// it is elected, an error occurs, or the context is cancelled. 
-func (e *Election) Campaign(ctx context.Context, val string) error { - s := e.session - client := e.session.Client() - - k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease()) - txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0)) - txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease()))) - txn = txn.Else(v3.OpGet(k)) - resp, err := txn.Commit() - if err != nil { - return err - } - e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s - if !resp.Succeeded { - kv := resp.Responses[0].GetResponseRange().Kvs[0] - e.leaderRev = kv.CreateRevision - if string(kv.Value) != val { - if err = e.Proclaim(ctx, val); err != nil { - e.Resign(ctx) - return err - } - } - } - - _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1) - if err != nil { - // clean up in case of context cancel - select { - case <-ctx.Done(): - e.Resign(client.Ctx()) - default: - e.leaderSession = nil - } - return err - } - e.hdr = resp.Header - - return nil -} - -// Proclaim lets the leader announce a new value without another election. -func (e *Election) Proclaim(ctx context.Context, val string) error { - if e.leaderSession == nil { - return ErrElectionNotLeader - } - client := e.session.Client() - cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) - txn := client.Txn(ctx).If(cmp) - txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease()))) - tresp, terr := txn.Commit() - if terr != nil { - return terr - } - if !tresp.Succeeded { - e.leaderKey = "" - return ErrElectionNotLeader - } - - e.hdr = tresp.Header - return nil -} - -// Resign lets a leader start a new election. 
-func (e *Election) Resign(ctx context.Context) (err error) { - if e.leaderSession == nil { - return nil - } - client := e.session.Client() - cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) - resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit() - if err == nil { - e.hdr = resp.Header - } - e.leaderKey = "" - e.leaderSession = nil - return err -} - -// Leader returns the leader value for the current election. -func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) { - client := e.session.Client() - resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) - if err != nil { - return nil, err - } else if len(resp.Kvs) == 0 { - // no leader currently elected - return nil, ErrElectionNoLeader - } - return resp, nil -} - -// Observe returns a channel that reliably observes ordered leader proposals -// as GetResponse values on every current elected leader key. It will not -// necessarily fetch all historical leader updates, but will always post the -// most recent leader value. -// -// The channel closes when the context is canceled or the underlying watcher -// is otherwise disrupted. -func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse { - retc := make(chan v3.GetResponse) - go e.observe(ctx, retc) - return retc -} - -func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { - client := e.session.Client() - - defer close(ch) - for { - resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) - if err != nil { - return - } - - var kv *mvccpb.KeyValue - var hdr *pb.ResponseHeader - - if len(resp.Kvs) == 0 { - cctx, cancel := context.WithCancel(ctx) - // wait for first key put on prefix - opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()} - wch := client.Watch(cctx, e.keyPrefix, opts...) 
- for kv == nil { - wr, ok := <-wch - if !ok || wr.Err() != nil { - cancel() - return - } - // only accept puts; a delete will make observe() spin - for _, ev := range wr.Events { - if ev.Type == mvccpb.PUT { - hdr, kv = &wr.Header, ev.Kv - // may have multiple revs; hdr.rev = the last rev - // set to kv's rev in case batch has multiple Puts - hdr.Revision = kv.ModRevision - break - } - } - } - cancel() - } else { - hdr, kv = resp.Header, resp.Kvs[0] - } - - select { - case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}: - case <-ctx.Done(): - return - } - - cctx, cancel := context.WithCancel(ctx) - wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1)) - keyDeleted := false - for !keyDeleted { - wr, ok := <-wch - if !ok { - cancel() - return - } - for _, ev := range wr.Events { - if ev.Type == mvccpb.DELETE { - keyDeleted = true - break - } - resp.Header = &wr.Header - resp.Kvs = []*mvccpb.KeyValue{ev.Kv} - select { - case ch <- *resp: - case <-cctx.Done(): - cancel() - return - } - } - } - cancel() - } -} - -// Key returns the leader key if elected, empty string otherwise. -func (e *Election) Key() string { return e.leaderKey } - -// Rev returns the leader key's creation revision, if elected. -func (e *Election) Rev() int64 { return e.leaderRev } - -// Header is the response header from the last successful election proposal. -func (e *Election) Header() *pb.ResponseHeader { return e.hdr } diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go deleted file mode 100644 index 4b6e399bd4e..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "context" - "fmt" - - v3 "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error { - cctx, cancel := context.WithCancel(ctx) - defer cancel() - - var wr v3.WatchResponse - wch := client.Watch(cctx, key, v3.WithRev(rev)) - for wr = range wch { - for _, ev := range wr.Events { - if ev.Type == mvccpb.DELETE { - return nil - } - } - } - if err := wr.Err(); err != nil { - return err - } - if err := ctx.Err(); err != nil { - return err - } - return fmt.Errorf("lost watcher waiting for delete") -} - -// waitDeletes efficiently waits until all keys matching the prefix and no greater -// than the create revision. -func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) { - getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev)) - for { - resp, err := client.Get(ctx, pfx, getOpts...) 
- if err != nil { - return nil, err - } - if len(resp.Kvs) == 0 { - return resp.Header, nil - } - lastKey := string(resp.Kvs[0].Key) - if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { - return nil, err - } - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go deleted file mode 100644 index dac9ba5a2a3..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "context" - "fmt" - "sync" - - v3 "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -// Mutex implements the sync Locker interface with etcd -type Mutex struct { - s *Session - - pfx string - myKey string - myRev int64 - hdr *pb.ResponseHeader -} - -func NewMutex(s *Session, pfx string) *Mutex { - return &Mutex{s, pfx + "/", "", -1, nil} -} - -// Lock locks the mutex with a cancelable context. If the context is canceled -// while trying to acquire the lock, the mutex tries to clean its stale lock entry. 
-func (m *Mutex) Lock(ctx context.Context) error { - s := m.s - client := m.s.Client() - - m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) - cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) - // put self in lock waiters via myKey; oldest waiter holds lock - put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) - // reuse key in case this session already holds the lock - get := v3.OpGet(m.myKey) - // fetch current holder to complete uncontended path with only one RPC - getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) - resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() - if err != nil { - return err - } - m.myRev = resp.Header.Revision - if !resp.Succeeded { - m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision - } - // if no key on prefix / the minimum rev is key, already hold the lock - ownerKey := resp.Responses[1].GetResponseRange().Kvs - if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { - m.hdr = resp.Header - return nil - } - - // wait for deletion revisions prior to myKey - hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) - // release lock key if cancelled - select { - case <-ctx.Done(): - m.Unlock(client.Ctx()) - default: - m.hdr = hdr - } - return werr -} - -func (m *Mutex) Unlock(ctx context.Context) error { - client := m.s.Client() - if _, err := client.Delete(ctx, m.myKey); err != nil { - return err - } - m.myKey = "\x00" - m.myRev = -1 - return nil -} - -func (m *Mutex) IsOwner() v3.Cmp { - return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev) -} - -func (m *Mutex) Key() string { return m.myKey } - -// Header is the response header received from etcd on acquiring the lock. 
-func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr } - -type lockerMutex struct{ *Mutex } - -func (lm *lockerMutex) Lock() { - client := lm.s.Client() - if err := lm.Mutex.Lock(client.Ctx()); err != nil { - panic(err) - } -} -func (lm *lockerMutex) Unlock() { - client := lm.s.Client() - if err := lm.Mutex.Unlock(client.Ctx()); err != nil { - panic(err) - } -} - -// NewLocker creates a sync.Locker backed by an etcd mutex. -func NewLocker(s *Session, pfx string) sync.Locker { - return &lockerMutex{NewMutex(s, pfx)} -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go deleted file mode 100644 index c399d64a61d..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "context" - "time" - - v3 "github.com/coreos/etcd/clientv3" -) - -const defaultSessionTTL = 60 - -// Session represents a lease kept alive for the lifetime of a client. -// Fault-tolerant applications may use sessions to reason about liveness. -type Session struct { - client *v3.Client - opts *sessionOptions - id v3.LeaseID - - cancel context.CancelFunc - donec <-chan struct{} -} - -// NewSession gets the leased session for a client. 
-func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { - ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()} - for _, opt := range opts { - opt(ops) - } - - id := ops.leaseID - if id == v3.NoLease { - resp, err := client.Grant(ops.ctx, int64(ops.ttl)) - if err != nil { - return nil, err - } - id = v3.LeaseID(resp.ID) - } - - ctx, cancel := context.WithCancel(ops.ctx) - keepAlive, err := client.KeepAlive(ctx, id) - if err != nil || keepAlive == nil { - cancel() - return nil, err - } - - donec := make(chan struct{}) - s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec} - - // keep the lease alive until client error or cancelled context - go func() { - defer close(donec) - for range keepAlive { - // eat messages until keep alive channel closes - } - }() - - return s, nil -} - -// Client is the etcd client that is attached to the session. -func (s *Session) Client() *v3.Client { - return s.client -} - -// Lease is the lease ID for keys bound to the session. -func (s *Session) Lease() v3.LeaseID { return s.id } - -// Done returns a channel that closes when the lease is orphaned, expires, or -// is otherwise no longer being refreshed. -func (s *Session) Done() <-chan struct{} { return s.donec } - -// Orphan ends the refresh for the session lease. This is useful -// in case the state of the client connection is indeterminate (revoke -// would fail) or when transferring lease ownership. -func (s *Session) Orphan() { - s.cancel() - <-s.donec -} - -// Close orphans the session and revokes the session lease. -func (s *Session) Close() error { - s.Orphan() - // if revoke takes longer than the ttl, lease is expired anyway - ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second) - _, err := s.client.Revoke(ctx, s.id) - cancel() - return err -} - -type sessionOptions struct { - ttl int - leaseID v3.LeaseID - ctx context.Context -} - -// SessionOption configures Session. 
-type SessionOption func(*sessionOptions) - -// WithTTL configures the session's TTL in seconds. -// If TTL is <= 0, the default 60 seconds TTL will be used. -func WithTTL(ttl int) SessionOption { - return func(so *sessionOptions) { - if ttl > 0 { - so.ttl = ttl - } - } -} - -// WithLease specifies the existing leaseID to be used for the session. -// This is useful in process restart scenario, for example, to reclaim -// leadership from an election prior to restart. -func WithLease(leaseID v3.LeaseID) SessionOption { - return func(so *sessionOptions) { - so.leaseID = leaseID - } -} - -// WithContext assigns a context to the session instead of defaulting to -// using the client context. This is useful for canceling NewSession and -// Close operations immediately without having to close the client. If the -// context is canceled before Close() completes, the session's lease will be -// abandoned and left to expire instead of being revoked. -func WithContext(ctx context.Context) SessionOption { - return func(so *sessionOptions) { - so.ctx = ctx - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go deleted file mode 100644 index d11023ebe36..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package concurrency - -import ( - "context" - "math" - - v3 "github.com/coreos/etcd/clientv3" -) - -// STM is an interface for software transactional memory. -type STM interface { - // Get returns the value for a key and inserts the key in the txn's read set. - // If Get fails, it aborts the transaction with an error, never returning. - Get(key ...string) string - // Put adds a value for a key to the write set. - Put(key, val string, opts ...v3.OpOption) - // Rev returns the revision of a key in the read set. - Rev(key string) int64 - // Del deletes a key. - Del(key string) - - // commit attempts to apply the txn's changes to the server. - commit() *v3.TxnResponse - reset() -} - -// Isolation is an enumeration of transactional isolation levels which -// describes how transactions should interfere and conflict. -type Isolation int - -const ( - // SerializableSnapshot provides serializable isolation and also checks - // for write conflicts. - SerializableSnapshot Isolation = iota - // Serializable reads within the same transaction attempt return data - // from the at the revision of the first read. - Serializable - // RepeatableReads reads within the same transaction attempt always - // return the same data. - RepeatableReads - // ReadCommitted reads keys from any committed revision. - ReadCommitted -) - -// stmError safely passes STM errors through panic to the STM error channel. -type stmError struct{ err error } - -type stmOptions struct { - iso Isolation - ctx context.Context - prefetch []string -} - -type stmOption func(*stmOptions) - -// WithIsolation specifies the transaction isolation level. -func WithIsolation(lvl Isolation) stmOption { - return func(so *stmOptions) { so.iso = lvl } -} - -// WithAbortContext specifies the context for permanently aborting the transaction. -func WithAbortContext(ctx context.Context) stmOption { - return func(so *stmOptions) { so.ctx = ctx } -} - -// WithPrefetch is a hint to prefetch a list of keys before trying to apply. 
-// If an STM transaction will unconditionally fetch a set of keys, prefetching -// those keys will save the round-trip cost from requesting each key one by one -// with Get(). -func WithPrefetch(keys ...string) stmOption { - return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) } -} - -// NewSTM initiates a new STM instance, using serializable snapshot isolation by default. -func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) { - opts := &stmOptions{ctx: c.Ctx()} - for _, f := range so { - f(opts) - } - if len(opts.prefetch) != 0 { - f := apply - apply = func(s STM) error { - s.Get(opts.prefetch...) - return f(s) - } - } - return runSTM(mkSTM(c, opts), apply) -} - -func mkSTM(c *v3.Client, opts *stmOptions) STM { - switch opts.iso { - case SerializableSnapshot: - s := &stmSerializable{ - stm: stm{client: c, ctx: opts.ctx}, - prefetch: make(map[string]*v3.GetResponse), - } - s.conflicts = func() []v3.Cmp { - return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...) 
- } - return s - case Serializable: - s := &stmSerializable{ - stm: stm{client: c, ctx: opts.ctx}, - prefetch: make(map[string]*v3.GetResponse), - } - s.conflicts = func() []v3.Cmp { return s.rset.cmps() } - return s - case RepeatableReads: - s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} - s.conflicts = func() []v3.Cmp { return s.rset.cmps() } - return s - case ReadCommitted: - s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} - s.conflicts = func() []v3.Cmp { return nil } - return s - default: - panic("unsupported stm") - } -} - -type stmResponse struct { - resp *v3.TxnResponse - err error -} - -func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) { - outc := make(chan stmResponse, 1) - go func() { - defer func() { - if r := recover(); r != nil { - e, ok := r.(stmError) - if !ok { - // client apply panicked - panic(r) - } - outc <- stmResponse{nil, e.err} - } - }() - var out stmResponse - for { - s.reset() - if out.err = apply(s); out.err != nil { - break - } - if out.resp = s.commit(); out.resp != nil { - break - } - } - outc <- out - }() - r := <-outc - return r.resp, r.err -} - -// stm implements repeatable-read software transactional memory over etcd -type stm struct { - client *v3.Client - ctx context.Context - // rset holds read key values and revisions - rset readSet - // wset holds overwritten keys and their values - wset writeSet - // getOpts are the opts used for gets - getOpts []v3.OpOption - // conflicts computes the current conflicts on the txn - conflicts func() []v3.Cmp -} - -type stmPut struct { - val string - op v3.Op -} - -type readSet map[string]*v3.GetResponse - -func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) { - for i, resp := range txnresp.Responses { - rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange()) - } -} - -// first returns the store revision from the first fetch -func (rs readSet) first() int64 { - ret := int64(math.MaxInt64 - 1) - 
for _, resp := range rs { - if rev := resp.Header.Revision; rev < ret { - ret = rev - } - } - return ret -} - -// cmps guards the txn from updates to read set -func (rs readSet) cmps() []v3.Cmp { - cmps := make([]v3.Cmp, 0, len(rs)) - for k, rk := range rs { - cmps = append(cmps, isKeyCurrent(k, rk)) - } - return cmps -} - -type writeSet map[string]stmPut - -func (ws writeSet) get(keys ...string) *stmPut { - for _, key := range keys { - if wv, ok := ws[key]; ok { - return &wv - } - } - return nil -} - -// cmps returns a cmp list testing no writes have happened past rev -func (ws writeSet) cmps(rev int64) []v3.Cmp { - cmps := make([]v3.Cmp, 0, len(ws)) - for key := range ws { - cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev)) - } - return cmps -} - -// puts is the list of ops for all pending writes -func (ws writeSet) puts() []v3.Op { - puts := make([]v3.Op, 0, len(ws)) - for _, v := range ws { - puts = append(puts, v.op) - } - return puts -} - -func (s *stm) Get(keys ...string) string { - if wv := s.wset.get(keys...); wv != nil { - return wv.val - } - return respToValue(s.fetch(keys...)) -} - -func (s *stm) Put(key, val string, opts ...v3.OpOption) { - s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)} -} - -func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} } - -func (s *stm) Rev(key string) int64 { - if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 { - return resp.Kvs[0].ModRevision - } - return 0 -} - -func (s *stm) commit() *v3.TxnResponse { - txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit() - if err != nil { - panic(stmError{err}) - } - if txnresp.Succeeded { - return txnresp - } - return nil -} - -func (s *stm) fetch(keys ...string) *v3.GetResponse { - if len(keys) == 0 { - return nil - } - ops := make([]v3.Op, len(keys)) - for i, key := range keys { - if resp, ok := s.rset[key]; ok { - return resp - } - ops[i] = v3.OpGet(key, s.getOpts...) 
- } - txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit() - if err != nil { - panic(stmError{err}) - } - s.rset.add(keys, txnresp) - return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange()) -} - -func (s *stm) reset() { - s.rset = make(map[string]*v3.GetResponse) - s.wset = make(map[string]stmPut) -} - -type stmSerializable struct { - stm - prefetch map[string]*v3.GetResponse -} - -func (s *stmSerializable) Get(keys ...string) string { - if wv := s.wset.get(keys...); wv != nil { - return wv.val - } - firstRead := len(s.rset) == 0 - for _, key := range keys { - if resp, ok := s.prefetch[key]; ok { - delete(s.prefetch, key) - s.rset[key] = resp - } - } - resp := s.stm.fetch(keys...) - if firstRead { - // txn's base revision is defined by the first read - s.getOpts = []v3.OpOption{ - v3.WithRev(resp.Header.Revision), - v3.WithSerializable(), - } - } - return respToValue(resp) -} - -func (s *stmSerializable) Rev(key string) int64 { - s.Get(key) - return s.stm.Rev(key) -} - -func (s *stmSerializable) gets() ([]string, []v3.Op) { - keys := make([]string, 0, len(s.rset)) - ops := make([]v3.Op, 0, len(s.rset)) - for k := range s.rset { - keys = append(keys, k) - ops = append(ops, v3.OpGet(k)) - } - return keys, ops -} - -func (s *stmSerializable) commit() *v3.TxnResponse { - keys, getops := s.gets() - txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...) 
- // use Else to prefetch keys in case of conflict to save a round trip - txnresp, err := txn.Else(getops...).Commit() - if err != nil { - panic(stmError{err}) - } - if txnresp.Succeeded { - return txnresp - } - // load prefetch with Else data - s.rset.add(keys, txnresp) - s.prefetch = s.rset - s.getOpts = nil - return nil -} - -func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp { - if len(r.Kvs) != 0 { - return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision) - } - return v3.Compare(v3.ModRevision(k), "=", 0) -} - -func respToValue(resp *v3.GetResponse) string { - if resp == nil || len(resp.Kvs) == 0 { - return "" - } - return string(resp.Kvs[0].Value) -} - -// NewSTMRepeatable is deprecated. -func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { - return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads)) -} - -// NewSTMSerializable is deprecated. -func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { - return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable)) -} - -// NewSTMReadCommitted is deprecated. -func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { - return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted)) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go deleted file mode 100644 index 87d61cf577d..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "crypto/tls" - "time" - - "google.golang.org/grpc" -) - -type Config struct { - // Endpoints is a list of URLs. - Endpoints []string `json:"endpoints"` - - // AutoSyncInterval is the interval to update endpoints with its latest members. - // 0 disables auto-sync. By default auto-sync is disabled. - AutoSyncInterval time.Duration `json:"auto-sync-interval"` - - // DialTimeout is the timeout for failing to establish a connection. - DialTimeout time.Duration `json:"dial-timeout"` - - // DialKeepAliveTime is the time in seconds after which client pings the server to see if - // transport is alive. - DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` - - // DialKeepAliveTimeout is the time in seconds that the client waits for a response for the - // keep-alive probe. If the response is not received in this time, the connection is closed. - DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` - - // TLS holds the client secure credentials, if any. - TLS *tls.Config - - // Username is a user name for authentication. - Username string `json:"username"` - - // Password is a password for authentication. - Password string `json:"password"` - - // RejectOldCluster when set will refuse to create a client against an outdated cluster. - RejectOldCluster bool `json:"reject-old-cluster"` - - // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). 
- DialOptions []grpc.DialOption - - // Context is the default client context; it can be used to cancel grpc dial out and - // other operations that do not have an explicit context. - Context context.Context -} diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go deleted file mode 100644 index dacc5bb346f..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/doc.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package clientv3 implements the official Go etcd client for v3. -// -// Create client using `clientv3.New`: -// -// cli, err := clientv3.New(clientv3.Config{ -// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, -// DialTimeout: 5 * time.Second, -// }) -// if err != nil { -// // handle error! -// } -// defer cli.Close() -// -// Make sure to close the client after using it. If the client is not closed, the -// connection will have leaky goroutines. -// -// To specify a client request timeout, wrap the context with context.WithTimeout: -// -// ctx, cancel := context.WithTimeout(context.Background(), timeout) -// resp, err := kvc.Put(ctx, "sample_key", "sample_value") -// cancel() -// if err != nil { -// // handle error! -// } -// // use the response -// -// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. 
-// Clients are safe for concurrent use by multiple goroutines. -// -// etcd client returns 2 types of errors: -// -// 1. context error: canceled or deadline exceeded. -// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go -// -// Here is the example code to handle client errors: -// -// resp, err := kvc.Put(ctx, "", "") -// if err != nil { -// if err == context.Canceled { -// // ctx is canceled by another routine -// } else if err == context.DeadlineExceeded { -// // ctx is attached with a deadline and it exceeded -// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok { -// // process (verr.Errors) -// } else { -// // bad cluster endpoints, which are not etcd servers -// } -// } -// -package clientv3 diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go deleted file mode 100644 index 8f4ba08ae69..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "context" - "sync" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/status" -) - -const minHealthRetryDuration = 3 * time.Second -const unknownService = "unknown service grpc.health.v1.Health" - -type healthCheckFunc func(ep string) (bool, error) - -// healthBalancer wraps a balancer so that it uses health checking -// to choose its endpoints. -type healthBalancer struct { - *simpleBalancer - - // healthCheck checks an endpoint's health. - healthCheck healthCheckFunc - healthCheckTimeout time.Duration - - // mu protects addrs, eps, unhealthy map, and stopc. - mu sync.RWMutex - - // addrs stores all grpc addresses associated with the balancer. - addrs []grpc.Address - - // eps stores all client endpoints - eps []string - - // unhealthy tracks the last unhealthy time of endpoints. - unhealthy map[string]time.Time - - stopc chan struct{} - stopOnce sync.Once - - hostPort2ep map[string]string - - wg sync.WaitGroup -} - -func newHealthBalancer(b *simpleBalancer, timeout time.Duration, hc healthCheckFunc) *healthBalancer { - hb := &healthBalancer{ - simpleBalancer: b, - healthCheck: hc, - eps: b.endpoints(), - addrs: eps2addrs(b.endpoints()), - hostPort2ep: getHostPort2ep(b.endpoints()), - unhealthy: make(map[string]time.Time), - stopc: make(chan struct{}), - } - if timeout < minHealthRetryDuration { - timeout = minHealthRetryDuration - } - hb.healthCheckTimeout = timeout - - hb.wg.Add(1) - go func() { - defer hb.wg.Done() - hb.updateUnhealthy(timeout) - }() - - return hb -} - -func (hb *healthBalancer) Up(addr grpc.Address) func(error) { - f, used := hb.up(addr) - if !used { - return f - } - return func(err error) { - // If connected to a black hole endpoint or a killed server, the gRPC ping - // timeout will induce a network I/O error, and retrying until success; - // finding healthy endpoint on retry could take several timeouts 
and redials. - // To avoid wasting retries, gray-list unhealthy endpoints. - hb.hostPortError(addr.Addr, err) - f(err) - } -} - -func (hb *healthBalancer) up(addr grpc.Address) (func(error), bool) { - if !hb.mayPin(addr) { - return func(err error) {}, false - } - return hb.simpleBalancer.up(addr) -} - -func (hb *healthBalancer) Close() error { - hb.stopOnce.Do(func() { close(hb.stopc) }) - hb.wg.Wait() - return hb.simpleBalancer.Close() -} - -func (hb *healthBalancer) updateAddrs(eps ...string) { - addrs, hostPort2ep := eps2addrs(eps), getHostPort2ep(eps) - hb.mu.Lock() - hb.addrs, hb.eps, hb.hostPort2ep = addrs, eps, hostPort2ep - hb.unhealthy = make(map[string]time.Time) - hb.mu.Unlock() - hb.simpleBalancer.updateAddrs(eps...) -} - -func (hb *healthBalancer) endpoint(host string) string { - hb.mu.RLock() - defer hb.mu.RUnlock() - return hb.hostPort2ep[host] -} - -func (hb *healthBalancer) endpoints() []string { - hb.mu.RLock() - defer hb.mu.RUnlock() - return hb.eps -} - -func (hb *healthBalancer) updateUnhealthy(timeout time.Duration) { - for { - select { - case <-time.After(timeout): - hb.mu.Lock() - for k, v := range hb.unhealthy { - if time.Since(v) > timeout { - delete(hb.unhealthy, k) - if logger.V(4) { - logger.Infof("clientv3/health-balancer: removes %q from unhealthy after %v", k, timeout) - } - } - } - hb.mu.Unlock() - eps := []string{} - for _, addr := range hb.liveAddrs() { - eps = append(eps, hb.endpoint(addr.Addr)) - } - hb.simpleBalancer.updateAddrs(eps...) 
- case <-hb.stopc: - return - } - } -} - -func (hb *healthBalancer) liveAddrs() []grpc.Address { - hb.mu.RLock() - defer hb.mu.RUnlock() - hbAddrs := hb.addrs - if len(hb.addrs) == 1 || len(hb.unhealthy) == 0 || len(hb.unhealthy) == len(hb.addrs) { - return hbAddrs - } - addrs := make([]grpc.Address, 0, len(hb.addrs)-len(hb.unhealthy)) - for _, addr := range hb.addrs { - if _, unhealthy := hb.unhealthy[addr.Addr]; !unhealthy { - addrs = append(addrs, addr) - } - } - return addrs -} - -func (hb *healthBalancer) hostPortError(hostPort string, err error) { - hb.mu.Lock() - if _, ok := hb.hostPort2ep[hostPort]; ok { - hb.unhealthy[hostPort] = time.Now() - if logger.V(4) { - logger.Infof("clientv3/health-balancer: marking %q as unhealthy (%q)", hostPort, err.Error()) - } - } - hb.mu.Unlock() -} - -func (hb *healthBalancer) mayPin(addr grpc.Address) bool { - hb.mu.RLock() - if _, ok := hb.hostPort2ep[addr.Addr]; !ok { // stale host:port - hb.mu.RUnlock() - return false - } - skip := len(hb.addrs) == 1 || len(hb.unhealthy) == 0 || len(hb.addrs) == len(hb.unhealthy) - failedTime, bad := hb.unhealthy[addr.Addr] - dur := hb.healthCheckTimeout - hb.mu.RUnlock() - if skip || !bad { - return true - } - // prevent isolated member's endpoint from being infinitely retried, as follows: - // 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm - // 2. balancer 'Up' unpins with grpc: failed with network I/O error - // 3. 
grpc-healthcheck still SERVING, thus retry to pin - // instead, return before grpc-healthcheck if failed within healthcheck timeout - if elapsed := time.Since(failedTime); elapsed < dur { - if logger.V(4) { - logger.Infof("clientv3/health-balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, dur) - } - return false - } - if ok, _ := hb.healthCheck(addr.Addr); ok { - hb.mu.Lock() - delete(hb.unhealthy, addr.Addr) - hb.mu.Unlock() - if logger.V(4) { - logger.Infof("clientv3/health-balancer: %q is healthy (health check success)", addr.Addr) - } - return true - } - hb.mu.Lock() - hb.unhealthy[addr.Addr] = time.Now() - hb.mu.Unlock() - if logger.V(4) { - logger.Infof("clientv3/health-balancer: %q becomes unhealthy (health check failed)", addr.Addr) - } - return false -} - -func grpcHealthCheck(client *Client, ep string) (bool, error) { - conn, err := client.dial(ep) - if err != nil { - return false, err - } - defer conn.Close() - cli := healthpb.NewHealthClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{}) - cancel() - if err != nil { - if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable { - if s.Message() == unknownService { - // etcd < v3.3.0 - return true, nil - } - } - return false, err - } - return resp.Status == healthpb.HealthCheckResponse_SERVING, nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go deleted file mode 100644 index b578d9ebe46..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type ( - CompactResponse pb.CompactionResponse - PutResponse pb.PutResponse - GetResponse pb.RangeResponse - DeleteResponse pb.DeleteRangeResponse - TxnResponse pb.TxnResponse -) - -type KV interface { - // Put puts a key-value pair into etcd. - // Note that key,value can be plain bytes array and string is - // an immutable representation of that bytes array. - // To get a string of bytes, do string([]byte{0x10, 0x20}). - Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) - - // Get retrieves keys. - // By default, Get will return the value for "key", if any. - // When passed WithRange(end), Get will return the keys in the range [key, end). - // When passed WithFromKey(), Get returns keys greater than or equal to key. - // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision; - // if the required revision is compacted, the request will fail with ErrCompacted . - // When passed WithLimit(limit), the number of returned keys is bounded by limit. - // When passed WithSort(), the keys will be sorted. - Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) - - // Delete deletes a key, or optionally using WithRange(end), [key, end). - Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) - - // Compact compacts etcd KV history before the given rev. 
- Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) - - // Do applies a single Op on KV without a transaction. - // Do is useful when creating arbitrary operations to be issued at a - // later time; the user can range over the operations, calling Do to - // execute them. Get/Put/Delete, on the other hand, are best suited - // for when the operation should be issued at the time of declaration. - Do(ctx context.Context, op Op) (OpResponse, error) - - // Txn creates a transaction. - Txn(ctx context.Context) Txn -} - -type OpResponse struct { - put *PutResponse - get *GetResponse - del *DeleteResponse - txn *TxnResponse -} - -func (op OpResponse) Put() *PutResponse { return op.put } -func (op OpResponse) Get() *GetResponse { return op.get } -func (op OpResponse) Del() *DeleteResponse { return op.del } -func (op OpResponse) Txn() *TxnResponse { return op.txn } - -func (resp *PutResponse) OpResponse() OpResponse { - return OpResponse{put: resp} -} -func (resp *GetResponse) OpResponse() OpResponse { - return OpResponse{get: resp} -} -func (resp *DeleteResponse) OpResponse() OpResponse { - return OpResponse{del: resp} -} -func (resp *TxnResponse) OpResponse() OpResponse { - return OpResponse{txn: resp} -} - -type kv struct { - remote pb.KVClient -} - -func NewKV(c *Client) KV { - return &kv{remote: RetryKVClient(c)} -} - -func NewKVFromKVClient(remote pb.KVClient) KV { - return &kv{remote: remote} -} - -func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { - r, err := kv.Do(ctx, OpPut(key, val, opts...)) - return r.put, toErr(ctx, err) -} - -func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { - r, err := kv.Do(ctx, OpGet(key, opts...)) - return r.get, toErr(ctx, err) -} - -func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { - r, err := kv.Do(ctx, OpDelete(key, opts...)) - return r.del, toErr(ctx, err) -} 
- -func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { - resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest()) - if err != nil { - return nil, toErr(ctx, err) - } - return (*CompactResponse)(resp), err -} - -func (kv *kv) Txn(ctx context.Context) Txn { - return &txn{ - kv: kv, - ctx: ctx, - } -} - -func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { - var err error - switch op.t { - case tRange: - var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest()) - if err == nil { - return OpResponse{get: (*GetResponse)(resp)}, nil - } - case tPut: - var resp *pb.PutResponse - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} - resp, err = kv.remote.Put(ctx, r) - if err == nil { - return OpResponse{put: (*PutResponse)(resp)}, nil - } - case tDeleteRange: - var resp *pb.DeleteRangeResponse - r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - resp, err = kv.remote.DeleteRange(ctx, r) - if err == nil { - return OpResponse{del: (*DeleteResponse)(resp)}, nil - } - case tTxn: - var resp *pb.TxnResponse - resp, err = kv.remote.Txn(ctx, op.toTxnRequest()) - if err == nil { - return OpResponse{txn: (*TxnResponse)(resp)}, nil - } - default: - panic("Unknown op") - } - return OpResponse{}, toErr(ctx, err) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go deleted file mode 100644 index aa9ea2d78aa..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "sync" - "time" - - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc/metadata" -) - -type ( - LeaseRevokeResponse pb.LeaseRevokeResponse - LeaseID int64 -) - -// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. -type LeaseGrantResponse struct { - *pb.ResponseHeader - ID LeaseID - TTL int64 - Error string -} - -// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. -type LeaseKeepAliveResponse struct { - *pb.ResponseHeader - ID LeaseID - TTL int64 -} - -// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. -type LeaseTimeToLiveResponse struct { - *pb.ResponseHeader - ID LeaseID `json:"id"` - - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. - TTL int64 `json:"ttl"` - - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - GrantedTTL int64 `json:"granted-ttl"` - - // Keys is the list of keys attached to this lease. - Keys [][]byte `json:"keys"` -} - -// LeaseStatus represents a lease status. -type LeaseStatus struct { - ID LeaseID `json:"id"` - // TODO: TTL int64 -} - -// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse. -type LeaseLeasesResponse struct { - *pb.ResponseHeader - Leases []LeaseStatus `json:"leases"` -} - -const ( - // defaultTTL is the assumed lease TTL used for the first keepalive - // deadline before the actual TTL is known to the client. 
- defaultTTL = 5 * time.Second - // a small buffer to store unsent lease responses. - leaseResponseChSize = 16 - // NoLease is a lease ID for the absence of a lease. - NoLease LeaseID = 0 - - // retryConnWait is how long to wait before retrying request due to an error - retryConnWait = 500 * time.Millisecond -) - -// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. -// -// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. -type ErrKeepAliveHalted struct { - Reason error -} - -func (e ErrKeepAliveHalted) Error() string { - s := "etcdclient: leases keep alive halted" - if e.Reason != nil { - s += ": " + e.Reason.Error() - } - return s -} - -type Lease interface { - // Grant creates a new lease. - Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) - - // Revoke revokes the given lease. - Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) - - // TimeToLive retrieves the lease information of the given lease ID. - TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) - - // Leases retrieves all leases. - Leases(ctx context.Context) (*LeaseLeasesResponse, error) - - // KeepAlive keeps the given lease alive forever. - KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) - - // KeepAliveOnce renews the lease once. In most of the cases, KeepAlive - // should be used instead of KeepAliveOnce. - KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) - - // Close releases all resources Lease keeps for efficient communication - // with the etcd server. 
- Close() error -} - -type lessor struct { - mu sync.Mutex // guards all fields - - // donec is closed and loopErr is set when recvKeepAliveLoop stops - donec chan struct{} - loopErr error - - remote pb.LeaseClient - - stream pb.Lease_LeaseKeepAliveClient - streamCancel context.CancelFunc - - stopCtx context.Context - stopCancel context.CancelFunc - - keepAlives map[LeaseID]*keepAlive - - // firstKeepAliveTimeout is the timeout for the first keepalive request - // before the actual TTL is known to the lease client - firstKeepAliveTimeout time.Duration - - // firstKeepAliveOnce ensures stream starts after first KeepAlive call. - firstKeepAliveOnce sync.Once -} - -// keepAlive multiplexes a keepalive for a lease over multiple channels -type keepAlive struct { - chs []chan<- *LeaseKeepAliveResponse - ctxs []context.Context - // deadline is the time the keep alive channels close if no response - deadline time.Time - // nextKeepAlive is when to send the next keep alive message - nextKeepAlive time.Time - // donec is closed on lease revoke, expiration, or cancel. 
- donec chan struct{} -} - -func NewLease(c *Client) Lease { - return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second) -} - -func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease { - l := &lessor{ - donec: make(chan struct{}), - keepAlives: make(map[LeaseID]*keepAlive), - remote: remote, - firstKeepAliveTimeout: keepAliveTimeout, - } - if l.firstKeepAliveTimeout == time.Second { - l.firstKeepAliveTimeout = defaultTTL - } - reqLeaderCtx := WithRequireLeader(context.Background()) - l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) - return l -} - -func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { - r := &pb.LeaseGrantRequest{TTL: ttl} - resp, err := l.remote.LeaseGrant(ctx, r) - if err == nil { - gresp := &LeaseGrantResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - Error: resp.Error, - } - return gresp, nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { - r := &pb.LeaseRevokeRequest{ID: int64(id)} - resp, err := l.remote.LeaseRevoke(ctx, r) - if err == nil { - return (*LeaseRevokeResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { - r := toLeaseTimeToLiveRequest(id, opts...) 
- resp, err := l.remote.LeaseTimeToLive(ctx, r) - if err == nil { - gresp := &LeaseTimeToLiveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - GrantedTTL: resp.GrantedTTL, - Keys: resp.Keys, - } - return gresp, nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { - resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}) - if err == nil { - leases := make([]LeaseStatus, len(resp.Leases)) - for i := range resp.Leases { - leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)} - } - return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { - ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize) - - l.mu.Lock() - // ensure that recvKeepAliveLoop is still running - select { - case <-l.donec: - err := l.loopErr - l.mu.Unlock() - close(ch) - return ch, ErrKeepAliveHalted{Reason: err} - default: - } - ka, ok := l.keepAlives[id] - if !ok { - // create fresh keep alive - ka = &keepAlive{ - chs: []chan<- *LeaseKeepAliveResponse{ch}, - ctxs: []context.Context{ctx}, - deadline: time.Now().Add(l.firstKeepAliveTimeout), - nextKeepAlive: time.Now(), - donec: make(chan struct{}), - } - l.keepAlives[id] = ka - } else { - // add channel and context to existing keep alive - ka.ctxs = append(ka.ctxs, ctx) - ka.chs = append(ka.chs, ch) - } - l.mu.Unlock() - - go l.keepAliveCtxCloser(id, ctx, ka.donec) - l.firstKeepAliveOnce.Do(func() { - go l.recvKeepAliveLoop() - go l.deadlineLoop() - }) - - return ch, nil -} - -func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - for { - resp, err := l.keepAliveOnce(ctx, id) - if err == nil { - if resp.TTL <= 0 { - err = rpctypes.ErrLeaseNotFound - } - return resp, err - } - if isHaltErr(ctx, err) { - return nil, 
toErr(ctx, err) - } - } -} - -func (l *lessor) Close() error { - l.stopCancel() - // close for synchronous teardown if stream goroutines never launched - l.firstKeepAliveOnce.Do(func() { close(l.donec) }) - <-l.donec - return nil -} - -func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) { - select { - case <-donec: - return - case <-l.donec: - return - case <-ctx.Done(): - } - - l.mu.Lock() - defer l.mu.Unlock() - - ka, ok := l.keepAlives[id] - if !ok { - return - } - - // close channel and remove context if still associated with keep alive - for i, c := range ka.ctxs { - if c == ctx { - close(ka.chs[i]) - ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) - ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) - break - } - } - // remove if no one more listeners - if len(ka.chs) == 0 { - delete(l.keepAlives, id) - } -} - -// closeRequireLeader scans keepAlives for ctxs that have require leader -// and closes the associated channels. -func (l *lessor) closeRequireLeader() { - l.mu.Lock() - defer l.mu.Unlock() - for _, ka := range l.keepAlives { - reqIdxs := 0 - // find all required leader channels, close, mark as nil - for i, ctx := range ka.ctxs { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - continue - } - ks := md[rpctypes.MetadataRequireLeaderKey] - if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { - continue - } - close(ka.chs[i]) - ka.chs[i] = nil - reqIdxs++ - } - if reqIdxs == 0 { - continue - } - // remove all channels that required a leader from keepalive - newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) - newCtxs := make([]context.Context, len(newChs)) - newIdx := 0 - for i := range ka.chs { - if ka.chs[i] == nil { - continue - } - newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] - newIdx++ - } - ka.chs, ka.ctxs = newChs, newCtxs - } -} - -func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - cctx, cancel := 
context.WithCancel(ctx) - defer cancel() - - stream, err := l.remote.LeaseKeepAlive(cctx) - if err != nil { - return nil, toErr(ctx, err) - } - - err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) - if err != nil { - return nil, toErr(ctx, err) - } - - resp, rerr := stream.Recv() - if rerr != nil { - return nil, toErr(ctx, rerr) - } - - karesp := &LeaseKeepAliveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - } - return karesp, nil -} - -func (l *lessor) recvKeepAliveLoop() (gerr error) { - defer func() { - l.mu.Lock() - close(l.donec) - l.loopErr = gerr - for _, ka := range l.keepAlives { - ka.close() - } - l.keepAlives = make(map[LeaseID]*keepAlive) - l.mu.Unlock() - }() - - for { - stream, err := l.resetRecv() - if err != nil { - if canceledByCaller(l.stopCtx, err) { - return err - } - } else { - for { - resp, err := stream.Recv() - if err != nil { - if canceledByCaller(l.stopCtx, err) { - return err - } - - if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { - l.closeRequireLeader() - } - break - } - - l.recvKeepAlive(resp) - } - } - - select { - case <-time.After(retryConnWait): - continue - case <-l.stopCtx.Done(): - return l.stopCtx.Err() - } - } -} - -// resetRecv opens a new lease stream and starts sending keep alive requests. 
-func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { - sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx) - if err != nil { - cancel() - return nil, err - } - - l.mu.Lock() - defer l.mu.Unlock() - if l.stream != nil && l.streamCancel != nil { - l.streamCancel() - } - - l.streamCancel = cancel - l.stream = stream - - go l.sendKeepAliveLoop(stream) - return stream, nil -} - -// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse -func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { - karesp := &LeaseKeepAliveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - } - - l.mu.Lock() - defer l.mu.Unlock() - - ka, ok := l.keepAlives[karesp.ID] - if !ok { - return - } - - if karesp.TTL <= 0 { - // lease expired; close all keep alive channels - delete(l.keepAlives, karesp.ID) - ka.close() - return - } - - // send update to all channels - nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0) - ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second) - for _, ch := range ka.chs { - select { - case ch <- karesp: - ka.nextKeepAlive = nextKeepAlive - default: - } - } -} - -// deadlineLoop reaps any keep alive channels that have not received a response -// within the lease TTL -func (l *lessor) deadlineLoop() { - for { - select { - case <-time.After(time.Second): - case <-l.donec: - return - } - now := time.Now() - l.mu.Lock() - for id, ka := range l.keepAlives { - if ka.deadline.Before(now) { - // waited too long for response; lease may be expired - ka.close() - delete(l.keepAlives, id) - } - } - l.mu.Unlock() - } -} - -// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. 
-func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { - for { - var tosend []LeaseID - - now := time.Now() - l.mu.Lock() - for id, ka := range l.keepAlives { - if ka.nextKeepAlive.Before(now) { - tosend = append(tosend, id) - } - } - l.mu.Unlock() - - for _, id := range tosend { - r := &pb.LeaseKeepAliveRequest{ID: int64(id)} - if err := stream.Send(r); err != nil { - // TODO do something with this error? - return - } - } - - select { - case <-time.After(500 * time.Millisecond): - case <-stream.Context().Done(): - return - case <-l.donec: - return - case <-l.stopCtx.Done(): - return - } - } -} - -func (ka *keepAlive) close() { - close(ka.donec) - for _, ch := range ka.chs { - close(ch) - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go deleted file mode 100644 index 784c395c765..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "io/ioutil" - "sync" - - "google.golang.org/grpc/grpclog" -) - -// Logger is the logger used by client library. -// It implements grpclog.LoggerV2 interface. 
-type Logger grpclog.LoggerV2 - -var ( - logger settableLogger -) - -type settableLogger struct { - l grpclog.LoggerV2 - mu sync.RWMutex -} - -func init() { - // disable client side logs by default - logger.mu.Lock() - logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard) - - // logger has to override the grpclog at initialization so that - // any changes to the grpclog go through logger with locking - // instead of through SetLogger - // - // now updates only happen through settableLogger.set - grpclog.SetLoggerV2(&logger) - logger.mu.Unlock() -} - -// SetLogger sets client-side Logger. By default, logs are disabled. -func SetLogger(l Logger) { - logger.set(l) -} - -// GetLogger returns the current logger. -func GetLogger() Logger { - return logger.get() -} - -func (s *settableLogger) set(l Logger) { - s.mu.Lock() - logger.l = l - s.mu.Unlock() -} - -func (s *settableLogger) get() Logger { - s.mu.RLock() - l := logger.l - s.mu.RUnlock() - return l -} - -// implement the grpclog.LoggerV2 interface - -func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } -func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } -func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } -func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } -func (s *settableLogger) Warningf(format string, args ...interface{}) { - s.get().Warningf(format, args...) -} -func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } -func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } -func (s *settableLogger) Errorf(format string, args ...interface{}) { - s.get().Errorf(format, args...) -} -func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } -func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) 
} -func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } -func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } -func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } -func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) } -func (s *settableLogger) V(l int) bool { return s.get().V(l) } diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go deleted file mode 100644 index 25abc9c9100..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "io" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type ( - DefragmentResponse pb.DefragmentResponse - AlarmResponse pb.AlarmResponse - AlarmMember pb.AlarmMember - StatusResponse pb.StatusResponse - HashKVResponse pb.HashKVResponse - MoveLeaderResponse pb.MoveLeaderResponse -) - -type Maintenance interface { - // AlarmList gets all active alarms. - AlarmList(ctx context.Context) (*AlarmResponse, error) - - // AlarmDisarm disarms a given alarm. 
- AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) - - // Defragment releases wasted space from internal fragmentation on a given etcd member. - // Defragment is only needed when deleting a large number of keys and want to reclaim - // the resources. - // Defragment is an expensive operation. User should avoid defragmenting multiple members - // at the same time. - // To defragment multiple members in the cluster, user need to call defragment multiple - // times with different endpoints. - Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) - - // Status gets the status of the endpoint. - Status(ctx context.Context, endpoint string) (*StatusResponse, error) - - // HashKV returns a hash of the KV state at the time of the RPC. - // If revision is zero, the hash is computed on all keys. If the revision - // is non-zero, the hash is computed on all keys at or below the given revision. - HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) - - // Snapshot provides a reader for a point-in-time snapshot of etcd. - Snapshot(ctx context.Context) (io.ReadCloser, error) - - // MoveLeader requests current leader to transfer its leadership to the transferee. - // Request must be made to the leader. 
- MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) -} - -type maintenance struct { - dial func(endpoint string) (pb.MaintenanceClient, func(), error) - remote pb.MaintenanceClient -} - -func NewMaintenance(c *Client) Maintenance { - return &maintenance{ - dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { - conn, err := c.dial(endpoint) - if err != nil { - return nil, nil, err - } - cancel := func() { conn.Close() } - return RetryMaintenanceClient(c, conn), cancel, nil - }, - remote: RetryMaintenanceClient(c, c.conn), - } -} - -func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance { - return &maintenance{ - dial: func(string) (pb.MaintenanceClient, func(), error) { - return remote, func() {}, nil - }, - remote: remote, - } -} - -func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { - req := &pb.AlarmRequest{ - Action: pb.AlarmRequest_GET, - MemberID: 0, // all - Alarm: pb.AlarmType_NONE, // all - } - resp, err := m.remote.Alarm(ctx, req) - if err == nil { - return (*AlarmResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { - req := &pb.AlarmRequest{ - Action: pb.AlarmRequest_DEACTIVATE, - MemberID: am.MemberID, - Alarm: am.Alarm, - } - - if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { - ar, err := m.AlarmList(ctx) - if err != nil { - return nil, toErr(ctx, err) - } - ret := AlarmResponse{} - for _, am := range ar.Alarms { - dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) - if derr != nil { - return nil, toErr(ctx, derr) - } - ret.Alarms = append(ret.Alarms, dresp.Alarms...) 
- } - return &ret, nil - } - - resp, err := m.remote.Alarm(ctx, req) - if err == nil { - return (*AlarmResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}) - if err != nil { - return nil, toErr(ctx, err) - } - return (*DefragmentResponse)(resp), nil -} - -func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.Status(ctx, &pb.StatusRequest{}) - if err != nil { - return nil, toErr(ctx, err) - } - return (*StatusResponse)(resp), nil -} - -func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}) - if err != nil { - return nil, toErr(ctx, err) - } - return (*HashKVResponse)(resp), nil -} - -func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}) - if err != nil { - return nil, toErr(ctx, err) - } - - pr, pw := io.Pipe() - go func() { - for { - resp, err := ss.Recv() - if err != nil { - pw.CloseWithError(err) - return - } - if resp == nil && err == nil { - break - } - if _, werr := pw.Write(resp.Blob); werr != nil { - pw.CloseWithError(werr) - return - } - } - pw.Close() - }() - return pr, nil -} - -func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { - resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}) - return 
(*MoveLeaderResponse)(resp), toErr(ctx, err) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go deleted file mode 100644 index c6ec5bf5200..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - -type opType int - -const ( - // A default Op has opType 0, which is invalid. - tRange opType = iota + 1 - tPut - tDeleteRange - tTxn -) - -var ( - noPrefixEnd = []byte{0} -) - -// Op represents an Operation that kv can execute. -type Op struct { - t opType - key []byte - end []byte - - // for range - limit int64 - sort *SortOption - serializable bool - keysOnly bool - countOnly bool - minModRev int64 - maxModRev int64 - minCreateRev int64 - maxCreateRev int64 - - // for range, watch - rev int64 - - // for watch, put, delete - prevKV bool - - // for put - ignoreValue bool - ignoreLease bool - - // progressNotify is for progress updates. 
- progressNotify bool - // createdNotify is for created event - createdNotify bool - // filters for watchers - filterPut bool - filterDelete bool - - // for put - val []byte - leaseID LeaseID - - // txn - cmps []Cmp - thenOps []Op - elseOps []Op -} - -// accessors / mutators - -func (op Op) IsTxn() bool { return op.t == tTxn } -func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps } - -// KeyBytes returns the byte slice holding the Op's key. -func (op Op) KeyBytes() []byte { return op.key } - -// WithKeyBytes sets the byte slice for the Op's key. -func (op *Op) WithKeyBytes(key []byte) { op.key = key } - -// RangeBytes returns the byte slice holding with the Op's range end, if any. -func (op Op) RangeBytes() []byte { return op.end } - -// Rev returns the requested revision, if any. -func (op Op) Rev() int64 { return op.rev } - -// IsPut returns true iff the operation is a Put. -func (op Op) IsPut() bool { return op.t == tPut } - -// IsGet returns true iff the operation is a Get. -func (op Op) IsGet() bool { return op.t == tRange } - -// IsDelete returns true iff the operation is a Delete. -func (op Op) IsDelete() bool { return op.t == tDeleteRange } - -// IsSerializable returns true if the serializable field is true. -func (op Op) IsSerializable() bool { return op.serializable == true } - -// IsKeysOnly returns whether keysOnly is set. -func (op Op) IsKeysOnly() bool { return op.keysOnly == true } - -// IsCountOnly returns whether countOnly is set. -func (op Op) IsCountOnly() bool { return op.countOnly == true } - -// MinModRev returns the operation's minimum modify revision. -func (op Op) MinModRev() int64 { return op.minModRev } - -// MaxModRev returns the operation's maximum modify revision. -func (op Op) MaxModRev() int64 { return op.maxModRev } - -// MinCreateRev returns the operation's minimum create revision. 
-func (op Op) MinCreateRev() int64 { return op.minCreateRev } - -// MaxCreateRev returns the operation's maximum create revision. -func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } - -// WithRangeBytes sets the byte slice for the Op's range end. -func (op *Op) WithRangeBytes(end []byte) { op.end = end } - -// ValueBytes returns the byte slice holding the Op's value, if any. -func (op Op) ValueBytes() []byte { return op.val } - -// WithValueBytes sets the byte slice for the Op's value. -func (op *Op) WithValueBytes(v []byte) { op.val = v } - -func (op Op) toRangeRequest() *pb.RangeRequest { - if op.t != tRange { - panic("op.t != tRange") - } - r := &pb.RangeRequest{ - Key: op.key, - RangeEnd: op.end, - Limit: op.limit, - Revision: op.rev, - Serializable: op.serializable, - KeysOnly: op.keysOnly, - CountOnly: op.countOnly, - MinModRevision: op.minModRev, - MaxModRevision: op.maxModRev, - MinCreateRevision: op.minCreateRev, - MaxCreateRevision: op.maxCreateRev, - } - if op.sort != nil { - r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) - r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) - } - return r -} - -func (op Op) toTxnRequest() *pb.TxnRequest { - thenOps := make([]*pb.RequestOp, len(op.thenOps)) - for i, tOp := range op.thenOps { - thenOps[i] = tOp.toRequestOp() - } - elseOps := make([]*pb.RequestOp, len(op.elseOps)) - for i, eOp := range op.elseOps { - elseOps[i] = eOp.toRequestOp() - } - cmps := make([]*pb.Compare, len(op.cmps)) - for i := range op.cmps { - cmps[i] = (*pb.Compare)(&op.cmps[i]) - } - return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} -} - -func (op Op) toRequestOp() *pb.RequestOp { - switch op.t { - case tRange: - return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} - case tPut: - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} - return 
&pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} - case tDeleteRange: - r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} - case tTxn: - return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} - default: - panic("Unknown Op") - } -} - -func (op Op) isWrite() bool { - if op.t == tTxn { - for _, tOp := range op.thenOps { - if tOp.isWrite() { - return true - } - } - for _, tOp := range op.elseOps { - if tOp.isWrite() { - return true - } - } - return false - } - return op.t != tRange -} - -func OpGet(key string, opts ...OpOption) Op { - ret := Op{t: tRange, key: []byte(key)} - ret.applyOpts(opts) - return ret -} - -func OpDelete(key string, opts ...OpOption) Op { - ret := Op{t: tDeleteRange, key: []byte(key)} - ret.applyOpts(opts) - switch { - case ret.leaseID != 0: - panic("unexpected lease in delete") - case ret.limit != 0: - panic("unexpected limit in delete") - case ret.rev != 0: - panic("unexpected revision in delete") - case ret.sort != nil: - panic("unexpected sort in delete") - case ret.serializable: - panic("unexpected serializable in delete") - case ret.countOnly: - panic("unexpected countOnly in delete") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in delete") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in delete") - case ret.filterDelete, ret.filterPut: - panic("unexpected filter in delete") - case ret.createdNotify: - panic("unexpected createdNotify in delete") - } - return ret -} - -func OpPut(key, val string, opts ...OpOption) Op { - ret := Op{t: tPut, key: []byte(key), val: []byte(val)} - ret.applyOpts(opts) - switch { - case ret.end != nil: - panic("unexpected range in put") - case ret.limit != 0: - panic("unexpected limit in put") - case ret.rev != 0: - panic("unexpected revision in put") - case 
ret.sort != nil: - panic("unexpected sort in put") - case ret.serializable: - panic("unexpected serializable in put") - case ret.countOnly: - panic("unexpected countOnly in put") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in put") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in put") - case ret.filterDelete, ret.filterPut: - panic("unexpected filter in put") - case ret.createdNotify: - panic("unexpected createdNotify in put") - } - return ret -} - -func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { - return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} -} - -func opWatch(key string, opts ...OpOption) Op { - ret := Op{t: tRange, key: []byte(key)} - ret.applyOpts(opts) - switch { - case ret.leaseID != 0: - panic("unexpected lease in watch") - case ret.limit != 0: - panic("unexpected limit in watch") - case ret.sort != nil: - panic("unexpected sort in watch") - case ret.serializable: - panic("unexpected serializable in watch") - case ret.countOnly: - panic("unexpected countOnly in watch") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in watch") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in watch") - } - return ret -} - -func (op *Op) applyOpts(opts []OpOption) { - for _, opt := range opts { - opt(op) - } -} - -// OpOption configures Operations like Get, Put, Delete. -type OpOption func(*Op) - -// WithLease attaches a lease ID to a key in 'Put' request. -func WithLease(leaseID LeaseID) OpOption { - return func(op *Op) { op.leaseID = leaseID } -} - -// WithLimit limits the number of results to return from 'Get' request. -// If WithLimit is given a 0 limit, it is treated as no limit. -func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } - -// WithRev specifies the store revision for 'Get' request. -// Or the start revision of 'Watch' request. 
-func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } - -// WithSort specifies the ordering in 'Get' request. It requires -// 'WithRange' and/or 'WithPrefix' to be specified too. -// 'target' specifies the target to sort by: key, version, revisions, value. -// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'. -func WithSort(target SortTarget, order SortOrder) OpOption { - return func(op *Op) { - if target == SortByKey && order == SortAscend { - // If order != SortNone, server fetches the entire key-space, - // and then applies the sort and limit, if provided. - // Since by default the server returns results sorted by keys - // in lexicographically ascending order, the client should ignore - // SortOrder if the target is SortByKey. - order = SortNone - } - op.sort = &SortOption{target, order} - } -} - -// GetPrefixRangeEnd gets the range end of the prefix. -// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. -func GetPrefixRangeEnd(prefix string) string { - return string(getPrefix([]byte(prefix))) -} - -func getPrefix(key []byte) []byte { - end := make([]byte, len(key)) - copy(end, key) - for i := len(end) - 1; i >= 0; i-- { - if end[i] < 0xff { - end[i] = end[i] + 1 - end = end[:i+1] - return end - } - } - // next prefix does not exist (e.g., 0xffff); - // default to WithFromKey policy - return noPrefixEnd -} - -// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate -// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())' -// can return 'foo1', 'foo2', and so on. -func WithPrefix() OpOption { - return func(op *Op) { - if len(op.key) == 0 { - op.key, op.end = []byte{0}, []byte{0} - return - } - op.end = getPrefix(op.key) - } -} - -// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests. -// For example, 'Get' requests with 'WithRange(end)' returns -// the keys in the range [key, end). -// endKey must be lexicographically greater than start key. 
-func WithRange(endKey string) OpOption { - return func(op *Op) { op.end = []byte(endKey) } -} - -// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests -// to be equal or greater than the key in the argument. -func WithFromKey() OpOption { return WithRange("\x00") } - -// WithSerializable makes 'Get' request serializable. By default, -// it's linearizable. Serializable requests are better for lower latency -// requirement. -func WithSerializable() OpOption { - return func(op *Op) { op.serializable = true } -} - -// WithKeysOnly makes the 'Get' request return only the keys and the corresponding -// values will be omitted. -func WithKeysOnly() OpOption { - return func(op *Op) { op.keysOnly = true } -} - -// WithCountOnly makes the 'Get' request return only the count of keys. -func WithCountOnly() OpOption { - return func(op *Op) { op.countOnly = true } -} - -// WithMinModRev filters out keys for Get with modification revisions less than the given revision. -func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } } - -// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision. -func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } } - -// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision. -func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } } - -// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision. -func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } } - -// WithFirstCreate gets the key with the oldest creation revision in the request range. -func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) } - -// WithLastCreate gets the key with the latest creation revision in the request range. 
-func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) } - -// WithFirstKey gets the lexically first key in the request range. -func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) } - -// WithLastKey gets the lexically last key in the request range. -func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) } - -// WithFirstRev gets the key with the oldest modification revision in the request range. -func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) } - -// WithLastRev gets the key with the latest modification revision in the request range. -func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) } - -// withTop gets the first key over the get's prefix given a sort order -func withTop(target SortTarget, order SortOrder) []OpOption { - return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)} -} - -// WithProgressNotify makes watch server send periodic progress updates -// every 10 minutes when there is no incoming events. -// Progress updates have zero events in WatchResponse. -func WithProgressNotify() OpOption { - return func(op *Op) { - op.progressNotify = true - } -} - -// WithCreatedNotify makes watch server sends the created event. -func WithCreatedNotify() OpOption { - return func(op *Op) { - op.createdNotify = true - } -} - -// WithFilterPut discards PUT events from the watcher. -func WithFilterPut() OpOption { - return func(op *Op) { op.filterPut = true } -} - -// WithFilterDelete discards DELETE events from the watcher. -func WithFilterDelete() OpOption { - return func(op *Op) { op.filterDelete = true } -} - -// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted, -// nothing will be returned. -func WithPrevKV() OpOption { - return func(op *Op) { - op.prevKV = true - } -} - -// WithIgnoreValue updates the key using its current value. 
-// This option can not be combined with non-empty values. -// Returns an error if the key does not exist. -func WithIgnoreValue() OpOption { - return func(op *Op) { - op.ignoreValue = true - } -} - -// WithIgnoreLease updates the key using its current lease. -// This option can not be combined with WithLease. -// Returns an error if the key does not exist. -func WithIgnoreLease() OpOption { - return func(op *Op) { - op.ignoreLease = true - } -} - -// LeaseOp represents an Operation that lease can execute. -type LeaseOp struct { - id LeaseID - - // for TimeToLive - attachedKeys bool -} - -// LeaseOption configures lease operations. -type LeaseOption func(*LeaseOp) - -func (op *LeaseOp) applyOpts(opts []LeaseOption) { - for _, opt := range opts { - opt(op) - } -} - -// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID. -func WithAttachedKeys() LeaseOption { - return func(op *LeaseOp) { op.attachedKeys = true } -} - -func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest { - ret := &LeaseOp{id: id} - ret.applyOpts(opts) - return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys} -} diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go deleted file mode 100644 index c6ef585b5b4..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import "context" - -// TODO: remove this when "FailFast=false" is fixed. -// See https://github.com/grpc/grpc-go/issues/1532. -func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error { - select { - case <-ready: - return nil - case <-rpcCtx.Done(): - return rpcCtx.Err() - case <-clientCtx.Done(): - return clientCtx.Err() - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go deleted file mode 100644 index e6d17d0320e..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ /dev/null @@ -1,495 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "context" - - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type rpcFunc func(ctx context.Context) error -type retryRPCFunc func(context.Context, rpcFunc) error -type retryStopErrFunc func(error) bool - -func isRepeatableStopError(err error) bool { - eErr := rpctypes.Error(err) - // always stop retry on etcd errors - if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { - return true - } - // only retry if unavailable - ev, _ := status.FromError(err) - return ev.Code() != codes.Unavailable -} - -func isNonRepeatableStopError(err error) bool { - ev, _ := status.FromError(err) - if ev.Code() != codes.Unavailable { - return true - } - return rpctypes.ErrorDesc(err) != "there is no address available" -} - -func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc { - return func(rpcCtx context.Context, f rpcFunc) error { - for { - if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil { - return err - } - pinned := c.balancer.pinned() - err := f(rpcCtx) - if err == nil { - return nil - } - if logger.V(4) { - logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) - } - - if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) { - // mark this before endpoint switch is triggered - c.balancer.hostPortError(pinned, err) - c.balancer.next() - if logger.V(4) { - logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) - } - } - - if isStop(err) { - return err - } - } - } -} - -func (c *Client) newAuthRetryWrapper() retryRPCFunc { - return func(rpcCtx context.Context, f rpcFunc) error { - for { - pinned := c.balancer.pinned() - err := f(rpcCtx) - if err == nil { - 
return nil - } - if logger.V(4) { - logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) - } - // always stop retry on etcd errors other than invalid auth token - if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { - gterr := c.getToken(rpcCtx) - if gterr != nil { - if logger.V(4) { - logger.Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) - } - return err // return the original error for simplicity - } - continue - } - return err - } - } -} - -// RetryKVClient implements a KVClient. -func RetryKVClient(c *Client) pb.KVClient { - repeatableRetry := c.newRetryWrapper(isRepeatableStopError) - nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) - conn := pb.NewKVClient(c.conn) - retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry} - retryAuthWrapper := c.newAuthRetryWrapper() - return &retryKVClient{ - &nonRepeatableKVClient{retryBasic, retryAuthWrapper}, - retryAuthWrapper} -} - -type retryKVClient struct { - *nonRepeatableKVClient - repeatableRetry retryRPCFunc -} - -func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - err = rkv.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Range(rctx, in, opts...) - return err - }) - return resp, err -} - -type nonRepeatableKVClient struct { - kc pb.KVClient - nonRepeatableRetry retryRPCFunc -} - -func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { - err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Put(rctx, in, opts...) 
- return err - }) - return resp, err -} - -func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { - err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.DeleteRange(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { - // TODO: repeatableRetry if read-only txn - err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Txn(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { - err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Compact(rctx, in, opts...) - return err - }) - return resp, err -} - -type retryLeaseClient struct { - lc pb.LeaseClient - repeatableRetry retryRPCFunc -} - -// RetryLeaseClient implements a LeaseClient. -func RetryLeaseClient(c *Client) pb.LeaseClient { - retry := &retryLeaseClient{ - pb.NewLeaseClient(c.conn), - c.newRetryWrapper(isRepeatableStopError), - } - return &retryLeaseClient{retry, c.newAuthRetryWrapper()} -} - -func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { - err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...) 
- return err - }) - return resp, err -} - -func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) { - err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rlc.lc.LeaseLeases(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rlc.lc.LeaseGrant(rctx, in, opts...) - return err - }) - return resp, err - -} - -func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { - err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { - stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...) - return err - }) - return stream, err -} - -type retryClusterClient struct { - *nonRepeatableClusterClient - repeatableRetry retryRPCFunc -} - -// RetryClusterClient implements a ClusterClient. 
-func RetryClusterClient(c *Client) pb.ClusterClient { - repeatableRetry := c.newRetryWrapper(isRepeatableStopError) - nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) - cc := pb.NewClusterClient(c.conn) - return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry} -} - -func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { - err = rcc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberList(rctx, in, opts...) - return err - }) - return resp, err -} - -type nonRepeatableClusterClient struct { - cc pb.ClusterClient - nonRepeatableRetry retryRPCFunc -} - -func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { - err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberAdd(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { - err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberRemove(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { - err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberUpdate(rctx, in, opts...) - return err - }) - return resp, err -} - -// RetryMaintenanceClient implements a Maintenance. 
-func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { - repeatableRetry := c.newRetryWrapper(isRepeatableStopError) - nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) - mc := pb.NewMaintenanceClient(conn) - return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry} -} - -type retryMaintenanceClient struct { - *nonRepeatableMaintenanceClient - repeatableRetry retryRPCFunc -} - -func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { - err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Alarm(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { - err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Status(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { - err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Hash(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) { - err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.HashKV(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { - err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { - stream, err = rmc.mc.Snapshot(rctx, in, opts...) 
- return err - }) - return stream, err -} - -func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) { - err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.MoveLeader(rctx, in, opts...) - return err - }) - return resp, err -} - -type nonRepeatableMaintenanceClient struct { - mc pb.MaintenanceClient - nonRepeatableRetry retryRPCFunc -} - -func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { - err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Defragment(rctx, in, opts...) - return err - }) - return resp, err -} - -type retryAuthClient struct { - *nonRepeatableAuthClient - repeatableRetry retryRPCFunc -} - -// RetryAuthClient implements a AuthClient. -func RetryAuthClient(c *Client) pb.AuthClient { - repeatableRetry := c.newRetryWrapper(isRepeatableStopError) - nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) - ac := pb.NewAuthClient(c.conn) - return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry} -} - -func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { - err = rac.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserList(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { - err = rac.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserGet(rctx, in, opts...) 
- return err - }) - return resp, err -} - -func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { - err = rac.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleGet(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { - err = rac.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleList(rctx, in, opts...) - return err - }) - return resp, err -} - -type nonRepeatableAuthClient struct { - ac pb.AuthClient - nonRepeatableRetry retryRPCFunc -} - -func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.AuthEnable(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.AuthDisable(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserAdd(rctx, in, opts...) 
- return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserDelete(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserChangePassword(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserGrantRole(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserRevokeRole(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleAdd(rctx, in, opts...) 
- return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleDelete(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.Authenticate(rctx, in, opts...) - return err - }) - return resp, err -} diff --git a/vendor/github.com/coreos/etcd/clientv3/sort.go b/vendor/github.com/coreos/etcd/clientv3/sort.go deleted file mode 100644 index 2bb9d9a13b7..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/sort.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -type SortTarget int -type SortOrder int - -const ( - SortNone SortOrder = iota - SortAscend - SortDescend -) - -const ( - SortByKey SortTarget = iota - SortByVersion - SortByCreateRevision - SortByModRevision - SortByValue -) - -type SortOption struct { - Target SortTarget - Order SortOrder -} diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go deleted file mode 100644 index 8169b621509..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "sync" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -// Txn is the interface that wraps mini-transactions. 
-// -// Txn(context.TODO()).If( -// Compare(Value(k1), ">", v1), -// Compare(Version(k1), "=", 2) -// ).Then( -// OpPut(k2,v2), OpPut(k3,v3) -// ).Else( -// OpPut(k4,v4), OpPut(k5,v5) -// ).Commit() -// -type Txn interface { - // If takes a list of comparison. If all comparisons passed in succeed, - // the operations passed into Then() will be executed. Or the operations - // passed into Else() will be executed. - If(cs ...Cmp) Txn - - // Then takes a list of operations. The Ops list will be executed, if the - // comparisons passed in If() succeed. - Then(ops ...Op) Txn - - // Else takes a list of operations. The Ops list will be executed, if the - // comparisons passed in If() fail. - Else(ops ...Op) Txn - - // Commit tries to commit the transaction. - Commit() (*TxnResponse, error) -} - -type txn struct { - kv *kv - ctx context.Context - - mu sync.Mutex - cif bool - cthen bool - celse bool - - isWrite bool - - cmps []*pb.Compare - - sus []*pb.RequestOp - fas []*pb.RequestOp -} - -func (txn *txn) If(cs ...Cmp) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.cif { - panic("cannot call If twice!") - } - - if txn.cthen { - panic("cannot call If after Then!") - } - - if txn.celse { - panic("cannot call If after Else!") - } - - txn.cif = true - - for i := range cs { - txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i])) - } - - return txn -} - -func (txn *txn) Then(ops ...Op) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.cthen { - panic("cannot call Then twice!") - } - if txn.celse { - panic("cannot call Then after Else!") - } - - txn.cthen = true - - for _, op := range ops { - txn.isWrite = txn.isWrite || op.isWrite() - txn.sus = append(txn.sus, op.toRequestOp()) - } - - return txn -} - -func (txn *txn) Else(ops ...Op) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.celse { - panic("cannot call Else twice!") - } - - txn.celse = true - - for _, op := range ops { - txn.isWrite = txn.isWrite || op.isWrite() - txn.fas = append(txn.fas, 
op.toRequestOp()) - } - - return txn -} - -func (txn *txn) Commit() (*TxnResponse, error) { - txn.mu.Lock() - defer txn.mu.Unlock() - - r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - - var resp *pb.TxnResponse - var err error - resp, err = txn.kv.remote.Txn(txn.ctx, r) - if err != nil { - return nil, toErr(txn.ctx, err) - } - return (*TxnResponse)(resp), nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go deleted file mode 100644 index 91e6db26a03..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "fmt" - "sync" - "time" - - v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -const ( - EventTypeDelete = mvccpb.DELETE - EventTypePut = mvccpb.PUT - - closeSendErrTimeout = 250 * time.Millisecond -) - -type Event mvccpb.Event - -type WatchChan <-chan WatchResponse - -type Watcher interface { - // Watch watches on a key or prefix. The watched events will be returned - // through the returned channel. 
If revisions waiting to be sent over the - // watch are compacted, then the watch will be canceled by the server, the - // client will post a compacted error watch response, and the channel will close. - Watch(ctx context.Context, key string, opts ...OpOption) WatchChan - - // Close closes the watcher and cancels all watch requests. - Close() error -} - -type WatchResponse struct { - Header pb.ResponseHeader - Events []*Event - - // CompactRevision is the minimum revision the watcher may receive. - CompactRevision int64 - - // Canceled is used to indicate watch failure. - // If the watch failed and the stream was about to close, before the channel is closed, - // the channel sends a final response that has Canceled set to true with a non-nil Err(). - Canceled bool - - // Created is used to indicate the creation of the watcher. - Created bool - - closeErr error - - // cancelReason is a reason of canceling watch - cancelReason string -} - -// IsCreate returns true if the event tells that the key is newly created. -func (e *Event) IsCreate() bool { - return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision -} - -// IsModify returns true if the event tells that a new value is put on existing key. -func (e *Event) IsModify() bool { - return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision -} - -// Err is the error value if this WatchResponse holds an error. -func (wr *WatchResponse) Err() error { - switch { - case wr.closeErr != nil: - return v3rpc.Error(wr.closeErr) - case wr.CompactRevision != 0: - return v3rpc.ErrCompacted - case wr.Canceled: - if len(wr.cancelReason) != 0 { - return v3rpc.Error(grpc.Errorf(codes.FailedPrecondition, "%s", wr.cancelReason)) - } - return v3rpc.ErrFutureRev - } - return nil -} - -// IsProgressNotify returns true if the WatchResponse is progress notification. 
-func (wr *WatchResponse) IsProgressNotify() bool { - return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 -} - -// watcher implements the Watcher interface -type watcher struct { - remote pb.WatchClient - - // mu protects the grpc streams map - mu sync.RWMutex - - // streams holds all the active grpc streams keyed by ctx value. - streams map[string]*watchGrpcStream -} - -// watchGrpcStream tracks all watch resources attached to a single grpc stream. -type watchGrpcStream struct { - owner *watcher - remote pb.WatchClient - - // ctx controls internal remote.Watch requests - ctx context.Context - // ctxKey is the key used when looking up this stream's context - ctxKey string - cancel context.CancelFunc - - // substreams holds all active watchers on this grpc stream - substreams map[int64]*watcherStream - // resuming holds all resuming watchers on this grpc stream - resuming []*watcherStream - - // reqc sends a watch request from Watch() to the main goroutine - reqc chan *watchRequest - // respc receives data from the watch client - respc chan *pb.WatchResponse - // donec closes to broadcast shutdown - donec chan struct{} - // errc transmits errors from grpc Recv to the watch stream reconnect logic - errc chan error - // closingc gets the watcherStream of closing watchers - closingc chan *watcherStream - // wg is Done when all substream goroutines have exited - wg sync.WaitGroup - - // resumec closes to signal that all substreams should begin resuming - resumec chan struct{} - // closeErr is the error that closed the watch stream - closeErr error -} - -// watchRequest is issued by the subscriber to start a new watcher -type watchRequest struct { - ctx context.Context - key string - end string - rev int64 - // send created notification event if this field is true - createdNotify bool - // progressNotify is for progress updates - progressNotify bool - // filters is the list of events to filter out - filters 
[]pb.WatchCreateRequest_FilterType - // get the previous key-value pair before the event happens - prevKV bool - // retc receives a chan WatchResponse once the watcher is established - retc chan chan WatchResponse -} - -// watcherStream represents a registered watcher -type watcherStream struct { - // initReq is the request that initiated this request - initReq watchRequest - - // outc publishes watch responses to subscriber - outc chan WatchResponse - // recvc buffers watch responses before publishing - recvc chan *WatchResponse - // donec closes when the watcherStream goroutine stops. - donec chan struct{} - // closing is set to true when stream should be scheduled to shutdown. - closing bool - // id is the registered watch id on the grpc stream - id int64 - - // buf holds all events received from etcd but not yet consumed by the client - buf []*WatchResponse -} - -func NewWatcher(c *Client) Watcher { - return NewWatchFromWatchClient(pb.NewWatchClient(c.conn)) -} - -func NewWatchFromWatchClient(wc pb.WatchClient) Watcher { - return &watcher{ - remote: wc, - streams: make(map[string]*watchGrpcStream), - } -} - -// never closes -var valCtxCh = make(chan struct{}) -var zeroTime = time.Unix(0, 0) - -// ctx with only the values; never Done -type valCtx struct{ context.Context } - -func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } -func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } -func (vc *valCtx) Err() error { return nil } - -func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { - ctx, cancel := context.WithCancel(&valCtx{inctx}) - wgs := &watchGrpcStream{ - owner: w, - remote: w.remote, - ctx: ctx, - ctxKey: streamKeyFromCtx(inctx), - cancel: cancel, - substreams: make(map[int64]*watcherStream), - respc: make(chan *pb.WatchResponse), - reqc: make(chan *watchRequest), - donec: make(chan struct{}), - errc: make(chan error, 1), - closingc: make(chan *watcherStream), - resumec: make(chan struct{}), - } - go 
wgs.run() - return wgs -} - -// Watch posts a watch request to run() and waits for a new watcher channel -func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { - ow := opWatch(key, opts...) - - var filters []pb.WatchCreateRequest_FilterType - if ow.filterPut { - filters = append(filters, pb.WatchCreateRequest_NOPUT) - } - if ow.filterDelete { - filters = append(filters, pb.WatchCreateRequest_NODELETE) - } - - wr := &watchRequest{ - ctx: ctx, - createdNotify: ow.createdNotify, - key: string(ow.key), - end: string(ow.end), - rev: ow.rev, - progressNotify: ow.progressNotify, - filters: filters, - prevKV: ow.prevKV, - retc: make(chan chan WatchResponse, 1), - } - - ok := false - ctxKey := streamKeyFromCtx(ctx) - - // find or allocate appropriate grpc watch stream - w.mu.Lock() - if w.streams == nil { - // closed - w.mu.Unlock() - ch := make(chan WatchResponse) - close(ch) - return ch - } - wgs := w.streams[ctxKey] - if wgs == nil { - wgs = w.newWatcherGrpcStream(ctx) - w.streams[ctxKey] = wgs - } - donec := wgs.donec - reqc := wgs.reqc - w.mu.Unlock() - - // couldn't create channel; return closed channel - closeCh := make(chan WatchResponse, 1) - - // submit request - select { - case reqc <- wr: - ok = true - case <-wr.ctx.Done(): - case <-donec: - if wgs.closeErr != nil { - closeCh <- WatchResponse{closeErr: wgs.closeErr} - break - } - // retry; may have dropped stream from no ctxs - return w.Watch(ctx, key, opts...) - } - - // receive channel - if ok { - select { - case ret := <-wr.retc: - return ret - case <-ctx.Done(): - case <-donec: - if wgs.closeErr != nil { - closeCh <- WatchResponse{closeErr: wgs.closeErr} - break - } - // retry; may have dropped stream from no ctxs - return w.Watch(ctx, key, opts...) 
- } - } - - close(closeCh) - return closeCh -} - -func (w *watcher) Close() (err error) { - w.mu.Lock() - streams := w.streams - w.streams = nil - w.mu.Unlock() - for _, wgs := range streams { - if werr := wgs.close(); werr != nil { - err = werr - } - } - return err -} - -func (w *watchGrpcStream) close() (err error) { - w.cancel() - <-w.donec - select { - case err = <-w.errc: - default: - } - return toErr(w.ctx, err) -} - -func (w *watcher) closeStream(wgs *watchGrpcStream) { - w.mu.Lock() - close(wgs.donec) - wgs.cancel() - if w.streams != nil { - delete(w.streams, wgs.ctxKey) - } - w.mu.Unlock() -} - -func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { - if resp.WatchId == -1 { - // failed; no channel - close(ws.recvc) - return - } - ws.id = resp.WatchId - w.substreams[ws.id] = ws -} - -func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { - select { - case ws.outc <- *resp: - case <-ws.initReq.ctx.Done(): - case <-time.After(closeSendErrTimeout): - } - close(ws.outc) -} - -func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { - // send channel response in case stream was never established - select { - case ws.initReq.retc <- ws.outc: - default: - } - // close subscriber's channel - if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { - go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr}) - } else if ws.outc != nil { - close(ws.outc) - } - if ws.id != -1 { - delete(w.substreams, ws.id) - return - } - for i := range w.resuming { - if w.resuming[i] == ws { - w.resuming[i] = nil - return - } - } -} - -// run is the root of the goroutines for managing a watcher client -func (w *watchGrpcStream) run() { - var wc pb.Watch_WatchClient - var closeErr error - - // substreams marked to close but goroutine still running; needed for - // avoiding double-closing recvc on grpc stream teardown - closing := make(map[*watcherStream]struct{}) - - defer func() { - 
w.closeErr = closeErr - // shutdown substreams and resuming substreams - for _, ws := range w.substreams { - if _, ok := closing[ws]; !ok { - close(ws.recvc) - closing[ws] = struct{}{} - } - } - for _, ws := range w.resuming { - if _, ok := closing[ws]; ws != nil && !ok { - close(ws.recvc) - closing[ws] = struct{}{} - } - } - w.joinSubstreams() - for range closing { - w.closeSubstream(<-w.closingc) - } - w.wg.Wait() - w.owner.closeStream(w) - }() - - // start a stream with the etcd grpc server - if wc, closeErr = w.newWatchClient(); closeErr != nil { - return - } - - cancelSet := make(map[int64]struct{}) - - for { - select { - // Watch() requested - case wreq := <-w.reqc: - outc := make(chan WatchResponse, 1) - ws := &watcherStream{ - initReq: *wreq, - id: -1, - outc: outc, - // unbuffered so resumes won't cause repeat events - recvc: make(chan *WatchResponse), - } - - ws.donec = make(chan struct{}) - w.wg.Add(1) - go w.serveSubstream(ws, w.resumec) - - // queue up for watcher creation/resume - w.resuming = append(w.resuming, ws) - if len(w.resuming) == 1 { - // head of resume queue, can register a new watcher - wc.Send(ws.initReq.toPB()) - } - // New events from the watch client - case pbresp := <-w.respc: - switch { - case pbresp.Created: - // response to head of queue creation - if ws := w.resuming[0]; ws != nil { - w.addSubstream(pbresp, ws) - w.dispatchEvent(pbresp) - w.resuming[0] = nil - } - if ws := w.nextResume(); ws != nil { - wc.Send(ws.initReq.toPB()) - } - case pbresp.Canceled && pbresp.CompactRevision == 0: - delete(cancelSet, pbresp.WatchId) - if ws, ok := w.substreams[pbresp.WatchId]; ok { - // signal to stream goroutine to update closingc - close(ws.recvc) - closing[ws] = struct{}{} - } - default: - // dispatch to appropriate watch stream - if ok := w.dispatchEvent(pbresp); ok { - break - } - // watch response on unexpected watch id; cancel id - if _, ok := cancelSet[pbresp.WatchId]; ok { - break - } - cancelSet[pbresp.WatchId] = struct{}{} - cr := 
&pb.WatchRequest_CancelRequest{ - CancelRequest: &pb.WatchCancelRequest{ - WatchId: pbresp.WatchId, - }, - } - req := &pb.WatchRequest{RequestUnion: cr} - wc.Send(req) - } - // watch client failed on Recv; spawn another if possible - case err := <-w.errc: - if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { - closeErr = err - return - } - if wc, closeErr = w.newWatchClient(); closeErr != nil { - return - } - if ws := w.nextResume(); ws != nil { - wc.Send(ws.initReq.toPB()) - } - cancelSet = make(map[int64]struct{}) - case <-w.ctx.Done(): - return - case ws := <-w.closingc: - w.closeSubstream(ws) - delete(closing, ws) - if len(w.substreams)+len(w.resuming) == 0 { - // no more watchers on this stream, shutdown - return - } - } - } -} - -// nextResume chooses the next resuming to register with the grpc stream. Abandoned -// streams are marked as nil in the queue since the head must wait for its inflight registration. -func (w *watchGrpcStream) nextResume() *watcherStream { - for len(w.resuming) != 0 { - if w.resuming[0] != nil { - return w.resuming[0] - } - w.resuming = w.resuming[1:len(w.resuming)] - } - return nil -} - -// dispatchEvent sends a WatchResponse to the appropriate watcher stream -func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { - events := make([]*Event, len(pbresp.Events)) - for i, ev := range pbresp.Events { - events[i] = (*Event)(ev) - } - wr := &WatchResponse{ - Header: *pbresp.Header, - Events: events, - CompactRevision: pbresp.CompactRevision, - Created: pbresp.Created, - Canceled: pbresp.Canceled, - cancelReason: pbresp.CancelReason, - } - ws, ok := w.substreams[pbresp.WatchId] - if !ok { - return false - } - select { - case ws.recvc <- wr: - case <-ws.donec: - return false - } - return true -} - -// serveWatchClient forwards messages from the grpc stream to run() -func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { - for { - resp, err := wc.Recv() - if err != nil { - select { - case 
w.errc <- err: - case <-w.donec: - } - return - } - select { - case w.respc <- resp: - case <-w.donec: - return - } - } -} - -// serveSubstream forwards watch responses from run() to the subscriber -func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { - if ws.closing { - panic("created substream goroutine but substream is closing") - } - - // nextRev is the minimum expected next revision - nextRev := ws.initReq.rev - resuming := false - defer func() { - if !resuming { - ws.closing = true - } - close(ws.donec) - if !resuming { - w.closingc <- ws - } - w.wg.Done() - }() - - emptyWr := &WatchResponse{} - for { - curWr := emptyWr - outc := ws.outc - - if len(ws.buf) > 0 { - curWr = ws.buf[0] - } else { - outc = nil - } - select { - case outc <- *curWr: - if ws.buf[0].Err() != nil { - return - } - ws.buf[0] = nil - ws.buf = ws.buf[1:] - case wr, ok := <-ws.recvc: - if !ok { - // shutdown from closeSubstream - return - } - - if wr.Created { - if ws.initReq.retc != nil { - ws.initReq.retc <- ws.outc - // to prevent next write from taking the slot in buffered channel - // and posting duplicate create events - ws.initReq.retc = nil - - // send first creation event only if requested - if ws.initReq.createdNotify { - ws.outc <- *wr - } - // once the watch channel is returned, a current revision - // watch must resume at the store revision. This is necessary - // for the following case to work as expected: - // wch := m1.Watch("a") - // m2.Put("a", "b") - // <-wch - // If the revision is only bound on the first observed event, - // if wch is disconnected before the Put is issued, then reconnects - // after it is committed, it'll miss the Put. 
- if ws.initReq.rev == 0 { - nextRev = wr.Header.Revision - } - } - } else { - // current progress of watch; <= store revision - nextRev = wr.Header.Revision - } - - if len(wr.Events) > 0 { - nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 - } - ws.initReq.rev = nextRev - - // created event is already sent above, - // watcher should not post duplicate events - if wr.Created { - continue - } - - // TODO pause channel if buffer gets too large - ws.buf = append(ws.buf, wr) - case <-w.ctx.Done(): - return - case <-ws.initReq.ctx.Done(): - return - case <-resumec: - resuming = true - return - } - } - // lazily send cancel message if events on missing id -} - -func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { - // mark all substreams as resuming - close(w.resumec) - w.resumec = make(chan struct{}) - w.joinSubstreams() - for _, ws := range w.substreams { - ws.id = -1 - w.resuming = append(w.resuming, ws) - } - // strip out nils, if any - var resuming []*watcherStream - for _, ws := range w.resuming { - if ws != nil { - resuming = append(resuming, ws) - } - } - w.resuming = resuming - w.substreams = make(map[int64]*watcherStream) - - // connect to grpc stream while accepting watcher cancelation - stopc := make(chan struct{}) - donec := w.waitCancelSubstreams(stopc) - wc, err := w.openWatchClient() - close(stopc) - <-donec - - // serve all non-closing streams, even if there's a client error - // so that the teardown path can shutdown the streams as expected. 
- for _, ws := range w.resuming { - if ws.closing { - continue - } - ws.donec = make(chan struct{}) - w.wg.Add(1) - go w.serveSubstream(ws, w.resumec) - } - - if err != nil { - return nil, v3rpc.Error(err) - } - - // receive data from new grpc stream - go w.serveWatchClient(wc) - return wc, nil -} - -func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { - var wg sync.WaitGroup - wg.Add(len(w.resuming)) - donec := make(chan struct{}) - for i := range w.resuming { - go func(ws *watcherStream) { - defer wg.Done() - if ws.closing { - if ws.initReq.ctx.Err() != nil && ws.outc != nil { - close(ws.outc) - ws.outc = nil - } - return - } - select { - case <-ws.initReq.ctx.Done(): - // closed ws will be removed from resuming - ws.closing = true - close(ws.outc) - ws.outc = nil - w.wg.Add(1) - go func() { - defer w.wg.Done() - w.closingc <- ws - }() - case <-stopc: - } - }(w.resuming[i]) - } - go func() { - defer close(donec) - wg.Wait() - }() - return donec -} - -// joinSubstreams waits for all substream goroutines to complete. -func (w *watchGrpcStream) joinSubstreams() { - for _, ws := range w.substreams { - <-ws.donec - } - for _, ws := range w.resuming { - if ws != nil { - <-ws.donec - } - } -} - -// openWatchClient retries opening a watch client until success or halt. -// manually retry in case "ws==nil && err==nil" -// TODO: remove FailFast=false -func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { - for { - select { - case <-w.ctx.Done(): - if err == nil { - return nil, w.ctx.Err() - } - return nil, err - default: - } - if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil { - break - } - if isHaltErr(w.ctx, err) { - return nil, v3rpc.Error(err) - } - } - return ws, nil -} - -// toPB converts an internal watch request structure to its protobuf WatchRequest structure. 
-func (wr *watchRequest) toPB() *pb.WatchRequest { - req := &pb.WatchCreateRequest{ - StartRevision: wr.rev, - Key: []byte(wr.key), - RangeEnd: []byte(wr.end), - ProgressNotify: wr.progressNotify, - Filters: wr.filters, - PrevKv: wr.prevKV, - } - cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} - return &pb.WatchRequest{RequestUnion: cr} -} - -func streamKeyFromCtx(ctx context.Context) string { - if md, ok := metadata.FromOutgoingContext(ctx); ok { - return fmt.Sprintf("%+v", md) - } - return "" -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go deleted file mode 100644 index f72c6a644f3..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction. 
-package rpctypes diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go deleted file mode 100644 index 446e4f6b870..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpctypes - -import ( - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// server-side error -var ( - ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() - ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err() - ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err() - ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err() - ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err() - ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err() - ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err() - ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err() - ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space 
exceeded").Err() - - ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err() - ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() - - ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err() - ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err() - ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err() - ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err() - ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err() - - ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err() - ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err() - - ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err() - ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err() - ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err() - ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err() - ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err() - ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err() - ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err() - ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err() - ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err() - 
ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err() - ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err() - ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err() - ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err() - ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err() - - ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err() - ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err() - ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err() - ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err() - ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err() - ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err() - ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() - ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() - ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() - - errStringToError = map[string]error{ - ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, - ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, - ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, - ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, - - ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, - ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, - ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, - ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, - ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, - - ErrorDesc(ErrGRPCLeaseNotFound): 
ErrGRPCLeaseNotFound, - ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, - - ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist, - ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist, - ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, - ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, - ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound, - - ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, - ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, - - ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist, - ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist, - ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist, - ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty, - ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, - ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, - ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound, - ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, - ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, - ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, - ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, - ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, - ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, - ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, - - ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, - ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, - ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, - ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, - ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, - ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, - ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, - ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, - ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, - } -) - -// client-side error -var ( - ErrEmptyKey = Error(ErrGRPCEmptyKey) - ErrKeyNotFound = Error(ErrGRPCKeyNotFound) - ErrValueProvided = 
Error(ErrGRPCValueProvided) - ErrLeaseProvided = Error(ErrGRPCLeaseProvided) - ErrTooManyOps = Error(ErrGRPCTooManyOps) - ErrDuplicateKey = Error(ErrGRPCDuplicateKey) - ErrCompacted = Error(ErrGRPCCompacted) - ErrFutureRev = Error(ErrGRPCFutureRev) - ErrNoSpace = Error(ErrGRPCNoSpace) - - ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) - ErrLeaseExist = Error(ErrGRPCLeaseExist) - - ErrMemberExist = Error(ErrGRPCMemberExist) - ErrPeerURLExist = Error(ErrGRPCPeerURLExist) - ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) - ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs) - ErrMemberNotFound = Error(ErrGRPCMemberNotFound) - - ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) - ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) - - ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist) - ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist) - ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist) - ErrUserEmpty = Error(ErrGRPCUserEmpty) - ErrUserNotFound = Error(ErrGRPCUserNotFound) - ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist) - ErrRoleNotFound = Error(ErrGRPCRoleNotFound) - ErrAuthFailed = Error(ErrGRPCAuthFailed) - ErrPermissionDenied = Error(ErrGRPCPermissionDenied) - ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) - ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) - ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) - ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) - ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) - - ErrNoLeader = Error(ErrGRPCNoLeader) - ErrNotLeader = Error(ErrGRPCNotLeader) - ErrNotCapable = Error(ErrGRPCNotCapable) - ErrStopped = Error(ErrGRPCStopped) - ErrTimeout = Error(ErrGRPCTimeout) - ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) - ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) - ErrUnhealthy = Error(ErrGRPCUnhealthy) - ErrCorrupt = Error(ErrGRPCCorrupt) -) - -// EtcdError defines gRPC server errors. 
-// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323) -type EtcdError struct { - code codes.Code - desc string -} - -// Code returns grpc/codes.Code. -// TODO: define clientv3/codes.Code. -func (e EtcdError) Code() codes.Code { - return e.code -} - -func (e EtcdError) Error() string { - return e.desc -} - -func Error(err error) error { - if err == nil { - return nil - } - verr, ok := errStringToError[ErrorDesc(err)] - if !ok { // not gRPC error - return err - } - ev, ok := status.FromError(verr) - var desc string - if ok { - desc = ev.Message() - } else { - desc = verr.Error() - } - return EtcdError{code: ev.Code(), desc: desc} -} - -func ErrorDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go deleted file mode 100644 index 5c590e1aec9..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rpctypes - -var ( - MetadataRequireLeaderKey = "hasleader" - MetadataHasLeader = "true" -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go deleted file mode 100644 index e4007f5de73..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ /dev/null @@ -1,1052 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: etcdserver.proto -// DO NOT EDIT! - -/* - Package etcdserverpb is a generated protocol buffer package. - - It is generated from these files: - etcdserver.proto - raft_internal.proto - rpc.proto - - It has these top-level messages: - Request - Metadata - RequestHeader - InternalRaftRequest - EmptyResponse - InternalAuthenticateRequest - ResponseHeader - RangeRequest - RangeResponse - PutRequest - PutResponse - DeleteRangeRequest - DeleteRangeResponse - RequestOp - ResponseOp - Compare - TxnRequest - TxnResponse - CompactionRequest - CompactionResponse - HashRequest - HashKVRequest - HashKVResponse - HashResponse - SnapshotRequest - SnapshotResponse - WatchRequest - WatchCreateRequest - WatchCancelRequest - WatchResponse - LeaseGrantRequest - LeaseGrantResponse - LeaseRevokeRequest - LeaseRevokeResponse - LeaseKeepAliveRequest - LeaseKeepAliveResponse - LeaseTimeToLiveRequest - LeaseTimeToLiveResponse - LeaseLeasesRequest - LeaseStatus - LeaseLeasesResponse - Member - MemberAddRequest - MemberAddResponse - MemberRemoveRequest - MemberRemoveResponse - MemberUpdateRequest - MemberUpdateResponse - MemberListRequest - MemberListResponse - DefragmentRequest - DefragmentResponse - MoveLeaderRequest - MoveLeaderResponse - AlarmRequest - AlarmMember - AlarmResponse - StatusRequest - StatusResponse - AuthEnableRequest - AuthDisableRequest - AuthenticateRequest - AuthUserAddRequest - AuthUserGetRequest - AuthUserDeleteRequest - AuthUserChangePasswordRequest - AuthUserGrantRoleRequest - 
AuthUserRevokeRoleRequest - AuthRoleAddRequest - AuthRoleGetRequest - AuthUserListRequest - AuthRoleListRequest - AuthRoleDeleteRequest - AuthRoleGrantPermissionRequest - AuthRoleRevokePermissionRequest - AuthEnableResponse - AuthDisableResponse - AuthenticateResponse - AuthUserAddResponse - AuthUserGetResponse - AuthUserDeleteResponse - AuthUserChangePasswordResponse - AuthUserGrantRoleResponse - AuthUserRevokeRoleResponse - AuthRoleAddResponse - AuthRoleGetResponse - AuthRoleListResponse - AuthUserListResponse - AuthRoleDeleteResponse - AuthRoleGrantPermissionResponse - AuthRoleRevokePermissionResponse -*/ -package etcdserverpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Request struct { - ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` - Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` - Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` - Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` - Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` - PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` - PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` - PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` - Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` - Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` - Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` - Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` - Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` - Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` - Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` - Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` - Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} } - -type Metadata struct { - NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` - ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { return 
fileDescriptorEtcdserver, []int{1} } - -func init() { - proto.RegisterType((*Request)(nil), "etcdserverpb.Request") - proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") -} -func (m *Request) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Request) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) - dAtA[i] = 0x12 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) - i += copy(dAtA[i:], m.Method) - dAtA[i] = 0x1a - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x22 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) - i += copy(dAtA[i:], m.Val) - dAtA[i] = 0x28 - i++ - if m.Dir { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x32 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) - i += copy(dAtA[i:], m.PrevValue) - dAtA[i] = 0x38 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) - if m.PrevExist != nil { - dAtA[i] = 0x40 - i++ - if *m.PrevExist { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - dAtA[i] = 0x48 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) - dAtA[i] = 0x50 - i++ - if m.Wait { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x58 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) - dAtA[i] = 0x60 - i++ - if m.Recursive { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x68 - i++ - if m.Sorted { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x70 - i++ - if m.Quorum { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x78 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if m.Stream { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } 
- i++ - if m.Refresh != nil { - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - if *m.Refresh { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Metadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) - dAtA[i] = 0x10 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Etcdserver(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Etcdserver(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Request) Size() (n int) { - var l int - _ = l - n += 1 + sovEtcdserver(uint64(m.ID)) - l = len(m.Method) - n += 1 + l + sovEtcdserver(uint64(l)) - l = len(m.Path) - n += 1 + l + sovEtcdserver(uint64(l)) - l = len(m.Val) - n += 1 + l + sovEtcdserver(uint64(l)) - n += 2 - l = len(m.PrevValue) - n += 1 + l + sovEtcdserver(uint64(l)) - n += 1 + sovEtcdserver(uint64(m.PrevIndex)) - if m.PrevExist != nil { - 
n += 2 - } - n += 1 + sovEtcdserver(uint64(m.Expiration)) - n += 2 - n += 1 + sovEtcdserver(uint64(m.Since)) - n += 2 - n += 2 - n += 2 - n += 1 + sovEtcdserver(uint64(m.Time)) - n += 3 - if m.Refresh != nil { - n += 3 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Metadata) Size() (n int) { - var l int - _ = l - n += 1 + sovEtcdserver(uint64(m.NodeID)) - n += 1 + sovEtcdserver(uint64(m.ClusterID)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovEtcdserver(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozEtcdserver(x uint64) (n int) { - return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Request) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Request: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Method = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Val = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= 
(int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Dir = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PrevValue = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType) - } - m.PrevIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PrevIndex |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.PrevExist = &b - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType) - } - m.Expiration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Expiration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Wait", 
wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Wait = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) - } - m.Since = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Since |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Recursive = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Sorted = bool(v != 0) - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Quorum = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) - } - m.Time = 0 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Time |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stream = bool(v != 0) - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Refresh = &b - default: - iNdEx = preIndex - skippy, err := skipEtcdserver(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthEtcdserver - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) - } - m.NodeID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NodeID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) - } - m.ClusterID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClusterID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipEtcdserver(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthEtcdserver - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEtcdserver(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthEtcdserver - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipEtcdserver(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") - 
ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } - -var fileDescriptorEtcdserver = []byte{ - // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, - 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, - 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, - 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, - 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, - 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, - 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, - 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, - 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, - 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, - 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, - 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, - 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, - 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, - 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, - 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, - 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, - 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 
0xd6, - 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, - 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, - 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, - 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, - 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, - 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto deleted file mode 100644 index 25e0aca5d9f..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto2"; -package etcdserverpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; - -message Request { - optional uint64 ID = 1 [(gogoproto.nullable) = false]; - optional string Method = 2 [(gogoproto.nullable) = false]; - optional string Path = 3 [(gogoproto.nullable) = false]; - optional string Val = 4 [(gogoproto.nullable) = false]; - optional bool Dir = 5 [(gogoproto.nullable) = false]; - optional string PrevValue = 6 [(gogoproto.nullable) = false]; - optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false]; - optional bool PrevExist = 8 [(gogoproto.nullable) = true]; - optional int64 Expiration = 9 [(gogoproto.nullable) = false]; - optional bool Wait = 10 [(gogoproto.nullable) = false]; - optional uint64 Since = 11 [(gogoproto.nullable) = false]; - optional bool Recursive = 12 [(gogoproto.nullable) = false]; - optional bool Sorted = 13 [(gogoproto.nullable) = false]; - optional bool Quorum = 14 
[(gogoproto.nullable) = false]; - optional int64 Time = 15 [(gogoproto.nullable) = false]; - optional bool Stream = 16 [(gogoproto.nullable) = false]; - optional bool Refresh = 17 [(gogoproto.nullable) = true]; -} - -message Metadata { - optional uint64 NodeID = 1 [(gogoproto.nullable) = false]; - optional uint64 ClusterID = 2 [(gogoproto.nullable) = false]; -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go deleted file mode 100644 index 44a3b6f69eb..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ /dev/null @@ -1,2094 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: raft_internal.proto -// DO NOT EDIT! - -package etcdserverpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type RequestHeader struct { - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // username is a username that is associated with an auth token of gRPC connection - Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` - // auth_revision is a revision number of auth.authStore. It is not related to mvcc - AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` -} - -func (m *RequestHeader) Reset() { *m = RequestHeader{} } -func (m *RequestHeader) String() string { return proto.CompactTextString(m) } -func (*RequestHeader) ProtoMessage() {} -func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} } - -// An InternalRaftRequest is the union of all requests which can be -// sent via raft. 
-type InternalRaftRequest struct { - Header *RequestHeader `protobuf:"bytes,100,opt,name=header" json:"header,omitempty"` - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"` - Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` - Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"` - DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange" json:"delete_range,omitempty"` - Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"` - Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"` - LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant" json:"lease_grant,omitempty"` - LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke" json:"lease_revoke,omitempty"` - Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm" json:"alarm,omitempty"` - AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable" json:"auth_enable,omitempty"` - AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable" json:"auth_disable,omitempty"` - Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate" json:"authenticate,omitempty"` - AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd" json:"auth_user_add,omitempty"` - AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete" json:"auth_user_delete,omitempty"` - AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet" json:"auth_user_get,omitempty"` - AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword" json:"auth_user_change_password,omitempty"` - 
AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole" json:"auth_user_grant_role,omitempty"` - AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole" json:"auth_user_revoke_role,omitempty"` - AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList" json:"auth_user_list,omitempty"` - AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList" json:"auth_role_list,omitempty"` - AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd" json:"auth_role_add,omitempty"` - AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete" json:"auth_role_delete,omitempty"` - AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet" json:"auth_role_get,omitempty"` - AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission" json:"auth_role_grant_permission,omitempty"` - AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission" json:"auth_role_revoke_permission,omitempty"` -} - -func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } -func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } -func (*InternalRaftRequest) ProtoMessage() {} -func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} } - -type EmptyResponse struct { -} - -func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } -func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } -func (*EmptyResponse) ProtoMessage() {} -func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, 
[]int{2} } - -// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? -// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. -// For avoiding misusage the field, we have an internal version of AuthenticateRequest. -type InternalAuthenticateRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - // simple_token is generated in API layer (etcdserver/v3_server.go) - SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` -} - -func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } -func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } -func (*InternalAuthenticateRequest) ProtoMessage() {} -func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRaftInternal, []int{3} -} - -func init() { - proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") - proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") - proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") - proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") -} -func (m *RequestHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - } - if len(m.Username) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - } - if 
m.AuthRevision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) - } - return i, nil -} - -func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - } - if m.V2 != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.V2.Size())) - n1, err := m.V2.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.Range != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Range.Size())) - n2, err := m.Range.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.Put != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Put.Size())) - n3, err := m.Put.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.DeleteRange != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.DeleteRange.Size())) - n4, err := m.DeleteRange.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.Txn != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Txn.Size())) - n5, err := m.Txn.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if m.Compaction != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Compaction.Size())) - n6, err := m.Compaction.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.LeaseGrant != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseGrant.Size())) - n7, err := m.LeaseGrant.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if 
m.LeaseRevoke != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseRevoke.Size())) - n8, err := m.LeaseRevoke.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.Alarm != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Alarm.Size())) - n9, err := m.Alarm.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if m.Header != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x6 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size())) - n10, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - if m.AuthEnable != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x3e - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthEnable.Size())) - n11, err := m.AuthEnable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - if m.AuthDisable != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x3f - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthDisable.Size())) - n12, err := m.AuthDisable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - if m.Authenticate != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x3f - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Authenticate.Size())) - n13, err := m.Authenticate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - if m.AuthUserAdd != nil { - dAtA[i] = 0xe2 - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserAdd.Size())) - n14, err := m.AuthUserAdd.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.AuthUserDelete != nil { - dAtA[i] = 0xea - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserDelete.Size())) - n15, err := m.AuthUserDelete.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - } - if m.AuthUserGet != nil { - dAtA[i] = 0xf2 - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, 
uint64(m.AuthUserGet.Size())) - n16, err := m.AuthUserGet.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - if m.AuthUserChangePassword != nil { - dAtA[i] = 0xfa - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserChangePassword.Size())) - n17, err := m.AuthUserChangePassword.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - if m.AuthUserGrantRole != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGrantRole.Size())) - n18, err := m.AuthUserGrantRole.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.AuthUserRevokeRole != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserRevokeRole.Size())) - n19, err := m.AuthUserRevokeRole.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - if m.AuthUserList != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserList.Size())) - n20, err := m.AuthUserList.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - } - if m.AuthRoleList != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleList.Size())) - n21, err := m.AuthRoleList.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - } - if m.AuthRoleAdd != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleAdd.Size())) - n22, err := m.AuthRoleAdd.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - if m.AuthRoleDelete != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleDelete.Size())) - n23, err := m.AuthRoleDelete.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - if m.AuthRoleGet != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 
0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGet.Size())) - n24, err := m.AuthRoleGet.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - } - if m.AuthRoleGrantPermission != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGrantPermission.Size())) - n25, err := m.AuthRoleGrantPermission.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - } - if m.AuthRoleRevokePermission != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleRevokePermission.Size())) - n26, err := m.AuthRoleRevokePermission.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - } - return i, nil -} - -func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - if len(m.SimpleToken) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) - i += copy(dAtA[i:], m.SimpleToken) - } - return i, nil -} - -func encodeFixed64RaftInternal(dAtA []byte, offset int, v uint64) int { - 
dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32RaftInternal(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *RequestHeader) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRaftInternal(uint64(m.ID)) - } - l = len(m.Username) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRevision != 0 { - n += 1 + sovRaftInternal(uint64(m.AuthRevision)) - } - return n -} - -func (m *InternalRaftRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRaftInternal(uint64(m.ID)) - } - if m.V2 != nil { - l = m.V2.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Range != nil { - l = m.Range.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Put != nil { - l = m.Put.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.DeleteRange != nil { - l = m.DeleteRange.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Txn != nil { - l = m.Txn.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Compaction != nil { - l = m.Compaction.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.LeaseGrant != nil { - l = m.LeaseGrant.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.LeaseRevoke != nil { - l = m.LeaseRevoke.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Alarm != nil { - l = m.Alarm.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Header != nil { - l 
= m.Header.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthEnable != nil { - l = m.AuthEnable.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthDisable != nil { - l = m.AuthDisable.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.Authenticate != nil { - l = m.Authenticate.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserAdd != nil { - l = m.AuthUserAdd.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserDelete != nil { - l = m.AuthUserDelete.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserGet != nil { - l = m.AuthUserGet.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserChangePassword != nil { - l = m.AuthUserChangePassword.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserGrantRole != nil { - l = m.AuthUserGrantRole.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserRevokeRole != nil { - l = m.AuthUserRevokeRole.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserList != nil { - l = m.AuthUserList.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleList != nil { - l = m.AuthRoleList.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleAdd != nil { - l = m.AuthRoleAdd.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleDelete != nil { - l = m.AuthRoleDelete.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleGet != nil { - l = m.AuthRoleGet.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleGrantPermission != nil { - l = m.AuthRoleGrantPermission.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleRevokePermission != nil { - l = m.AuthRoleRevokePermission.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - return n -} - -func (m *EmptyResponse) Size() (n int) { - var l int - _ = l - return n -} - -func (m *InternalAuthenticateRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n 
+= 1 + l + sovRaftInternal(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - l = len(m.SimpleToken) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - return n -} - -func sovRaftInternal(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRaftInternal(x uint64) (n int) { - return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RequestHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - 
postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) - } - m.AuthRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AuthRevision |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field V2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.V2 == nil { - m.V2 = &Request{} - } - if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Range == nil { - m.Range = &RangeRequest{} - } - if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Put == nil { - m.Put = &PutRequest{} - } - if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeleteRange == nil { - m.DeleteRange = &DeleteRangeRequest{} - } - if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Txn == nil { - m.Txn = &TxnRequest{} - } - if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Compaction == nil { - m.Compaction = &CompactionRequest{} - } - if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseGrant == nil { - m.LeaseGrant = &LeaseGrantRequest{} - } - if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseRevoke == nil { - m.LeaseRevoke = &LeaseRevokeRequest{} - } - if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Alarm == nil { - 
m.Alarm = &AlarmRequest{} - } - if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 100: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &RequestHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1000: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthEnable == nil { - m.AuthEnable = &AuthEnableRequest{} - } - if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1011: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - 
if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthDisable == nil { - m.AuthDisable = &AuthDisableRequest{} - } - if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1012: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Authenticate == nil { - m.Authenticate = &InternalAuthenticateRequest{} - } - if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1100: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserAdd == nil { - m.AuthUserAdd = &AuthUserAddRequest{} - } - if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1101: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserDelete == nil { - m.AuthUserDelete = &AuthUserDeleteRequest{} - } - if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1102: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserGet == nil { - m.AuthUserGet = &AuthUserGetRequest{} - } - if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1103: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserChangePassword == nil { - m.AuthUserChangePassword = &AuthUserChangePasswordRequest{} - } - if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1104: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType) - } - 
var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserGrantRole == nil { - m.AuthUserGrantRole = &AuthUserGrantRoleRequest{} - } - if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1105: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserRevokeRole == nil { - m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{} - } - if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1106: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserList == nil { - m.AuthUserList = &AuthUserListRequest{} - } - if err := 
m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1107: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleList == nil { - m.AuthRoleList = &AuthRoleListRequest{} - } - if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1200: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleAdd == nil { - m.AuthRoleAdd = &AuthRoleAddRequest{} - } - if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1201: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + 
msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleDelete == nil { - m.AuthRoleDelete = &AuthRoleDeleteRequest{} - } - if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1202: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleGet == nil { - m.AuthRoleGet = &AuthRoleGetRequest{} - } - if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1203: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleGrantPermission == nil { - m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{} - } - if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1204: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleRevokePermission == nil { - m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{} - } - if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EmptyResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - 
var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SimpleToken = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRaftInternal(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthRaftInternal - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start 
int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRaftInternal(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) } - -var fileDescriptorRaftInternal = []byte{ - // 837 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, - 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f, - 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c, - 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2, - 0x0f, 0xd4, 0x17, 0x50, 0xbc, 0xf1, 0xca, 0x1b, 0x7d, 0x00, 0x67, 0x3f, 0x92, 0x34, 0x6d, 0xca, - 0x5d, 0x72, 0xce, 0xff, 0xfc, 0xce, 0xd9, 0xec, 0x7f, 0xbb, 0x45, 0xb3, 0x8c, 0x1e, 0x72, 0xd3, - 0x76, 0x39, 0x30, 0x97, 0x3a, 0xab, 0x3e, 0xf3, 0xb8, 0x87, 0x0b, 0xc0, 0x1b, 0x56, 0x00, 0xac, - 0x0b, 0xcc, 0x3f, 0x28, 0xcd, 0x35, 0xbd, 0xa6, 0x27, 0x13, 0xf7, 0xc4, 0x93, 0xd2, 0x94, 0x66, - 0x62, 0x8d, 0x8e, 0xe4, 0x98, 0xdf, 0x50, 0x8f, 0x95, 0x67, 0x68, 0xda, 0x80, 0x17, 0x1d, 0x08, - 0xf8, 0x16, 0x50, 0x0b, 0x18, 0x2e, 0xa2, 0xec, 0x76, 0x9d, 0x64, 0x16, 0x33, 0x2b, 0x63, 0x46, 
- 0x76, 0xbb, 0x8e, 0x4b, 0x68, 0xaa, 0x13, 0x88, 0x96, 0x6d, 0x20, 0xd9, 0xc5, 0xcc, 0x4a, 0xce, - 0x88, 0xde, 0xf1, 0x32, 0x9a, 0xa6, 0x1d, 0xde, 0x32, 0x19, 0x74, 0xed, 0xc0, 0xf6, 0x5c, 0x32, - 0x2a, 0xcb, 0x0a, 0x22, 0x68, 0xe8, 0x58, 0xe5, 0x4f, 0x11, 0xcd, 0x6e, 0xeb, 0xa9, 0x0d, 0x7a, - 0xc8, 0x75, 0xbb, 0x81, 0x46, 0xd7, 0x50, 0xb6, 0x5b, 0x95, 0x2d, 0xf2, 0xd5, 0xf9, 0xd5, 0xde, - 0x75, 0xad, 0xea, 0x12, 0x23, 0xdb, 0xad, 0xe2, 0xfb, 0x68, 0x9c, 0x51, 0xb7, 0x09, 0xb2, 0x57, - 0xbe, 0x5a, 0xea, 0x53, 0x8a, 0x54, 0x28, 0x57, 0x42, 0x7c, 0x0b, 0x8d, 0xfa, 0x1d, 0x4e, 0xc6, - 0xa4, 0x9e, 0x24, 0xf5, 0x7b, 0x9d, 0x70, 0x1e, 0x43, 0x88, 0xf0, 0x3a, 0x2a, 0x58, 0xe0, 0x00, - 0x07, 0x53, 0x35, 0x19, 0x97, 0x45, 0x8b, 0xc9, 0xa2, 0xba, 0x54, 0x24, 0x5a, 0xe5, 0xad, 0x38, - 0x26, 0x1a, 0xf2, 0x63, 0x97, 0x4c, 0xa4, 0x35, 0xdc, 0x3f, 0x76, 0xa3, 0x86, 0xfc, 0xd8, 0xc5, - 0x0f, 0x10, 0x6a, 0x78, 0x6d, 0x9f, 0x36, 0xb8, 0xf8, 0x7e, 0x93, 0xb2, 0xe4, 0x6a, 0xb2, 0x64, - 0x3d, 0xca, 0x87, 0x95, 0x3d, 0x25, 0xf8, 0x21, 0xca, 0x3b, 0x40, 0x03, 0x30, 0x9b, 0x8c, 0xba, - 0x9c, 0x4c, 0xa5, 0x11, 0x76, 0x84, 0x60, 0x53, 0xe4, 0x23, 0x82, 0x13, 0x85, 0xc4, 0x9a, 0x15, - 0x81, 0x41, 0xd7, 0x3b, 0x02, 0x92, 0x4b, 0x5b, 0xb3, 0x44, 0x18, 0x52, 0x10, 0xad, 0xd9, 0x89, - 0x63, 0x62, 0x5b, 0xa8, 0x43, 0x59, 0x9b, 0xa0, 0xb4, 0x6d, 0xa9, 0x89, 0x54, 0xb4, 0x2d, 0x52, - 0x88, 0xd7, 0xd0, 0x44, 0x4b, 0x5a, 0x8e, 0x58, 0xb2, 0x64, 0x21, 0x75, 0xcf, 0x95, 0x2b, 0x0d, - 0x2d, 0xc5, 0x35, 0x94, 0x97, 0x8e, 0x03, 0x97, 0x1e, 0x38, 0x40, 0x7e, 0xa7, 0x7e, 0xb0, 0x5a, - 0x87, 0xb7, 0x36, 0xa4, 0x20, 0x5a, 0x2e, 0x8d, 0x42, 0xb8, 0x8e, 0xa4, 0x3f, 0x4d, 0xcb, 0x0e, - 0x24, 0xe3, 0xef, 0x64, 0xda, 0x7a, 0x05, 0xa3, 0xae, 0x14, 0xd1, 0x7a, 0x69, 0x1c, 0xc3, 0xbb, - 0x8a, 0x02, 0x2e, 0xb7, 0x1b, 0x94, 0x03, 0xf9, 0xa7, 0x28, 0x37, 0x93, 0x94, 0xd0, 0xf7, 0xb5, - 0x1e, 0x69, 0x88, 0x4b, 0xd4, 0xe3, 0x0d, 0x7d, 0x94, 0xc4, 0xd9, 0x32, 0xa9, 0x65, 0x91, 0x8f, - 0x53, 0xc3, 0xc6, 0x7a, 0x1c, 0x00, 
0xab, 0x59, 0x56, 0x62, 0x2c, 0x1d, 0xc3, 0xbb, 0x68, 0x26, - 0xc6, 0x28, 0x4f, 0x92, 0x4f, 0x8a, 0xb4, 0x9c, 0x4e, 0xd2, 0x66, 0xd6, 0xb0, 0x22, 0x4d, 0x84, - 0x93, 0x63, 0x35, 0x81, 0x93, 0xcf, 0xe7, 0x8e, 0xb5, 0x09, 0x7c, 0x60, 0xac, 0x4d, 0xe0, 0xb8, - 0x89, 0xae, 0xc4, 0x98, 0x46, 0x4b, 0x9c, 0x12, 0xd3, 0xa7, 0x41, 0xf0, 0xd2, 0x63, 0x16, 0xf9, - 0xa2, 0x90, 0xb7, 0xd3, 0x91, 0xeb, 0x52, 0xbd, 0xa7, 0xc5, 0x21, 0xfd, 0x12, 0x4d, 0x4d, 0xe3, - 0x27, 0x68, 0xae, 0x67, 0x5e, 0x61, 0x6f, 0x93, 0x79, 0x0e, 0x90, 0x53, 0xd5, 0xe3, 0xfa, 0x90, - 0xb1, 0xe5, 0xd1, 0xf0, 0xe2, 0xad, 0xbe, 0x48, 0xfb, 0x33, 0xf8, 0x29, 0x9a, 0x8f, 0xc9, 0xea, - 0xa4, 0x28, 0xf4, 0x57, 0x85, 0xbe, 0x91, 0x8e, 0xd6, 0x47, 0xa6, 0x87, 0x8d, 0xe9, 0x40, 0x0a, - 0x6f, 0xa1, 0x62, 0x0c, 0x77, 0xec, 0x80, 0x93, 0x6f, 0x8a, 0xba, 0x94, 0x4e, 0xdd, 0xb1, 0x03, - 0x9e, 0xf0, 0x51, 0x18, 0x8c, 0x48, 0x62, 0x34, 0x45, 0xfa, 0x3e, 0x94, 0x24, 0x5a, 0x0f, 0x90, - 0xc2, 0x60, 0xb4, 0xf5, 0x92, 0x24, 0x1c, 0xf9, 0x26, 0x37, 0x6c, 0xeb, 0x45, 0x4d, 0xbf, 0x23, - 0x75, 0x2c, 0x72, 0xa4, 0xc4, 0x68, 0x47, 0xbe, 0xcd, 0x0d, 0x73, 0xa4, 0xa8, 0x4a, 0x71, 0x64, - 0x1c, 0x4e, 0x8e, 0x25, 0x1c, 0xf9, 0xee, 0xdc, 0xb1, 0xfa, 0x1d, 0xa9, 0x63, 0xf8, 0x39, 0x2a, - 0xf5, 0x60, 0xa4, 0x51, 0x7c, 0x60, 0x6d, 0x3b, 0x90, 0xf7, 0xd8, 0x7b, 0xc5, 0xbc, 0x33, 0x84, - 0x29, 0xe4, 0x7b, 0x91, 0x3a, 0xe4, 0x5f, 0xa6, 0xe9, 0x79, 0xdc, 0x46, 0x0b, 0x71, 0x2f, 0x6d, - 0x9d, 0x9e, 0x66, 0x1f, 0x54, 0xb3, 0xbb, 0xe9, 0xcd, 0x94, 0x4b, 0x06, 0xbb, 0x11, 0x3a, 0x44, - 0x50, 0xb9, 0x80, 0xa6, 0x37, 0xda, 0x3e, 0x7f, 0x65, 0x40, 0xe0, 0x7b, 0x6e, 0x00, 0x15, 0x1f, - 0x2d, 0x9c, 0xf3, 0x43, 0x84, 0x31, 0x1a, 0x93, 0xb7, 0x7b, 0x46, 0xde, 0xee, 0xf2, 0x59, 0xdc, - 0xfa, 0xd1, 0xf9, 0xd4, 0xb7, 0x7e, 0xf8, 0x8e, 0x97, 0x50, 0x21, 0xb0, 0xdb, 0xbe, 0x03, 0x26, - 0xf7, 0x8e, 0x40, 0x5d, 0xfa, 0x39, 0x23, 0xaf, 0x62, 0xfb, 0x22, 0xf4, 0x68, 0xee, 0xe4, 0x67, - 0x79, 0xe4, 0xe4, 0xac, 0x9c, 0x39, 0x3d, 0x2b, 0x67, 0x7e, 0x9c, 0x95, 
0x33, 0xaf, 0x7f, 0x95, - 0x47, 0x0e, 0x26, 0xe4, 0x5f, 0x8e, 0xb5, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xc9, 0xfc, - 0x0e, 0xca, 0x08, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto deleted file mode 100644 index 25d45d3c4f2..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; -package etcdserverpb; - -import "gogoproto/gogo.proto"; -import "etcdserver.proto"; -import "rpc.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; - -message RequestHeader { - uint64 ID = 1; - // username is a username that is associated with an auth token of gRPC connection - string username = 2; - // auth_revision is a revision number of auth.authStore. It is not related to mvcc - uint64 auth_revision = 3; -} - -// An InternalRaftRequest is the union of all requests which can be -// sent via raft. 
-message InternalRaftRequest { - RequestHeader header = 100; - uint64 ID = 1; - - Request v2 = 2; - - RangeRequest range = 3; - PutRequest put = 4; - DeleteRangeRequest delete_range = 5; - TxnRequest txn = 6; - CompactionRequest compaction = 7; - - LeaseGrantRequest lease_grant = 8; - LeaseRevokeRequest lease_revoke = 9; - - AlarmRequest alarm = 10; - - AuthEnableRequest auth_enable = 1000; - AuthDisableRequest auth_disable = 1011; - - InternalAuthenticateRequest authenticate = 1012; - - AuthUserAddRequest auth_user_add = 1100; - AuthUserDeleteRequest auth_user_delete = 1101; - AuthUserGetRequest auth_user_get = 1102; - AuthUserChangePasswordRequest auth_user_change_password = 1103; - AuthUserGrantRoleRequest auth_user_grant_role = 1104; - AuthUserRevokeRoleRequest auth_user_revoke_role = 1105; - AuthUserListRequest auth_user_list = 1106; - AuthRoleListRequest auth_role_list = 1107; - - AuthRoleAddRequest auth_role_add = 1200; - AuthRoleDeleteRequest auth_role_delete = 1201; - AuthRoleGetRequest auth_role_get = 1202; - AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203; - AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204; -} - -message EmptyResponse { -} - -// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? -// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. -// For avoiding misusage the field, we have an internal version of AuthenticateRequest. 
-message InternalAuthenticateRequest { - string name = 1; - string password = 2; - - // simple_token is generated in API layer (etcdserver/v3_server.go) - string simple_token = 3; -} - diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go deleted file mode 100644 index 97e0c4c49e0..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ /dev/null @@ -1,18682 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: rpc.proto -// DO NOT EDIT! - -package etcdserverpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" - - authpb "github.com/coreos/etcd/auth/authpb" - - context "golang.org/x/net/context" - - grpc "google.golang.org/grpc" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type AlarmType int32 - -const ( - AlarmType_NONE AlarmType = 0 - AlarmType_NOSPACE AlarmType = 1 - AlarmType_CORRUPT AlarmType = 2 -) - -var AlarmType_name = map[int32]string{ - 0: "NONE", - 1: "NOSPACE", - 2: "CORRUPT", -} -var AlarmType_value = map[string]int32{ - "NONE": 0, - "NOSPACE": 1, - "CORRUPT": 2, -} - -func (x AlarmType) String() string { - return proto.EnumName(AlarmType_name, int32(x)) -} -func (AlarmType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } - -type RangeRequest_SortOrder int32 - -const ( - RangeRequest_NONE RangeRequest_SortOrder = 0 - RangeRequest_ASCEND RangeRequest_SortOrder = 1 - RangeRequest_DESCEND RangeRequest_SortOrder = 2 -) - -var RangeRequest_SortOrder_name = map[int32]string{ - 0: "NONE", - 1: "ASCEND", - 2: "DESCEND", -} -var RangeRequest_SortOrder_value = map[string]int32{ - "NONE": 0, - "ASCEND": 1, - "DESCEND": 2, -} - -func (x RangeRequest_SortOrder) String() string { - return proto.EnumName(RangeRequest_SortOrder_name, 
int32(x)) -} -func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 0} } - -type RangeRequest_SortTarget int32 - -const ( - RangeRequest_KEY RangeRequest_SortTarget = 0 - RangeRequest_VERSION RangeRequest_SortTarget = 1 - RangeRequest_CREATE RangeRequest_SortTarget = 2 - RangeRequest_MOD RangeRequest_SortTarget = 3 - RangeRequest_VALUE RangeRequest_SortTarget = 4 -) - -var RangeRequest_SortTarget_name = map[int32]string{ - 0: "KEY", - 1: "VERSION", - 2: "CREATE", - 3: "MOD", - 4: "VALUE", -} -var RangeRequest_SortTarget_value = map[string]int32{ - "KEY": 0, - "VERSION": 1, - "CREATE": 2, - "MOD": 3, - "VALUE": 4, -} - -func (x RangeRequest_SortTarget) String() string { - return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) -} -func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 1} } - -type Compare_CompareResult int32 - -const ( - Compare_EQUAL Compare_CompareResult = 0 - Compare_GREATER Compare_CompareResult = 1 - Compare_LESS Compare_CompareResult = 2 - Compare_NOT_EQUAL Compare_CompareResult = 3 -) - -var Compare_CompareResult_name = map[int32]string{ - 0: "EQUAL", - 1: "GREATER", - 2: "LESS", - 3: "NOT_EQUAL", -} -var Compare_CompareResult_value = map[string]int32{ - "EQUAL": 0, - "GREATER": 1, - "LESS": 2, - "NOT_EQUAL": 3, -} - -func (x Compare_CompareResult) String() string { - return proto.EnumName(Compare_CompareResult_name, int32(x)) -} -func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 0} } - -type Compare_CompareTarget int32 - -const ( - Compare_VERSION Compare_CompareTarget = 0 - Compare_CREATE Compare_CompareTarget = 1 - Compare_MOD Compare_CompareTarget = 2 - Compare_VALUE Compare_CompareTarget = 3 - Compare_LEASE Compare_CompareTarget = 4 -) - -var Compare_CompareTarget_name = map[int32]string{ - 0: "VERSION", - 1: "CREATE", - 2: "MOD", - 3: "VALUE", - 4: "LEASE", -} -var Compare_CompareTarget_value 
= map[string]int32{ - "VERSION": 0, - "CREATE": 1, - "MOD": 2, - "VALUE": 3, - "LEASE": 4, -} - -func (x Compare_CompareTarget) String() string { - return proto.EnumName(Compare_CompareTarget_name, int32(x)) -} -func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} } - -type WatchCreateRequest_FilterType int32 - -const ( - // filter out put event. - WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0 - // filter out delete event. - WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1 -) - -var WatchCreateRequest_FilterType_name = map[int32]string{ - 0: "NOPUT", - 1: "NODELETE", -} -var WatchCreateRequest_FilterType_value = map[string]int32{ - "NOPUT": 0, - "NODELETE": 1, -} - -func (x WatchCreateRequest_FilterType) String() string { - return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x)) -} -func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{21, 0} -} - -type AlarmRequest_AlarmAction int32 - -const ( - AlarmRequest_GET AlarmRequest_AlarmAction = 0 - AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1 - AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2 -) - -var AlarmRequest_AlarmAction_name = map[int32]string{ - 0: "GET", - 1: "ACTIVATE", - 2: "DEACTIVATE", -} -var AlarmRequest_AlarmAction_value = map[string]int32{ - "GET": 0, - "ACTIVATE": 1, - "DEACTIVATE": 2, -} - -func (x AlarmRequest_AlarmAction) String() string { - return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) -} -func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{48, 0} -} - -type ResponseHeader struct { - // cluster_id is the ID of the cluster which sent the response. - ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // member_id is the ID of the member which sent the response. 
- MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` - // revision is the key-value store revision when the request was applied. - Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` - // raft_term is the raft term when the request was applied. - RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` -} - -func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } -func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } -func (*ResponseHeader) ProtoMessage() {} -func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } - -func (m *ResponseHeader) GetClusterId() uint64 { - if m != nil { - return m.ClusterId - } - return 0 -} - -func (m *ResponseHeader) GetMemberId() uint64 { - if m != nil { - return m.MemberId - } - return 0 -} - -func (m *ResponseHeader) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *ResponseHeader) GetRaftTerm() uint64 { - if m != nil { - return m.RaftTerm - } - return 0 -} - -type RangeRequest struct { - // key is the first key for the range. If range_end is not given, the request only looks up key. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the upper bound on the requested range [key, range_end). - // If range_end is '\0', the range is all keys >= key. - // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), - // then the range request gets all keys prefixed with key. - // If both key and range_end are '\0', then the range request returns all keys. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // limit is a limit on the number of keys returned for the request. When limit is set to 0, - // it is treated as no limit. 
- Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` - // revision is the point-in-time of the key-value store to use for the range. - // If revision is less or equal to zero, the range is over the newest key-value store. - // If the revision has been compacted, ErrCompacted is returned as a response. - Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` - // sort_order is the order for returned sorted results. - SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"` - // sort_target is the key-value field to use for sorting. - SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"` - // serializable sets the range request to use serializable member-local reads. - // Range requests are linearizable by default; linearizable requests have higher - // latency and lower throughput than serializable requests but reflect the current - // consensus of the cluster. For better performance, in exchange for possible stale reads, - // a serializable range request is served locally without needing to reach consensus - // with other nodes in the cluster. - Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"` - // keys_only when set returns only the keys and not the values. - KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"` - // count_only when set returns only the count of the keys in the range. - CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"` - // min_mod_revision is the lower bound for returned key mod revisions; all keys with - // lesser mod revisions will be filtered away. 
- MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"` - // max_mod_revision is the upper bound for returned key mod revisions; all keys with - // greater mod revisions will be filtered away. - MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` - // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create trevisions will be filtered away. - MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` - // max_create_revision is the upper bound for returned key create revisions; all keys with - // greater create revisions will be filtered away. - MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` -} - -func (m *RangeRequest) Reset() { *m = RangeRequest{} } -func (m *RangeRequest) String() string { return proto.CompactTextString(m) } -func (*RangeRequest) ProtoMessage() {} -func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } - -func (m *RangeRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *RangeRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *RangeRequest) GetLimit() int64 { - if m != nil { - return m.Limit - } - return 0 -} - -func (m *RangeRequest) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { - if m != nil { - return m.SortOrder - } - return RangeRequest_NONE -} - -func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { - if m != nil { - return m.SortTarget - } - return RangeRequest_KEY -} - -func (m *RangeRequest) GetSerializable() bool { - if m != nil { - 
return m.Serializable - } - return false -} - -func (m *RangeRequest) GetKeysOnly() bool { - if m != nil { - return m.KeysOnly - } - return false -} - -func (m *RangeRequest) GetCountOnly() bool { - if m != nil { - return m.CountOnly - } - return false -} - -func (m *RangeRequest) GetMinModRevision() int64 { - if m != nil { - return m.MinModRevision - } - return 0 -} - -func (m *RangeRequest) GetMaxModRevision() int64 { - if m != nil { - return m.MaxModRevision - } - return 0 -} - -func (m *RangeRequest) GetMinCreateRevision() int64 { - if m != nil { - return m.MinCreateRevision - } - return 0 -} - -func (m *RangeRequest) GetMaxCreateRevision() int64 { - if m != nil { - return m.MaxCreateRevision - } - return 0 -} - -type RangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // kvs is the list of key-value pairs matched by the range request. - // kvs is empty when count is requested. - Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"` - // more indicates if there are more keys to return in the requested range. - More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` - // count is set to the number of keys within the range when requested. 
- Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` -} - -func (m *RangeResponse) Reset() { *m = RangeResponse{} } -func (m *RangeResponse) String() string { return proto.CompactTextString(m) } -func (*RangeResponse) ProtoMessage() {} -func (*RangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} } - -func (m *RangeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { - if m != nil { - return m.Kvs - } - return nil -} - -func (m *RangeResponse) GetMore() bool { - if m != nil { - return m.More - } - return false -} - -func (m *RangeResponse) GetCount() int64 { - if m != nil { - return m.Count - } - return 0 -} - -type PutRequest struct { - // key is the key, in bytes, to put into the key-value store. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // value is the value, in bytes, to associate with the key in the key-value store. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // lease is the lease ID to associate with the key in the key-value store. A lease - // value of 0 indicates no lease. - Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"` - // If prev_kv is set, etcd gets the previous key-value pair before changing it. - // The previous key-value pair will be returned in the put response. - PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - // If ignore_value is set, etcd updates the key using its current value. - // Returns an error if the key does not exist. - IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` - // If ignore_lease is set, etcd updates the key using its current lease. - // Returns an error if the key does not exist. 
- IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} -func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } - -func (m *PutRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *PutRequest) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *PutRequest) GetLease() int64 { - if m != nil { - return m.Lease - } - return 0 -} - -func (m *PutRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - -func (m *PutRequest) GetIgnoreValue() bool { - if m != nil { - return m.IgnoreValue - } - return false -} - -func (m *PutRequest) GetIgnoreLease() bool { - if m != nil { - return m.IgnoreLease - } - return false -} - -type PutResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // if prev_kv is set in the request, the previous key-value pair will be returned. - PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} -func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} } - -func (m *PutResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue { - if m != nil { - return m.PrevKv - } - return nil -} - -type DeleteRangeRequest struct { - // key is the first key to delete in the range. 
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the key following the last key to delete for the range [key, range_end). - // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all the keys - // with the prefix (the given key). - // If range_end is '\0', the range is all keys greater than or equal to the key argument. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delete response. - PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` -} - -func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } -func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeRequest) ProtoMessage() {} -func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } - -func (m *DeleteRangeRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *DeleteRangeRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *DeleteRangeRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - -type DeleteRangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // deleted is the number of keys deleted by the delete range request. - Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` - // if prev_kv is set in the request, the previous key-value pairs will be returned. 
- PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"` -} - -func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } -func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeResponse) ProtoMessage() {} -func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{6} } - -func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DeleteRangeResponse) GetDeleted() int64 { - if m != nil { - return m.Deleted - } - return 0 -} - -func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { - if m != nil { - return m.PrevKvs - } - return nil -} - -type RequestOp struct { - // request is a union of request types accepted by a transaction. - // - // Types that are valid to be assigned to Request: - // *RequestOp_RequestRange - // *RequestOp_RequestPut - // *RequestOp_RequestDeleteRange - // *RequestOp_RequestTxn - Request isRequestOp_Request `protobuf_oneof:"request"` -} - -func (m *RequestOp) Reset() { *m = RequestOp{} } -func (m *RequestOp) String() string { return proto.CompactTextString(m) } -func (*RequestOp) ProtoMessage() {} -func (*RequestOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} } - -type isRequestOp_Request interface { - isRequestOp_Request() - MarshalTo([]byte) (int, error) - Size() int -} - -type RequestOp_RequestRange struct { - RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,oneof"` -} -type RequestOp_RequestPut struct { - RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,oneof"` -} -type RequestOp_RequestDeleteRange struct { - RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"` -} -type RequestOp_RequestTxn struct { - RequestTxn *TxnRequest 
`protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,oneof"` -} - -func (*RequestOp_RequestRange) isRequestOp_Request() {} -func (*RequestOp_RequestPut) isRequestOp_Request() {} -func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} -func (*RequestOp_RequestTxn) isRequestOp_Request() {} - -func (m *RequestOp) GetRequest() isRequestOp_Request { - if m != nil { - return m.Request - } - return nil -} - -func (m *RequestOp) GetRequestRange() *RangeRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok { - return x.RequestRange - } - return nil -} - -func (m *RequestOp) GetRequestPut() *PutRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok { - return x.RequestPut - } - return nil -} - -func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok { - return x.RequestDeleteRange - } - return nil -} - -func (m *RequestOp) GetRequestTxn() *TxnRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok { - return x.RequestTxn - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{ - (*RequestOp_RequestRange)(nil), - (*RequestOp_RequestPut)(nil), - (*RequestOp_RequestDeleteRange)(nil), - (*RequestOp_RequestTxn)(nil), - } -} - -func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*RequestOp) - // request - switch x := m.Request.(type) { - case *RequestOp_RequestRange: - _ = b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RequestRange); err != nil { - return err - } - case *RequestOp_RequestPut: - _ = b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RequestPut); err != nil { - return err - } - case *RequestOp_RequestDeleteRange: - _ = b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RequestDeleteRange); err != nil { - return err - } - case *RequestOp_RequestTxn: - _ = b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RequestTxn); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("RequestOp.Request has unexpected type %T", x) - } - return nil -} - -func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*RequestOp) - switch tag { - case 1: // request.request_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(RangeRequest) - err := b.DecodeMessage(msg) - m.Request = &RequestOp_RequestRange{msg} - return true, err - case 2: // request.request_put - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PutRequest) - err := b.DecodeMessage(msg) - m.Request = &RequestOp_RequestPut{msg} - return true, err - case 3: // request.request_delete_range - if wire != proto.WireBytes { 
- return true, proto.ErrInternalBadWireType - } - msg := new(DeleteRangeRequest) - err := b.DecodeMessage(msg) - m.Request = &RequestOp_RequestDeleteRange{msg} - return true, err - case 4: // request.request_txn - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(TxnRequest) - err := b.DecodeMessage(msg) - m.Request = &RequestOp_RequestTxn{msg} - return true, err - default: - return false, nil - } -} - -func _RequestOp_OneofSizer(msg proto.Message) (n int) { - m := msg.(*RequestOp) - // request - switch x := m.Request.(type) { - case *RequestOp_RequestRange: - s := proto.Size(x.RequestRange) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *RequestOp_RequestPut: - s := proto.Size(x.RequestPut) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *RequestOp_RequestDeleteRange: - s := proto.Size(x.RequestDeleteRange) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *RequestOp_RequestTxn: - s := proto.Size(x.RequestTxn) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type ResponseOp struct { - // response is a union of response types returned by a transaction. 
- // - // Types that are valid to be assigned to Response: - // *ResponseOp_ResponseRange - // *ResponseOp_ResponsePut - // *ResponseOp_ResponseDeleteRange - // *ResponseOp_ResponseTxn - Response isResponseOp_Response `protobuf_oneof:"response"` -} - -func (m *ResponseOp) Reset() { *m = ResponseOp{} } -func (m *ResponseOp) String() string { return proto.CompactTextString(m) } -func (*ResponseOp) ProtoMessage() {} -func (*ResponseOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{8} } - -type isResponseOp_Response interface { - isResponseOp_Response() - MarshalTo([]byte) (int, error) - Size() int -} - -type ResponseOp_ResponseRange struct { - ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,oneof"` -} -type ResponseOp_ResponsePut struct { - ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,oneof"` -} -type ResponseOp_ResponseDeleteRange struct { - ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"` -} -type ResponseOp_ResponseTxn struct { - ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,oneof"` -} - -func (*ResponseOp_ResponseRange) isResponseOp_Response() {} -func (*ResponseOp_ResponsePut) isResponseOp_Response() {} -func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} -func (*ResponseOp_ResponseTxn) isResponseOp_Response() {} - -func (m *ResponseOp) GetResponse() isResponseOp_Response { - if m != nil { - return m.Response - } - return nil -} - -func (m *ResponseOp) GetResponseRange() *RangeResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok { - return x.ResponseRange - } - return nil -} - -func (m *ResponseOp) GetResponsePut() *PutResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok { - return x.ResponsePut - } - return nil -} - -func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { - if x, ok := 
m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok { - return x.ResponseDeleteRange - } - return nil -} - -func (m *ResponseOp) GetResponseTxn() *TxnResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok { - return x.ResponseTxn - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{ - (*ResponseOp_ResponseRange)(nil), - (*ResponseOp_ResponsePut)(nil), - (*ResponseOp_ResponseDeleteRange)(nil), - (*ResponseOp_ResponseTxn)(nil), - } -} - -func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ResponseOp) - // response - switch x := m.Response.(type) { - case *ResponseOp_ResponseRange: - _ = b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ResponseRange); err != nil { - return err - } - case *ResponseOp_ResponsePut: - _ = b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ResponsePut); err != nil { - return err - } - case *ResponseOp_ResponseDeleteRange: - _ = b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil { - return err - } - case *ResponseOp_ResponseTxn: - _ = b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ResponseTxn); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ResponseOp.Response has unexpected type %T", x) - } - return nil -} - -func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ResponseOp) - switch tag { - case 1: // response.response_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(RangeResponse) - err := 
b.DecodeMessage(msg) - m.Response = &ResponseOp_ResponseRange{msg} - return true, err - case 2: // response.response_put - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PutResponse) - err := b.DecodeMessage(msg) - m.Response = &ResponseOp_ResponsePut{msg} - return true, err - case 3: // response.response_delete_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(DeleteRangeResponse) - err := b.DecodeMessage(msg) - m.Response = &ResponseOp_ResponseDeleteRange{msg} - return true, err - case 4: // response.response_txn - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(TxnResponse) - err := b.DecodeMessage(msg) - m.Response = &ResponseOp_ResponseTxn{msg} - return true, err - default: - return false, nil - } -} - -func _ResponseOp_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ResponseOp) - // response - switch x := m.Response.(type) { - case *ResponseOp_ResponseRange: - s := proto.Size(x.ResponseRange) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseOp_ResponsePut: - s := proto.Size(x.ResponsePut) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseOp_ResponseDeleteRange: - s := proto.Size(x.ResponseDeleteRange) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseOp_ResponseTxn: - s := proto.Size(x.ResponseTxn) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Compare struct { - // result is logical comparison operation for this comparison. 
- Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"` - // target is the key-value field to inspect for the comparison. - Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"` - // key is the subject key for the comparison operation. - Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` - // Types that are valid to be assigned to TargetUnion: - // *Compare_Version - // *Compare_CreateRevision - // *Compare_ModRevision - // *Compare_Value - // *Compare_Lease - TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` - // range_end compares the given target to all keys in the range [key, range_end). - // See RangeRequest for more details on key ranges. - RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` -} - -func (m *Compare) Reset() { *m = Compare{} } -func (m *Compare) String() string { return proto.CompactTextString(m) } -func (*Compare) ProtoMessage() {} -func (*Compare) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9} } - -type isCompare_TargetUnion interface { - isCompare_TargetUnion() - MarshalTo([]byte) (int, error) - Size() int -} - -type Compare_Version struct { - Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"` -} -type Compare_CreateRevision struct { - CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"` -} -type Compare_ModRevision struct { - ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"` -} -type Compare_Value struct { - Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"` -} -type Compare_Lease struct { - Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"` -} - -func (*Compare_Version) isCompare_TargetUnion() {} -func (*Compare_CreateRevision) 
isCompare_TargetUnion() {} -func (*Compare_ModRevision) isCompare_TargetUnion() {} -func (*Compare_Value) isCompare_TargetUnion() {} -func (*Compare_Lease) isCompare_TargetUnion() {} - -func (m *Compare) GetTargetUnion() isCompare_TargetUnion { - if m != nil { - return m.TargetUnion - } - return nil -} - -func (m *Compare) GetResult() Compare_CompareResult { - if m != nil { - return m.Result - } - return Compare_EQUAL -} - -func (m *Compare) GetTarget() Compare_CompareTarget { - if m != nil { - return m.Target - } - return Compare_VERSION -} - -func (m *Compare) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *Compare) GetVersion() int64 { - if x, ok := m.GetTargetUnion().(*Compare_Version); ok { - return x.Version - } - return 0 -} - -func (m *Compare) GetCreateRevision() int64 { - if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok { - return x.CreateRevision - } - return 0 -} - -func (m *Compare) GetModRevision() int64 { - if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok { - return x.ModRevision - } - return 0 -} - -func (m *Compare) GetValue() []byte { - if x, ok := m.GetTargetUnion().(*Compare_Value); ok { - return x.Value - } - return nil -} - -func (m *Compare) GetLease() int64 { - if x, ok := m.GetTargetUnion().(*Compare_Lease); ok { - return x.Lease - } - return 0 -} - -func (m *Compare) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{ - (*Compare_Version)(nil), - (*Compare_CreateRevision)(nil), - (*Compare_ModRevision)(nil), - (*Compare_Value)(nil), - (*Compare_Lease)(nil), - } -} - -func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Compare) - // target_union - switch x := m.TargetUnion.(type) { - case *Compare_Version: - _ = b.EncodeVarint(4<<3 | proto.WireVarint) - _ = b.EncodeVarint(uint64(x.Version)) - case *Compare_CreateRevision: - _ = b.EncodeVarint(5<<3 | proto.WireVarint) - _ = b.EncodeVarint(uint64(x.CreateRevision)) - case *Compare_ModRevision: - _ = b.EncodeVarint(6<<3 | proto.WireVarint) - _ = b.EncodeVarint(uint64(x.ModRevision)) - case *Compare_Value: - _ = b.EncodeVarint(7<<3 | proto.WireBytes) - _ = b.EncodeRawBytes(x.Value) - case *Compare_Lease: - _ = b.EncodeVarint(8<<3 | proto.WireVarint) - _ = b.EncodeVarint(uint64(x.Lease)) - case nil: - default: - return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x) - } - return nil -} - -func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Compare) - switch tag { - case 4: // target_union.version - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.TargetUnion = &Compare_Version{int64(x)} - return true, err - case 5: // target_union.create_revision - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.TargetUnion = &Compare_CreateRevision{int64(x)} - return true, err - case 6: // target_union.mod_revision - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - 
m.TargetUnion = &Compare_ModRevision{int64(x)} - return true, err - case 7: // target_union.value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.TargetUnion = &Compare_Value{x} - return true, err - case 8: // target_union.lease - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.TargetUnion = &Compare_Lease{int64(x)} - return true, err - default: - return false, nil - } -} - -func _Compare_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Compare) - // target_union - switch x := m.TargetUnion.(type) { - case *Compare_Version: - n += proto.SizeVarint(4<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Version)) - case *Compare_CreateRevision: - n += proto.SizeVarint(5<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.CreateRevision)) - case *Compare_ModRevision: - n += proto.SizeVarint(6<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.ModRevision)) - case *Compare_Value: - n += proto.SizeVarint(7<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Value))) - n += len(x.Value) - case *Compare_Lease: - n += proto.SizeVarint(8<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Lease)) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// From google paxosdb paper: -// Our implementation hinges around a powerful primitive which we call MultiOp. All other database -// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically -// and consists of three components: -// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check -// for the absence or presence of a value, or compare with a given value. Two different tests in the guard -// may apply to the same or different entries in the database. 
All tests in the guard are applied and -// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise -// it executes f op (see item 3 below). -// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or -// lookup operation, and applies to a single database entry. Two different operations in the list may apply -// to the same or different entries in the database. These operations are executed -// if guard evaluates to -// true. -// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. -type TxnRequest struct { - // compare is a list of predicates representing a conjunction of terms. - // If the comparisons succeed, then the success requests will be processed in order, - // and the response will contain their respective responses in order. - // If the comparisons fail, then the failure requests will be processed in order, - // and the response will contain their respective responses in order. - Compare []*Compare `protobuf:"bytes,1,rep,name=compare" json:"compare,omitempty"` - // success is a list of requests which will be applied when compare evaluates to true. - Success []*RequestOp `protobuf:"bytes,2,rep,name=success" json:"success,omitempty"` - // failure is a list of requests which will be applied when compare evaluates to false. 
- Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure" json:"failure,omitempty"` -} - -func (m *TxnRequest) Reset() { *m = TxnRequest{} } -func (m *TxnRequest) String() string { return proto.CompactTextString(m) } -func (*TxnRequest) ProtoMessage() {} -func (*TxnRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{10} } - -func (m *TxnRequest) GetCompare() []*Compare { - if m != nil { - return m.Compare - } - return nil -} - -func (m *TxnRequest) GetSuccess() []*RequestOp { - if m != nil { - return m.Success - } - return nil -} - -func (m *TxnRequest) GetFailure() []*RequestOp { - if m != nil { - return m.Failure - } - return nil -} - -type TxnResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // succeeded is set to true if the compare evaluated to true or false otherwise. - Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"` - // responses is a list of responses corresponding to the results from applying - // success if succeeded is true or failure if succeeded is false. - Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"` -} - -func (m *TxnResponse) Reset() { *m = TxnResponse{} } -func (m *TxnResponse) String() string { return proto.CompactTextString(m) } -func (*TxnResponse) ProtoMessage() {} -func (*TxnResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{11} } - -func (m *TxnResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *TxnResponse) GetSucceeded() bool { - if m != nil { - return m.Succeeded - } - return false -} - -func (m *TxnResponse) GetResponses() []*ResponseOp { - if m != nil { - return m.Responses - } - return nil -} - -// CompactionRequest compacts the key-value store up to a given revision. All superseded keys -// with a revision less than the compaction revision will be removed. 
-type CompactionRequest struct { - // revision is the key-value store revision for the compaction operation. - Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` - // physical is set so the RPC will wait until the compaction is physically - // applied to the local database such that compacted entries are totally - // removed from the backend database. - Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` -} - -func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } -func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } -func (*CompactionRequest) ProtoMessage() {} -func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } - -func (m *CompactionRequest) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *CompactionRequest) GetPhysical() bool { - if m != nil { - return m.Physical - } - return false -} - -type CompactionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } -func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } -func (*CompactionResponse) ProtoMessage() {} -func (*CompactionResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{13} } - -func (m *CompactionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type HashRequest struct { -} - -func (m *HashRequest) Reset() { *m = HashRequest{} } -func (m *HashRequest) String() string { return proto.CompactTextString(m) } -func (*HashRequest) ProtoMessage() {} -func (*HashRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{14} } - -type HashKVRequest struct { - // revision is the key-value store revision for the hash operation. 
- Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` -} - -func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } -func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } -func (*HashKVRequest) ProtoMessage() {} -func (*HashKVRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{15} } - -func (m *HashKVRequest) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -type HashKVResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // hash is the hash value computed from the responding member's MVCC keys up to a given revision. - Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` - // compact_revision is the compacted revision of key-value store when hash begins. - CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` -} - -func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } -func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } -func (*HashKVResponse) ProtoMessage() {} -func (*HashKVResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{16} } - -func (m *HashKVResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *HashKVResponse) GetHash() uint32 { - if m != nil { - return m.Hash - } - return 0 -} - -func (m *HashKVResponse) GetCompactRevision() int64 { - if m != nil { - return m.CompactRevision - } - return 0 -} - -type HashResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // hash is the hash value computed from the responding member's KV's backend. 
- Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` -} - -func (m *HashResponse) Reset() { *m = HashResponse{} } -func (m *HashResponse) String() string { return proto.CompactTextString(m) } -func (*HashResponse) ProtoMessage() {} -func (*HashResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{17} } - -func (m *HashResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *HashResponse) GetHash() uint32 { - if m != nil { - return m.Hash - } - return 0 -} - -type SnapshotRequest struct { -} - -func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } -func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*SnapshotRequest) ProtoMessage() {} -func (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{18} } - -type SnapshotResponse struct { - // header has the current key-value store information. The first header in the snapshot - // stream indicates the point in time of the snapshot. - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // remaining_bytes is the number of blob bytes to be sent after this message - RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` - // blob contains the next chunk of the snapshot in the snapshot stream. 
- Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` -} - -func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } -func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*SnapshotResponse) ProtoMessage() {} -func (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } - -func (m *SnapshotResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *SnapshotResponse) GetRemainingBytes() uint64 { - if m != nil { - return m.RemainingBytes - } - return 0 -} - -func (m *SnapshotResponse) GetBlob() []byte { - if m != nil { - return m.Blob - } - return nil -} - -type WatchRequest struct { - // request_union is a request to either create a new watcher or cancel an existing watcher. - // - // Types that are valid to be assigned to RequestUnion: - // *WatchRequest_CreateRequest - // *WatchRequest_CancelRequest - RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` -} - -func (m *WatchRequest) Reset() { *m = WatchRequest{} } -func (m *WatchRequest) String() string { return proto.CompactTextString(m) } -func (*WatchRequest) ProtoMessage() {} -func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } - -type isWatchRequest_RequestUnion interface { - isWatchRequest_RequestUnion() - MarshalTo([]byte) (int, error) - Size() int -} - -type WatchRequest_CreateRequest struct { - CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"` -} -type WatchRequest_CancelRequest struct { - CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"` -} - -func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} -func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {} - -func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion { - if m != nil { - return m.RequestUnion - } - 
return nil -} - -func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest { - if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok { - return x.CreateRequest - } - return nil -} - -func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest { - if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok { - return x.CancelRequest - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{ - (*WatchRequest_CreateRequest)(nil), - (*WatchRequest_CancelRequest)(nil), - } -} - -func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*WatchRequest) - // request_union - switch x := m.RequestUnion.(type) { - case *WatchRequest_CreateRequest: - _ = b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.CreateRequest); err != nil { - return err - } - case *WatchRequest_CancelRequest: - _ = b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.CancelRequest); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x) - } - return nil -} - -func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*WatchRequest) - switch tag { - case 1: // request_union.create_request - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(WatchCreateRequest) - err := b.DecodeMessage(msg) - m.RequestUnion = &WatchRequest_CreateRequest{msg} - return true, err - case 2: // request_union.cancel_request - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := 
new(WatchCancelRequest) - err := b.DecodeMessage(msg) - m.RequestUnion = &WatchRequest_CancelRequest{msg} - return true, err - default: - return false, nil - } -} - -func _WatchRequest_OneofSizer(msg proto.Message) (n int) { - m := msg.(*WatchRequest) - // request_union - switch x := m.RequestUnion.(type) { - case *WatchRequest_CreateRequest: - s := proto.Size(x.CreateRequest) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *WatchRequest_CancelRequest: - s := proto.Size(x.CancelRequest) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type WatchCreateRequest struct { - // key is the key to register for watching. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the end of the range [key, range_end) to watch. If range_end is not given, - // only the key argument is watched. If range_end is equal to '\0', all keys greater than - // or equal to the key argument are watched. - // If the range_end is one bit larger than the given key, - // then all keys with the prefix (the given key) will be watched. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". - StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"` - // progress_notify is set so that the etcd server will periodically send a WatchResponse with - // no events to the new watcher if there are no recent events. It is useful when clients - // wish to recover a disconnected watcher starting from a recent known revision. - // The etcd server may decide how often it will send notifications based on current load. 
- ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"` - // filters filter the events at server side before it sends back to the watcher. - Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` - // If prev_kv is set, created watcher gets the previous KV before the event happens. - // If the previous KV is already compacted, nothing will be returned. - PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` -} - -func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } -func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCreateRequest) ProtoMessage() {} -func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{21} } - -func (m *WatchCreateRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *WatchCreateRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *WatchCreateRequest) GetStartRevision() int64 { - if m != nil { - return m.StartRevision - } - return 0 -} - -func (m *WatchCreateRequest) GetProgressNotify() bool { - if m != nil { - return m.ProgressNotify - } - return false -} - -func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { - if m != nil { - return m.Filters - } - return nil -} - -func (m *WatchCreateRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - -type WatchCancelRequest struct { - // watch_id is the watcher id to cancel so that no more events are transmitted. 
- WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` -} - -func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } -func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCancelRequest) ProtoMessage() {} -func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } - -func (m *WatchCancelRequest) GetWatchId() int64 { - if m != nil { - return m.WatchId - } - return 0 -} - -type WatchResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // watch_id is the ID of the watcher that corresponds to the response. - WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` - // created is set to true if the response is for a create watch request. - // The client should record the watch_id and expect to receive events for - // the created watcher from the same stream. - // All events sent to the created watcher will attach with the same watch_id. - Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` - // canceled is set to true if the response is for a cancel watch request. - // No further events will be sent to the canceled watcher. - Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"` - // compact_revision is set to the minimum index if a watcher tries to watch - // at a compacted index. - // - // This happens when creating a watcher at a compacted revision or the watcher cannot - // catch up with the progress of the key-value store. - // - // The client should treat the watcher as canceled and should not try to create any - // watcher with the same start_revision again. - CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - // cancel_reason indicates the reason for canceling the watcher. 
- CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` - Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` -} - -func (m *WatchResponse) Reset() { *m = WatchResponse{} } -func (m *WatchResponse) String() string { return proto.CompactTextString(m) } -func (*WatchResponse) ProtoMessage() {} -func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{23} } - -func (m *WatchResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *WatchResponse) GetWatchId() int64 { - if m != nil { - return m.WatchId - } - return 0 -} - -func (m *WatchResponse) GetCreated() bool { - if m != nil { - return m.Created - } - return false -} - -func (m *WatchResponse) GetCanceled() bool { - if m != nil { - return m.Canceled - } - return false -} - -func (m *WatchResponse) GetCompactRevision() int64 { - if m != nil { - return m.CompactRevision - } - return 0 -} - -func (m *WatchResponse) GetCancelReason() string { - if m != nil { - return m.CancelReason - } - return "" -} - -func (m *WatchResponse) GetEvents() []*mvccpb.Event { - if m != nil { - return m.Events - } - return nil -} - -type LeaseGrantRequest struct { - // TTL is the advisory time-to-live in seconds. - TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` - // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. 
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } -func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantRequest) ProtoMessage() {} -func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } - -func (m *LeaseGrantRequest) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseGrantRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -type LeaseGrantResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // ID is the lease ID for the granted lease. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the server chosen lease time-to-live in seconds. - TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` -} - -func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } -func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantResponse) ProtoMessage() {} -func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{25} } - -func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseGrantResponse) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseGrantResponse) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseGrantResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -type LeaseRevokeRequest struct { - // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. 
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } -func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeRequest) ProtoMessage() {} -func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } - -func (m *LeaseRevokeRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -type LeaseRevokeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } -func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeResponse) ProtoMessage() {} -func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} } - -func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type LeaseKeepAliveRequest struct { - // ID is the lease ID for the lease to keep alive. - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } -func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveRequest) ProtoMessage() {} -func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } - -func (m *LeaseKeepAliveRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -type LeaseKeepAliveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // ID is the lease ID from the keep alive request. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the new time-to-live for the lease. 
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` -} - -func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } -func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveResponse) ProtoMessage() {} -func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } - -func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseKeepAliveResponse) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseKeepAliveResponse) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -type LeaseTimeToLiveRequest struct { - // ID is the lease ID for the lease. - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // keys is true to query all the keys attached to this lease. - Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` -} - -func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } -func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveRequest) ProtoMessage() {} -func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } - -func (m *LeaseTimeToLiveRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseTimeToLiveRequest) GetKeys() bool { - if m != nil { - return m.Keys - } - return false -} - -type LeaseTimeToLiveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // ID is the lease ID from the keep alive request. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. 
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` - // Keys is the list of keys attached to this lease. - Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` -} - -func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } -func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveResponse) ProtoMessage() {} -func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } - -func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseTimeToLiveResponse) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { - if m != nil { - return m.GrantedTTL - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { - if m != nil { - return m.Keys - } - return nil -} - -type LeaseLeasesRequest struct { -} - -func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } -func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseLeasesRequest) ProtoMessage() {} -func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} } - -type LeaseStatus struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } -func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } -func (*LeaseStatus) ProtoMessage() {} -func (*LeaseStatus) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } - -func (m *LeaseStatus) GetID() int64 
{ - if m != nil { - return m.ID - } - return 0 -} - -type LeaseLeasesResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases" json:"leases,omitempty"` -} - -func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } -func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseLeasesResponse) ProtoMessage() {} -func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} } - -func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus { - if m != nil { - return m.Leases - } - return nil -} - -type Member struct { - // ID is the member ID for this member. - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // name is the human-readable name of the member. If the member is not started, the name will be an empty string. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // peerURLs is the list of URLs the member exposes to the cluster for communication. - PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"` - // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. 
- ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"` -} - -func (m *Member) Reset() { *m = Member{} } -func (m *Member) String() string { return proto.CompactTextString(m) } -func (*Member) ProtoMessage() {} -func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } - -func (m *Member) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Member) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Member) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - -func (m *Member) GetClientURLs() []string { - if m != nil { - return m.ClientURLs - } - return nil -} - -type MemberAddRequest struct { - // peerURLs is the list of URLs the added member will use to communicate with the cluster. - PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` -} - -func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } -func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } -func (*MemberAddRequest) ProtoMessage() {} -func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} } - -func (m *MemberAddRequest) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - -type MemberAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // member is the member information for the added member. - Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` - // members is a list of all members after adding the new member. 
- Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` -} - -func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } -func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } -func (*MemberAddResponse) ProtoMessage() {} -func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} } - -func (m *MemberAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberAddResponse) GetMember() *Member { - if m != nil { - return m.Member - } - return nil -} - -func (m *MemberAddResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type MemberRemoveRequest struct { - // ID is the member ID of the member to remove. - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } -func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveRequest) ProtoMessage() {} -func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} } - -func (m *MemberRemoveRequest) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -type MemberRemoveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // members is a list of all members after removing the member. 
- Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` -} - -func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } -func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveResponse) ProtoMessage() {} -func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} } - -func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberRemoveResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type MemberUpdateRequest struct { - // ID is the member ID of the member to update. - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // peerURLs is the new list of URLs the member will use to communicate with the cluster. - PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs" json:"peerURLs,omitempty"` -} - -func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } -func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateRequest) ProtoMessage() {} -func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} } - -func (m *MemberUpdateRequest) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *MemberUpdateRequest) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - -type MemberUpdateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // members is a list of all members after updating the member. 
- Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` -} - -func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } -func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateResponse) ProtoMessage() {} -func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } - -func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberUpdateResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type MemberListRequest struct { -} - -func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } -func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } -func (*MemberListRequest) ProtoMessage() {} -func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } - -type MemberListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // members is a list of all members associated with the cluster. 
- Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` -} - -func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } -func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } -func (*MemberListResponse) ProtoMessage() {} -func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } - -func (m *MemberListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberListResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type DefragmentRequest struct { -} - -func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } -func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } -func (*DefragmentRequest) ProtoMessage() {} -func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } - -type DefragmentResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } -func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } -func (*DefragmentResponse) ProtoMessage() {} -func (*DefragmentResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } - -func (m *DefragmentResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type MoveLeaderRequest struct { - // targetID is the node ID for the new leader. 
- TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` -} - -func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } -func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } -func (*MoveLeaderRequest) ProtoMessage() {} -func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } - -func (m *MoveLeaderRequest) GetTargetID() uint64 { - if m != nil { - return m.TargetID - } - return 0 -} - -type MoveLeaderResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } -func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } -func (*MoveLeaderResponse) ProtoMessage() {} -func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } - -func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AlarmRequest struct { - // action is the kind of alarm request to issue. The action - // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a - // raised alarm. - Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"` - // memberID is the ID of the member associated with the alarm. If memberID is 0, the - // alarm request covers all members. - MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` - // alarm is the type of alarm to consider for this request. 
- Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` -} - -func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } -func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } -func (*AlarmRequest) ProtoMessage() {} -func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } - -func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { - if m != nil { - return m.Action - } - return AlarmRequest_GET -} - -func (m *AlarmRequest) GetMemberID() uint64 { - if m != nil { - return m.MemberID - } - return 0 -} - -func (m *AlarmRequest) GetAlarm() AlarmType { - if m != nil { - return m.Alarm - } - return AlarmType_NONE -} - -type AlarmMember struct { - // memberID is the ID of the member associated with the raised alarm. - MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` - // alarm is the type of alarm which has been raised. - Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` -} - -func (m *AlarmMember) Reset() { *m = AlarmMember{} } -func (m *AlarmMember) String() string { return proto.CompactTextString(m) } -func (*AlarmMember) ProtoMessage() {} -func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } - -func (m *AlarmMember) GetMemberID() uint64 { - if m != nil { - return m.MemberID - } - return 0 -} - -func (m *AlarmMember) GetAlarm() AlarmType { - if m != nil { - return m.Alarm - } - return AlarmType_NONE -} - -type AlarmResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // alarms is a list of alarms associated with the alarm request. 
- Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms" json:"alarms,omitempty"` -} - -func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } -func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } -func (*AlarmResponse) ProtoMessage() {} -func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } - -func (m *AlarmResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AlarmResponse) GetAlarms() []*AlarmMember { - if m != nil { - return m.Alarms - } - return nil -} - -type StatusRequest struct { -} - -func (m *StatusRequest) Reset() { *m = StatusRequest{} } -func (m *StatusRequest) String() string { return proto.CompactTextString(m) } -func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } - -type StatusResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // version is the cluster protocol version used by the responding member. - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // dbSize is the size of the backend database, in bytes, of the responding member. - DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` - // leader is the member ID which the responding member believes is the current leader. - Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` - // raftIndex is the current raft index of the responding member. - RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` - // raftTerm is the current raft term of the responding member. 
- RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` -} - -func (m *StatusResponse) Reset() { *m = StatusResponse{} } -func (m *StatusResponse) String() string { return proto.CompactTextString(m) } -func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } - -func (m *StatusResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *StatusResponse) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *StatusResponse) GetDbSize() int64 { - if m != nil { - return m.DbSize - } - return 0 -} - -func (m *StatusResponse) GetLeader() uint64 { - if m != nil { - return m.Leader - } - return 0 -} - -func (m *StatusResponse) GetRaftIndex() uint64 { - if m != nil { - return m.RaftIndex - } - return 0 -} - -func (m *StatusResponse) GetRaftTerm() uint64 { - if m != nil { - return m.RaftTerm - } - return 0 -} - -type AuthEnableRequest struct { -} - -func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } -func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthEnableRequest) ProtoMessage() {} -func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } - -type AuthDisableRequest struct { -} - -func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } -func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthDisableRequest) ProtoMessage() {} -func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } - -type AuthenticateRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` -} - -func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } -func (m *AuthenticateRequest) String() string { 
return proto.CompactTextString(m) } -func (*AuthenticateRequest) ProtoMessage() {} -func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } - -func (m *AuthenticateRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthenticateRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -type AuthUserAddRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` -} - -func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } -func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddRequest) ProtoMessage() {} -func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } - -func (m *AuthUserAddRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserAddRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -type AuthUserGetRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } -func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetRequest) ProtoMessage() {} -func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } - -func (m *AuthUserGetRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type AuthUserDeleteRequest struct { - // name is the name of the user to delete. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } -func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteRequest) ProtoMessage() {} -func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } - -func (m *AuthUserDeleteRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type AuthUserChangePasswordRequest struct { - // name is the name of the user whose password is being changed. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // password is the new password for the user. - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` -} - -func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} } -func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserChangePasswordRequest) ProtoMessage() {} -func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{59} -} - -func (m *AuthUserChangePasswordRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserChangePasswordRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -type AuthUserGrantRoleRequest struct { - // user is the name of the user which should be granted a given role. - User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` - // role is the name of the role to grant to the user. 
- Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } -func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleRequest) ProtoMessage() {} -func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } - -func (m *AuthUserGrantRoleRequest) GetUser() string { - if m != nil { - return m.User - } - return "" -} - -func (m *AuthUserGrantRoleRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthUserRevokeRoleRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } -func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleRequest) ProtoMessage() {} -func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } - -func (m *AuthUserRevokeRoleRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserRevokeRoleRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthRoleAddRequest struct { - // name is the name of the role to add to the authentication system. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } -func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleAddRequest) ProtoMessage() {} -func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} } - -func (m *AuthRoleAddRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type AuthRoleGetRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } -func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetRequest) ProtoMessage() {} -func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } - -func (m *AuthRoleGetRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthUserListRequest struct { -} - -func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } -func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserListRequest) ProtoMessage() {} -func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } - -type AuthRoleListRequest struct { -} - -func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } -func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListRequest) ProtoMessage() {} -func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} } - -type AuthRoleDeleteRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } -func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteRequest) 
ProtoMessage() {} -func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } - -func (m *AuthRoleDeleteRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthRoleGrantPermissionRequest struct { - // name is the name of the role which will be granted the permission. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // perm is the permission to grant to the role. - Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm" json:"perm,omitempty"` -} - -func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} } -func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} -func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{67} -} - -func (m *AuthRoleGrantPermissionRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { - if m != nil { - return m.Perm - } - return nil -} - -type AuthRoleRevokePermissionRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` -} - -func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } -func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} -func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{68} -} - -func (m *AuthRoleRevokePermissionRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -func (m *AuthRoleRevokePermissionRequest) 
GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { - if m != nil { - return m.RangeEnd - } - return "" -} - -type AuthEnableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } -func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthEnableResponse) ProtoMessage() {} -func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } - -func (m *AuthEnableResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthDisableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } -func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthDisableResponse) ProtoMessage() {} -func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} } - -func (m *AuthDisableResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthenticateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // token is an authorized token that can be used in succeeding RPCs - Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` -} - -func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } -func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } -func (*AuthenticateResponse) ProtoMessage() {} -func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } - -func (m *AuthenticateResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m 
*AuthenticateResponse) GetToken() string { - if m != nil { - return m.Token - } - return "" -} - -type AuthUserAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } -func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddResponse) ProtoMessage() {} -func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } - -func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` -} - -func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } -func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetResponse) ProtoMessage() {} -func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} } - -func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthUserGetResponse) GetRoles() []string { - if m != nil { - return m.Roles - } - return nil -} - -type AuthUserDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } -func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteResponse) ProtoMessage() {} -func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} } - -func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserChangePasswordResponse struct { - Header 
*ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} } -func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserChangePasswordResponse) ProtoMessage() {} -func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{75} -} - -func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserGrantRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } -func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleResponse) ProtoMessage() {} -func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{76} } - -func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserRevokeRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } -func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleResponse) ProtoMessage() {} -func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{77} } - -func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } -func (m *AuthRoleAddResponse) String() string { 
return proto.CompactTextString(m) } -func (*AuthRoleAddResponse) ProtoMessage() {} -func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{78} } - -func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm" json:"perm,omitempty"` -} - -func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } -func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetResponse) ProtoMessage() {} -func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{79} } - -func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission { - if m != nil { - return m.Perm - } - return nil -} - -type AuthRoleListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` -} - -func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } -func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListResponse) ProtoMessage() {} -func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{80} } - -func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthRoleListResponse) GetRoles() []string { - if m != nil { - return m.Roles - } - return nil -} - -type AuthUserListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` -} - -func (m 
*AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } -func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserListResponse) ProtoMessage() {} -func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{81} } - -func (m *AuthUserListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthUserListResponse) GetUsers() []string { - if m != nil { - return m.Users - } - return nil -} - -type AuthRoleDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } -func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteResponse) ProtoMessage() {} -func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{82} } - -func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleGrantPermissionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} } -func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} -func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{83} -} - -func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleRevokePermissionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} } -func (m 
*AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} -func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{84} -} - -func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func init() { - proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") - proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") - proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") - proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest") - proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse") - proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest") - proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse") - proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp") - proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp") - proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare") - proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest") - proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse") - proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest") - proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse") - proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest") - proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest") - proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse") - proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse") - proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest") - proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse") - proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest") - 
proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest") - proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest") - proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse") - proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest") - proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse") - proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest") - proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") - proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") - proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") - proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") - proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse") - proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest") - proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus") - proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse") - proto.RegisterType((*Member)(nil), "etcdserverpb.Member") - proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest") - proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse") - proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest") - proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse") - proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest") - proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse") - proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest") - proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") - proto.RegisterType((*DefragmentRequest)(nil), 
"etcdserverpb.DefragmentRequest") - proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") - proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest") - proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse") - proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest") - proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember") - proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse") - proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse") - proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest") - proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest") - proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest") - proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest") - proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest") - proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest") - proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest") - proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest") - proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest") - proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest") - proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest") - proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest") - proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest") - proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest") - proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest") - 
proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest") - proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse") - proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse") - proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse") - proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse") - proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse") - proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse") - proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse") - proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse") - proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse") - proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse") - proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse") - proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse") - proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse") - proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse") - proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse") - proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse") - proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareResult", 
Compare_CompareResult_name, Compare_CompareResult_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) - proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) - proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for KV service - -type KVClient interface { - // Range gets the keys in the range from the key-value store. - Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. - Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) - // Txn processes multiple requests in a single transaction. - // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. - Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) - // Compact compacts the event history in the etcd key-value store. 
The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. - Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) -} - -type kVClient struct { - cc *grpc.ClientConn -} - -func NewKVClient(cc *grpc.ClientConn) KVClient { - return &kVClient{cc} -} - -func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) { - out := new(RangeResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { - out := new(PutResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) { - out := new(DeleteRangeResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) { - out := new(TxnResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) { - out := new(CompactionResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for KV service - -type KVServer interface { - // Range gets the keys in the range from the key-value store. 
- Range(context.Context, *RangeRequest) (*RangeResponse, error) - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. - Put(context.Context, *PutRequest) (*PutResponse, error) - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error) - // Txn processes multiple requests in a single transaction. - // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. - Txn(context.Context, *TxnRequest) (*TxnResponse, error) - // Compact compacts the event history in the etcd key-value store. The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. 
- Compact(context.Context, *CompactionRequest) (*CompactionResponse, error) -} - -func RegisterKVServer(s *grpc.Server, srv KVServer) { - s.RegisterService(&_KV_serviceDesc, srv) -} - -func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RangeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Range(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Range", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Range(ctx, req.(*RangeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PutRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Put(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Put", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Put(ctx, req.(*PutRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRangeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).DeleteRange(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/DeleteRange", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Txn_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TxnRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Txn(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Txn", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Txn(ctx, req.(*TxnRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CompactionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Compact(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Compact", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Compact(ctx, req.(*CompactionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _KV_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.KV", - HandlerType: (*KVServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Range", - Handler: _KV_Range_Handler, - }, - { - MethodName: "Put", - Handler: _KV_Put_Handler, - }, - { - MethodName: "DeleteRange", - Handler: _KV_DeleteRange_Handler, - }, - { - MethodName: "Txn", - Handler: _KV_Txn_Handler, - }, - { - MethodName: "Compact", - Handler: _KV_Compact_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -// Client API for Watch service - -type WatchClient interface { - // Watch watches for events happening or that have happened. Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. 
One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. - Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) -} - -type watchClient struct { - cc *grpc.ClientConn -} - -func NewWatchClient(cc *grpc.ClientConn) WatchClient { - return &watchClient{cc} -} - -func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Watch/Watch", opts...) - if err != nil { - return nil, err - } - x := &watchWatchClient{stream} - return x, nil -} - -type Watch_WatchClient interface { - Send(*WatchRequest) error - Recv() (*WatchResponse, error) - grpc.ClientStream -} - -type watchWatchClient struct { - grpc.ClientStream -} - -func (x *watchWatchClient) Send(m *WatchRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *watchWatchClient) Recv() (*WatchResponse, error) { - m := new(WatchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for Watch service - -type WatchServer interface { - // Watch watches for events happening or that have happened. Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. 
- Watch(Watch_WatchServer) error -} - -func RegisterWatchServer(s *grpc.Server, srv WatchServer) { - s.RegisterService(&_Watch_serviceDesc, srv) -} - -func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(WatchServer).Watch(&watchWatchServer{stream}) -} - -type Watch_WatchServer interface { - Send(*WatchResponse) error - Recv() (*WatchRequest, error) - grpc.ServerStream -} - -type watchWatchServer struct { - grpc.ServerStream -} - -func (x *watchWatchServer) Send(m *WatchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *watchWatchServer) Recv() (*WatchRequest, error) { - m := new(WatchRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Watch_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Watch", - HandlerType: (*WatchServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Watch", - Handler: _Watch_Watch_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// Client API for Lease service - -type LeaseClient interface { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. All keys attached to the lease will be expired and - // deleted if the lease expires. Each expired key generates a delete event in the event history. - LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. - LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. 
- LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) - // LeaseTimeToLive retrieves lease information. - LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) - // LeaseLeases lists all existing leases. - LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) -} - -type leaseClient struct { - cc *grpc.ClientConn -} - -func NewLeaseClient(cc *grpc.ClientConn) LeaseClient { - return &leaseClient{cc} -} - -func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) { - out := new(LeaseGrantResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) { - out := new(LeaseRevokeResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Lease_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Lease/LeaseKeepAlive", opts...) 
- if err != nil { - return nil, err - } - x := &leaseLeaseKeepAliveClient{stream} - return x, nil -} - -type Lease_LeaseKeepAliveClient interface { - Send(*LeaseKeepAliveRequest) error - Recv() (*LeaseKeepAliveResponse, error) - grpc.ClientStream -} - -type leaseLeaseKeepAliveClient struct { - grpc.ClientStream -} - -func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { - m := new(LeaseKeepAliveResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { - out := new(LeaseTimeToLiveResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) { - out := new(LeaseLeasesResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Lease service - -type LeaseServer interface { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. All keys attached to the lease will be expired and - // deleted if the lease expires. Each expired key generates a delete event in the event history. - LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error) - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. 
- LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error) - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. - LeaseKeepAlive(Lease_LeaseKeepAliveServer) error - // LeaseTimeToLive retrieves lease information. - LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) - // LeaseLeases lists all existing leases. - LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error) -} - -func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { - s.RegisterService(&_Lease_serviceDesc, srv) -} - -func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseGrantRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseGrant(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseGrant", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseRevokeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseRevoke(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseRevoke", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lease_LeaseKeepAlive_Handler(srv 
interface{}, stream grpc.ServerStream) error { - return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream}) -} - -type Lease_LeaseKeepAliveServer interface { - Send(*LeaseKeepAliveResponse) error - Recv() (*LeaseKeepAliveRequest, error) - grpc.ServerStream -} - -type leaseLeaseKeepAliveServer struct { - grpc.ServerStream -} - -func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) { - m := new(LeaseKeepAliveRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseTimeToLiveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseTimeToLive(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseLeasesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseLeases(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseLeases", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Lease_serviceDesc = 
grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Lease", - HandlerType: (*LeaseServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LeaseGrant", - Handler: _Lease_LeaseGrant_Handler, - }, - { - MethodName: "LeaseRevoke", - Handler: _Lease_LeaseRevoke_Handler, - }, - { - MethodName: "LeaseTimeToLive", - Handler: _Lease_LeaseTimeToLive_Handler, - }, - { - MethodName: "LeaseLeases", - Handler: _Lease_LeaseLeases_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "LeaseKeepAlive", - Handler: _Lease_LeaseKeepAlive_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// Client API for Cluster service - -type ClusterClient interface { - // MemberAdd adds a member into the cluster. - MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) - // MemberRemove removes an existing member from the cluster. - MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) - // MemberUpdate updates the member configuration. - MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) - // MemberList lists all the members in the cluster. - MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) -} - -type clusterClient struct { - cc *grpc.ClientConn -} - -func NewClusterClient(cc *grpc.ClientConn) ClusterClient { - return &clusterClient{cc} -} - -func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) { - out := new(MemberAddResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) { - out := new(MemberRemoveResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) { - out := new(MemberUpdateResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) { - out := new(MemberListResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Cluster service - -type ClusterServer interface { - // MemberAdd adds a member into the cluster. - MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error) - // MemberRemove removes an existing member from the cluster. - MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error) - // MemberUpdate updates the member configuration. - MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error) - // MemberList lists all the members in the cluster. 
- MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error) -} - -func RegisterClusterServer(s *grpc.Server, srv ClusterServer) { - s.RegisterService(&_Cluster_serviceDesc, srv) -} - -func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberRemoveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberRemove(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberRemove", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberUpdateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberUpdate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberUpdate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) 
{ - return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Cluster_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Cluster", - HandlerType: (*ClusterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "MemberAdd", - Handler: _Cluster_MemberAdd_Handler, - }, - { - MethodName: "MemberRemove", - Handler: _Cluster_MemberRemove_Handler, - }, - { - MethodName: "MemberUpdate", - Handler: _Cluster_MemberUpdate_Handler, - }, - { - MethodName: "MemberList", - Handler: _Cluster_MemberList_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -// Client API for Maintenance service - -type MaintenanceClient interface { - // Alarm activates, deactivates, and queries alarms regarding cluster health. - Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) - // Status gets the status of the member. - Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) - // Defragment defragments a member's backend database to recover storage space. - Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) - // Hash computes the hash of the KV's backend. 
- // This is designed for testing; do not use this in production when there - // are ongoing transactions. - Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) - // HashKV computes the hash of all MVCC keys up to a given revision. - HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) - // MoveLeader requests current leader node to transfer its leadership to transferee. - MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) -} - -type maintenanceClient struct { - cc *grpc.ClientConn -} - -func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient { - return &maintenanceClient{cc} -} - -func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) { - out := new(AlarmResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) { - out := new(DefragmentResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) { - out := new(HashResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) { - out := new(HashKVResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Maintenance_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Maintenance/Snapshot", opts...) - if err != nil { - return nil, err - } - x := &maintenanceSnapshotClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Maintenance_SnapshotClient interface { - Recv() (*SnapshotResponse, error) - grpc.ClientStream -} - -type maintenanceSnapshotClient struct { - grpc.ClientStream -} - -func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { - m := new(SnapshotResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { - out := new(MoveLeaderResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Maintenance service - -type MaintenanceServer interface { - // Alarm activates, deactivates, and queries alarms regarding cluster health. - Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) - // Status gets the status of the member. - Status(context.Context, *StatusRequest) (*StatusResponse, error) - // Defragment defragments a member's backend database to recover storage space. - Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) - // Hash computes the hash of the KV's backend. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. - Hash(context.Context, *HashRequest) (*HashResponse, error) - // HashKV computes the hash of all MVCC keys up to a given revision. - HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error) - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error - // MoveLeader requests current leader node to transfer its leadership to transferee. 
- MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) -} - -func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { - s.RegisterService(&_Maintenance_serviceDesc, srv) -} - -func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AlarmRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Alarm(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Alarm", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DefragmentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Defragment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Defragment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HashRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Hash(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Hash", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HashKVRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).HashKV(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/HashKV", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SnapshotRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream}) -} - -type Maintenance_SnapshotServer interface { - Send(*SnapshotResponse) error - grpc.ServerStream -} - -type maintenanceSnapshotServer struct { - grpc.ServerStream -} - -func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MoveLeaderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).MoveLeader(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/MoveLeader", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Maintenance_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Maintenance", - HandlerType: (*MaintenanceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Alarm", - Handler: _Maintenance_Alarm_Handler, - }, - { - MethodName: "Status", - Handler: _Maintenance_Status_Handler, - }, - { - MethodName: "Defragment", - Handler: _Maintenance_Defragment_Handler, - }, - { - MethodName: "Hash", - Handler: _Maintenance_Hash_Handler, - }, - { - MethodName: "HashKV", - Handler: _Maintenance_HashKV_Handler, - }, - { - MethodName: "MoveLeader", - Handler: _Maintenance_MoveLeader_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Snapshot", - Handler: _Maintenance_Snapshot_Handler, - ServerStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// Client API for Auth service - -type AuthClient interface { - // AuthEnable enables authentication. - AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) - // AuthDisable disables authentication. - AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) - // Authenticate processes an authenticate request. - Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) - // UserAdd adds a new user. 
- UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) - // UserGet gets detailed user information. - UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) - // UserList gets a list of all users. - UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) - // UserDelete deletes a specified user. - UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) - // UserChangePassword changes the password of a specified user. - UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) - // UserGrant grants a role to a specified user. - UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) - // UserRevokeRole revokes a role of specified user. - UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. - RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) - // RoleGet gets detailed role information. - RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) - // RoleList gets lists of all roles. - RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) - // RoleDelete deletes a specified role. - RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) - // RoleGrantPermission grants a permission of a specified key or range to a specified role. 
- RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) - // RoleRevokePermission revokes a key or range permission of a specified role. - RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) -} - -type authClient struct { - cc *grpc.ClientConn -} - -func NewAuthClient(cc *grpc.ClientConn) AuthClient { - return &authClient{cc} -} - -func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { - out := new(AuthEnableResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { - out := new(AuthDisableResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { - out := new(AuthenticateResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { - out := new(AuthUserAddResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { - out := new(AuthUserGetResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { - out := new(AuthUserListResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { - out := new(AuthUserDeleteResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { - out := new(AuthUserChangePasswordResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { - out := new(AuthUserGrantRoleResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { - out := new(AuthUserRevokeRoleResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { - out := new(AuthRoleAddResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { - out := new(AuthRoleGetResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { - out := new(AuthRoleListResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { - out := new(AuthRoleDeleteResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { - out := new(AuthRoleGrantPermissionResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { - out := new(AuthRoleRevokePermissionResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Auth service - -type AuthServer interface { - // AuthEnable enables authentication. - AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) - // AuthDisable disables authentication. - AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) - // Authenticate processes an authenticate request. - Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) - // UserAdd adds a new user. - UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error) - // UserGet gets detailed user information. - UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error) - // UserList gets a list of all users. - UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error) - // UserDelete deletes a specified user. - UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) - // UserChangePassword changes the password of a specified user. - UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) - // UserGrant grants a role to a specified user. - UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) - // UserRevokeRole revokes a role of specified user. - UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. - RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error) - // RoleGet gets detailed role information. - RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error) - // RoleList gets lists of all roles. - RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error) - // RoleDelete deletes a specified role. 
- RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) - // RoleGrantPermission grants a permission of a specified key or range to a specified role. - RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) - // RoleRevokePermission revokes a key or range permission of a specified role. - RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) -} - -func RegisterAuthServer(s *grpc.Server, srv AuthServer) { - s.RegisterService(&_Auth_serviceDesc, srv) -} - -func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthEnableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).AuthEnable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/AuthEnable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthDisableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).AuthDisable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/AuthDisable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthenticateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).Authenticate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/Authenticate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserGetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserGet(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserGet", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserListRequest) - if err := dec(in); err != nil { - return nil, err 
- } - if interceptor == nil { - return srv.(AuthServer).UserList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserChangePasswordRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserChangePassword(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserChangePassword", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserGrantRoleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(AuthServer).UserGrantRole(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserGrantRole", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserRevokeRoleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserRevokeRole(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserRevokeRole", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleGetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleGet(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - 
FullMethod: "/etcdserverpb.Auth/RoleGet", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleGrantPermissionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleGrantPermission(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleGrantPermission", - } - handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleRevokePermissionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleRevokePermission(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleRevokePermission", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Auth_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Auth", - HandlerType: (*AuthServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "AuthEnable", - Handler: _Auth_AuthEnable_Handler, - }, - { - MethodName: "AuthDisable", - Handler: _Auth_AuthDisable_Handler, - }, - { - MethodName: "Authenticate", - Handler: _Auth_Authenticate_Handler, - }, - { - MethodName: "UserAdd", - Handler: _Auth_UserAdd_Handler, - }, - { - MethodName: "UserGet", - Handler: _Auth_UserGet_Handler, - }, - { - MethodName: "UserList", - Handler: _Auth_UserList_Handler, - }, - { - MethodName: "UserDelete", - Handler: _Auth_UserDelete_Handler, - }, - { - MethodName: "UserChangePassword", - Handler: _Auth_UserChangePassword_Handler, - }, - { - MethodName: "UserGrantRole", - Handler: _Auth_UserGrantRole_Handler, - }, - { - MethodName: "UserRevokeRole", - Handler: _Auth_UserRevokeRole_Handler, - }, - { - MethodName: "RoleAdd", - Handler: _Auth_RoleAdd_Handler, - }, - { - MethodName: "RoleGet", - Handler: _Auth_RoleGet_Handler, - }, - { - MethodName: "RoleList", - Handler: _Auth_RoleList_Handler, 
- }, - { - MethodName: "RoleDelete", - Handler: _Auth_RoleDelete_Handler, - }, - { - MethodName: "RoleGrantPermission", - Handler: _Auth_RoleGrantPermission_Handler, - }, - { - MethodName: "RoleRevokePermission", - Handler: _Auth_RoleRevokePermission_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ClusterId != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) - } - if m.MemberId != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) - } - if m.Revision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - } - if m.RaftTerm != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) - } - return i, nil -} - -func (m *RangeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - if m.Limit != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) - } - if m.Revision != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - } - if m.SortOrder != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) - } - if m.SortTarget != 0 { - dAtA[i] = 0x30 - i++ - 
i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) - } - if m.Serializable { - dAtA[i] = 0x38 - i++ - if m.Serializable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.KeysOnly { - dAtA[i] = 0x40 - i++ - if m.KeysOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.CountOnly { - dAtA[i] = 0x48 - i++ - if m.CountOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.MinModRevision != 0 { - dAtA[i] = 0x50 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) - } - if m.MaxModRevision != 0 { - dAtA[i] = 0x58 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) - } - if m.MinCreateRevision != 0 { - dAtA[i] = 0x60 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) - } - if m.MaxCreateRevision != 0 { - dAtA[i] = 0x68 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) - } - return i, nil -} - -func (m *RangeResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n1, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if len(m.Kvs) > 0 { - for _, msg := range m.Kvs { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.More { - dAtA[i] = 0x18 - i++ - if m.More { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Count != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Count)) - } - return i, nil -} - -func (m *PutRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err 
- } - return dAtA[:n], nil -} - -func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.Value) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - } - if m.Lease != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) - } - if m.PrevKv { - dAtA[i] = 0x20 - i++ - if m.PrevKv { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.IgnoreValue { - dAtA[i] = 0x28 - i++ - if m.IgnoreValue { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.IgnoreLease { - dAtA[i] = 0x30 - i++ - if m.IgnoreLease { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *PutResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n2, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.PrevKv != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.PrevKv.Size())) - n3, err := m.PrevKv.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - return i, nil -} - -func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += 
copy(dAtA[i:], m.Key) - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - if m.PrevKv { - dAtA[i] = 0x18 - i++ - if m.PrevKv { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n4, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.Deleted != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) - } - if len(m.PrevKvs) > 0 { - for _, msg := range m.PrevKvs { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RequestOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Request != nil { - nn5, err := m.Request.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn5 - } - return i, nil -} - -func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.RequestRange != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RequestRange.Size())) - n6, err := m.RequestRange.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - return i, nil -} -func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) { - i := 0 - 
if m.RequestPut != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RequestPut.Size())) - n7, err := m.RequestPut.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} -func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.RequestDeleteRange != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RequestDeleteRange.Size())) - n8, err := m.RequestDeleteRange.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - return i, nil -} -func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.RequestTxn != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RequestTxn.Size())) - n9, err := m.RequestTxn.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - return i, nil -} -func (m *ResponseOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Response != nil { - nn10, err := m.Response.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn10 - } - return i, nil -} - -func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ResponseRange != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size())) - n11, err := m.ResponseRange.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - return i, nil -} -func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ResponsePut != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size())) - n12, err := m.ResponsePut.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - return i, nil -} -func (m *ResponseOp_ResponseDeleteRange) 
MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ResponseDeleteRange != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size())) - n13, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - return i, nil -} -func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ResponseTxn != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ResponseTxn.Size())) - n14, err := m.ResponseTxn.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - return i, nil -} -func (m *Compare) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Compare) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Result != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Result)) - } - if m.Target != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Target)) - } - if len(m.Key) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if m.TargetUnion != nil { - nn15, err := m.TargetUnion.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn15 - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x4 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - return i, nil -} - -func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) { - i := 0 - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Version)) - return i, nil -} -func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) { - i := 0 - dAtA[i] = 0x28 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) - return i, nil -} -func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) { - i := 0 - dAtA[i] = 0x30 - 
i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) - return i, nil -} -func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Value != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - } - return i, nil -} -func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) { - i := 0 - dAtA[i] = 0x40 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) - return i, nil -} -func (m *TxnRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Compare) > 0 { - for _, msg := range m.Compare { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Success) > 0 { - for _, msg := range m.Success { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Failure) > 0 { - for _, msg := range m.Failure { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *TxnResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n16, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - if m.Succeeded { - dAtA[i] = 
0x10 - i++ - if m.Succeeded { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Responses) > 0 { - for _, msg := range m.Responses { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Revision != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - } - if m.Physical { - dAtA[i] = 0x10 - i++ - if m.Physical { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n17, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - return i, nil -} - -func (m *HashRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashKVRequest) MarshalTo(dAtA 
[]byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Revision != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - } - return i, nil -} - -func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n18, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Hash != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) - } - if m.CompactRevision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) - } - return i, nil -} - -func (m *HashResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n19, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - if m.Hash != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) - } - return i, nil -} - -func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n20, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - } - if m.RemainingBytes != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) - } - if len(m.Blob) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) - i += copy(dAtA[i:], m.Blob) - } - return i, nil -} - -func (m *WatchRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.RequestUnion != nil { - nn21, err := m.RequestUnion.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn21 - } - return i, nil -} - -func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.CreateRequest != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size())) - n22, err := m.CreateRequest.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - return i, nil -} -func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.CancelRequest != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.CancelRequest.Size())) - n23, err := m.CancelRequest.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - return i, nil -} -func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - if m.StartRevision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) - } - if m.ProgressNotify { - dAtA[i] = 0x20 - i++ - if m.ProgressNotify { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Filters) > 0 { - dAtA25 := make([]byte, len(m.Filters)*10) - var j24 int - for _, num := range m.Filters { - for num >= 1<<7 { - dAtA25[j24] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j24++ - } - dAtA25[j24] = uint8(num) - j24++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintRpc(dAtA, i, uint64(j24)) - i += copy(dAtA[i:], dAtA25[:j24]) - } - if m.PrevKv { - dAtA[i] = 0x30 - i++ - if m.PrevKv { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.WatchId != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - } - return i, nil -} - -func (m *WatchResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n26, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil 
{ - return 0, err - } - i += n26 - } - if m.WatchId != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - } - if m.Created { - dAtA[i] = 0x18 - i++ - if m.Created { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Canceled { - dAtA[i] = 0x20 - i++ - if m.Canceled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.CompactRevision != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) - } - if len(m.CancelReason) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) - i += copy(dAtA[i:], m.CancelReason) - } - if len(m.Events) > 0 { - for _, msg := range m.Events { - dAtA[i] = 0x5a - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TTL != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - } - if m.ID != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - return i, nil -} - -func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n27, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - } - if m.ID != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - 
if m.TTL != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - } - if len(m.Error) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - } - return i, nil -} - -func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - return i, nil -} - -func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n28, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - } - return i, nil -} - -func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - return i, nil -} - -func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { 
- dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n29, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - } - if m.ID != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - if m.TTL != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - } - return i, nil -} - -func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - if m.Keys { - dAtA[i] = 0x10 - i++ - if m.Keys { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n30, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 - } - if m.ID != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - if m.TTL != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - } - if m.GrantedTTL != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) - } - if len(m.Keys) > 0 { - for _, b := range m.Keys { - dAtA[i] = 0x2a - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(b))) - i += copy(dAtA[i:], b) - } - } - return i, nil -} - -func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - return i, nil -} - -func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n31, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } - if len(m.Leases) > 0 { - for _, msg := range m.Leases { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Member) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Member) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - if len(m.Name) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { 
- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.ClientURLs) > 0 { - for _, s := range m.ClientURLs { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n32, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - } - if m.Member != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size())) - n33, err := m.Member.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - } - if len(m.Members) > 0 { - for _, msg := range m.Members { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MemberRemoveRequest) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - return i, nil -} - -func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n34, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 - } - if len(m.Members) > 0 { - for _, msg := range m.Members { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - 
return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n35, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - } - if len(m.Members) > 0 { - for _, msg := range m.Members { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MemberListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n36, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - } - if len(m.Members) > 0 { - for _, msg := range m.Members { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = 
i - var l int - _ = l - return i, nil -} - -func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n37, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - } - return i, nil -} - -func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TargetID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) - } - return i, nil -} - -func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n38, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n38 - } - return i, nil -} - -func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Action != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Action)) - } - if m.MemberID != 0 { - dAtA[i] = 
0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) - } - if m.Alarm != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) - } - return i, nil -} - -func (m *AlarmMember) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MemberID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) - } - if m.Alarm != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) - } - return i, nil -} - -func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n39, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - } - if len(m.Alarms) > 0 { - for _, msg := range m.Alarms { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *StatusRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *StatusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func 
(m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n40, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 - } - if len(m.Version) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - } - if m.DbSize != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) - } - if m.Leader != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) - } - if m.RaftIndex != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) - } - if m.RaftTerm != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) - } - return i, nil -} - -func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if 
len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - return i, nil -} - -func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - return i, nil -} - -func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - return i, nil -} - -func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - return i, nil -} - -func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserChangePasswordRequest) 
MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - return i, nil -} - -func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.User) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - } - if len(m.Role) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - } - return i, nil -} - -func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Role) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - } - return i, nil -} - -func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = 
encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - return i, nil -} - -func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Role) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - } - return i, nil -} - -func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Role) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - } - return i, nil -} - -func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if m.Perm != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Perm.Size())) - n41, err := m.Perm.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 - } - return i, nil -} - -func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Role) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - } - if len(m.Key) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - return i, nil -} - -func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n42, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 - } - return i, nil -} - -func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n43, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - } - return i, nil -} - -func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n44, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n44 - } - if len(m.Token) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - } - return i, nil -} - -func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n45, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n45 - } - return i, nil -} - -func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n46, err := 
m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n46 - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n47, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n47 - } - return i, nil -} - -func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n48, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n48 - } - return i, nil -} - -func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n49, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n49 - } - return i, nil -} - -func 
(m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n50, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n50 - } - return i, nil -} - -func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n51, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n51 - } - return i, nil -} - -func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n52, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n52 - } - if len(m.Perm) > 0 { - for _, msg := range m.Perm { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if 
err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n53, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n53 - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n54, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n54 - } - if len(m.Users) > 0 { - for _, s := range m.Users { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n55, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n55 - } - return i, nil -} - -func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA 
[]byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n56, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n56 - } - return i, nil -} - -func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n57, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n57 - } - return i, nil -} - -func encodeFixed64Rpc(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Rpc(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *ResponseHeader) Size() (n int) { - var l int - _ = l - if m.ClusterId != 0 { - n += 1 + sovRpc(uint64(m.ClusterId)) - } - if m.MemberId != 0 { - n += 1 + 
sovRpc(uint64(m.MemberId)) - } - if m.Revision != 0 { - n += 1 + sovRpc(uint64(m.Revision)) - } - if m.RaftTerm != 0 { - n += 1 + sovRpc(uint64(m.RaftTerm)) - } - return n -} - -func (m *RangeRequest) Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Limit != 0 { - n += 1 + sovRpc(uint64(m.Limit)) - } - if m.Revision != 0 { - n += 1 + sovRpc(uint64(m.Revision)) - } - if m.SortOrder != 0 { - n += 1 + sovRpc(uint64(m.SortOrder)) - } - if m.SortTarget != 0 { - n += 1 + sovRpc(uint64(m.SortTarget)) - } - if m.Serializable { - n += 2 - } - if m.KeysOnly { - n += 2 - } - if m.CountOnly { - n += 2 - } - if m.MinModRevision != 0 { - n += 1 + sovRpc(uint64(m.MinModRevision)) - } - if m.MaxModRevision != 0 { - n += 1 + sovRpc(uint64(m.MaxModRevision)) - } - if m.MinCreateRevision != 0 { - n += 1 + sovRpc(uint64(m.MinCreateRevision)) - } - if m.MaxCreateRevision != 0 { - n += 1 + sovRpc(uint64(m.MaxCreateRevision)) - } - return n -} - -func (m *RangeResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Kvs) > 0 { - for _, e := range m.Kvs { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.More { - n += 2 - } - if m.Count != 0 { - n += 1 + sovRpc(uint64(m.Count)) - } - return n -} - -func (m *PutRequest) Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Lease != 0 { - n += 1 + sovRpc(uint64(m.Lease)) - } - if m.PrevKv { - n += 2 - } - if m.IgnoreValue { - n += 2 - } - if m.IgnoreLease { - n += 2 - } - return n -} - -func (m *PutResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.PrevKv != nil { - l = m.PrevKv.Size() - n += 1 + l + 
sovRpc(uint64(l)) - } - return n -} - -func (m *DeleteRangeRequest) Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.PrevKv { - n += 2 - } - return n -} - -func (m *DeleteRangeResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Deleted != 0 { - n += 1 + sovRpc(uint64(m.Deleted)) - } - if len(m.PrevKvs) > 0 { - for _, e := range m.PrevKvs { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *RequestOp) Size() (n int) { - var l int - _ = l - if m.Request != nil { - n += m.Request.Size() - } - return n -} - -func (m *RequestOp_RequestRange) Size() (n int) { - var l int - _ = l - if m.RequestRange != nil { - l = m.RequestRange.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *RequestOp_RequestPut) Size() (n int) { - var l int - _ = l - if m.RequestPut != nil { - l = m.RequestPut.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *RequestOp_RequestDeleteRange) Size() (n int) { - var l int - _ = l - if m.RequestDeleteRange != nil { - l = m.RequestDeleteRange.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *RequestOp_RequestTxn) Size() (n int) { - var l int - _ = l - if m.RequestTxn != nil { - l = m.RequestTxn.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *ResponseOp) Size() (n int) { - var l int - _ = l - if m.Response != nil { - n += m.Response.Size() - } - return n -} - -func (m *ResponseOp_ResponseRange) Size() (n int) { - var l int - _ = l - if m.ResponseRange != nil { - l = m.ResponseRange.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *ResponseOp_ResponsePut) Size() (n int) { - var l int - _ = l - if m.ResponsePut != nil { - l = m.ResponsePut.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m 
*ResponseOp_ResponseDeleteRange) Size() (n int) { - var l int - _ = l - if m.ResponseDeleteRange != nil { - l = m.ResponseDeleteRange.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *ResponseOp_ResponseTxn) Size() (n int) { - var l int - _ = l - if m.ResponseTxn != nil { - l = m.ResponseTxn.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *Compare) Size() (n int) { - var l int - _ = l - if m.Result != 0 { - n += 1 + sovRpc(uint64(m.Result)) - } - if m.Target != 0 { - n += 1 + sovRpc(uint64(m.Target)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.TargetUnion != nil { - n += m.TargetUnion.Size() - } - l = len(m.RangeEnd) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *Compare_Version) Size() (n int) { - var l int - _ = l - n += 1 + sovRpc(uint64(m.Version)) - return n -} -func (m *Compare_CreateRevision) Size() (n int) { - var l int - _ = l - n += 1 + sovRpc(uint64(m.CreateRevision)) - return n -} -func (m *Compare_ModRevision) Size() (n int) { - var l int - _ = l - n += 1 + sovRpc(uint64(m.ModRevision)) - return n -} -func (m *Compare_Value) Size() (n int) { - var l int - _ = l - if m.Value != nil { - l = len(m.Value) - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *Compare_Lease) Size() (n int) { - var l int - _ = l - n += 1 + sovRpc(uint64(m.Lease)) - return n -} -func (m *TxnRequest) Size() (n int) { - var l int - _ = l - if len(m.Compare) > 0 { - for _, e := range m.Compare { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.Success) > 0 { - for _, e := range m.Success { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.Failure) > 0 { - for _, e := range m.Failure { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *TxnResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Succeeded { - n += 2 - } - if 
len(m.Responses) > 0 { - for _, e := range m.Responses { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *CompactionRequest) Size() (n int) { - var l int - _ = l - if m.Revision != 0 { - n += 1 + sovRpc(uint64(m.Revision)) - } - if m.Physical { - n += 2 - } - return n -} - -func (m *CompactionResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *HashRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *HashKVRequest) Size() (n int) { - var l int - _ = l - if m.Revision != 0 { - n += 1 + sovRpc(uint64(m.Revision)) - } - return n -} - -func (m *HashKVResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Hash != 0 { - n += 1 + sovRpc(uint64(m.Hash)) - } - if m.CompactRevision != 0 { - n += 1 + sovRpc(uint64(m.CompactRevision)) - } - return n -} - -func (m *HashResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Hash != 0 { - n += 1 + sovRpc(uint64(m.Hash)) - } - return n -} - -func (m *SnapshotRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *SnapshotResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.RemainingBytes != 0 { - n += 1 + sovRpc(uint64(m.RemainingBytes)) - } - l = len(m.Blob) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *WatchRequest) Size() (n int) { - var l int - _ = l - if m.RequestUnion != nil { - n += m.RequestUnion.Size() - } - return n -} - -func (m *WatchRequest_CreateRequest) Size() (n int) { - var l int - _ = l - if m.CreateRequest != nil { - l = m.CreateRequest.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *WatchRequest_CancelRequest) Size() (n int) { - var l int - _ = l - if 
m.CancelRequest != nil { - l = m.CancelRequest.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *WatchCreateRequest) Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.StartRevision != 0 { - n += 1 + sovRpc(uint64(m.StartRevision)) - } - if m.ProgressNotify { - n += 2 - } - if len(m.Filters) > 0 { - l = 0 - for _, e := range m.Filters { - l += sovRpc(uint64(e)) - } - n += 1 + sovRpc(uint64(l)) + l - } - if m.PrevKv { - n += 2 - } - return n -} - -func (m *WatchCancelRequest) Size() (n int) { - var l int - _ = l - if m.WatchId != 0 { - n += 1 + sovRpc(uint64(m.WatchId)) - } - return n -} - -func (m *WatchResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.WatchId != 0 { - n += 1 + sovRpc(uint64(m.WatchId)) - } - if m.Created { - n += 2 - } - if m.Canceled { - n += 2 - } - if m.CompactRevision != 0 { - n += 1 + sovRpc(uint64(m.CompactRevision)) - } - l = len(m.CancelReason) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *LeaseGrantRequest) Size() (n int) { - var l int - _ = l - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - return n -} - -func (m *LeaseGrantResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *LeaseRevokeRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - return n -} - -func (m *LeaseRevokeResponse) 
Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *LeaseKeepAliveRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - return n -} - -func (m *LeaseKeepAliveResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - return n -} - -func (m *LeaseTimeToLiveRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.Keys { - n += 2 - } - return n -} - -func (m *LeaseTimeToLiveResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - if m.GrantedTTL != 0 { - n += 1 + sovRpc(uint64(m.GrantedTTL)) - } - if len(m.Keys) > 0 { - for _, b := range m.Keys { - l = len(b) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *LeaseLeasesRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *LeaseStatus) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - return n -} - -func (m *LeaseLeasesResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *Member) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.ClientURLs) > 0 { - for _, s := range 
m.ClientURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberAddRequest) Size() (n int) { - var l int - _ = l - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberAddResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Member != nil { - l = m.Member.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberRemoveRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - return n -} - -func (m *MemberRemoveResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberUpdateRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberUpdateResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberListRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *MemberListResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *DefragmentRequest) Size() (n int) { - 
var l int - _ = l - return n -} - -func (m *DefragmentResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *MoveLeaderRequest) Size() (n int) { - var l int - _ = l - if m.TargetID != 0 { - n += 1 + sovRpc(uint64(m.TargetID)) - } - return n -} - -func (m *MoveLeaderResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AlarmRequest) Size() (n int) { - var l int - _ = l - if m.Action != 0 { - n += 1 + sovRpc(uint64(m.Action)) - } - if m.MemberID != 0 { - n += 1 + sovRpc(uint64(m.MemberID)) - } - if m.Alarm != 0 { - n += 1 + sovRpc(uint64(m.Alarm)) - } - return n -} - -func (m *AlarmMember) Size() (n int) { - var l int - _ = l - if m.MemberID != 0 { - n += 1 + sovRpc(uint64(m.MemberID)) - } - if m.Alarm != 0 { - n += 1 + sovRpc(uint64(m.Alarm)) - } - return n -} - -func (m *AlarmResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Alarms) > 0 { - for _, e := range m.Alarms { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *StatusRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *StatusResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.DbSize != 0 { - n += 1 + sovRpc(uint64(m.DbSize)) - } - if m.Leader != 0 { - n += 1 + sovRpc(uint64(m.Leader)) - } - if m.RaftIndex != 0 { - n += 1 + sovRpc(uint64(m.RaftIndex)) - } - if m.RaftTerm != 0 { - n += 1 + sovRpc(uint64(m.RaftTerm)) - } - return n -} - -func (m *AuthEnableRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *AuthDisableRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m 
*AuthenticateRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserAddRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGetRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserDeleteRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserChangePasswordRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGrantRoleRequest) Size() (n int) { - var l int - _ = l - l = len(m.User) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserRevokeRoleRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleAddRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGetRequest) Size() (n int) { - var l int - _ = l - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserListRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *AuthRoleListRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *AuthRoleDeleteRequest) Size() (n int) { - var l int - _ = l - l = len(m.Role) - if l > 0 { 
- n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGrantPermissionRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Perm != nil { - l = m.Perm.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleRevokePermissionRequest) Size() (n int) { - var l int - _ = l - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthEnableResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthDisableResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthenticateResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Token) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserAddResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGetResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthUserDeleteResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserChangePasswordResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGrantRoleResponse) Size() (n 
int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserRevokeRoleResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleAddResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGetResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Perm) > 0 { - for _, e := range m.Perm { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthRoleListResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthUserListResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Users) > 0 { - for _, s := range m.Users { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthRoleDeleteResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGrantPermissionResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleRevokePermissionResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func sovRpc(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRpc(x uint64) (n int) { - return sovRpc(uint64((x << 1) ^ uint64((int64(x) 
>> 63)))) -} -func (m *ResponseHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) - } - m.ClusterId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClusterId |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType) - } - m.MemberId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberId |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) - } - m.RaftTerm = 0 - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftTerm |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RangeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType) - } - m.SortOrder = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SortOrder |= (RangeRequest_SortOrder(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType) - } - m.SortTarget = 0 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SortTarget |= (RangeRequest_SortTarget(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Serializable = bool(v != 0) - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.KeysOnly = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CountOnly = bool(v != 0) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType) - } - m.MinModRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinModRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType) - } - m.MaxModRevision = 0 - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxModRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType) - } - m.MinCreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinCreateRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType) - } - m.MaxCreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxCreateRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RangeResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch 
fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kvs = append(m.Kvs, &mvccpb.KeyValue{}) - if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field More", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.More = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - m.Count |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PutRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PrevKv = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.IgnoreValue = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType) - } - var v int - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.IgnoreLease = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PutResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PutResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var 
msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PrevKv == nil { - m.PrevKv = &mvccpb.KeyValue{} - } - if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + 
byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PrevKv = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) - } - m.Deleted = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Deleted |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{}) - if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RangeRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Request = &RequestOp_RequestRange{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PutRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Request = &RequestOp_RequestPut{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DeleteRangeRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Request = &RequestOp_RequestDeleteRange{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &TxnRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Request = &RequestOp_RequestTxn{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - 
iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RangeResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Response = &ResponseOp_ResponseRange{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PutResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Response = &ResponseOp_ResponsePut{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DeleteRangeResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Response = &ResponseOp_ResponseDeleteRange{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &TxnResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Response = &ResponseOp_ResponseTxn{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Compare) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break 
- } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Compare: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - m.Result = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Result |= (Compare_CompareResult(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - m.Target = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Target |= (Compare_CompareTarget(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetUnion = &Compare_Version{v} - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetUnion = &Compare_CreateRevision{v} - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetUnion = &Compare_ModRevision{v} - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := make([]byte, postIndex-iNdEx) - copy(v, dAtA[iNdEx:postIndex]) - m.TargetUnion = &Compare_Value{v} - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Lease", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetUnion = &Compare_Lease{v} - case 64: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TxnRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Compare", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Compare = append(m.Compare, &Compare{}) - if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Success = append(m.Success, &RequestOp{}) - if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Failure = append(m.Failure, &RequestOp{}) - if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, 
err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TxnResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Succeeded = bool(v != 0) 
- case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Responses = append(m.Responses, &ResponseOp{}) - if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CompactionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision 
|= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Physical = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CompactionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = 
&ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HashRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HashRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HashKVRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HashKVResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { 
- m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - m.Hash = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Hash |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) - } - m.CompactRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CompactRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HashResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HashResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } 
- var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - m.Hash = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Hash |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil 
{ - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType) - } - m.RemainingBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RemainingBytes |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...) - if m.Blob == nil { - m.Blob = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &WatchCreateRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.RequestUnion = &WatchRequest_CreateRequest{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &WatchCancelRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.RequestUnion = &WatchRequest_CancelRequest{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
- if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType) - } - m.StartRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressNotify = bool(v != 0) - case 5: - if wireType == 0 { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = append(m.Filters, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + packedLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - for iNdEx < postIndex { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = 
append(m.Filters, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PrevKv = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) - } - m.WatchId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WatchId |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 
{ - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) - } - m.WatchId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WatchId |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Created", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Created = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Canceled = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) - } - m.CompactRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CompactRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CancelReason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, &mvccpb.Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - 
} - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") - } - 
if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := 
iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - 
skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Keys = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType) - } - m.GrantedTTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GrantedTTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) - copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, &LeaseStatus{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } 
- if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Member) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Member: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Member == nil { - m.Member = &Member{} - } - if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - 
if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group") - } - if 
fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if 
(iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
DefragmentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType) - } - m.TargetID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TargetID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 
0x7) - if wireType == 4 { - return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Action", wireType) - } - m.Action = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Action |= (AlarmRequest_AlarmAction(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) - } - m.MemberID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - m.Alarm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Alarm |= (AlarmType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmMember) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, 
wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) - } - m.MemberID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - m.Alarm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Alarm |= (AlarmType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= 
l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Alarms = append(m.Alarms, &AlarmMember{}) - if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType) - } - m.DbSize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DbSize |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) - } - m.Leader = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Leader |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) - } - m.RaftIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftIndex |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) - } - m.RaftTerm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftTerm |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if 
skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err 
- } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - 
postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc 
- } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func 
(m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - 
skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + 
intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - 
if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += 
skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Perm == nil { - m.Perm = &authpb.Permission{} - } - if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - 
return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - 
iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 
{ - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift 
- if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} 
-func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= 
l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Perm = append(m.Perm, &authpb.Permission{}) - if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { 
- return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { 
- return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end 
group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRpc(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthRpc - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRpc(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } - -var fileDescriptorRpc = []byte{ - // 3674 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x6f, 0x1b, 0xc7, - 0x77, 0xd7, 0x92, 0x22, 0x29, 0x1e, 0x5e, 0x44, 0x8d, 0x64, 0x9b, 0xa2, 0x6d, 0x59, 0x1e, 0xdf, - 0x64, 0x3b, 0x16, 0xff, 0x7f, 0x25, 0xed, 0x83, 0x5b, 0x04, 0x91, 0x25, 0xc6, 0x52, 0x24, 0x4b, - 0xca, 0x8a, 0x56, 0x52, 0x20, 0x28, 0xb1, 0x22, 0xc7, 0xd2, 0x42, 0xe4, 0x2e, 0xb3, 0xbb, 0xa4, - 0xa5, 0x34, 0x2d, 0x8a, 0x20, 0x41, 0xd1, 0x02, 0x7d, 0x69, 0x1e, 0x7a, 0x7b, 0x2c, 0x8a, 0x22, - 0x2f, 0x7d, 0x2b, 0xfa, 0x15, 0x8a, 0xbe, 0xb4, 0x40, 0xbf, 0x40, 0x91, 0xf6, 0xa5, 0xdf, 0xa1, - 0x45, 0xff, 0x98, 0xdb, 0xee, 0xec, 0x72, 0x97, 0x52, 0xc2, 0x24, 0x2f, 0xf2, 0xce, 0x99, 0x33, - 0xe7, 0x9c, 0x39, 0x33, 0xe7, 0x9c, 0x99, 0xdf, 0xd0, 0x90, 0x77, 0xfa, 0xed, 0xd5, 
0xbe, 0x63, - 0x7b, 0x36, 0x2a, 0x12, 0xaf, 0xdd, 0x71, 0x89, 0x33, 0x24, 0x4e, 0xff, 0xb8, 0xb6, 0x70, 0x62, - 0x9f, 0xd8, 0xac, 0xa3, 0x4e, 0xbf, 0x38, 0x4f, 0x6d, 0x91, 0xf2, 0xd4, 0x7b, 0xc3, 0x76, 0x9b, - 0xfd, 0xe9, 0x1f, 0xd7, 0xcf, 0x86, 0xa2, 0xeb, 0x26, 0xeb, 0x32, 0x06, 0xde, 0x29, 0xfb, 0xd3, - 0x3f, 0x66, 0xff, 0x88, 0xce, 0x5b, 0x27, 0xb6, 0x7d, 0xd2, 0x25, 0x75, 0xa3, 0x6f, 0xd6, 0x0d, - 0xcb, 0xb2, 0x3d, 0xc3, 0x33, 0x6d, 0xcb, 0xe5, 0xbd, 0xf8, 0x1b, 0x0d, 0xca, 0x3a, 0x71, 0xfb, - 0xb6, 0xe5, 0x92, 0x2d, 0x62, 0x74, 0x88, 0x83, 0x6e, 0x03, 0xb4, 0xbb, 0x03, 0xd7, 0x23, 0x4e, - 0xcb, 0xec, 0x54, 0xb5, 0x65, 0x6d, 0x65, 0x5a, 0xcf, 0x0b, 0xca, 0x76, 0x07, 0xdd, 0x84, 0x7c, - 0x8f, 0xf4, 0x8e, 0x79, 0x6f, 0x8a, 0xf5, 0xce, 0x70, 0xc2, 0x76, 0x07, 0xd5, 0x60, 0xc6, 0x21, - 0x43, 0xd3, 0x35, 0x6d, 0xab, 0x9a, 0x5e, 0xd6, 0x56, 0xd2, 0xba, 0xdf, 0xa6, 0x03, 0x1d, 0xe3, - 0x8d, 0xd7, 0xf2, 0x88, 0xd3, 0xab, 0x4e, 0xf3, 0x81, 0x94, 0xd0, 0x24, 0x4e, 0x0f, 0x7f, 0x9d, - 0x81, 0xa2, 0x6e, 0x58, 0x27, 0x44, 0x27, 0x9f, 0x0f, 0x88, 0xeb, 0xa1, 0x0a, 0xa4, 0xcf, 0xc8, - 0x05, 0x53, 0x5f, 0xd4, 0xe9, 0x27, 0x1f, 0x6f, 0x9d, 0x90, 0x16, 0xb1, 0xb8, 0xe2, 0x22, 0x1d, - 0x6f, 0x9d, 0x90, 0x86, 0xd5, 0x41, 0x0b, 0x90, 0xe9, 0x9a, 0x3d, 0xd3, 0x13, 0x5a, 0x79, 0x23, - 0x64, 0xce, 0x74, 0xc4, 0x9c, 0x0d, 0x00, 0xd7, 0x76, 0xbc, 0x96, 0xed, 0x74, 0x88, 0x53, 0xcd, - 0x2c, 0x6b, 0x2b, 0xe5, 0xb5, 0xfb, 0xab, 0xea, 0x42, 0xac, 0xaa, 0x06, 0xad, 0x1e, 0xda, 0x8e, - 0xb7, 0x4f, 0x79, 0xf5, 0xbc, 0x2b, 0x3f, 0xd1, 0x87, 0x50, 0x60, 0x42, 0x3c, 0xc3, 0x39, 0x21, - 0x5e, 0x35, 0xcb, 0xa4, 0x3c, 0xb8, 0x44, 0x4a, 0x93, 0x31, 0xeb, 0x4c, 0x3d, 0xff, 0x46, 0x18, - 0x8a, 0x2e, 0x71, 0x4c, 0xa3, 0x6b, 0x7e, 0x61, 0x1c, 0x77, 0x49, 0x35, 0xb7, 0xac, 0xad, 0xcc, - 0xe8, 0x21, 0x1a, 0x9d, 0xff, 0x19, 0xb9, 0x70, 0x5b, 0xb6, 0xd5, 0xbd, 0xa8, 0xce, 0x30, 0x86, - 0x19, 0x4a, 0xd8, 0xb7, 0xba, 0x17, 0x6c, 0xd1, 0xec, 0x81, 0xe5, 0xf1, 0xde, 0x3c, 0xeb, 0xcd, - 0x33, 0x0a, 0xeb, 0x5e, 
0x81, 0x4a, 0xcf, 0xb4, 0x5a, 0x3d, 0xbb, 0xd3, 0xf2, 0x1d, 0x02, 0xcc, - 0x21, 0xe5, 0x9e, 0x69, 0xbd, 0xb2, 0x3b, 0xba, 0x74, 0x0b, 0xe5, 0x34, 0xce, 0xc3, 0x9c, 0x05, - 0xc1, 0x69, 0x9c, 0xab, 0x9c, 0xab, 0x30, 0x4f, 0x65, 0xb6, 0x1d, 0x62, 0x78, 0x24, 0x60, 0x2e, - 0x32, 0xe6, 0xb9, 0x9e, 0x69, 0x6d, 0xb0, 0x9e, 0x10, 0xbf, 0x71, 0x3e, 0xc2, 0x5f, 0x12, 0xfc, - 0xc6, 0x79, 0x98, 0x1f, 0xaf, 0x42, 0xde, 0xf7, 0x39, 0x9a, 0x81, 0xe9, 0xbd, 0xfd, 0xbd, 0x46, - 0x65, 0x0a, 0x01, 0x64, 0xd7, 0x0f, 0x37, 0x1a, 0x7b, 0x9b, 0x15, 0x0d, 0x15, 0x20, 0xb7, 0xd9, - 0xe0, 0x8d, 0x14, 0x7e, 0x01, 0x10, 0x78, 0x17, 0xe5, 0x20, 0xbd, 0xd3, 0xf8, 0xbd, 0xca, 0x14, - 0xe5, 0x39, 0x6a, 0xe8, 0x87, 0xdb, 0xfb, 0x7b, 0x15, 0x8d, 0x0e, 0xde, 0xd0, 0x1b, 0xeb, 0xcd, - 0x46, 0x25, 0x45, 0x39, 0x5e, 0xed, 0x6f, 0x56, 0xd2, 0x28, 0x0f, 0x99, 0xa3, 0xf5, 0xdd, 0xd7, - 0x8d, 0xca, 0x34, 0xfe, 0x56, 0x83, 0x92, 0x58, 0x2f, 0x1e, 0x13, 0xe8, 0x3d, 0xc8, 0x9e, 0xb2, - 0xb8, 0x60, 0x5b, 0xb1, 0xb0, 0x76, 0x2b, 0xb2, 0xb8, 0xa1, 0xd8, 0xd1, 0x05, 0x2f, 0xc2, 0x90, - 0x3e, 0x1b, 0xba, 0xd5, 0xd4, 0x72, 0x7a, 0xa5, 0xb0, 0x56, 0x59, 0xe5, 0x01, 0xbb, 0xba, 0x43, - 0x2e, 0x8e, 0x8c, 0xee, 0x80, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0xee, 0xd9, 0x0e, 0x61, 0x3b, 0x76, - 0x46, 0x67, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0x77, 0x1a, 0xc0, 0xc1, - 0xc0, 0x4b, 0x0e, 0x8d, 0x05, 0xc8, 0x0c, 0xa9, 0x60, 0x11, 0x16, 0xbc, 0xc1, 0x62, 0x82, 0x18, - 0x2e, 0xf1, 0x63, 0x82, 0x36, 0xd0, 0x0d, 0xc8, 0xf5, 0x1d, 0x32, 0x6c, 0x9d, 0x0d, 0x99, 0x92, - 0x19, 0x3d, 0x4b, 0x9b, 0x3b, 0x43, 0x74, 0x17, 0x8a, 0xe6, 0x89, 0x65, 0x3b, 0xa4, 0xc5, 0x65, - 0x65, 0x58, 0x6f, 0x81, 0xd3, 0x98, 0xdd, 0x0a, 0x0b, 0x17, 0x9c, 0x55, 0x59, 0x76, 0x29, 0x09, - 0x5b, 0x50, 0x60, 0xa6, 0x4e, 0xe4, 0xbe, 0xc7, 0x81, 0x8d, 0x29, 0x36, 0x6c, 0xd4, 0x85, 0xc2, - 0x6a, 0xfc, 0x19, 0xa0, 0x4d, 0xd2, 0x25, 0x1e, 0x99, 0x24, 0x7b, 0x28, 0x3e, 0x49, 0xab, 0x3e, - 0xc1, 0x7f, 0xa1, 0xc1, 0x7c, 0x48, 0xfc, 0x44, 0xd3, 0xaa, 
0x42, 0xae, 0xc3, 0x84, 0x71, 0x0b, - 0xd2, 0xba, 0x6c, 0xa2, 0xa7, 0x30, 0x23, 0x0c, 0x70, 0xab, 0xe9, 0x84, 0x4d, 0x93, 0xe3, 0x36, - 0xb9, 0xf8, 0xbb, 0x14, 0xe4, 0xc5, 0x44, 0xf7, 0xfb, 0x68, 0x1d, 0x4a, 0x0e, 0x6f, 0xb4, 0xd8, - 0x7c, 0x84, 0x45, 0xb5, 0xe4, 0x24, 0xb4, 0x35, 0xa5, 0x17, 0xc5, 0x10, 0x46, 0x46, 0xbf, 0x03, - 0x05, 0x29, 0xa2, 0x3f, 0xf0, 0x84, 0xcb, 0xab, 0x61, 0x01, 0xc1, 0xfe, 0xdb, 0x9a, 0xd2, 0x41, - 0xb0, 0x1f, 0x0c, 0x3c, 0xd4, 0x84, 0x05, 0x39, 0x98, 0xcf, 0x46, 0x98, 0x91, 0x66, 0x52, 0x96, - 0xc3, 0x52, 0x46, 0x97, 0x6a, 0x6b, 0x4a, 0x47, 0x62, 0xbc, 0xd2, 0xa9, 0x9a, 0xe4, 0x9d, 0xf3, - 0xe4, 0x3d, 0x62, 0x52, 0xf3, 0xdc, 0x1a, 0x35, 0xa9, 0x79, 0x6e, 0xbd, 0xc8, 0x43, 0x4e, 0xb4, - 0xf0, 0x3f, 0xa7, 0x00, 0xe4, 0x6a, 0xec, 0xf7, 0xd1, 0x26, 0x94, 0x1d, 0xd1, 0x0a, 0x79, 0xeb, - 0x66, 0xac, 0xb7, 0xc4, 0x22, 0x4e, 0xe9, 0x25, 0x39, 0x88, 0x1b, 0xf7, 0x3e, 0x14, 0x7d, 0x29, - 0x81, 0xc3, 0x16, 0x63, 0x1c, 0xe6, 0x4b, 0x28, 0xc8, 0x01, 0xd4, 0x65, 0x9f, 0xc0, 0x35, 0x7f, - 0x7c, 0x8c, 0xcf, 0xee, 0x8e, 0xf1, 0x99, 0x2f, 0x70, 0x5e, 0x4a, 0x50, 0xbd, 0xa6, 0x1a, 0x16, - 0xb8, 0x6d, 0x31, 0xc6, 0x6d, 0xa3, 0x86, 0x51, 0xc7, 0x01, 0xad, 0x97, 0xbc, 0x89, 0xff, 0x27, - 0x0d, 0xb9, 0x0d, 0xbb, 0xd7, 0x37, 0x1c, 0xba, 0x1a, 0x59, 0x87, 0xb8, 0x83, 0xae, 0xc7, 0xdc, - 0x55, 0x5e, 0xbb, 0x17, 0x96, 0x28, 0xd8, 0xe4, 0xbf, 0x3a, 0x63, 0xd5, 0xc5, 0x10, 0x3a, 0x58, - 0x94, 0xc7, 0xd4, 0x15, 0x06, 0x8b, 0xe2, 0x28, 0x86, 0xc8, 0x40, 0x4e, 0x07, 0x81, 0x5c, 0x83, - 0xdc, 0x90, 0x38, 0x41, 0x49, 0xdf, 0x9a, 0xd2, 0x25, 0x01, 0x3d, 0x86, 0xd9, 0x68, 0x79, 0xc9, - 0x08, 0x9e, 0x72, 0x3b, 0x5c, 0x8d, 0xee, 0x41, 0x31, 0x54, 0xe3, 0xb2, 0x82, 0xaf, 0xd0, 0x53, - 0x4a, 0xdc, 0x75, 0x99, 0x57, 0x69, 0x3d, 0x2e, 0x6e, 0x4d, 0xc9, 0xcc, 0x7a, 0x5d, 0x66, 0xd6, - 0x19, 0x31, 0x4a, 0xe4, 0xd6, 0x50, 0x92, 0xf9, 0x20, 0x9c, 0x64, 0xf0, 0x07, 0x50, 0x0a, 0x39, - 0x88, 0xd6, 0x9d, 0xc6, 0xc7, 0xaf, 0xd7, 0x77, 0x79, 0x91, 0x7a, 0xc9, 0xea, 0x92, 0x5e, 0xd1, - 
0x68, 0xad, 0xdb, 0x6d, 0x1c, 0x1e, 0x56, 0x52, 0xa8, 0x04, 0xf9, 0xbd, 0xfd, 0x66, 0x8b, 0x73, - 0xa5, 0xf1, 0x4b, 0x5f, 0x82, 0x28, 0x72, 0x4a, 0x6d, 0x9b, 0x52, 0x6a, 0x9b, 0x26, 0x6b, 0x5b, - 0x2a, 0xa8, 0x6d, 0xac, 0xcc, 0xed, 0x36, 0xd6, 0x0f, 0x1b, 0x95, 0xe9, 0x17, 0x65, 0x28, 0x72, - 0xff, 0xb6, 0x06, 0x16, 0x2d, 0xb5, 0x7f, 0xa7, 0x01, 0x04, 0xd1, 0x84, 0xea, 0x90, 0x6b, 0x73, - 0x3d, 0x55, 0x8d, 0x25, 0xa3, 0x6b, 0xb1, 0x4b, 0xa6, 0x4b, 0x2e, 0xf4, 0x6b, 0xc8, 0xb9, 0x83, - 0x76, 0x9b, 0xb8, 0xb2, 0xe4, 0xdd, 0x88, 0xe6, 0x43, 0x91, 0xad, 0x74, 0xc9, 0x47, 0x87, 0xbc, - 0x31, 0xcc, 0xee, 0x80, 0x15, 0xc0, 0xf1, 0x43, 0x04, 0x1f, 0xfe, 0x6b, 0x0d, 0x0a, 0xca, 0xe6, - 0xfd, 0x91, 0x49, 0xf8, 0x16, 0xe4, 0x99, 0x0d, 0xa4, 0x23, 0xd2, 0xf0, 0x8c, 0x1e, 0x10, 0xd0, - 0x6f, 0x43, 0x5e, 0x46, 0x80, 0xcc, 0xc4, 0xd5, 0x78, 0xb1, 0xfb, 0x7d, 0x3d, 0x60, 0xc5, 0x3b, - 0x30, 0xc7, 0xbc, 0xd2, 0xa6, 0x87, 0x6b, 0xe9, 0x47, 0xf5, 0xf8, 0xa9, 0x45, 0x8e, 0x9f, 0x35, - 0x98, 0xe9, 0x9f, 0x5e, 0xb8, 0x66, 0xdb, 0xe8, 0x0a, 0x2b, 0xfc, 0x36, 0xfe, 0x08, 0x90, 0x2a, - 0x6c, 0x92, 0xe9, 0xe2, 0x12, 0x14, 0xb6, 0x0c, 0xf7, 0x54, 0x98, 0x84, 0x9f, 0x42, 0x89, 0x36, - 0x77, 0x8e, 0xae, 0x60, 0x23, 0xbb, 0x1c, 0x48, 0xee, 0x89, 0x7c, 0x8e, 0x60, 0xfa, 0xd4, 0x70, - 0x4f, 0xd9, 0x44, 0x4b, 0x3a, 0xfb, 0x46, 0x8f, 0xa1, 0xd2, 0xe6, 0x93, 0x6c, 0x45, 0xae, 0x0c, - 0xb3, 0x82, 0xee, 0x9f, 0x04, 0x3f, 0x85, 0x22, 0x9f, 0xc3, 0x4f, 0x6d, 0x04, 0x9e, 0x83, 0xd9, - 0x43, 0xcb, 0xe8, 0xbb, 0xa7, 0xb6, 0xac, 0x6e, 0x74, 0xd2, 0x95, 0x80, 0x36, 0x91, 0xc6, 0x47, - 0x30, 0xeb, 0x90, 0x9e, 0x61, 0x5a, 0xa6, 0x75, 0xd2, 0x3a, 0xbe, 0xf0, 0x88, 0x2b, 0x2e, 0x4c, - 0x65, 0x9f, 0xfc, 0x82, 0x52, 0xa9, 0x69, 0xc7, 0x5d, 0xfb, 0x58, 0xa4, 0x39, 0xf6, 0x8d, 0xff, - 0x49, 0x83, 0xe2, 0x27, 0x86, 0xd7, 0x96, 0x4b, 0x87, 0xb6, 0xa1, 0xec, 0x27, 0x37, 0x46, 0x11, - 0xb6, 0x44, 0x4a, 0x2c, 0x1b, 0x23, 0x8f, 0xd2, 0xb2, 0x3a, 0x96, 0xda, 0x2a, 0x81, 0x89, 0x32, - 0xac, 0x36, 0xe9, 0xfa, 0xa2, 0x52, 
0xc9, 0xa2, 0x18, 0xa3, 0x2a, 0x4a, 0x25, 0xbc, 0x98, 0x0d, - 0x8e, 0x1f, 0x3c, 0x97, 0xfc, 0x4d, 0x0a, 0xd0, 0xa8, 0x0d, 0x3f, 0xf4, 0x44, 0xf6, 0x00, 0xca, - 0xae, 0x67, 0x38, 0x23, 0x7b, 0xa3, 0xc4, 0xa8, 0x7e, 0x82, 0x7e, 0x04, 0xb3, 0x7d, 0xc7, 0x3e, - 0x71, 0x88, 0xeb, 0xb6, 0x2c, 0xdb, 0x33, 0xdf, 0x5c, 0x88, 0x43, 0x6d, 0x59, 0x92, 0xf7, 0x18, - 0x15, 0x35, 0x20, 0xf7, 0xc6, 0xec, 0x7a, 0xc4, 0x71, 0xab, 0x99, 0xe5, 0xf4, 0x4a, 0x79, 0xed, - 0xe9, 0x65, 0x5e, 0x5b, 0xfd, 0x90, 0xf1, 0x37, 0x2f, 0xfa, 0x44, 0x97, 0x63, 0xd5, 0x83, 0x62, - 0x36, 0x74, 0x50, 0x7c, 0x00, 0x10, 0xf0, 0xd3, 0x54, 0xbb, 0xb7, 0x7f, 0xf0, 0xba, 0x59, 0x99, - 0x42, 0x45, 0x98, 0xd9, 0xdb, 0xdf, 0x6c, 0xec, 0x36, 0x68, 0x5e, 0xc6, 0x75, 0xe9, 0x1b, 0xd5, - 0x87, 0x68, 0x11, 0x66, 0xde, 0x52, 0xaa, 0xbc, 0x6f, 0xa7, 0xf5, 0x1c, 0x6b, 0x6f, 0x77, 0xf0, - 0x9f, 0xa7, 0xa0, 0x24, 0x76, 0xc1, 0x44, 0x5b, 0x51, 0x55, 0x91, 0x0a, 0xa9, 0xa0, 0xa7, 0x52, - 0xbe, 0x3b, 0x3a, 0xe2, 0xf0, 0x2b, 0x9b, 0x34, 0x37, 0xf0, 0xc5, 0x26, 0x1d, 0xe1, 0x56, 0xbf, - 0x1d, 0x1b, 0xbe, 0x99, 0xd8, 0xf0, 0x45, 0xf7, 0xa0, 0xe4, 0xef, 0x36, 0xc3, 0x15, 0xb5, 0x36, - 0xaf, 0x17, 0xe5, 0x46, 0xa2, 0x34, 0xf4, 0x00, 0xb2, 0x64, 0x48, 0x2c, 0xcf, 0xad, 0x16, 0x58, - 0xd6, 0x2d, 0xc9, 0xf3, 0x6f, 0x83, 0x52, 0x75, 0xd1, 0x89, 0x7f, 0x0b, 0xe6, 0xd8, 0x3d, 0xe3, - 0xa5, 0x63, 0x58, 0xea, 0x85, 0xa8, 0xd9, 0xdc, 0x15, 0xae, 0xa3, 0x9f, 0xa8, 0x0c, 0xa9, 0xed, - 0x4d, 0x31, 0xd1, 0xd4, 0xf6, 0x26, 0xfe, 0x4a, 0x03, 0xa4, 0x8e, 0x9b, 0xc8, 0x97, 0x11, 0xe1, - 0x52, 0x7d, 0x3a, 0x50, 0xbf, 0x00, 0x19, 0xe2, 0x38, 0xb6, 0xc3, 0xbc, 0x96, 0xd7, 0x79, 0x03, - 0xdf, 0x17, 0x36, 0xe8, 0x64, 0x68, 0x9f, 0xf9, 0x81, 0xc1, 0xa5, 0x69, 0xbe, 0xa9, 0x3b, 0x30, - 0x1f, 0xe2, 0x9a, 0x28, 0xfb, 0x3f, 0x82, 0x6b, 0x4c, 0xd8, 0x0e, 0x21, 0xfd, 0xf5, 0xae, 0x39, - 0x4c, 0xd4, 0xda, 0x87, 0xeb, 0x51, 0xc6, 0x9f, 0xd7, 0x47, 0xf8, 0x77, 0x85, 0xc6, 0xa6, 0xd9, - 0x23, 0x4d, 0x7b, 0x37, 0xd9, 0x36, 0x9a, 0x1d, 0xcf, 0xc8, 0x85, 0x2b, 
0xca, 0x24, 0xfb, 0xc6, - 0x7f, 0xaf, 0xc1, 0x8d, 0x91, 0xe1, 0x3f, 0xf3, 0xaa, 0x2e, 0x01, 0x9c, 0xd0, 0xed, 0x43, 0x3a, - 0xb4, 0x83, 0xdf, 0xd0, 0x15, 0x8a, 0x6f, 0x27, 0x4d, 0x30, 0x45, 0x61, 0xe7, 0x82, 0x58, 0x73, - 0xf6, 0xc7, 0x95, 0x35, 0xe6, 0x36, 0x14, 0x18, 0xe1, 0xd0, 0x33, 0xbc, 0x81, 0x3b, 0xb2, 0x18, - 0x7f, 0x24, 0xb6, 0x80, 0x1c, 0x34, 0xd1, 0xbc, 0x7e, 0x0d, 0x59, 0x76, 0x38, 0x95, 0x47, 0xb3, - 0xc8, 0x6d, 0x40, 0xb1, 0x43, 0x17, 0x8c, 0xf8, 0x14, 0xb2, 0xaf, 0x18, 0xa2, 0xa7, 0x58, 0x36, - 0x2d, 0x97, 0xc2, 0x32, 0x7a, 0x1c, 0x67, 0xc8, 0xeb, 0xec, 0x9b, 0x9d, 0x64, 0x08, 0x71, 0x5e, - 0xeb, 0xbb, 0xfc, 0xc4, 0x94, 0xd7, 0xfd, 0x36, 0x75, 0x59, 0xbb, 0x6b, 0x12, 0xcb, 0x63, 0xbd, - 0xd3, 0xac, 0x57, 0xa1, 0xe0, 0x55, 0xa8, 0x70, 0x4d, 0xeb, 0x9d, 0x8e, 0x72, 0x22, 0xf1, 0xe5, - 0x69, 0x61, 0x79, 0xf8, 0x1f, 0x34, 0x98, 0x53, 0x06, 0x4c, 0xe4, 0x98, 0x77, 0x20, 0xcb, 0x71, - 0x4b, 0x51, 0xfc, 0x16, 0xc2, 0xa3, 0xb8, 0x1a, 0x5d, 0xf0, 0xa0, 0x55, 0xc8, 0xf1, 0x2f, 0x79, - 0x2c, 0x8c, 0x67, 0x97, 0x4c, 0xf8, 0x01, 0xcc, 0x0b, 0x12, 0xe9, 0xd9, 0x71, 0x7b, 0x9b, 0x39, - 0x14, 0x7f, 0x09, 0x0b, 0x61, 0xb6, 0x89, 0xa6, 0xa4, 0x18, 0x99, 0xba, 0x8a, 0x91, 0xeb, 0xd2, - 0xc8, 0xd7, 0xfd, 0x8e, 0x52, 0xab, 0xa3, 0xab, 0xae, 0xae, 0x48, 0x2a, 0xb2, 0x22, 0xfe, 0x04, - 0xa4, 0x88, 0x5f, 0x74, 0x02, 0xf3, 0x72, 0x3b, 0xec, 0x9a, 0xae, 0x7f, 0x82, 0xfb, 0x02, 0x90, - 0x4a, 0xfc, 0xa5, 0x0d, 0xda, 0x24, 0x6f, 0x1c, 0xe3, 0xa4, 0x47, 0xfc, 0xfa, 0x44, 0xcf, 0xf3, - 0x2a, 0x71, 0xa2, 0x8c, 0x5e, 0x87, 0xb9, 0x57, 0xf6, 0x90, 0xa6, 0x06, 0x4a, 0x0d, 0x42, 0x86, - 0xdf, 0xe7, 0xfc, 0x65, 0xf3, 0xdb, 0x54, 0xb9, 0x3a, 0x60, 0x22, 0xe5, 0xff, 0xa6, 0x41, 0x71, - 0xbd, 0x6b, 0x38, 0x3d, 0xa9, 0xf8, 0x7d, 0xc8, 0xf2, 0x5b, 0x8a, 0x00, 0x06, 0x1e, 0x86, 0xc5, - 0xa8, 0xbc, 0xbc, 0xb1, 0xce, 0xef, 0x34, 0x62, 0x14, 0x35, 0x5c, 0xbc, 0x1d, 0x6c, 0x46, 0xde, - 0x12, 0x36, 0xd1, 0x33, 0xc8, 0x18, 0x74, 0x08, 0x4b, 0xc1, 0xe5, 0xe8, 0xfd, 0x90, 0x49, 0x63, - 0x87, 0x33, 
0xce, 0x85, 0xdf, 0x83, 0x82, 0xa2, 0x81, 0xde, 0x80, 0x5f, 0x36, 0xc4, 0x01, 0x6c, - 0x7d, 0xa3, 0xb9, 0x7d, 0xc4, 0x2f, 0xc6, 0x65, 0x80, 0xcd, 0x86, 0xdf, 0x4e, 0xe1, 0x4f, 0xc5, - 0x28, 0x91, 0xef, 0x54, 0x7b, 0xb4, 0x24, 0x7b, 0x52, 0x57, 0xb2, 0xe7, 0x1c, 0x4a, 0x62, 0xfa, - 0x93, 0xa6, 0x6f, 0x26, 0x2f, 0x21, 0x7d, 0x2b, 0xc6, 0xeb, 0x82, 0x11, 0xcf, 0x42, 0x49, 0x24, - 0x74, 0xb1, 0xff, 0xfe, 0x55, 0x83, 0xb2, 0xa4, 0x4c, 0x0a, 0x60, 0x4a, 0xec, 0x85, 0x57, 0x00, - 0x1f, 0x79, 0xb9, 0x0e, 0xd9, 0xce, 0xf1, 0xa1, 0xf9, 0x85, 0x04, 0x9b, 0x45, 0x8b, 0xd2, 0xbb, - 0x5c, 0x0f, 0x7f, 0xf1, 0x11, 0x2d, 0x7a, 0x0b, 0x77, 0x8c, 0x37, 0xde, 0xb6, 0xd5, 0x21, 0xe7, - 0xec, 0xdc, 0x38, 0xad, 0x07, 0x04, 0x76, 0x29, 0x15, 0x2f, 0x43, 0xec, 0xb0, 0xa8, 0xbe, 0x14, - 0xcd, 0xc3, 0xdc, 0xfa, 0xc0, 0x3b, 0x6d, 0x58, 0xc6, 0x71, 0x57, 0x66, 0x2c, 0x5a, 0x66, 0x29, - 0x71, 0xd3, 0x74, 0x55, 0x6a, 0x03, 0xe6, 0x29, 0x95, 0x58, 0x9e, 0xd9, 0x56, 0xd2, 0x9b, 0x2c, - 0x62, 0x5a, 0xa4, 0x88, 0x19, 0xae, 0xfb, 0xd6, 0x76, 0x3a, 0x62, 0x6a, 0x7e, 0x1b, 0x6f, 0x72, - 0xe1, 0xaf, 0xdd, 0x50, 0x99, 0xfa, 0xa1, 0x52, 0x56, 0x02, 0x29, 0x2f, 0x89, 0x37, 0x46, 0x0a, - 0x7e, 0x0a, 0xd7, 0x24, 0xa7, 0x00, 0xf7, 0xc6, 0x30, 0xef, 0xc3, 0x6d, 0xc9, 0xbc, 0x71, 0x4a, - 0x6f, 0x4f, 0x07, 0x42, 0xe1, 0x8f, 0xb5, 0xf3, 0x05, 0x54, 0x7d, 0x3b, 0xd9, 0x61, 0xd9, 0xee, - 0xaa, 0x06, 0x0c, 0x5c, 0xb1, 0x67, 0xf2, 0x3a, 0xfb, 0xa6, 0x34, 0xc7, 0xee, 0xfa, 0x47, 0x02, - 0xfa, 0x8d, 0x37, 0x60, 0x51, 0xca, 0x10, 0xc7, 0xd8, 0xb0, 0x90, 0x11, 0x83, 0xe2, 0x84, 0x08, - 0x87, 0xd1, 0xa1, 0xe3, 0xdd, 0xae, 0x72, 0x86, 0x5d, 0xcb, 0x64, 0x6a, 0x8a, 0xcc, 0x6b, 0x7c, - 0x47, 0x50, 0xc3, 0xd4, 0x8a, 0x21, 0xc8, 0x54, 0x80, 0x4a, 0x16, 0x0b, 0x41, 0xc9, 0x23, 0x0b, - 0x31, 0x22, 0xfa, 0x33, 0x58, 0xf2, 0x8d, 0xa0, 0x7e, 0x3b, 0x20, 0x4e, 0xcf, 0x74, 0x5d, 0x05, - 0x0e, 0x8a, 0x9b, 0xf8, 0x43, 0x98, 0xee, 0x13, 0x91, 0x53, 0x0a, 0x6b, 0x68, 0x95, 0xbf, 0xdf, - 0xae, 0x2a, 0x83, 0x59, 0x3f, 0xee, 0xc0, 0x1d, 
0x29, 0x9d, 0x7b, 0x34, 0x56, 0x7c, 0xd4, 0x28, - 0x79, 0xeb, 0xe6, 0x6e, 0x1d, 0xbd, 0x75, 0xa7, 0xf9, 0xda, 0xfb, 0x10, 0xe5, 0x47, 0xdc, 0x91, - 0x32, 0xb6, 0x26, 0xaa, 0x15, 0x3b, 0xdc, 0xa7, 0x7e, 0x48, 0x4e, 0x24, 0xec, 0x18, 0x16, 0xc2, - 0x91, 0x3c, 0x51, 0x1a, 0x5b, 0x80, 0x8c, 0x67, 0x9f, 0x11, 0x99, 0xc4, 0x78, 0x43, 0x1a, 0xec, - 0x87, 0xf9, 0x44, 0x06, 0x1b, 0x81, 0x30, 0xb6, 0x25, 0x27, 0xb5, 0x97, 0xae, 0xa6, 0x3c, 0x7c, - 0xf1, 0x06, 0xde, 0x83, 0xeb, 0xd1, 0x34, 0x31, 0x91, 0xc9, 0x47, 0x7c, 0x03, 0xc7, 0x65, 0x92, - 0x89, 0xe4, 0x7e, 0x1c, 0x24, 0x03, 0x25, 0xa1, 0x4c, 0x24, 0x52, 0x87, 0x5a, 0x5c, 0x7e, 0xf9, - 0x29, 0xf6, 0xab, 0x9f, 0x6e, 0x26, 0x12, 0xe6, 0x06, 0xc2, 0x26, 0x5f, 0xfe, 0x20, 0x47, 0xa4, - 0xc7, 0xe6, 0x08, 0x11, 0x24, 0x41, 0x16, 0xfb, 0x19, 0x36, 0x9d, 0xd0, 0x11, 0x24, 0xd0, 0x49, - 0x75, 0xd0, 0x1a, 0xe2, 0xeb, 0x60, 0x0d, 0xb9, 0xb1, 0xd5, 0xb4, 0x3b, 0xd1, 0x62, 0x7c, 0x12, - 0xe4, 0xce, 0x91, 0xcc, 0x3c, 0x91, 0xe0, 0x4f, 0x61, 0x39, 0x39, 0x29, 0x4f, 0x22, 0xf9, 0x49, - 0x1d, 0xf2, 0xfe, 0x81, 0x52, 0xf9, 0xed, 0x43, 0x01, 0x72, 0x7b, 0xfb, 0x87, 0x07, 0xeb, 0x1b, - 0x0d, 0xfe, 0xe3, 0x87, 0x8d, 0x7d, 0x5d, 0x7f, 0x7d, 0xd0, 0xac, 0xa4, 0xd6, 0xfe, 0x2f, 0x0d, - 0xa9, 0x9d, 0x23, 0xf4, 0xfb, 0x90, 0xe1, 0x2f, 0x81, 0x63, 0x9e, 0x7f, 0x6b, 0xe3, 0x1e, 0x3b, - 0xf1, 0xad, 0xaf, 0xfe, 0xe3, 0xbf, 0xbf, 0x4d, 0x5d, 0xc7, 0x73, 0xf5, 0xe1, 0xbb, 0x46, 0xb7, - 0x7f, 0x6a, 0xd4, 0xcf, 0x86, 0x75, 0x56, 0x20, 0x9e, 0x6b, 0x4f, 0xd0, 0x11, 0xa4, 0x0f, 0x06, - 0x1e, 0x4a, 0x7c, 0x1b, 0xae, 0x25, 0x3f, 0x82, 0xe2, 0x1a, 0x93, 0xbc, 0x80, 0x67, 0x55, 0xc9, - 0xfd, 0x81, 0x47, 0xe5, 0x0e, 0xa1, 0xa0, 0xbe, 0x63, 0x5e, 0xfa, 0x6a, 0x5c, 0xbb, 0xfc, 0x8d, - 0x14, 0x63, 0xa6, 0xef, 0x16, 0xbe, 0xa1, 0xea, 0xe3, 0xcf, 0xad, 0xea, 0x7c, 0x9a, 0xe7, 0x16, - 0x4a, 0x7c, 0x58, 0xae, 0x25, 0xbf, 0x9d, 0xc6, 0xcf, 0xc7, 0x3b, 0xb7, 0xa8, 0x5c, 0x5b, 0xbc, - 0x9d, 0xb6, 0x3d, 0x74, 0x27, 0xe6, 0xed, 0x4c, 0x7d, 0x25, 0xaa, 0x2d, 0x27, 0x33, 
0x08, 0x4d, - 0x77, 0x99, 0xa6, 0x9b, 0xf8, 0xba, 0xaa, 0xa9, 0xed, 0xf3, 0x3d, 0xd7, 0x9e, 0xac, 0x9d, 0x42, - 0x86, 0xc1, 0xc4, 0xa8, 0x25, 0x3f, 0x6a, 0x31, 0x00, 0x77, 0xc2, 0x0e, 0x08, 0x01, 0xcc, 0x78, - 0x91, 0x69, 0x9b, 0xc7, 0x65, 0x5f, 0x1b, 0x43, 0x8a, 0x9f, 0x6b, 0x4f, 0x56, 0xb4, 0x5f, 0x69, - 0x6b, 0xff, 0x3b, 0x0d, 0x19, 0x06, 0x1a, 0xa1, 0x3e, 0x40, 0x80, 0xa9, 0x46, 0xe7, 0x39, 0x82, - 0xd2, 0x46, 0xe7, 0x39, 0x0a, 0xc7, 0xe2, 0x3b, 0x4c, 0xf3, 0x22, 0x5e, 0xf0, 0x35, 0x33, 0x40, - 0xaa, 0xce, 0x30, 0x36, 0xea, 0xd6, 0xb7, 0x02, 0x37, 0xe3, 0xd1, 0x86, 0xe2, 0x24, 0x86, 0xc0, - 0xd5, 0xe8, 0x36, 0x89, 0x01, 0x56, 0xf1, 0x3d, 0xa6, 0xf4, 0x36, 0xae, 0xaa, 0xce, 0xe5, 0x7a, - 0x1d, 0xc6, 0x49, 0x15, 0x7f, 0xad, 0x41, 0x39, 0x8c, 0x8f, 0xa2, 0x7b, 0x31, 0xa2, 0xa3, 0x30, - 0x6b, 0xed, 0xfe, 0x78, 0xa6, 0x44, 0x13, 0xb8, 0xfe, 0x33, 0x42, 0xfa, 0x06, 0xe5, 0x14, 0xbe, - 0x47, 0x7f, 0xa2, 0xc1, 0x6c, 0x04, 0xf5, 0x44, 0x71, 0x2a, 0x46, 0x30, 0xd5, 0xda, 0x83, 0x4b, - 0xb8, 0x84, 0x25, 0x8f, 0x98, 0x25, 0x77, 0xf1, 0xad, 0x51, 0x67, 0x78, 0x66, 0x8f, 0x78, 0xb6, - 0xb0, 0xc6, 0x5f, 0x09, 0x0e, 0x51, 0xc6, 0xae, 0x44, 0x08, 0xf2, 0x8c, 0x5d, 0x89, 0x30, 0xbe, - 0x39, 0x6e, 0x25, 0x38, 0x30, 0x49, 0x37, 0xfa, 0xff, 0xa7, 0x21, 0xb7, 0xc1, 0x7f, 0x8c, 0x88, - 0x3c, 0xc8, 0xfb, 0x60, 0x20, 0x5a, 0x8a, 0x03, 0x66, 0x82, 0x8b, 0x43, 0xed, 0x4e, 0x62, 0xbf, - 0x50, 0xff, 0x90, 0xa9, 0x5f, 0xc6, 0x37, 0x7d, 0xf5, 0xe2, 0x47, 0x8f, 0x75, 0x0e, 0x01, 0xd4, - 0x8d, 0x4e, 0x87, 0x4e, 0xfd, 0x8f, 0x35, 0x28, 0xaa, 0x98, 0x1d, 0xba, 0x1b, 0x0b, 0x09, 0xa9, - 0xb0, 0x5f, 0x0d, 0x8f, 0x63, 0x11, 0xfa, 0x1f, 0x33, 0xfd, 0xf7, 0xf0, 0x52, 0x92, 0x7e, 0x87, - 0xf1, 0x87, 0x4d, 0xe0, 0xa8, 0x5b, 0xbc, 0x09, 0x21, 0x50, 0x2f, 0xde, 0x84, 0x30, 0x68, 0x77, - 0xb9, 0x09, 0x03, 0xc6, 0x4f, 0x4d, 0x38, 0x07, 0x08, 0x40, 0x36, 0x14, 0xeb, 0x5c, 0xe5, 0x2a, - 0x15, 0x0d, 0xfe, 0x51, 0x7c, 0x2e, 0x66, 0xeb, 0x45, 0x74, 0x77, 0x4d, 0x97, 0x26, 0x81, 0xb5, - 0x7f, 0xcc, 0x42, 0xe1, 
0x95, 0x61, 0x5a, 0x1e, 0xb1, 0x0c, 0xab, 0x4d, 0xd0, 0x09, 0x64, 0x58, - 0xad, 0x8c, 0x66, 0x3c, 0x15, 0x7c, 0x8a, 0x66, 0xbc, 0x10, 0x32, 0x83, 0x1f, 0x30, 0xd5, 0x77, - 0x70, 0xcd, 0x57, 0xdd, 0x0b, 0xe4, 0xd7, 0x19, 0xaa, 0x42, 0xa7, 0x7c, 0x06, 0x59, 0x01, 0xd8, - 0x47, 0xa4, 0x85, 0xd0, 0x96, 0xda, 0xad, 0xf8, 0xce, 0xc4, 0x5d, 0xa6, 0xea, 0x72, 0x19, 0x33, - 0x55, 0xf6, 0x07, 0x00, 0x01, 0x66, 0x18, 0xf5, 0xef, 0x08, 0xc4, 0x58, 0x5b, 0x4e, 0x66, 0x10, - 0x8a, 0x9f, 0x30, 0xc5, 0xf7, 0xf1, 0x9d, 0x58, 0xc5, 0x1d, 0x7f, 0x00, 0x55, 0xde, 0x86, 0xe9, - 0x2d, 0xc3, 0x3d, 0x45, 0x91, 0xea, 0xa7, 0xfc, 0x90, 0xa0, 0x56, 0x8b, 0xeb, 0x12, 0xaa, 0xee, - 0x33, 0x55, 0x4b, 0x78, 0x31, 0x56, 0xd5, 0xa9, 0xe1, 0xd2, 0x62, 0x82, 0x4c, 0xc8, 0xf2, 0x1f, - 0x17, 0x44, 0xdd, 0x19, 0xfa, 0x81, 0x42, 0xd4, 0x9d, 0xe1, 0xdf, 0x23, 0x5c, 0x51, 0xd5, 0x00, - 0x66, 0xe4, 0x93, 0x3e, 0xba, 0x1d, 0x59, 0x9e, 0xf0, 0xf3, 0x7f, 0x6d, 0x29, 0xa9, 0x5b, 0x28, - 0x5c, 0x61, 0x0a, 0x31, 0xbe, 0x1d, 0xbf, 0x7e, 0x82, 0xfd, 0xb9, 0xf6, 0xe4, 0x57, 0x1a, 0xad, - 0x1a, 0x10, 0x60, 0xaf, 0x23, 0x41, 0x12, 0x85, 0x71, 0x47, 0x82, 0x64, 0x04, 0xb6, 0xc5, 0xef, - 0x32, 0xed, 0xcf, 0xf0, 0x4a, 0xac, 0x76, 0xcf, 0x31, 0x2c, 0xf7, 0x0d, 0x71, 0x9e, 0x71, 0x90, - 0xcd, 0x3d, 0x35, 0xfb, 0x34, 0x60, 0xfe, 0xac, 0x02, 0xd3, 0xf4, 0x9c, 0x4a, 0x0b, 0x76, 0x70, - 0xbd, 0x8f, 0x9a, 0x33, 0x02, 0xaa, 0x45, 0xcd, 0x19, 0x45, 0x06, 0x62, 0x0a, 0x36, 0xfb, 0x11, - 0x3a, 0x61, 0x5c, 0xd4, 0xf1, 0x1e, 0x14, 0x14, 0x10, 0x00, 0xc5, 0x48, 0x0c, 0x43, 0x76, 0xd1, - 0x32, 0x11, 0x83, 0x20, 0xe0, 0x65, 0xa6, 0xb4, 0x86, 0xaf, 0x85, 0x95, 0x76, 0x38, 0x1b, 0xd5, - 0xfa, 0x25, 0x14, 0x55, 0xb4, 0x00, 0xc5, 0x08, 0x8d, 0x60, 0x82, 0xd1, 0xec, 0x18, 0x07, 0x36, - 0xc4, 0xa4, 0x09, 0xff, 0x27, 0xf7, 0x92, 0x97, 0x6a, 0xff, 0x1c, 0x72, 0x02, 0x43, 0x88, 0x9b, - 0x6f, 0x18, 0x45, 0x8c, 0x9b, 0x6f, 0x04, 0x80, 0x88, 0x39, 0xfd, 0x31, 0xb5, 0xf4, 0xae, 0x24, - 0x4b, 0x92, 0x50, 0xf9, 0x92, 0x78, 0x49, 0x2a, 0x03, 0x5c, 
0x2c, 0x49, 0xa5, 0x72, 0x4f, 0x1d, - 0xab, 0xf2, 0x84, 0x78, 0x22, 0xa4, 0xe4, 0x25, 0x10, 0x25, 0x48, 0x54, 0xf3, 0x3f, 0x1e, 0xc7, - 0x92, 0x78, 0x60, 0x0f, 0xb4, 0x8a, 0xe4, 0x8f, 0xfe, 0x10, 0x20, 0x00, 0x3c, 0xa2, 0x67, 0xb0, - 0x58, 0xd4, 0x34, 0x7a, 0x06, 0x8b, 0xc7, 0x4c, 0x62, 0x12, 0x49, 0xa0, 0x9c, 0x5f, 0x1a, 0xa8, - 0xfa, 0xbf, 0xd4, 0x00, 0x8d, 0x02, 0x24, 0xe8, 0x69, 0xbc, 0x8a, 0x58, 0x40, 0xb6, 0xf6, 0xce, - 0xd5, 0x98, 0x13, 0xeb, 0x45, 0x60, 0x57, 0x9b, 0x0d, 0xe9, 0xbf, 0xa5, 0x96, 0x7d, 0xa3, 0x41, - 0x29, 0x04, 0xb1, 0xa0, 0x87, 0x09, 0xeb, 0x1c, 0x01, 0x75, 0x6b, 0x8f, 0x2e, 0xe5, 0x4b, 0x3c, - 0x9f, 0x29, 0xbb, 0x42, 0x1e, 0xd1, 0xff, 0x54, 0x83, 0x72, 0x18, 0x97, 0x41, 0x09, 0x0a, 0x46, - 0x90, 0xe1, 0xda, 0xca, 0xe5, 0x8c, 0x57, 0x58, 0xad, 0xe0, 0xd4, 0xfe, 0x39, 0xe4, 0x04, 0x9c, - 0x13, 0x17, 0x16, 0x61, 0x60, 0x39, 0x2e, 0x2c, 0x22, 0x58, 0x50, 0x52, 0x58, 0x38, 0x76, 0x97, - 0x28, 0x91, 0x28, 0x40, 0x9f, 0x24, 0x95, 0xe3, 0x23, 0x31, 0x82, 0x18, 0x8d, 0x55, 0x19, 0x44, - 0xa2, 0x84, 0x7c, 0x50, 0x82, 0xc4, 0x4b, 0x22, 0x31, 0x8a, 0x18, 0x25, 0x45, 0x22, 0xd3, 0xaa, - 0x44, 0x62, 0x80, 0xd0, 0xc4, 0x45, 0xe2, 0x08, 0x6c, 0x1e, 0x17, 0x89, 0xa3, 0x20, 0x4f, 0xd2, - 0xda, 0x32, 0xe5, 0xa1, 0x48, 0x9c, 0x8f, 0x41, 0x74, 0xd0, 0x3b, 0x09, 0x3e, 0x8d, 0x85, 0xe4, - 0x6b, 0xcf, 0xae, 0xc8, 0x3d, 0x3e, 0x02, 0xf8, 0x6a, 0xc8, 0x08, 0xf8, 0x5b, 0x0d, 0x16, 0xe2, - 0x20, 0x21, 0x94, 0xa0, 0x2c, 0x01, 0xcf, 0xaf, 0xad, 0x5e, 0x95, 0xfd, 0x0a, 0x7e, 0xf3, 0x63, - 0xe2, 0x45, 0xe5, 0x5f, 0xbe, 0x5f, 0xd2, 0xfe, 0xfd, 0xfb, 0x25, 0xed, 0x3f, 0xbf, 0x5f, 0xd2, - 0xfe, 0xea, 0xbf, 0x96, 0xa6, 0x8e, 0xb3, 0xec, 0x7f, 0x82, 0xbd, 0xfb, 0x9b, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xdd, 0x84, 0xb6, 0xd7, 0x90, 0x36, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto deleted file mode 100644 index 2c18ce101c6..00000000000 --- 
a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto +++ /dev/null @@ -1,1053 +0,0 @@ -syntax = "proto3"; -package etcdserverpb; - -import "gogoproto/gogo.proto"; -import "etcd/mvcc/mvccpb/kv.proto"; -import "etcd/auth/authpb/auth.proto"; - -// for grpc-gateway -import "google/api/annotations.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -service KV { - // Range gets the keys in the range from the key-value store. - rpc Range(RangeRequest) returns (RangeResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/range" - body: "*" - }; - } - - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. - rpc Put(PutRequest) returns (PutResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/put" - body: "*" - }; - } - - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/deleterange" - body: "*" - }; - } - - // Txn processes multiple requests in a single transaction. - // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. - rpc Txn(TxnRequest) returns (TxnResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/txn" - body: "*" - }; - } - - // Compact compacts the event history in the etcd key-value store. The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. 
- rpc Compact(CompactionRequest) returns (CompactionResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/compaction" - body: "*" - }; - } -} - -service Watch { - // Watch watches for events happening or that have happened. Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. - rpc Watch(stream WatchRequest) returns (stream WatchResponse) { - option (google.api.http) = { - post: "/v3alpha/watch" - body: "*" - }; - } -} - -service Lease { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. All keys attached to the lease will be expired and - // deleted if the lease expires. Each expired key generates a delete event in the event history. - rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { - option (google.api.http) = { - post: "/v3alpha/lease/grant" - body: "*" - }; - } - - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. - rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/lease/revoke" - body: "*" - }; - } - - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. - rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { - option (google.api.http) = { - post: "/v3alpha/lease/keepalive" - body: "*" - }; - } - - // LeaseTimeToLive retrieves lease information. 
- rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/lease/timetolive" - body: "*" - }; - } - - // LeaseLeases lists all existing leases. - rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/lease/leases" - body: "*" - }; - } -} - -service Cluster { - // MemberAdd adds a member into the cluster. - rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { - option (google.api.http) = { - post: "/v3alpha/cluster/member/add" - body: "*" - }; - } - - // MemberRemove removes an existing member from the cluster. - rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { - option (google.api.http) = { - post: "/v3alpha/cluster/member/remove" - body: "*" - }; - } - - // MemberUpdate updates the member configuration. - rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { - option (google.api.http) = { - post: "/v3alpha/cluster/member/update" - body: "*" - }; - } - - // MemberList lists all the members in the cluster. - rpc MemberList(MemberListRequest) returns (MemberListResponse) { - option (google.api.http) = { - post: "/v3alpha/cluster/member/list" - body: "*" - }; - } -} - -service Maintenance { - // Alarm activates, deactivates, and queries alarms regarding cluster health. - rpc Alarm(AlarmRequest) returns (AlarmResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/alarm" - body: "*" - }; - } - - // Status gets the status of the member. - rpc Status(StatusRequest) returns (StatusResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/status" - body: "*" - }; - } - - // Defragment defragments a member's backend database to recover storage space. 
- rpc Defragment(DefragmentRequest) returns (DefragmentResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/defragment" - body: "*" - }; - } - - // Hash computes the hash of the KV's backend. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. - rpc Hash(HashRequest) returns (HashResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/hash" - body: "*" - }; - } - - // HashKV computes the hash of all MVCC keys up to a given revision. - rpc HashKV(HashKVRequest) returns (HashKVResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/hash" - body: "*" - }; - } - - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/snapshot" - body: "*" - }; - } - - // MoveLeader requests current leader node to transfer its leadership to transferee. - rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/transfer-leadership" - body: "*" - }; - } -} - -service Auth { - // AuthEnable enables authentication. - rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/enable" - body: "*" - }; - } - - // AuthDisable disables authentication. - rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/disable" - body: "*" - }; - } - - // Authenticate processes an authenticate request. - rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/authenticate" - body: "*" - }; - } - - // UserAdd adds a new user. 
- rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/add" - body: "*" - }; - } - - // UserGet gets detailed user information. - rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/get" - body: "*" - }; - } - - // UserList gets a list of all users. - rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/list" - body: "*" - }; - } - - // UserDelete deletes a specified user. - rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/delete" - body: "*" - }; - } - - // UserChangePassword changes the password of a specified user. - rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/changepw" - body: "*" - }; - } - - // UserGrant grants a role to a specified user. - rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/grant" - body: "*" - }; - } - - // UserRevokeRole revokes a role of specified user. - rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/revoke" - body: "*" - }; - } - - // RoleAdd adds a new role. - rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/add" - body: "*" - }; - } - - // RoleGet gets detailed role information. - rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/get" - body: "*" - }; - } - - // RoleList gets lists of all roles. 
- rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/list" - body: "*" - }; - } - - // RoleDelete deletes a specified role. - rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/delete" - body: "*" - }; - } - - // RoleGrantPermission grants a permission of a specified key or range to a specified role. - rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/grant" - body: "*" - }; - } - - // RoleRevokePermission revokes a key or range permission of a specified role. - rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/revoke" - body: "*" - }; - } -} - -message ResponseHeader { - // cluster_id is the ID of the cluster which sent the response. - uint64 cluster_id = 1; - // member_id is the ID of the member which sent the response. - uint64 member_id = 2; - // revision is the key-value store revision when the request was applied. - int64 revision = 3; - // raft_term is the raft term when the request was applied. - uint64 raft_term = 4; -} - -message RangeRequest { - enum SortOrder { - NONE = 0; // default, no sorting - ASCEND = 1; // lowest target value first - DESCEND = 2; // highest target value first - } - enum SortTarget { - KEY = 0; - VERSION = 1; - CREATE = 2; - MOD = 3; - VALUE = 4; - } - - // key is the first key for the range. If range_end is not given, the request only looks up key. - bytes key = 1; - // range_end is the upper bound on the requested range [key, range_end). - // If range_end is '\0', the range is all keys >= key. - // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), - // then the range request gets all keys prefixed with key. 
- // If both key and range_end are '\0', then the range request returns all keys. - bytes range_end = 2; - // limit is a limit on the number of keys returned for the request. When limit is set to 0, - // it is treated as no limit. - int64 limit = 3; - // revision is the point-in-time of the key-value store to use for the range. - // If revision is less or equal to zero, the range is over the newest key-value store. - // If the revision has been compacted, ErrCompacted is returned as a response. - int64 revision = 4; - - // sort_order is the order for returned sorted results. - SortOrder sort_order = 5; - - // sort_target is the key-value field to use for sorting. - SortTarget sort_target = 6; - - // serializable sets the range request to use serializable member-local reads. - // Range requests are linearizable by default; linearizable requests have higher - // latency and lower throughput than serializable requests but reflect the current - // consensus of the cluster. For better performance, in exchange for possible stale reads, - // a serializable range request is served locally without needing to reach consensus - // with other nodes in the cluster. - bool serializable = 7; - - // keys_only when set returns only the keys and not the values. - bool keys_only = 8; - - // count_only when set returns only the count of the keys in the range. - bool count_only = 9; - - // min_mod_revision is the lower bound for returned key mod revisions; all keys with - // lesser mod revisions will be filtered away. - int64 min_mod_revision = 10; - - // max_mod_revision is the upper bound for returned key mod revisions; all keys with - // greater mod revisions will be filtered away. - int64 max_mod_revision = 11; - - // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create trevisions will be filtered away. 
- int64 min_create_revision = 12; - - // max_create_revision is the upper bound for returned key create revisions; all keys with - // greater create revisions will be filtered away. - int64 max_create_revision = 13; -} - -message RangeResponse { - ResponseHeader header = 1; - // kvs is the list of key-value pairs matched by the range request. - // kvs is empty when count is requested. - repeated mvccpb.KeyValue kvs = 2; - // more indicates if there are more keys to return in the requested range. - bool more = 3; - // count is set to the number of keys within the range when requested. - int64 count = 4; -} - -message PutRequest { - // key is the key, in bytes, to put into the key-value store. - bytes key = 1; - // value is the value, in bytes, to associate with the key in the key-value store. - bytes value = 2; - // lease is the lease ID to associate with the key in the key-value store. A lease - // value of 0 indicates no lease. - int64 lease = 3; - - // If prev_kv is set, etcd gets the previous key-value pair before changing it. - // The previous key-value pair will be returned in the put response. - bool prev_kv = 4; - - // If ignore_value is set, etcd updates the key using its current value. - // Returns an error if the key does not exist. - bool ignore_value = 5; - - // If ignore_lease is set, etcd updates the key using its current lease. - // Returns an error if the key does not exist. - bool ignore_lease = 6; -} - -message PutResponse { - ResponseHeader header = 1; - // if prev_kv is set in the request, the previous key-value pair will be returned. - mvccpb.KeyValue prev_kv = 2; -} - -message DeleteRangeRequest { - // key is the first key to delete in the range. - bytes key = 1; - // range_end is the key following the last key to delete for the range [key, range_end). - // If range_end is not given, the range is defined to contain only the key argument. 
- // If range_end is one bit larger than the given key, then the range is all the keys - // with the prefix (the given key). - // If range_end is '\0', the range is all keys greater than or equal to the key argument. - bytes range_end = 2; - - // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delete response. - bool prev_kv = 3; -} - -message DeleteRangeResponse { - ResponseHeader header = 1; - // deleted is the number of keys deleted by the delete range request. - int64 deleted = 2; - // if prev_kv is set in the request, the previous key-value pairs will be returned. - repeated mvccpb.KeyValue prev_kvs = 3; -} - -message RequestOp { - // request is a union of request types accepted by a transaction. - oneof request { - RangeRequest request_range = 1; - PutRequest request_put = 2; - DeleteRangeRequest request_delete_range = 3; - TxnRequest request_txn = 4; - } -} - -message ResponseOp { - // response is a union of response types returned by a transaction. - oneof response { - RangeResponse response_range = 1; - PutResponse response_put = 2; - DeleteRangeResponse response_delete_range = 3; - TxnResponse response_txn = 4; - } -} - -message Compare { - enum CompareResult { - EQUAL = 0; - GREATER = 1; - LESS = 2; - NOT_EQUAL = 3; - } - enum CompareTarget { - VERSION = 0; - CREATE = 1; - MOD = 2; - VALUE= 3; - LEASE = 4; - } - // result is logical comparison operation for this comparison. - CompareResult result = 1; - // target is the key-value field to inspect for the comparison. - CompareTarget target = 2; - // key is the subject key for the comparison operation. - bytes key = 3; - oneof target_union { - // version is the version of the given key - int64 version = 4; - // create_revision is the creation revision of the given key - int64 create_revision = 5; - // mod_revision is the last modified revision of the given key. 
- int64 mod_revision = 6; - // value is the value of the given key, in bytes. - bytes value = 7; - // lease is the lease id of the given key. - int64 lease = 8; - // leave room for more target_union field tags, jump to 64 - } - - // range_end compares the given target to all keys in the range [key, range_end). - // See RangeRequest for more details on key ranges. - bytes range_end = 64; - // TODO: fill out with most of the rest of RangeRequest fields when needed. -} - -// From google paxosdb paper: -// Our implementation hinges around a powerful primitive which we call MultiOp. All other database -// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically -// and consists of three components: -// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check -// for the absence or presence of a value, or compare with a given value. Two different tests in the guard -// may apply to the same or different entries in the database. All tests in the guard are applied and -// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise -// it executes f op (see item 3 below). -// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or -// lookup operation, and applies to a single database entry. Two different operations in the list may apply -// to the same or different entries in the database. These operations are executed -// if guard evaluates to -// true. -// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. -message TxnRequest { - // compare is a list of predicates representing a conjunction of terms. - // If the comparisons succeed, then the success requests will be processed in order, - // and the response will contain their respective responses in order. 
- // If the comparisons fail, then the failure requests will be processed in order, - // and the response will contain their respective responses in order. - repeated Compare compare = 1; - // success is a list of requests which will be applied when compare evaluates to true. - repeated RequestOp success = 2; - // failure is a list of requests which will be applied when compare evaluates to false. - repeated RequestOp failure = 3; -} - -message TxnResponse { - ResponseHeader header = 1; - // succeeded is set to true if the compare evaluated to true or false otherwise. - bool succeeded = 2; - // responses is a list of responses corresponding to the results from applying - // success if succeeded is true or failure if succeeded is false. - repeated ResponseOp responses = 3; -} - -// CompactionRequest compacts the key-value store up to a given revision. All superseded keys -// with a revision less than the compaction revision will be removed. -message CompactionRequest { - // revision is the key-value store revision for the compaction operation. - int64 revision = 1; - // physical is set so the RPC will wait until the compaction is physically - // applied to the local database such that compacted entries are totally - // removed from the backend database. - bool physical = 2; -} - -message CompactionResponse { - ResponseHeader header = 1; -} - -message HashRequest { -} - -message HashKVRequest { - // revision is the key-value store revision for the hash operation. - int64 revision = 1; -} - -message HashKVResponse { - ResponseHeader header = 1; - // hash is the hash value computed from the responding member's MVCC keys up to a given revision. - uint32 hash = 2; - // compact_revision is the compacted revision of key-value store when hash begins. - int64 compact_revision = 3; -} - -message HashResponse { - ResponseHeader header = 1; - // hash is the hash value computed from the responding member's KV's backend. 
- uint32 hash = 2; -} - -message SnapshotRequest { -} - -message SnapshotResponse { - // header has the current key-value store information. The first header in the snapshot - // stream indicates the point in time of the snapshot. - ResponseHeader header = 1; - - // remaining_bytes is the number of blob bytes to be sent after this message - uint64 remaining_bytes = 2; - - // blob contains the next chunk of the snapshot in the snapshot stream. - bytes blob = 3; -} - -message WatchRequest { - // request_union is a request to either create a new watcher or cancel an existing watcher. - oneof request_union { - WatchCreateRequest create_request = 1; - WatchCancelRequest cancel_request = 2; - } -} - -message WatchCreateRequest { - // key is the key to register for watching. - bytes key = 1; - // range_end is the end of the range [key, range_end) to watch. If range_end is not given, - // only the key argument is watched. If range_end is equal to '\0', all keys greater than - // or equal to the key argument are watched. - // If the range_end is one bit larger than the given key, - // then all keys with the prefix (the given key) will be watched. - bytes range_end = 2; - // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". - int64 start_revision = 3; - // progress_notify is set so that the etcd server will periodically send a WatchResponse with - // no events to the new watcher if there are no recent events. It is useful when clients - // wish to recover a disconnected watcher starting from a recent known revision. - // The etcd server may decide how often it will send notifications based on current load. - bool progress_notify = 4; - - enum FilterType { - // filter out put event. - NOPUT = 0; - // filter out delete event. - NODELETE = 1; - } - // filters filter the events at server side before it sends back to the watcher. 
- repeated FilterType filters = 5; - - // If prev_kv is set, created watcher gets the previous KV before the event happens. - // If the previous KV is already compacted, nothing will be returned. - bool prev_kv = 6; -} - -message WatchCancelRequest { - // watch_id is the watcher id to cancel so that no more events are transmitted. - int64 watch_id = 1; -} - -message WatchResponse { - ResponseHeader header = 1; - // watch_id is the ID of the watcher that corresponds to the response. - int64 watch_id = 2; - // created is set to true if the response is for a create watch request. - // The client should record the watch_id and expect to receive events for - // the created watcher from the same stream. - // All events sent to the created watcher will attach with the same watch_id. - bool created = 3; - // canceled is set to true if the response is for a cancel watch request. - // No further events will be sent to the canceled watcher. - bool canceled = 4; - // compact_revision is set to the minimum index if a watcher tries to watch - // at a compacted index. - // - // This happens when creating a watcher at a compacted revision or the watcher cannot - // catch up with the progress of the key-value store. - // - // The client should treat the watcher as canceled and should not try to create any - // watcher with the same start_revision again. - int64 compact_revision = 5; - - // cancel_reason indicates the reason for canceling the watcher. - string cancel_reason = 6; - - repeated mvccpb.Event events = 11; -} - -message LeaseGrantRequest { - // TTL is the advisory time-to-live in seconds. - int64 TTL = 1; - // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. - int64 ID = 2; -} - -message LeaseGrantResponse { - ResponseHeader header = 1; - // ID is the lease ID for the granted lease. - int64 ID = 2; - // TTL is the server chosen lease time-to-live in seconds. 
- int64 TTL = 3; - string error = 4; -} - -message LeaseRevokeRequest { - // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. - int64 ID = 1; -} - -message LeaseRevokeResponse { - ResponseHeader header = 1; -} - -message LeaseKeepAliveRequest { - // ID is the lease ID for the lease to keep alive. - int64 ID = 1; -} - -message LeaseKeepAliveResponse { - ResponseHeader header = 1; - // ID is the lease ID from the keep alive request. - int64 ID = 2; - // TTL is the new time-to-live for the lease. - int64 TTL = 3; -} - -message LeaseTimeToLiveRequest { - // ID is the lease ID for the lease. - int64 ID = 1; - // keys is true to query all the keys attached to this lease. - bool keys = 2; -} - -message LeaseTimeToLiveResponse { - ResponseHeader header = 1; - // ID is the lease ID from the keep alive request. - int64 ID = 2; - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. - int64 TTL = 3; - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - int64 grantedTTL = 4; - // Keys is the list of keys attached to this lease. - repeated bytes keys = 5; -} - -message LeaseLeasesRequest { -} - -message LeaseStatus { - int64 ID = 1; - // TODO: int64 TTL = 2; -} - -message LeaseLeasesResponse { - ResponseHeader header = 1; - repeated LeaseStatus leases = 2; -} - -message Member { - // ID is the member ID for this member. - uint64 ID = 1; - // name is the human-readable name of the member. If the member is not started, the name will be an empty string. - string name = 2; - // peerURLs is the list of URLs the member exposes to the cluster for communication. - repeated string peerURLs = 3; - // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. 
- repeated string clientURLs = 4; -} - -message MemberAddRequest { - // peerURLs is the list of URLs the added member will use to communicate with the cluster. - repeated string peerURLs = 1; -} - -message MemberAddResponse { - ResponseHeader header = 1; - // member is the member information for the added member. - Member member = 2; - // members is a list of all members after adding the new member. - repeated Member members = 3; -} - -message MemberRemoveRequest { - // ID is the member ID of the member to remove. - uint64 ID = 1; -} - -message MemberRemoveResponse { - ResponseHeader header = 1; - // members is a list of all members after removing the member. - repeated Member members = 2; -} - -message MemberUpdateRequest { - // ID is the member ID of the member to update. - uint64 ID = 1; - // peerURLs is the new list of URLs the member will use to communicate with the cluster. - repeated string peerURLs = 2; -} - -message MemberUpdateResponse{ - ResponseHeader header = 1; - // members is a list of all members after updating the member. - repeated Member members = 2; -} - -message MemberListRequest { -} - -message MemberListResponse { - ResponseHeader header = 1; - // members is a list of all members associated with the cluster. - repeated Member members = 2; -} - -message DefragmentRequest { -} - -message DefragmentResponse { - ResponseHeader header = 1; -} - -message MoveLeaderRequest { - // targetID is the node ID for the new leader. - uint64 targetID = 1; -} - -message MoveLeaderResponse { - ResponseHeader header = 1; -} - -enum AlarmType { - NONE = 0; // default, used to query if any alarm is active - NOSPACE = 1; // space quota is exhausted - CORRUPT = 2; // kv store corruption detected -} - -message AlarmRequest { - enum AlarmAction { - GET = 0; - ACTIVATE = 1; - DEACTIVATE = 2; - } - // action is the kind of alarm request to issue. The action - // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a - // raised alarm. 
- AlarmAction action = 1; - // memberID is the ID of the member associated with the alarm. If memberID is 0, the - // alarm request covers all members. - uint64 memberID = 2; - // alarm is the type of alarm to consider for this request. - AlarmType alarm = 3; -} - -message AlarmMember { - // memberID is the ID of the member associated with the raised alarm. - uint64 memberID = 1; - // alarm is the type of alarm which has been raised. - AlarmType alarm = 2; -} - -message AlarmResponse { - ResponseHeader header = 1; - // alarms is a list of alarms associated with the alarm request. - repeated AlarmMember alarms = 2; -} - -message StatusRequest { -} - -message StatusResponse { - ResponseHeader header = 1; - // version is the cluster protocol version used by the responding member. - string version = 2; - // dbSize is the size of the backend database, in bytes, of the responding member. - int64 dbSize = 3; - // leader is the member ID which the responding member believes is the current leader. - uint64 leader = 4; - // raftIndex is the current raft index of the responding member. - uint64 raftIndex = 5; - // raftTerm is the current raft term of the responding member. - uint64 raftTerm = 6; -} - -message AuthEnableRequest { -} - -message AuthDisableRequest { -} - -message AuthenticateRequest { - string name = 1; - string password = 2; -} - -message AuthUserAddRequest { - string name = 1; - string password = 2; -} - -message AuthUserGetRequest { - string name = 1; -} - -message AuthUserDeleteRequest { - // name is the name of the user to delete. - string name = 1; -} - -message AuthUserChangePasswordRequest { - // name is the name of the user whose password is being changed. - string name = 1; - // password is the new password for the user. - string password = 2; -} - -message AuthUserGrantRoleRequest { - // user is the name of the user which should be granted a given role. - string user = 1; - // role is the name of the role to grant to the user. 
- string role = 2; -} - -message AuthUserRevokeRoleRequest { - string name = 1; - string role = 2; -} - -message AuthRoleAddRequest { - // name is the name of the role to add to the authentication system. - string name = 1; -} - -message AuthRoleGetRequest { - string role = 1; -} - -message AuthUserListRequest { -} - -message AuthRoleListRequest { -} - -message AuthRoleDeleteRequest { - string role = 1; -} - -message AuthRoleGrantPermissionRequest { - // name is the name of the role which will be granted the permission. - string name = 1; - // perm is the permission to grant to the role. - authpb.Permission perm = 2; -} - -message AuthRoleRevokePermissionRequest { - string role = 1; - string key = 2; - string range_end = 3; -} - -message AuthEnableResponse { - ResponseHeader header = 1; -} - -message AuthDisableResponse { - ResponseHeader header = 1; -} - -message AuthenticateResponse { - ResponseHeader header = 1; - // token is an authorized token that can be used in succeeding RPCs - string token = 2; -} - -message AuthUserAddResponse { - ResponseHeader header = 1; -} - -message AuthUserGetResponse { - ResponseHeader header = 1; - - repeated string roles = 2; -} - -message AuthUserDeleteResponse { - ResponseHeader header = 1; -} - -message AuthUserChangePasswordResponse { - ResponseHeader header = 1; -} - -message AuthUserGrantRoleResponse { - ResponseHeader header = 1; -} - -message AuthUserRevokeRoleResponse { - ResponseHeader header = 1; -} - -message AuthRoleAddResponse { - ResponseHeader header = 1; -} - -message AuthRoleGetResponse { - ResponseHeader header = 1; - - repeated authpb.Permission perm = 2; -} - -message AuthRoleListResponse { - ResponseHeader header = 1; - - repeated string roles = 2; -} - -message AuthUserListResponse { - ResponseHeader header = 1; - - repeated string users = 2; -} - -message AuthRoleDeleteResponse { - ResponseHeader header = 1; -} - -message AuthRoleGrantPermissionResponse { - ResponseHeader header = 1; -} - -message 
AuthRoleRevokePermissionResponse { - ResponseHeader header = 1; -} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go deleted file mode 100644 index 7033f132662..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go +++ /dev/null @@ -1,735 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: kv.proto -// DO NOT EDIT! - -/* - Package mvccpb is a generated protocol buffer package. - - It is generated from these files: - kv.proto - - It has these top-level messages: - KeyValue - Event -*/ -package mvccpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Event_EventType int32 - -const ( - PUT Event_EventType = 0 - DELETE Event_EventType = 1 -) - -var Event_EventType_name = map[int32]string{ - 0: "PUT", - 1: "DELETE", -} -var Event_EventType_value = map[string]int32{ - "PUT": 0, - "DELETE": 1, -} - -func (x Event_EventType) String() string { - return proto.EnumName(Event_EventType_name, int32(x)) -} -func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} } - -type KeyValue struct { - // key is the key in bytes. An empty key is not allowed. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // create_revision is the revision of last creation on this key. 
- CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"` - // mod_revision is the revision of last modification on this key. - ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"` - // version is the version of the key. A deletion resets - // the version to zero and any modification of the key - // increases its version. - Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` - // value is the value held by the key, in bytes. - Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` - // lease is the ID of the lease that attached to key. - // When the attached lease expires, the key will be deleted. - // If lease is 0, then no lease is attached to the key. - Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} } - -type Event struct { - // type is the kind of event. If type is a PUT, it indicates - // new data has been stored to the key. If type is a DELETE, - // it indicates the key was deleted. - Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"` - // kv holds the KeyValue for the event. - // A PUT event contains current kv pair. - // A PUT event with kv.Version=1 indicates the creation of a key. - // A DELETE/EXPIRE event contains the deleted key with - // its modification revision set to the revision of deletion. - Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` - // prev_kv holds the key-value pair before the event happens. 
- PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` -} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} } - -func init() { - proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") - proto.RegisterType((*Event)(nil), "mvccpb.Event") - proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) -} -func (m *KeyValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if m.CreateRevision != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) - } - if m.ModRevision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) - } - if m.Version != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.Version)) - } - if len(m.Value) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - } - if m.Lease != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.Lease)) - } - return i, nil -} - -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Type != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.Type)) - } - if m.Kv != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintKv(dAtA, i, 
uint64(m.Kv.Size())) - n1, err := m.Kv.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.PrevKv != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size())) - n2, err := m.PrevKv.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func encodeFixed64Kv(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Kv(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintKv(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *KeyValue) Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - if m.CreateRevision != 0 { - n += 1 + sovKv(uint64(m.CreateRevision)) - } - if m.ModRevision != 0 { - n += 1 + sovKv(uint64(m.ModRevision)) - } - if m.Version != 0 { - n += 1 + sovKv(uint64(m.Version)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - if m.Lease != 0 { - n += 1 + sovKv(uint64(m.Lease)) - } - return n -} - -func (m *Event) Size() (n int) { - var l int - _ = l - if m.Type != 0 { - n += 1 + sovKv(uint64(m.Type)) - } - if m.Kv != nil { - l = m.Kv.Size() - n += 1 + l + sovKv(uint64(l)) - } - if m.PrevKv != nil { - l = m.PrevKv.Size() - n += 1 + l + sovKv(uint64(l)) - } - return n -} - -func sovKv(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozKv(x uint64) (n int) { - return sovKv(uint64((x << 1) ^ 
uint64((int64(x) >> 63)))) -} -func (m *KeyValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) - } - m.CreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreateRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) - } - m.ModRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ModRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
- if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= (Event_EventType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { 
- return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kv == nil { - m.Kv = &KeyValue{} - } - if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PrevKv == nil { - m.PrevKv = &KeyValue{} - } - if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipKv(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return 
iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthKv - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipKv(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } - -var fileDescriptorKv = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, - 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, - 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, - 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, - 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, - 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 
0xae, - 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, - 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, - 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, - 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, - 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, - 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, - 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, - 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, - 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, - 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, - 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, - 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, - 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto deleted file mode 100644 index 23c911b7da8..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; -package mvccpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_enum_prefix_all) = false; - -message KeyValue { - // key is the key in bytes. An empty key is not allowed. 
- bytes key = 1; - // create_revision is the revision of last creation on this key. - int64 create_revision = 2; - // mod_revision is the revision of last modification on this key. - int64 mod_revision = 3; - // version is the version of the key. A deletion resets - // the version to zero and any modification of the key - // increases its version. - int64 version = 4; - // value is the value held by the key, in bytes. - bytes value = 5; - // lease is the ID of the lease that attached to key. - // When the attached lease expires, the key will be deleted. - // If lease is 0, then no lease is attached to the key. - int64 lease = 6; -} - -message Event { - enum EventType { - PUT = 0; - DELETE = 1; - } - // type is the kind of event. If type is a PUT, it indicates - // new data has been stored to the key. If type is a DELETE, - // it indicates the key was deleted. - EventType type = 1; - // kv holds the KeyValue for the event. - // A PUT event contains current kv pair. - // A PUT event with kv.Version=1 indicates the creation of a key. - // A DELETE/EXPIRE event contains the deleted key with - // its modification revision set to the revision of deletion. - KeyValue kv = 2; - - // prev_kv holds the key-value pair before the event happens. - KeyValue prev_kv = 3; -} diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go deleted file mode 100644 index f26254ba933..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pathutil implements utility functions for handling slash-separated -// paths. -package pathutil - -import "path" - -// CanonicalURLPath returns the canonical url path for p, which follows the rules: -// 1. the path always starts with "/" -// 2. 
replace multiple slashes with a single slash -// 3. replace each '.' '..' path name element with equivalent one -// 4. keep the trailing slash -// The function is borrowed from stdlib http.cleanPath in server.go. -func CanonicalURLPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root, - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/vendor/github.com/coreos/etcd/pkg/srv/srv.go deleted file mode 100644 index 600061ce8ea..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/srv/srv.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package srv looks up DNS SRV records. -package srv - -import ( - "fmt" - "net" - "net/url" - "strings" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict - resolveTCPAddr = net.ResolveTCPAddr -) - -// GetCluster gets the cluster information via DNS discovery. -// Also sees each entry as a separate instance. 
-func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { - tempName := int(0) - tcp2ap := make(map[string]url.URL) - - // First, resolve the apurls - for _, url := range apurls { - tcpAddr, err := resolveTCPAddr("tcp", url.Host) - if err != nil { - return nil, err - } - tcp2ap[tcpAddr.String()] = url - } - - stringParts := []string{} - updateNodeMap := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", dns) - if err != nil { - return err - } - for _, srv := range addrs { - port := fmt.Sprintf("%d", srv.Port) - host := net.JoinHostPort(srv.Target, port) - tcpAddr, terr := resolveTCPAddr("tcp", host) - if terr != nil { - err = terr - continue - } - n := "" - url, ok := tcp2ap[tcpAddr.String()] - if ok { - n = name - } - if n == "" { - n = fmt.Sprintf("%d", tempName) - tempName++ - } - // SRV records have a trailing dot but URL shouldn't. - shortHost := strings.TrimSuffix(srv.Target, ".") - urlHost := net.JoinHostPort(shortHost, port) - if ok && url.Scheme != scheme { - err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) - } else { - stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) - } - } - if len(stringParts) == 0 { - return err - } - return nil - } - - failCount := 0 - err := updateNodeMap(service+"-ssl", "https") - srvErr := make([]string, 2) - if err != nil { - srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) - failCount++ - } - err = updateNodeMap(service, "http") - if err != nil { - srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) - failCount++ - } - if failCount == 2 { - return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) - } - return stringParts, nil -} - -type SRVClients struct { - Endpoints []string - SRVs []*net.SRV -} - -// GetClient looks up the client 
endpoints for a service and domain. -func GetClient(service, domain string) (*SRVClients, error) { - var urls []*url.URL - var srvs []*net.SRV - - updateURLs := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", domain) - if err != nil { - return err - } - for _, srv := range addrs { - urls = append(urls, &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), - }) - } - srvs = append(srvs, addrs...) - return nil - } - - errHTTPS := updateURLs(service+"-ssl", "https") - errHTTP := updateURLs(service, "http") - - if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) - } - - endpoints := make([]string, len(urls)) - for i := range urls { - endpoints[i] = urls[i].String() - } - return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go deleted file mode 100644 index 3b6aa670baf..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tlsutil provides utility functions for handling TLS. 
-package tlsutil diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go deleted file mode 100644 index 79b1f632ed5..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tlsutil - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "io/ioutil" -) - -// NewCertPool creates x509 certPool with provided CA files. -func NewCertPool(CAFiles []string) (*x509.CertPool, error) { - certPool := x509.NewCertPool() - - for _, CAFile := range CAFiles { - pemByte, err := ioutil.ReadFile(CAFile) - if err != nil { - return nil, err - } - - for { - var block *pem.Block - block, pemByte = pem.Decode(pemByte) - if block == nil { - break - } - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - certPool.AddCert(cert) - } - } - - return certPool, nil -} - -// NewCert generates TLS cert by using the given cert,key and parse function. 
-func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { - cert, err := ioutil.ReadFile(certfile) - if err != nil { - return nil, err - } - - key, err := ioutil.ReadFile(keyfile) - if err != nil { - return nil, err - } - - if parseFunc == nil { - parseFunc = tls.X509KeyPair - } - - tlsCert, err := parseFunc(cert, key) - if err != nil { - return nil, err - } - return &tlsCert, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/doc.go b/vendor/github.com/coreos/etcd/pkg/transport/doc.go deleted file mode 100644 index 37658ce591a..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package transport implements various HTTP transport utilities based on Go -// net package. -package transport diff --git a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go deleted file mode 100644 index 6ccae4ee4a1..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "crypto/tls" - "fmt" - "net" - "time" -) - -type keepAliveConn interface { - SetKeepAlive(bool) error - SetKeepAlivePeriod(d time.Duration) error -} - -// NewKeepAliveListener returns a listener that listens on the given address. -// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil. -// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake. -// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html -func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) { - if scheme == "https" { - if tlscfg == nil { - return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") - } - return newTLSKeepaliveListener(l, tlscfg), nil - } - - return &keepaliveListener{ - Listener: l, - }, nil -} - -type keepaliveListener struct{ net.Listener } - -func (kln *keepaliveListener) Accept() (net.Conn, error) { - c, err := kln.Listener.Accept() - if err != nil { - return nil, err - } - kac := c.(keepAliveConn) - // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl - // default on linux: 30 + 8 * 30 - // default on osx: 30 + 8 * 75 - kac.SetKeepAlive(true) - kac.SetKeepAlivePeriod(30 * time.Second) - return c, nil -} - -// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. 
-type tlsKeepaliveListener struct { - net.Listener - config *tls.Config -} - -// Accept waits for and returns the next incoming TLS connection. -// The returned connection c is a *tls.Conn. -func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { - c, err = l.Listener.Accept() - if err != nil { - return - } - kac := c.(keepAliveConn) - // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl - // default on linux: 30 + 8 * 30 - // default on osx: 30 + 8 * 75 - kac.SetKeepAlive(true) - kac.SetKeepAlivePeriod(30 * time.Second) - c = tls.Server(c, l.config) - return -} - -// NewListener creates a Listener which accepts connections from an inner -// Listener and wraps each connection with Server. -// The configuration config must be non-nil and must have -// at least one certificate. -func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener { - l := &tlsKeepaliveListener{} - l.Listener = inner - l.config = config - return l -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go deleted file mode 100644 index 930c542066f..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2013 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package transport provides network utility functions, complementing the more -// common ones in the net package. -package transport - -import ( - "errors" - "net" - "sync" - "time" -) - -var ( - ErrNotTCP = errors.New("only tcp connections have keepalive") -) - -// LimitListener returns a Listener that accepts at most n simultaneous -// connections from the provided Listener. -func LimitListener(l net.Listener, n int) net.Listener { - return &limitListener{l, make(chan struct{}, n)} -} - -type limitListener struct { - net.Listener - sem chan struct{} -} - -func (l *limitListener) acquire() { l.sem <- struct{}{} } -func (l *limitListener) release() { <-l.sem } - -func (l *limitListener) Accept() (net.Conn, error) { - l.acquire() - c, err := l.Listener.Accept() - if err != nil { - l.release() - return nil, err - } - return &limitListenerConn{Conn: c, release: l.release}, nil -} - -type limitListenerConn struct { - net.Conn - releaseOnce sync.Once - release func() -} - -func (l *limitListenerConn) Close() error { - err := l.Conn.Close() - l.releaseOnce.Do(l.release) - return err -} - -func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { - tcpc, ok := l.Conn.(*net.TCPConn) - if !ok { - return ErrNotTCP - } - return tcpc.SetKeepAlive(doKeepAlive) -} - -func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error { - tcpc, ok := l.Conn.(*net.TCPConn) - if !ok { - return ErrNotTCP - } - return tcpc.SetKeepAlivePeriod(d) -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go deleted file mode 100644 index 555618e6f0b..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net" - "os" - "path/filepath" - "strings" - "time" - - "github.com/coreos/etcd/pkg/tlsutil" -) - -func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { - if l, err = newListener(addr, scheme); err != nil { - return nil, err - } - return wrapTLS(addr, scheme, tlsinfo, l) -} - -func newListener(addr string, scheme string) (net.Listener, error) { - if scheme == "unix" || scheme == "unixs" { - // unix sockets via unix://laddr - return NewUnixListener(addr) - } - return net.Listen("tcp", addr) -} - -func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { - if scheme != "https" && scheme != "unixs" { - return l, nil - } - return newTLSListener(l, tlsinfo, checkSAN) -} - -type TLSInfo struct { - CertFile string - KeyFile string - CAFile string // TODO: deprecate this in v4 - TrustedCAFile string - ClientCertAuth bool - CRLFile string - InsecureSkipVerify bool - - // ServerName ensures the cert matches the given host in case of discovery / virtual hosting - ServerName string - - // HandshakeFailure is optionally called when a connection fails to handshake. The - // connection will be closed immediately afterwards. - HandshakeFailure func(*tls.Conn, error) - - selfCert bool - - // parseFunc exists to simplify testing. Typically, parseFunc - // should be left nil. 
In that case, tls.X509KeyPair will be used. - parseFunc func([]byte, []byte) (tls.Certificate, error) - - // AllowedCN is a CN which must be provided by a client. - AllowedCN string -} - -func (info TLSInfo) String() string { - return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) -} - -func (info TLSInfo) Empty() bool { - return info.CertFile == "" && info.KeyFile == "" -} - -func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { - if err = os.MkdirAll(dirpath, 0700); err != nil { - return - } - - certPath := filepath.Join(dirpath, "cert.pem") - keyPath := filepath.Join(dirpath, "key.pem") - _, errcert := os.Stat(certPath) - _, errkey := os.Stat(keyPath) - if errcert == nil && errkey == nil { - info.CertFile = certPath - info.KeyFile = keyPath - info.selfCert = true - return - } - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return - } - - tmpl := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{Organization: []string{"etcd"}}, - NotBefore: time.Now(), - NotAfter: time.Now().Add(365 * (24 * time.Hour)), - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - for _, host := range hosts { - h, _, _ := net.SplitHostPort(host) - if ip := net.ParseIP(h); ip != nil { - tmpl.IPAddresses = append(tmpl.IPAddresses, ip) - } else { - tmpl.DNSNames = append(tmpl.DNSNames, h) - } - } - - priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) - if err != nil { - return - } - - certOut, err := os.Create(certPath) - if err != nil { - return - } - 
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() - - b, err := x509.MarshalECPrivateKey(priv) - if err != nil { - return - } - keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return - } - pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) - keyOut.Close() - - return SelfCert(dirpath, hosts) -} - -func (info TLSInfo) baseConfig() (*tls.Config, error) { - if info.KeyFile == "" || info.CertFile == "" { - return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) - } - - tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - if err != nil { - return nil, err - } - - cfg := &tls.Config{ - Certificates: []tls.Certificate{*tlsCert}, - MinVersion: tls.VersionTLS12, - ServerName: info.ServerName, - } - - if info.AllowedCN != "" { - cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - for _, chains := range verifiedChains { - if len(chains) != 0 { - if info.AllowedCN == chains[0].Subject.CommonName { - return nil - } - } - } - return errors.New("CommonName authentication failed") - } - } - - // this only reloads certs when there's a client request - // TODO: support server-side refresh (e.g. inotify, SIGHUP), caching - cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - } - cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { - return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - } - return cfg, nil -} - -// cafiles returns a list of CA file paths. 
-func (info TLSInfo) cafiles() []string { - cs := make([]string, 0) - if info.CAFile != "" { - cs = append(cs, info.CAFile) - } - if info.TrustedCAFile != "" { - cs = append(cs, info.TrustedCAFile) - } - return cs -} - -// ServerConfig generates a tls.Config object for use by an HTTP server. -func (info TLSInfo) ServerConfig() (*tls.Config, error) { - cfg, err := info.baseConfig() - if err != nil { - return nil, err - } - - cfg.ClientAuth = tls.NoClientCert - if info.CAFile != "" || info.ClientCertAuth { - cfg.ClientAuth = tls.RequireAndVerifyClientCert - } - - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cp, err := tlsutil.NewCertPool(CAFiles) - if err != nil { - return nil, err - } - cfg.ClientCAs = cp - } - - // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server - cfg.NextProtos = []string{"h2"} - - return cfg, nil -} - -// ClientConfig generates a tls.Config object for use by an HTTP client. -func (info TLSInfo) ClientConfig() (*tls.Config, error) { - var cfg *tls.Config - var err error - - if !info.Empty() { - cfg, err = info.baseConfig() - if err != nil { - return nil, err - } - } else { - cfg = &tls.Config{ServerName: info.ServerName} - } - cfg.InsecureSkipVerify = info.InsecureSkipVerify - - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles) - if err != nil { - return nil, err - } - } - - if info.selfCert { - cfg.InsecureSkipVerify = true - } - return cfg, nil -} - -// IsClosedConnError returns true if the error is from closing listener, cmux. 
-// copied from golang.org/x/net/http2/http2.go -func IsClosedConnError(err error) bool { - // 'use of closed network connection' (Go <=1.8) - // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) - // 'mux: listener closed' (cmux.ErrListenerClosed) - return err != nil && strings.Contains(err.Error(), "closed") -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go deleted file mode 100644 index 6f1600945cc..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "strings" - "sync" -) - -// tlsListener overrides a TLS listener so it will reject client -// certificates with insufficient SAN credentials or CRL revoked -// certificates. -type tlsListener struct { - net.Listener - connc chan net.Conn - donec chan struct{} - err error - handshakeFailure func(*tls.Conn, error) - check tlsCheckFunc -} - -type tlsCheckFunc func(context.Context, *tls.Conn) error - -// NewTLSListener handshakes TLS connections and performs optional CRL checking. 
-func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { - check := func(context.Context, *tls.Conn) error { return nil } - return newTLSListener(l, tlsinfo, check) -} - -func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) { - if tlsinfo == nil || tlsinfo.Empty() { - l.Close() - return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) - } - tlscfg, err := tlsinfo.ServerConfig() - if err != nil { - return nil, err - } - - hf := tlsinfo.HandshakeFailure - if hf == nil { - hf = func(*tls.Conn, error) {} - } - - if len(tlsinfo.CRLFile) > 0 { - prevCheck := check - check = func(ctx context.Context, tlsConn *tls.Conn) error { - if err := prevCheck(ctx, tlsConn); err != nil { - return err - } - st := tlsConn.ConnectionState() - if certs := st.PeerCertificates; len(certs) > 0 { - return checkCRL(tlsinfo.CRLFile, certs) - } - return nil - } - } - - tlsl := &tlsListener{ - Listener: tls.NewListener(l, tlscfg), - connc: make(chan net.Conn), - donec: make(chan struct{}), - handshakeFailure: hf, - check: check, - } - go tlsl.acceptLoop() - return tlsl, nil -} - -func (l *tlsListener) Accept() (net.Conn, error) { - select { - case conn := <-l.connc: - return conn, nil - case <-l.donec: - return nil, l.err - } -} - -func checkSAN(ctx context.Context, tlsConn *tls.Conn) error { - st := tlsConn.ConnectionState() - if certs := st.PeerCertificates; len(certs) > 0 { - addr := tlsConn.RemoteAddr().String() - return checkCertSAN(ctx, certs[0], addr) - } - return nil -} - -// acceptLoop launches each TLS handshake in a separate goroutine -// to prevent a hanging TLS connection from blocking other connections. 
-func (l *tlsListener) acceptLoop() { - var wg sync.WaitGroup - var pendingMu sync.Mutex - - pending := make(map[net.Conn]struct{}) - ctx, cancel := context.WithCancel(context.Background()) - defer func() { - cancel() - pendingMu.Lock() - for c := range pending { - c.Close() - } - pendingMu.Unlock() - wg.Wait() - close(l.donec) - }() - - for { - conn, err := l.Listener.Accept() - if err != nil { - l.err = err - return - } - - pendingMu.Lock() - pending[conn] = struct{}{} - pendingMu.Unlock() - - wg.Add(1) - go func() { - defer func() { - if conn != nil { - conn.Close() - } - wg.Done() - }() - - tlsConn := conn.(*tls.Conn) - herr := tlsConn.Handshake() - pendingMu.Lock() - delete(pending, conn) - pendingMu.Unlock() - - if herr != nil { - l.handshakeFailure(tlsConn, herr) - return - } - if err := l.check(ctx, tlsConn); err != nil { - l.handshakeFailure(tlsConn, err) - return - } - - select { - case l.connc <- tlsConn: - conn = nil - case <-ctx.Done(): - } - }() - } -} - -func checkCRL(crlPath string, cert []*x509.Certificate) error { - // TODO: cache - crlBytes, err := ioutil.ReadFile(crlPath) - if err != nil { - return err - } - certList, err := x509.ParseCRL(crlBytes) - if err != nil { - return err - } - revokedSerials := make(map[string]struct{}) - for _, rc := range certList.TBSCertList.RevokedCertificates { - revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{} - } - for _, c := range cert { - serial := string(c.SerialNumber.Bytes()) - if _, ok := revokedSerials[serial]; ok { - return fmt.Errorf("transport: certificate serial %x revoked", serial) - } - } - return nil -} - -func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { - if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { - return nil - } - h, _, herr := net.SplitHostPort(remoteAddr) - if herr != nil { - return herr - } - if len(cert.IPAddresses) > 0 { - cerr := cert.VerifyHostname(h) - if cerr == nil { - return nil - } - if len(cert.DNSNames) == 0 { - 
return cerr - } - } - if len(cert.DNSNames) > 0 { - ok, err := isHostInDNS(ctx, h, cert.DNSNames) - if ok { - return nil - } - errStr := "" - if err != nil { - errStr = " (" + err.Error() + ")" - } - return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) - } - return nil -} - -func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { - // reverse lookup - wildcards, names := []string{}, []string{} - for _, dns := range dnsNames { - if strings.HasPrefix(dns, "*.") { - wildcards = append(wildcards, dns[1:]) - } else { - names = append(names, dns) - } - } - lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) - for _, name := range lnames { - // strip trailing '.' from PTR record - if name[len(name)-1] == '.' { - name = name[:len(name)-1] - } - for _, wc := range wildcards { - if strings.HasSuffix(name, wc) { - return true, nil - } - } - for _, n := range names { - if n == name { - return true, nil - } - } - } - err = lerr - - // forward lookup - for _, dns := range names { - addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) - if lerr != nil { - err = lerr - continue - } - for _, addr := range addrs { - if addr == host { - return true, nil - } - } - } - return false, err -} - -func (l *tlsListener) Close() error { - err := l.Listener.Close() - <-l.donec - return err -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go deleted file mode 100644 index 7e8c02030fe..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "time" -) - -type timeoutConn struct { - net.Conn - wtimeoutd time.Duration - rdtimeoutd time.Duration -} - -func (c timeoutConn) Write(b []byte) (n int, err error) { - if c.wtimeoutd > 0 { - if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil { - return 0, err - } - } - return c.Conn.Write(b) -} - -func (c timeoutConn) Read(b []byte) (n int, err error) { - if c.rdtimeoutd > 0 { - if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil { - return 0, err - } - } - return c.Conn.Read(b) -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go deleted file mode 100644 index 6ae39ecfc9b..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package transport - -import ( - "net" - "time" -) - -type rwTimeoutDialer struct { - wtimeoutd time.Duration - rdtimeoutd time.Duration - net.Dialer -} - -func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) { - conn, err := d.Dialer.Dial(network, address) - tconn := &timeoutConn{ - rdtimeoutd: d.rdtimeoutd, - wtimeoutd: d.wtimeoutd, - Conn: conn, - } - return tconn, err -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go deleted file mode 100644 index b35e04955bb..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "time" -) - -// NewTimeoutListener returns a listener that listens on the given address. -// If read/write on the accepted connection blocks longer than its time limit, -// it will return timeout error. 
-func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { - ln, err := newListener(addr, scheme) - if err != nil { - return nil, err - } - ln = &rwTimeoutListener{ - Listener: ln, - rdtimeoutd: rdtimeoutd, - wtimeoutd: wtimeoutd, - } - if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil { - return nil, err - } - return ln, nil -} - -type rwTimeoutListener struct { - net.Listener - wtimeoutd time.Duration - rdtimeoutd time.Duration -} - -func (rwln *rwTimeoutListener) Accept() (net.Conn, error) { - c, err := rwln.Listener.Accept() - if err != nil { - return nil, err - } - return timeoutConn{ - Conn: c, - wtimeoutd: rwln.wtimeoutd, - rdtimeoutd: rwln.rdtimeoutd, - }, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go deleted file mode 100644 index ea16b4c0f86..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "net/http" - "time" -) - -// NewTimeoutTransport returns a transport created using the given TLS info. -// If read/write on the created connection blocks longer than its time limit, -// it will return timeout error. 
-// If read/write timeout is set, transport will not be able to reuse connection. -func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) { - tr, err := NewTransport(info, dialtimeoutd) - if err != nil { - return nil, err - } - - if rdtimeoutd != 0 || wtimeoutd != 0 { - // the timed out connection will timeout soon after it is idle. - // it should not be put back to http transport as an idle connection for future usage. - tr.MaxIdleConnsPerHost = -1 - } else { - // allow more idle connections between peers to avoid unnecessary port allocation. - tr.MaxIdleConnsPerHost = 1024 - } - - tr.Dial = (&rwTimeoutDialer{ - Dialer: net.Dialer{ - Timeout: dialtimeoutd, - KeepAlive: 30 * time.Second, - }, - rdtimeoutd: rdtimeoutd, - wtimeoutd: wtimeoutd, - }).Dial - return tr, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/tls.go b/vendor/github.com/coreos/etcd/pkg/transport/tls.go deleted file mode 100644 index 62fe0d38519..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/tls.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "fmt" - "strings" - "time" -) - -// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those -// endpoints that could be validated as secure. 
-func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { - t, err := NewTransport(tlsInfo, 5*time.Second) - if err != nil { - return nil, err - } - var errs []string - var endpoints []string - for _, ep := range eps { - if !strings.HasPrefix(ep, "https://") { - errs = append(errs, fmt.Sprintf("%q is insecure", ep)) - continue - } - conn, cerr := t.Dial("tcp", ep[len("https://"):]) - if cerr != nil { - errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr)) - continue - } - conn.Close() - endpoints = append(endpoints, ep) - } - if len(errs) != 0 { - err = fmt.Errorf("%s", strings.Join(errs, ",")) - } - return endpoints, err -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/vendor/github.com/coreos/etcd/pkg/transport/transport.go deleted file mode 100644 index 4a7fe69d2e1..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/transport.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package transport - -import ( - "net" - "net/http" - "strings" - "time" -) - -type unixTransport struct{ *http.Transport } - -func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) { - cfg, err := info.ClientConfig() - if err != nil { - return nil, err - } - - t := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: dialtimeoutd, - // value taken from http.DefaultTransport - KeepAlive: 30 * time.Second, - }).Dial, - // value taken from http.DefaultTransport - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: cfg, - } - - dialer := (&net.Dialer{ - Timeout: dialtimeoutd, - KeepAlive: 30 * time.Second, - }) - dial := func(net, addr string) (net.Conn, error) { - return dialer.Dial("unix", addr) - } - - tu := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: cfg, - } - ut := &unixTransport{tu} - - t.RegisterProtocol("unix", ut) - t.RegisterProtocol("unixs", ut) - - return t, nil -} - -func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) { - url := *req.URL - req.URL = &url - req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) - return urt.Transport.RoundTrip(req) -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go deleted file mode 100644 index 123e2036f0f..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "os" -) - -type unixListener struct{ net.Listener } - -func NewUnixListener(addr string) (net.Listener, error) { - if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { - return nil, err - } - l, err := net.Listen("unix", addr) - if err != nil { - return nil, err - } - return &unixListener{l}, nil -} - -func (ul *unixListener) Close() error { - if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { - return err - } - return ul.Listener.Close() -} diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go deleted file mode 100644 index de8ef0bd712..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package types declares various data types and implements type-checking -// functions. 
-package types diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go deleted file mode 100644 index 1b042d9ce65..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/id.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "strconv" -) - -// ID represents a generic identifier which is canonically -// stored as a uint64 but is typically represented as a -// base-16 string for input/output -type ID uint64 - -func (i ID) String() string { - return strconv.FormatUint(uint64(i), 16) -} - -// IDFromString attempts to create an ID from a base-16 string. 
-func IDFromString(s string) (ID, error) { - i, err := strconv.ParseUint(s, 16, 64) - return ID(i), err -} - -// IDSlice implements the sort interface -type IDSlice []ID - -func (p IDSlice) Len() int { return len(p) } -func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } -func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go deleted file mode 100644 index 73ef431bef1..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/set.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "sync" -) - -type Set interface { - Add(string) - Remove(string) - Contains(string) bool - Equals(Set) bool - Length() int - Values() []string - Copy() Set - Sub(Set) Set -} - -func NewUnsafeSet(values ...string) *unsafeSet { - set := &unsafeSet{make(map[string]struct{})} - for _, v := range values { - set.Add(v) - } - return set -} - -func NewThreadsafeSet(values ...string) *tsafeSet { - us := NewUnsafeSet(values...) 
- return &tsafeSet{us, sync.RWMutex{}} -} - -type unsafeSet struct { - d map[string]struct{} -} - -// Add adds a new value to the set (no-op if the value is already present) -func (us *unsafeSet) Add(value string) { - us.d[value] = struct{}{} -} - -// Remove removes the given value from the set -func (us *unsafeSet) Remove(value string) { - delete(us.d, value) -} - -// Contains returns whether the set contains the given value -func (us *unsafeSet) Contains(value string) (exists bool) { - _, exists = us.d[value] - return -} - -// ContainsAll returns whether the set contains all given values -func (us *unsafeSet) ContainsAll(values []string) bool { - for _, s := range values { - if !us.Contains(s) { - return false - } - } - return true -} - -// Equals returns whether the contents of two sets are identical -func (us *unsafeSet) Equals(other Set) bool { - v1 := sort.StringSlice(us.Values()) - v2 := sort.StringSlice(other.Values()) - v1.Sort() - v2.Sort() - return reflect.DeepEqual(v1, v2) -} - -// Length returns the number of elements in the set -func (us *unsafeSet) Length() int { - return len(us.d) -} - -// Values returns the values of the Set in an unspecified order. 
-func (us *unsafeSet) Values() (values []string) { - values = make([]string, 0) - for val := range us.d { - values = append(values, val) - } - return -} - -// Copy creates a new Set containing the values of the first -func (us *unsafeSet) Copy() Set { - cp := NewUnsafeSet() - for val := range us.d { - cp.Add(val) - } - - return cp -} - -// Sub removes all elements in other from the set -func (us *unsafeSet) Sub(other Set) Set { - oValues := other.Values() - result := us.Copy().(*unsafeSet) - - for _, val := range oValues { - if _, ok := result.d[val]; !ok { - continue - } - delete(result.d, val) - } - - return result -} - -type tsafeSet struct { - us *unsafeSet - m sync.RWMutex -} - -func (ts *tsafeSet) Add(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Add(value) -} - -func (ts *tsafeSet) Remove(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Remove(value) -} - -func (ts *tsafeSet) Contains(value string) (exists bool) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Contains(value) -} - -func (ts *tsafeSet) Equals(other Set) bool { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Equals(other) -} - -func (ts *tsafeSet) Length() int { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Length() -} - -func (ts *tsafeSet) Values() (values []string) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Values() -} - -func (ts *tsafeSet) Copy() Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Copy().(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} - -func (ts *tsafeSet) Sub(other Set) Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Sub(other).(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go deleted file mode 100644 index 0dd9ca798ae..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/slice.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The etcd Authors 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -// Uint64Slice implements sort interface -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go deleted file mode 100644 index 9e5d03ff645..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/urls.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "errors" - "fmt" - "net" - "net/url" - "sort" - "strings" -) - -type URLs []url.URL - -func NewURLs(strs []string) (URLs, error) { - all := make([]url.URL, len(strs)) - if len(all) == 0 { - return nil, errors.New("no valid URLs given") - } - for i, in := range strs { - in = strings.TrimSpace(in) - u, err := url.Parse(in) - if err != nil { - return nil, err - } - if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { - return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) - } - if _, _, err := net.SplitHostPort(u.Host); err != nil { - return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) - } - if u.Path != "" { - return nil, fmt.Errorf("URL must not contain a path: %s", in) - } - all[i] = *u - } - us := URLs(all) - us.Sort() - - return us, nil -} - -func MustNewURLs(strs []string) URLs { - urls, err := NewURLs(strs) - if err != nil { - panic(err) - } - return urls -} - -func (us URLs) String() string { - return strings.Join(us.StringSlice(), ",") -} - -func (us *URLs) Sort() { - sort.Sort(us) -} -func (us URLs) Len() int { return len(us) } -func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } -func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } - -func (us URLs) StringSlice() []string { - out := make([]string, len(us)) - for i := range us { - out[i] = us[i].String() - } - - return out -} diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go deleted file mode 100644 index 47690cc381a..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "sort" - "strings" -) - -// URLsMap is a map from a name to its URLs. -type URLsMap map[string]URLs - -// NewURLsMap returns a URLsMap instantiated from the given string, -// which consists of discovery-formatted names-to-URLs, like: -// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 -func NewURLsMap(s string) (URLsMap, error) { - m := parse(s) - - cl := URLsMap{} - for name, urls := range m { - us, err := NewURLs(urls) - if err != nil { - return nil, err - } - cl[name] = us - } - return cl, nil -} - -// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The -// string values in the map can be multiple values separated by the sep string. -func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { - var err error - um := URLsMap{} - for k, v := range m { - um[k], err = NewURLs(strings.Split(v, sep)) - if err != nil { - return nil, err - } - } - return um, nil -} - -// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. -func (c URLsMap) String() string { - var pairs []string - for name, urls := range c { - for _, url := range urls { - pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) - } - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -// URLs returns a list of all URLs. -// The returned list is sorted in ascending lexicographical order. 
-func (c URLsMap) URLs() []string { - var urls []string - for _, us := range c { - for _, u := range us { - urls = append(urls, u.String()) - } - } - sort.Strings(urls) - return urls -} - -// Len returns the size of URLsMap. -func (c URLsMap) Len() int { - return len(c) -} - -// parse parses the given string and returns a map listing the values specified for each key. -func parse(s string) map[string][]string { - m := make(map[string][]string) - for s != "" { - key := s - if i := strings.IndexAny(key, ","); i >= 0 { - key, s = key[:i], key[i+1:] - } else { - s = "" - } - if key == "" { - continue - } - value := "" - if i := strings.Index(key, "="); i >= 0 { - key, value = key[:i], key[i+1:] - } - m[key] = append(m[key], value) - } - return m -} diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go deleted file mode 100644 index 9134cebd35b..00000000000 --- a/vendor/github.com/coreos/etcd/version/version.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package version implements etcd version parsing and contains latest version -// information. -package version - -import ( - "fmt" - "strings" - - "github.com/coreos/go-semver/semver" -) - -var ( - // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
- MinClusterVersion = "3.0.0" - Version = "3.2.0+git" - APIVersion = "unknown" - - // Git SHA Value will be set during build - GitSHA = "Not provided (use ./build instead of go build)" -) - -func init() { - ver, err := semver.NewVersion(Version) - if err == nil { - APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) - } -} - -type Versions struct { - Server string `json:"etcdserver"` - Cluster string `json:"etcdcluster"` - // TODO: raft state machine version -} - -// Cluster only keeps the major.minor. -func Cluster(v string) string { - vs := strings.Split(v, ".") - if len(vs) <= 2 { - return v - } - return fmt.Sprintf("%s.%s", vs[0], vs[1]) -} diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/github.com/coreos/go-semver/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go deleted file mode 100644 index 76cf4852c76..00000000000 --- a/vendor/github.com/coreos/go-semver/semver/semver.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright 2013-2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Semantic Versions http://semver.org -package semver - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -type Version struct { - Major int64 - Minor int64 - Patch int64 - PreRelease PreRelease - Metadata string -} - -type PreRelease string - -func splitOff(input *string, delim string) (val string) { - parts := strings.SplitN(*input, delim, 2) - - if len(parts) == 2 { - *input = parts[0] - val = parts[1] - } - - return val -} - -func New(version string) *Version { - return Must(NewVersion(version)) -} - -func NewVersion(version string) (*Version, error) { - v := Version{} - - if err := v.Set(version); err != nil { - return nil, err - } - - return &v, nil -} - -// Must is a helper for wrapping NewVersion and will panic if err is not nil. -func Must(v *Version, err error) *Version { - if err != nil { - panic(err) - } - return v -} - -// Set parses and updates v from the given version string. Implements flag.Value -func (v *Version) Set(version string) error { - metadata := splitOff(&version, "+") - preRelease := PreRelease(splitOff(&version, "-")) - dotParts := strings.SplitN(version, ".", 3) - - if len(dotParts) != 3 { - return fmt.Errorf("%s is not in dotted-tri format", version) - } - - if err := validateIdentifier(string(preRelease)); err != nil { - return fmt.Errorf("failed to validate pre-release: %v", err) - } - - if err := validateIdentifier(metadata); err != nil { - return fmt.Errorf("failed to validate metadata: %v", err) - } - - parsed := make([]int64, 3, 3) - - for i, v := range dotParts[:3] { - val, err := strconv.ParseInt(v, 10, 64) - parsed[i] = val - if err != nil { - return err - } - } - - v.Metadata = metadata - v.PreRelease = preRelease - v.Major = parsed[0] - v.Minor = parsed[1] - v.Patch = parsed[2] - return nil -} - -func (v Version) String() string { - var buffer bytes.Buffer - - fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) - - if v.PreRelease != "" { - fmt.Fprintf(&buffer, "-%s", v.PreRelease) - 
} - - if v.Metadata != "" { - fmt.Fprintf(&buffer, "+%s", v.Metadata) - } - - return buffer.String() -} - -func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { - var data string - if err := unmarshal(&data); err != nil { - return err - } - return v.Set(data) -} - -func (v Version) MarshalJSON() ([]byte, error) { - return []byte(`"` + v.String() + `"`), nil -} - -func (v *Version) UnmarshalJSON(data []byte) error { - l := len(data) - if l == 0 || string(data) == `""` { - return nil - } - if l < 2 || data[0] != '"' || data[l-1] != '"' { - return errors.New("invalid semver string") - } - return v.Set(string(data[1 : l-1])) -} - -// Compare tests if v is less than, equal to, or greater than versionB, -// returning -1, 0, or +1 respectively. -func (v Version) Compare(versionB Version) int { - if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { - return cmp - } - return preReleaseCompare(v, versionB) -} - -// Equal tests if v is equal to versionB. -func (v Version) Equal(versionB Version) bool { - return v.Compare(versionB) == 0 -} - -// LessThan tests if v is less than versionB. -func (v Version) LessThan(versionB Version) bool { - return v.Compare(versionB) < 0 -} - -// Slice converts the comparable parts of the semver into a slice of integers. -func (v Version) Slice() []int64 { - return []int64{v.Major, v.Minor, v.Patch} -} - -func (p PreRelease) Slice() []string { - preRelease := string(p) - return strings.Split(preRelease, ".") -} - -func preReleaseCompare(versionA Version, versionB Version) int { - a := versionA.PreRelease - b := versionB.PreRelease - - /* Handle the case where if two versions are otherwise equal it is the - * one without a PreRelease that is greater */ - if len(a) == 0 && (len(b) > 0) { - return 1 - } else if len(b) == 0 && (len(a) > 0) { - return -1 - } - - // If there is a prerelease, check and compare each part. 
- return recursivePreReleaseCompare(a.Slice(), b.Slice()) -} - -func recursiveCompare(versionA []int64, versionB []int64) int { - if len(versionA) == 0 { - return 0 - } - - a := versionA[0] - b := versionB[0] - - if a > b { - return 1 - } else if a < b { - return -1 - } - - return recursiveCompare(versionA[1:], versionB[1:]) -} - -func recursivePreReleaseCompare(versionA []string, versionB []string) int { - // A larger set of pre-release fields has a higher precedence than a smaller set, - // if all of the preceding identifiers are equal. - if len(versionA) == 0 { - if len(versionB) > 0 { - return -1 - } - return 0 - } else if len(versionB) == 0 { - // We're longer than versionB so return 1. - return 1 - } - - a := versionA[0] - b := versionB[0] - - aInt := false - bInt := false - - aI, err := strconv.Atoi(versionA[0]) - if err == nil { - aInt = true - } - - bI, err := strconv.Atoi(versionB[0]) - if err == nil { - bInt = true - } - - // Numeric identifiers always have lower precedence than non-numeric identifiers. 
- if aInt && !bInt { - return -1 - } else if !aInt && bInt { - return 1 - } - - // Handle Integer Comparison - if aInt && bInt { - if aI > bI { - return 1 - } else if aI < bI { - return -1 - } - } - - // Handle String Comparison - if a > b { - return 1 - } else if a < b { - return -1 - } - - return recursivePreReleaseCompare(versionA[1:], versionB[1:]) -} - -// BumpMajor increments the Major field by 1 and resets all other fields to their default values -func (v *Version) BumpMajor() { - v.Major += 1 - v.Minor = 0 - v.Patch = 0 - v.PreRelease = PreRelease("") - v.Metadata = "" -} - -// BumpMinor increments the Minor field by 1 and resets all other fields to their default values -func (v *Version) BumpMinor() { - v.Minor += 1 - v.Patch = 0 - v.PreRelease = PreRelease("") - v.Metadata = "" -} - -// BumpPatch increments the Patch field by 1 and resets all other fields to their default values -func (v *Version) BumpPatch() { - v.Patch += 1 - v.PreRelease = PreRelease("") - v.Metadata = "" -} - -// validateIdentifier makes sure the provided identifier satisfies semver spec -func validateIdentifier(id string) error { - if id != "" && !reIdentifier.MatchString(id) { - return fmt.Errorf("%s is not a valid semver identifier", id) - } - return nil -} - -// reIdentifier is a regular expression used to check that pre-release and metadata -// identifiers satisfy the spec requirements -var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go deleted file mode 100644 index e256b41a5dd..00000000000 --- a/vendor/github.com/coreos/go-semver/semver/sort.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2013-2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semver - -import ( - "sort" -) - -type Versions []*Version - -func (s Versions) Len() int { - return len(s) -} - -func (s Versions) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s Versions) Less(i, j int) bool { - return s[i].LessThan(*s[j]) -} - -// Sort sorts the given slice of Version -func Sort(versions []*Version) { - sort.Sort(Versions(versions)) -} diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE deleted file mode 100644 index df83a9c2f01..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2012 Dave Grijalva - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md deleted file mode 100644 index 7fc1f793cbc..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md +++ /dev/null @@ -1,97 +0,0 @@ -## Migration Guide from v2 -> v3 - -Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. - -### `Token.Claims` is now an interface type - -The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. - -`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. - -The old example for parsing a token looked like this.. - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is now directly mapped to... - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -`StandardClaims` is designed to be embedded in your custom type. 
You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. - -```go - type MyCustomClaims struct { - User string - *StandardClaims - } - - if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { - claims := token.Claims.(*MyCustomClaims) - fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) - } -``` - -### `ParseFromRequest` has been moved - -To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`. - -`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. - -This simple parsing example: - -```go - if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is directly mapped to: - -```go - if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -There are several concrete `Extractor` types provided for your convenience: - -* `HeaderExtractor` will search a list of headers until one contains content. -* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. -* `MultiExtractor` will try a list of `Extractors` in order until one returns content. -* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. 
-* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument -* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header - - -### RSA signing methods no longer accept `[]byte` keys - -Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. - -To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. 
- -```go - func keyLookupFunc(*Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - - // Look up key - key, err := lookupPublicKey(token.Header["kid"]) - if err != nil { - return nil, err - } - - // Unpack key from PEM encoded PKCS8 - return jwt.ParseRSAPublicKeyFromPEM(key) - } -``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md deleted file mode 100644 index 25aec486c63..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ /dev/null @@ -1,85 +0,0 @@ -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) - -[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) - -**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. - -**NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. - - -## What the heck is a JWT? - -JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. - -In short, it's a signed JSON object that does something useful (for example, authentication). 
It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. - -The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. - -## Examples - -See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: - -* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) -* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) -* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) - -## Extensions - -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. 
- -Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go - -## Compliance - -This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: - -* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). - -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning. - -## Usage Tips - -### Signing vs Encryption - -A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data: - -* The author of the token was in the possession of the signing secret -* The data has not been modified since it was signed - -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. 
- -### Choosing a Signing Method - -There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. - -Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. - -Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. - -### JWT and OAuth - -It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. - -Without going too far down the rabbit hole, here's a description of the interaction of these technologies: - -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. -* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. 
-* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - -## More - -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). - -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md deleted file mode 100644 index c21551f6bbd..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ /dev/null @@ -1,111 +0,0 @@ -## `jwt-go` Version History - -#### 3.1.0 - -* Improvements to `jwt` command line tool -* Added `SkipClaimsValidation` option to `Parser` -* Documentation updates - -#### 3.0.0 - -* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code - * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. - * `ParseFromRequest` has been moved to `request` subpackage and usage has changed - * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. -* Other Additions and Changes - * Added `Claims` interface type to allow users to decode the claims into a custom type - * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. 
- * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage - * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` - * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. - * Added several new, more specific, validation errors to error type bitmask - * Moved examples from README to executable example files - * Signing method registry is now thread safe - * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) - -#### 2.7.0 - -This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. - -* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying -* Error text for expired tokens includes how long it's been expired -* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` -* Documentation updates - -#### 2.6.0 - -* Exposed inner error within ValidationError -* Fixed validation errors when using UseJSONNumber flag -* Added several unit tests - -#### 2.5.0 - -* Added support for signing method none. You shouldn't use this. The API tries to make this clear. -* Updated/fixed some documentation -* Added more helpful error message when trying to parse tokens that begin with `BEARER ` - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. Anything outside this set will be rejected. 
- * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. - -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. 
- -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. 
No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go deleted file mode 100644 index f0228f02e03..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/claims.go +++ /dev/null @@ -1,134 +0,0 @@ -package jwt - -import ( - "crypto/subtle" - "fmt" - "time" -) - -// For a type to be a Claims object, it must just have a Valid method that determines -// if the token is invalid for any supported reason -type Claims interface { - Valid() error -} - -// Structured version of Claims Section, as referenced at -// https://tools.ietf.org/html/rfc7519#section-4.1 -// See examples for how to use this with your own claim types -type StandardClaims struct { - Audience string `json:"aud,omitempty"` - ExpiresAt int64 `json:"exp,omitempty"` - Id string `json:"jti,omitempty"` - IssuedAt int64 `json:"iat,omitempty"` - Issuer string `json:"iss,omitempty"` - NotBefore int64 `json:"nbf,omitempty"` - Subject string `json:"sub,omitempty"` -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (c StandardClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - // The claims below are optional, by default, so if they are set to the - // default value in Go, let's not fail the verification for them. 
- if c.VerifyExpiresAt(now, false) == false { - delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) - vErr.Errors |= ValidationErrorExpired - } - - if c.VerifyIssuedAt(now, false) == false { - vErr.Inner = fmt.Errorf("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if c.VerifyNotBefore(now, false) == false { - vErr.Inner = fmt.Errorf("token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { - return verifyAud(c.Audience, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { - return verifyExp(c.ExpiresAt, cmp, req) -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { - return verifyIat(c.IssuedAt, cmp, req) -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { - return verifyIss(c.Issuer, cmp, req) -} - -// Compares the nbf claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { - return verifyNbf(c.NotBefore, cmp, req) -} - -// ----- helpers - -func verifyAud(aud string, cmp string, required bool) bool { - if aud == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyExp(exp int64, now int64, required bool) bool { - if exp == 0 { - return !required - } - return now <= exp -} - -func verifyIat(iat int64, now int64, required bool) bool { - if iat == 0 { - return !required - } - return now >= iat -} - -func verifyIss(iss string, cmp string, required bool) bool { - if iss == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyNbf(nbf int64, now int64, required bool) bool { - if nbf == 0 { - return !required - } - return now >= nbf -} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go deleted file mode 100644 index a86dc1a3b34..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html -// -// See README.md for more info. 
-package jwt diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go deleted file mode 100644 index 2f59a222363..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go +++ /dev/null @@ -1,147 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "errors" - "math/big" -) - -var ( - // Sadly this is missing from crypto/ecdsa compared to crypto/rsa - ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") -) - -// Implements the ECDSA family of signing methods signing methods -type SigningMethodECDSA struct { - Name string - Hash crypto.Hash - KeySize int - CurveBits int -} - -// Specific instances for EC256 and company -var ( - SigningMethodES256 *SigningMethodECDSA - SigningMethodES384 *SigningMethodECDSA - SigningMethodES512 *SigningMethodECDSA -) - -func init() { - // ES256 - SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} - RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { - return SigningMethodES256 - }) - - // ES384 - SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} - RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { - return SigningMethodES384 - }) - - // ES512 - SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} - RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { - return SigningMethodES512 - }) -} - -func (m *SigningMethodECDSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an ecdsa.PublicKey struct -func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - // Get the key - var ecdsaKey *ecdsa.PublicKey - switch k := key.(type) { - case *ecdsa.PublicKey: - ecdsaKey = k - default: - 
return ErrInvalidKeyType - } - - if len(sig) != 2*m.KeySize { - return ErrECDSAVerification - } - - r := big.NewInt(0).SetBytes(sig[:m.KeySize]) - s := big.NewInt(0).SetBytes(sig[m.KeySize:]) - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { - return nil - } else { - return ErrECDSAVerification - } -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an ecdsa.PrivateKey struct -func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { - // Get the key - var ecdsaKey *ecdsa.PrivateKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - ecdsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return r, s - if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { - curveBits := ecdsaKey.Curve.Params().BitSize - - if m.CurveBits != curveBits { - return "", ErrInvalidKey - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outpus (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) 
- - return EncodeSegment(out), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go deleted file mode 100644 index d19624b7264..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go +++ /dev/null @@ -1,67 +0,0 @@ -package jwt - -import ( - "crypto/ecdsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") -) - -// Parse PEM encoded Elliptic Curve Private Key Structure -func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { - return nil, err - } - - var pkey *ecdsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, ErrNotECPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *ecdsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, ErrNotECPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go deleted file mode 100644 index 
1c93024aad2..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/errors.go +++ /dev/null @@ -1,59 +0,0 @@ -package jwt - -import ( - "errors" -) - -// Error constants -var ( - ErrInvalidKey = errors.New("key is invalid") - ErrInvalidKeyType = errors.New("key is of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") -) - -// The errors that might occur when parsing and validating a token -const ( - ValidationErrorMalformed uint32 = 1 << iota // Token is malformed - ValidationErrorUnverifiable // Token could not be verified because of signing problems - ValidationErrorSignatureInvalid // Signature validation failed - - // Standard Claim validation errors - ValidationErrorAudience // AUD validation failed - ValidationErrorExpired // EXP validation failed - ValidationErrorIssuedAt // IAT validation failed - ValidationErrorIssuer // ISS validation failed - ValidationErrorNotValidYet // NBF validation failed - ValidationErrorId // JTI validation failed - ValidationErrorClaimsInvalid // Generic claims validation error -) - -// Helper for constructing a ValidationError with a string error message -func NewValidationError(errorText string, errorFlags uint32) *ValidationError { - return &ValidationError{ - text: errorText, - Errors: errorFlags, - } -} - -// The error from Parse if token is not valid -type ValidationError struct { - Inner error // stores the error returned by external dependencies, i.e.: KeyFunc - Errors uint32 // bitfield. see ValidationError... 
constants - text string // errors that do not have a valid error just have text -} - -// Validation error is an error type -func (e ValidationError) Error() string { - if e.Inner != nil { - return e.Inner.Error() - } else if e.text != "" { - return e.text - } else { - return "token is invalid" - } -} - -// No errors -func (e *ValidationError) valid() bool { - return e.Errors == 0 -} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go deleted file mode 100644 index c2299192545..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ /dev/null @@ -1,94 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/hmac" - "errors" -) - -// Implements the HMAC-SHA family of signing methods signing methods -type SigningMethodHMAC struct { - Name string - Hash crypto.Hash -} - -// Specific instances for HS256 and company -var ( - SigningMethodHS256 *SigningMethodHMAC - SigningMethodHS384 *SigningMethodHMAC - SigningMethodHS512 *SigningMethodHMAC - ErrSignatureInvalid = errors.New("signature is invalid") -) - -func init() { - // HS256 - SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { - return SigningMethodHS256 - }) - - // HS384 - SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { - return SigningMethodHS384 - }) - - // HS512 - SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { - return SigningMethodHS512 - }) -} - -func (m *SigningMethodHMAC) Alg() string { - return m.Name -} - -// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. 
-func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { - // Verify the key is the right type - keyBytes, ok := key.([]byte) - if !ok { - return ErrInvalidKeyType - } - - // Decode signature, for comparison - sig, err := DecodeSegment(signature) - if err != nil { - return err - } - - // Can we use the specified hashing method? - if !m.Hash.Available() { - return ErrHashUnavailable - } - - // This signing method is symmetric, so we validate the signature - // by reproducing the signature from the signing string and key, then - // comparing that against the provided signature. - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - if !hmac.Equal(sig, hasher.Sum(nil)) { - return ErrSignatureInvalid - } - - // No validation errors. Signature is good. - return nil -} - -// Implements the Sign method from SigningMethod for this signing method. -// Key must be []byte -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { - if keyBytes, ok := key.([]byte); ok { - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - return EncodeSegment(hasher.Sum(nil)), nil - } - - return "", ErrInvalidKey -} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go deleted file mode 100644 index 291213c460d..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/map_claims.go +++ /dev/null @@ -1,94 +0,0 @@ -package jwt - -import ( - "encoding/json" - "errors" - // "fmt" -) - -// Claims type that uses the map[string]interface{} for JSON decoding -// This is the default claims type if you don't supply one -type MapClaims map[string]interface{} - -// Compares the aud claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyAudience(cmp string, req bool) bool { - aud, _ := m["aud"].(string) - return verifyAud(aud, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { - switch exp := m["exp"].(type) { - case float64: - return verifyExp(int64(exp), cmp, req) - case json.Number: - v, _ := exp.Int64() - return verifyExp(v, cmp, req) - } - return req == false -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { - switch iat := m["iat"].(type) { - case float64: - return verifyIat(int64(iat), cmp, req) - case json.Number: - v, _ := iat.Int64() - return verifyIat(v, cmp, req) - } - return req == false -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { - iss, _ := m["iss"].(string) - return verifyIss(iss, cmp, req) -} - -// Compares the nbf claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { - switch nbf := m["nbf"].(type) { - case float64: - return verifyNbf(int64(nbf), cmp, req) - case json.Number: - v, _ := nbf.Int64() - return verifyNbf(v, cmp, req) - } - return req == false -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. 
-func (m MapClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - if m.VerifyExpiresAt(now, false) == false { - vErr.Inner = errors.New("Token is expired") - vErr.Errors |= ValidationErrorExpired - } - - if m.VerifyIssuedAt(now, false) == false { - vErr.Inner = errors.New("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if m.VerifyNotBefore(now, false) == false { - vErr.Inner = errors.New("Token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go deleted file mode 100644 index f04d189d067..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/none.go +++ /dev/null @@ -1,52 +0,0 @@ -package jwt - -// Implements the none signing method. This is required by the spec -// but you probably should never use it. -var SigningMethodNone *signingMethodNone - -const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" - -var NoneSignatureTypeDisallowedError error - -type signingMethodNone struct{} -type unsafeNoneMagicConstant string - -func init() { - SigningMethodNone = &signingMethodNone{} - NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) - - RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { - return SigningMethodNone - }) -} - -func (m *signingMethodNone) Alg() string { - return "none" -} - -// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { - // Key must be UnsafeAllowNoneSignatureType to prevent accidentally - // accepting 'none' signing method - if _, ok := key.(unsafeNoneMagicConstant); !ok { - return NoneSignatureTypeDisallowedError - } - // If signing method is none, signature 
must be an empty string - if signature != "" { - return NewValidationError( - "'none' signing method with non-empty signature", - ValidationErrorSignatureInvalid, - ) - } - - // Accept 'none' signing method. - return nil -} - -// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { - if _, ok := key.(unsafeNoneMagicConstant); ok { - return "", nil - } - return "", NoneSignatureTypeDisallowedError -} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go deleted file mode 100644 index 7bf1c4ea084..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/parser.go +++ /dev/null @@ -1,131 +0,0 @@ -package jwt - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" -) - -type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder - SkipClaimsValidation bool // Skip claims validation during token parsing -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. 
-// If everything is kosher, err will be nil -func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) -} - -func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - parts := strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - var err error - token := &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - token.Claims = claims - - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - // JSON Decode. 
Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - // Handle decode error - if err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) - } - - // Verify signing method is in the required set - if p.ValidMethods != nil { - var signingMethodValid = false - var alg = token.Method.Alg() - for _, m := range p.ValidMethods { - if m == alg { - signingMethodValid = true - break - } - } - if !signingMethodValid { - // signing method is not in the listed set - return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) - } - } - - // Lookup key - var key interface{} - if keyFunc == nil { - // keyFunc was not provided. 
short circuiting validation - return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) - } - if key, err = keyFunc(token); err != nil { - // keyFunc returned an error - return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} - } - - vErr := &ValidationError{} - - // Validate Claims - if !p.SkipClaimsValidation { - if err := token.Claims.Valid(); err != nil { - - // If the Claims Valid returned an error, check if it is a validation error, - // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set - if e, ok := err.(*ValidationError); !ok { - vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} - } else { - vErr = e - } - } - } - - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } - - return token, vErr -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go deleted file mode 100644 index 0ae0b1984e5..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ /dev/null @@ -1,100 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSA family of signing methods signing methods -type SigningMethodRSA struct { - Name string - Hash crypto.Hash -} - -// Specific instances for RS256 and company -var ( - SigningMethodRS256 *SigningMethodRSA - SigningMethodRS384 *SigningMethodRSA - SigningMethodRS512 *SigningMethodRSA -) - -func init() { - // RS256 - SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { - return SigningMethodRS256 - }) - - // RS384 - SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} - 
RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { - return SigningMethodRS384 - }) - - // RS512 - SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { - return SigningMethodRS512 - }) -} - -func (m *SigningMethodRSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this signing method, must be an rsa.PublicKey structure. -func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - var ok bool - - if rsaKey, ok = key.(*rsa.PublicKey); !ok { - return ErrInvalidKeyType - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) -} - -// Implements the Sign method from SigningMethod -// For this signing method, must be an rsa.PrivateKey structure. 
-func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - var ok bool - - // Validate type of key - if rsaKey, ok = key.(*rsa.PrivateKey); !ok { - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go deleted file mode 100644 index 10ee9db8a4e..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go +++ /dev/null @@ -1,126 +0,0 @@ -// +build go1.4 - -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSAPSS family of signing methods signing methods -type SigningMethodRSAPSS struct { - *SigningMethodRSA - Options *rsa.PSSOptions -} - -// Specific instances for RS/PS and company -var ( - SigningMethodPS256 *SigningMethodRSAPSS - SigningMethodPS384 *SigningMethodRSAPSS - SigningMethodPS512 *SigningMethodRSAPSS -) - -func init() { - // PS256 - SigningMethodPS256 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS256", - Hash: crypto.SHA256, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA256, - }, - } - RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { - return SigningMethodPS256 - }) - - // PS384 - SigningMethodPS384 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS384", - Hash: crypto.SHA384, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA384, - }, - } - RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { - return SigningMethodPS384 - }) - - // PS512 - SigningMethodPS512 = &SigningMethodRSAPSS{ - 
&SigningMethodRSA{ - Name: "PS512", - Hash: crypto.SHA512, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA512, - }, - } - RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { - return SigningMethodPS512 - }) -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an rsa.PublicKey struct -func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - switch k := key.(type) { - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an rsa.PrivateKey struct -func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go deleted file mode 100644 index 213a90dbbf8..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ /dev/null @@ -1,69 +0,0 @@ -package jwt - -import ( - "crypto/rsa" - 
"crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") - ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") - ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") -) - -// Parse PEM encoded PKCS1 or PKCS8 private key -func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *rsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, ErrNotRSAPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go deleted file mode 100644 index ed1f212b21e..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/signing_method.go +++ /dev/null @@ -1,35 +0,0 @@ -package jwt - -import ( - "sync" -) - -var signingMethods = map[string]func() SigningMethod{} -var 
signingMethodLock = new(sync.RWMutex) - -// Implement SigningMethod to add new methods for signing or verifying tokens. -type SigningMethod interface { - Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid - Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error - Alg() string // returns the alg identifier for this method (example: 'HS256') -} - -// Register the "alg" name and a factory function for signing method. -// This is typically done during init() in the method's implementation -func RegisterSigningMethod(alg string, f func() SigningMethod) { - signingMethodLock.Lock() - defer signingMethodLock.Unlock() - - signingMethods[alg] = f -} - -// Get a signing method from an "alg" string -func GetSigningMethod(alg string) (method SigningMethod) { - signingMethodLock.RLock() - defer signingMethodLock.RUnlock() - - if methodF, ok := signingMethods[alg]; ok { - method = methodF() - } - return -} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go deleted file mode 100644 index d637e0867c6..00000000000 --- a/vendor/github.com/dgrijalva/jwt-go/token.go +++ /dev/null @@ -1,108 +0,0 @@ -package jwt - -import ( - "encoding/base64" - "encoding/json" - "strings" - "time" -) - -// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). -// You can override it to use another time value. This is useful for testing or if your -// server uses a different time zone than your tokens. -var TimeFunc = time.Now - -// Parse methods use this callback function to supply -// the key for verification. The function receives the parsed, -// but unverified Token. This allows you to use properties in the -// Header of the token (such as `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) - -// A JWT Token. 
Different fields will be used depending on whether you're -// creating or parsing/verifying a token. -type Token struct { - Raw string // The raw token. Populated when you Parse a token - Method SigningMethod // The signing method used or to be used - Header map[string]interface{} // The first segment of the token - Claims Claims // The second segment of the token - Signature string // The third segment of the token. Populated when you Parse a token - Valid bool // Is the token valid? Populated when you Parse/Verify a token -} - -// Create a new Token. Takes a signing method -func New(method SigningMethod) *Token { - return NewWithClaims(method, MapClaims{}) -} - -func NewWithClaims(method SigningMethod, claims Claims) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: claims, - Method: method, - } -} - -// Get the complete, signed token -func (t *Token) SignedString(key interface{}) (string, error) { - var sig, sstr string - var err error - if sstr, err = t.SigningString(); err != nil { - return "", err - } - if sig, err = t.Method.Sign(sstr, key); err != nil { - return "", err - } - return strings.Join([]string{sstr, sig}, "."), nil -} - -// Generate the signing string. This is the -// most expensive part of the whole deal. Unless you -// need this for something special, just go straight for -// the SignedString. -func (t *Token) SigningString() (string, error) { - var err error - parts := make([]string, 2) - for i, _ := range parts { - var jsonValue []byte - if i == 0 { - if jsonValue, err = json.Marshal(t.Header); err != nil { - return "", err - } - } else { - if jsonValue, err = json.Marshal(t.Claims); err != nil { - return "", err - } - } - - parts[i] = EncodeSegment(jsonValue) - } - return strings.Join(parts, "."), nil -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. 
-// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return new(Parser).Parse(tokenString, keyFunc) -} - -func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) -} - -// Encode JWT specific base64url encoding with padding stripped -func EncodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} - -// Decode JWT specific base64url encoding with padding stripped -func DecodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - - return base64.URLEncoding.DecodeString(seg) -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile deleted file mode 100644 index f706871a6fa..00000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ -# at src/google/protobuf/descriptor.proto -regenerate: - @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION - cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto . - protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go deleted file mode 100644 index c6a91bcab9c..00000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ /dev/null @@ -1,2215 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/descriptor.proto - -/* -Package descriptor is a generated protocol buffer package. 
- -It is generated from these files: - google/protobuf/descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - ExtensionRangeOptions - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - OneofOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo - GeneratedCodeInfo -*/ -package descriptor - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type FieldDescriptorProto_Type int32 - -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. 
- FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 - // New in version 2. - FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 
17, - "TYPE_SINT64": 18, -} - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} -func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(value) - return nil -} -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} -func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(value) - return nil -} -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. 
-type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - // etc. - FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} -func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(value) - return nil -} -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} } - -type FieldOptions_CType int32 - -const ( - // Default mode. 
- FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} -func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") - if err != nil { - return err - } - *x = FieldOptions_CType(value) - return nil -} -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. 
- FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -var FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", -} -var FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, -} - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} -func (x FieldOptions_JSType) String() string { - return proto.EnumName(FieldOptions_JSType_name, int32(x)) -} -func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") - if err != nil { - return err - } - *x = FieldOptions_JSType(value) - return nil -} -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} } - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. 
-type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 -) - -var MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", -} -var MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, -} - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} -func (x MethodOptions_IdempotencyLevel) String() string { - return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) -} -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(value) - return nil -} -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if m != nil { - return m.File - } - return nil -} - -// Describes a complete .proto file. 
-type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. - MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". 
- Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *FileDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FileDescriptorProto) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *FileDescriptorProto) GetDependency() []string { - if m != nil { - return m.Dependency - } - return nil -} - -func (m *FileDescriptorProto) GetPublicDependency() []int32 { - if m != nil { - return m.PublicDependency - } - return nil -} - -func (m *FileDescriptorProto) GetWeakDependency() []int32 { - if m != nil { - return m.WeakDependency - } - return nil -} - -func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if m != nil { - return m.MessageType - } - return nil -} - -func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if m != nil { - return m.Service - } - return nil -} - -func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *FileDescriptorProto) GetOptions() *FileOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if m != nil { - return m.SourceCodeInfo - } - return nil -} - -func (m *FileDescriptorProto) GetSyntax() string { - if m != nil && m.Syntax != nil { - return *m.Syntax - } - return "" -} - -// Describes a message type. 
-type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *DescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *DescriptorProto) GetField() []*FieldDescriptorProto { - if m != nil { - return m.Field - } - return nil -} - -func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *DescriptorProto) GetNestedType() []*DescriptorProto { - if m != nil { - return m.NestedType - } - return nil -} - -func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if m != nil { - return m.ExtensionRange - } - return nil -} - -func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if m != nil { - return m.OneofDecl - } - return nil -} - -func (m *DescriptorProto) GetOptions() *MessageOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *DescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - 
XXX_unrecognized []byte `json:"-"` -} - -func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } -func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 0} -} - -func (m *DescriptorProto_ExtensionRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if m != nil { - return m.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. -type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } -func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ReservedRange) ProtoMessage() {} -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 1} -} - -func (m *DescriptorProto_ReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -type ExtensionRangeOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ExtensionRangeOptions -} - -func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). 
- TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. 
- JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *FieldDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FieldDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if m != nil && m.Label != nil { - return *m.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (m *FieldDescriptorProto) GetTypeName() string { - if m != nil && m.TypeName != nil { - return *m.TypeName - } - return "" -} - -func (m *FieldDescriptorProto) GetExtendee() string { - if m != nil && m.Extendee != nil { - return *m.Extendee - } - return "" -} - -func (m *FieldDescriptorProto) GetDefaultValue() string { - if m != nil && m.DefaultValue != nil { - return *m.DefaultValue - } - return "" -} - -func (m *FieldDescriptorProto) GetOneofIndex() int32 { - if m != nil && m.OneofIndex != nil { - return *m.OneofIndex - } - return 0 -} - -func (m *FieldDescriptorProto) GetJsonName() string { - if m != nil && m.JsonName != nil { - return *m.JsonName - } - return "" -} - -func (m *FieldDescriptorProto) GetOptions() *FieldOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a oneof. 
-type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *OneofDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *EnumDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if m != nil { - return m.Value - } - return nil -} - -func (m *EnumDescriptorProto) GetOptions() *EnumOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a value within an enum. 
-type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *EnumValueDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumValueDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a service. 
-type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *ServiceDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if m != nil { - return m.Method - } - return nil -} - -func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a method of a service. -type MethodDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. 
- InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -const Default_MethodDescriptorProto_ClientStreaming bool = false -const Default_MethodDescriptorProto_ServerStreaming bool = false - -func (m *MethodDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MethodDescriptorProto) GetInputType() string { - if m != nil && m.InputType != nil { - return *m.InputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOutputType() string { - if m != nil && m.OutputType != nil { - return *m.OutputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOptions() *MethodOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *MethodDescriptorProto) GetClientStreaming() bool { - if m != nil && m.ClientStreaming != nil { - return *m.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (m *MethodDescriptorProto) GetServerStreaming() bool { - if m != nil && m.ServerStreaming != nil { - 
return *m.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. 
- // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. 
- CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` - // Namespace for generated classes; defaults to the package. - CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. 
- SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -var extRange_FileOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FileOptions -} - -const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaStringCheckUtf8 bool = false -const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED -const Default_FileOptions_CcGenericServices bool = false -const Default_FileOptions_JavaGenericServices bool = false -const Default_FileOptions_PyGenericServices bool = false -const Default_FileOptions_PhpGenericServices bool = false -const Default_FileOptions_Deprecated bool = false -const Default_FileOptions_CcEnableArenas bool = false - -func (m *FileOptions) 
GetJavaPackage() string { - if m != nil && m.JavaPackage != nil { - return *m.JavaPackage - } - return "" -} - -func (m *FileOptions) GetJavaOuterClassname() string { - if m != nil && m.JavaOuterClassname != nil { - return *m.JavaOuterClassname - } - return "" -} - -func (m *FileOptions) GetJavaMultipleFiles() bool { - if m != nil && m.JavaMultipleFiles != nil { - return *m.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if m != nil && m.JavaGenerateEqualsAndHash != nil { - return *m.JavaGenerateEqualsAndHash - } - return false -} - -func (m *FileOptions) GetJavaStringCheckUtf8() bool { - if m != nil && m.JavaStringCheckUtf8 != nil { - return *m.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if m != nil && m.OptimizeFor != nil { - return *m.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (m *FileOptions) GetGoPackage() string { - if m != nil && m.GoPackage != nil { - return *m.GoPackage - } - return "" -} - -func (m *FileOptions) GetCcGenericServices() bool { - if m != nil && m.CcGenericServices != nil { - return *m.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (m *FileOptions) GetJavaGenericServices() bool { - if m != nil && m.JavaGenericServices != nil { - return *m.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (m *FileOptions) GetPyGenericServices() bool { - if m != nil && m.PyGenericServices != nil { - return *m.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (m *FileOptions) GetPhpGenericServices() bool { - if m != nil && m.PhpGenericServices != nil { - return *m.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - -func (m *FileOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - 
return Default_FileOptions_Deprecated -} - -func (m *FileOptions) GetCcEnableArenas() bool { - if m != nil && m.CcEnableArenas != nil { - return *m.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (m *FileOptions) GetObjcClassPrefix() string { - if m != nil && m.ObjcClassPrefix != nil { - return *m.ObjcClassPrefix - } - return "" -} - -func (m *FileOptions) GetCsharpNamespace() string { - if m != nil && m.CsharpNamespace != nil { - return *m.CsharpNamespace - } - return "" -} - -func (m *FileOptions) GetSwiftPrefix() string { - if m != nil && m.SwiftPrefix != nil { - return *m.SwiftPrefix - } - return "" -} - -func (m *FileOptions) GetPhpClassPrefix() string { - if m != nil && m.PhpClassPrefix != nil { - return *m.PhpClassPrefix - } - return "" -} - -func (m *FileOptions) GetPhpNamespace() string { - if m != nil && m.PhpNamespace != nil { - return *m.PhpNamespace - } - return "" -} - -func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MessageOptions struct { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. 
- MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -var extRange_MessageOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MessageOptions -} - -const Default_MessageOptions_MessageSetWireFormat bool = false -const Default_MessageOptions_NoStandardDescriptorAccessor bool = false -const Default_MessageOptions_Deprecated bool = false - -func (m *MessageOptions) GetMessageSetWireFormat() bool { - if m != nil && m.MessageSetWireFormat != nil { - return *m.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if m != nil && m.NoStandardDescriptorAccessor != nil { - return *m.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (m *MessageOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (m *MessageOptions) GetMapEntry() bool { - if m != nil && m.MapEntry != nil { - return *m.MapEntry - } - return false -} - -func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. 
This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - // The jstype option determines the JavaScript type used for values of the - // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. 
However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // For Google-internal migration only. Do not use. - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -var extRange_FieldOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FieldOptions -} - -const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING -const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL -const Default_FieldOptions_Lazy bool = false -const Default_FieldOptions_Deprecated bool = false -const Default_FieldOptions_Weak bool = false - -func (m *FieldOptions) GetCtype() FieldOptions_CType { - if m != nil && m.Ctype != nil { - return *m.Ctype - } - return Default_FieldOptions_Ctype -} - -func (m *FieldOptions) GetPacked() bool { - if m != nil && m.Packed != nil { - return *m.Packed - } - return false -} - -func (m *FieldOptions) GetJstype() FieldOptions_JSType { - if m != nil && m.Jstype != nil { - return *m.Jstype - } - return Default_FieldOptions_Jstype -} - -func (m *FieldOptions) GetLazy() bool { - if m != nil && m.Lazy != nil { - return *m.Lazy - } - return Default_FieldOptions_Lazy -} - -func (m *FieldOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (m *FieldOptions) GetWeak() bool { - if m != nil && m.Weak != nil { - return *m.Weak - } - return Default_FieldOptions_Weak -} - -func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - 
-type OneofOptions struct { - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -var extRange_OneofOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OneofOptions -} - -func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumOptions struct { - // Set this option to true to allow mapping different tag names to the same - // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } - -var extRange_EnumOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumOptions -} - -const Default_EnumOptions_Deprecated bool = false - -func (m *EnumOptions) GetAllowAlias() bool { - if m != nil && m.AllowAlias != nil { - return *m.AllowAlias - } - return false -} - -func (m *EnumOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumOptions_Deprecated -} - -func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } - -var extRange_EnumValueOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumValueOptions -} - -const Default_EnumValueOptions_Deprecated bool = false - -func (m *EnumValueOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -var extRange_ServiceOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ServiceOptions -} - -const Default_ServiceOptions_Deprecated bool = false - -func (m *ServiceOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -var extRange_MethodOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MethodOptions -} - -const Default_MethodOptions_Deprecated bool = false -const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN - -func (m *MethodOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if m != nil && m.IdempotencyLevel != nil { - return *m.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. 
-type UninterpretedOption struct { - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. - IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } - -func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if m != nil { - return m.Name - } - return nil -} - -func (m *UninterpretedOption) GetIdentifierValue() string { - if m != nil && m.IdentifierValue != nil { - return *m.IdentifierValue - } - return "" -} - -func (m *UninterpretedOption) GetPositiveIntValue() uint64 { - if m != nil && m.PositiveIntValue != nil { - return *m.PositiveIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetNegativeIntValue() int64 { - if m != nil && m.NegativeIntValue != nil { - return *m.NegativeIntValue - } - return 0 -} - -func (m *UninterpretedOption) 
GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *UninterpretedOption) GetStringValue() []byte { - if m != nil { - return m.StringValue - } - return nil -} - -func (m *UninterpretedOption) GetAggregateValue() string { - if m != nil && m.AggregateValue != nil { - return *m.AggregateValue - } - return "" -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". -type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } -func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption_NamePart) ProtoMessage() {} -func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{18, 0} -} - -func (m *UninterpretedOption_NamePart) GetNamePart() string { - if m != nil && m.NamePart != nil { - return *m.NamePart - } - return "" -} - -func (m *UninterpretedOption_NamePart) GetIsExtension() bool { - if m != nil && m.IsExtension != nil { - return *m.IsExtension - } - return false -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -type SourceCodeInfo struct { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. 
This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. 
- // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if m != nil { - return m.Location - } - return nil -} - -type SourceCodeInfo_Location struct { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. 
Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. 
- LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } - -func (m *SourceCodeInfo_Location) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *SourceCodeInfo_Location) GetSpan() []int32 { - if m != nil { - return m.Span - } - return nil -} - -func (m *SourceCodeInfo_Location) GetLeadingComments() string { - if m != nil && m.LeadingComments != nil { - return *m.LeadingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetTrailingComments() string { - if m != nil && m.TrailingComments != nil { - return *m.TrailingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if m != nil { - return m.LeadingDetachedComments - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -type GeneratedCodeInfo struct { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. 
- Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } - -func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { - if m != nil { - return m.Annotation - } - return nil -} - -type GeneratedCodeInfo_Annotation struct { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. - SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } -func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} -func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{20, 0} -} - -func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if m != nil && m.SourceFile != nil { - return *m.SourceFile - } - return "" -} - -func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if m != nil && m.Begin != nil { - return *m.Begin - } - return 0 -} - -func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func init() { - proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") - proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") - proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") - proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") - proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") - proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") - proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") - proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") - proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") - proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") - proto.RegisterType((*ServiceDescriptorProto)(nil), 
"google.protobuf.ServiceDescriptorProto") - proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") - proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") - proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") - proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") - proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") - proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") - proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") - proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") - proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") - proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") - proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") - proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") - proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") - proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") - proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) - proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", 
MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) -} - -func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 2519 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, - 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63, - 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec, - 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad, - 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50, - 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb, - 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 0x33, - 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d, - 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90, - 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43, - 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4, - 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61, - 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a, - 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76, - 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68, - 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3, - 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55, - 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 
0x14, 0x68, 0x0d, - 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6, - 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7, - 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa, - 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13, - 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2, - 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35, - 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e, - 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2, - 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec, - 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07, - 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94, - 0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2, - 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e, - 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16, - 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91, - 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda, - 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79, - 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1, - 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9, - 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67, - 0x34, 0x61, 0x79, 
0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b, - 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65, - 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba, - 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e, - 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48, - 0x39, 0x25, 0x68, 0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c, - 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1, - 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91, - 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78, - 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c, - 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e, - 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61, - 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73, - 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76, - 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, - 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f, - 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc, - 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f, - 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54, - 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e, - 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 
0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d, - 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7, - 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda, - 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4, - 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f, - 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82, - 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4, - 0xd0, 0xe1, 0x09, 0xbb, 0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13, - 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3, - 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9, - 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78, - 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e, - 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10, - 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80, - 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03, - 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1, - 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37, - 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f, - 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e, - 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0, - 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 
0xf4, - 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80, - 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f, - 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96, - 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1, - 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa, - 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc, - 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59, - 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96, - 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50, - 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27, - 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58, - 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a, - 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67, - 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e, - 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27, - 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f, - 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0, - 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2, - 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4, - 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4, - 0x8a, 0x33, 0x76, 0xe2, 0xfd, 
0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c, - 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43, - 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76, - 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b, - 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28, - 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e, - 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55, - 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2, - 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd, - 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59, - 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27, - 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64, - 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d, - 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18, - 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1, - 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5, - 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 0x50, 0xf2, 0x28, 0x79, 0x24, - 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8, - 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94, - 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91, - 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 
0xa2, 0x5c, 0xd5, 0x83, 0x72, - 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c, - 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75, - 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4, - 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee, - 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e, - 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f, - 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20, - 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e, - 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8, - 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb, - 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16, - 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc, - 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1, - 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a, - 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd, - 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0, - 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25, - 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 0xc4, 0xfd, 0xa1, 0x12, - 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e, - 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4, - 0x6e, 
0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09, - 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd, - 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43, - 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14, - 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7, - 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59, - 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e, - 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8, - 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a, - 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed, - 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f, - 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 0xe4, - 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f, - 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24, - 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66, - 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57, - 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c, - 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto deleted file mode 100644 index 4d4fb378f50..00000000000 --- 
a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto +++ /dev/null @@ -1,849 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// Based on original Protocol Buffers design by -// Sanjay Ghemawat, Jeff Dean, and others. 
-// -// The messages in this file describe the definitions found in .proto files. -// A valid .proto file can be translated directly to a FileDescriptorProto -// without any other information (e.g. without reading its imports). - - -syntax = "proto2"; - -package google.protobuf; -option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DescriptorProtos"; -option csharp_namespace = "Google.Protobuf.Reflection"; -option objc_class_prefix = "GPB"; - -// descriptor.proto must be optimized for speed because reflection-based -// algorithms don't work during bootstrapping. -option optimize_for = SPEED; - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -message FileDescriptorSet { - repeated FileDescriptorProto file = 1; -} - -// Describes a complete .proto file. -message FileDescriptorProto { - optional string name = 1; // file name, relative to root of source tree - optional string package = 2; // e.g. "foo", "foo.bar", etc. - - // Names of files imported by this file. - repeated string dependency = 3; - // Indexes of the public imported files in the dependency list above. - repeated int32 public_dependency = 10; - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - repeated int32 weak_dependency = 11; - - // All top-level definitions in this file. - repeated DescriptorProto message_type = 4; - repeated EnumDescriptorProto enum_type = 5; - repeated ServiceDescriptorProto service = 6; - repeated FieldDescriptorProto extension = 7; - - optional FileOptions options = 8; - - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. 
- optional SourceCodeInfo source_code_info = 9; - - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - optional string syntax = 12; -} - -// Describes a message type. -message DescriptorProto { - optional string name = 1; - - repeated FieldDescriptorProto field = 2; - repeated FieldDescriptorProto extension = 6; - - repeated DescriptorProto nested_type = 3; - repeated EnumDescriptorProto enum_type = 4; - - message ExtensionRange { - optional int32 start = 1; - optional int32 end = 2; - - optional ExtensionRangeOptions options = 3; - } - repeated ExtensionRange extension_range = 5; - - repeated OneofDescriptorProto oneof_decl = 8; - - optional MessageOptions options = 7; - - // Range of reserved tag numbers. Reserved tag numbers may not be used by - // fields or extension ranges in the same message. Reserved ranges may - // not overlap. - message ReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. - } - repeated ReservedRange reserved_range = 9; - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - repeated string reserved_name = 10; -} - -message ExtensionRangeOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -// Describes a field within a message. -message FieldDescriptorProto { - enum Type { - // 0 is reserved for errors. - // Order is weird for historical reasons. - TYPE_DOUBLE = 1; - TYPE_FLOAT = 2; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - TYPE_INT64 = 3; - TYPE_UINT64 = 4; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. 
- TYPE_INT32 = 5; - TYPE_FIXED64 = 6; - TYPE_FIXED32 = 7; - TYPE_BOOL = 8; - TYPE_STRING = 9; - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - TYPE_GROUP = 10; - TYPE_MESSAGE = 11; // Length-delimited aggregate. - - // New in version 2. - TYPE_BYTES = 12; - TYPE_UINT32 = 13; - TYPE_ENUM = 14; - TYPE_SFIXED32 = 15; - TYPE_SFIXED64 = 16; - TYPE_SINT32 = 17; // Uses ZigZag encoding. - TYPE_SINT64 = 18; // Uses ZigZag encoding. - }; - - enum Label { - // 0 is reserved for errors - LABEL_OPTIONAL = 1; - LABEL_REQUIRED = 2; - LABEL_REPEATED = 3; - }; - - optional string name = 1; - optional int32 number = 3; - optional Label label = 4; - - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - optional Type type = 5; - - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - optional string type_name = 6; - - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - optional string extendee = 2; - - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - optional string default_value = 7; - - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. 
- optional int32 oneof_index = 9; - - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - optional string json_name = 10; - - optional FieldOptions options = 8; -} - -// Describes a oneof. -message OneofDescriptorProto { - optional string name = 1; - optional OneofOptions options = 2; -} - -// Describes an enum type. -message EnumDescriptorProto { - optional string name = 1; - - repeated EnumValueDescriptorProto value = 2; - - optional EnumOptions options = 3; -} - -// Describes a value within an enum. -message EnumValueDescriptorProto { - optional string name = 1; - optional int32 number = 2; - - optional EnumValueOptions options = 3; -} - -// Describes a service. -message ServiceDescriptorProto { - optional string name = 1; - repeated MethodDescriptorProto method = 2; - - optional ServiceOptions options = 3; -} - -// Describes a method of a service. -message MethodDescriptorProto { - optional string name = 1; - - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. - optional string input_type = 2; - optional string output_type = 3; - - optional MethodOptions options = 4; - - // Identifies if client streams multiple client messages - optional bool client_streaming = 5 [default=false]; - // Identifies if server streams multiple server messages - optional bool server_streaming = 6 [default=false]; -} - - -// =================================================================== -// Options - -// Each of the definitions above may have "options" attached. These are -// just annotations which may cause code to be generated slightly differently -// or may contain hints for code that manipulates protocol messages. 
-// -// Clients may define custom options as extensions of the *Options messages. -// These extensions may not yet be known at parsing time, so the parser cannot -// store the values in them. Instead it stores them in a field in the *Options -// message called uninterpreted_option. This field must have the same name -// across all *Options messages. We then use this field to populate the -// extensions when we build a descriptor, at which point all protos have been -// parsed and so all extensions are known. -// -// Extension numbers for custom options may be chosen as follows: -// * For options which will only be used within a single application or -// organization, or for experimental options, use field numbers 50000 -// through 99999. It is up to you to ensure that you do not use the -// same number for multiple options. -// * For options which will be published and used publicly by multiple -// independent entities, e-mail protobuf-global-extension-registry@google.com -// to reserve extension numbers. Simply provide your project name (e.g. -// Objective-C plugin) and your project website (if available) -- there's no -// need to explain how you intend to use them. Usually you only need one -// extension number. You can declare multiple options with only one extension -// number by putting them in a sub-message. See the Custom Options section of -// the docs for examples: -// https://developers.google.com/protocol-buffers/docs/proto#options -// If this turns out to be popular, a web service will be set up -// to automatically assign option numbers. - - -message FileOptions { - - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. 
- optional string java_package = 1; - - - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - optional string java_outer_classname = 8; - - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - optional bool java_multiple_files = 10 [default=false]; - - // This option does nothing. - optional bool java_generate_equals_and_hash = 20 [deprecated=true]; - - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - optional bool java_string_check_utf8 = 27 [default=false]; - - - // Generated classes can be optimized for speed or code size. - enum OptimizeMode { - SPEED = 1; // Generate complete code for parsing, serialization, - // etc. - CODE_SIZE = 2; // Use ReflectionOps to implement these methods. - LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. - } - optional OptimizeMode optimize_for = 9 [default=SPEED]; - - // Sets the Go package where structs generated from this .proto will be - // placed. 
If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - optional string go_package = 11; - - - - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - optional bool cc_generic_services = 16 [default=false]; - optional bool java_generic_services = 17 [default=false]; - optional bool py_generic_services = 18 [default=false]; - optional bool php_generic_services = 42 [default=false]; - - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - optional bool deprecated = 23 [default=false]; - - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - optional bool cc_enable_arenas = 31 [default=false]; - - - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - optional string objc_class_prefix = 36; - - // Namespace for generated classes; defaults to the package. 
- optional string csharp_namespace = 37; - - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - optional string swift_prefix = 39; - - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - optional string php_class_prefix = 40; - - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - optional string php_namespace = 41; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; - - reserved 38; -} - -message MessageOptions { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - optional bool message_set_wire_format = 1 [default=false]; - - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. 
This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - optional bool no_standard_descriptor_accessor = 2 [default=false]; - - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - optional bool deprecated = 3 [default=false]; - - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - optional bool map_entry = 7; - - reserved 8; // javalite_serializable - reserved 9; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message FieldOptions { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! 
- optional CType ctype = 1 [default = STRING]; - enum CType { - // Default mode. - STRING = 0; - - CORD = 1; - - STRING_PIECE = 2; - } - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - optional bool packed = 2; - - // The jstype option determines the JavaScript type used for values of the - // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - optional JSType jstype = 6 [default = JS_NORMAL]; - enum JSType { - // Use the default type. - JS_NORMAL = 0; - - // Use JavaScript strings. - JS_STRING = 1; - - // Use JavaScript numbers. - JS_NUMBER = 2; - } - - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. 
However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - optional bool lazy = 5 [default=false]; - - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - optional bool deprecated = 3 [default=false]; - - // For Google-internal migration only. Do not use. - optional bool weak = 10 [default=false]; - - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. 
- extensions 1000 to max; - - reserved 4; // removed jtype -} - -message OneofOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumOptions { - - // Set this option to true to allow mapping different tag names to the same - // value. - optional bool allow_alias = 2; - - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - optional bool deprecated = 3 [default=false]; - - reserved 5; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumValueOptions { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - optional bool deprecated = 1 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message ServiceOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this service deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - optional bool deprecated = 33 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message MethodOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - optional bool deprecated = 33 [default=false]; - - // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - // or neither? HTTP based RPC implementation may choose GET verb for safe - // methods, and PUT verb for idempotent methods instead of the default POST. - enum IdempotencyLevel { - IDEMPOTENCY_UNKNOWN = 0; - NO_SIDE_EFFECTS = 1; // implies idempotent - IDEMPOTENT = 2; // idempotent, but may have side effects - } - optional IdempotencyLevel idempotency_level = - 34 [default=IDEMPOTENCY_UNKNOWN]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. 
Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -message UninterpretedOption { - // The name of the uninterpreted option. Each string represents a segment in - // a dot-separated name. is_extension is true iff a segment represents an - // extension (denoted with parentheses in options specs in .proto files). - // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents - // "foo.(bar.baz).qux". - message NamePart { - required string name_part = 1; - required bool is_extension = 2; - } - repeated NamePart name = 2; - - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. - optional string identifier_value = 3; - optional uint64 positive_int_value = 4; - optional int64 negative_int_value = 5; - optional double double_value = 6; - optional bytes string_value = 7; - optional string aggregate_value = 8; -} - -// =================================================================== -// Optional source code info - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -message SourceCodeInfo { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). 
- // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - repeated Location location = 1; - message Location { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. 
For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - repeated int32 path = 1 [packed=true]; - - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - repeated int32 span = 2 [packed=true]; - - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. 
- // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - optional string leading_comments = 3; - optional string trailing_comments = 4; - repeated string leading_detached_comments = 6; - } -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -message GeneratedCodeInfo { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - repeated Annotation annotation = 1; - message Annotation { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - repeated int32 path = 1 [packed=true]; - - // Identifies the filesystem path to the original source .proto. - optional string source_file = 2; - - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - optional int32 begin = 3; - - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. 
The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - optional int32 end = 4; - } -} diff --git a/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md deleted file mode 100644 index 2827b7d3fa2..00000000000 --- a/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md +++ /dev/null @@ -1,27 +0,0 @@ -Want to contribute? Great! First, read this page (including the small print at the end). - -### Before you contribute -Before we can use your code, you must sign the -[Google Individual Contributor License Agreement] -(https://cla.developers.google.com/about/google-individual) -(CLA), which you can do online. The CLA is necessary mainly because you own the -copyright to your changes, even after your contribution becomes part of our -codebase, so we need your permission to use and distribute your code. We also -need to be sure of various other things—for instance that you'll tell us if you -know that your code infringes on other people's patents. You don't have to sign -the CLA until after you've submitted your code for review and a member has -approved it, but you must do it before we can put your code into our codebase. -Before you start working on a larger contribution, you should get in touch with -us first through the issue tracker with your idea so that we can help out and -possibly guide you. Coordinating up front makes it much easier to avoid -frustration later on. - -### Code reviews -All submissions, including submissions by project members, require review. We -use Github pull requests for this purpose. - -### The small print -Contributions made by corporations are covered by a different agreement than -the one above, the -[Software Grant and Corporate Contributor License Agreement] -(https://cla.developers.google.com/about/google-corporate). 
diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE deleted file mode 100644 index 6d16b6578a2..00000000000 --- a/vendor/github.com/googleapis/gax-go/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright 2016, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/googleapis/gax-go/README.md b/vendor/github.com/googleapis/gax-go/README.md deleted file mode 100644 index 3cedd5be96c..00000000000 --- a/vendor/github.com/googleapis/gax-go/README.md +++ /dev/null @@ -1,24 +0,0 @@ -Google API Extensions for Go -============================ - -[![Build Status](https://travis-ci.org/googleapis/gax-go.svg?branch=master)](https://travis-ci.org/googleapis/gax-go) -[![Code Coverage](https://img.shields.io/codecov/c/github/googleapis/gax-go.svg)](https://codecov.io/github/googleapis/gax-go) - -Google API Extensions for Go (gax-go) is a set of modules which aids the -development of APIs for clients and servers based on `gRPC` and Google API -conventions. - -Application code will rarely need to use this library directly, -but the code generated automatically from API definition files can use it -to simplify code generation and to provide more convenient and idiomatic API surface. - -**This project is currently experimental and not supported.** - -Go Versions -=========== -This library requires Go 1.6 or above. - -License -======= -BSD - please see [LICENSE](https://github.com/googleapis/gax-go/blob/master/LICENSE) -for more information. diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go deleted file mode 100644 index 7b621643e94..00000000000 --- a/vendor/github.com/googleapis/gax-go/call_option.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "math/rand" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// CallOption is an option used by Invoke to control behaviors of RPC calls. -// CallOption works by modifying relevant fields of CallSettings. -type CallOption interface { - // Resolve applies the option by modifying cs. - Resolve(cs *CallSettings) -} - -// Retryer is used by Invoke to determine retry behavior. -type Retryer interface { - // Retry reports whether a request should be retriedand how long to pause before retrying - // if the previous attempt returned with err. Invoke never calls Retry with nil error. 
- Retry(err error) (pause time.Duration, shouldRetry bool) -} - -type retryerOption func() Retryer - -func (o retryerOption) Resolve(s *CallSettings) { - s.Retry = o -} - -// WithRetry sets CallSettings.Retry to fn. -func WithRetry(fn func() Retryer) CallOption { - return retryerOption(fn) -} - -// OnCodes returns a Retryer that retries if and only if -// the previous attempt returns a GRPC error whose error code is stored in cc. -// Pause times between retries are specified by bo. -// -// bo is only used for its parameters; each Retryer has its own copy. -func OnCodes(cc []codes.Code, bo Backoff) Retryer { - return &boRetryer{ - backoff: bo, - codes: append([]codes.Code(nil), cc...), - } -} - -type boRetryer struct { - backoff Backoff - codes []codes.Code -} - -func (r *boRetryer) Retry(err error) (time.Duration, bool) { - st, ok := status.FromError(err) - if !ok { - return 0, false - } - c := st.Code() - for _, rc := range r.codes { - if c == rc { - return r.backoff.Pause(), true - } - } - return 0, false -} - -// Backoff implements exponential backoff. -// The wait time between retries is a random value between 0 and the "retry envelope". -// The envelope starts at Initial and increases by the factor of Multiplier every retry, -// but is capped at Max. -type Backoff struct { - // Initial is the initial value of the retry envelope, defaults to 1 second. - Initial time.Duration - - // Max is the maximum value of the retry envelope, defaults to 30 seconds. - Max time.Duration - - // Multiplier is the factor by which the retry envelope increases. - // It should be greater than 1 and defaults to 2. 
- Multiplier float64 - - // cur is the current retry envelope - cur time.Duration -} - -func (bo *Backoff) Pause() time.Duration { - if bo.Initial == 0 { - bo.Initial = time.Second - } - if bo.cur == 0 { - bo.cur = bo.Initial - } - if bo.Max == 0 { - bo.Max = 30 * time.Second - } - if bo.Multiplier < 1 { - bo.Multiplier = 2 - } - // Select a duration between zero and the current max. It might seem counterintuitive to - // have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html - // argues that that is the best strategy. - d := time.Duration(rand.Int63n(int64(bo.cur))) - bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) - if bo.cur > bo.Max { - bo.cur = bo.Max - } - return d -} - -type grpcOpt []grpc.CallOption - -func (o grpcOpt) Resolve(s *CallSettings) { - s.GRPC = o -} - -func WithGRPCOptions(opt ...grpc.CallOption) CallOption { - return grpcOpt(append([]grpc.CallOption(nil), opt...)) -} - -type CallSettings struct { - // Retry returns a Retryer to be used to control retry logic of a method call. - // If Retry is nil or the returned Retryer is nil, the call will not be retried. - Retry func() Retryer - - // CallOptions to be forwarded to GRPC. - GRPC []grpc.CallOption -} diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/gax.go deleted file mode 100644 index 5ebedff0d02..00000000000 --- a/vendor/github.com/googleapis/gax-go/gax.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package gax contains a set of modules which aid the development of APIs -// for clients and servers based on gRPC and Google API conventions. -// -// Application code will rarely need to use this library directly. -// However, code generated automatically from API definition files can use it -// to simplify code generation and to provide more convenient and idiomatic API surfaces. -// -// This project is currently experimental and not supported. 
-package gax - -const Version = "0.1.0" diff --git a/vendor/github.com/googleapis/gax-go/header.go b/vendor/github.com/googleapis/gax-go/header.go deleted file mode 100644 index d81455eccd9..00000000000 --- a/vendor/github.com/googleapis/gax-go/header.go +++ /dev/null @@ -1,24 +0,0 @@ -package gax - -import "bytes" - -// XGoogHeader is for use by the Google Cloud Libraries only. -// -// XGoogHeader formats key-value pairs. -// The resulting string is suitable for x-goog-api-client header. -func XGoogHeader(keyval ...string) string { - if len(keyval) == 0 { - return "" - } - if len(keyval)%2 != 0 { - panic("gax.Header: odd argument count") - } - var buf bytes.Buffer - for i := 0; i < len(keyval); i += 2 { - buf.WriteByte(' ') - buf.WriteString(keyval[i]) - buf.WriteByte('/') - buf.WriteString(keyval[i+1]) - } - return buf.String()[1:] -} diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/invoke.go deleted file mode 100644 index 86049d826f8..00000000000 --- a/vendor/github.com/googleapis/gax-go/invoke.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "time" - - "golang.org/x/net/context" -) - -// A user defined call stub. -type APICall func(context.Context, CallSettings) error - -// Invoke calls the given APICall, -// performing retries as specified by opts, if any. -func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { - var settings CallSettings - for _, opt := range opts { - opt.Resolve(&settings) - } - return invoke(ctx, call, settings, Sleep) -} - -// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. -// If interrupted, Sleep returns ctx.Err(). -func Sleep(ctx context.Context, d time.Duration) error { - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return ctx.Err() - case <-t.C: - return nil - } -} - -type sleeper func(ctx context.Context, d time.Duration) error - -// invoke implements Invoke, taking an additional sleeper argument for testing. 
-func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { - var retryer Retryer - for { - err := call(ctx, settings) - if err == nil { - return nil - } - if settings.Retry == nil { - return err - } - if retryer == nil { - if r := settings.Retry(); r != nil { - retryer = r - } else { - return err - } - } - if d, ok := retryer.Retry(err); !ok { - return err - } else if err = sp(ctx, d); err != nil { - return err - } - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/vendor/github.com/gophercloud/gophercloud/FAQ.md b/vendor/github.com/gophercloud/gophercloud/FAQ.md deleted file mode 100644 index 88a366a288b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/FAQ.md +++ /dev/null @@ -1,148 +0,0 @@ -# Tips - -## Implementing default logging and re-authentication attempts - -You can implement custom logging and/or limit re-auth attempts by creating a custom HTTP client -like the following and setting it as the provider client's HTTP Client (via the -`gophercloud.ProviderClient.HTTPClient` field): - -```go -//... - -// LogRoundTripper satisfies the http.RoundTripper interface and is used to -// customize the default Gophercloud RoundTripper to allow for logging. -type LogRoundTripper struct { - rt http.RoundTripper - numReauthAttempts int -} - -// newHTTPClient return a custom HTTP client that allows for logging relevant -// information before and after the HTTP request. -func newHTTPClient() http.Client { - return http.Client{ - Transport: &LogRoundTripper{ - rt: http.DefaultTransport, - }, - } -} - -// RoundTrip performs a round-trip HTTP request and logs relevant information about it. 
-func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { - glog.Infof("Request URL: %s\n", request.URL) - - response, err := lrt.rt.RoundTrip(request) - if response == nil { - return nil, err - } - - if response.StatusCode == http.StatusUnauthorized { - if lrt.numReauthAttempts == 3 { - return response, fmt.Errorf("Tried to re-authenticate 3 times with no success.") - } - lrt.numReauthAttempts++ - } - - glog.Debugf("Response Status: %s\n", response.Status) - - return response, nil -} - -endpoint := "https://127.0.0.1/auth" -pc := openstack.NewClient(endpoint) -pc.HTTPClient = newHTTPClient() - -//... -``` - - -## Implementing custom objects - -OpenStack request/response objects may differ among variable names or types. - -### Custom request objects - -To pass custom options to a request, implement the desired `OptsBuilder` interface. For -example, to pass in - -```go -type MyCreateServerOpts struct { - Name string - Size int -} -``` - -to `servers.Create`, simply implement the `servers.CreateOptsBuilder` interface: - -```go -func (o MyCreateServeropts) ToServerCreateMap() (map[string]interface{}, error) { - return map[string]interface{}{ - "name": o.Name, - "size": o.Size, - }, nil -} -``` - -create an instance of your custom options object, and pass it to `servers.Create`: - -```go -// ... -myOpts := MyCreateServerOpts{ - Name: "s1", - Size: "100", -} -server, err := servers.Create(computeClient, myOpts).Extract() -// ... -``` - -### Custom response objects - -Some OpenStack services have extensions. Extensions that are supported in Gophercloud can be -combined to create a custom object: - -```go -// ... -type MyVolume struct { - volumes.Volume - tenantattr.VolumeExt -} - -var v struct { - MyVolume `json:"volume"` -} - -err := volumes.Get(client, volID).ExtractInto(&v) -// ... -``` - -## Overriding default `UnmarshalJSON` method - -For some response objects, a field may be a custom type or may be allowed to take on -different types. 
In these cases, overriding the default `UnmarshalJSON` method may be -necessary. To do this, declare the JSON `struct` field tag as "-" and create an `UnmarshalJSON` -method on the type: - -```go -// ... -type MyVolume struct { - ID string `json: "id"` - TimeCreated time.Time `json: "-"` -} - -func (r *MyVolume) UnmarshalJSON(b []byte) error { - type tmp MyVolume - var s struct { - tmp - TimeCreated gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Volume(s.tmp) - - r.TimeCreated = time.Time(s.CreatedAt) - - return err -} -// ... -``` diff --git a/vendor/github.com/gophercloud/gophercloud/LICENSE b/vendor/github.com/gophercloud/gophercloud/LICENSE deleted file mode 100644 index fbbbc9e4cba..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Copyright 2012-2013 Rackspace, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use -this file except in compliance with the License. You may obtain a copy of the -License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed -under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. - ------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/gophercloud/gophercloud/MIGRATING.md b/vendor/github.com/gophercloud/gophercloud/MIGRATING.md deleted file mode 100644 index aa383c9cc9e..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/MIGRATING.md +++ /dev/null @@ -1,32 +0,0 @@ -# Compute - -## Floating IPs - -* `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingip` is now `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips` -* `floatingips.Associate` and `floatingips.Disassociate` have been removed. -* `floatingips.DisassociateOpts` is now required to disassociate a Floating IP. - -## Security Groups - -* `secgroups.AddServerToGroup` is now `secgroups.AddServer`. -* `secgroups.RemoveServerFromGroup` is now `secgroups.RemoveServer`. - -## Servers - -* `servers.Reboot` now requires a `servers.RebootOpts` struct: - - ```golang - rebootOpts := &servers.RebootOpts{ - Type: servers.SoftReboot, - } - res := servers.Reboot(client, server.ID, rebootOpts) - ``` - -# Identity - -## V3 - -### Tokens - -* `Token.ExpiresAt` is now of type `gophercloud.JSONRFC3339Milli` instead of - `time.Time` diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md deleted file mode 100644 index 60ca479de89..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/README.md +++ /dev/null @@ -1,143 +0,0 @@ -# Gophercloud: an OpenStack SDK for Go -[![Build Status](https://travis-ci.org/gophercloud/gophercloud.svg?branch=master)](https://travis-ci.org/gophercloud/gophercloud) -[![Coverage Status](https://coveralls.io/repos/github/gophercloud/gophercloud/badge.svg?branch=master)](https://coveralls.io/github/gophercloud/gophercloud?branch=master) - -Gophercloud is an OpenStack Go SDK. 
- -## Useful links - -* [Reference documentation](http://godoc.org/github.com/gophercloud/gophercloud) -* [Effective Go](https://golang.org/doc/effective_go.html) - -## How to install - -Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH) -is pointing to an appropriate directory where you want to install Gophercloud: - -```bash -mkdir $HOME/go -export GOPATH=$HOME/go -``` - -To protect yourself against changes in your dependencies, we highly recommend choosing a -[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for -your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install -Gophercloud as a dependency like so: - -```bash -go get github.com/gophercloud/gophercloud - -# Edit your code to import relevant packages from "github.com/gophercloud/gophercloud" - -godep save ./... -``` - -This will install all the source files you need into a `Godeps/_workspace` directory, which is -referenceable from your own source files when you use the `godep go` command. - -## Getting started - -### Credentials - -Because you'll be hitting an API, you will need to retrieve your OpenStack -credentials and either store them as environment variables or in your local Go -files. The first method is recommended because it decouples credential -information from source code, allowing you to push the latter to your version -control system without any security risk. - -You will need to retrieve the following: - -* username -* password -* a valid Keystone identity URL - -For users that have the OpenStack dashboard installed, there's a shortcut. If -you visit the `project/access_and_security` path in Horizon and click on the -"Download OpenStack RC File" button at the top right hand corner, you will -download a bash file that exports all of your access details to environment -variables. 
To execute the file, run `source admin-openrc.sh` and you will be -prompted for your password. - -### Authentication - -Once you have access to your credentials, you can begin plugging them into -Gophercloud. The next step is authentication, and this is handled by a base -"Provider" struct. To get one, you can either pass in your credentials -explicitly, or tell Gophercloud to use environment variables: - -```go -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/utils" -) - -// Option 1: Pass in the values yourself -opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", -} - -// Option 2: Use a utility function to retrieve all your environment variables -opts, err := openstack.AuthOptionsFromEnv() -``` - -Once you have the `opts` variable, you can pass it in and get back a -`ProviderClient` struct: - -```go -provider, err := openstack.AuthenticatedClient(opts) -``` - -The `ProviderClient` is the top-level client that all of your OpenStack services -derive from. The provider contains all of the authentication details that allow -your Go code to access the API - such as the base URL and token ID. - -### Provision a server - -Once we have a base Provider, we inject it as a dependency into each OpenStack -service. In order to work with the Compute API, we need a Compute service -client; which can be created like so: - -```go -client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), -}) -``` - -We then use this `client` for any Compute API operation we want. 
In our case, -we want to provision a new server - so we invoke the `Create` method and pass -in the flavor ID (hardware specification) and image ID (operating system) we're -interested in: - -```go -import "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - -server, err := servers.Create(client, servers.CreateOpts{ - Name: "My new server!", - FlavorRef: "flavor_id", - ImageRef: "image_id", -}).Extract() -``` - -The above code sample creates a new server with the parameters, and embodies the -new resource in the `server` variable (a -[`servers.Server`](http://godoc.org/github.com/gophercloud/gophercloud) struct). - -## Advanced Usage - -Have a look at the [FAQ](./FAQ.md) for some tips on customizing the way Gophercloud works. - -## Backwards-Compatibility Guarantees - -None. Vendor it and write tests covering the parts you use. - -## Contributing - -See the [contributing guide](./.github/CONTRIBUTING.md). - -## Help and feedback - -If you're struggling with something or have spotted a potential bug, feel free -to submit an issue to our [bug tracker](/issues). diff --git a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md b/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md deleted file mode 100644 index e7531a83d9d..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md +++ /dev/null @@ -1,74 +0,0 @@ - -## On Pull Requests - -- Before you start a PR there needs to be a Github issue and a discussion about it - on that issue with a core contributor, even if it's just a 'SGTM'. - -- A PR's description must reference the issue it closes with a `For ` (e.g. For #293). - -- A PR's description must contain link(s) to the line(s) in the OpenStack - source code (on Github) that prove(s) the PR code to be valid. Links to documentation - are not good enough. The link(s) should be to a non-`master` branch. 
For example, - a pull request implementing the creation of a Neutron v2 subnet might put the - following link in the description: - - https://github.com/openstack/neutron/blob/stable/mitaka/neutron/api/v2/attributes.py#L749 - - From that link, a reviewer (or user) can verify the fields in the request/response - objects in the PR. - -- A PR that is in-progress should have `[wip]` in front of the PR's title. When - ready for review, remove the `[wip]` and ping a core contributor with an `@`. - -- Forcing PRs to be small can have the effect of users submitting PRs in a hierarchical chain, with - one depending on the next. If a PR depends on another one, it should have a [Pending #PRNUM] - prefix in the PR title. In addition, it will be the PR submitter's responsibility to remove the - [Pending #PRNUM] tag once the PR has been updated with the merged, dependent PR. That will - let reviewers know it is ready to review. - -- A PR should be small. Even if you intend on implementing an entire - service, a PR should only be one route of that service - (e.g. create server or get server, but not both). - -- Unless explicitly asked, do not squash commits in the middle of a review; only - append. It makes it difficult for the reviewer to see what's changed from one - review to the next. - -## On Code - -- In re design: follow as closely as is reasonable the code already in the library. - Most operations (e.g. create, delete) admit the same design. - -- Unit tests and acceptance (integration) tests must be written to cover each PR. - Tests for operations with several options (e.g. list, create) should include all - the options in the tests. This will allow users to verify an operation on their - own infrastructure and see an example of usage. - -- If in doubt, ask in-line on the PR. 
- -### File Structure - -- The following should be used in most cases: - - - `requests.go`: contains all the functions that make HTTP requests and the - types associated with the HTTP request (parameters for URL, body, etc) - - `results.go`: contains all the response objects and their methods - - `urls.go`: contains the endpoints to which the requests are made - -### Naming - -- For methods on a type in `results.go`, the receiver should be named `r` and the - variable into which it will be unmarshalled `s`. - -- Functions in `requests.go`, with the exception of functions that return a - `pagination.Pager`, should be named returns of the name `r`. - -- Functions in `requests.go` that accept request bodies should accept as their - last parameter an `interface` named `OptsBuilder` (eg `CreateOptsBuilder`). - This `interface` should have at the least a method named `ToMap` - (eg `ToPortCreateMap`). - -- Functions in `requests.go` that accept query strings should accept as their - last parameter an `interface` named `OptsBuilder` (eg `ListOptsBuilder`). - This `interface` should have at the least a method named `ToQuery` - (eg `ToServerListQuery`). diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go deleted file mode 100644 index 4211470020a..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/auth_options.go +++ /dev/null @@ -1,354 +0,0 @@ -package gophercloud - -/* -AuthOptions stores information needed to authenticate to an OpenStack Cloud. -You can populate one manually, or use a provider's AuthOptionsFromEnv() function -to read relevant information from the standard environment variables. Pass one -to a provider's AuthenticatedClient function to authenticate and obtain a -ProviderClient representing an active session on that provider. - -Its fields are the union of those recognized by each identity implementation and -provider. 
- -An example of manually providing authentication information: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(opts) - -An example of using AuthOptionsFromEnv(), where the environment variables can -be read from a file, such as a standard openrc file: - - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(opts) -*/ -type AuthOptions struct { - // IdentityEndpoint specifies the HTTP endpoint that is required to work with - // the Identity API of the appropriate version. While it's ultimately needed by - // all of the identity services, it will often be populated by a provider-level - // function. - // - // The IdentityEndpoint is typically referred to as the "auth_url" or - // "OS_AUTH_URL" in the information provided by the cloud operator. - IdentityEndpoint string `json:"-"` - - // Username is required if using Identity V2 API. Consult with your provider's - // control panel to discover your account's username. In Identity V3, either - // UserID or a combination of Username and DomainID or DomainName are needed. - Username string `json:"username,omitempty"` - UserID string `json:"-"` - - Password string `json:"password,omitempty"` - - // At most one of DomainID and DomainName must be provided if using Username - // with Identity V3. Otherwise, either are optional. - DomainID string `json:"-"` - DomainName string `json:"name,omitempty"` - - // The TenantID and TenantName fields are optional for the Identity V2 API. - // The same fields are known as project_id and project_name in the Identity - // V3 API, but are collected as TenantID and TenantName here in both cases. - // Some providers allow you to specify a TenantName instead of the TenantId. - // Some require both. 
Your provider's authentication policies will determine - // how these fields influence authentication. - // If DomainID or DomainName are provided, they will also apply to TenantName. - // It is not currently possible to authenticate with Username and a Domain - // and scope to a Project in a different Domain by using TenantName. To - // accomplish that, the ProjectID will need to be provided as the TenantID - // option. - TenantID string `json:"tenantId,omitempty"` - TenantName string `json:"tenantName,omitempty"` - - // AllowReauth should be set to true if you grant permission for Gophercloud to - // cache your credentials in memory, and to allow Gophercloud to attempt to - // re-authenticate automatically if/when your token expires. If you set it to - // false, it will not cache these settings, but re-authentication will not be - // possible. This setting defaults to false. - // - // NOTE: The reauth function will try to re-authenticate endlessly if left - // unchecked. The way to limit the number of attempts is to provide a custom - // HTTP client to the provider client and provide a transport that implements - // the RoundTripper interface and stores the number of failed retries. For an - // example of this, see here: - // https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311 - AllowReauth bool `json:"-"` - - // TokenID allows users to authenticate (possibly as another user) with an - // authentication token ID. - TokenID string `json:"-"` -} - -// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder -// interface in the v2 tokens package -func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { - // Populate the request map. 
- authMap := make(map[string]interface{}) - - if opts.Username != "" { - if opts.Password != "" { - authMap["passwordCredentials"] = map[string]interface{}{ - "username": opts.Username, - "password": opts.Password, - } - } else { - return nil, ErrMissingInput{Argument: "Password"} - } - } else if opts.TokenID != "" { - authMap["token"] = map[string]interface{}{ - "id": opts.TokenID, - } - } else { - return nil, ErrMissingInput{Argument: "Username"} - } - - if opts.TenantID != "" { - authMap["tenantId"] = opts.TenantID - } - if opts.TenantName != "" { - authMap["tenantName"] = opts.TenantName - } - - return map[string]interface{}{"auth": authMap}, nil -} - -func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { - type domainReq struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - } - - type projectReq struct { - Domain *domainReq `json:"domain,omitempty"` - Name *string `json:"name,omitempty"` - ID *string `json:"id,omitempty"` - } - - type userReq struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Password string `json:"password"` - Domain *domainReq `json:"domain,omitempty"` - } - - type passwordReq struct { - User userReq `json:"user"` - } - - type tokenReq struct { - ID string `json:"id"` - } - - type identityReq struct { - Methods []string `json:"methods"` - Password *passwordReq `json:"password,omitempty"` - Token *tokenReq `json:"token,omitempty"` - } - - type authReq struct { - Identity identityReq `json:"identity"` - } - - type request struct { - Auth authReq `json:"auth"` - } - - // Populate the request structure based on the provided arguments. Create and return an error - // if insufficient or incompatible information is present. - var req request - - if opts.Password == "" { - if opts.TokenID != "" { - // Because we aren't using password authentication, it's an error to also provide any of the user-based authentication - // parameters. 
- if opts.Username != "" { - return nil, ErrUsernameWithToken{} - } - if opts.UserID != "" { - return nil, ErrUserIDWithToken{} - } - if opts.DomainID != "" { - return nil, ErrDomainIDWithToken{} - } - if opts.DomainName != "" { - return nil, ErrDomainNameWithToken{} - } - - // Configure the request for Token authentication. - req.Auth.Identity.Methods = []string{"token"} - req.Auth.Identity.Token = &tokenReq{ - ID: opts.TokenID, - } - } else { - // If no password or token ID are available, authentication can't continue. - return nil, ErrMissingPassword{} - } - } else { - // Password authentication. - req.Auth.Identity.Methods = []string{"password"} - - // At least one of Username and UserID must be specified. - if opts.Username == "" && opts.UserID == "" { - return nil, ErrUsernameOrUserID{} - } - - if opts.Username != "" { - // If Username is provided, UserID may not be provided. - if opts.UserID != "" { - return nil, ErrUsernameOrUserID{} - } - - // Either DomainID or DomainName must also be specified. - if opts.DomainID == "" && opts.DomainName == "" { - return nil, ErrDomainIDOrDomainName{} - } - - if opts.DomainID != "" { - if opts.DomainName != "" { - return nil, ErrDomainIDOrDomainName{} - } - - // Configure the request for Username and Password authentication with a DomainID. - req.Auth.Identity.Password = &passwordReq{ - User: userReq{ - Name: &opts.Username, - Password: opts.Password, - Domain: &domainReq{ID: &opts.DomainID}, - }, - } - } - - if opts.DomainName != "" { - // Configure the request for Username and Password authentication with a DomainName. - req.Auth.Identity.Password = &passwordReq{ - User: userReq{ - Name: &opts.Username, - Password: opts.Password, - Domain: &domainReq{Name: &opts.DomainName}, - }, - } - } - } - - if opts.UserID != "" { - // If UserID is specified, neither DomainID nor DomainName may be. 
- if opts.DomainID != "" { - return nil, ErrDomainIDWithUserID{} - } - if opts.DomainName != "" { - return nil, ErrDomainNameWithUserID{} - } - - // Configure the request for UserID and Password authentication. - req.Auth.Identity.Password = &passwordReq{ - User: userReq{ID: &opts.UserID, Password: opts.Password}, - } - } - } - - b, err := BuildRequestBody(req, "") - if err != nil { - return nil, err - } - - if len(scope) != 0 { - b["auth"].(map[string]interface{})["scope"] = scope - } - - return b, nil -} - -func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - - var scope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string - } - - if opts.TenantID != "" { - scope.ProjectID = opts.TenantID - } else { - if opts.TenantName != "" { - scope.ProjectName = opts.TenantName - scope.DomainID = opts.DomainID - scope.DomainName = opts.DomainName - } - } - - if scope.ProjectName != "" { - // ProjectName provided: either DomainID or DomainName must also be supplied. - // ProjectID may not be supplied. - if scope.DomainID == "" && scope.DomainName == "" { - return nil, ErrScopeDomainIDOrDomainName{} - } - if scope.ProjectID != "" { - return nil, ErrScopeProjectIDOrProjectName{} - } - - if scope.DomainID != "" { - // ProjectName + DomainID - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &scope.ProjectName, - "domain": map[string]interface{}{"id": &scope.DomainID}, - }, - }, nil - } - - if scope.DomainName != "" { - // ProjectName + DomainName - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &scope.ProjectName, - "domain": map[string]interface{}{"name": &scope.DomainName}, - }, - }, nil - } - } else if scope.ProjectID != "" { - // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. 
- if scope.DomainID != "" { - return nil, ErrScopeProjectIDAlone{} - } - if scope.DomainName != "" { - return nil, ErrScopeProjectIDAlone{} - } - - // ProjectID - return map[string]interface{}{ - "project": map[string]interface{}{ - "id": &scope.ProjectID, - }, - }, nil - } else if scope.DomainID != "" { - // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. - if scope.DomainName != "" { - return nil, ErrScopeDomainIDOrDomainName{} - } - - // DomainID - return map[string]interface{}{ - "domain": map[string]interface{}{ - "id": &scope.DomainID, - }, - }, nil - } else if scope.DomainName != "" { - // DomainName - return map[string]interface{}{ - "domain": map[string]interface{}{ - "name": &scope.DomainName, - }, - }, nil - } - - return nil, nil -} - -func (opts AuthOptions) CanReauth() bool { - return opts.AllowReauth -} diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go deleted file mode 100644 index 30067aa3527..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/doc.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Package gophercloud provides a multi-vendor interface to OpenStack-compatible -clouds. The library has a three-level hierarchy: providers, services, and -resources. - -Authenticating with Providers - -Provider structs represent the cloud providers that offer and manage a -collection of services. You will generally want to create one Provider -client per OpenStack cloud. - -Use your OpenStack credentials to create a Provider client. The -IdentityEndpoint is typically refered to as "auth_url" or "OS_AUTH_URL" in -information provided by the cloud operator. Additionally, the cloud may refer to -TenantID or TenantName as project_id and project_name. 
Credentials are -specified like so: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(opts) - -You may also use the openstack.AuthOptionsFromEnv() helper function. This -function reads in standard environment variables frequently found in an -OpenStack `openrc` file. Again note that Gophercloud currently uses "tenant" -instead of "project". - - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(opts) - -Service Clients - -Service structs are specific to a provider and handle all of the logic and -operations for a particular OpenStack service. Examples of services include: -Compute, Object Storage, Block Storage. In order to define one, you need to -pass in the parent provider, like so: - - opts := gophercloud.EndpointOpts{Region: "RegionOne"} - - client := openstack.NewComputeV2(provider, opts) - -Resources - -Resource structs are the domain models that services make use of in order -to work with and represent the state of API resources: - - server, err := servers.Get(client, "{serverId}").Extract() - -Intermediate Result structs are returned for API operations, which allow -generic access to the HTTP headers, response body, and any errors associated -with the network transaction. To turn a result into a usable resource struct, -you must call the Extract method which is chained to the response, or an -Extract function from an applicable extension: - - result := servers.Get(client, "{serverId}") - - // Attempt to extract the disk configuration from the OS-DCF disk config - // extension: - config, err := diskconfig.ExtractGet(result) - -All requests that enumerate a collection return a Pager struct that is used to -iterate through the results one page at a time. 
Use the EachPage method on that -Pager to handle each successive Page in a closure, then use the appropriate -extraction method from that request's package to interpret that Page as a slice -of results: - - err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { - s, err := servers.ExtractServers(page) - if err != nil { - return false, err - } - - // Handle the []servers.Server slice. - - // Return "false" or an error to prematurely stop fetching new pages. - return true, nil - }) - -If you want to obtain the entire collection of pages without doing any -intermediary processing on each page, you can use the AllPages method: - - allPages, err := servers.List(client, nil).AllPages() - allServers, err := servers.ExtractServers(allPages) - -This top-level package contains utility functions and data types that are used -throughout the provider and service packages. Of particular note for end users -are the AuthOptions and EndpointOpts structs. -*/ -package gophercloud diff --git a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go deleted file mode 100644 index 2fbc3c97f14..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go +++ /dev/null @@ -1,76 +0,0 @@ -package gophercloud - -// Availability indicates to whom a specific service endpoint is accessible: -// the internet at large, internal networks only, or only to administrators. -// Different identity services use different terminology for these. Identity v2 -// lists them as different kinds of URLs within the service catalog ("adminURL", -// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an -// endpoint's response. -type Availability string - -const ( - // AvailabilityAdmin indicates that an endpoint is only available to - // administrators. 
- AvailabilityAdmin Availability = "admin" - - // AvailabilityPublic indicates that an endpoint is available to everyone on - // the internet. - AvailabilityPublic Availability = "public" - - // AvailabilityInternal indicates that an endpoint is only available within - // the cluster's internal network. - AvailabilityInternal Availability = "internal" -) - -// EndpointOpts specifies search criteria used by queries against an -// OpenStack service catalog. The options must contain enough information to -// unambiguously identify one, and only one, endpoint within the catalog. -// -// Usually, these are passed to service client factory functions in a provider -// package, like "openstack.NewComputeV2()". -type EndpointOpts struct { - // Type [required] is the service type for the client (e.g., "compute", - // "object-store"). Generally, this will be supplied by the service client - // function, but a user-given value will be honored if provided. - Type string - - // Name [optional] is the service name for the client (e.g., "nova") as it - // appears in the service catalog. Services can have the same Type but a - // different Name, which is why both Type and Name are sometimes needed. - Name string - - // Region [required] is the geographic region in which the endpoint resides, - // generally specifying which datacenter should house your resources. - // Required only for services that span multiple regions. - Region string - - // Availability [optional] is the visibility of the endpoint to be returned. - // Valid types include the constants AvailabilityPublic, AvailabilityInternal, - // or AvailabilityAdmin from this package. - // - // Availability is not required, and defaults to AvailabilityPublic. Not all - // providers or services offer all Availability options. - Availability Availability -} - -/* -EndpointLocator is an internal function to be used by provider implementations. 
- -It provides an implementation that locates a single endpoint from a service -catalog for a specific ProviderClient based on user-provided EndpointOpts. The -provider then uses it to discover related ServiceClients. -*/ -type EndpointLocator func(EndpointOpts) (string, error) - -// ApplyDefaults is an internal method to be used by provider implementations. -// -// It sets EndpointOpts fields if not already set, including a default type. -// Currently, EndpointOpts.Availability defaults to the public endpoint. -func (eo *EndpointOpts) ApplyDefaults(t string) { - if eo.Type == "" { - eo.Type = t - } - if eo.Availability == "" { - eo.Availability = AvailabilityPublic - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go deleted file mode 100644 index 88fd2ac676b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/errors.go +++ /dev/null @@ -1,401 +0,0 @@ -package gophercloud - -import "fmt" - -// BaseError is an error type that all other error types embed. -type BaseError struct { - DefaultErrString string - Info string -} - -func (e BaseError) Error() string { - e.DefaultErrString = "An error occurred while executing a Gophercloud request." - return e.choseErrString() -} - -func (e BaseError) choseErrString() string { - if e.Info != "" { - return e.Info - } - return e.DefaultErrString -} - -// ErrMissingInput is the error when input is required in a particular -// situation but not provided by the user -type ErrMissingInput struct { - BaseError - Argument string -} - -func (e ErrMissingInput) Error() string { - e.DefaultErrString = fmt.Sprintf("Missing input for argument [%s]", e.Argument) - return e.choseErrString() -} - -// ErrInvalidInput is an error type used for most non-HTTP Gophercloud errors. 
-type ErrInvalidInput struct { - ErrMissingInput - Value interface{} -} - -func (e ErrInvalidInput) Error() string { - e.DefaultErrString = fmt.Sprintf("Invalid input provided for argument [%s]: [%+v]", e.Argument, e.Value) - return e.choseErrString() -} - -// ErrUnexpectedResponseCode is returned by the Request method when a response code other than -// those listed in OkCodes is encountered. -type ErrUnexpectedResponseCode struct { - BaseError - URL string - Method string - Expected []int - Actual int - Body []byte -} - -func (e ErrUnexpectedResponseCode) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s", - e.Expected, e.Method, e.URL, e.Actual, e.Body, - ) - return e.choseErrString() -} - -// ErrDefault400 is the default error type returned on a 400 HTTP response code. -type ErrDefault400 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault401 is the default error type returned on a 401 HTTP response code. -type ErrDefault401 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault404 is the default error type returned on a 404 HTTP response code. -type ErrDefault404 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault405 is the default error type returned on a 405 HTTP response code. -type ErrDefault405 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault408 is the default error type returned on a 408 HTTP response code. -type ErrDefault408 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault429 is the default error type returned on a 429 HTTP response code. -type ErrDefault429 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault500 is the default error type returned on a 500 HTTP response code. -type ErrDefault500 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault503 is the default error type returned on a 503 HTTP response code. 
-type ErrDefault503 struct { - ErrUnexpectedResponseCode -} - -func (e ErrDefault400) Error() string { - return "Invalid request due to incorrect syntax or missing required parameters." -} -func (e ErrDefault401) Error() string { - return "Authentication failed" -} -func (e ErrDefault404) Error() string { - return "Resource not found" -} -func (e ErrDefault405) Error() string { - return "Method not allowed" -} -func (e ErrDefault408) Error() string { - return "The server timed out waiting for the request" -} -func (e ErrDefault429) Error() string { - return "Too many requests have been sent in a given amount of time. Pause" + - " requests, wait up to one minute, and try again." -} -func (e ErrDefault500) Error() string { - return "Internal Server Error" -} -func (e ErrDefault503) Error() string { - return "The service is currently unable to handle the request due to a temporary" + - " overloading or maintenance. This is a temporary condition. Try again later." -} - -// Err400er is the interface resource error types implement to override the error message -// from a 400 error. -type Err400er interface { - Error400(ErrUnexpectedResponseCode) error -} - -// Err401er is the interface resource error types implement to override the error message -// from a 401 error. -type Err401er interface { - Error401(ErrUnexpectedResponseCode) error -} - -// Err404er is the interface resource error types implement to override the error message -// from a 404 error. -type Err404er interface { - Error404(ErrUnexpectedResponseCode) error -} - -// Err405er is the interface resource error types implement to override the error message -// from a 405 error. -type Err405er interface { - Error405(ErrUnexpectedResponseCode) error -} - -// Err408er is the interface resource error types implement to override the error message -// from a 408 error. 
-type Err408er interface { - Error408(ErrUnexpectedResponseCode) error -} - -// Err429er is the interface resource error types implement to override the error message -// from a 429 error. -type Err429er interface { - Error429(ErrUnexpectedResponseCode) error -} - -// Err500er is the interface resource error types implement to override the error message -// from a 500 error. -type Err500er interface { - Error500(ErrUnexpectedResponseCode) error -} - -// Err503er is the interface resource error types implement to override the error message -// from a 503 error. -type Err503er interface { - Error503(ErrUnexpectedResponseCode) error -} - -// ErrTimeOut is the error type returned when an operations times out. -type ErrTimeOut struct { - BaseError -} - -func (e ErrTimeOut) Error() string { - e.DefaultErrString = "A time out occurred" - return e.choseErrString() -} - -// ErrUnableToReauthenticate is the error type returned when reauthentication fails. -type ErrUnableToReauthenticate struct { - BaseError - ErrOriginal error -} - -func (e ErrUnableToReauthenticate) Error() string { - e.DefaultErrString = fmt.Sprintf("Unable to re-authenticate: %s", e.ErrOriginal) - return e.choseErrString() -} - -// ErrErrorAfterReauthentication is the error type returned when reauthentication -// succeeds, but an error occurs afterword (usually an HTTP error). -type ErrErrorAfterReauthentication struct { - BaseError - ErrOriginal error -} - -func (e ErrErrorAfterReauthentication) Error() string { - e.DefaultErrString = fmt.Sprintf("Successfully re-authenticated, but got error executing request: %s", e.ErrOriginal) - return e.choseErrString() -} - -// ErrServiceNotFound is returned when no service in a service catalog matches -// the provided EndpointOpts. This is generally returned by provider service -// factory methods like "NewComputeV2()" and can mean that a service is not -// enabled for your account. 
-type ErrServiceNotFound struct { - BaseError -} - -func (e ErrServiceNotFound) Error() string { - e.DefaultErrString = "No suitable service could be found in the service catalog." - return e.choseErrString() -} - -// ErrEndpointNotFound is returned when no available endpoints match the -// provided EndpointOpts. This is also generally returned by provider service -// factory methods, and usually indicates that a region was specified -// incorrectly. -type ErrEndpointNotFound struct { - BaseError -} - -func (e ErrEndpointNotFound) Error() string { - e.DefaultErrString = "No suitable endpoint could be found in the service catalog." - return e.choseErrString() -} - -// ErrResourceNotFound is the error when trying to retrieve a resource's -// ID by name and the resource doesn't exist. -type ErrResourceNotFound struct { - BaseError - Name string - ResourceType string -} - -func (e ErrResourceNotFound) Error() string { - e.DefaultErrString = fmt.Sprintf("Unable to find %s with name %s", e.ResourceType, e.Name) - return e.choseErrString() -} - -// ErrMultipleResourcesFound is the error when trying to retrieve a resource's -// ID by name and multiple resources have the user-provided name. 
-type ErrMultipleResourcesFound struct { - BaseError - Name string - Count int - ResourceType string -} - -func (e ErrMultipleResourcesFound) Error() string { - e.DefaultErrString = fmt.Sprintf("Found %d %ss matching %s", e.Count, e.ResourceType, e.Name) - return e.choseErrString() -} - -// ErrUnexpectedType is the error when an unexpected type is encountered -type ErrUnexpectedType struct { - BaseError - Expected string - Actual string -} - -func (e ErrUnexpectedType) Error() string { - e.DefaultErrString = fmt.Sprintf("Expected %s but got %s", e.Expected, e.Actual) - return e.choseErrString() -} - -func unacceptedAttributeErr(attribute string) string { - return fmt.Sprintf("The base Identity V3 API does not accept authentication by %s", attribute) -} - -func redundantWithTokenErr(attribute string) string { - return fmt.Sprintf("%s may not be provided when authenticating with a TokenID", attribute) -} - -func redundantWithUserID(attribute string) string { - return fmt.Sprintf("%s may not be provided when authenticating with a UserID", attribute) -} - -// ErrAPIKeyProvided indicates that an APIKey was provided but can't be used. -type ErrAPIKeyProvided struct{ BaseError } - -func (e ErrAPIKeyProvided) Error() string { - return unacceptedAttributeErr("APIKey") -} - -// ErrTenantIDProvided indicates that a TenantID was provided but can't be used. -type ErrTenantIDProvided struct{ BaseError } - -func (e ErrTenantIDProvided) Error() string { - return unacceptedAttributeErr("TenantID") -} - -// ErrTenantNameProvided indicates that a TenantName was provided but can't be used. -type ErrTenantNameProvided struct{ BaseError } - -func (e ErrTenantNameProvided) Error() string { - return unacceptedAttributeErr("TenantName") -} - -// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead. 
-type ErrUsernameWithToken struct{ BaseError } - -func (e ErrUsernameWithToken) Error() string { - return redundantWithTokenErr("Username") -} - -// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead. -type ErrUserIDWithToken struct{ BaseError } - -func (e ErrUserIDWithToken) Error() string { - return redundantWithTokenErr("UserID") -} - -// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead. -type ErrDomainIDWithToken struct{ BaseError } - -func (e ErrDomainIDWithToken) Error() string { - return redundantWithTokenErr("DomainID") -} - -// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead.s -type ErrDomainNameWithToken struct{ BaseError } - -func (e ErrDomainNameWithToken) Error() string { - return redundantWithTokenErr("DomainName") -} - -// ErrUsernameOrUserID indicates that neither username nor userID are specified, or both are at once. -type ErrUsernameOrUserID struct{ BaseError } - -func (e ErrUsernameOrUserID) Error() string { - return "Exactly one of Username and UserID must be provided for password authentication" -} - -// ErrDomainIDWithUserID indicates that a DomainID was provided, but unnecessary because a UserID is being used. -type ErrDomainIDWithUserID struct{ BaseError } - -func (e ErrDomainIDWithUserID) Error() string { - return redundantWithUserID("DomainID") -} - -// ErrDomainNameWithUserID indicates that a DomainName was provided, but unnecessary because a UserID is being used. -type ErrDomainNameWithUserID struct{ BaseError } - -func (e ErrDomainNameWithUserID) Error() string { - return redundantWithUserID("DomainName") -} - -// ErrDomainIDOrDomainName indicates that a username was provided, but no domain to scope it. -// It may also indicate that both a DomainID and a DomainName were provided at once. 
-type ErrDomainIDOrDomainName struct{ BaseError } - -func (e ErrDomainIDOrDomainName) Error() string { - return "You must provide exactly one of DomainID or DomainName to authenticate by Username" -} - -// ErrMissingPassword indicates that no password was provided and no token is available. -type ErrMissingPassword struct{ BaseError } - -func (e ErrMissingPassword) Error() string { - return "You must provide a password to authenticate" -} - -// ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present. -type ErrScopeDomainIDOrDomainName struct{ BaseError } - -func (e ErrScopeDomainIDOrDomainName) Error() string { - return "You must provide exactly one of DomainID or DomainName in a Scope with ProjectName" -} - -// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope. -type ErrScopeProjectIDOrProjectName struct{ BaseError } - -func (e ErrScopeProjectIDOrProjectName) Error() string { - return "You must provide at most one of ProjectID or ProjectName in a Scope" -} - -// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope. -type ErrScopeProjectIDAlone struct{ BaseError } - -func (e ErrScopeProjectIDAlone) Error() string { - return "ProjectID must be supplied alone in a Scope" -} - -// ErrScopeEmpty indicates that no credentials were provided in a Scope. 
-type ErrScopeEmpty struct{ BaseError } - -func (e ErrScopeEmpty) Error() string { - return "You must provide either a Project or Domain in a Scope" -} diff --git a/vendor/github.com/gophercloud/gophercloud/internal/pkg.go b/vendor/github.com/gophercloud/gophercloud/internal/pkg.go deleted file mode 100644 index 5bf0569ce8c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/internal/pkg.go +++ /dev/null @@ -1 +0,0 @@ -package internal diff --git a/vendor/github.com/gophercloud/gophercloud/internal/util.go b/vendor/github.com/gophercloud/gophercloud/internal/util.go deleted file mode 100644 index 8efb283e729..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/internal/util.go +++ /dev/null @@ -1,34 +0,0 @@ -package internal - -import ( - "reflect" - "strings" -) - -// RemainingKeys will inspect a struct and compare it to a map. Any struct -// field that does not have a JSON tag that matches a key in the map or -// a matching lower-case field in the map will be returned as an extra. -// -// This is useful for determining the extra fields returned in response bodies -// for resources that can contain an arbitrary or dynamic number of fields. 
-func RemainingKeys(s interface{}, m map[string]interface{}) (extras map[string]interface{}) { - extras = make(map[string]interface{}) - for k, v := range m { - extras[k] = v - } - - valueOf := reflect.ValueOf(s) - typeOf := reflect.TypeOf(s) - for i := 0; i < valueOf.NumField(); i++ { - field := typeOf.Field(i) - - lowerField := strings.ToLower(field.Name) - delete(extras, lowerField) - - if tagValue := field.Tag.Get("json"); tagValue != "" && tagValue != "-" { - delete(extras, tagValue) - } - } - - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go deleted file mode 100644 index 95286041d66..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go +++ /dev/null @@ -1,64 +0,0 @@ -package openstack - -import ( - "os" - - "github.com/gophercloud/gophercloud" -) - -var nilOptions = gophercloud.AuthOptions{} - -/* -AuthOptionsFromEnv fills out an identity.AuthOptions structure with the -settings found on the various OpenStack OS_* environment variables. - -The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, -OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME. - -Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must have settings, -or an error will result. OS_TENANT_ID and OS_TENANT_NAME are optional. 
- -To use this function, first set the OS_* environment variables (for example, -by sourcing an `openrc` file), then: - - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(opts) -*/ -func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { - authURL := os.Getenv("OS_AUTH_URL") - username := os.Getenv("OS_USERNAME") - userID := os.Getenv("OS_USERID") - password := os.Getenv("OS_PASSWORD") - tenantID := os.Getenv("OS_TENANT_ID") - tenantName := os.Getenv("OS_TENANT_NAME") - domainID := os.Getenv("OS_DOMAIN_ID") - domainName := os.Getenv("OS_DOMAIN_NAME") - - if authURL == "" { - err := gophercloud.ErrMissingInput{Argument: "authURL"} - return nilOptions, err - } - - if username == "" && userID == "" { - err := gophercloud.ErrMissingInput{Argument: "username"} - return nilOptions, err - } - - if password == "" { - err := gophercloud.ErrMissingInput{Argument: "password"} - return nilOptions, err - } - - ao := gophercloud.AuthOptions{ - IdentityEndpoint: authURL, - UserID: userID, - Username: username, - Password: password, - TenantID: tenantID, - TenantName: tenantName, - DomainID: domainID, - DomainName: domainName, - } - - return ao, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go deleted file mode 100644 index a78d3d0482c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Package volumeactions provides information and interaction with volumes in the -OpenStack Block Storage service. A volume is a detachable block storage -device, akin to a USB hard drive. 
- -Example of Attaching a Volume to an Instance - - attachOpts := volumeactions.AttachOpts{ - MountPoint: "/mnt", - Mode: "rw", - InstanceUUID: server.ID, - } - - err := volumeactions.Attach(client, volume.ID, attachOpts).ExtractErr() - if err != nil { - panic(err) - } - - detachOpts := volumeactions.DetachOpts{ - AttachmentID: volume.Attachments[0].AttachmentID, - } - - err = volumeactions.Detach(client, volume.ID, detachOpts).ExtractErr() - if err != nil { - panic(err) - } - - -Example of Creating an Image from a Volume - - uploadImageOpts := volumeactions.UploadImageOpts{ - ImageName: "my_vol", - Force: true, - } - - volumeImage, err := volumeactions.UploadImage(client, volume.ID, uploadImageOpts).Extract() - if err != nil { - panic(err) - } - - fmt.Printf("%+v\n", volumeImage) - -Example of Extending a Volume's Size - - extendOpts := volumeactions.ExtendSizeOpts{ - NewSize: 100, - } - - err := volumeactions.ExtendSize(client, volume.ID, extendOpts).ExtractErr() - if err != nil { - panic(err) - } - -Example of Initializing a Volume Connection - - connectOpts := &volumeactions.InitializeConnectionOpts{ - IP: "127.0.0.1", - Host: "stack", - Initiator: "iqn.1994-05.com.redhat:17cf566367d2", - Multipath: gophercloud.Disabled, - Platform: "x86_64", - OSType: "linux2", - } - - connectionInfo, err := volumeactions.InitializeConnection(client, volume.ID, connectOpts).Extract() - if err != nil { - panic(err) - } - - fmt.Printf("%+v\n", connectionInfo["data"]) - - terminateOpts := &volumeactions.InitializeConnectionOpts{ - IP: "127.0.0.1", - Host: "stack", - Initiator: "iqn.1994-05.com.redhat:17cf566367d2", - Multipath: gophercloud.Disabled, - Platform: "x86_64", - OSType: "linux2", - } - - err = volumeactions.TerminateConnection(client, volume.ID, terminateOpts).ExtractErr() - if err != nil { - panic(err) - } -*/ -package volumeactions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go deleted file mode 100644 index a3916c77c16..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go +++ /dev/null @@ -1,263 +0,0 @@ -package volumeactions - -import ( - "github.com/gophercloud/gophercloud" -) - -// AttachOptsBuilder allows extensions to add additional parameters to the -// Attach request. -type AttachOptsBuilder interface { - ToVolumeAttachMap() (map[string]interface{}, error) -} - -// AttachMode describes the attachment mode for volumes. -type AttachMode string - -// These constants determine how a volume is attached. -const ( - ReadOnly AttachMode = "ro" - ReadWrite AttachMode = "rw" -) - -// AttachOpts contains options for attaching a Volume. -type AttachOpts struct { - // The mountpoint of this volume. - MountPoint string `json:"mountpoint,omitempty"` - - // The nova instance ID, can't set simultaneously with HostName. - InstanceUUID string `json:"instance_uuid,omitempty"` - - // The hostname of baremetal host, can't set simultaneously with InstanceUUID. - HostName string `json:"host_name,omitempty"` - - // Mount mode of this volume. - Mode AttachMode `json:"mode,omitempty"` -} - -// ToVolumeAttachMap assembles a request body based on the contents of a -// AttachOpts. -func (opts AttachOpts) ToVolumeAttachMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "os-attach") -} - -// Attach will attach a volume based on the values in AttachOpts. -func Attach(client *gophercloud.ServiceClient, id string, opts AttachOptsBuilder) (r AttachResult) { - b, err := opts.ToVolumeAttachMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(attachURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// BeginDetach will mark the volume as detaching. 
-func BeginDetaching(client *gophercloud.ServiceClient, id string) (r BeginDetachingResult) { - b := map[string]interface{}{"os-begin_detaching": make(map[string]interface{})} - _, r.Err = client.Post(beginDetachingURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// DetachOptsBuilder allows extensions to add additional parameters to the -// Detach request. -type DetachOptsBuilder interface { - ToVolumeDetachMap() (map[string]interface{}, error) -} - -// DetachOpts contains options for detaching a Volume. -type DetachOpts struct { - // AttachmentID is the ID of the attachment between a volume and instance. - AttachmentID string `json:"attachment_id,omitempty"` -} - -// ToVolumeDetachMap assembles a request body based on the contents of a -// DetachOpts. -func (opts DetachOpts) ToVolumeDetachMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "os-detach") -} - -// Detach will detach a volume based on volume ID. -func Detach(client *gophercloud.ServiceClient, id string, opts DetachOptsBuilder) (r DetachResult) { - b, err := opts.ToVolumeDetachMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(detachURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// Reserve will reserve a volume based on volume ID. -func Reserve(client *gophercloud.ServiceClient, id string) (r ReserveResult) { - b := map[string]interface{}{"os-reserve": make(map[string]interface{})} - _, r.Err = client.Post(reserveURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201, 202}, - }) - return -} - -// Unreserve will unreserve a volume based on volume ID. 
-func Unreserve(client *gophercloud.ServiceClient, id string) (r UnreserveResult) { - b := map[string]interface{}{"os-unreserve": make(map[string]interface{})} - _, r.Err = client.Post(unreserveURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201, 202}, - }) - return -} - -// InitializeConnectionOptsBuilder allows extensions to add additional parameters to the -// InitializeConnection request. -type InitializeConnectionOptsBuilder interface { - ToVolumeInitializeConnectionMap() (map[string]interface{}, error) -} - -// InitializeConnectionOpts hosts options for InitializeConnection. -// The fields are specific to the storage driver in use and the destination -// attachment. -type InitializeConnectionOpts struct { - IP string `json:"ip,omitempty"` - Host string `json:"host,omitempty"` - Initiator string `json:"initiator,omitempty"` - Wwpns []string `json:"wwpns,omitempty"` - Wwnns string `json:"wwnns,omitempty"` - Multipath *bool `json:"multipath,omitempty"` - Platform string `json:"platform,omitempty"` - OSType string `json:"os_type,omitempty"` -} - -// ToVolumeInitializeConnectionMap assembles a request body based on the contents of a -// InitializeConnectionOpts. -func (opts InitializeConnectionOpts) ToVolumeInitializeConnectionMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "connector") - return map[string]interface{}{"os-initialize_connection": b}, err -} - -// InitializeConnection initializes an iSCSI connection by volume ID. 
-func InitializeConnection(client *gophercloud.ServiceClient, id string, opts InitializeConnectionOptsBuilder) (r InitializeConnectionResult) { - b, err := opts.ToVolumeInitializeConnectionMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(initializeConnectionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201, 202}, - }) - return -} - -// TerminateConnectionOptsBuilder allows extensions to add additional parameters to the -// TerminateConnection request. -type TerminateConnectionOptsBuilder interface { - ToVolumeTerminateConnectionMap() (map[string]interface{}, error) -} - -// TerminateConnectionOpts hosts options for TerminateConnection. -type TerminateConnectionOpts struct { - IP string `json:"ip,omitempty"` - Host string `json:"host,omitempty"` - Initiator string `json:"initiator,omitempty"` - Wwpns []string `json:"wwpns,omitempty"` - Wwnns string `json:"wwnns,omitempty"` - Multipath *bool `json:"multipath,omitempty"` - Platform string `json:"platform,omitempty"` - OSType string `json:"os_type,omitempty"` -} - -// ToVolumeTerminateConnectionMap assembles a request body based on the contents of a -// TerminateConnectionOpts. -func (opts TerminateConnectionOpts) ToVolumeTerminateConnectionMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "connector") - return map[string]interface{}{"os-terminate_connection": b}, err -} - -// TerminateConnection terminates an iSCSI connection by volume ID. -func TerminateConnection(client *gophercloud.ServiceClient, id string, opts TerminateConnectionOptsBuilder) (r TerminateConnectionResult) { - b, err := opts.ToVolumeTerminateConnectionMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(teminateConnectionURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// ExtendSizeOptsBuilder allows extensions to add additional parameters to the -// ExtendSize request. 
-type ExtendSizeOptsBuilder interface { - ToVolumeExtendSizeMap() (map[string]interface{}, error) -} - -// ExtendSizeOpts contains options for extending the size of an existing Volume. -// This object is passed to the volumes.ExtendSize function. -type ExtendSizeOpts struct { - // NewSize is the new size of the volume, in GB. - NewSize int `json:"new_size" required:"true"` -} - -// ToVolumeExtendSizeMap assembles a request body based on the contents of an -// ExtendSizeOpts. -func (opts ExtendSizeOpts) ToVolumeExtendSizeMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "os-extend") -} - -// ExtendSize will extend the size of the volume based on the provided information. -// This operation does not return a response body. -func ExtendSize(client *gophercloud.ServiceClient, id string, opts ExtendSizeOptsBuilder) (r ExtendSizeResult) { - b, err := opts.ToVolumeExtendSizeMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(extendSizeURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// UploadImageOptsBuilder allows extensions to add additional parameters to the -// UploadImage request. -type UploadImageOptsBuilder interface { - ToVolumeUploadImageMap() (map[string]interface{}, error) -} - -// UploadImageOpts contains options for uploading a Volume to image storage. -type UploadImageOpts struct { - // Container format, may be bare, ofv, ova, etc. - ContainerFormat string `json:"container_format,omitempty"` - - // Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc. - DiskFormat string `json:"disk_format,omitempty"` - - // The name of image that will be stored in glance. - ImageName string `json:"image_name,omitempty"` - - // Force image creation, usable if volume attached to instance. - Force bool `json:"force,omitempty"` -} - -// ToVolumeUploadImageMap assembles a request body based on the contents of a -// UploadImageOpts. 
-func (opts UploadImageOpts) ToVolumeUploadImageMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "os-volume_upload_image") -} - -// UploadImage will upload an image based on the values in UploadImageOptsBuilder. -func UploadImage(client *gophercloud.ServiceClient, id string, opts UploadImageOptsBuilder) (r UploadImageResult) { - b, err := opts.ToVolumeUploadImageMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(uploadURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go deleted file mode 100644 index 9815f0c26ac..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go +++ /dev/null @@ -1,186 +0,0 @@ -package volumeactions - -import ( - "encoding/json" - "time" - - "github.com/gophercloud/gophercloud" -) - -// AttachResult contains the response body and error from an Attach request. -type AttachResult struct { - gophercloud.ErrResult -} - -// BeginDetachingResult contains the response body and error from a BeginDetach -// request. -type BeginDetachingResult struct { - gophercloud.ErrResult -} - -// DetachResult contains the response body and error from a Detach request. -type DetachResult struct { - gophercloud.ErrResult -} - -// UploadImageResult contains the response body and error from an UploadImage -// request. -type UploadImageResult struct { - gophercloud.Result -} - -// ReserveResult contains the response body and error from a Reserve request. -type ReserveResult struct { - gophercloud.ErrResult -} - -// UnreserveResult contains the response body and error from an Unreserve -// request. 
-type UnreserveResult struct { - gophercloud.ErrResult -} - -// TerminateConnectionResult contains the response body and error from a -// TerminateConnection request. -type TerminateConnectionResult struct { - gophercloud.ErrResult -} - -// InitializeConnectionResult contains the response body and error from an -// InitializeConnection request. -type InitializeConnectionResult struct { - gophercloud.Result -} - -// ExtendSizeResult contains the response body and error from an ExtendSize request. -type ExtendSizeResult struct { - gophercloud.ErrResult -} - -// Extract will get the connection information out of the -// InitializeConnectionResult object. -// -// This will be a generic map[string]interface{} and the results will be -// dependent on the type of connection made. -func (r InitializeConnectionResult) Extract() (map[string]interface{}, error) { - var s struct { - ConnectionInfo map[string]interface{} `json:"connection_info"` - } - err := r.ExtractInto(&s) - return s.ConnectionInfo, err -} - -// ImageVolumeType contains volume type information obtained from UploadImage -// action. -type ImageVolumeType struct { - // The ID of a volume type. - ID string `json:"id"` - - // Human-readable display name for the volume type. - Name string `json:"name"` - - // Human-readable description for the volume type. - Description string `json:"display_description"` - - // Flag for public access. - IsPublic bool `json:"is_public"` - - // Extra specifications for volume type. - ExtraSpecs map[string]interface{} `json:"extra_specs"` - - // ID of quality of service specs. - QosSpecsID string `json:"qos_specs_id"` - - // Flag for deletion status of volume type. - Deleted bool `json:"deleted"` - - // The date when volume type was deleted. - DeletedAt time.Time `json:"-"` - - // The date when volume type was created. - CreatedAt time.Time `json:"-"` - - // The date when this volume was last updated. 
- UpdatedAt time.Time `json:"-"` -} - -func (r *ImageVolumeType) UnmarshalJSON(b []byte) error { - type tmp ImageVolumeType - var s struct { - tmp - CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` - DeletedAt gophercloud.JSONRFC3339MilliNoZ `json:"deleted_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = ImageVolumeType(s.tmp) - - r.CreatedAt = time.Time(s.CreatedAt) - r.UpdatedAt = time.Time(s.UpdatedAt) - r.DeletedAt = time.Time(s.DeletedAt) - - return err -} - -// VolumeImage contains information about volume uploaded to an image service. -type VolumeImage struct { - // The ID of a volume an image is created from. - VolumeID string `json:"id"` - - // Container format, may be bare, ofv, ova, etc. - ContainerFormat string `json:"container_format"` - - // Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc. - DiskFormat string `json:"disk_format"` - - // Human-readable description for the volume. - Description string `json:"display_description"` - - // The ID of the created image. - ImageID string `json:"image_id"` - - // Human-readable display name for the image. - ImageName string `json:"image_name"` - - // Size of the volume in GB. - Size int `json:"size"` - - // Current status of the volume. - Status string `json:"status"` - - // The date when this volume was last updated. - UpdatedAt time.Time `json:"-"` - - // Volume type object of used volume. - VolumeType ImageVolumeType `json:"volume_type"` -} - -func (r *VolumeImage) UnmarshalJSON(b []byte) error { - type tmp VolumeImage - var s struct { - tmp - UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = VolumeImage(s.tmp) - - r.UpdatedAt = time.Time(s.UpdatedAt) - - return err -} - -// Extract will get an object with info about the uploaded image out of the -// UploadImageResult object. 
-func (r UploadImageResult) Extract() (VolumeImage, error) { - var s struct { - VolumeImage VolumeImage `json:"os-volume_upload_image"` - } - err := r.ExtractInto(&s) - return s.VolumeImage, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go deleted file mode 100644 index 5efd2b25c05..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go +++ /dev/null @@ -1,39 +0,0 @@ -package volumeactions - -import "github.com/gophercloud/gophercloud" - -func attachURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("volumes", id, "action") -} - -func beginDetachingURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func detachURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func uploadURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func reserveURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func unreserveURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func initializeConnectionURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func teminateConnectionURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func extendSizeURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go deleted file mode 100644 index 307b8b12d2f..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package volumes provides 
information and interaction with volumes in the -// OpenStack Block Storage service. A volume is a detachable block storage -// device, akin to a USB hard drive. It can only be attached to one instance at -// a time. -package volumes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go deleted file mode 100644 index 566def51811..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go +++ /dev/null @@ -1,167 +0,0 @@ -package volumes - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToVolumeCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains options for creating a Volume. This object is passed to -// the volumes.Create function. For more information about these parameters, -// see the Volume object. -type CreateOpts struct { - Size int `json:"size" required:"true"` - AvailabilityZone string `json:"availability_zone,omitempty"` - Description string `json:"display_description,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` - Name string `json:"display_name,omitempty"` - SnapshotID string `json:"snapshot_id,omitempty"` - SourceVolID string `json:"source_volid,omitempty"` - ImageID string `json:"imageRef,omitempty"` - VolumeType string `json:"volume_type,omitempty"` -} - -// ToVolumeCreateMap assembles a request body based on the contents of a -// CreateOpts. -func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "volume") -} - -// Create will create a new Volume based on the values in CreateOpts. 
To extract -// the Volume object from the response, call the Extract method on the -// CreateResult. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToVolumeCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - }) - return -} - -// Delete will delete the existing Volume with the provided ID. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} - -// Get retrieves the Volume with the provided ID. To extract the Volume object -// from the response, call the Extract method on the GetResult. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// ListOptsBuilder allows extensions to add additional parameters to the List -// request. -type ListOptsBuilder interface { - ToVolumeListQuery() (string, error) -} - -// ListOpts holds options for listing Volumes. It is passed to the volumes.List -// function. -type ListOpts struct { - // admin-only option. Set it to true to see all tenant volumes. - AllTenants bool `q:"all_tenants"` - // List only volumes that contain Metadata. - Metadata map[string]string `q:"metadata"` - // List only volumes that have Name as the display name. - Name string `q:"display_name"` - // List only volumes that have a status of Status. - Status string `q:"status"` -} - -// ToVolumeListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToVolumeListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns Volumes optionally limited by the conditions provided in ListOpts. 
-func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(client) - if opts != nil { - query, err := opts.ToVolumeListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return VolumePage{pagination.SinglePageBase(r)} - }) -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToVolumeUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contain options for updating an existing Volume. This object is passed -// to the volumes.Update function. For more information about the parameters, see -// the Volume object. -type UpdateOpts struct { - Name string `json:"display_name,omitempty"` - Description string `json:"display_description,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` -} - -// ToVolumeUpdateMap assembles a request body based on the contents of an -// UpdateOpts. -func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "volume") -} - -// Update will update the Volume with provided information. To extract the updated -// Volume from the response, call the Extract method on the UpdateResult. -func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToVolumeUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// IDFromName is a convienience function that returns a server's ID given its name. 
-func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - count := 0 - id := "" - pages, err := List(client, nil).AllPages() - if err != nil { - return "", err - } - - all, err := ExtractVolumes(pages) - if err != nil { - return "", err - } - - for _, s := range all { - if s.Name == name { - count++ - id = s.ID - } - } - - switch count { - case 0: - return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"} - case 1: - return id, nil - default: - return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"} - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go deleted file mode 100644 index 7f68d148639..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go +++ /dev/null @@ -1,109 +0,0 @@ -package volumes - -import ( - "encoding/json" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Volume contains all the information associated with an OpenStack Volume. -type Volume struct { - // Current status of the volume. - Status string `json:"status"` - // Human-readable display name for the volume. - Name string `json:"display_name"` - // Instances onto which the volume is attached. - Attachments []map[string]interface{} `json:"attachments"` - // This parameter is no longer used. - AvailabilityZone string `json:"availability_zone"` - // Indicates whether this is a bootable volume. - Bootable string `json:"bootable"` - // The date when this volume was created. - CreatedAt time.Time `json:"-"` - // Human-readable description for the volume. - Description string `json:"display_description"` - // The type of volume to create, either SATA or SSD. 
- VolumeType string `json:"volume_type"` - // The ID of the snapshot from which the volume was created - SnapshotID string `json:"snapshot_id"` - // The ID of another block storage volume from which the current volume was created - SourceVolID string `json:"source_volid"` - // Arbitrary key-value pairs defined by the user. - Metadata map[string]string `json:"metadata"` - // Unique identifier for the volume. - ID string `json:"id"` - // Size of the volume in GB. - Size int `json:"size"` -} - -func (r *Volume) UnmarshalJSON(b []byte) error { - type tmp Volume - var s struct { - tmp - CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Volume(s.tmp) - - r.CreatedAt = time.Time(s.CreatedAt) - - return err -} - -// CreateResult contains the response body and error from a Create request. -type CreateResult struct { - commonResult -} - -// GetResult contains the response body and error from a Get request. -type GetResult struct { - commonResult -} - -// DeleteResult contains the response body and error from a Delete request. -type DeleteResult struct { - gophercloud.ErrResult -} - -// VolumePage is a pagination.pager that is returned from a call to the List function. -type VolumePage struct { - pagination.SinglePageBase -} - -// IsEmpty returns true if a VolumePage contains no Volumes. -func (r VolumePage) IsEmpty() (bool, error) { - volumes, err := ExtractVolumes(r) - return len(volumes) == 0, err -} - -// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. -func ExtractVolumes(r pagination.Page) ([]Volume, error) { - var s struct { - Volumes []Volume `json:"volumes"` - } - err := (r.(VolumePage)).ExtractInto(&s) - return s.Volumes, err -} - -// UpdateResult contains the response body and error from an Update request. 
-type UpdateResult struct { - commonResult -} - -type commonResult struct { - gophercloud.Result -} - -// Extract will get the Volume object out of the commonResult object. -func (r commonResult) Extract() (*Volume, error) { - var s struct { - Volume *Volume `json:"volume"` - } - err := r.ExtractInto(&s) - return s.Volume, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go deleted file mode 100644 index 8a00f97e98c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go +++ /dev/null @@ -1,23 +0,0 @@ -package volumes - -import "github.com/gophercloud/gophercloud" - -func createURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("volumes") -} - -func listURL(c *gophercloud.ServiceClient) string { - return createURL(c) -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("volumes", id) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return deleteURL(c, id) -} - -func updateURL(c *gophercloud.ServiceClient, id string) string { - return deleteURL(c, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go deleted file mode 100644 index e86c1b4b4ee..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go +++ /dev/null @@ -1,22 +0,0 @@ -package volumes - -import ( - "github.com/gophercloud/gophercloud" -) - -// WaitForStatus will continually poll the resource, checking for a particular -// status. It will do this for the amount of seconds defined. 
-func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { - return gophercloud.WaitFor(secs, func() (bool, error) { - current, err := Get(c, id).Extract() - if err != nil { - return false, err - } - - if current.Status == status { - return true, nil - } - - return false, nil - }) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go deleted file mode 100644 index 307b8b12d2f..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package volumes provides information and interaction with volumes in the -// OpenStack Block Storage service. A volume is a detachable block storage -// device, akin to a USB hard drive. It can only be attached to one instance at -// a time. -package volumes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go deleted file mode 100644 index 18c9cb272ec..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go +++ /dev/null @@ -1,182 +0,0 @@ -package volumes - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToVolumeCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains options for creating a Volume. This object is passed to -// the volumes.Create function. For more information about these parameters, -// see the Volume object. 
-type CreateOpts struct { - // The size of the volume, in GB - Size int `json:"size" required:"true"` - // The availability zone - AvailabilityZone string `json:"availability_zone,omitempty"` - // ConsistencyGroupID is the ID of a consistency group - ConsistencyGroupID string `json:"consistencygroup_id,omitempty"` - // The volume description - Description string `json:"description,omitempty"` - // One or more metadata key and value pairs to associate with the volume - Metadata map[string]string `json:"metadata,omitempty"` - // The volume name - Name string `json:"name,omitempty"` - // the ID of the existing volume snapshot - SnapshotID string `json:"snapshot_id,omitempty"` - // SourceReplica is a UUID of an existing volume to replicate with - SourceReplica string `json:"source_replica,omitempty"` - // the ID of the existing volume - SourceVolID string `json:"source_volid,omitempty"` - // The ID of the image from which you want to create the volume. - // Required to create a bootable volume. - ImageID string `json:"imageRef,omitempty"` - // The associated volume type - VolumeType string `json:"volume_type,omitempty"` -} - -// ToVolumeCreateMap assembles a request body based on the contents of a -// CreateOpts. -func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "volume") -} - -// Create will create a new Volume based on the values in CreateOpts. To extract -// the Volume object from the response, call the Extract method on the -// CreateResult. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToVolumeCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// Delete will delete the existing Volume with the provided ID. 
-func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} - -// Get retrieves the Volume with the provided ID. To extract the Volume object -// from the response, call the Extract method on the GetResult. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// ListOptsBuilder allows extensions to add additional parameters to the List -// request. -type ListOptsBuilder interface { - ToVolumeListQuery() (string, error) -} - -// ListOpts holds options for listing Volumes. It is passed to the volumes.List -// function. -type ListOpts struct { - // admin-only option. Set it to true to see all tenant volumes. - AllTenants bool `q:"all_tenants"` - // List only volumes that contain Metadata. - Metadata map[string]string `q:"metadata"` - // List only volumes that have Name as the display name. - Name string `q:"name"` - // List only volumes that have a status of Status. - Status string `q:"status"` -} - -// ToVolumeListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToVolumeListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns Volumes optionally limited by the conditions provided in ListOpts. -func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(client) - if opts != nil { - query, err := opts.ToVolumeListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return VolumePage{pagination.SinglePageBase(r)} - }) -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. 
-type UpdateOptsBuilder interface { - ToVolumeUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contain options for updating an existing Volume. This object is passed -// to the volumes.Update function. For more information about the parameters, see -// the Volume object. -type UpdateOpts struct { - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` -} - -// ToVolumeUpdateMap assembles a request body based on the contents of an -// UpdateOpts. -func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "volume") -} - -// Update will update the Volume with provided information. To extract the updated -// Volume from the response, call the Extract method on the UpdateResult. -func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToVolumeUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// IDFromName is a convienience function that returns a server's ID given its name. 
-func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - count := 0 - id := "" - pages, err := List(client, nil).AllPages() - if err != nil { - return "", err - } - - all, err := ExtractVolumes(pages) - if err != nil { - return "", err - } - - for _, s := range all { - if s.Name == name { - count++ - id = s.ID - } - } - - switch count { - case 0: - return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"} - case 1: - return id, nil - default: - return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"} - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go deleted file mode 100644 index 674ec346865..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go +++ /dev/null @@ -1,154 +0,0 @@ -package volumes - -import ( - "encoding/json" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -type Attachment struct { - AttachedAt time.Time `json:"-"` - AttachmentID string `json:"attachment_id"` - Device string `json:"device"` - HostName string `json:"host_name"` - ID string `json:"id"` - ServerID string `json:"server_id"` - VolumeID string `json:"volume_id"` -} - -func (r *Attachment) UnmarshalJSON(b []byte) error { - type tmp Attachment - var s struct { - tmp - AttachedAt gophercloud.JSONRFC3339MilliNoZ `json:"attached_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Attachment(s.tmp) - - r.AttachedAt = time.Time(s.AttachedAt) - - return err -} - -// Volume contains all the information associated with an OpenStack Volume. -type Volume struct { - // Unique identifier for the volume. - ID string `json:"id"` - // Current status of the volume. - Status string `json:"status"` - // Size of the volume in GB. 
- Size int `json:"size"` - // AvailabilityZone is which availability zone the volume is in. - AvailabilityZone string `json:"availability_zone"` - // The date when this volume was created. - CreatedAt time.Time `json:"-"` - // The date when this volume was last updated - UpdatedAt time.Time `json:"-"` - // Instances onto which the volume is attached. - Attachments []Attachment `json:"attachments"` - // Human-readable display name for the volume. - Name string `json:"name"` - // Human-readable description for the volume. - Description string `json:"description"` - // The type of volume to create, either SATA or SSD. - VolumeType string `json:"volume_type"` - // The ID of the snapshot from which the volume was created - SnapshotID string `json:"snapshot_id"` - // The ID of another block storage volume from which the current volume was created - SourceVolID string `json:"source_volid"` - // Arbitrary key-value pairs defined by the user. - Metadata map[string]string `json:"metadata"` - // UserID is the id of the user who created the volume. - UserID string `json:"user_id"` - // Indicates whether this is a bootable volume. - Bootable string `json:"bootable"` - // Encrypted denotes if the volume is encrypted. - Encrypted bool `json:"encrypted"` - // ReplicationStatus is the status of replication. - ReplicationStatus string `json:"replication_status"` - // ConsistencyGroupID is the consistency group ID. - ConsistencyGroupID string `json:"consistencygroup_id"` - // Multiattach denotes if the volume is multi-attach capable. 
- Multiattach bool `json:"multiattach"` -} - -func (r *Volume) UnmarshalJSON(b []byte) error { - type tmp Volume - var s struct { - tmp - CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Volume(s.tmp) - - r.CreatedAt = time.Time(s.CreatedAt) - r.UpdatedAt = time.Time(s.UpdatedAt) - - return err -} - -// VolumePage is a pagination.pager that is returned from a call to the List function. -type VolumePage struct { - pagination.SinglePageBase -} - -// IsEmpty returns true if a ListResult contains no Volumes. -func (r VolumePage) IsEmpty() (bool, error) { - volumes, err := ExtractVolumes(r) - return len(volumes) == 0, err -} - -// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. -func ExtractVolumes(r pagination.Page) ([]Volume, error) { - var s []Volume - err := ExtractVolumesInto(r, &s) - return s, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract will get the Volume object out of the commonResult object. -func (r commonResult) Extract() (*Volume, error) { - var s Volume - err := r.ExtractInto(&s) - return &s, err -} - -func (r commonResult) ExtractInto(v interface{}) error { - return r.Result.ExtractIntoStructPtr(v, "volume") -} - -func ExtractVolumesInto(r pagination.Page, v interface{}) error { - return r.(VolumePage).Result.ExtractIntoSlicePtr(v, "volumes") -} - -// CreateResult contains the response body and error from a Create request. -type CreateResult struct { - commonResult -} - -// GetResult contains the response body and error from a Get request. -type GetResult struct { - commonResult -} - -// UpdateResult contains the response body and error from an Update request. -type UpdateResult struct { - commonResult -} - -// DeleteResult contains the response body and error from a Delete request. 
-type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go deleted file mode 100644 index 170724905ab..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go +++ /dev/null @@ -1,23 +0,0 @@ -package volumes - -import "github.com/gophercloud/gophercloud" - -func createURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("volumes") -} - -func listURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("volumes", "detail") -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("volumes", id) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return deleteURL(c, id) -} - -func updateURL(c *gophercloud.ServiceClient, id string) string { - return deleteURL(c, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go deleted file mode 100644 index e86c1b4b4ee..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go +++ /dev/null @@ -1,22 +0,0 @@ -package volumes - -import ( - "github.com/gophercloud/gophercloud" -) - -// WaitForStatus will continually poll the resource, checking for a particular -// status. It will do this for the amount of seconds defined. 
-func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { - return gophercloud.WaitFor(secs, func() (bool, error) { - current, err := Get(c, id).Extract() - if err != nil { - return false, err - } - - if current.Status == status { - return true, nil - } - - return false, nil - }) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go deleted file mode 100644 index 77214caca3b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/client.go +++ /dev/null @@ -1,348 +0,0 @@ -package openstack - -import ( - "fmt" - "net/url" - "reflect" - "regexp" - "strings" - - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" - "github.com/gophercloud/gophercloud/openstack/utils" -) - -const ( - // v2 represents Keystone v2. - // It should never increase beyond 2.0. - v2 = "v2.0" - - // v3 represents Keystone v3. - // The version can be anything from v3 to v3.x. - v3 = "v3" -) - -/* -NewClient prepares an unauthenticated ProviderClient instance. -Most users will probably prefer using the AuthenticatedClient function -instead. - -This is useful if you wish to explicitly control the version of the identity -service that's used for authentication explicitly, for example. 
- -A basic example of using this would be: - - ao, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.NewClient(ao.IdentityEndpoint) - client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}) -*/ -func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { - u, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - - u.RawQuery, u.Fragment = "", "" - - var base string - versionRe := regexp.MustCompile("v[0-9.]+/?") - if version := versionRe.FindString(u.Path); version != "" { - base = strings.Replace(u.String(), version, "", -1) - } else { - base = u.String() - } - - endpoint = gophercloud.NormalizeURL(endpoint) - base = gophercloud.NormalizeURL(base) - - return &gophercloud.ProviderClient{ - IdentityBase: base, - IdentityEndpoint: endpoint, - }, nil - -} - -/* -AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint -specified by the options, acquires a token, and returns a Provider Client -instance that's ready to operate. - -If the full path to a versioned identity endpoint was specified (example: -http://example.com:5000/v3), that path will be used as the endpoint to query. - -If a versionless endpoint was specified (example: http://example.com:5000/), -the endpoint will be queried to determine which versions of the identity service -are available, then chooses the most recent or most supported version. 
- -Example: - - ao, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(ao) - client, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), - }) -*/ -func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { - client, err := NewClient(options.IdentityEndpoint) - if err != nil { - return nil, err - } - - err = Authenticate(client, options) - if err != nil { - return nil, err - } - return client, nil -} - -// Authenticate or re-authenticate against the most recent identity service -// supported at the provided endpoint. -func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { - versions := []*utils.Version{ - {ID: v2, Priority: 20, Suffix: "/v2.0/"}, - {ID: v3, Priority: 30, Suffix: "/v3/"}, - } - - chosen, endpoint, err := utils.ChooseVersion(client, versions) - if err != nil { - return err - } - - switch chosen.ID { - case v2: - return v2auth(client, endpoint, options, gophercloud.EndpointOpts{}) - case v3: - return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{}) - default: - // The switch statement must be out of date from the versions list. - return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) - } -} - -// AuthenticateV2 explicitly authenticates against the identity v2 endpoint. 
-func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { - return v2auth(client, "", options, eo) -} - -func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { - v2Client, err := NewIdentityV2(client, eo) - if err != nil { - return err - } - - if endpoint != "" { - v2Client.Endpoint = endpoint - } - - v2Opts := tokens2.AuthOptions{ - IdentityEndpoint: options.IdentityEndpoint, - Username: options.Username, - Password: options.Password, - TenantID: options.TenantID, - TenantName: options.TenantName, - AllowReauth: options.AllowReauth, - TokenID: options.TokenID, - } - - result := tokens2.Create(v2Client, v2Opts) - - token, err := result.ExtractToken() - if err != nil { - return err - } - - catalog, err := result.ExtractServiceCatalog() - if err != nil { - return err - } - - if options.AllowReauth { - client.ReauthFunc = func() error { - client.TokenID = "" - return v2auth(client, endpoint, options, eo) - } - } - client.TokenID = token.ID - client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V2EndpointURL(catalog, opts) - } - - return nil -} - -// AuthenticateV3 explicitly authenticates against the identity v3 service. -func AuthenticateV3(client *gophercloud.ProviderClient, options tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { - return v3auth(client, "", options, eo) -} - -func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { - // Override the generated service endpoint with the one returned by the version endpoint. 
- v3Client, err := NewIdentityV3(client, eo) - if err != nil { - return err - } - - if endpoint != "" { - v3Client.Endpoint = endpoint - } - - result := tokens3.Create(v3Client, opts) - - token, err := result.ExtractToken() - if err != nil { - return err - } - - catalog, err := result.ExtractServiceCatalog() - if err != nil { - return err - } - - client.TokenID = token.ID - - if opts.CanReauth() { - client.ReauthFunc = func() error { - client.TokenID = "" - return v3auth(client, endpoint, opts, eo) - } - } - client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V3EndpointURL(catalog, opts) - } - - return nil -} - -// NewIdentityV2 creates a ServiceClient that may be used to interact with the -// v2 identity service. -func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - endpoint := client.IdentityBase + "v2.0/" - clientType := "identity" - var err error - if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { - eo.ApplyDefaults(clientType) - endpoint, err = client.EndpointLocator(eo) - if err != nil { - return nil, err - } - } - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: endpoint, - Type: clientType, - }, nil -} - -// NewIdentityV3 creates a ServiceClient that may be used to access the v3 -// identity service. -func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - endpoint := client.IdentityBase + "v3/" - clientType := "identity" - var err error - if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { - eo.ApplyDefaults(clientType) - endpoint, err = client.EndpointLocator(eo) - if err != nil { - return nil, err - } - } - - // Ensure endpoint still has a suffix of v3. - // This is because EndpointLocator might have found a versionless - // endpoint and requests will fail unless targeted at /v3. 
- if !strings.HasSuffix(endpoint, "v3/") { - endpoint = endpoint + "v3/" - } - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: endpoint, - Type: clientType, - }, nil -} - -func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) { - sc := new(gophercloud.ServiceClient) - eo.ApplyDefaults(clientType) - url, err := client.EndpointLocator(eo) - if err != nil { - return sc, err - } - sc.ProviderClient = client - sc.Endpoint = url - sc.Type = clientType - return sc, nil -} - -// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 -// object storage package. -func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "object-store") -} - -// NewComputeV2 creates a ServiceClient that may be used with the v2 compute -// package. -func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "compute") -} - -// NewNetworkV2 creates a ServiceClient that may be used with the v2 network -// package. -func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "network") - sc.ResourceBase = sc.Endpoint + "v2.0/" - return sc, err -} - -// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 -// block storage service. -func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volume") -} - -// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 -// block storage service. 
-func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volumev2") -} - -// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the -// v2 shared file system service. -func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "sharev2") -} - -// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1 -// CDN service. -func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "cdn") -} - -// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 -// orchestration service. -func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "orchestration") -} - -// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. -func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "database") -} - -// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS -// service. -func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "dns") - sc.ResourceBase = sc.Endpoint + "v2/" - return sc, err -} - -// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 -// image service. 
-func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "image") - sc.ResourceBase = sc.Endpoint + "v2/" - return sc, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go deleted file mode 100644 index 80464ba3994..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package availabilityzones provides the ability to extend a server result with -availability zone information. Example: - - type ServerWithAZ struct { - servers.Server - availabilityzones.ServerAvailabilityZoneExt - } - - var allServers []ServerWithAZ - - allPages, err := servers.List(client, nil).AllPages() - if err != nil { - panic("Unable to retrieve servers: %s", err) - } - - err = servers.ExtractServersInto(allPages, &allServers) - if err != nil { - panic("Unable to extract servers: %s", err) - } - - for _, server := range allServers { - fmt.Println(server.AvailabilityZone) - } -*/ -package availabilityzones diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go deleted file mode 100644 index ae874041371..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go +++ /dev/null @@ -1,8 +0,0 @@ -package availabilityzones - -// ServerAvailabilityZoneExt is an extension to the base Server result which -// includes the Availability Zone information. -type ServerAvailabilityZoneExt struct { - // AvailabilityZone is the availabilty zone the server is in. 
- AvailabilityZone string `json:"OS-EXT-AZ:availability_zone"` -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go deleted file mode 100644 index d291325e0a1..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Package bootfromvolume extends a server create request with the ability to -specify block device options. This can be used to boot a server from a block -storage volume as well as specify multiple ephemeral disks upon creation. - -It is recommended to refer to the Block Device Mapping documentation to see -all possible ways to configure a server's block devices at creation time: - -https://docs.openstack.org/nova/latest/user/block-device-mapping.html - -Note that this package implements `block_device_mapping_v2`. - -Example of Creating a Server From an Image - -This example will boot a server from an image and use a standard ephemeral -disk as the server's root disk. This is virtually no different than creating -a server without using block device mappings. - - blockDevices := []bootfromvolume.BlockDevice{ - bootfromvolume.BlockDevice{ - BootIndex: 0, - DeleteOnTermination: true, - DestinationType: bootfromvolume.DestinationLocal, - SourceType: bootfromvolume.SourceImage, - UUID: "image-uuid", - }, - } - - serverCreateOpts := servers.CreateOpts{ - Name: "server_name", - FlavorRef: "flavor-uuid", - ImageRef: "image-uuid", - } - - createOpts := bootfromvolume.CreateOptsExt{ - CreateOptsBuilder: serverCreateOpts, - BlockDevice: blockDevices, - } - - server, err := bootfromvolume.Create(client, createOpts).Extract() - if err != nil { - panic(err) - } - -Example of Creating a Server From a New Volume - -This example will create a block storage volume based on the given Image. 
The -server will use this volume as its root disk. - - blockDevices := []bootfromvolume.BlockDevice{ - bootfromvolume.BlockDevice{ - DeleteOnTermination: true, - DestinationType: bootfromvolume.DestinationVolume, - SourceType: bootfromvolume.SourceImage, - UUID: "image-uuid", - VolumeSize: 2, - }, - } - - serverCreateOpts := servers.CreateOpts{ - Name: "server_name", - FlavorRef: "flavor-uuid", - } - - createOpts := bootfromvolume.CreateOptsExt{ - CreateOptsBuilder: serverCreateOpts, - BlockDevice: blockDevices, - } - - server, err := bootfromvolume.Create(client, createOpts).Extract() - if err != nil { - panic(err) - } - -Example of Creating a Server From an Existing Volume - -This example will create a server with an existing volume as its root disk. - - blockDevices := []bootfromvolume.BlockDevice{ - bootfromvolume.BlockDevice{ - DeleteOnTermination: true, - DestinationType: bootfromvolume.DestinationVolume, - SourceType: bootfromvolume.SourceVolume, - UUID: "volume-uuid", - }, - } - - serverCreateOpts := servers.CreateOpts{ - Name: "server_name", - FlavorRef: "flavor-uuid", - } - - createOpts := bootfromvolume.CreateOptsExt{ - CreateOptsBuilder: serverCreateOpts, - BlockDevice: blockDevices, - } - - server, err := bootfromvolume.Create(client, createOpts).Extract() - if err != nil { - panic(err) - } - -Example of Creating a Server with Multiple Ephemeral Disks - -This example will create a server with multiple ephemeral disks. The first -block device will be based off of an existing Image. Each additional -ephemeral disks must have an index of -1. 
- - blockDevices := []bootfromvolume.BlockDevice{ - bootfromvolume.BlockDevice{ - BootIndex: 0, - DestinationType: bootfromvolume.DestinationLocal, - DeleteOnTermination: true, - SourceType: bootfromvolume.SourceImage, - UUID: "image-uuid", - VolumeSize: 5, - }, - bootfromvolume.BlockDevice{ - BootIndex: -1, - DestinationType: bootfromvolume.DestinationLocal, - DeleteOnTermination: true, - GuestFormat: "ext4", - SourceType: bootfromvolume.SourceBlank, - VolumeSize: 1, - }, - bootfromvolume.BlockDevice{ - BootIndex: -1, - DestinationType: bootfromvolume.DestinationLocal, - DeleteOnTermination: true, - GuestFormat: "ext4", - SourceType: bootfromvolume.SourceBlank, - VolumeSize: 1, - }, - } - - serverCreateOpts := servers.CreateOpts{ - Name: "server_name", - FlavorRef: "flavor-uuid", - ImageRef: "image-uuid", - } - - createOpts := bootfromvolume.CreateOptsExt{ - CreateOptsBuilder: serverCreateOpts, - BlockDevice: blockDevices, - } - - server, err := bootfromvolume.Create(client, createOpts).Extract() - if err != nil { - panic(err) - } -*/ -package bootfromvolume diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go deleted file mode 100644 index 9dae14c7a93..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go +++ /dev/null @@ -1,120 +0,0 @@ -package bootfromvolume - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" -) - -type ( - // DestinationType represents the type of medium being used as the - // destination of the bootable device. - DestinationType string - - // SourceType represents the type of medium being used as the source of the - // bootable device. 
- SourceType string -) - -const ( - // DestinationLocal DestinationType is for using an ephemeral disk as the - // destination. - DestinationLocal DestinationType = "local" - - // DestinationVolume DestinationType is for using a volume as the destination. - DestinationVolume DestinationType = "volume" - - // SourceBlank SourceType is for a "blank" or empty source. - SourceBlank SourceType = "blank" - - // SourceImage SourceType is for using images as the source of a block device. - SourceImage SourceType = "image" - - // SourceSnapshot SourceType is for using a volume snapshot as the source of - // a block device. - SourceSnapshot SourceType = "snapshot" - - // SourceVolume SourceType is for using a volume as the source of block - // device. - SourceVolume SourceType = "volume" -) - -// BlockDevice is a structure with options for creating block devices in a -// server. The block device may be created from an image, snapshot, new volume, -// or existing volume. The destination may be a new volume, existing volume -// which will be attached to the instance, ephemeral disk, or boot device. -type BlockDevice struct { - // SourceType must be one of: "volume", "snapshot", "image", or "blank". - SourceType SourceType `json:"source_type" required:"true"` - - // UUID is the unique identifier for the existing volume, snapshot, or - // image (see above). - UUID string `json:"uuid,omitempty"` - - // BootIndex is the boot index. It defaults to 0. - BootIndex int `json:"boot_index"` - - // DeleteOnTermination specifies whether or not to delete the attached volume - // when the server is deleted. Defaults to `false`. - DeleteOnTermination bool `json:"delete_on_termination"` - - // DestinationType is the type that gets created. Possible values are "volume" - // and "local". - DestinationType DestinationType `json:"destination_type,omitempty"` - - // GuestFormat specifies the format of the block device. 
- GuestFormat string `json:"guest_format,omitempty"` - - // VolumeSize is the size of the volume to create (in gigabytes). This can be - // omitted for existing volumes. - VolumeSize int `json:"volume_size,omitempty"` -} - -// CreateOptsExt is a structure that extends the server `CreateOpts` structure -// by allowing for a block device mapping. -type CreateOptsExt struct { - servers.CreateOptsBuilder - BlockDevice []BlockDevice `json:"block_device_mapping_v2,omitempty"` -} - -// ToServerCreateMap adds the block device mapping option to the base server -// creation options. -func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { - base, err := opts.CreateOptsBuilder.ToServerCreateMap() - if err != nil { - return nil, err - } - - if len(opts.BlockDevice) == 0 { - err := gophercloud.ErrMissingInput{} - err.Argument = "bootfromvolume.CreateOptsExt.BlockDevice" - return nil, err - } - - serverMap := base["server"].(map[string]interface{}) - - blockDevice := make([]map[string]interface{}, len(opts.BlockDevice)) - - for i, bd := range opts.BlockDevice { - b, err := gophercloud.BuildRequestBody(bd, "") - if err != nil { - return nil, err - } - blockDevice[i] = b - } - serverMap["block_device_mapping_v2"] = blockDevice - - return base, nil -} - -// Create requests the creation of a server from the given block device mapping. 
-func Create(client *gophercloud.ServiceClient, opts servers.CreateOptsBuilder) (r servers.CreateResult) { - b, err := opts.ToServerCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go deleted file mode 100644 index ba1eafabcd0..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go +++ /dev/null @@ -1,12 +0,0 @@ -package bootfromvolume - -import ( - os "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" -) - -// CreateResult temporarily contains the response from a Create call. -// It embeds the standard servers.CreateResults type and so can be used the -// same way as a standard server request result. 
-type CreateResult struct { - os.CreateResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go deleted file mode 100644 index dc007eadf86..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go +++ /dev/null @@ -1,7 +0,0 @@ -package bootfromvolume - -import "github.com/gophercloud/gophercloud" - -func createURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("os-volumes_boot") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go deleted file mode 100644 index f5dbdbf8b94..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Package floatingips provides the ability to manage floating ips through the -Nova API. - -This API has been deprecated and will be removed from a future release of the -Nova API service. - -For environements that support this extension, this package can be used -regardless of if either Neutron or nova-network is used as the cloud's network -service. 
- -Example to List Floating IPs - - allPages, err := floatingips.List(computeClient).AllPages() - if err != nil { - panic(err) - } - - allFloatingIPs, err := floatingips.ExtractFloatingIPs(allPages) - if err != nil { - panic(err) - } - - for _, fip := range allFloatingIPs { - fmt.Printf("%+v\n", fip) - } - -Example to Create a Floating IP - - createOpts := floatingips.CreateOpts{ - Pool: "nova", - } - - fip, err := floatingips.Create(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Floating IP - - err := floatingips.Delete(computeClient, "floatingip-id").ExtractErr() - if err != nil { - panic(err) - } - -Example to Associate a Floating IP With a Server - - associateOpts := floatingips.AssociateOpts{ - FloatingIP: "10.10.10.2", - } - - err := floatingips.AssociateInstance(computeClient, "server-id", associateOpts).ExtractErr() - if err != nil { - panic(err) - } - -Example to Disassociate a Floating IP From a Server - - disassociateOpts := floatingips.DisassociateOpts{ - FloatingIP: "10.10.10.2", - } - - err := floatingips.DisassociateInstance(computeClient, "server-id", disassociateOpts).ExtractErr() - if err != nil { - panic(err) - } -*/ -package floatingips diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go deleted file mode 100644 index a922639dec9..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go +++ /dev/null @@ -1,114 +0,0 @@ -package floatingips - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// List returns a Pager that allows you to iterate over a collection of FloatingIPs. 
-func List(client *gophercloud.ServiceClient) pagination.Pager { - return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { - return FloatingIPPage{pagination.SinglePageBase(r)} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToFloatingIPCreateMap() (map[string]interface{}, error) -} - -// CreateOpts specifies a Floating IP allocation request. -type CreateOpts struct { - // Pool is the pool of Floating IPs to allocate one from. - Pool string `json:"pool" required:"true"` -} - -// ToFloatingIPCreateMap constructs a request body from CreateOpts. -func (opts CreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "") -} - -// Create requests the creation of a new Floating IP. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToFloatingIPCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Get returns data about a previously created Floating IP. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// Delete requests the deletion of a previous allocated Floating IP. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} - -// AssociateOptsBuilder allows extensions to add additional parameters to the -// Associate request. 
-type AssociateOptsBuilder interface { - ToFloatingIPAssociateMap() (map[string]interface{}, error) -} - -// AssociateOpts specifies the required information to associate a Floating IP with an instance -type AssociateOpts struct { - // FloatingIP is the Floating IP to associate with an instance. - FloatingIP string `json:"address" required:"true"` - - // FixedIP is an optional fixed IP address of the server. - FixedIP string `json:"fixed_address,omitempty"` -} - -// ToFloatingIPAssociateMap constructs a request body from AssociateOpts. -func (opts AssociateOpts) ToFloatingIPAssociateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "addFloatingIp") -} - -// AssociateInstance pairs an allocated Floating IP with a server. -func AssociateInstance(client *gophercloud.ServiceClient, serverID string, opts AssociateOptsBuilder) (r AssociateResult) { - b, err := opts.ToFloatingIPAssociateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(associateURL(client, serverID), b, nil, nil) - return -} - -// DisassociateOptsBuilder allows extensions to add additional parameters to -// the Disassociate request. -type DisassociateOptsBuilder interface { - ToFloatingIPDisassociateMap() (map[string]interface{}, error) -} - -// DisassociateOpts specifies the required information to disassociate a -// Floating IP with a server. -type DisassociateOpts struct { - FloatingIP string `json:"address" required:"true"` -} - -// ToFloatingIPDisassociateMap constructs a request body from DisassociateOpts. 
-func (opts DisassociateOpts) ToFloatingIPDisassociateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "removeFloatingIp") -} - -// DisassociateInstance decouples an allocated Floating IP from an instance -func DisassociateInstance(client *gophercloud.ServiceClient, serverID string, opts DisassociateOptsBuilder) (r DisassociateResult) { - b, err := opts.ToFloatingIPDisassociateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(disassociateURL(client, serverID), b, nil, nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go deleted file mode 100644 index da4e9da0e62..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go +++ /dev/null @@ -1,115 +0,0 @@ -package floatingips - -import ( - "encoding/json" - "strconv" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// A FloatingIP is an IP that can be associated with a server. -type FloatingIP struct { - // ID is a unique ID of the Floating IP - ID string `json:"-"` - - // FixedIP is a specific IP on the server to pair the Floating IP with. - FixedIP string `json:"fixed_ip,omitempty"` - - // InstanceID is the ID of the server that is using the Floating IP. - InstanceID string `json:"instance_id"` - - // IP is the actual Floating IP. - IP string `json:"ip"` - - // Pool is the pool of Floating IPs that this Floating IP belongs to. 
- Pool string `json:"pool"` -} - -func (r *FloatingIP) UnmarshalJSON(b []byte) error { - type tmp FloatingIP - var s struct { - tmp - ID interface{} `json:"id"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = FloatingIP(s.tmp) - - switch t := s.ID.(type) { - case float64: - r.ID = strconv.FormatFloat(t, 'f', -1, 64) - case string: - r.ID = t - } - - return err -} - -// FloatingIPPage stores a single page of FloatingIPs from a List call. -type FloatingIPPage struct { - pagination.SinglePageBase -} - -// IsEmpty determines whether or not a FloatingIPsPage is empty. -func (page FloatingIPPage) IsEmpty() (bool, error) { - va, err := ExtractFloatingIPs(page) - return len(va) == 0, err -} - -// ExtractFloatingIPs interprets a page of results as a slice of FloatingIPs. -func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) { - var s struct { - FloatingIPs []FloatingIP `json:"floating_ips"` - } - err := (r.(FloatingIPPage)).ExtractInto(&s) - return s.FloatingIPs, err -} - -// FloatingIPResult is the raw result from a FloatingIP request. -type FloatingIPResult struct { - gophercloud.Result -} - -// Extract is a method that attempts to interpret any FloatingIP resource -// response as a FloatingIP struct. -func (r FloatingIPResult) Extract() (*FloatingIP, error) { - var s struct { - FloatingIP *FloatingIP `json:"floating_ip"` - } - err := r.ExtractInto(&s) - return s.FloatingIP, err -} - -// CreateResult is the response from a Create operation. Call its Extract method -// to interpret it as a FloatingIP. -type CreateResult struct { - FloatingIPResult -} - -// GetResult is the response from a Get operation. Call its Extract method to -// interpret it as a FloatingIP. -type GetResult struct { - FloatingIPResult -} - -// DeleteResult is the response from a Delete operation. Call its ExtractErr -// method to determine if the call succeeded or failed. 
-type DeleteResult struct { - gophercloud.ErrResult -} - -// AssociateResult is the response from a Delete operation. Call its ExtractErr -// method to determine if the call succeeded or failed. -type AssociateResult struct { - gophercloud.ErrResult -} - -// DisassociateResult is the response from a Delete operation. Call its -// ExtractErr method to determine if the call succeeded or failed. -type DisassociateResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go deleted file mode 100644 index 4768e5a8976..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go +++ /dev/null @@ -1,37 +0,0 @@ -package floatingips - -import "github.com/gophercloud/gophercloud" - -const resourcePath = "os-floating-ips" - -func resourceURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(resourcePath) -} - -func listURL(c *gophercloud.ServiceClient) string { - return resourceURL(c) -} - -func createURL(c *gophercloud.ServiceClient) string { - return resourceURL(c) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(resourcePath, id) -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return getURL(c, id) -} - -func serverURL(c *gophercloud.ServiceClient, serverID string) string { - return c.ServiceURL("servers/" + serverID + "/action") -} - -func associateURL(c *gophercloud.ServiceClient, serverID string) string { - return serverURL(c, serverID) -} - -func disassociateURL(c *gophercloud.ServiceClient, serverID string) string { - return serverURL(c, serverID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go deleted file mode 
100644 index dc7b65fda18..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Package keypairs provides the ability to manage key pairs as well as create -servers with a specified key pair. - -Example to List Key Pairs - - allPages, err := keypairs.List(computeClient).AllPages() - if err != nil { - panic(err) - } - - allKeyPairs, err := keypairs.ExtractKeyPairs(allPages) - if err != nil { - panic(err) - } - - for _, kp := range allKeyPairs { - fmt.Printf("%+v\n", kp) - } - -Example to Create a Key Pair - - createOpts := keypairs.CreateOpts{ - Name: "keypair-name", - } - - keypair, err := keypairs.Create(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } - - fmt.Printf("%+v", keypair) - -Example to Import a Key Pair - - createOpts := keypairs.CreateOpts{ - Name: "keypair-name", - PublicKey: "public-key", - } - - keypair, err := keypairs.Create(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Key Pair - - err := keypairs.Delete(computeClient, "keypair-name").ExtractErr() - if err != nil { - panic(err) - } - -Example to Create a Server With a Key Pair - - serverCreateOpts := servers.CreateOpts{ - Name: "server_name", - ImageRef: "image-uuid", - FlavorRef: "flavor-uuid", - } - - createOpts := keypairs.CreateOpts{ - CreateOptsBuilder: serverCreateOpts, - KeyName: "keypair-name", - } - - server, err := servers.Create(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } -*/ -package keypairs diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go deleted file mode 100644 index 4e5e499e3aa..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go +++ /dev/null @@ -1,86 +0,0 @@ -package keypairs - 
-import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/pagination" -) - -// CreateOptsExt adds a KeyPair option to the base CreateOpts. -type CreateOptsExt struct { - servers.CreateOptsBuilder - - // KeyName is the name of the key pair. - KeyName string `json:"key_name,omitempty"` -} - -// ToServerCreateMap adds the key_name to the base server creation options. -func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { - base, err := opts.CreateOptsBuilder.ToServerCreateMap() - if err != nil { - return nil, err - } - - if opts.KeyName == "" { - return base, nil - } - - serverMap := base["server"].(map[string]interface{}) - serverMap["key_name"] = opts.KeyName - - return base, nil -} - -// List returns a Pager that allows you to iterate over a collection of KeyPairs. -func List(client *gophercloud.ServiceClient) pagination.Pager { - return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { - return KeyPairPage{pagination.SinglePageBase(r)} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToKeyPairCreateMap() (map[string]interface{}, error) -} - -// CreateOpts specifies KeyPair creation or import parameters. -type CreateOpts struct { - // Name is a friendly name to refer to this KeyPair in other services. - Name string `json:"name" required:"true"` - - // PublicKey [optional] is a pregenerated OpenSSH-formatted public key. - // If provided, this key will be imported and no new key will be created. - PublicKey string `json:"public_key,omitempty"` -} - -// ToKeyPairCreateMap constructs a request body from CreateOpts. 
-func (opts CreateOpts) ToKeyPairCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "keypair") -} - -// Create requests the creation of a new KeyPair on the server, or to import a -// pre-existing keypair. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToKeyPairCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Get returns public data about a previously uploaded KeyPair. -func Get(client *gophercloud.ServiceClient, name string) (r GetResult) { - _, r.Err = client.Get(getURL(client, name), &r.Body, nil) - return -} - -// Delete requests the deletion of a previous stored KeyPair from the server. -func Delete(client *gophercloud.ServiceClient, name string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, name), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go deleted file mode 100644 index 2d71034b101..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go +++ /dev/null @@ -1,91 +0,0 @@ -package keypairs - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// KeyPair is an SSH key known to the OpenStack Cloud that is available to be -// injected into servers. -type KeyPair struct { - // Name is used to refer to this keypair from other services within this - // region. - Name string `json:"name"` - - // Fingerprint is a short sequence of bytes that can be used to authenticate - // or validate a longer public key. - Fingerprint string `json:"fingerprint"` - - // PublicKey is the public key from this pair, in OpenSSH format. 
- // "ssh-rsa AAAAB3Nz..." - PublicKey string `json:"public_key"` - - // PrivateKey is the private key from this pair, in PEM format. - // "-----BEGIN RSA PRIVATE KEY-----\nMIICXA..." - // It is only present if this KeyPair was just returned from a Create call. - PrivateKey string `json:"private_key"` - - // UserID is the user who owns this KeyPair. - UserID string `json:"user_id"` -} - -// KeyPairPage stores a single page of all KeyPair results from a List call. -// Use the ExtractKeyPairs function to convert the results to a slice of -// KeyPairs. -type KeyPairPage struct { - pagination.SinglePageBase -} - -// IsEmpty determines whether or not a KeyPairPage is empty. -func (page KeyPairPage) IsEmpty() (bool, error) { - ks, err := ExtractKeyPairs(page) - return len(ks) == 0, err -} - -// ExtractKeyPairs interprets a page of results as a slice of KeyPairs. -func ExtractKeyPairs(r pagination.Page) ([]KeyPair, error) { - type pair struct { - KeyPair KeyPair `json:"keypair"` - } - var s struct { - KeyPairs []pair `json:"keypairs"` - } - err := (r.(KeyPairPage)).ExtractInto(&s) - results := make([]KeyPair, len(s.KeyPairs)) - for i, pair := range s.KeyPairs { - results[i] = pair.KeyPair - } - return results, err -} - -type keyPairResult struct { - gophercloud.Result -} - -// Extract is a method that attempts to interpret any KeyPair resource response -// as a KeyPair struct. -func (r keyPairResult) Extract() (*KeyPair, error) { - var s struct { - KeyPair *KeyPair `json:"keypair"` - } - err := r.ExtractInto(&s) - return s.KeyPair, err -} - -// CreateResult is the response from a Create operation. Call its Extract method -// to interpret it as a KeyPair. -type CreateResult struct { - keyPairResult -} - -// GetResult is the response from a Get operation. Call its Extract method to -// interpret it as a KeyPair. -type GetResult struct { - keyPairResult -} - -// DeleteResult is the response from a Delete operation. 
Call its ExtractErr -// method to determine if the call succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go deleted file mode 100644 index fec38f36793..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go +++ /dev/null @@ -1,25 +0,0 @@ -package keypairs - -import "github.com/gophercloud/gophercloud" - -const resourcePath = "os-keypairs" - -func resourceURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(resourcePath) -} - -func listURL(c *gophercloud.ServiceClient) string { - return resourceURL(c) -} - -func createURL(c *gophercloud.ServiceClient) string { - return resourceURL(c) -} - -func getURL(c *gophercloud.ServiceClient, name string) string { - return c.ServiceURL(resourcePath, name) -} - -func deleteURL(c *gophercloud.ServiceClient, name string) string { - return getURL(c, name) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go deleted file mode 100644 index 2d9d3acdeca..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Package schedulerhints extends the server create request with the ability to -specify additional parameters which determine where the server will be -created in the OpenStack cloud. 
- -Example to Add a Server to a Server Group - - schedulerHints := schedulerhints.SchedulerHints{ - Group: "servergroup-uuid", - } - - serverCreateOpts := servers.CreateOpts{ - Name: "server_name", - ImageRef: "image-uuid", - FlavorRef: "flavor-uuid", - } - - createOpts := schedulerhints.CreateOptsExt{ - CreateOptsBuilder: serverCreateOpts, - SchedulerHints: schedulerHints, - } - - server, err := servers.Create(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Place Server B on a Different Host than Server A - - schedulerHints := schedulerhints.SchedulerHints{ - DifferentHost: []string{ - "server-a-uuid", - } - } - - serverCreateOpts := servers.CreateOpts{ - Name: "server_b", - ImageRef: "image-uuid", - FlavorRef: "flavor-uuid", - } - - createOpts := schedulerhints.CreateOptsExt{ - CreateOptsBuilder: serverCreateOpts, - SchedulerHints: schedulerHints, - } - - server, err := servers.Create(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Place Server B on the Same Host as Server A - - schedulerHints := schedulerhints.SchedulerHints{ - SameHost: []string{ - "server-a-uuid", - } - } - - serverCreateOpts := servers.CreateOpts{ - Name: "server_b", - ImageRef: "image-uuid", - FlavorRef: "flavor-uuid", - } - - createOpts := schedulerhints.CreateOptsExt{ - CreateOptsBuilder: serverCreateOpts, - SchedulerHints: schedulerHints, - } - - server, err := servers.Create(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } -*/ -package schedulerhints diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go deleted file mode 100644 index 3fabeddef3b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go +++ /dev/null @@ -1,164 +0,0 @@ -package schedulerhints - -import ( 
- "net" - "regexp" - "strings" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" -) - -// SchedulerHints represents a set of scheduling hints that are passed to the -// OpenStack scheduler. -type SchedulerHints struct { - // Group specifies a Server Group to place the instance in. - Group string - - // DifferentHost will place the instance on a compute node that does not - // host the given instances. - DifferentHost []string - - // SameHost will place the instance on a compute node that hosts the given - // instances. - SameHost []string - - // Query is a conditional statement that results in compute nodes able to - // host the instance. - Query []interface{} - - // TargetCell specifies a cell name where the instance will be placed. - TargetCell string `json:"target_cell,omitempty"` - - // BuildNearHostIP specifies a subnet of compute nodes to host the instance. - BuildNearHostIP string - - // AdditionalProperies are arbitrary key/values that are not validated by nova. - AdditionalProperties map[string]interface{} -} - -// CreateOptsBuilder builds the scheduler hints into a serializable format. -type CreateOptsBuilder interface { - ToServerSchedulerHintsCreateMap() (map[string]interface{}, error) -} - -// ToServerSchedulerHintsMap builds the scheduler hints into a serializable format. 
-func (opts SchedulerHints) ToServerSchedulerHintsCreateMap() (map[string]interface{}, error) { - sh := make(map[string]interface{}) - - uuidRegex, _ := regexp.Compile("^[a-z0-9]{8}-[a-z0-9]{4}-[1-5][a-z0-9]{3}-[a-z0-9]{4}-[a-z0-9]{12}$") - - if opts.Group != "" { - if !uuidRegex.MatchString(opts.Group) { - err := gophercloud.ErrInvalidInput{} - err.Argument = "schedulerhints.SchedulerHints.Group" - err.Value = opts.Group - err.Info = "Group must be a UUID" - return nil, err - } - sh["group"] = opts.Group - } - - if len(opts.DifferentHost) > 0 { - for _, diffHost := range opts.DifferentHost { - if !uuidRegex.MatchString(diffHost) { - err := gophercloud.ErrInvalidInput{} - err.Argument = "schedulerhints.SchedulerHints.DifferentHost" - err.Value = opts.DifferentHost - err.Info = "The hosts must be in UUID format." - return nil, err - } - } - sh["different_host"] = opts.DifferentHost - } - - if len(opts.SameHost) > 0 { - for _, sameHost := range opts.SameHost { - if !uuidRegex.MatchString(sameHost) { - err := gophercloud.ErrInvalidInput{} - err.Argument = "schedulerhints.SchedulerHints.SameHost" - err.Value = opts.SameHost - err.Info = "The hosts must be in UUID format." - return nil, err - } - } - sh["same_host"] = opts.SameHost - } - - /* - Query can be something simple like: - [">=", "$free_ram_mb", 1024] - - Or more complex like: - ['and', - ['>=', '$free_ram_mb', 1024], - ['>=', '$free_disk_mb', 200 * 1024] - ] - - Because of the possible complexity, just make sure the length is a minimum of 3. 
- */ - if len(opts.Query) > 0 { - if len(opts.Query) < 3 { - err := gophercloud.ErrInvalidInput{} - err.Argument = "schedulerhints.SchedulerHints.Query" - err.Value = opts.Query - err.Info = "Must be a conditional statement in the format of [op,variable,value]" - return nil, err - } - sh["query"] = opts.Query - } - - if opts.TargetCell != "" { - sh["target_cell"] = opts.TargetCell - } - - if opts.BuildNearHostIP != "" { - if _, _, err := net.ParseCIDR(opts.BuildNearHostIP); err != nil { - err := gophercloud.ErrInvalidInput{} - err.Argument = "schedulerhints.SchedulerHints.BuildNearHostIP" - err.Value = opts.BuildNearHostIP - err.Info = "Must be a valid subnet in the form 192.168.1.1/24" - return nil, err - } - ipParts := strings.Split(opts.BuildNearHostIP, "/") - sh["build_near_host_ip"] = ipParts[0] - sh["cidr"] = "/" + ipParts[1] - } - - if opts.AdditionalProperties != nil { - for k, v := range opts.AdditionalProperties { - sh[k] = v - } - } - - return sh, nil -} - -// CreateOptsExt adds a SchedulerHints option to the base CreateOpts. -type CreateOptsExt struct { - servers.CreateOptsBuilder - - // SchedulerHints provides a set of hints to the scheduler. - SchedulerHints CreateOptsBuilder -} - -// ToServerCreateMap adds the SchedulerHints option to the base server creation options. 
-func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { - base, err := opts.CreateOptsBuilder.ToServerCreateMap() - if err != nil { - return nil, err - } - - schedulerHints, err := opts.SchedulerHints.ToServerSchedulerHintsCreateMap() - if err != nil { - return nil, err - } - - if len(schedulerHints) == 0 { - return base, nil - } - - base["os:scheduler_hints"] = schedulerHints - - return base, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go deleted file mode 100644 index 8d3ebf2e5d5..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Package secgroups provides the ability to manage security groups through the -Nova API. - -This API has been deprecated and will be removed from a future release of the -Nova API service. - -For environments that support this extension, this package can be used -regardless of if either Neutron or nova-network is used as the cloud's network -service. 
- -Example to List Security Groups - - allPages, err := secroups.List(computeClient).AllPages() - if err != nil { - panic(err) - } - - allSecurityGroups, err := secgroups.ExtractSecurityGroups(allPages) - if err != nil { - panic(err) - } - - for _, sg := range allSecurityGroups { - fmt.Printf("%+v\n", sg) - } - -Example to List Security Groups by Server - - serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36" - - allPages, err := secroups.ListByServer(computeClient, serverID).AllPages() - if err != nil { - panic(err) - } - - allSecurityGroups, err := secgroups.ExtractSecurityGroups(allPages) - if err != nil { - panic(err) - } - - for _, sg := range allSecurityGroups { - fmt.Printf("%+v\n", sg) - } - -Example to Create a Security Group - - createOpts := secgroups.CreateOpts{ - Name: "group_name", - Description: "A Security Group", - } - - sg, err := secgroups.Create(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Create a Security Group Rule - - sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141" - - createOpts := secgroups.CreateRuleOpts{ - ParentGroupID: sgID, - FromPort: 22, - ToPort: 22, - IPProtocol: "tcp", - CIDR: "0.0.0.0/0", - } - - rule, err := secgroups.CreateRule(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Add a Security Group to a Server - - serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36" - sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141" - - err := secgroups.AddServer(computeClient, serverID, sgID).ExtractErr() - if err != nil { - panic(err) - } - -Example to Remove a Security Group from a Server - - serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36" - sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141" - - err := secgroups.RemoveServer(computeClient, serverID, sgID).ExtractErr() - if err != nil { - panic(err) - } - -Example to Delete a Security Group - - - sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141" - err := secgroups.Delete(computeClient, sgID).ExtractErr() - if err != nil { - 
panic(err) - } - -Example to Delete a Security Group Rule - - ruleID := "6221fe3e-383d-46c9-a3a6-845e66c1e8b4" - err := secgroups.DeleteRule(computeClient, ruleID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package secgroups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go deleted file mode 100644 index bcceaeacdd1..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go +++ /dev/null @@ -1,183 +0,0 @@ -package secgroups - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -func commonList(client *gophercloud.ServiceClient, url string) pagination.Pager { - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return SecurityGroupPage{pagination.SinglePageBase(r)} - }) -} - -// List will return a collection of all the security groups for a particular -// tenant. -func List(client *gophercloud.ServiceClient) pagination.Pager { - return commonList(client, rootURL(client)) -} - -// ListByServer will return a collection of all the security groups which are -// associated with a particular server. -func ListByServer(client *gophercloud.ServiceClient, serverID string) pagination.Pager { - return commonList(client, listByServerURL(client, serverID)) -} - -// GroupOpts is the underlying struct responsible for creating or updating -// security groups. It therefore represents the mutable attributes of a -// security group. -type GroupOpts struct { - // the name of your security group. - Name string `json:"name" required:"true"` - // the description of your security group. - Description string `json:"description" required:"true"` -} - -// CreateOpts is the struct responsible for creating a security group. 
-type CreateOpts GroupOpts - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToSecGroupCreateMap() (map[string]interface{}, error) -} - -// ToSecGroupCreateMap builds a request body from CreateOpts. -func (opts CreateOpts) ToSecGroupCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "security_group") -} - -// Create will create a new security group. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToSecGroupCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(rootURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// UpdateOpts is the struct responsible for updating an existing security group. -type UpdateOpts GroupOpts - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToSecGroupUpdateMap() (map[string]interface{}, error) -} - -// ToSecGroupUpdateMap builds a request body from UpdateOpts. -func (opts UpdateOpts) ToSecGroupUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "security_group") -} - -// Update will modify the mutable properties of a security group, notably its -// name and description. -func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToSecGroupUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(resourceURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Get will return details for a particular security group. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(resourceURL(client, id), &r.Body, nil) - return -} - -// Delete will permanently delete a security group from the project. 
-func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(resourceURL(client, id), nil) - return -} - -// CreateRuleOpts represents the configuration for adding a new rule to an -// existing security group. -type CreateRuleOpts struct { - // ID is the ID of the group that this rule will be added to. - ParentGroupID string `json:"parent_group_id" required:"true"` - - // FromPort is the lower bound of the port range that will be opened. - // Use -1 to allow all ICMP traffic. - FromPort int `json:"from_port"` - - // ToPort is the upper bound of the port range that will be opened. - // Use -1 to allow all ICMP traffic. - ToPort int `json:"to_port"` - - // IPProtocol the protocol type that will be allowed, e.g. TCP. - IPProtocol string `json:"ip_protocol" required:"true"` - - // CIDR is the network CIDR to allow traffic from. - // This is ONLY required if FromGroupID is blank. This represents the IP - // range that will be the source of network traffic to your security group. - // Use 0.0.0.0/0 to allow all IP addresses. - CIDR string `json:"cidr,omitempty" or:"FromGroupID"` - - // FromGroupID represents another security group to allow access. - // This is ONLY required if CIDR is blank. This value represents the ID of a - // group that forwards traffic to the parent group. So, instead of accepting - // network traffic from an entire IP range, you can instead refine the - // inbound source by an existing security group. - FromGroupID string `json:"group_id,omitempty" or:"CIDR"` -} - -// CreateRuleOptsBuilder allows extensions to add additional parameters to the -// CreateRule request. -type CreateRuleOptsBuilder interface { - ToRuleCreateMap() (map[string]interface{}, error) -} - -// ToRuleCreateMap builds a request body from CreateRuleOpts. 
-func (opts CreateRuleOpts) ToRuleCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "security_group_rule") -} - -// CreateRule will add a new rule to an existing security group (whose ID is -// specified in CreateRuleOpts). You have the option of controlling inbound -// traffic from either an IP range (CIDR) or from another security group. -func CreateRule(client *gophercloud.ServiceClient, opts CreateRuleOptsBuilder) (r CreateRuleResult) { - b, err := opts.ToRuleCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(rootRuleURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// DeleteRule will permanently delete a rule from a security group. -func DeleteRule(client *gophercloud.ServiceClient, id string) (r DeleteRuleResult) { - _, r.Err = client.Delete(resourceRuleURL(client, id), nil) - return -} - -func actionMap(prefix, groupName string) map[string]map[string]string { - return map[string]map[string]string{ - prefix + "SecurityGroup": map[string]string{"name": groupName}, - } -} - -// AddServer will associate a server and a security group, enforcing the -// rules of the group on the server. -func AddServer(client *gophercloud.ServiceClient, serverID, groupName string) (r AddServerResult) { - _, r.Err = client.Post(serverActionURL(client, serverID), actionMap("add", groupName), &r.Body, nil) - return -} - -// RemoveServer will disassociate a server from a security group. 
-func RemoveServer(client *gophercloud.ServiceClient, serverID, groupName string) (r RemoveServerResult) { - _, r.Err = client.Post(serverActionURL(client, serverID), actionMap("remove", groupName), &r.Body, nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go deleted file mode 100644 index cf08547e901..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go +++ /dev/null @@ -1,214 +0,0 @@ -package secgroups - -import ( - "encoding/json" - "strconv" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// SecurityGroup represents a security group. -type SecurityGroup struct { - // The unique ID of the group. If Neutron is installed, this ID will be - // represented as a string UUID; if Neutron is not installed, it will be a - // numeric ID. For the sake of consistency, we always cast it to a string. - ID string `json:"-"` - - // The human-readable name of the group, which needs to be unique. - Name string `json:"name"` - - // The human-readable description of the group. - Description string `json:"description"` - - // The rules which determine how this security group operates. - Rules []Rule `json:"rules"` - - // The ID of the tenant to which this security group belongs. 
- TenantID string `json:"tenant_id"` -} - -func (r *SecurityGroup) UnmarshalJSON(b []byte) error { - type tmp SecurityGroup - var s struct { - tmp - ID interface{} `json:"id"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = SecurityGroup(s.tmp) - - switch t := s.ID.(type) { - case float64: - r.ID = strconv.FormatFloat(t, 'f', -1, 64) - case string: - r.ID = t - } - - return err -} - -// Rule represents a security group rule, a policy which determines how a -// security group operates and what inbound traffic it allows in. -type Rule struct { - // The unique ID. If Neutron is installed, this ID will be - // represented as a string UUID; if Neutron is not installed, it will be a - // numeric ID. For the sake of consistency, we always cast it to a string. - ID string `json:"-"` - - // The lower bound of the port range which this security group should open up. - FromPort int `json:"from_port"` - - // The upper bound of the port range which this security group should open up. - ToPort int `json:"to_port"` - - // The IP protocol (e.g. TCP) which the security group accepts. - IPProtocol string `json:"ip_protocol"` - - // The CIDR IP range whose traffic can be received. - IPRange IPRange `json:"ip_range"` - - // The security group ID to which this rule belongs. - ParentGroupID string `json:"parent_group_id"` - - // Not documented. 
- Group Group -} - -func (r *Rule) UnmarshalJSON(b []byte) error { - type tmp Rule - var s struct { - tmp - ID interface{} `json:"id"` - ParentGroupID interface{} `json:"parent_group_id"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = Rule(s.tmp) - - switch t := s.ID.(type) { - case float64: - r.ID = strconv.FormatFloat(t, 'f', -1, 64) - case string: - r.ID = t - } - - switch t := s.ParentGroupID.(type) { - case float64: - r.ParentGroupID = strconv.FormatFloat(t, 'f', -1, 64) - case string: - r.ParentGroupID = t - } - - return err -} - -// IPRange represents the IP range whose traffic will be accepted by the -// security group. -type IPRange struct { - CIDR string -} - -// Group represents a group. -type Group struct { - TenantID string `json:"tenant_id"` - Name string -} - -// SecurityGroupPage is a single page of a SecurityGroup collection. -type SecurityGroupPage struct { - pagination.SinglePageBase -} - -// IsEmpty determines whether or not a page of Security Groups contains any -// results. -func (page SecurityGroupPage) IsEmpty() (bool, error) { - users, err := ExtractSecurityGroups(page) - return len(users) == 0, err -} - -// ExtractSecurityGroups returns a slice of SecurityGroups contained in a -// single page of results. -func ExtractSecurityGroups(r pagination.Page) ([]SecurityGroup, error) { - var s struct { - SecurityGroups []SecurityGroup `json:"security_groups"` - } - err := (r.(SecurityGroupPage)).ExtractInto(&s) - return s.SecurityGroups, err -} - -type commonResult struct { - gophercloud.Result -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret the result as a SecurityGroup. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret the result as a SecurityGroup. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. 
Call its Extract -// method to interpret the result as a SecurityGroup. -type UpdateResult struct { - commonResult -} - -// Extract will extract a SecurityGroup struct from most responses. -func (r commonResult) Extract() (*SecurityGroup, error) { - var s struct { - SecurityGroup *SecurityGroup `json:"security_group"` - } - err := r.ExtractInto(&s) - return s.SecurityGroup, err -} - -// CreateRuleResult represents the result when adding rules to a security group. -// Call its Extract method to interpret the result as a Rule. -type CreateRuleResult struct { - gophercloud.Result -} - -// Extract will extract a Rule struct from a CreateRuleResult. -func (r CreateRuleResult) Extract() (*Rule, error) { - var s struct { - Rule *Rule `json:"security_group_rule"` - } - err := r.ExtractInto(&s) - return s.Rule, err -} - -// DeleteResult is the response from delete operation. Call its ExtractErr -// method to determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// DeleteRuleResult is the response from a DeleteRule operation. Call its -// ExtractErr method to determine if the request succeeded or failed. -type DeleteRuleResult struct { - gophercloud.ErrResult -} - -// AddServerResult is the response from an AddServer operation. Call its -// ExtractErr method to determine if the request succeeded or failed. -type AddServerResult struct { - gophercloud.ErrResult -} - -// RemoveServerResult is the response from a RemoveServer operation. Call its -// ExtractErr method to determine if the request succeeded or failed. 
-type RemoveServerResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go deleted file mode 100644 index d99746cae92..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go +++ /dev/null @@ -1,32 +0,0 @@ -package secgroups - -import "github.com/gophercloud/gophercloud" - -const ( - secgrouppath = "os-security-groups" - rulepath = "os-security-group-rules" -) - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(secgrouppath, id) -} - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(secgrouppath) -} - -func listByServerURL(c *gophercloud.ServiceClient, serverID string) string { - return c.ServiceURL("servers", serverID, secgrouppath) -} - -func rootRuleURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rulepath) -} - -func resourceRuleURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rulepath, id) -} - -func serverActionURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("servers", id, "action") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go deleted file mode 100644 index 814bde37f3c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Package servergroups provides the ability to manage server groups. 
- -Example to List Server Groups - - allpages, err := servergroups.List(computeClient).AllPages() - if err != nil { - panic(err) - } - - allServerGroups, err := servergroups.ExtractServerGroups(allPages) - if err != nil { - panic(err) - } - - for _, sg := range allServerGroups { - fmt.Printf("%#v\n", sg) - } - -Example to Create a Server Group - - createOpts := servergroups.CreateOpts{ - Name: "my_sg", - Policies: []string{"anti-affinity"}, - } - - sg, err := servergroups.Create(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Server Group - - sgID := "7a6f29ad-e34d-4368-951a-58a08f11cfb7" - err := servergroups.Delete(computeClient, sgID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package servergroups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go deleted file mode 100644 index 1439a5a34ce..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go +++ /dev/null @@ -1,59 +0,0 @@ -package servergroups - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// List returns a Pager that allows you to iterate over a collection of -// ServerGroups. -func List(client *gophercloud.ServiceClient) pagination.Pager { - return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { - return ServerGroupPage{pagination.SinglePageBase(r)} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToServerGroupCreateMap() (map[string]interface{}, error) -} - -// CreateOpts specifies Server Group creation parameters. 
-type CreateOpts struct { - // Name is the name of the server group - Name string `json:"name" required:"true"` - - // Policies are the server group policies - Policies []string `json:"policies" required:"true"` -} - -// ToServerGroupCreateMap constructs a request body from CreateOpts. -func (opts CreateOpts) ToServerGroupCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "server_group") -} - -// Create requests the creation of a new Server Group. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToServerGroupCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Get returns data about a previously created ServerGroup. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// Delete requests the deletion of a previously allocated ServerGroup. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go deleted file mode 100644 index b9aeef98154..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go +++ /dev/null @@ -1,87 +0,0 @@ -package servergroups - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// A ServerGroup creates a policy for instance placement in the cloud. -type ServerGroup struct { - // ID is the unique ID of the Server Group. - ID string `json:"id"` - - // Name is the common name of the server group. 
- Name string `json:"name"` - - // Polices are the group policies. - // - // Normally a single policy is applied: - // - // "affinity" will place all servers within the server group on the - // same compute node. - // - // "anti-affinity" will place servers within the server group on different - // compute nodes. - Policies []string `json:"policies"` - - // Members are the members of the server group. - Members []string `json:"members"` - - // Metadata includes a list of all user-specified key-value pairs attached - // to the Server Group. - Metadata map[string]interface{} -} - -// ServerGroupPage stores a single page of all ServerGroups results from a -// List call. -type ServerGroupPage struct { - pagination.SinglePageBase -} - -// IsEmpty determines whether or not a ServerGroupsPage is empty. -func (page ServerGroupPage) IsEmpty() (bool, error) { - va, err := ExtractServerGroups(page) - return len(va) == 0, err -} - -// ExtractServerGroups interprets a page of results as a slice of -// ServerGroups. -func ExtractServerGroups(r pagination.Page) ([]ServerGroup, error) { - var s struct { - ServerGroups []ServerGroup `json:"server_groups"` - } - err := (r.(ServerGroupPage)).ExtractInto(&s) - return s.ServerGroups, err -} - -type ServerGroupResult struct { - gophercloud.Result -} - -// Extract is a method that attempts to interpret any Server Group resource -// response as a ServerGroup struct. -func (r ServerGroupResult) Extract() (*ServerGroup, error) { - var s struct { - ServerGroup *ServerGroup `json:"server_group"` - } - err := r.ExtractInto(&s) - return s.ServerGroup, err -} - -// CreateResult is the response from a Create operation. Call its Extract method -// to interpret it as a ServerGroup. -type CreateResult struct { - ServerGroupResult -} - -// GetResult is the response from a Get operation. Call its Extract method to -// interpret it as a ServerGroup. 
-type GetResult struct { - ServerGroupResult -} - -// DeleteResult is the response from a Delete operation. Call its ExtractErr -// method to determine if the call succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go deleted file mode 100644 index 9a1f99b1997..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go +++ /dev/null @@ -1,25 +0,0 @@ -package servergroups - -import "github.com/gophercloud/gophercloud" - -const resourcePath = "os-server-groups" - -func resourceURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(resourcePath) -} - -func listURL(c *gophercloud.ServiceClient) string { - return resourceURL(c) -} - -func createURL(c *gophercloud.ServiceClient) string { - return resourceURL(c) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(resourcePath, id) -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return getURL(c, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go deleted file mode 100644 index ab97edb7766..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Package startstop provides functionality to start and stop servers that have -been provisioned by the OpenStack Compute service. 
- -Example to Stop and Start a Server - - serverID := "47b6b7b7-568d-40e4-868c-d5c41735532e" - - err := startstop.Stop(computeClient, serverID).ExtractErr() - if err != nil { - panic(err) - } - - err := startstop.Start(computeClient, serverID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package startstop diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go deleted file mode 100644 index 5b4f3f39dd4..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go +++ /dev/null @@ -1,19 +0,0 @@ -package startstop - -import "github.com/gophercloud/gophercloud" - -func actionURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("servers", id, "action") -} - -// Start is the operation responsible for starting a Compute server. -func Start(client *gophercloud.ServiceClient, id string) (r StartResult) { - _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-start": nil}, nil, nil) - return -} - -// Stop is the operation responsible for stopping a Compute server. -func Stop(client *gophercloud.ServiceClient, id string) (r StopResult) { - _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-stop": nil}, nil, nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go deleted file mode 100644 index 83496893328..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go +++ /dev/null @@ -1,15 +0,0 @@ -package startstop - -import "github.com/gophercloud/gophercloud" - -// StartResult is the response from a Start operation. 
Call its ExtractErr -// method to determine if the request succeeded or failed. -type StartResult struct { - gophercloud.ErrResult -} - -// StopResult is the response from Stop operation. Call its ExtractErr -// method to determine if the request succeeded or failed. -type StopResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go deleted file mode 100644 index a32e8ffd587..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package tenantnetworks provides the ability for tenants to see information -about the networks they have access to. - -This is a deprecated API and will be removed from the Nova API service in a -future version. - -This API works in both Neutron and nova-network based OpenStack clouds. - -Example to List Networks Available to a Tenant - - allPages, err := tenantnetworks.List(computeClient).AllPages() - if err != nil { - panic(err) - } - - allNetworks, err := tenantnetworks.ExtractNetworks(allPages) - if err != nil { - panic(err) - } - - for _, network := range allNetworks { - fmt.Printf("%+v\n", network) - } -*/ -package tenantnetworks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go deleted file mode 100644 index 00899056fdd..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go +++ /dev/null @@ -1,19 +0,0 @@ -package tenantnetworks - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// List returns a Pager that allows you to iterate over a collection of Networks. 
-func List(client *gophercloud.ServiceClient) pagination.Pager { - return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { - return NetworkPage{pagination.SinglePageBase(r)} - }) -} - -// Get returns data about a previously created Network. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go deleted file mode 100644 index bda77d5f502..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go +++ /dev/null @@ -1,58 +0,0 @@ -package tenantnetworks - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// A Network represents a network that a server communicates on. -type Network struct { - // CIDR is the IPv4 subnet. - CIDR string `json:"cidr"` - - // ID is the UUID of the network. - ID string `json:"id"` - - // Name is the common name that the network has. - Name string `json:"label"` -} - -// NetworkPage stores a single page of all Networks results from a List call. -type NetworkPage struct { - pagination.SinglePageBase -} - -// IsEmpty determines whether or not a NetworkPage is empty. -func (page NetworkPage) IsEmpty() (bool, error) { - va, err := ExtractNetworks(page) - return len(va) == 0, err -} - -// ExtractNetworks interprets a page of results as a slice of Network. 
-func ExtractNetworks(r pagination.Page) ([]Network, error) { - var s struct { - Networks []Network `json:"networks"` - } - err := (r.(NetworkPage)).ExtractInto(&s) - return s.Networks, err -} - -type NetworkResult struct { - gophercloud.Result -} - -// Extract is a method that attempts to interpret any Network resource response -// as a Network struct. -func (r NetworkResult) Extract() (*Network, error) { - var s struct { - Network *Network `json:"network"` - } - err := r.ExtractInto(&s) - return s.Network, err -} - -// GetResult is the response from a Get operation. Call its Extract method to -// interpret it as a Network. -type GetResult struct { - NetworkResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go deleted file mode 100644 index 683041ded37..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go +++ /dev/null @@ -1,17 +0,0 @@ -package tenantnetworks - -import "github.com/gophercloud/gophercloud" - -const resourcePath = "os-tenant-networks" - -func resourceURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(resourcePath) -} - -func listURL(c *gophercloud.ServiceClient) string { - return resourceURL(c) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go deleted file mode 100644 index 484eb20000c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Package volumeattach provides the ability to attach and detach volumes -from servers. 
- -Example to Attach a Volume - - serverID := "7ac8686c-de71-4acb-9600-ec18b1a1ed6d" - volumeID := "87463836-f0e2-4029-abf6-20c8892a3103" - - createOpts := volumeattach.CreateOpts{ - Device: "/dev/vdc", - VolumeID: volumeID, - } - - result, err := volumeattach.Create(computeClient, serverID, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Detach a Volume - - serverID := "7ac8686c-de71-4acb-9600-ec18b1a1ed6d" - attachmentID := "ed081613-1c9b-4231-aa5e-ebfd4d87f983" - - err := volumeattach.Delete(computeClient, serverID, attachmentID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package volumeattach diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go deleted file mode 100644 index 6a262c212e1..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go +++ /dev/null @@ -1,60 +0,0 @@ -package volumeattach - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// List returns a Pager that allows you to iterate over a collection of -// VolumeAttachments. -func List(client *gophercloud.ServiceClient, serverID string) pagination.Pager { - return pagination.NewPager(client, listURL(client, serverID), func(r pagination.PageResult) pagination.Page { - return VolumeAttachmentPage{pagination.SinglePageBase(r)} - }) -} - -// CreateOptsBuilder allows extensions to add parameters to the Create request. -type CreateOptsBuilder interface { - ToVolumeAttachmentCreateMap() (map[string]interface{}, error) -} - -// CreateOpts specifies volume attachment creation or import parameters. -type CreateOpts struct { - // Device is the device that the volume will attach to the instance as. - // Omit for "auto". 
- Device string `json:"device,omitempty"` - - // VolumeID is the ID of the volume to attach to the instance. - VolumeID string `json:"volumeId" required:"true"` -} - -// ToVolumeAttachmentCreateMap constructs a request body from CreateOpts. -func (opts CreateOpts) ToVolumeAttachmentCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "volumeAttachment") -} - -// Create requests the creation of a new volume attachment on the server. -func Create(client *gophercloud.ServiceClient, serverID string, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToVolumeAttachmentCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client, serverID), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Get returns public data about a previously created VolumeAttachment. -func Get(client *gophercloud.ServiceClient, serverID, attachmentID string) (r GetResult) { - _, r.Err = client.Get(getURL(client, serverID, attachmentID), &r.Body, nil) - return -} - -// Delete requests the deletion of a previous stored VolumeAttachment from -// the server. -func Delete(client *gophercloud.ServiceClient, serverID, attachmentID string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, serverID, attachmentID), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go deleted file mode 100644 index 56d50347291..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go +++ /dev/null @@ -1,77 +0,0 @@ -package volumeattach - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// VolumeAttachment contains attachment information between a volume -// and server. 
-type VolumeAttachment struct { - // ID is a unique id of the attachment. - ID string `json:"id"` - - // Device is what device the volume is attached as. - Device string `json:"device"` - - // VolumeID is the ID of the attached volume. - VolumeID string `json:"volumeId"` - - // ServerID is the ID of the instance that has the volume attached. - ServerID string `json:"serverId"` -} - -// VolumeAttachmentPage stores a single page all of VolumeAttachment -// results from a List call. -type VolumeAttachmentPage struct { - pagination.SinglePageBase -} - -// IsEmpty determines whether or not a VolumeAttachmentPage is empty. -func (page VolumeAttachmentPage) IsEmpty() (bool, error) { - va, err := ExtractVolumeAttachments(page) - return len(va) == 0, err -} - -// ExtractVolumeAttachments interprets a page of results as a slice of -// VolumeAttachment. -func ExtractVolumeAttachments(r pagination.Page) ([]VolumeAttachment, error) { - var s struct { - VolumeAttachments []VolumeAttachment `json:"volumeAttachments"` - } - err := (r.(VolumeAttachmentPage)).ExtractInto(&s) - return s.VolumeAttachments, err -} - -// VolumeAttachmentResult is the result from a volume attachment operation. -type VolumeAttachmentResult struct { - gophercloud.Result -} - -// Extract is a method that attempts to interpret any VolumeAttachment resource -// response as a VolumeAttachment struct. -func (r VolumeAttachmentResult) Extract() (*VolumeAttachment, error) { - var s struct { - VolumeAttachment *VolumeAttachment `json:"volumeAttachment"` - } - err := r.ExtractInto(&s) - return s.VolumeAttachment, err -} - -// CreateResult is the response from a Create operation. Call its Extract method -// to interpret it as a VolumeAttachment. -type CreateResult struct { - VolumeAttachmentResult -} - -// GetResult is the response from a Get operation. Call its Extract method to -// interpret it as a VolumeAttachment. 
-type GetResult struct { - VolumeAttachmentResult -} - -// DeleteResult is the response from a Delete operation. Call its ExtractErr -// method to determine if the call succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go deleted file mode 100644 index 083f8dc4554..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go +++ /dev/null @@ -1,25 +0,0 @@ -package volumeattach - -import "github.com/gophercloud/gophercloud" - -const resourcePath = "os-volume_attachments" - -func resourceURL(c *gophercloud.ServiceClient, serverID string) string { - return c.ServiceURL("servers", serverID, resourcePath) -} - -func listURL(c *gophercloud.ServiceClient, serverID string) string { - return resourceURL(c, serverID) -} - -func createURL(c *gophercloud.ServiceClient, serverID string) string { - return resourceURL(c, serverID) -} - -func getURL(c *gophercloud.ServiceClient, serverID, aID string) string { - return c.ServiceURL("servers", serverID, resourcePath, aID) -} - -func deleteURL(c *gophercloud.ServiceClient, serverID, aID string) string { - return getURL(c, serverID, aID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go deleted file mode 100644 index 2661cfac209..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Package flavors provides information and interaction with the flavor API -in the OpenStack Compute service. - -A flavor is an available hardware configuration for a server. Each flavor -has a unique combination of disk space, memory capacity and priority for CPU -time. 
- -Example to List Flavors - - listOpts := flavors.ListOpts{ - AccessType: flavors.PublicAccess, - } - - allPages, err := flavors.ListDetail(computeClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allFlavors, err := flavors.ExtractFlavors(allPages) - if err != nil { - panic(err) - } - - for _, flavor := range allFlavors { - fmt.Printf("%+v\n", flavor) - } - -Example to Create a Flavor - - createOpts := flavors.CreateOpts{ - ID: "1", - Name: "m1.tiny", - Disk: gophercloud.IntToPointer(1), - RAM: 512, - VCPUs: 1, - RxTxFactor: 1.0, - } - - flavor, err := flavors.Create(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } -*/ -package flavors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go deleted file mode 100644 index 317fd8530c3..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go +++ /dev/null @@ -1,194 +0,0 @@ -package flavors - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToFlavorListQuery() (string, error) -} - -/* - AccessType maps to OpenStack's Flavor.is_public field. Although the is_public - field is boolean, the request options are ternary, which is why AccessType is - a string. The following values are allowed: - - The AccessType arguement is optional, and if it is not supplied, OpenStack - returns the PublicAccess flavors. -*/ -type AccessType string - -const ( - // PublicAccess returns public flavors and private flavors associated with - // that project. - PublicAccess AccessType = "true" - - // PrivateAccess (admin only) returns private flavors, across all projects. 
- PrivateAccess AccessType = "false" - - // AllAccess (admin only) returns public and private flavors across all - // projects. - AllAccess AccessType = "None" -) - -/* - ListOpts filters the results returned by the List() function. - For example, a flavor with a minDisk field of 10 will not be returned if you - specify MinDisk set to 20. - - Typically, software will use the last ID of the previous call to List to set - the Marker for the current call. -*/ -type ListOpts struct { - - // ChangesSince, if provided, instructs List to return only those things which - // have changed since the timestamp provided. - ChangesSince string `q:"changes-since"` - - // MinDisk and MinRAM, if provided, elides flavors which do not meet your - // criteria. - MinDisk int `q:"minDisk"` - MinRAM int `q:"minRam"` - - // Marker and Limit control paging. - // Marker instructs List where to start listing from. - Marker string `q:"marker"` - - // Limit instructs List to refrain from sending excessively large lists of - // flavors. - Limit int `q:"limit"` - - // AccessType, if provided, instructs List which set of flavors to return. - // If IsPublic not provided, flavors for the current project are returned. - AccessType AccessType `q:"is_public"` -} - -// ToFlavorListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToFlavorListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// ListDetail instructs OpenStack to provide a list of flavors. -// You may provide criteria by which List curtails its results for easier -// processing. 
-func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(client) - if opts != nil { - query, err := opts.ToFlavorListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return FlavorPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -type CreateOptsBuilder interface { - ToFlavorCreateMap() (map[string]interface{}, error) -} - -// CreateOpts specifies parameters used for creating a flavor. -type CreateOpts struct { - // Name is the name of the flavor. - Name string `json:"name" required:"true"` - - // RAM is the memory of the flavor, measured in MB. - RAM int `json:"ram" required:"true"` - - // VCPUs is the number of vcpus for the flavor. - VCPUs int `json:"vcpus" required:"true"` - - // Disk the amount of root disk space, measured in GB. - Disk *int `json:"disk" required:"true"` - - // ID is a unique ID for the flavor. - ID string `json:"id,omitempty"` - - // Swap is the amount of swap space for the flavor, measured in MB. - Swap *int `json:"swap,omitempty"` - - // RxTxFactor alters the network bandwidth of a flavor. - RxTxFactor float64 `json:"rxtx_factor,omitempty"` - - // IsPublic flags a flavor as being available to all projects or not. - IsPublic *bool `json:"os-flavor-access:is_public,omitempty"` - - // Ephemeral is the amount of ephemeral disk space, measured in GB. - Ephemeral *int `json:"OS-FLV-EXT-DATA:ephemeral,omitempty"` -} - -// ToFlavorCreateMap constructs a request body from CreateOpts. -func (opts CreateOpts) ToFlavorCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "flavor") -} - -// Create requests the creation of a new flavor. 
-func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToFlavorCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - }) - return -} - -// Get retrieves details of a single flavor. Use ExtractFlavor to convert its -// result into a Flavor. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// Delete deletes the specified flavor ID. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} - -// IDFromName is a convienience function that returns a flavor's ID given its -// name. -func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - count := 0 - id := "" - allPages, err := ListDetail(client, nil).AllPages() - if err != nil { - return "", err - } - - all, err := ExtractFlavors(allPages) - if err != nil { - return "", err - } - - for _, f := range all { - if f.Name == name { - count++ - id = f.ID - } - } - - switch count { - case 0: - err := &gophercloud.ErrResourceNotFound{} - err.ResourceType = "flavor" - err.Name = name - return "", err - case 1: - return id, nil - default: - err := &gophercloud.ErrMultipleResourcesFound{} - err.ResourceType = "flavor" - err.Name = name - err.Count = count - return "", err - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go deleted file mode 100644 index fda11d3e06a..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go +++ /dev/null @@ -1,133 +0,0 @@ -package flavors - -import ( - "encoding/json" - "strconv" - - "github.com/gophercloud/gophercloud" - 
"github.com/gophercloud/gophercloud/pagination" -) - -type commonResult struct { - gophercloud.Result -} - -type CreateResult struct { - commonResult -} - -// GetResult is the response of a Get operations. Call its Extract method to -// interpret it as a Flavor. -type GetResult struct { - commonResult -} - -// DeleteResult is the result from a Delete operation. Call its ExtractErr -// method to determine if the call succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// Extract provides access to the individual Flavor returned by the Get and -// Create functions. -func (r commonResult) Extract() (*Flavor, error) { - var s struct { - Flavor *Flavor `json:"flavor"` - } - err := r.ExtractInto(&s) - return s.Flavor, err -} - -// Flavor represent (virtual) hardware configurations for server resources -// in a region. -type Flavor struct { - // ID is the flavor's unique ID. - ID string `json:"id"` - - // Disk is the amount of root disk, measured in GB. - Disk int `json:"disk"` - - // RAM is the amount of memory, measured in MB. - RAM int `json:"ram"` - - // Name is the name of the flavor. - Name string `json:"name"` - - // RxTxFactor describes bandwidth alterations of the flavor. - RxTxFactor float64 `json:"rxtx_factor"` - - // Swap is the amount of swap space, measured in MB. - Swap int `json:"swap"` - - // VCPUs indicates how many (virtual) CPUs are available for this flavor. - VCPUs int `json:"vcpus"` - - // IsPublic indicates whether the flavor is public. 
- IsPublic bool `json:"is_public"` -} - -func (r *Flavor) UnmarshalJSON(b []byte) error { - type tmp Flavor - var s struct { - tmp - Swap interface{} `json:"swap"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = Flavor(s.tmp) - - switch t := s.Swap.(type) { - case float64: - r.Swap = int(t) - case string: - switch t { - case "": - r.Swap = 0 - default: - swap, err := strconv.ParseFloat(t, 64) - if err != nil { - return err - } - r.Swap = int(swap) - } - } - - return nil -} - -// FlavorPage contains a single page of all flavors from a ListDetails call. -type FlavorPage struct { - pagination.LinkedPageBase -} - -// IsEmpty determines if a FlavorPage contains any results. -func (page FlavorPage) IsEmpty() (bool, error) { - flavors, err := ExtractFlavors(page) - return len(flavors) == 0, err -} - -// NextPageURL uses the response's embedded link reference to navigate to the -// next page of results. -func (page FlavorPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"flavors_links"` - } - err := page.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// ExtractFlavors provides access to the list of flavors in a page acquired -// from the ListDetail operation. 
-func ExtractFlavors(r pagination.Page) ([]Flavor, error) { - var s struct { - Flavors []Flavor `json:"flavors"` - } - err := (r.(FlavorPage)).ExtractInto(&s) - return s.Flavors, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go deleted file mode 100644 index 518d05b3699..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go +++ /dev/null @@ -1,21 +0,0 @@ -package flavors - -import ( - "github.com/gophercloud/gophercloud" -) - -func getURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("flavors", id) -} - -func listURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("flavors", "detail") -} - -func createURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("flavors") -} - -func deleteURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("flavors", id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go deleted file mode 100644 index 22410a79a27..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Package images provides information and interaction with the images through -the OpenStack Compute service. - -This API is deprecated and will be removed from a future version of the Nova -API service. - -An image is a collection of files used to create or rebuild a server. -Operators provide a number of pre-built OS images by default. You may also -create custom images from cloud servers you have launched. 
- -Example to List Images - - listOpts := images.ListOpts{ - Limit: 2, - } - - allPages, err := images.ListDetail(computeClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allImages, err := images.ExtractImages(allPages) - if err != nil { - panic(err) - } - - for _, image := range allImages { - fmt.Printf("%+v\n", image) - } -*/ -package images diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go deleted file mode 100644 index 558b481b9e7..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go +++ /dev/null @@ -1,109 +0,0 @@ -package images - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// ListDetail request. -type ListOptsBuilder interface { - ToImageListQuery() (string, error) -} - -// ListOpts contain options filtering Images returned from a call to ListDetail. -type ListOpts struct { - // ChangesSince filters Images based on the last changed status (in date-time - // format). - ChangesSince string `q:"changes-since"` - - // Limit limits the number of Images to return. - Limit int `q:"limit"` - - // Mark is an Image UUID at which to set a marker. - Marker string `q:"marker"` - - // Name is the name of the Image. - Name string `q:"name"` - - // Server is the name of the Server (in URL format). - Server string `q:"server"` - - // Status is the current status of the Image. - Status string `q:"status"` - - // Type is the type of image (e.g. BASE, SERVER, ALL). - Type string `q:"type"` -} - -// ToImageListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToImageListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// ListDetail enumerates the available images. 
-func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listDetailURL(client) - if opts != nil { - query, err := opts.ToImageListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return ImagePage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// Get returns data about a specific image by its ID. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// Delete deletes the specified image ID. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} - -// IDFromName is a convienience function that returns an image's ID given its -// name. -func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - count := 0 - id := "" - allPages, err := ListDetail(client, nil).AllPages() - if err != nil { - return "", err - } - - all, err := ExtractImages(allPages) - if err != nil { - return "", err - } - - for _, f := range all { - if f.Name == name { - count++ - id = f.ID - } - } - - switch count { - case 0: - err := &gophercloud.ErrResourceNotFound{} - err.ResourceType = "image" - err.Name = name - return "", err - case 1: - return id, nil - default: - err := &gophercloud.ErrMultipleResourcesFound{} - err.ResourceType = "image" - err.Name = name - err.Count = count - return "", err - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go deleted file mode 100644 index 70d1018c721..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go +++ /dev/null @@ -1,95 +0,0 @@ -package images - -import ( - "github.com/gophercloud/gophercloud" - 
"github.com/gophercloud/gophercloud/pagination" -) - -// GetResult is the response from a Get operation. Call its Extract method to -// interpret it as an Image. -type GetResult struct { - gophercloud.Result -} - -// DeleteResult is the result from a Delete operation. Call its ExtractErr -// method to determine if the call succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// Extract interprets a GetResult as an Image. -func (r GetResult) Extract() (*Image, error) { - var s struct { - Image *Image `json:"image"` - } - err := r.ExtractInto(&s) - return s.Image, err -} - -// Image represents an Image returned by the Compute API. -type Image struct { - // ID is the unique ID of an image. - ID string - - // Created is the date when the image was created. - Created string - - // MinDisk is the minimum amount of disk a flavor must have to be able - // to create a server based on the image, measured in GB. - MinDisk int - - // MinRAM is the minimum amount of RAM a flavor must have to be able - // to create a server based on the image, measured in MB. - MinRAM int - - // Name provides a human-readable moniker for the OS image. - Name string - - // The Progress and Status fields indicate image-creation status. - Progress int - - // Status is the current status of the image. - Status string - - // Update is the date when the image was updated. - Updated string - - // Metadata provides free-form key/value pairs that further describe the - // image. - Metadata map[string]interface{} -} - -// ImagePage contains a single page of all Images returne from a ListDetail -// operation. Use ExtractImages to convert it into a slice of usable structs. -type ImagePage struct { - pagination.LinkedPageBase -} - -// IsEmpty returns true if an ImagePage contains no Image results. 
-func (page ImagePage) IsEmpty() (bool, error) { - images, err := ExtractImages(page) - return len(images) == 0, err -} - -// NextPageURL uses the response's embedded link reference to navigate to the -// next page of results. -func (page ImagePage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"images_links"` - } - err := page.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// ExtractImages converts a page of List results into a slice of usable Image -// structs. -func ExtractImages(r pagination.Page) ([]Image, error) { - var s struct { - Images []Image `json:"images"` - } - err := (r.(ImagePage)).ExtractInto(&s) - return s.Images, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go deleted file mode 100644 index 57787fb725e..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go +++ /dev/null @@ -1,15 +0,0 @@ -package images - -import "github.com/gophercloud/gophercloud" - -func listDetailURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("images", "detail") -} - -func getURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("images", id) -} - -func deleteURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("images", id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go deleted file mode 100644 index 3b0ab783626..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Package servers provides information and interaction with the server API -resource in the OpenStack Compute service. 
- -A server is a virtual machine instance in the compute system. In order for -one to be provisioned, a valid flavor and image are required. - -Example to List Servers - - listOpts := servers.ListOpts{ - AllTenants: true, - } - - allPages, err := servers.List(computeClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allServers, err := servers.ExtractServers(allPages) - if err != nil { - panic(err) - } - - for _, server := range allServers { - fmt.Printf("%+v\n", server) - } - -Example to Create a Server - - createOpts := servers.CreateOpts{ - Name: "server_name", - ImageRef: "image-uuid", - FlavorRef: "flavor-uuid", - } - - server, err := servers.Create(computeClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Server - - serverID := "d9072956-1560-487c-97f2-18bdf65ec749" - err := servers.Delete(computeClient, serverID).ExtractErr() - if err != nil { - panic(err) - } - -Example to Force Delete a Server - - serverID := "d9072956-1560-487c-97f2-18bdf65ec749" - err := servers.ForceDelete(computeClient, serverID).ExtractErr() - if err != nil { - panic(err) - } - -Example to Reboot a Server - - rebootOpts := servers.RebootOpts{ - Type: servers.SoftReboot, - } - - serverID := "d9072956-1560-487c-97f2-18bdf65ec749" - - err := servers.Reboot(computeClient, serverID, rebootOpts).ExtractErr() - if err != nil { - panic(err) - } - -Example to Rebuild a Server - - rebuildOpts := servers.RebuildOpts{ - Name: "new_name", - ImageID: "image-uuid", - } - - serverID := "d9072956-1560-487c-97f2-18bdf65ec749" - - server, err := servers.Rebuilt(computeClient, serverID, rebuildOpts).Extract() - if err != nil { - panic(err) - } - -Example to Resize a Server - - resizeOpts := servers.ResizeOpts{ - FlavorRef: "flavor-uuid", - } - - serverID := "d9072956-1560-487c-97f2-18bdf65ec749" - - err := servers.Resize(computeClient, serverID, resizeOpts).ExtractErr() - if err != nil { - panic(err) - } - - err = servers.ConfirmResize(computeClient, 
serverID).ExtractErr() - if err != nil { - panic(err) - } - -Example to Snapshot a Server - - snapshotOpts := servers.CreateImageOpts{ - Name: "snapshot_name", - } - - serverID := "d9072956-1560-487c-97f2-18bdf65ec749" - - image, err := servers.CreateImage(computeClient, serverID, snapshotOpts).ExtractImageID() - if err != nil { - panic(err) - } -*/ -package servers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go deleted file mode 100644 index c9f0e3c20b5..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go +++ /dev/null @@ -1,71 +0,0 @@ -package servers - -import ( - "fmt" - - "github.com/gophercloud/gophercloud" -) - -// ErrNeitherImageIDNorImageNameProvided is the error when neither the image -// ID nor the image name is provided for a server operation -type ErrNeitherImageIDNorImageNameProvided struct{ gophercloud.ErrMissingInput } - -func (e ErrNeitherImageIDNorImageNameProvided) Error() string { - return "One and only one of the image ID and the image name must be provided." -} - -// ErrNeitherFlavorIDNorFlavorNameProvided is the error when neither the flavor -// ID nor the flavor name is provided for a server operation -type ErrNeitherFlavorIDNorFlavorNameProvided struct{ gophercloud.ErrMissingInput } - -func (e ErrNeitherFlavorIDNorFlavorNameProvided) Error() string { - return "One and only one of the flavor ID and the flavor name must be provided." -} - -type ErrNoClientProvidedForIDByName struct{ gophercloud.ErrMissingInput } - -func (e ErrNoClientProvidedForIDByName) Error() string { - return "A service client must be provided to find a resource ID by name." 
-} - -// ErrInvalidHowParameterProvided is the error when an unknown value is given -// for the `how` argument -type ErrInvalidHowParameterProvided struct{ gophercloud.ErrInvalidInput } - -// ErrNoAdminPassProvided is the error when an administrative password isn't -// provided for a server operation -type ErrNoAdminPassProvided struct{ gophercloud.ErrMissingInput } - -// ErrNoImageIDProvided is the error when an image ID isn't provided for a server -// operation -type ErrNoImageIDProvided struct{ gophercloud.ErrMissingInput } - -// ErrNoIDProvided is the error when a server ID isn't provided for a server -// operation -type ErrNoIDProvided struct{ gophercloud.ErrMissingInput } - -// ErrServer is a generic error type for servers HTTP operations. -type ErrServer struct { - gophercloud.ErrUnexpectedResponseCode - ID string -} - -func (se ErrServer) Error() string { - return fmt.Sprintf("Error while executing HTTP request for server [%s]", se.ID) -} - -// Error404 overrides the generic 404 error message. -func (se ErrServer) Error404(e gophercloud.ErrUnexpectedResponseCode) error { - se.ErrUnexpectedResponseCode = e - return &ErrServerNotFound{se} -} - -// ErrServerNotFound is the error when a 404 is received during server HTTP -// operations. 
-type ErrServerNotFound struct { - ErrServer -} - -func (e ErrServerNotFound) Error() string { - return fmt.Sprintf("I couldn't find server [%s]", e.ID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go deleted file mode 100644 index c445f0ede0e..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go +++ /dev/null @@ -1,791 +0,0 @@ -package servers - -import ( - "encoding/base64" - "encoding/json" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" - "github.com/gophercloud/gophercloud/openstack/compute/v2/images" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToServerListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the server attributes you want to see returned. Marker and Limit are used -// for pagination. -type ListOpts struct { - // ChangesSince is a time/date stamp for when the server last changed status. - ChangesSince string `q:"changes-since"` - - // Image is the name of the image in URL format. - Image string `q:"image"` - - // Flavor is the name of the flavor in URL format. - Flavor string `q:"flavor"` - - // Name of the server as a string; can be queried with regular expressions. - // Realize that ?name=bob returns both bob and bobb. If you need to match bob - // only, you can use a regular expression matching the syntax of the - // underlying database server implemented for Compute. - Name string `q:"name"` - - // Status is the value of the status of the server so that you can filter on - // "ACTIVE" for example. 
- Status string `q:"status"` - - // Host is the name of the host as a string. - Host string `q:"host"` - - // Marker is a UUID of the server at which you want to set a marker. - Marker string `q:"marker"` - - // Limit is an integer value for the limit of values to return. - Limit int `q:"limit"` - - // AllTenants is a bool to show all tenants. - AllTenants bool `q:"all_tenants"` - - // TenantID lists servers for a particular tenant. - // Setting "AllTenants = true" is required. - TenantID string `q:"tenant_id"` -} - -// ToServerListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToServerListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List makes a request against the API to list servers accessible to you. -func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listDetailURL(client) - if opts != nil { - query, err := opts.ToServerListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return ServerPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToServerCreateMap() (map[string]interface{}, error) -} - -// Network is used within CreateOpts to control a new server's network -// attachments. -type Network struct { - // UUID of a network to attach to the newly provisioned server. - // Required unless Port is provided. - UUID string - - // Port of a neutron network to attach to the newly provisioned server. - // Required unless UUID is provided. - Port string - - // FixedIP specifies a fixed IPv4 address to be used on this network. - FixedIP string -} - -// Personality is an array of files that are injected into the server at launch. 
-type Personality []*File - -// File is used within CreateOpts and RebuildOpts to inject a file into the -// server at launch. -// File implements the json.Marshaler interface, so when a Create or Rebuild -// operation is requested, json.Marshal will call File's MarshalJSON method. -type File struct { - // Path of the file. - Path string - - // Contents of the file. Maximum content size is 255 bytes. - Contents []byte -} - -// MarshalJSON marshals the escaped file, base64 encoding the contents. -func (f *File) MarshalJSON() ([]byte, error) { - file := struct { - Path string `json:"path"` - Contents string `json:"contents"` - }{ - Path: f.Path, - Contents: base64.StdEncoding.EncodeToString(f.Contents), - } - return json.Marshal(file) -} - -// CreateOpts specifies server creation parameters. -type CreateOpts struct { - // Name is the name to assign to the newly launched server. - Name string `json:"name" required:"true"` - - // ImageRef [optional; required if ImageName is not provided] is the ID or - // full URL to the image that contains the server's OS and initial state. - // Also optional if using the boot-from-volume extension. - ImageRef string `json:"imageRef"` - - // ImageName [optional; required if ImageRef is not provided] is the name of - // the image that contains the server's OS and initial state. - // Also optional if using the boot-from-volume extension. - ImageName string `json:"-"` - - // FlavorRef [optional; required if FlavorName is not provided] is the ID or - // full URL to the flavor that describes the server's specs. - FlavorRef string `json:"flavorRef"` - - // FlavorName [optional; required if FlavorRef is not provided] is the name of - // the flavor that describes the server's specs. - FlavorName string `json:"-"` - - // SecurityGroups lists the names of the security groups to which this server - // should belong. - SecurityGroups []string `json:"-"` - - // UserData contains configuration information or scripts to use upon launch. 
- // Create will base64-encode it for you, if it isn't already. - UserData []byte `json:"-"` - - // AvailabilityZone in which to launch the server. - AvailabilityZone string `json:"availability_zone,omitempty"` - - // Networks dictates how this server will be attached to available networks. - // By default, the server will be attached to all isolated networks for the - // tenant. - Networks []Network `json:"-"` - - // Metadata contains key-value pairs (up to 255 bytes each) to attach to the - // server. - Metadata map[string]string `json:"metadata,omitempty"` - - // Personality includes files to inject into the server at launch. - // Create will base64-encode file contents for you. - Personality Personality `json:"personality,omitempty"` - - // ConfigDrive enables metadata injection through a configuration drive. - ConfigDrive *bool `json:"config_drive,omitempty"` - - // AdminPass sets the root user password. If not set, a randomly-generated - // password will be created and returned in the response. - AdminPass string `json:"adminPass,omitempty"` - - // AccessIPv4 specifies an IPv4 address for the instance. - AccessIPv4 string `json:"accessIPv4,omitempty"` - - // AccessIPv6 pecifies an IPv6 address for the instance. - AccessIPv6 string `json:"accessIPv6,omitempty"` - - // ServiceClient will allow calls to be made to retrieve an image or - // flavor ID by name. - ServiceClient *gophercloud.ServiceClient `json:"-"` -} - -// ToServerCreateMap assembles a request body based on the contents of a -// CreateOpts. 
-func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) { - sc := opts.ServiceClient - opts.ServiceClient = nil - b, err := gophercloud.BuildRequestBody(opts, "") - if err != nil { - return nil, err - } - - if opts.UserData != nil { - var userData string - if _, err := base64.StdEncoding.DecodeString(string(opts.UserData)); err != nil { - userData = base64.StdEncoding.EncodeToString(opts.UserData) - } else { - userData = string(opts.UserData) - } - b["user_data"] = &userData - } - - if len(opts.SecurityGroups) > 0 { - securityGroups := make([]map[string]interface{}, len(opts.SecurityGroups)) - for i, groupName := range opts.SecurityGroups { - securityGroups[i] = map[string]interface{}{"name": groupName} - } - b["security_groups"] = securityGroups - } - - if len(opts.Networks) > 0 { - networks := make([]map[string]interface{}, len(opts.Networks)) - for i, net := range opts.Networks { - networks[i] = make(map[string]interface{}) - if net.UUID != "" { - networks[i]["uuid"] = net.UUID - } - if net.Port != "" { - networks[i]["port"] = net.Port - } - if net.FixedIP != "" { - networks[i]["fixed_ip"] = net.FixedIP - } - } - b["networks"] = networks - } - - // If ImageRef isn't provided, check if ImageName was provided to ascertain - // the image ID. - if opts.ImageRef == "" { - if opts.ImageName != "" { - if sc == nil { - err := ErrNoClientProvidedForIDByName{} - err.Argument = "ServiceClient" - return nil, err - } - imageID, err := images.IDFromName(sc, opts.ImageName) - if err != nil { - return nil, err - } - b["imageRef"] = imageID - } - } - - // If FlavorRef isn't provided, use FlavorName to ascertain the flavor ID. 
- if opts.FlavorRef == "" { - if opts.FlavorName == "" { - err := ErrNeitherFlavorIDNorFlavorNameProvided{} - err.Argument = "FlavorRef/FlavorName" - return nil, err - } - if sc == nil { - err := ErrNoClientProvidedForIDByName{} - err.Argument = "ServiceClient" - return nil, err - } - flavorID, err := flavors.IDFromName(sc, opts.FlavorName) - if err != nil { - return nil, err - } - b["flavorRef"] = flavorID - } - - return map[string]interface{}{"server": b}, nil -} - -// Create requests a server to be provisioned to the user in the current tenant. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - reqBody, err := opts.ToServerCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(listURL(client), reqBody, &r.Body, nil) - return -} - -// Delete requests that a server previously provisioned be removed from your -// account. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} - -// ForceDelete forces the deletion of a server. -func ForceDelete(client *gophercloud.ServiceClient, id string) (r ActionResult) { - _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"forceDelete": ""}, nil, nil) - return -} - -// Get requests details on a single server, by ID. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - }) - return -} - -// UpdateOptsBuilder allows extensions to add additional attributes to the -// Update request. -type UpdateOptsBuilder interface { - ToServerUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts specifies the base attributes that may be updated on an existing -// server. -type UpdateOpts struct { - // Name changes the displayed name of the server. - // The server host name will *not* change. 
- // Server names are not constrained to be unique, even within the same tenant. - Name string `json:"name,omitempty"` - - // AccessIPv4 provides a new IPv4 address for the instance. - AccessIPv4 string `json:"accessIPv4,omitempty"` - - // AccessIPv6 provides a new IPv6 address for the instance. - AccessIPv6 string `json:"accessIPv6,omitempty"` -} - -// ToServerUpdateMap formats an UpdateOpts structure into a request body. -func (opts UpdateOpts) ToServerUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "server") -} - -// Update requests that various attributes of the indicated server be changed. -func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToServerUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// ChangeAdminPassword alters the administrator or root password for a specified -// server. -func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) (r ActionResult) { - b := map[string]interface{}{ - "changePassword": map[string]string{ - "adminPass": newPassword, - }, - } - _, r.Err = client.Post(actionURL(client, id), b, nil, nil) - return -} - -// RebootMethod describes the mechanisms by which a server reboot can be requested. -type RebootMethod string - -// These constants determine how a server should be rebooted. -// See the Reboot() function for further details. -const ( - SoftReboot RebootMethod = "SOFT" - HardReboot RebootMethod = "HARD" - OSReboot = SoftReboot - PowerCycle = HardReboot -) - -// RebootOptsBuilder allows extensions to add additional parameters to the -// reboot request. -type RebootOptsBuilder interface { - ToServerRebootMap() (map[string]interface{}, error) -} - -// RebootOpts provides options to the reboot request. 
-type RebootOpts struct { - // Type is the type of reboot to perform on the server. - Type RebootMethod `json:"type" required:"true"` -} - -// ToServerRebootMap builds a body for the reboot request. -func (opts *RebootOpts) ToServerRebootMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "reboot") -} - -/* - Reboot requests that a given server reboot. - - Two methods exist for rebooting a server: - - HardReboot (aka PowerCycle) starts the server instance by physically cutting - power to the machine, or if a VM, terminating it at the hypervisor level. - It's done. Caput. Full stop. - Then, after a brief while, power is rtored or the VM instance restarted. - - SoftReboot (aka OSReboot) simply tells the OS to restart under its own - procedure. - E.g., in Linux, asking it to enter runlevel 6, or executing - "sudo shutdown -r now", or by asking Windows to rtart the machine. -*/ -func Reboot(client *gophercloud.ServiceClient, id string, opts RebootOptsBuilder) (r ActionResult) { - b, err := opts.ToServerRebootMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(actionURL(client, id), b, nil, nil) - return -} - -// RebuildOptsBuilder allows extensions to provide additional parameters to the -// rebuild request. -type RebuildOptsBuilder interface { - ToServerRebuildMap() (map[string]interface{}, error) -} - -// RebuildOpts represents the configuration options used in a server rebuild -// operation. -type RebuildOpts struct { - // AdminPass is the server's admin password - AdminPass string `json:"adminPass,omitempty"` - - // ImageID is the ID of the image you want your server to be provisioned on. - ImageID string `json:"imageRef"` - - // ImageName is readable name of an image. - ImageName string `json:"-"` - - // Name to set the server to - Name string `json:"name,omitempty"` - - // AccessIPv4 [optional] provides a new IPv4 address for the instance. 
- AccessIPv4 string `json:"accessIPv4,omitempty"` - - // AccessIPv6 [optional] provides a new IPv6 address for the instance. - AccessIPv6 string `json:"accessIPv6,omitempty"` - - // Metadata [optional] contains key-value pairs (up to 255 bytes each) - // to attach to the server. - Metadata map[string]string `json:"metadata,omitempty"` - - // Personality [optional] includes files to inject into the server at launch. - // Rebuild will base64-encode file contents for you. - Personality Personality `json:"personality,omitempty"` - - // ServiceClient will allow calls to be made to retrieve an image or - // flavor ID by name. - ServiceClient *gophercloud.ServiceClient `json:"-"` -} - -// ToServerRebuildMap formats a RebuildOpts struct into a map for use in JSON -func (opts RebuildOpts) ToServerRebuildMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "") - if err != nil { - return nil, err - } - - // If ImageRef isn't provided, check if ImageName was provided to ascertain - // the image ID. - if opts.ImageID == "" { - if opts.ImageName != "" { - if opts.ServiceClient == nil { - err := ErrNoClientProvidedForIDByName{} - err.Argument = "ServiceClient" - return nil, err - } - imageID, err := images.IDFromName(opts.ServiceClient, opts.ImageName) - if err != nil { - return nil, err - } - b["imageRef"] = imageID - } - } - - return map[string]interface{}{"rebuild": b}, nil -} - -// Rebuild will reprovision the server according to the configuration options -// provided in the RebuildOpts struct. -func Rebuild(client *gophercloud.ServiceClient, id string, opts RebuildOptsBuilder) (r RebuildResult) { - b, err := opts.ToServerRebuildMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(actionURL(client, id), b, &r.Body, nil) - return -} - -// ResizeOptsBuilder allows extensions to add additional parameters to the -// resize request. 
-type ResizeOptsBuilder interface { - ToServerResizeMap() (map[string]interface{}, error) -} - -// ResizeOpts represents the configuration options used to control a Resize -// operation. -type ResizeOpts struct { - // FlavorRef is the ID of the flavor you wish your server to become. - FlavorRef string `json:"flavorRef" required:"true"` -} - -// ToServerResizeMap formats a ResizeOpts as a map that can be used as a JSON -// request body for the Resize request. -func (opts ResizeOpts) ToServerResizeMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "resize") -} - -// Resize instructs the provider to change the flavor of the server. -// -// Note that this implies rebuilding it. -// -// Unfortunately, one cannot pass rebuild parameters to the resize function. -// When the resize completes, the server will be in RESIZE_VERIFY state. -// While in this state, you can explore the use of the new server's -// configuration. If you like it, call ConfirmResize() to commit the resize -// permanently. Otherwise, call RevertResize() to restore the old configuration. -func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder) (r ActionResult) { - b, err := opts.ToServerResizeMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(actionURL(client, id), b, nil, nil) - return -} - -// ConfirmResize confirms a previous resize operation on a server. -// See Resize() for more details. -func ConfirmResize(client *gophercloud.ServiceClient, id string) (r ActionResult) { - _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"confirmResize": nil}, nil, &gophercloud.RequestOpts{ - OkCodes: []int{201, 202, 204}, - }) - return -} - -// RevertResize cancels a previous resize operation on a server. -// See Resize() for more details. 
-func RevertResize(client *gophercloud.ServiceClient, id string) (r ActionResult) { - _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"revertResize": nil}, nil, nil) - return -} - -// RescueOptsBuilder is an interface that allows extensions to override the -// default structure of a Rescue request. -type RescueOptsBuilder interface { - ToServerRescueMap() (map[string]interface{}, error) -} - -// RescueOpts represents the configuration options used to control a Rescue -// option. -type RescueOpts struct { - // AdminPass is the desired administrative password for the instance in - // RESCUE mode. If it's left blank, the server will generate a password. - AdminPass string `json:"adminPass,omitempty"` -} - -// ToServerRescueMap formats a RescueOpts as a map that can be used as a JSON -// request body for the Rescue request. -func (opts RescueOpts) ToServerRescueMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "rescue") -} - -// Rescue instructs the provider to place the server into RESCUE mode. -func Rescue(client *gophercloud.ServiceClient, id string, opts RescueOptsBuilder) (r RescueResult) { - b, err := opts.ToServerRescueMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// ResetMetadataOptsBuilder allows extensions to add additional parameters to -// the Reset request. -type ResetMetadataOptsBuilder interface { - ToMetadataResetMap() (map[string]interface{}, error) -} - -// MetadataOpts is a map that contains key-value pairs. -type MetadataOpts map[string]string - -// ToMetadataResetMap assembles a body for a Reset request based on the contents -// of a MetadataOpts. 
-func (opts MetadataOpts) ToMetadataResetMap() (map[string]interface{}, error) { - return map[string]interface{}{"metadata": opts}, nil -} - -// ToMetadataUpdateMap assembles a body for an Update request based on the -// contents of a MetadataOpts. -func (opts MetadataOpts) ToMetadataUpdateMap() (map[string]interface{}, error) { - return map[string]interface{}{"metadata": opts}, nil -} - -// ResetMetadata will create multiple new key-value pairs for the given server -// ID. -// Note: Using this operation will erase any already-existing metadata and -// create the new metadata provided. To keep any already-existing metadata, -// use the UpdateMetadatas or UpdateMetadata function. -func ResetMetadata(client *gophercloud.ServiceClient, id string, opts ResetMetadataOptsBuilder) (r ResetMetadataResult) { - b, err := opts.ToMetadataResetMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(metadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Metadata requests all the metadata for the given server ID. -func Metadata(client *gophercloud.ServiceClient, id string) (r GetMetadataResult) { - _, r.Err = client.Get(metadataURL(client, id), &r.Body, nil) - return -} - -// UpdateMetadataOptsBuilder allows extensions to add additional parameters to -// the Create request. -type UpdateMetadataOptsBuilder interface { - ToMetadataUpdateMap() (map[string]interface{}, error) -} - -// UpdateMetadata updates (or creates) all the metadata specified by opts for -// the given server ID. This operation does not affect already-existing metadata -// that is not specified by opts. 
-func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) { - b, err := opts.ToMetadataUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(metadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// MetadatumOptsBuilder allows extensions to add additional parameters to the -// Create request. -type MetadatumOptsBuilder interface { - ToMetadatumCreateMap() (map[string]interface{}, string, error) -} - -// MetadatumOpts is a map of length one that contains a key-value pair. -type MetadatumOpts map[string]string - -// ToMetadatumCreateMap assembles a body for a Create request based on the -// contents of a MetadataumOpts. -func (opts MetadatumOpts) ToMetadatumCreateMap() (map[string]interface{}, string, error) { - if len(opts) != 1 { - err := gophercloud.ErrInvalidInput{} - err.Argument = "servers.MetadatumOpts" - err.Info = "Must have 1 and only 1 key-value pair" - return nil, "", err - } - metadatum := map[string]interface{}{"meta": opts} - var key string - for k := range metadatum["meta"].(MetadatumOpts) { - key = k - } - return metadatum, key, nil -} - -// CreateMetadatum will create or update the key-value pair with the given key -// for the given server ID. -func CreateMetadatum(client *gophercloud.ServiceClient, id string, opts MetadatumOptsBuilder) (r CreateMetadatumResult) { - b, key, err := opts.ToMetadatumCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(metadatumURL(client, id, key), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Metadatum requests the key-value pair with the given key for the given -// server ID. 
-func Metadatum(client *gophercloud.ServiceClient, id, key string) (r GetMetadatumResult) { - _, r.Err = client.Get(metadatumURL(client, id, key), &r.Body, nil) - return -} - -// DeleteMetadatum will delete the key-value pair with the given key for the -// given server ID. -func DeleteMetadatum(client *gophercloud.ServiceClient, id, key string) (r DeleteMetadatumResult) { - _, r.Err = client.Delete(metadatumURL(client, id, key), nil) - return -} - -// ListAddresses makes a request against the API to list the servers IP -// addresses. -func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager { - return pagination.NewPager(client, listAddressesURL(client, id), func(r pagination.PageResult) pagination.Page { - return AddressPage{pagination.SinglePageBase(r)} - }) -} - -// ListAddressesByNetwork makes a request against the API to list the servers IP -// addresses for the given network. -func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager { - return pagination.NewPager(client, listAddressesByNetworkURL(client, id, network), func(r pagination.PageResult) pagination.Page { - return NetworkAddressPage{pagination.SinglePageBase(r)} - }) -} - -// CreateImageOptsBuilder allows extensions to add additional parameters to the -// CreateImage request. -type CreateImageOptsBuilder interface { - ToServerCreateImageMap() (map[string]interface{}, error) -} - -// CreateImageOpts provides options to pass to the CreateImage request. -type CreateImageOpts struct { - // Name of the image/snapshot. - Name string `json:"name" required:"true"` - - // Metadata contains key-value pairs (up to 255 bytes each) to attach to - // the created image. - Metadata map[string]string `json:"metadata,omitempty"` -} - -// ToServerCreateImageMap formats a CreateImageOpts structure into a request -// body. 
-func (opts CreateImageOpts) ToServerCreateImageMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "createImage") -} - -// CreateImage makes a request against the nova API to schedule an image to be -// created of the server -func CreateImage(client *gophercloud.ServiceClient, id string, opts CreateImageOptsBuilder) (r CreateImageResult) { - b, err := opts.ToServerCreateImageMap() - if err != nil { - r.Err = err - return - } - resp, err := client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - r.Err = err - r.Header = resp.Header - return -} - -// IDFromName is a convienience function that returns a server's ID given its -// name. -func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - count := 0 - id := "" - allPages, err := List(client, nil).AllPages() - if err != nil { - return "", err - } - - all, err := ExtractServers(allPages) - if err != nil { - return "", err - } - - for _, f := range all { - if f.Name == name { - count++ - id = f.ID - } - } - - switch count { - case 0: - return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "server"} - case 1: - return id, nil - default: - return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "server"} - } -} - -// GetPassword makes a request against the nova API to get the encrypted -// administrative password. 
-func GetPassword(client *gophercloud.ServiceClient, serverId string) (r GetPasswordResult) { - _, r.Err = client.Get(passwordURL(client, serverId), &r.Body, nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go deleted file mode 100644 index 7ef80e92e2e..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go +++ /dev/null @@ -1,404 +0,0 @@ -package servers - -import ( - "crypto/rsa" - "encoding/base64" - "encoding/json" - "fmt" - "net/url" - "path" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -type serverResult struct { - gophercloud.Result -} - -// Extract interprets any serverResult as a Server, if possible. -func (r serverResult) Extract() (*Server, error) { - var s Server - err := r.ExtractInto(&s) - return &s, err -} - -func (r serverResult) ExtractInto(v interface{}) error { - return r.Result.ExtractIntoStructPtr(v, "server") -} - -func ExtractServersInto(r pagination.Page, v interface{}) error { - return r.(ServerPage).Result.ExtractIntoSlicePtr(v, "servers") -} - -// CreateResult is the response from a Create operation. Call its Extract -// method to interpret it as a Server. -type CreateResult struct { - serverResult -} - -// GetResult is the response from a Get operation. Call its Extract -// method to interpret it as a Server. -type GetResult struct { - serverResult -} - -// UpdateResult is the response from an Update operation. Call its Extract -// method to interpret it as a Server. -type UpdateResult struct { - serverResult -} - -// DeleteResult is the response from a Delete operation. Call its ExtractErr -// method to determine if the call succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// RebuildResult is the response from a Rebuild operation. 
Call its Extract -// method to interpret it as a Server. -type RebuildResult struct { - serverResult -} - -// ActionResult represents the result of server action operations, like reboot. -// Call its ExtractErr method to determine if the action succeeded or failed. -type ActionResult struct { - gophercloud.ErrResult -} - -// RescueResult is the response from a Rescue operation. Call its ExtractErr -// method to determine if the call succeeded or failed. -type RescueResult struct { - ActionResult -} - -// CreateImageResult is the response from a CreateImage operation. Call its -// ExtractImageID method to retrieve the ID of the newly created image. -type CreateImageResult struct { - gophercloud.Result -} - -// GetPasswordResult represent the result of a get os-server-password operation. -// Call its ExtractPassword method to retrieve the password. -type GetPasswordResult struct { - gophercloud.Result -} - -// ExtractPassword gets the encrypted password. -// If privateKey != nil the password is decrypted with the private key. 
-// If privateKey == nil the encrypted password is returned and can be decrypted -// with: -// echo '' | base64 -D | openssl rsautl -decrypt -inkey -func (r GetPasswordResult) ExtractPassword(privateKey *rsa.PrivateKey) (string, error) { - var s struct { - Password string `json:"password"` - } - err := r.ExtractInto(&s) - if err == nil && privateKey != nil && s.Password != "" { - return decryptPassword(s.Password, privateKey) - } - return s.Password, err -} - -func decryptPassword(encryptedPassword string, privateKey *rsa.PrivateKey) (string, error) { - b64EncryptedPassword := make([]byte, base64.StdEncoding.DecodedLen(len(encryptedPassword))) - - n, err := base64.StdEncoding.Decode(b64EncryptedPassword, []byte(encryptedPassword)) - if err != nil { - return "", fmt.Errorf("Failed to base64 decode encrypted password: %s", err) - } - password, err := rsa.DecryptPKCS1v15(nil, privateKey, b64EncryptedPassword[0:n]) - if err != nil { - return "", fmt.Errorf("Failed to decrypt password: %s", err) - } - - return string(password), nil -} - -// ExtractImageID gets the ID of the newly created server image from the header. -func (r CreateImageResult) ExtractImageID() (string, error) { - if r.Err != nil { - return "", r.Err - } - // Get the image id from the header - u, err := url.ParseRequestURI(r.Header.Get("Location")) - if err != nil { - return "", err - } - imageID := path.Base(u.Path) - if imageID == "." || imageID == "/" { - return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u) - } - return imageID, nil -} - -// Extract interprets any RescueResult as an AdminPass, if possible. -func (r RescueResult) Extract() (string, error) { - var s struct { - AdminPass string `json:"adminPass"` - } - err := r.ExtractInto(&s) - return s.AdminPass, err -} - -// Server represents a server/instance in the OpenStack cloud. 
-type Server struct { - // ID uniquely identifies this server amongst all other servers, - // including those not accessible to the current tenant. - ID string `json:"id"` - - // TenantID identifies the tenant owning this server resource. - TenantID string `json:"tenant_id"` - - // UserID uniquely identifies the user account owning the tenant. - UserID string `json:"user_id"` - - // Name contains the human-readable name for the server. - Name string `json:"name"` - - // Updated and Created contain ISO-8601 timestamps of when the state of the - // server last changed, and when it was created. - Updated time.Time `json:"updated"` - Created time.Time `json:"created"` - - // HostID is the host where the server is located in the cloud. - HostID string `json:"hostid"` - - // Status contains the current operational status of the server, - // such as IN_PROGRESS or ACTIVE. - Status string `json:"status"` - - // Progress ranges from 0..100. - // A request made against the server completes only once Progress reaches 100. - Progress int `json:"progress"` - - // AccessIPv4 and AccessIPv6 contain the IP addresses of the server, - // suitable for remote access for administration. - AccessIPv4 string `json:"accessIPv4"` - AccessIPv6 string `json:"accessIPv6"` - - // Image refers to a JSON object, which itself indicates the OS image used to - // deploy the server. - Image map[string]interface{} `json:"-"` - - // Flavor refers to a JSON object, which itself indicates the hardware - // configuration of the deployed server. - Flavor map[string]interface{} `json:"flavor"` - - // Addresses includes a list of all IP addresses assigned to the server, - // keyed by pool. - Addresses map[string]interface{} `json:"addresses"` - - // Metadata includes a list of all user-specified key-value pairs attached - // to the server. 
- Metadata map[string]string `json:"metadata"` - - // Links includes HTTP references to the itself, useful for passing along to - // other APIs that might want a server reference. - Links []interface{} `json:"links"` - - // KeyName indicates which public key was injected into the server on launch. - KeyName string `json:"key_name"` - - // AdminPass will generally be empty (""). However, it will contain the - // administrative password chosen when provisioning a new server without a - // set AdminPass setting in the first place. - // Note that this is the ONLY time this field will be valid. - AdminPass string `json:"adminPass"` - - // SecurityGroups includes the security groups that this instance has applied - // to it. - SecurityGroups []map[string]interface{} `json:"security_groups"` -} - -func (r *Server) UnmarshalJSON(b []byte) error { - type tmp Server - var s struct { - tmp - Image interface{} `json:"image"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = Server(s.tmp) - - switch t := s.Image.(type) { - case map[string]interface{}: - r.Image = t - case string: - switch t { - case "": - r.Image = nil - } - } - - return err -} - -// ServerPage abstracts the raw results of making a List() request against -// the API. As OpenStack extensions may freely alter the response bodies of -// structures returned to the client, you may only safely access the data -// provided through the ExtractServers call. -type ServerPage struct { - pagination.LinkedPageBase -} - -// IsEmpty returns true if a page contains no Server results. -func (r ServerPage) IsEmpty() (bool, error) { - s, err := ExtractServers(r) - return len(s) == 0, err -} - -// NextPageURL uses the response's embedded link reference to navigate to the -// next page of results. 
-func (r ServerPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"servers_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// ExtractServers interprets the results of a single page from a List() call, -// producing a slice of Server entities. -func ExtractServers(r pagination.Page) ([]Server, error) { - var s []Server - err := ExtractServersInto(r, &s) - return s, err -} - -// MetadataResult contains the result of a call for (potentially) multiple -// key-value pairs. Call its Extract method to interpret it as a -// map[string]interface. -type MetadataResult struct { - gophercloud.Result -} - -// GetMetadataResult contains the result of a Get operation. Call its Extract -// method to interpret it as a map[string]interface. -type GetMetadataResult struct { - MetadataResult -} - -// ResetMetadataResult contains the result of a Reset operation. Call its -// Extract method to interpret it as a map[string]interface. -type ResetMetadataResult struct { - MetadataResult -} - -// UpdateMetadataResult contains the result of an Update operation. Call its -// Extract method to interpret it as a map[string]interface. -type UpdateMetadataResult struct { - MetadataResult -} - -// MetadatumResult contains the result of a call for individual a single -// key-value pair. -type MetadatumResult struct { - gophercloud.Result -} - -// GetMetadatumResult contains the result of a Get operation. Call its Extract -// method to interpret it as a map[string]interface. -type GetMetadatumResult struct { - MetadatumResult -} - -// CreateMetadatumResult contains the result of a Create operation. Call its -// Extract method to interpret it as a map[string]interface. -type CreateMetadatumResult struct { - MetadatumResult -} - -// DeleteMetadatumResult contains the result of a Delete operation. Call its -// ExtractErr method to determine if the call succeeded or failed. 
-type DeleteMetadatumResult struct { - gophercloud.ErrResult -} - -// Extract interprets any MetadataResult as a Metadata, if possible. -func (r MetadataResult) Extract() (map[string]string, error) { - var s struct { - Metadata map[string]string `json:"metadata"` - } - err := r.ExtractInto(&s) - return s.Metadata, err -} - -// Extract interprets any MetadatumResult as a Metadatum, if possible. -func (r MetadatumResult) Extract() (map[string]string, error) { - var s struct { - Metadatum map[string]string `json:"meta"` - } - err := r.ExtractInto(&s) - return s.Metadatum, err -} - -// Address represents an IP address. -type Address struct { - Version int `json:"version"` - Address string `json:"addr"` -} - -// AddressPage abstracts the raw results of making a ListAddresses() request -// against the API. As OpenStack extensions may freely alter the response bodies -// of structures returned to the client, you may only safely access the data -// provided through the ExtractAddresses call. -type AddressPage struct { - pagination.SinglePageBase -} - -// IsEmpty returns true if an AddressPage contains no networks. -func (r AddressPage) IsEmpty() (bool, error) { - addresses, err := ExtractAddresses(r) - return len(addresses) == 0, err -} - -// ExtractAddresses interprets the results of a single page from a -// ListAddresses() call, producing a map of addresses. -func ExtractAddresses(r pagination.Page) (map[string][]Address, error) { - var s struct { - Addresses map[string][]Address `json:"addresses"` - } - err := (r.(AddressPage)).ExtractInto(&s) - return s.Addresses, err -} - -// NetworkAddressPage abstracts the raw results of making a -// ListAddressesByNetwork() request against the API. -// As OpenStack extensions may freely alter the response bodies of structures -// returned to the client, you may only safely access the data provided through -// the ExtractAddresses call. 
-type NetworkAddressPage struct { - pagination.SinglePageBase -} - -// IsEmpty returns true if a NetworkAddressPage contains no addresses. -func (r NetworkAddressPage) IsEmpty() (bool, error) { - addresses, err := ExtractNetworkAddresses(r) - return len(addresses) == 0, err -} - -// ExtractNetworkAddresses interprets the results of a single page from a -// ListAddressesByNetwork() call, producing a slice of addresses. -func ExtractNetworkAddresses(r pagination.Page) ([]Address, error) { - var s map[string][]Address - err := (r.(NetworkAddressPage)).ExtractInto(&s) - if err != nil { - return nil, err - } - - var key string - for k := range s { - key = k - } - - return s[key], err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go deleted file mode 100644 index e892e8d9259..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go +++ /dev/null @@ -1,51 +0,0 @@ -package servers - -import "github.com/gophercloud/gophercloud" - -func createURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("servers") -} - -func listURL(client *gophercloud.ServiceClient) string { - return createURL(client) -} - -func listDetailURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("servers", "detail") -} - -func deleteURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("servers", id) -} - -func getURL(client *gophercloud.ServiceClient, id string) string { - return deleteURL(client, id) -} - -func updateURL(client *gophercloud.ServiceClient, id string) string { - return deleteURL(client, id) -} - -func actionURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("servers", id, "action") -} - -func metadatumURL(client *gophercloud.ServiceClient, id, key string) string { - return client.ServiceURL("servers", id, 
"metadata", key) -} - -func metadataURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("servers", id, "metadata") -} - -func listAddressesURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("servers", id, "ips") -} - -func listAddressesByNetworkURL(client *gophercloud.ServiceClient, id, network string) string { - return client.ServiceURL("servers", id, "ips", network) -} - -func passwordURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("servers", id, "os-server-password") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go deleted file mode 100644 index cadef054506..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go +++ /dev/null @@ -1,21 +0,0 @@ -package servers - -import "github.com/gophercloud/gophercloud" - -// WaitForStatus will continually poll a server until it successfully -// transitions to a specified status. It will do this for at most the number -// of seconds specified. -func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { - return gophercloud.WaitFor(secs, func() (bool, error) { - current, err := Get(c, id).Extract() - if err != nil { - return false, err - } - - if current.Status == status { - return true, nil - } - - return false, nil - }) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go deleted file mode 100644 index 617fafa6314..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Package recordsets provides information and interaction with the zone API -resource for the OpenStack DNS service. 
- -Example to List RecordSets by Zone - - listOpts := recordsets.ListOpts{ - Type: "A", - } - - zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd" - - allPages, err := recordsets.ListByZone(dnsClient, zoneID, listOpts).AllPages() - if err != nil { - panic(err) - } - - allRRs, err := recordsets.ExtractRecordSets(allPages() - if err != nil { - panic(err) - } - - for _, rr := range allRRs { - fmt.Printf("%+v\n", rr) - } - -Example to Create a RecordSet - - createOpts := recordsets.CreateOpts{ - Name: "example.com.", - Type: "A", - TTL: 3600, - Description: "This is a recordset.", - Records: []string{"10.1.0.2"}, - } - - zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd" - - rr, err := recordsets.Create(dnsClient, zoneID, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a RecordSet - - zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd" - recordsetID := "d96ed01a-b439-4eb8-9b90-7a9f71017f7b" - - err := recordsets.Delete(dnsClient, zoneID, recordsetID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package recordsets diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go deleted file mode 100644 index 2d6ecdc3dcf..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go +++ /dev/null @@ -1,166 +0,0 @@ -package recordsets - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToRecordSetListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the server attributes you want to see returned. Marker and Limit are used -// for pagination. 
-// https://developer.openstack.org/api-ref/dns/ -type ListOpts struct { - // Integer value for the limit of values to return. - Limit int `q:"limit"` - - // UUID of the recordset at which you want to set a marker. - Marker string `q:"marker"` - - Data string `q:"data"` - Description string `q:"description"` - Name string `q:"name"` - SortDir string `q:"sort_dir"` - SortKey string `q:"sort_key"` - Status string `q:"status"` - TTL int `q:"ttl"` - Type string `q:"type"` - ZoneID string `q:"zone_id"` -} - -// ToRecordSetListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToRecordSetListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// ListByZone implements the recordset list request. -func ListByZone(client *gophercloud.ServiceClient, zoneID string, opts ListOptsBuilder) pagination.Pager { - url := baseURL(client, zoneID) - if opts != nil { - query, err := opts.ToRecordSetListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return RecordSetPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// Get implements the recordset Get request. -func Get(client *gophercloud.ServiceClient, zoneID string, rrsetID string) (r GetResult) { - _, r.Err = client.Get(rrsetURL(client, zoneID, rrsetID), &r.Body, nil) - return -} - -// CreateOptsBuilder allows extensions to add additional attributes to the -// Create request. -type CreateOptsBuilder interface { - ToRecordSetCreateMap() (map[string]interface{}, error) -} - -// CreateOpts specifies the base attributes that may be used to create a -// RecordSet. -type CreateOpts struct { - // Name is the name of the RecordSet. - Name string `json:"name" required:"true"` - - // Description is a description of the RecordSet. - Description string `json:"description,omitempty"` - - // Records are the DNS records of the RecordSet. 
- Records []string `json:"records,omitempty"` - - // TTL is the time to live of the RecordSet. - TTL int `json:"ttl,omitempty"` - - // Type is the RRTYPE of the RecordSet. - Type string `json:"type,omitempty"` -} - -// ToRecordSetCreateMap formats an CreateOpts structure into a request body. -func (opts CreateOpts) ToRecordSetCreateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "") - if err != nil { - return nil, err - } - - return b, nil -} - -// Create creates a recordset in a given zone. -func Create(client *gophercloud.ServiceClient, zoneID string, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToRecordSetCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(baseURL(client, zoneID), &b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{201, 202}, - }) - return -} - -// UpdateOptsBuilder allows extensions to add additional attributes to the -// Update request. -type UpdateOptsBuilder interface { - ToRecordSetUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts specifies the base attributes that may be updated on an existing -// RecordSet. -type UpdateOpts struct { - // Description is a description of the RecordSet. - Description string `json:"description,omitempty"` - - // TTL is the time to live of the RecordSet. - TTL int `json:"ttl,omitempty"` - - // Records are the DNS records of the RecordSet. - Records []string `json:"records,omitempty"` -} - -// ToRecordSetUpdateMap formats an UpdateOpts structure into a request body. 
-func (opts UpdateOpts) ToRecordSetUpdateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "") - if err != nil { - return nil, err - } - - if opts.TTL > 0 { - b["ttl"] = opts.TTL - } else { - b["ttl"] = nil - } - - return b, nil -} - -// Update updates a recordset in a given zone -func Update(client *gophercloud.ServiceClient, zoneID string, rrsetID string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToRecordSetUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(rrsetURL(client, zoneID, rrsetID), &b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - return -} - -// Delete removes an existing RecordSet. -func Delete(client *gophercloud.ServiceClient, zoneID string, rrsetID string) (r DeleteResult) { - _, r.Err = client.Delete(rrsetURL(client, zoneID, rrsetID), &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go deleted file mode 100644 index 0fdc1fe52ea..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go +++ /dev/null @@ -1,147 +0,0 @@ -package recordsets - -import ( - "encoding/json" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -type commonResult struct { - gophercloud.Result -} - -// Extract interprets a GetResult, CreateResult or UpdateResult as a RecordSet. -// An error is returned if the original call or the extraction failed. -func (r commonResult) Extract() (*RecordSet, error) { - var s *RecordSet - err := r.ExtractInto(&s) - return s, err -} - -// CreateResult is the result of a Create operation. Call its Extract method to -// interpret the result as a RecordSet. -type CreateResult struct { - commonResult -} - -// GetResult is the result of a Get operation. 
Call its Extract method to -// interpret the result as a RecordSet. -type GetResult struct { - commonResult -} - -// RecordSetPage is a single page of RecordSet results. -type RecordSetPage struct { - pagination.LinkedPageBase -} - -// UpdateResult is result of an Update operation. Call its Extract method to -// interpret the result as a RecordSet. -type UpdateResult struct { - commonResult -} - -// DeleteResult is result of a Delete operation. Call its ExtractErr method to -// determine if the operation succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// IsEmpty returns true if the page contains no results. -func (r RecordSetPage) IsEmpty() (bool, error) { - s, err := ExtractRecordSets(r) - return len(s) == 0, err -} - -// ExtractRecordSets extracts a slice of RecordSets from a List result. -func ExtractRecordSets(r pagination.Page) ([]RecordSet, error) { - var s struct { - RecordSets []RecordSet `json:"recordsets"` - } - err := (r.(RecordSetPage)).ExtractInto(&s) - return s.RecordSets, err -} - -// RecordSet represents a DNS Record Set. -type RecordSet struct { - // ID is the unique ID of the recordset - ID string `json:"id"` - - // ZoneID is the ID of the zone the recordset belongs to. - ZoneID string `json:"zone_id"` - - // ProjectID is the ID of the project that owns the recordset. - ProjectID string `json:"project_id"` - - // Name is the name of the recordset. - Name string `json:"name"` - - // ZoneName is the name of the zone the recordset belongs to. - ZoneName string `json:"zone_name"` - - // Type is the RRTYPE of the recordset. - Type string `json:"type"` - - // Records are the DNS records of the recordset. - Records []string `json:"records"` - - // TTL is the time to live of the recordset. - TTL int `json:"ttl"` - - // Status is the status of the recordset. - Status string `json:"status"` - - // Action is the current action in progress of the recordset. 
- Action string `json:"action"` - - // Description is the description of the recordset. - Description string `json:"description"` - - // Version is the revision of the recordset. - Version int `json:"version"` - - // CreatedAt is the date when the recordset was created. - CreatedAt time.Time `json:"-"` - - // UpdatedAt is the date when the recordset was updated. - UpdatedAt time.Time `json:"-"` - - // Links includes HTTP references to the itself, - // useful for passing along to other APIs that might want a recordset - // reference. - Links []gophercloud.Link `json:"-"` -} - -func (r *RecordSet) UnmarshalJSON(b []byte) error { - type tmp RecordSet - var s struct { - tmp - CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` - Links map[string]interface{} `json:"links"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = RecordSet(s.tmp) - - r.CreatedAt = time.Time(s.CreatedAt) - r.UpdatedAt = time.Time(s.UpdatedAt) - - if s.Links != nil { - for rel, href := range s.Links { - if v, ok := href.(string); ok { - link := gophercloud.Link{ - Rel: rel, - Href: v, - } - r.Links = append(r.Links, link) - } - } - } - - return err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go deleted file mode 100644 index 5ec18d1bb78..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go +++ /dev/null @@ -1,11 +0,0 @@ -package recordsets - -import "github.com/gophercloud/gophercloud" - -func baseURL(c *gophercloud.ServiceClient, zoneID string) string { - return c.ServiceURL("zones", zoneID, "recordsets") -} - -func rrsetURL(c *gophercloud.ServiceClient, zoneID string, rrsetID string) string { - return c.ServiceURL("zones", zoneID, "recordsets", rrsetID) -} diff --git 
a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go deleted file mode 100644 index 7733155bcfa..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Package zones provides information and interaction with the zone API -resource for the OpenStack DNS service. - -Example to List Zones - - listOpts := zones.ListOpts{ - Email: "jdoe@example.com", - } - - allPages, err := zones.List(dnsClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allZones, err := zones.ExtractZones(allPages) - if err != nil { - panic(err) - } - - for _, zone := range allZones { - fmt.Printf("%+v\n", zone) - } - -Example to Create a Zone - - createOpts := zones.CreateOpts{ - Name: "example.com.", - Email: "jdoe@example.com", - Type: "PRIMARY", - TTL: 7200, - Description: "This is a zone.", - } - - zone, err := zones.Create(dnsClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Zone - - zoneID := "99d10f68-5623-4491-91a0-6daafa32b60e" - err := zones.Delete(dnsClient, zoneID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package zones diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go deleted file mode 100644 index f87deadce74..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go +++ /dev/null @@ -1,174 +0,0 @@ -package zones - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add parameters to the List request. -type ListOptsBuilder interface { - ToZoneListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. 
Filtering is achieved by passing in struct field values that map to -// the server attributes you want to see returned. Marker and Limit are used -// for pagination. -// https://developer.openstack.org/api-ref/dns/ -type ListOpts struct { - // Integer value for the limit of values to return. - Limit int `q:"limit"` - - // UUID of the zone at which you want to set a marker. - Marker string `q:"marker"` - - Description string `q:"description"` - Email string `q:"email"` - Name string `q:"name"` - SortDir string `q:"sort_dir"` - SortKey string `q:"sort_key"` - Status string `q:"status"` - TTL int `q:"ttl"` - Type string `q:"type"` -} - -// ToZoneListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToZoneListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List implements a zone List request. -func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := baseURL(client) - if opts != nil { - query, err := opts.ToZoneListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return ZonePage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// Get returns information about a zone, given its ID. -func Get(client *gophercloud.ServiceClient, zoneID string) (r GetResult) { - _, r.Err = client.Get(zoneURL(client, zoneID), &r.Body, nil) - return -} - -// CreateOptsBuilder allows extensions to add additional attributes to the -// Create request. -type CreateOptsBuilder interface { - ToZoneCreateMap() (map[string]interface{}, error) -} - -// CreateOpts specifies the attributes used to create a zone. -type CreateOpts struct { - // Attributes are settings that supply hints and filters for the zone. - Attributes map[string]string `json:"attributes,omitempty"` - - // Email contact of the zone. 
- Email string `json:"email,omitempty"` - - // Description of the zone. - Description string `json:"description,omitempty"` - - // Name of the zone. - Name string `json:"name" required:"true"` - - // Masters specifies zone masters if this is a secondary zone. - Masters []string `json:"masters,omitempty"` - - // TTL is the time to live of the zone. - TTL int `json:"-"` - - // Type specifies if this is a primary or secondary zone. - Type string `json:"type,omitempty"` -} - -// ToZoneCreateMap formats an CreateOpts structure into a request body. -func (opts CreateOpts) ToZoneCreateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "") - if err != nil { - return nil, err - } - - if opts.TTL > 0 { - b["ttl"] = opts.TTL - } - - return b, nil -} - -// Create implements a zone create request. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToZoneCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(baseURL(client), &b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{201, 202}, - }) - return -} - -// UpdateOptsBuilder allows extensions to add additional attributes to the -// Update request. -type UpdateOptsBuilder interface { - ToZoneUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts specifies the attributes to update a zone. -type UpdateOpts struct { - // Email contact of the zone. - Email string `json:"email,omitempty"` - - // TTL is the time to live of the zone. - TTL int `json:"-"` - - // Masters specifies zone masters if this is a secondary zone. - Masters []string `json:"masters,omitempty"` - - // Description of the zone. - Description string `json:"description,omitempty"` -} - -// ToZoneUpdateMap formats an UpdateOpts structure into a request body. 
-func (opts UpdateOpts) ToZoneUpdateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "") - if err != nil { - return nil, err - } - - if opts.TTL > 0 { - b["ttl"] = opts.TTL - } - - return b, nil -} - -// Update implements a zone update request. -func Update(client *gophercloud.ServiceClient, zoneID string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToZoneUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Patch(zoneURL(client, zoneID), &b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - return -} - -// Delete implements a zone delete request. -func Delete(client *gophercloud.ServiceClient, zoneID string) (r DeleteResult) { - _, r.Err = client.Delete(zoneURL(client, zoneID), &gophercloud.RequestOpts{ - OkCodes: []int{202}, - JSONResponse: &r.Body, - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go deleted file mode 100644 index a36eca7e205..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go +++ /dev/null @@ -1,166 +0,0 @@ -package zones - -import ( - "encoding/json" - "strconv" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -type commonResult struct { - gophercloud.Result -} - -// Extract interprets a GetResult, CreateResult or UpdateResult as a Zone. -// An error is returned if the original call or the extraction failed. -func (r commonResult) Extract() (*Zone, error) { - var s *Zone - err := r.ExtractInto(&s) - return s, err -} - -// CreateResult is the result of a Create request. Call its Extract method -// to interpret the result as a Zone. -type CreateResult struct { - commonResult -} - -// GetResult is the result of a Get request. Call its Extract method -// to interpret the result as a Zone. 
-type GetResult struct { - commonResult -} - -// UpdateResult is the result of an Update request. Call its Extract method -// to interpret the result as a Zone. -type UpdateResult struct { - commonResult -} - -// DeleteResult is the result of a Delete request. Call its ExtractErr method -// to determine if the request succeeded or failed. -type DeleteResult struct { - commonResult -} - -// ZonePage is a single page of Zone results. -type ZonePage struct { - pagination.LinkedPageBase -} - -// IsEmpty returns true if the page contains no results. -func (r ZonePage) IsEmpty() (bool, error) { - s, err := ExtractZones(r) - return len(s) == 0, err -} - -// ExtractZones extracts a slice of Zones from a List result. -func ExtractZones(r pagination.Page) ([]Zone, error) { - var s struct { - Zones []Zone `json:"zones"` - } - err := (r.(ZonePage)).ExtractInto(&s) - return s.Zones, err -} - -// Zone represents a DNS zone. -type Zone struct { - // ID uniquely identifies this zone amongst all other zones, including those - // not accessible to the current tenant. - ID string `json:"id"` - - // PoolID is the ID for the pool hosting this zone. - PoolID string `json:"pool_id"` - - // ProjectID identifies the project/tenant owning this resource. - ProjectID string `json:"project_id"` - - // Name is the DNS Name for the zone. - Name string `json:"name"` - - // Email for the zone. Used in SOA records for the zone. - Email string `json:"email"` - - // Description for this zone. - Description string `json:"description"` - - // TTL is the Time to Live for the zone. - TTL int `json:"ttl"` - - // Serial is the current serial number for the zone. - Serial int `json:"-"` - - // Status is the status of the resource. - Status string `json:"status"` - - // Action is the current action in progress on the resource. - Action string `json:"action"` - - // Version of the resource. - Version int `json:"version"` - - // Attributes for the zone. 
- Attributes map[string]string `json:"attributes"` - - // Type of zone. Primary is controlled by Designate. - // Secondary zones are slaved from another DNS Server. - // Defaults to Primary. - Type string `json:"type"` - - // Masters is the servers for slave servers to get DNS information from. - Masters []string `json:"masters"` - - // CreatedAt is the date when the zone was created. - CreatedAt time.Time `json:"-"` - - // UpdatedAt is the date when the last change was made to the zone. - UpdatedAt time.Time `json:"-"` - - // TransferredAt is the last time an update was retrieved from the - // master servers. - TransferredAt time.Time `json:"-"` - - // Links includes HTTP references to the itself, useful for passing along - // to other APIs that might want a server reference. - Links map[string]interface{} `json:"links"` -} - -func (r *Zone) UnmarshalJSON(b []byte) error { - type tmp Zone - var s struct { - tmp - CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` - TransferredAt gophercloud.JSONRFC3339MilliNoZ `json:"transferred_at"` - Serial interface{} `json:"serial"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Zone(s.tmp) - - r.CreatedAt = time.Time(s.CreatedAt) - r.UpdatedAt = time.Time(s.UpdatedAt) - r.TransferredAt = time.Time(s.TransferredAt) - - switch t := s.Serial.(type) { - case float64: - r.Serial = int(t) - case string: - switch t { - case "": - r.Serial = 0 - default: - serial, err := strconv.ParseFloat(t, 64) - if err != nil { - return err - } - r.Serial = int(serial) - } - } - - return err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go deleted file mode 100644 index 9bef7058096..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go +++ /dev/null @@ -1,11 +0,0 @@ -package zones - -import 
"github.com/gophercloud/gophercloud" - -func baseURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("zones") -} - -func zoneURL(c *gophercloud.ServiceClient, zoneID string) string { - return c.ServiceURL("zones", zoneID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go deleted file mode 100644 index cedf1f4d3a3..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -/* -Package openstack contains resources for the individual OpenStack projects -supported in Gophercloud. It also includes functions to authenticate to an -OpenStack cloud and for provisioning various service-level clients. - -Example of Creating a Service Client - - ao, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(ao) - client, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), - }) -*/ -package openstack diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go deleted file mode 100644 index 070ea7cbef0..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go +++ /dev/null @@ -1,107 +0,0 @@ -package openstack - -import ( - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" -) - -/* -V2EndpointURL discovers the endpoint URL for a specific service from a -ServiceCatalog acquired during the v2 identity service. - -The specified EndpointOpts are used to identify a unique, unambiguous endpoint -to return. It's an error both when multiple endpoints match the provided -criteria and when none do. 
The minimum that can be specified is a Type, but you -will also often need to specify a Name and/or a Region depending on what's -available on your OpenStack deployment. -*/ -func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { - // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. - var endpoints = make([]tokens2.Endpoint, 0, 1) - for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { - for _, endpoint := range entry.Endpoints { - if opts.Region == "" || endpoint.Region == opts.Region { - endpoints = append(endpoints, endpoint) - } - } - } - } - - // Report an error if the options were ambiguous. - if len(endpoints) > 1 { - err := &ErrMultipleMatchingEndpointsV2{} - err.Endpoints = endpoints - return "", err - } - - // Extract the appropriate URL from the matching Endpoint. - for _, endpoint := range endpoints { - switch opts.Availability { - case gophercloud.AvailabilityPublic: - return gophercloud.NormalizeURL(endpoint.PublicURL), nil - case gophercloud.AvailabilityInternal: - return gophercloud.NormalizeURL(endpoint.InternalURL), nil - case gophercloud.AvailabilityAdmin: - return gophercloud.NormalizeURL(endpoint.AdminURL), nil - default: - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err - } - } - - // Report an error if there were no matching endpoints. - err := &gophercloud.ErrEndpointNotFound{} - return "", err -} - -/* -V3EndpointURL discovers the endpoint URL for a specific service from a Catalog -acquired during the v3 identity service. - -The specified EndpointOpts are used to identify a unique, unambiguous endpoint -to return. It's an error both when multiple endpoints match the provided -criteria and when none do. 
The minimum that can be specified is a Type, but you -will also often need to specify a Name and/or a Region depending on what's -available on your OpenStack deployment. -*/ -func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { - // Extract Endpoints from the catalog entries that match the requested Type, Interface, - // Name if provided, and Region if provided. - var endpoints = make([]tokens3.Endpoint, 0, 1) - for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { - for _, endpoint := range entry.Endpoints { - if opts.Availability != gophercloud.AvailabilityAdmin && - opts.Availability != gophercloud.AvailabilityPublic && - opts.Availability != gophercloud.AvailabilityInternal { - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err - } - if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && - (opts.Region == "" || endpoint.Region == opts.Region) { - endpoints = append(endpoints, endpoint) - } - } - } - } - - // Report an error if the options were ambiguous. - if len(endpoints) > 1 { - return "", ErrMultipleMatchingEndpointsV3{Endpoints: endpoints} - } - - // Extract the URL from the matching Endpoint. - for _, endpoint := range endpoints { - return gophercloud.NormalizeURL(endpoint.URL), nil - } - - // Report an error if there were no matching endpoints. 
- err := &gophercloud.ErrEndpointNotFound{} - return "", err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go deleted file mode 100644 index df410b1c611..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/errors.go +++ /dev/null @@ -1,71 +0,0 @@ -package openstack - -import ( - "fmt" - - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" -) - -// ErrEndpointNotFound is the error when no suitable endpoint can be found -// in the user's catalog -type ErrEndpointNotFound struct{ gophercloud.BaseError } - -func (e ErrEndpointNotFound) Error() string { - return "No suitable endpoint could be found in the service catalog." -} - -// ErrInvalidAvailabilityProvided is the error when an invalid endpoint -// availability is provided -type ErrInvalidAvailabilityProvided struct{ gophercloud.ErrInvalidInput } - -func (e ErrInvalidAvailabilityProvided) Error() string { - return fmt.Sprintf("Unexpected availability in endpoint query: %s", e.Value) -} - -// ErrMultipleMatchingEndpointsV2 is the error when more than one endpoint -// for the given options is found in the v2 catalog -type ErrMultipleMatchingEndpointsV2 struct { - gophercloud.BaseError - Endpoints []tokens2.Endpoint -} - -func (e ErrMultipleMatchingEndpointsV2) Error() string { - return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) -} - -// ErrMultipleMatchingEndpointsV3 is the error when more than one endpoint -// for the given options is found in the v3 catalog -type ErrMultipleMatchingEndpointsV3 struct { - gophercloud.BaseError - Endpoints []tokens3.Endpoint -} - -func (e ErrMultipleMatchingEndpointsV3) Error() string { - return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) -} - -// ErrNoAuthURL 
is the error when the OS_AUTH_URL environment variable is not -// found -type ErrNoAuthURL struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoAuthURL) Error() string { - return "Environment variable OS_AUTH_URL needs to be set." -} - -// ErrNoUsername is the error when the OS_USERNAME environment variable is not -// found -type ErrNoUsername struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoUsername) Error() string { - return "Environment variable OS_USERNAME needs to be set." -} - -// ErrNoPassword is the error when the OS_PASSWORD environment variable is not -// found -type ErrNoPassword struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoPassword) Error() string { - return "Environment variable OS_PASSWORD needs to be set." -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go deleted file mode 100644 index 45623369e18..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Package tenants provides information and interaction with the -tenants API resource for the OpenStack Identity service. - -See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 -and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants -for more information. 
- -Example to List Tenants - - listOpts := tenants.ListOpts{ - Limit: 2, - } - - allPages, err := tenants.List(identityClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allTenants, err := tenants.ExtractTenants(allPages) - if err != nil { - panic(err) - } - - for _, tenant := range allTenants { - fmt.Printf("%+v\n", tenant) - } - -Example to Create a Tenant - - createOpts := tenants.CreateOpts{ - Name: "tenant_name", - Description: "this is a tenant", - Enabled: gophercloud.Enabled, - } - - tenant, err := tenants.Create(identityClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Tenant - - tenantID := "e6db6ed6277c461a853458589063b295" - - updateOpts := tenants.UpdateOpts{ - Description: "this is a new description", - Enabled: gophercloud.Disabled, - } - - tenant, err := tenants.Update(identityClient, tenantID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Tenant - - tenantID := "e6db6ed6277c461a853458589063b295" - - err := tenants.Delete(identitYClient, tenantID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package tenants diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go deleted file mode 100644 index 60f58c8ce39..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go +++ /dev/null @@ -1,116 +0,0 @@ -package tenants - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts filters the Tenants that are returned by the List call. -type ListOpts struct { - // Marker is the ID of the last Tenant on the previous page. - Marker string `q:"marker"` - - // Limit specifies the page size. - Limit int `q:"limit"` -} - -// List enumerates the Tenants to which the current token has access. 
-func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager { - url := listURL(client) - if opts != nil { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return pagination.Pager{Err: err} - } - url += q.String() - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return TenantPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOpts represents the options needed when creating new tenant. -type CreateOpts struct { - // Name is the name of the tenant. - Name string `json:"name" required:"true"` - - // Description is the description of the tenant. - Description string `json:"description,omitempty"` - - // Enabled sets the tenant status to enabled or disabled. - Enabled *bool `json:"enabled,omitempty"` -} - -// CreateOptsBuilder enables extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToTenantCreateMap() (map[string]interface{}, error) -} - -// ToTenantCreateMap assembles a request body based on the contents of -// a CreateOpts. -func (opts CreateOpts) ToTenantCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "tenant") -} - -// Create is the operation responsible for creating new tenant. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToTenantCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - }) - return -} - -// Get requests details on a single tenant by ID. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. 
-type UpdateOptsBuilder interface { - ToTenantUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts specifies the base attributes that may be updated on an existing -// tenant. -type UpdateOpts struct { - // Name is the name of the tenant. - Name string `json:"name,omitempty"` - - // Description is the description of the tenant. - Description string `json:"description,omitempty"` - - // Enabled sets the tenant status to enabled or disabled. - Enabled *bool `json:"enabled,omitempty"` -} - -// ToTenantUpdateMap formats an UpdateOpts structure into a request body. -func (opts UpdateOpts) ToTenantUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "tenant") -} - -// Update is the operation responsible for updating exist tenants by their TenantID. -func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToTenantUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(updateURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete is the operation responsible for permanently deleting a tenant. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go deleted file mode 100644 index bb6c2c6b08a..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go +++ /dev/null @@ -1,91 +0,0 @@ -package tenants - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Tenant is a grouping of users in the identity service. -type Tenant struct { - // ID is a unique identifier for this tenant. 
- ID string `json:"id"` - - // Name is a friendlier user-facing name for this tenant. - Name string `json:"name"` - - // Description is a human-readable explanation of this Tenant's purpose. - Description string `json:"description"` - - // Enabled indicates whether or not a tenant is active. - Enabled bool `json:"enabled"` -} - -// TenantPage is a single page of Tenant results. -type TenantPage struct { - pagination.LinkedPageBase -} - -// IsEmpty determines whether or not a page of Tenants contains any results. -func (r TenantPage) IsEmpty() (bool, error) { - tenants, err := ExtractTenants(r) - return len(tenants) == 0, err -} - -// NextPageURL extracts the "next" link from the tenants_links section of the result. -func (r TenantPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"tenants_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// ExtractTenants returns a slice of Tenants contained in a single page of -// results. -func ExtractTenants(r pagination.Page) ([]Tenant, error) { - var s struct { - Tenants []Tenant `json:"tenants"` - } - err := (r.(TenantPage)).ExtractInto(&s) - return s.Tenants, err -} - -type tenantResult struct { - gophercloud.Result -} - -// Extract interprets any tenantResults as a Tenant. -func (r tenantResult) Extract() (*Tenant, error) { - var s struct { - Tenant *Tenant `json:"tenant"` - } - err := r.ExtractInto(&s) - return s.Tenant, err -} - -// GetResult is the response from a Get request. Call its Extract method to -// interpret it as a Tenant. -type GetResult struct { - tenantResult -} - -// CreateResult is the response from a Create request. Call its Extract method -// to interpret it as a Tenant. -type CreateResult struct { - tenantResult -} - -// DeleteResult is the response from a Get request. Call its ExtractErr method -// to determine if the call succeeded or failed. 
-type DeleteResult struct { - gophercloud.ErrResult -} - -// UpdateResult is the response from a Update request. Call its Extract method -// to interpret it as a Tenant. -type UpdateResult struct { - tenantResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go deleted file mode 100644 index 0f026690790..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go +++ /dev/null @@ -1,23 +0,0 @@ -package tenants - -import "github.com/gophercloud/gophercloud" - -func listURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tenants") -} - -func getURL(client *gophercloud.ServiceClient, tenantID string) string { - return client.ServiceURL("tenants", tenantID) -} - -func createURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tenants") -} - -func deleteURL(client *gophercloud.ServiceClient, tenantID string) string { - return client.ServiceURL("tenants", tenantID) -} - -func updateURL(client *gophercloud.ServiceClient, tenantID string) string { - return client.ServiceURL("tenants", tenantID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go deleted file mode 100644 index 5375eea8726..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Package tokens provides information and interaction with the token API -resource for the OpenStack Identity service. 
- -For more information, see: -http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 - -Example to Create an Unscoped Token from a Password - - authOpts := gophercloud.AuthOptions{ - Username: "user", - Password: "pass" - } - - token, err := tokens.Create(identityClient, authOpts).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Tenant ID and Password - - authOpts := gophercloud.AuthOptions{ - Username: "user", - Password: "password", - TenantID: "fc394f2ab2df4114bde39905f800dc57" - } - - token, err := tokens.Create(identityClient, authOpts).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Tenant Name and Password - - authOpts := gophercloud.AuthOptions{ - Username: "user", - Password: "password", - TenantName: "tenantname" - } - - token, err := tokens.Create(identityClient, authOpts).ExtractToken() - if err != nil { - panic(err) - } -*/ -package tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go deleted file mode 100644 index ab32368cc6e..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go +++ /dev/null @@ -1,103 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -// PasswordCredentialsV2 represents the required options to authenticate -// with a username and password. -type PasswordCredentialsV2 struct { - Username string `json:"username" required:"true"` - Password string `json:"password" required:"true"` -} - -// TokenCredentialsV2 represents the required options to authenticate -// with a token. -type TokenCredentialsV2 struct { - ID string `json:"id,omitempty" required:"true"` -} - -// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the -// AuthOptionsBuilder interface. 
-type AuthOptionsV2 struct { - PasswordCredentials *PasswordCredentialsV2 `json:"passwordCredentials,omitempty" xor:"TokenCredentials"` - - // The TenantID and TenantName fields are optional for the Identity V2 API. - // Some providers allow you to specify a TenantName instead of the TenantId. - // Some require both. Your provider's authentication policies will determine - // how these fields influence authentication. - TenantID string `json:"tenantId,omitempty"` - TenantName string `json:"tenantName,omitempty"` - - // TokenCredentials allows users to authenticate (possibly as another user) - // with an authentication token ID. - TokenCredentials *TokenCredentialsV2 `json:"token,omitempty" xor:"PasswordCredentials"` -} - -// AuthOptionsBuilder allows extensions to add additional parameters to the -// token create request. -type AuthOptionsBuilder interface { - // ToTokenCreateMap assembles the Create request body, returning an error - // if parameters are missing or inconsistent. - ToTokenV2CreateMap() (map[string]interface{}, error) -} - -// AuthOptions are the valid options for Openstack Identity v2 authentication. -// For field descriptions, see gophercloud.AuthOptions. -type AuthOptions struct { - IdentityEndpoint string `json:"-"` - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - TenantID string `json:"tenantId,omitempty"` - TenantName string `json:"tenantName,omitempty"` - AllowReauth bool `json:"-"` - TokenID string -} - -// ToTokenV2CreateMap builds a token request body from the given AuthOptions. 
-func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { - v2Opts := AuthOptionsV2{ - TenantID: opts.TenantID, - TenantName: opts.TenantName, - } - - if opts.Password != "" { - v2Opts.PasswordCredentials = &PasswordCredentialsV2{ - Username: opts.Username, - Password: opts.Password, - } - } else { - v2Opts.TokenCredentials = &TokenCredentialsV2{ - ID: opts.TokenID, - } - } - - b, err := gophercloud.BuildRequestBody(v2Opts, "auth") - if err != nil { - return nil, err - } - return b, nil -} - -// Create authenticates to the identity service and attempts to acquire a Token. -// Generally, rather than interact with this call directly, end users should -// call openstack.AuthenticatedClient(), which abstracts all of the gory details -// about navigating service catalogs and such. -func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r CreateResult) { - b, err := auth.ToTokenV2CreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(CreateURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - }) - return -} - -// Get validates and retrieves information for user's token. 
-func Get(client *gophercloud.ServiceClient, token string) (r GetResult) { - _, r.Err = client.Get(GetURL(client, token), &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go deleted file mode 100644 index b11326772b1..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go +++ /dev/null @@ -1,159 +0,0 @@ -package tokens - -import ( - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" -) - -// Token provides only the most basic information related to an authentication -// token. -type Token struct { - // ID provides the primary means of identifying a user to the OpenStack API. - // OpenStack defines this field as an opaque value, so do not depend on its - // content. It is safe, however, to compare for equality. - ID string - - // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the - // authentication token becomes invalid. After this point in time, future - // API requests made using this authentication token will respond with - // errors. Either the caller will need to reauthenticate manually, or more - // preferably, the caller should exploit automatic re-authentication. - // See the AuthOptions structure for more details. - ExpiresAt time.Time - - // Tenant provides information about the tenant to which this token grants - // access. - Tenant tenants.Tenant -} - -// Role is a role for a user. -type Role struct { - Name string `json:"name"` -} - -// User is an OpenStack user. -type User struct { - ID string `json:"id"` - Name string `json:"name"` - UserName string `json:"username"` - Roles []Role `json:"roles"` -} - -// Endpoint represents a single API endpoint offered by a service. 
-// It provides the public and internal URLs, if supported, along with a region -// specifier, again if provided. -// -// The significance of the Region field will depend upon your provider. -// -// In addition, the interface offered by the service will have version -// information associated with it through the VersionId, VersionInfo, and -// VersionList fields, if provided or supported. -// -// In all cases, fields which aren't supported by the provider and service -// combined will assume a zero-value (""). -type Endpoint struct { - TenantID string `json:"tenantId"` - PublicURL string `json:"publicURL"` - InternalURL string `json:"internalURL"` - AdminURL string `json:"adminURL"` - Region string `json:"region"` - VersionID string `json:"versionId"` - VersionInfo string `json:"versionInfo"` - VersionList string `json:"versionList"` -} - -// CatalogEntry provides a type-safe interface to an Identity API V2 service -// catalog listing. -// -// Each class of service, such as cloud DNS or block storage services, will have -// a single CatalogEntry representing it. -// -// Note: when looking for the desired service, try, whenever possible, to key -// off the type field. Otherwise, you'll tie the representation of the service -// to a specific provider. -type CatalogEntry struct { - // Name will contain the provider-specified name for the service. - Name string `json:"name"` - - // Type will contain a type string if OpenStack defines a type for the - // service. Otherwise, for provider-specific services, the provider may assign - // their own type strings. - Type string `json:"type"` - - // Endpoints will let the caller iterate over all the different endpoints that - // may exist for the service. - Endpoints []Endpoint `json:"endpoints"` -} - -// ServiceCatalog provides a view into the service catalog from a previous, -// successful authentication. -type ServiceCatalog struct { - Entries []CatalogEntry -} - -// CreateResult is the response from a Create request. 
Use ExtractToken() to -// interpret it as a Token, or ExtractServiceCatalog() to interpret it as a -// service catalog. -type CreateResult struct { - gophercloud.Result -} - -// GetResult is the deferred response from a Get call, which is the same with a -// Created token. Use ExtractUser() to interpret it as a User. -type GetResult struct { - CreateResult -} - -// ExtractToken returns the just-created Token from a CreateResult. -func (r CreateResult) ExtractToken() (*Token, error) { - var s struct { - Access struct { - Token struct { - Expires string `json:"expires"` - ID string `json:"id"` - Tenant tenants.Tenant `json:"tenant"` - } `json:"token"` - } `json:"access"` - } - - err := r.ExtractInto(&s) - if err != nil { - return nil, err - } - - expiresTs, err := time.Parse(gophercloud.RFC3339Milli, s.Access.Token.Expires) - if err != nil { - return nil, err - } - - return &Token{ - ID: s.Access.Token.ID, - ExpiresAt: expiresTs, - Tenant: s.Access.Token.Tenant, - }, nil -} - -// ExtractServiceCatalog returns the ServiceCatalog that was generated along -// with the user's Token. -func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { - var s struct { - Access struct { - Entries []CatalogEntry `json:"serviceCatalog"` - } `json:"access"` - } - err := r.ExtractInto(&s) - return &ServiceCatalog{Entries: s.Access.Entries}, err -} - -// ExtractUser returns the User from a GetResult. 
-func (r GetResult) ExtractUser() (*User, error) { - var s struct { - Access struct { - User User `json:"user"` - } `json:"access"` - } - err := r.ExtractInto(&s) - return &s.Access.User, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go deleted file mode 100644 index ee0a28f2004..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go +++ /dev/null @@ -1,13 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -// CreateURL generates the URL used to create new Tokens. -func CreateURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tokens") -} - -// GetURL generates the URL used to Validate Tokens. -func GetURL(client *gophercloud.ServiceClient, token string) string { - return client.ServiceURL("tokens", token) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go deleted file mode 100644 index 696e2a5d8e5..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Package groups manages and retrieves Groups in the OpenStack Identity Service. 
- -Example to List Groups - - listOpts := groups.ListOpts{ - DomainID: "default", - } - - allPages, err := groups.List(identityClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allGroups, err := groups.ExtractGroups(allPages) - if err != nil { - panic(err) - } - - for _, group := range allGroups { - fmt.Printf("%+v\n", group) - } - -Example to Create a Group - - createOpts := groups.CreateOpts{ - Name: "groupname", - DomainID: "default", - Extra: map[string]interface{}{ - "email": "groupname@example.com", - } - } - - group, err := groups.Create(identityClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Group - - groupID := "0fe36e73809d46aeae6705c39077b1b3" - - updateOpts := groups.UpdateOpts{ - Description: "Updated Description for group", - } - - group, err := groups.Update(identityClient, groupID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Group - - groupID := "0fe36e73809d46aeae6705c39077b1b3" - err := groups.Delete(identityClient, groupID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package groups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go deleted file mode 100644 index b6e74dcf976..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go +++ /dev/null @@ -1,158 +0,0 @@ -package groups - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to -// the List request -type ListOptsBuilder interface { - ToGroupListQuery() (string, error) -} - -// ListOpts provides options to filter the List results. -type ListOpts struct { - // DomainID filters the response by a domain ID. - DomainID string `q:"domain_id"` - - // Name filters the response by group name. 
- Name string `q:"name"` -} - -// ToGroupListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToGroupListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List enumerates the Groups to which the current token has access. -func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(client) - if opts != nil { - query, err := opts.ToGroupListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return GroupPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// Get retrieves details on a single group, by ID. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// CreateOptsBuilder allows extensions to add additional parameters to -// the Create request. -type CreateOptsBuilder interface { - ToGroupCreateMap() (map[string]interface{}, error) -} - -// CreateOpts provides options used to create a group. -type CreateOpts struct { - // Name is the name of the new group. - Name string `json:"name" required:"true"` - - // Description is a description of the group. - Description string `json:"description,omitempty"` - - // DomainID is the ID of the domain the group belongs to. - DomainID string `json:"domain_id,omitempty"` - - // Extra is free-form extra key/value pairs to describe the group. - Extra map[string]interface{} `json:"-"` -} - -// ToGroupCreateMap formats a CreateOpts into a create request. 
-func (opts CreateOpts) ToGroupCreateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "group") - if err != nil { - return nil, err - } - - if opts.Extra != nil { - if v, ok := b["group"].(map[string]interface{}); ok { - for key, value := range opts.Extra { - v[key] = value - } - } - } - - return b, nil -} - -// Create creates a new Group. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToGroupCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{201}, - }) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to -// the Update request. -type UpdateOptsBuilder interface { - ToGroupUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts provides options for updating a group. -type UpdateOpts struct { - // Name is the name of the new group. - Name string `json:"name,omitempty"` - - // Description is a description of the group. - Description string `json:"description,omitempty"` - - // DomainID is the ID of the domain the group belongs to. - DomainID string `json:"domain_id,omitempty"` - - // Extra is free-form extra key/value pairs to describe the group. - Extra map[string]interface{} `json:"-"` -} - -// ToGroupUpdateMap formats a UpdateOpts into an update request. -func (opts UpdateOpts) ToGroupUpdateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "group") - if err != nil { - return nil, err - } - - if opts.Extra != nil { - if v, ok := b["group"].(map[string]interface{}); ok { - for key, value := range opts.Extra { - v[key] = value - } - } - } - - return b, nil -} - -// Update updates an existing Group. 
-func Update(client *gophercloud.ServiceClient, groupID string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToGroupUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Patch(updateURL(client, groupID), &b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete deletes a group. -func Delete(client *gophercloud.ServiceClient, groupID string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, groupID), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go deleted file mode 100644 index ba7d018d173..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go +++ /dev/null @@ -1,132 +0,0 @@ -package groups - -import ( - "encoding/json" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/internal" - "github.com/gophercloud/gophercloud/pagination" -) - -// Group helps manage related users. -type Group struct { - // Description describes the group purpose. - Description string `json:"description"` - - // DomainID is the domain ID the group belongs to. - DomainID string `json:"domain_id"` - - // ID is the unique ID of the group. - ID string `json:"id"` - - // Extra is a collection of miscellaneous key/values. - Extra map[string]interface{} `json:"-"` - - // Links contains referencing links to the group. - Links map[string]interface{} `json:"links"` - - // Name is the name of the group. - Name string `json:"name"` -} - -func (r *Group) UnmarshalJSON(b []byte) error { - type tmp Group - var s struct { - tmp - Extra map[string]interface{} `json:"extra"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Group(s.tmp) - - // Collect other fields and bundle them into Extra - // but only if a field titled "extra" wasn't sent. 
- if s.Extra != nil { - r.Extra = s.Extra - } else { - var result interface{} - err := json.Unmarshal(b, &result) - if err != nil { - return err - } - if resultMap, ok := result.(map[string]interface{}); ok { - r.Extra = internal.RemainingKeys(Group{}, resultMap) - } - } - - return err -} - -type groupResult struct { - gophercloud.Result -} - -// GetResult is the response from a Get operation. Call its Extract method -// to interpret it as a Group. -type GetResult struct { - groupResult -} - -// CreateResult is the response from a Create operation. Call its Extract method -// to interpret it as a Group. -type CreateResult struct { - groupResult -} - -// UpdateResult is the response from an Update operation. Call its Extract -// method to interpret it as a Group. -type UpdateResult struct { - groupResult -} - -// DeleteResult is the response from a Delete operation. Call its ExtractErr to -// determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// GroupPage is a single page of Group results. -type GroupPage struct { - pagination.LinkedPageBase -} - -// IsEmpty determines whether or not a page of Groups contains any results. -func (r GroupPage) IsEmpty() (bool, error) { - groups, err := ExtractGroups(r) - return len(groups) == 0, err -} - -// NextPageURL extracts the "next" link from the links section of the result. -func (r GroupPage) NextPageURL() (string, error) { - var s struct { - Links struct { - Next string `json:"next"` - Previous string `json:"previous"` - } `json:"links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return s.Links.Next, err -} - -// ExtractGroups returns a slice of Groups contained in a single page of results. -func ExtractGroups(r pagination.Page) ([]Group, error) { - var s struct { - Groups []Group `json:"groups"` - } - err := (r.(GroupPage)).ExtractInto(&s) - return s.Groups, err -} - -// Extract interprets any group results as a Group. 
-func (r groupResult) Extract() (*Group, error) { - var s struct { - Group *Group `json:"group"` - } - err := r.ExtractInto(&s) - return s.Group, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go deleted file mode 100644 index e7d1e53b27f..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go +++ /dev/null @@ -1,23 +0,0 @@ -package groups - -import "github.com/gophercloud/gophercloud" - -func listURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("groups") -} - -func getURL(client *gophercloud.ServiceClient, groupID string) string { - return client.ServiceURL("groups", groupID) -} - -func createURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("groups") -} - -func updateURL(client *gophercloud.ServiceClient, groupID string) string { - return client.ServiceURL("groups", groupID) -} - -func deleteURL(client *gophercloud.ServiceClient, groupID string) string { - return client.ServiceURL("groups", groupID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go deleted file mode 100644 index 4f5b45ab655..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Package projects manages and retrieves Projects in the OpenStack Identity -Service. 
- -Example to List Projects - - listOpts := projects.ListOpts{ - Enabled: gophercloud.Enabled, - } - - allPages, err := projects.List(identityClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allProjects, err := projects.ExtractProjects(allPages) - if err != nil { - panic(err) - } - - for _, project := range allProjects { - fmt.Printf("%+v\n", project) - } - -Example to Create a Project - - createOpts := projects.CreateOpts{ - Name: "project_name", - Description: "Project Description" - } - - project, err := projects.Create(identityClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Project - - projectID := "966b3c7d36a24facaf20b7e458bf2192" - - updateOpts := projects.UpdateOpts{ - Enabled: gophercloud.Disabled, - } - - project, err := projects.Update(identityClient, projectID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Project - - projectID := "966b3c7d36a24facaf20b7e458bf2192" - err := projects.Delete(identityClient, projectID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package projects diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go deleted file mode 100644 index 368b7321ba7..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go +++ /dev/null @@ -1,152 +0,0 @@ -package projects - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to -// the List request -type ListOptsBuilder interface { - ToProjectListQuery() (string, error) -} - -// ListOpts enables filtering of a list request. -type ListOpts struct { - // DomainID filters the response by a domain ID. - DomainID string `q:"domain_id"` - - // Enabled filters the response by enabled projects. 
- Enabled *bool `q:"enabled"` - - // IsDomain filters the response by projects that are domains. - // Setting this to true is effectively listing domains. - IsDomain *bool `q:"is_domain"` - - // Name filters the response by project name. - Name string `q:"name"` - - // ParentID filters the response by projects of a given parent project. - ParentID string `q:"parent_id"` -} - -// ToProjectListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToProjectListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List enumerates the Projects to which the current token has access. -func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(client) - if opts != nil { - query, err := opts.ToProjectListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return ProjectPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// Get retrieves details on a single project, by ID. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// CreateOptsBuilder allows extensions to add additional parameters to -// the Create request. -type CreateOptsBuilder interface { - ToProjectCreateMap() (map[string]interface{}, error) -} - -// CreateOpts represents parameters used to create a project. -type CreateOpts struct { - // DomainID is the ID this project will belong under. - DomainID string `json:"domain_id,omitempty"` - - // Enabled sets the project status to enabled or disabled. - Enabled *bool `json:"enabled,omitempty"` - - // IsDomain indicates if this project is a domain. - IsDomain *bool `json:"is_domain,omitempty"` - - // Name is the name of the project. 
- Name string `json:"name" required:"true"` - - // ParentID specifies the parent project of this new project. - ParentID string `json:"parent_id,omitempty"` - - // Description is the description of the project. - Description string `json:"description,omitempty"` -} - -// ToProjectCreateMap formats a CreateOpts into a create request. -func (opts CreateOpts) ToProjectCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "project") -} - -// Create creates a new Project. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToProjectCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), &b, &r.Body, nil) - return -} - -// Delete deletes a project. -func Delete(client *gophercloud.ServiceClient, projectID string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, projectID), nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to -// the Update request. -type UpdateOptsBuilder interface { - ToProjectUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts represents parameters to update a project. -type UpdateOpts struct { - // DomainID is the ID this project will belong under. - DomainID string `json:"domain_id,omitempty"` - - // Enabled sets the project status to enabled or disabled. - Enabled *bool `json:"enabled,omitempty"` - - // IsDomain indicates if this project is a domain. - IsDomain *bool `json:"is_domain,omitempty"` - - // Name is the name of the project. - Name string `json:"name,omitempty"` - - // ParentID specifies the parent project of this new project. - ParentID string `json:"parent_id,omitempty"` - - // Description is the description of the project. - Description string `json:"description,omitempty"` -} - -// ToUpdateCreateMap formats a UpdateOpts into an update request. 
-func (opts UpdateOpts) ToProjectUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "project") -} - -// Update modifies the attributes of a project. -func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToProjectUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go deleted file mode 100644 index a13fa7f2ae1..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go +++ /dev/null @@ -1,103 +0,0 @@ -package projects - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -type projectResult struct { - gophercloud.Result -} - -// GetResult is the result of a Get request. Call its Extract method to -// interpret it as a Project. -type GetResult struct { - projectResult -} - -// CreateResult is the result of a Create request. Call its Extract method to -// interpret it as a Project. -type CreateResult struct { - projectResult -} - -// DeleteResult is the result of a Delete request. Call its ExtractErr method to -// determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// UpdateResult is the result of an Update request. Call its Extract method to -// interpret it as a Project. -type UpdateResult struct { - projectResult -} - -// Project represents an OpenStack Identity Project. -type Project struct { - // IsDomain indicates whether the project is a domain. - IsDomain bool `json:"is_domain"` - - // Description is the description of the project. 
- Description string `json:"description"` - - // DomainID is the domain ID the project belongs to. - DomainID string `json:"domain_id"` - - // Enabled is whether or not the project is enabled. - Enabled bool `json:"enabled"` - - // ID is the unique ID of the project. - ID string `json:"id"` - - // Name is the name of the project. - Name string `json:"name"` - - // ParentID is the parent_id of the project. - ParentID string `json:"parent_id"` -} - -// ProjectPage is a single page of Project results. -type ProjectPage struct { - pagination.LinkedPageBase -} - -// IsEmpty determines whether or not a page of Projects contains any results. -func (r ProjectPage) IsEmpty() (bool, error) { - projects, err := ExtractProjects(r) - return len(projects) == 0, err -} - -// NextPageURL extracts the "next" link from the links section of the result. -func (r ProjectPage) NextPageURL() (string, error) { - var s struct { - Links struct { - Next string `json:"next"` - Previous string `json:"previous"` - } `json:"links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return s.Links.Next, err -} - -// ExtractProjects returns a slice of Projects contained in a single page of -// results. -func ExtractProjects(r pagination.Page) ([]Project, error) { - var s struct { - Projects []Project `json:"projects"` - } - err := (r.(ProjectPage)).ExtractInto(&s) - return s.Projects, err -} - -// Extract interprets any projectResults as a Project. 
-func (r projectResult) Extract() (*Project, error) { - var s struct { - Project *Project `json:"project"` - } - err := r.ExtractInto(&s) - return s.Project, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go deleted file mode 100644 index e26cf3684cc..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go +++ /dev/null @@ -1,23 +0,0 @@ -package projects - -import "github.com/gophercloud/gophercloud" - -func listURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("projects") -} - -func getURL(client *gophercloud.ServiceClient, projectID string) string { - return client.ServiceURL("projects", projectID) -} - -func createURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("projects") -} - -func deleteURL(client *gophercloud.ServiceClient, projectID string) string { - return client.ServiceURL("projects", projectID) -} - -func updateURL(client *gophercloud.ServiceClient, projectID string) string { - return client.ServiceURL("projects", projectID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go deleted file mode 100644 index 966e128f128..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Package tokens provides information and interaction with the token API -resource for the OpenStack Identity service. 
- -For more information, see: -http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3 - -Example to Create a Token From a Username and Password - - authOptions := tokens.AuthOptions{ - UserID: "username", - Password: "password", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token From a Username, Password, and Domain - - authOptions := tokens.AuthOptions{ - UserID: "username", - Password: "password", - DomainID: "default", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - - authOptions = tokens.AuthOptions{ - UserID: "username", - Password: "password", - DomainName: "default", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token From a Token - - authOptions := tokens.AuthOptions{ - TokenID: "token_id", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Username and Password with Project ID Scope - - scope := tokens.Scope{ - ProjectID: "0fe36e73809d46aeae6705c39077b1b3", - } - - authOptions := tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Username and Password with Domain ID Scope - - scope := tokens.Scope{ - DomainID: "default", - } - - authOptions := tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Username and Password with Project Name Scope - - scope := tokens.Scope{ - ProjectName: "project_name", - DomainID: "default", - } - - authOptions := 
tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -*/ -package tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go deleted file mode 100644 index ca35851e4a4..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go +++ /dev/null @@ -1,210 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -// Scope allows a created token to be limited to a specific domain or project. -type Scope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string -} - -// AuthOptionsBuilder provides the ability for extensions to add additional -// parameters to AuthOptions. Extensions must satisfy all required methods. -type AuthOptionsBuilder interface { - // ToTokenV3CreateMap assembles the Create request body, returning an error - // if parameters are missing or inconsistent. - ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error) - ToTokenV3ScopeMap() (map[string]interface{}, error) - CanReauth() bool -} - -// AuthOptions represents options for authenticating a user. -type AuthOptions struct { - // IdentityEndpoint specifies the HTTP endpoint that is required to work with - // the Identity API of the appropriate version. While it's ultimately needed - // by all of the identity services, it will often be populated by a - // provider-level function. - IdentityEndpoint string `json:"-"` - - // Username is required if using Identity V2 API. Consult with your provider's - // control panel to discover your account's username. In Identity V3, either - // UserID or a combination of Username and DomainID or DomainName are needed. 
- Username string `json:"username,omitempty"` - UserID string `json:"id,omitempty"` - - Password string `json:"password,omitempty"` - - // At most one of DomainID and DomainName must be provided if using Username - // with Identity V3. Otherwise, either are optional. - DomainID string `json:"-"` - DomainName string `json:"name,omitempty"` - - // AllowReauth should be set to true if you grant permission for Gophercloud - // to cache your credentials in memory, and to allow Gophercloud to attempt - // to re-authenticate automatically if/when your token expires. If you set - // it to false, it will not cache these settings, but re-authentication will - // not be possible. This setting defaults to false. - AllowReauth bool `json:"-"` - - // TokenID allows users to authenticate (possibly as another user) with an - // authentication token ID. - TokenID string `json:"-"` - - Scope Scope `json:"-"` -} - -// ToTokenV3CreateMap builds a request body from AuthOptions. -func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { - gophercloudAuthOpts := gophercloud.AuthOptions{ - Username: opts.Username, - UserID: opts.UserID, - Password: opts.Password, - DomainID: opts.DomainID, - DomainName: opts.DomainName, - AllowReauth: opts.AllowReauth, - TokenID: opts.TokenID, - } - - return gophercloudAuthOpts.ToTokenV3CreateMap(scope) -} - -// ToTokenV3CreateMap builds a scope request body from AuthOptions. -func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - if opts.Scope.ProjectName != "" { - // ProjectName provided: either DomainID or DomainName must also be supplied. - // ProjectID may not be supplied. 
- if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { - return nil, gophercloud.ErrScopeDomainIDOrDomainName{} - } - if opts.Scope.ProjectID != "" { - return nil, gophercloud.ErrScopeProjectIDOrProjectName{} - } - - if opts.Scope.DomainID != "" { - // ProjectName + DomainID - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, - }, - }, nil - } - - if opts.Scope.DomainName != "" { - // ProjectName + DomainName - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, - }, - }, nil - } - } else if opts.Scope.ProjectID != "" { - // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. - if opts.Scope.DomainID != "" { - return nil, gophercloud.ErrScopeProjectIDAlone{} - } - if opts.Scope.DomainName != "" { - return nil, gophercloud.ErrScopeProjectIDAlone{} - } - - // ProjectID - return map[string]interface{}{ - "project": map[string]interface{}{ - "id": &opts.Scope.ProjectID, - }, - }, nil - } else if opts.Scope.DomainID != "" { - // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. 
- if opts.Scope.DomainName != "" { - return nil, gophercloud.ErrScopeDomainIDOrDomainName{} - } - - // DomainID - return map[string]interface{}{ - "domain": map[string]interface{}{ - "id": &opts.Scope.DomainID, - }, - }, nil - } else if opts.Scope.DomainName != "" { - // DomainName - return map[string]interface{}{ - "domain": map[string]interface{}{ - "name": &opts.Scope.DomainName, - }, - }, nil - } - - return nil, nil -} - -func (opts *AuthOptions) CanReauth() bool { - return opts.AllowReauth -} - -func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string { - return map[string]string{ - "X-Subject-Token": subjectToken, - } -} - -// Create authenticates and either generates a new token, or changes the Scope -// of an existing token. -func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResult) { - scope, err := opts.ToTokenV3ScopeMap() - if err != nil { - r.Err = err - return - } - - b, err := opts.ToTokenV3CreateMap(scope) - if err != nil { - r.Err = err - return - } - - resp, err := c.Post(tokenURL(c), b, &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - }) - r.Err = err - if resp != nil { - r.Header = resp.Header - } - return -} - -// Get validates and retrieves information about another token. -func Get(c *gophercloud.ServiceClient, token string) (r GetResult) { - resp, err := c.Get(tokenURL(c), &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - OkCodes: []int{200, 203}, - }) - if resp != nil { - r.Err = err - r.Header = resp.Header - } - return -} - -// Validate determines if a specified token is valid or not. 
-func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { - resp, err := c.Request("HEAD", tokenURL(c), &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - OkCodes: []int{200, 204, 404}, - }) - if err != nil { - return false, err - } - - return resp.StatusCode == 200 || resp.StatusCode == 204, nil -} - -// Revoke immediately makes specified token invalid. -func Revoke(c *gophercloud.ServiceClient, token string) (r RevokeResult) { - _, r.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go deleted file mode 100644 index 6e78d1cbdbf..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go +++ /dev/null @@ -1,170 +0,0 @@ -package tokens - -import ( - "time" - - "github.com/gophercloud/gophercloud" -) - -// Endpoint represents a single API endpoint offered by a service. -// It matches either a public, internal or admin URL. -// If supported, it contains a region specifier, again if provided. -// The significance of the Region field will depend upon your provider. -type Endpoint struct { - ID string `json:"id"` - Region string `json:"region"` - Interface string `json:"interface"` - URL string `json:"url"` -} - -// CatalogEntry provides a type-safe interface to an Identity API V3 service -// catalog listing. Each class of service, such as cloud DNS or block storage -// services, could have multiple CatalogEntry representing it (one by interface -// type, e.g public, admin or internal). -// -// Note: when looking for the desired service, try, whenever possible, to key -// off the type field. Otherwise, you'll tie the representation of the service -// to a specific provider. 
-type CatalogEntry struct { - // Service ID - ID string `json:"id"` - - // Name will contain the provider-specified name for the service. - Name string `json:"name"` - - // Type will contain a type string if OpenStack defines a type for the - // service. Otherwise, for provider-specific services, the provider may - // assign their own type strings. - Type string `json:"type"` - - // Endpoints will let the caller iterate over all the different endpoints that - // may exist for the service. - Endpoints []Endpoint `json:"endpoints"` -} - -// ServiceCatalog provides a view into the service catalog from a previous, -// successful authentication. -type ServiceCatalog struct { - Entries []CatalogEntry `json:"catalog"` -} - -// Domain provides information about the domain to which this token grants -// access. -type Domain struct { - ID string `json:"id"` - Name string `json:"name"` -} - -// User represents a user resource that exists in the Identity Service. -type User struct { - Domain Domain `json:"domain"` - ID string `json:"id"` - Name string `json:"name"` -} - -// Role provides information about roles to which User is authorized. -type Role struct { - ID string `json:"id"` - Name string `json:"name"` -} - -// Project provides information about project to which User is authorized. -type Project struct { - Domain Domain `json:"domain"` - ID string `json:"id"` - Name string `json:"name"` -} - -// commonResult is the response from a request. A commonResult has various -// methods which can be used to extract different details about the result. -type commonResult struct { - gophercloud.Result -} - -// Extract is a shortcut for ExtractToken. -// This function is deprecated and still present for backward compatibility. -func (r commonResult) Extract() (*Token, error) { - return r.ExtractToken() -} - -// ExtractToken interprets a commonResult as a Token. 
-func (r commonResult) ExtractToken() (*Token, error) { - var s Token - err := r.ExtractInto(&s) - if err != nil { - return nil, err - } - - // Parse the token itself from the stored headers. - s.ID = r.Header.Get("X-Subject-Token") - - return &s, err -} - -// ExtractServiceCatalog returns the ServiceCatalog that was generated along -// with the user's Token. -func (r commonResult) ExtractServiceCatalog() (*ServiceCatalog, error) { - var s ServiceCatalog - err := r.ExtractInto(&s) - return &s, err -} - -// ExtractUser returns the User that is the owner of the Token. -func (r commonResult) ExtractUser() (*User, error) { - var s struct { - User *User `json:"user"` - } - err := r.ExtractInto(&s) - return s.User, err -} - -// ExtractRoles returns Roles to which User is authorized. -func (r commonResult) ExtractRoles() ([]Role, error) { - var s struct { - Roles []Role `json:"roles"` - } - err := r.ExtractInto(&s) - return s.Roles, err -} - -// ExtractProject returns Project to which User is authorized. -func (r commonResult) ExtractProject() (*Project, error) { - var s struct { - Project *Project `json:"project"` - } - err := r.ExtractInto(&s) - return s.Project, err -} - -// CreateResult is the response from a Create request. Use ExtractToken() -// to interpret it as a Token, or ExtractServiceCatalog() to interpret it -// as a service catalog. -type CreateResult struct { - commonResult -} - -// GetResult is the response from a Get request. Use ExtractToken() -// to interpret it as a Token, or ExtractServiceCatalog() to interpret it -// as a service catalog. -type GetResult struct { - commonResult -} - -// RevokeResult is response from a Revoke request. -type RevokeResult struct { - commonResult -} - -// Token is a string that grants a user access to a controlled set of services -// in an OpenStack provider. Each Token is valid for a set length of time. -type Token struct { - // ID is the issued token. 
- ID string `json:"id"` - - // ExpiresAt is the timestamp at which this token will no longer be accepted. - ExpiresAt time.Time `json:"expires_at"` -} - -func (r commonResult) ExtractInto(v interface{}) error { - return r.ExtractIntoStructPtr(v, "token") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go deleted file mode 100644 index 2f864a31c8b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go +++ /dev/null @@ -1,7 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -func tokenURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("auth", "tokens") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go deleted file mode 100644 index aa7ec196f5a..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Package users manages and retrieves Users in the OpenStack Identity Service. 
- -Example to List Users - - listOpts := users.ListOpts{ - DomainID: "default", - } - - allPages, err := users.List(identityClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allUsers, err := users.ExtractUsers(allPages) - if err != nil { - panic(err) - } - - for _, user := range allUsers { - fmt.Printf("%+v\n", user) - } - -Example to Create a User - - projectID := "a99e9b4e620e4db09a2dfb6e42a01e66" - - createOpts := users.CreateOpts{ - Name: "username", - DomainID: "default", - DefaultProjectID: projectID, - Enabled: gophercloud.Enabled, - Password: "supersecret", - Extra: map[string]interface{}{ - "email": "username@example.com", - } - } - - user, err := users.Create(identityClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a User - - userID := "0fe36e73809d46aeae6705c39077b1b3" - - updateOpts := users.UpdateOpts{ - Enabled: gophercloud.Disabled, - } - - user, err := users.Update(identityClient, userID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a User - - userID := "0fe36e73809d46aeae6705c39077b1b3" - err := users.Delete(identityClient, userID).ExtractErr() - if err != nil { - panic(err) - } - -Example to List Groups a User Belongs To - - userID := "0fe36e73809d46aeae6705c39077b1b3" - - allPages, err := users.ListGroups(identityClient, userID).AllPages() - if err != nil { - panic(err) - } - - allGroups, err := groups.ExtractGroups(allPages) - if err != nil { - panic(err) - } - - for _, group := range allGroups { - fmt.Printf("%+v\n", group) - } - -Example to List Projects a User Belongs To - - userID := "0fe36e73809d46aeae6705c39077b1b3" - - allPages, err := users.ListProjects(identityClient, userID).AllPages() - if err != nil { - panic(err) - } - - allProjects, err := projects.ExtractProjects(allPages) - if err != nil { - panic(err) - } - - for _, project := range allProjects { - fmt.Printf("%+v\n", project) - } - -Example to List Users in a Group - - groupID := 
"bede500ee1124ae9b0006ff859758b3a" - listOpts := users.ListOpts{ - DomainID: "default", - } - - allPages, err := users.ListInGroup(identityClient, groupID, listOpts).AllPages() - if err != nil { - panic(err) - } - - allUsers, err := users.ExtractUsers(allPages) - if err != nil { - panic(err) - } - - for _, user := range allUsers { - fmt.Printf("%+v\n", user) - } - -*/ -package users diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go deleted file mode 100644 index 779d116fccd..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go +++ /dev/null @@ -1,242 +0,0 @@ -package users - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/identity/v3/groups" - "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" - "github.com/gophercloud/gophercloud/pagination" -) - -// Option is a specific option defined at the API to enable features -// on a user account. -type Option string - -const ( - IgnoreChangePasswordUponFirstUse Option = "ignore_change_password_upon_first_use" - IgnorePasswordExpiry Option = "ignore_password_expiry" - IgnoreLockoutFailureAttempts Option = "ignore_lockout_failure_attempts" - MultiFactorAuthRules Option = "multi_factor_auth_rules" - MultiFactorAuthEnabled Option = "multi_factor_auth_enabled" -) - -// ListOptsBuilder allows extensions to add additional parameters to -// the List request -type ListOptsBuilder interface { - ToUserListQuery() (string, error) -} - -// ListOpts provides options to filter the List results. -type ListOpts struct { - // DomainID filters the response by a domain ID. - DomainID string `q:"domain_id"` - - // Enabled filters the response by enabled users. - Enabled *bool `q:"enabled"` - - // IdpID filters the response by an Identity Provider ID. 
- IdPID string `q:"idp_id"` - - // Name filters the response by username. - Name string `q:"name"` - - // PasswordExpiresAt filters the response based on expiring passwords. - PasswordExpiresAt string `q:"password_expires_at"` - - // ProtocolID filters the response by protocol ID. - ProtocolID string `q:"protocol_id"` - - // UniqueID filters the response by unique ID. - UniqueID string `q:"unique_id"` -} - -// ToUserListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToUserListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List enumerates the Users to which the current token has access. -func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(client) - if opts != nil { - query, err := opts.ToUserListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return UserPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// Get retrieves details on a single user, by ID. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// CreateOptsBuilder allows extensions to add additional parameters to -// the Create request. -type CreateOptsBuilder interface { - ToUserCreateMap() (map[string]interface{}, error) -} - -// CreateOpts provides options used to create a user. -type CreateOpts struct { - // Name is the name of the new user. - Name string `json:"name" required:"true"` - - // DefaultProjectID is the ID of the default project of the user. - DefaultProjectID string `json:"default_project_id,omitempty"` - - // Description is a description of the user. - Description string `json:"description,omitempty"` - - // DomainID is the ID of the domain the user belongs to. 
- DomainID string `json:"domain_id,omitempty"` - - // Enabled sets the user status to enabled or disabled. - Enabled *bool `json:"enabled,omitempty"` - - // Extra is free-form extra key/value pairs to describe the user. - Extra map[string]interface{} `json:"-"` - - // Options are defined options in the API to enable certain features. - Options map[Option]interface{} `json:"options,omitempty"` - - // Password is the password of the new user. - Password string `json:"password,omitempty"` -} - -// ToUserCreateMap formats a CreateOpts into a create request. -func (opts CreateOpts) ToUserCreateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "user") - if err != nil { - return nil, err - } - - if opts.Extra != nil { - if v, ok := b["user"].(map[string]interface{}); ok { - for key, value := range opts.Extra { - v[key] = value - } - } - } - - return b, nil -} - -// Create creates a new User. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToUserCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{201}, - }) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to -// the Update request. -type UpdateOptsBuilder interface { - ToUserUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts provides options for updating a user account. -type UpdateOpts struct { - // Name is the name of the new user. - Name string `json:"name,omitempty"` - - // DefaultProjectID is the ID of the default project of the user. - DefaultProjectID string `json:"default_project_id,omitempty"` - - // Description is a description of the user. - Description string `json:"description,omitempty"` - - // DomainID is the ID of the domain the user belongs to. - DomainID string `json:"domain_id,omitempty"` - - // Enabled sets the user status to enabled or disabled. 
- Enabled *bool `json:"enabled,omitempty"` - - // Extra is free-form extra key/value pairs to describe the user. - Extra map[string]interface{} `json:"-"` - - // Options are defined options in the API to enable certain features. - Options map[Option]interface{} `json:"options,omitempty"` - - // Password is the password of the new user. - Password string `json:"password,omitempty"` -} - -// ToUserUpdateMap formats a UpdateOpts into an update request. -func (opts UpdateOpts) ToUserUpdateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "user") - if err != nil { - return nil, err - } - - if opts.Extra != nil { - if v, ok := b["user"].(map[string]interface{}); ok { - for key, value := range opts.Extra { - v[key] = value - } - } - } - - return b, nil -} - -// Update updates an existing User. -func Update(client *gophercloud.ServiceClient, userID string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToUserUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Patch(updateURL(client, userID), &b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete deletes a user. -func Delete(client *gophercloud.ServiceClient, userID string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, userID), nil) - return -} - -// ListGroups enumerates groups user belongs to. -func ListGroups(client *gophercloud.ServiceClient, userID string) pagination.Pager { - url := listGroupsURL(client, userID) - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return groups.GroupPage{LinkedPageBase: pagination.LinkedPageBase{PageResult: r}} - }) -} - -// ListProjects enumerates groups user belongs to. 
-func ListProjects(client *gophercloud.ServiceClient, userID string) pagination.Pager { - url := listProjectsURL(client, userID) - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return projects.ProjectPage{LinkedPageBase: pagination.LinkedPageBase{PageResult: r}} - }) -} - -// ListInGroup enumerates users that belong to a group. -func ListInGroup(client *gophercloud.ServiceClient, groupID string, opts ListOptsBuilder) pagination.Pager { - url := listInGroupURL(client, groupID) - if opts != nil { - query, err := opts.ToUserListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return UserPage{pagination.LinkedPageBase{PageResult: r}} - }) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go deleted file mode 100644 index c474e882b90..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go +++ /dev/null @@ -1,149 +0,0 @@ -package users - -import ( - "encoding/json" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/internal" - "github.com/gophercloud/gophercloud/pagination" -) - -// User represents a User in the OpenStack Identity Service. -type User struct { - // DefaultProjectID is the ID of the default project of the user. - DefaultProjectID string `json:"default_project_id"` - - // Description is the description of the user. - Description string `json:"description"` - - // DomainID is the domain ID the user belongs to. - DomainID string `json:"domain_id"` - - // Enabled is whether or not the user is enabled. - Enabled bool `json:"enabled"` - - // Extra is a collection of miscellaneous key/values. - Extra map[string]interface{} `json:"-"` - - // ID is the unique ID of the user. 
- ID string `json:"id"` - - // Links contains referencing links to the user. - Links map[string]interface{} `json:"links"` - - // Name is the name of the user. - Name string `json:"name"` - - // Options are a set of defined options of the user. - Options map[string]interface{} `json:"options"` - - // PasswordExpiresAt is the timestamp when the user's password expires. - PasswordExpiresAt time.Time `json:"-"` -} - -func (r *User) UnmarshalJSON(b []byte) error { - type tmp User - var s struct { - tmp - Extra map[string]interface{} `json:"extra"` - PasswordExpiresAt gophercloud.JSONRFC3339MilliNoZ `json:"password_expires_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = User(s.tmp) - - r.PasswordExpiresAt = time.Time(s.PasswordExpiresAt) - - // Collect other fields and bundle them into Extra - // but only if a field titled "extra" wasn't sent. - if s.Extra != nil { - r.Extra = s.Extra - } else { - var result interface{} - err := json.Unmarshal(b, &result) - if err != nil { - return err - } - if resultMap, ok := result.(map[string]interface{}); ok { - delete(resultMap, "password_expires_at") - r.Extra = internal.RemainingKeys(User{}, resultMap) - } - } - - return err -} - -type userResult struct { - gophercloud.Result -} - -// GetResult is the response from a Get operation. Call its Extract method -// to interpret it as a User. -type GetResult struct { - userResult -} - -// CreateResult is the response from a Create operation. Call its Extract method -// to interpret it as a User. -type CreateResult struct { - userResult -} - -// UpdateResult is the response from an Update operation. Call its Extract -// method to interpret it as a User. -type UpdateResult struct { - userResult -} - -// DeleteResult is the response from a Delete operation. Call its ExtractErr to -// determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// UserPage is a single page of User results. 
-type UserPage struct { - pagination.LinkedPageBase -} - -// IsEmpty determines whether or not a UserPage contains any results. -func (r UserPage) IsEmpty() (bool, error) { - users, err := ExtractUsers(r) - return len(users) == 0, err -} - -// NextPageURL extracts the "next" link from the links section of the result. -func (r UserPage) NextPageURL() (string, error) { - var s struct { - Links struct { - Next string `json:"next"` - Previous string `json:"previous"` - } `json:"links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return s.Links.Next, err -} - -// ExtractUsers returns a slice of Users contained in a single page of results. -func ExtractUsers(r pagination.Page) ([]User, error) { - var s struct { - Users []User `json:"users"` - } - err := (r.(UserPage)).ExtractInto(&s) - return s.Users, err -} - -// Extract interprets any user results as a User. -func (r userResult) Extract() (*User, error) { - var s struct { - User *User `json:"user"` - } - err := r.ExtractInto(&s) - return s.User, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go deleted file mode 100644 index 1db2831b5ed..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go +++ /dev/null @@ -1,35 +0,0 @@ -package users - -import "github.com/gophercloud/gophercloud" - -func listURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("users") -} - -func getURL(client *gophercloud.ServiceClient, userID string) string { - return client.ServiceURL("users", userID) -} - -func createURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("users") -} - -func updateURL(client *gophercloud.ServiceClient, userID string) string { - return client.ServiceURL("users", userID) -} - -func deleteURL(client *gophercloud.ServiceClient, userID string) string { - return 
client.ServiceURL("users", userID) -} - -func listGroupsURL(client *gophercloud.ServiceClient, userID string) string { - return client.ServiceURL("users", userID, "groups") -} - -func listProjectsURL(client *gophercloud.ServiceClient, userID string) string { - return client.ServiceURL("users", userID, "projects") -} - -func listInGroupURL(client *gophercloud.ServiceClient, groupID string) string { - return client.ServiceURL("groups", groupID, "users") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go deleted file mode 100644 index a2f5e58b89a..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Package imagedata enables management of image data. - -Example to Upload Image Data - - imageID := "da3b75d9-3f4a-40e7-8a2c-bfab23927dea" - - imageData, err := os.Open("/path/to/image/file") - if err != nil { - panic(err) - } - defer imageData.Close() - - err = imagedata.Upload(imageClient, imageID, imageData).ExtractErr() - if err != nil { - panic(err) - } - -Example to Download Image Data - - imageID := "da3b75d9-3f4a-40e7-8a2c-bfab23927dea" - - image, err := imagedata.Download(imageClient, imageID).Extract() - if err != nil { - panic(err) - } - - imageData, err := ioutil.ReadAll(image) - if err != nil { - panic(err) - } -*/ -package imagedata diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go deleted file mode 100644 index 4761e488c6b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go +++ /dev/null @@ -1,28 +0,0 @@ -package imagedata - -import ( - "io" - "net/http" - - "github.com/gophercloud/gophercloud" -) - -// Upload uploads an image file. 
-func Upload(client *gophercloud.ServiceClient, id string, data io.Reader) (r UploadResult) { - _, r.Err = client.Put(uploadURL(client, id), data, nil, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{"Content-Type": "application/octet-stream"}, - OkCodes: []int{204}, - }) - return -} - -// Download retrieves an image. -func Download(client *gophercloud.ServiceClient, id string) (r DownloadResult) { - var resp *http.Response - resp, r.Err = client.Get(downloadURL(client, id), nil, nil) - if resp != nil { - r.Body = resp.Body - r.Header = resp.Header - } - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go deleted file mode 100644 index 895d28ba8ae..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go +++ /dev/null @@ -1,28 +0,0 @@ -package imagedata - -import ( - "fmt" - "io" - - "github.com/gophercloud/gophercloud" -) - -// UploadResult is the result of an upload image operation. Call its ExtractErr -// method to determine if the request succeeded or failed. -type UploadResult struct { - gophercloud.ErrResult -} - -// DownloadResult is the result of a download image operation. Call its Extract -// method to gain access to the image data. 
-type DownloadResult struct { - gophercloud.Result -} - -// Extract builds images model from io.Reader -func (r DownloadResult) Extract() (io.Reader, error) { - if r, ok := r.Body.(io.Reader); ok { - return r, nil - } - return nil, fmt.Errorf("Expected io.Reader but got: %T(%#v)", r.Body, r.Body) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go deleted file mode 100644 index ccd6416e53e..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go +++ /dev/null @@ -1,13 +0,0 @@ -package imagedata - -import "github.com/gophercloud/gophercloud" - -// `imageDataURL(c,i)` is the URL for the binary image data for the -// image identified by ID `i` in the service `c`. -func uploadURL(c *gophercloud.ServiceClient, imageID string) string { - return c.ServiceURL("images", imageID, "file") -} - -func downloadURL(c *gophercloud.ServiceClient, imageID string) string { - return uploadURL(c, imageID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go deleted file mode 100644 index 14da9ac90da..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Package images enables management and retrieval of images from the OpenStack -Image Service. 
- -Example to List Images - - images.ListOpts{ - Owner: "a7509e1ae65945fda83f3e52c6296017", - } - - allPages, err := images.List(imagesClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allImages, err := images.ExtractImages(allPages) - if err != nil { - panic(err) - } - - for _, image := range allImages { - fmt.Printf("%+v\n", image) - } - -Example to Create an Image - - createOpts := images.CreateOpts{ - Name: "image_name", - Visibility: images.ImageVisibilityPrivate, - } - - image, err := images.Create(imageClient, createOpts) - if err != nil { - panic(err) - } - -Example to Update an Image - - imageID := "1bea47ed-f6a9-463b-b423-14b9cca9ad27" - - updateOpts := images.UpdateOpts{ - images.ReplaceImageName{ - NewName: "new_name", - }, - } - - image, err := images.Update(imageClient, imageID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete an Image - - imageID := "1bea47ed-f6a9-463b-b423-14b9cca9ad27" - err := images.Delete(imageClient, imageID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package images diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go deleted file mode 100644 index 387e791cebc..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go +++ /dev/null @@ -1,258 +0,0 @@ -package images - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToImageListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the server attributes you want to see returned. Marker and Limit are used -// for pagination. 
-// -// http://developer.openstack.org/api-ref-image-v2.html -type ListOpts struct { - // Integer value for the limit of values to return. - Limit int `q:"limit"` - - // UUID of the server at which you want to set a marker. - Marker string `q:"marker"` - - // Name filters on the name of the image. - Name string `q:"name"` - - // Visibility filters on the visibility of the image. - Visibility ImageVisibility `q:"visibility"` - - // MemberStatus filters on the member status of the image. - MemberStatus ImageMemberStatus `q:"member_status"` - - // Owner filters on the project ID of the image. - Owner string `q:"owner"` - - // Status filters on the status of the image. - Status ImageStatus `q:"status"` - - // SizeMin filters on the size_min image property. - SizeMin int64 `q:"size_min"` - - // SizeMax filters on the size_max image property. - SizeMax int64 `q:"size_max"` - - // SortKey will sort the results based on a specified image property. - SortKey string `q:"sort_key"` - - // SortDir will sort the list results either ascending or decending. - SortDir string `q:"sort_dir"` - Tag string `q:"tag"` -} - -// ToImageListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToImageListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List implements image list request. -func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(c) - if opts != nil { - query, err := opts.ToImageListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return ImagePage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add parameters to the Create request. 
-type CreateOptsBuilder interface { - // Returns value that can be passed to json.Marshal - ToImageCreateMap() (map[string]interface{}, error) -} - -// CreateOpts represents options used to create an image. -type CreateOpts struct { - // Name is the name of the new image. - Name string `json:"name" required:"true"` - - // Id is the the image ID. - ID string `json:"id,omitempty"` - - // Visibility defines who can see/use the image. - Visibility *ImageVisibility `json:"visibility,omitempty"` - - // Tags is a set of image tags. - Tags []string `json:"tags,omitempty"` - - // ContainerFormat is the format of the - // container. Valid values are ami, ari, aki, bare, and ovf. - ContainerFormat string `json:"container_format,omitempty"` - - // DiskFormat is the format of the disk. If set, - // valid values are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, - // and iso. - DiskFormat string `json:"disk_format,omitempty"` - - // MinDisk is the amount of disk space in - // GB that is required to boot the image. - MinDisk int `json:"min_disk,omitempty"` - - // MinRAM is the amount of RAM in MB that - // is required to boot the image. - MinRAM int `json:"min_ram,omitempty"` - - // protected is whether the image is not deletable. - Protected *bool `json:"protected,omitempty"` - - // properties is a set of properties, if any, that - // are associated with the image. - Properties map[string]string `json:"-"` -} - -// ToImageCreateMap assembles a request body based on the contents of -// a CreateOpts. -func (opts CreateOpts) ToImageCreateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "") - if err != nil { - return nil, err - } - - if opts.Properties != nil { - for k, v := range opts.Properties { - b[k] = v - } - } - return b, nil -} - -// Create implements create image request. 
-func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToImageCreateMap() - if err != nil { - r.Err = err - return r - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{201}}) - return -} - -// Delete implements image delete request. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} - -// Get implements image get request. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// Update implements image updated request. -func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToImageUpdateMap() - if err != nil { - r.Err = err - return r - } - _, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - MoreHeaders: map[string]string{"Content-Type": "application/openstack-images-v2.1-json-patch"}, - }) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - // returns value implementing json.Marshaler which when marshaled matches - // the patch schema: - // http://specs.openstack.org/openstack/glance-specs/specs/api/v2/http-patch-image-api-v2.html - ToImageUpdateMap() ([]interface{}, error) -} - -// UpdateOpts implements UpdateOpts -type UpdateOpts []Patch - -// ToImageUpdateMap assembles a request body based on the contents of -// UpdateOpts. -func (opts UpdateOpts) ToImageUpdateMap() ([]interface{}, error) { - m := make([]interface{}, len(opts)) - for i, patch := range opts { - patchJSON := patch.ToImagePatchMap() - m[i] = patchJSON - } - return m, nil -} - -// Patch represents a single update to an existing image. Multiple updates -// to an image can be submitted at the same time. 
-type Patch interface { - ToImagePatchMap() map[string]interface{} -} - -// UpdateVisibility represents an updated visibility property request. -type UpdateVisibility struct { - Visibility ImageVisibility -} - -// ToImagePatchMap assembles a request body based on UpdateVisibility. -func (u UpdateVisibility) ToImagePatchMap() map[string]interface{} { - return map[string]interface{}{ - "op": "replace", - "path": "/visibility", - "value": u.Visibility, - } -} - -// ReplaceImageName represents an updated image_name property request. -type ReplaceImageName struct { - NewName string -} - -// ToImagePatchMap assembles a request body based on ReplaceImageName. -func (r ReplaceImageName) ToImagePatchMap() map[string]interface{} { - return map[string]interface{}{ - "op": "replace", - "path": "/name", - "value": r.NewName, - } -} - -// ReplaceImageChecksum represents an updated checksum property request. -type ReplaceImageChecksum struct { - Checksum string -} - -// ReplaceImageChecksum assembles a request body based on ReplaceImageChecksum. -func (rc ReplaceImageChecksum) ToImagePatchMap() map[string]interface{} { - return map[string]interface{}{ - "op": "replace", - "path": "/checksum", - "value": rc.Checksum, - } -} - -// ReplaceImageTags represents an updated tags property request. -type ReplaceImageTags struct { - NewTags []string -} - -// ToImagePatchMap assembles a request body based on ReplaceImageTags. 
-func (r ReplaceImageTags) ToImagePatchMap() map[string]interface{} { - return map[string]interface{}{ - "op": "replace", - "path": "/tags", - "value": r.NewTags, - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go deleted file mode 100644 index cd819ec9c82..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go +++ /dev/null @@ -1,200 +0,0 @@ -package images - -import ( - "encoding/json" - "fmt" - "reflect" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/internal" - "github.com/gophercloud/gophercloud/pagination" -) - -// Image represents an image found in the OpenStack Image service. -type Image struct { - // ID is the image UUID. - ID string `json:"id"` - - // Name is the human-readable display name for the image. - Name string `json:"name"` - - // Status is the image status. It can be "queued" or "active" - // See imageservice/v2/images/type.go - Status ImageStatus `json:"status"` - - // Tags is a list of image tags. Tags are arbitrarily defined strings - // attached to an image. - Tags []string `json:"tags"` - - // ContainerFormat is the format of the container. - // Valid values are ami, ari, aki, bare, and ovf. - ContainerFormat string `json:"container_format"` - - // DiskFormat is the format of the disk. - // If set, valid values are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, - // and iso. - DiskFormat string `json:"disk_format"` - - // MinDiskGigabytes is the amount of disk space in GB that is required to - // boot the image. - MinDiskGigabytes int `json:"min_disk"` - - // MinRAMMegabytes [optional] is the amount of RAM in MB that is required to - // boot the image. - MinRAMMegabytes int `json:"min_ram"` - - // Owner is the tenant ID the image belongs to. 
- Owner string `json:"owner"` - - // Protected is whether the image is deletable or not. - Protected bool `json:"protected"` - - // Visibility defines who can see/use the image. - Visibility ImageVisibility `json:"visibility"` - - // Checksum is the checksum of the data that's associated with the image. - Checksum string `json:"checksum"` - - // SizeBytes is the size of the data that's associated with the image. - SizeBytes int64 `json:"size"` - - // Metadata is a set of metadata associated with the image. - // Image metadata allow for meaningfully define the image properties - // and tags. - // See http://docs.openstack.org/developer/glance/metadefs-concepts.html. - Metadata map[string]string `json:"metadata"` - - // Properties is a set of key-value pairs, if any, that are associated with - // the image. - Properties map[string]interface{} `json:"-"` - - // CreatedAt is the date when the image has been created. - CreatedAt time.Time `json:"created_at"` - - // UpdatedAt is the date when the last change has been made to the image or - // it's properties. - UpdatedAt time.Time `json:"updated_at"` - - // File is the trailing path after the glance endpoint that represent the - // location of the image or the path to retrieve it. - File string `json:"file"` - - // Schema is the path to the JSON-schema that represent the image or image - // entity. 
- Schema string `json:"schema"` - - // VirtualSize is the virtual size of the image - VirtualSize int64 `json:"virtual_size"` -} - -func (r *Image) UnmarshalJSON(b []byte) error { - type tmp Image - var s struct { - tmp - SizeBytes interface{} `json:"size"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Image(s.tmp) - - switch t := s.SizeBytes.(type) { - case nil: - return nil - case float32: - r.SizeBytes = int64(t) - case float64: - r.SizeBytes = int64(t) - default: - return fmt.Errorf("Unknown type for SizeBytes: %v (value: %v)", reflect.TypeOf(t), t) - } - - // Bundle all other fields into Properties - var result interface{} - err = json.Unmarshal(b, &result) - if err != nil { - return err - } - if resultMap, ok := result.(map[string]interface{}); ok { - delete(resultMap, "self") - r.Properties = internal.RemainingKeys(Image{}, resultMap) - } - - return err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract interprets any commonResult as an Image. -func (r commonResult) Extract() (*Image, error) { - var s *Image - err := r.ExtractInto(&s) - return s, err -} - -// CreateResult represents the result of a Create operation. Call its Extract -// method to interpret it as an Image. -type CreateResult struct { - commonResult -} - -// UpdateResult represents the result of an Update operation. Call its Extract -// method to interpret it as an Image. -type UpdateResult struct { - commonResult -} - -// GetResult represents the result of a Get operation. Call its Extract -// method to interpret it as an Image. -type GetResult struct { - commonResult -} - -// DeleteResult represents the result of a Delete operation. Call its -// ExtractErr method to interpret it as an Image. -type DeleteResult struct { - gophercloud.ErrResult -} - -// ImagePage represents the results of a List request. -type ImagePage struct { - pagination.LinkedPageBase -} - -// IsEmpty returns true if an ImagePage contains no Images results. 
-func (r ImagePage) IsEmpty() (bool, error) { - images, err := ExtractImages(r) - return len(images) == 0, err -} - -// NextPageURL uses the response's embedded link reference to navigate to -// the next page of results. -func (r ImagePage) NextPageURL() (string, error) { - var s struct { - Next string `json:"next"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - - if s.Next == "" { - return "", nil - } - - return nextPageURL(r.URL.String(), s.Next) -} - -// ExtractImages interprets the results of a single page from a List() call, -// producing a slice of Image entities. -func ExtractImages(r pagination.Page) ([]Image, error) { - var s struct { - Images []Image `json:"images"` - } - err := (r.(ImagePage)).ExtractInto(&s) - return s.Images, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go deleted file mode 100644 index 2e01b38f5c4..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go +++ /dev/null @@ -1,79 +0,0 @@ -package images - -// ImageStatus image statuses -// http://docs.openstack.org/developer/glance/statuses.html -type ImageStatus string - -const ( - // ImageStatusQueued is a status for an image which identifier has - // been reserved for an image in the image registry. - ImageStatusQueued ImageStatus = "queued" - - // ImageStatusSaving denotes that an image’s raw data is currently being - // uploaded to Glance - ImageStatusSaving ImageStatus = "saving" - - // ImageStatusActive denotes an image that is fully available in Glance. - ImageStatusActive ImageStatus = "active" - - // ImageStatusKilled denotes that an error occurred during the uploading - // of an image’s data, and that the image is not readable. - ImageStatusKilled ImageStatus = "killed" - - // ImageStatusDeleted is used for an image that is no longer available to use. 
- // The image information is retained in the image registry. - ImageStatusDeleted ImageStatus = "deleted" - - // ImageStatusPendingDelete is similar to Delete, but the image is not yet - // deleted. - ImageStatusPendingDelete ImageStatus = "pending_delete" - - // ImageStatusDeactivated denotes that access to image data is not allowed to - // any non-admin user. - ImageStatusDeactivated ImageStatus = "deactivated" -) - -// ImageVisibility denotes an image that is fully available in Glance. -// This occurs when the image data is uploaded, or the image size is explicitly -// set to zero on creation. -// According to design -// https://wiki.openstack.org/wiki/Glance-v2-community-image-visibility-design -type ImageVisibility string - -const ( - // ImageVisibilityPublic all users - ImageVisibilityPublic ImageVisibility = "public" - - // ImageVisibilityPrivate users with tenantId == tenantId(owner) - ImageVisibilityPrivate ImageVisibility = "private" - - // ImageVisibilityShared images are visible to: - // - users with tenantId == tenantId(owner) - // - users with tenantId in the member-list of the image - // - users with tenantId in the member-list with member_status == 'accepted' - ImageVisibilityShared ImageVisibility = "shared" - - // ImageVisibilityCommunity images: - // - all users can see and boot it - // - users with tenantId in the member-list of the image with - // member_status == 'accepted' have this image in their default image-list. - ImageVisibilityCommunity ImageVisibility = "community" -) - -// MemberStatus is a status for adding a new member (tenant) to an image -// member list. -type ImageMemberStatus string - -const ( - // ImageMemberStatusAccepted is the status for an accepted image member. 
- ImageMemberStatusAccepted ImageMemberStatus = "accepted" - - // ImageMemberStatusPending shows that the member addition is pending - ImageMemberStatusPending ImageMemberStatus = "pending" - - // ImageMemberStatusAccepted is the status for a rejected image member - ImageMemberStatusRejected ImageMemberStatus = "rejected" - - // ImageMemberStatusAll - ImageMemberStatusAll ImageMemberStatus = "all" -) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go deleted file mode 100644 index bf7cea1ef89..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go +++ /dev/null @@ -1,51 +0,0 @@ -package images - -import ( - "net/url" - - "github.com/gophercloud/gophercloud" -) - -// `listURL` is a pure function. `listURL(c)` is a URL for which a GET -// request will respond with a list of images in the service `c`. -func listURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("images") -} - -func createURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("images") -} - -// `imageURL(c,i)` is the URL for the image identified by ID `i` in -// the service `c`. -func imageURL(c *gophercloud.ServiceClient, imageID string) string { - return c.ServiceURL("images", imageID) -} - -// `getURL(c,i)` is a URL for which a GET request will respond with -// information about the image identified by ID `i` in the service -// `c`. 
-func getURL(c *gophercloud.ServiceClient, imageID string) string { - return imageURL(c, imageID) -} - -func updateURL(c *gophercloud.ServiceClient, imageID string) string { - return imageURL(c, imageID) -} - -func deleteURL(c *gophercloud.ServiceClient, imageID string) string { - return imageURL(c, imageID) -} - -// builds next page full url based on current url -func nextPageURL(currentURL string, next string) (string, error) { - base, err := url.Parse(currentURL) - if err != nil { - return "", err - } - rel, err := url.Parse(next) - if err != nil { - return "", err - } - return base.ResolveReference(rel).String(), nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go deleted file mode 100644 index c01070edc8c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Package firewalls allows management and retrieval of firewalls from the -OpenStack Networking Service. 
- -Example to List Firewalls - - listOpts := firewalls.ListOpts{ - TenantID: "tenant-id", - } - - allPages, err := firewalls.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allFirewalls, err := firewalls.ExtractFirewalls(allPages) - if err != nil { - panic(err) - } - - for _, fw := range allFirewalls { - fmt.Printf("%+v\n", fw) - } - -Example to Create a Firewall - - createOpts := firewalls.CreateOpts{ - Name: "firewall_1", - Description: "A firewall", - PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", - AdminStateUp: gophercloud.Enabled, - } - - firewall, err := firewalls.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Firewall - - firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" - - updateOpts := firewalls.UpdateOpts{ - AdminStateUp: gophercloud.Disabled, - } - - firewall, err := firewalls.Update(networkClient, firewallID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Firewall - - firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" - err := firewalls.Delete(networkClient, firewallID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package firewalls diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go deleted file mode 100644 index dd92bb20dbe..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go +++ /dev/null @@ -1,11 +0,0 @@ -package firewalls - -import "fmt" - -func err(str string) error { - return fmt.Errorf("%s", str) -} - -var ( - errPolicyRequired = err("A policy ID is required") -) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go deleted file mode 100644 index aa30194668a..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go +++ /dev/null @@ -1,137 +0,0 @@ -package firewalls - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToFirewallListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the firewall attributes you want to see returned. SortKey allows you to sort -// by a particular firewall attribute. SortDir sets the direction, and is either -// `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - TenantID string `q:"tenant_id"` - Name string `q:"name"` - Description string `q:"description"` - AdminStateUp bool `q:"admin_state_up"` - Shared bool `q:"shared"` - PolicyID string `q:"firewall_policy_id"` - ID string `q:"id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// ToFirewallListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToFirewallListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns a Pager which allows you to iterate over a collection of -// firewalls. It accepts a ListOpts struct, which allows you to filter -// and sort the returned collection for greater efficiency. -// -// Default policy settings return only those firewalls that are owned by the -// tenant who submits the request, unless an admin user submits the request. 
-func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := rootURL(c) - if opts != nil { - query, err := opts.ToFirewallListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return FirewallPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToFirewallCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new firewall. -type CreateOpts struct { - PolicyID string `json:"firewall_policy_id" required:"true"` - // TenantID specifies a tenant to own the firewall. The caller must have - // an admin role in order to set this. Otherwise, this field is left unset - // and the caller will be the owner. - TenantID string `json:"tenant_id,omitempty"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - AdminStateUp *bool `json:"admin_state_up,omitempty"` - Shared *bool `json:"shared,omitempty"` -} - -// ToFirewallCreateMap casts a CreateOpts struct to a map. -func (opts CreateOpts) ToFirewallCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "firewall") -} - -// Create accepts a CreateOpts struct and uses the values to create a new firewall. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToFirewallCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular firewall based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. 
-type UpdateOptsBuilder interface { - ToFirewallUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains the values used when updating a firewall. -type UpdateOpts struct { - PolicyID string `json:"firewall_policy_id" required:"true"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - AdminStateUp *bool `json:"admin_state_up,omitempty"` - Shared *bool `json:"shared,omitempty"` -} - -// ToFirewallUpdateMap casts a CreateOpts struct to a map. -func (opts UpdateOpts) ToFirewallUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "firewall") -} - -// Update allows firewalls to be updated. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToFirewallUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete will permanently delete a particular firewall based on its unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go deleted file mode 100644 index f6786a44331..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go +++ /dev/null @@ -1,95 +0,0 @@ -package firewalls - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Firewall is an OpenStack firewall. 
-type Firewall struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AdminStateUp bool `json:"admin_state_up"` - Status string `json:"status"` - PolicyID string `json:"firewall_policy_id"` - TenantID string `json:"tenant_id"` -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a firewall. -func (r commonResult) Extract() (*Firewall, error) { - var s Firewall - err := r.ExtractInto(&s) - return &s, err -} - -func (r commonResult) ExtractInto(v interface{}) error { - return r.Result.ExtractIntoStructPtr(v, "firewall") -} - -func ExtractFirewallsInto(r pagination.Page, v interface{}) error { - return r.(FirewallPage).Result.ExtractIntoSlicePtr(v, "firewalls") -} - -// FirewallPage is the page returned by a pager when traversing over a -// collection of firewalls. -type FirewallPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of firewalls has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r FirewallPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"firewalls_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a FirewallPage struct is empty. -func (r FirewallPage) IsEmpty() (bool, error) { - is, err := ExtractFirewalls(r) - return len(is) == 0, err -} - -// ExtractFirewalls accepts a Page struct, specifically a FirewallPage struct, -// and extracts the elements into a slice of Firewall structs. In other words, -// a generic collection is mapped into a relevant slice. 
-func ExtractFirewalls(r pagination.Page) ([]Firewall, error) { - var s []Firewall - err := ExtractFirewallsInto(r, &s) - return s, err -} - -// GetResult represents the result of a Get operation. Call its Extract -// method to interpret it as a Firewall. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an Update operation. Call its Extract -// method to interpret it as a Firewall. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the operation succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// CreateResult represents the result of a Create operation. Call its Extract -// method to interpret it as a Firewall. -type CreateResult struct { - commonResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go deleted file mode 100644 index 807ea1ab656..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go +++ /dev/null @@ -1,16 +0,0 @@ -package firewalls - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "fw" - resourcePath = "firewalls" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go deleted file mode 100644 index ae824491f18..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go +++ /dev/null @@ 
-1,84 +0,0 @@ -/* -Package policies allows management and retrieval of Firewall Policies in the -OpenStack Networking Service. - -Example to List Policies - - listOpts := policies.ListOpts{ - TenantID: "966b3c7d36a24facaf20b7e458bf2192", - } - - allPages, err := policies.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allPolicies, err := policies.ExtractPolicies(allPages) - if err != nil { - panic(err) - } - - for _, policy := range allPolicies { - fmt.Printf("%+v\n", policy) - } - -Example to Create a Policy - - createOpts := policies.CreateOpts{ - Name: "policy_1", - Description: "A policy", - Rules: []string{ - "98a58c87-76be-ae7c-a74e-b77fffb88d95", - "7c4f087a-ed46-4ea8-8040-11ca460a61c0", - } - } - - policy, err := policies.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Policy - - policyID := "38aee955-6283-4279-b091-8b9c828000ec" - - updateOpts := policies.UpdateOpts{ - Description: "New Description", - } - - policy, err := policies.Update(networkClient, policyID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Policy - - policyID := "38aee955-6283-4279-b091-8b9c828000ec" - err := policies.Delete(networkClient, policyID).ExtractErr() - if err != nil { - panic(err) - } - -Example to Add a Rule to a Policy - - policyID := "38aee955-6283-4279-b091-8b9c828000ec" - ruleOpts := policies.InsertRuleOpts{ - ID: "98a58c87-76be-ae7c-a74e-b77fffb88d95", - } - - policy, err := policies.AddRule(networkClient, policyID, ruleOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Rule from a Policy - - policyID := "38aee955-6283-4279-b091-8b9c828000ec" - ruleID := "98a58c87-76be-ae7c-a74e-b77fffb88d95", - - policy, err := policies.RemoveRule(networkClient, policyID, ruleID).Extract() - if err != nil { - panic(err) - } -*/ -package policies diff --git 
a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go deleted file mode 100644 index b1a6a5598b6..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go +++ /dev/null @@ -1,177 +0,0 @@ -package policies - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToPolicyListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the firewall policy attributes you want to see returned. SortKey allows you -// to sort by a particular firewall policy attribute. SortDir sets the direction, -// and is either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - TenantID string `q:"tenant_id"` - Name string `q:"name"` - Description string `q:"description"` - Shared *bool `q:"shared"` - Audited *bool `q:"audited"` - ID string `q:"id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// ToPolicyListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToPolicyListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns a Pager which allows you to iterate over a collection of -// firewall policies. It accepts a ListOpts struct, which allows you to filter -// and sort the returned collection for greater efficiency. 
-// -// Default policy settings return only those firewall policies that are owned by -// the tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := rootURL(c) - if opts != nil { - query, err := opts.ToPolicyListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return PolicyPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToFirewallPolicyCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new firewall policy. -type CreateOpts struct { - // TenantID specifies a tenant to own the firewall. The caller must have - // an admin role in order to set this. Otherwise, this field is left unset - // and the caller will be the owner. - TenantID string `json:"tenant_id,omitempty"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - Shared *bool `json:"shared,omitempty"` - Audited *bool `json:"audited,omitempty"` - Rules []string `json:"firewall_rules,omitempty"` -} - -// ToFirewallPolicyCreateMap casts a CreateOpts struct to a map. -func (opts CreateOpts) ToFirewallPolicyCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "firewall_policy") -} - -// Create accepts a CreateOpts struct and uses the values to create a new -// firewall policy. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToFirewallPolicyCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular firewall policy based on its unique ID. 
-func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToFirewallPolicyUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains the values used when updating a firewall policy. -type UpdateOpts struct { - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - Shared *bool `json:"shared,omitempty"` - Audited *bool `json:"audited,omitempty"` - Rules []string `json:"firewall_rules,omitempty"` -} - -// ToFirewallPolicyUpdateMap casts a CreateOpts struct to a map. -func (opts UpdateOpts) ToFirewallPolicyUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "firewall_policy") -} - -// Update allows firewall policies to be updated. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToFirewallPolicyUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete will permanently delete a particular firewall policy based on its -// unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} - -// InsertRuleOptsBuilder allows extensions to add additional parameters to the -// InsertRule request. -type InsertRuleOptsBuilder interface { - ToFirewallPolicyInsertRuleMap() (map[string]interface{}, error) -} - -// InsertRuleOpts contains the values used when updating a policy's rules. 
-type InsertRuleOpts struct { - ID string `json:"firewall_rule_id" required:"true"` - BeforeRuleID string `json:"insert_before,omitempty"` - AfterRuleID string `json:"insert_after,omitempty"` -} - -func (opts InsertRuleOpts) ToFirewallPolicyInsertRuleMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "") -} - -// AddRule will add a rule to a policy. -func AddRule(c *gophercloud.ServiceClient, id string, opts InsertRuleOptsBuilder) (r InsertRuleResult) { - b, err := opts.ToFirewallPolicyInsertRuleMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(insertURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// RemoveRule will add a rule to a policy. -func RemoveRule(c *gophercloud.ServiceClient, id, ruleID string) (r RemoveRuleResult) { - b := map[string]interface{}{"firewall_rule_id": ruleID} - _, r.Err = c.Put(removeURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go deleted file mode 100644 index bbe22b13612..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go +++ /dev/null @@ -1,103 +0,0 @@ -package policies - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Policy is a firewall policy. -type Policy struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - TenantID string `json:"tenant_id"` - Audited bool `json:"audited"` - Shared bool `json:"shared"` - Rules []string `json:"firewall_rules,omitempty"` -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a firewall policy. 
-func (r commonResult) Extract() (*Policy, error) { - var s struct { - Policy *Policy `json:"firewall_policy"` - } - err := r.ExtractInto(&s) - return s.Policy, err -} - -// PolicyPage is the page returned by a pager when traversing over a -// collection of firewall policies. -type PolicyPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of firewall policies has -// reached the end of a page and the pager seeks to traverse over a new one. -// In order to do this, it needs to construct the next page's URL. -func (r PolicyPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"firewall_policies_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a PolicyPage struct is empty. -func (r PolicyPage) IsEmpty() (bool, error) { - is, err := ExtractPolicies(r) - return len(is) == 0, err -} - -// ExtractPolicies accepts a Page struct, specifically a Policy struct, -// and extracts the elements into a slice of Policy structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractPolicies(r pagination.Page) ([]Policy, error) { - var s struct { - Policies []Policy `json:"firewall_policies"` - } - err := (r.(PolicyPage)).ExtractInto(&s) - return s.Policies, err -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Policy. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its -// Extract method to interpret it as a Policy. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the operation succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// CreateResult represents the result of a create operation. 
Call its Extract -// method to interpret it as a Policy. -type CreateResult struct { - commonResult -} - -// InsertRuleResult represents the result of an InsertRule operation. Call its -// Extract method to interpret it as a Policy. -type InsertRuleResult struct { - commonResult -} - -// RemoveRuleResult represents the result of a RemoveRule operation. Call its -// Extract method to interpret it as a Policy. -type RemoveRuleResult struct { - commonResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go deleted file mode 100644 index c252b79dd0c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go +++ /dev/null @@ -1,26 +0,0 @@ -package policies - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "fw" - resourcePath = "firewall_policies" - insertPath = "insert_rule" - removePath = "remove_rule" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} - -func insertURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id, insertPath) -} - -func removeURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id, removePath) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go deleted file mode 100644 index 4f0a779eec9..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Package 
routerinsertion implements the fwaasrouterinsertion Firewall extension. -It is used to manage the router information of a firewall. - -Example to List Firewalls with Router Information - - type FirewallsWithRouters struct { - firewalls.Firewall - routerinsertion.FirewallExt - } - - var allFirewalls []FirewallsWithRouters - - allPages, err := firewalls.List(networkClient, nil).AllPages() - if err != nil { - panic(err) - } - - err = firewalls.ExtractFirewallsInto(allPages, &allFirewalls) - if err != nil { - panic(err) - } - - for _, fw := range allFirewalls { - fmt.Printf("%+v\n", fw) - } - -Example to Create a Firewall with a Router - - firewallCreateOpts := firewalls.CreateOpts{ - Name: "firewall_1", - PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", - } - - createOpts := routerinsertion.CreateOptsExt{ - CreateOptsBuilder: firewallCreateOpts, - RouterIDs: []string{ - "8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8", - }, - } - - firewall, err := firewalls.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Firewall with a Router - - firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" - - firewallUpdateOpts := firewalls.UpdateOpts{ - Description: "updated firewall", - PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", - } - - updateOpts := routerinsertion.UpdateOptsExt{ - UpdateOptsBuilder: firewallUpdateOpts, - RouterIDs: []string{ - "8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8", - }, - } - - firewall, err := firewalls.Update(networkClient, firewallID, updateOpts).Extract() - if err != nil { - panic(err) - } -*/ -package routerinsertion diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go deleted file mode 100644 index b1f6d76e387..00000000000 --- 
a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go +++ /dev/null @@ -1,43 +0,0 @@ -package routerinsertion - -import ( - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" -) - -// CreateOptsExt adds the RouterIDs option to the base CreateOpts. -type CreateOptsExt struct { - firewalls.CreateOptsBuilder - RouterIDs []string `json:"router_ids"` -} - -// ToFirewallCreateMap adds router_ids to the base firewall creation options. -func (opts CreateOptsExt) ToFirewallCreateMap() (map[string]interface{}, error) { - base, err := opts.CreateOptsBuilder.ToFirewallCreateMap() - if err != nil { - return nil, err - } - - firewallMap := base["firewall"].(map[string]interface{}) - firewallMap["router_ids"] = opts.RouterIDs - - return base, nil -} - -// UpdateOptsExt adds the RouterIDs option to the base UpdateOpts. -type UpdateOptsExt struct { - firewalls.UpdateOptsBuilder - RouterIDs []string `json:"router_ids"` -} - -// ToFirewallUpdateMap adds router_ids to the base firewall update options. 
-func (opts UpdateOptsExt) ToFirewallUpdateMap() (map[string]interface{}, error) { - base, err := opts.UpdateOptsBuilder.ToFirewallUpdateMap() - if err != nil { - return nil, err - } - - firewallMap := base["firewall"].(map[string]interface{}) - firewallMap["router_ids"] = opts.RouterIDs - - return base, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go deleted file mode 100644 index 85c288e51e6..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go +++ /dev/null @@ -1,7 +0,0 @@ -package routerinsertion - -// FirewallExt is an extension to the base Firewall object -type FirewallExt struct { - // RouterIDs are the routers that the firewall is attached to. - RouterIDs []string `json:"router_ids"` -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go deleted file mode 100644 index 3351a3e5c95..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Package rules enables management and retrieval of Firewall Rules in the -OpenStack Networking Service. 
- -Example to List Rules - - listOpts := rules.ListOpts{ - Protocol: rules.ProtocolAny, - } - - allPages, err := rules.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allRules, err := rules.ExtractRules(allPages) - if err != nil { - panic(err) - } - - for _, rule := range allRules { - fmt.Printf("%+v\n", rule) - } - -Example to Create a Rule - - createOpts := rules.CreateOpts{ - Action: "allow", - Protocol: rules.ProtocolTCP, - Description: "ssh", - DestinationPort: 22, - DestinationIPAddress: "192.168.1.0/24", - } - - rule, err := rules.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Rule - - ruleID := "f03bd950-6c56-4f5e-a307-45967078f507" - newPort := 80 - newDescription := "http" - - updateOpts := rules.UpdateOpts{ - Description: &newDescription, - port: &newPort, - } - - rule, err := rules.Update(networkClient, ruleID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Rule - - ruleID := "f03bd950-6c56-4f5e-a307-45967078f507" - err := rules.Delete(networkClient, ruleID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package rules diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go deleted file mode 100644 index 0b29d39fd9e..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go +++ /dev/null @@ -1,12 +0,0 @@ -package rules - -import "fmt" - -func err(str string) error { - return fmt.Errorf("%s", str) -} - -var ( - errProtocolRequired = err("A protocol is required (tcp, udp, icmp or any)") - errActionRequired = err("An action is required (allow or deny)") -) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go deleted file mode 100644 index 83bbe99b6d2..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go +++ /dev/null @@ -1,188 +0,0 @@ -package rules - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -type ( - // Protocol represents a valid rule protocol. - Protocol string -) - -const ( - // ProtocolAny is to allow any protocol. - ProtocolAny Protocol = "any" - - // ProtocolICMP is to allow the ICMP protocol. - ProtocolICMP Protocol = "icmp" - - // ProtocolTCP is to allow the TCP protocol. - ProtocolTCP Protocol = "tcp" - - // ProtocolUDP is to allow the UDP protocol. - ProtocolUDP Protocol = "udp" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToRuleListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the Firewall rule attributes you want to see returned. SortKey allows you to -// sort by a particular firewall rule attribute. SortDir sets the direction, and -// is either `asc' or `desc'. Marker and Limit are used for pagination. 
-type ListOpts struct { - TenantID string `q:"tenant_id"` - Name string `q:"name"` - Description string `q:"description"` - Protocol string `q:"protocol"` - Action string `q:"action"` - IPVersion int `q:"ip_version"` - SourceIPAddress string `q:"source_ip_address"` - DestinationIPAddress string `q:"destination_ip_address"` - SourcePort string `q:"source_port"` - DestinationPort string `q:"destination_port"` - Enabled bool `q:"enabled"` - ID string `q:"id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// ToRuleListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToRuleListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return "", err - } - return q.String(), nil -} - -// List returns a Pager which allows you to iterate over a collection of -// firewall rules. It accepts a ListOpts struct, which allows you to filter -// and sort the returned collection for greater efficiency. -// -// Default policy settings return only those firewall rules that are owned by -// the tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := rootURL(c) - - if opts != nil { - query, err := opts.ToRuleListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return RulePage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToRuleCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new firewall rule. 
-type CreateOpts struct { - Protocol Protocol `json:"protocol" required:"true"` - Action string `json:"action" required:"true"` - TenantID string `json:"tenant_id,omitempty"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - IPVersion gophercloud.IPVersion `json:"ip_version,omitempty"` - SourceIPAddress string `json:"source_ip_address,omitempty"` - DestinationIPAddress string `json:"destination_ip_address,omitempty"` - SourcePort string `json:"source_port,omitempty"` - DestinationPort string `json:"destination_port,omitempty"` - Shared *bool `json:"shared,omitempty"` - Enabled *bool `json:"enabled,omitempty"` -} - -// ToRuleCreateMap casts a CreateOpts struct to a map. -func (opts CreateOpts) ToRuleCreateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "firewall_rule") - if err != nil { - return nil, err - } - - if m := b["firewall_rule"].(map[string]interface{}); m["protocol"] == "any" { - m["protocol"] = nil - } - - return b, nil -} - -// Create accepts a CreateOpts struct and uses the values to create a new -// firewall rule. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToRuleCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular firewall rule based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToRuleUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains the values used when updating a firewall rule. -// These fields are all pointers so that unset fields will not cause the -// existing Rule attribute to be removed. 
-type UpdateOpts struct { - Protocol *string `json:"protocol,omitempty"` - Action *string `json:"action,omitempty"` - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - IPVersion *gophercloud.IPVersion `json:"ip_version,omitempty"` - SourceIPAddress *string `json:"source_ip_address,omitempty"` - DestinationIPAddress *string `json:"destination_ip_address,omitempty"` - SourcePort *string `json:"source_port,omitempty"` - DestinationPort *string `json:"destination_port,omitempty"` - Shared *bool `json:"shared,omitempty"` - Enabled *bool `json:"enabled,omitempty"` -} - -// ToRuleUpdateMap casts a UpdateOpts struct to a map. -func (opts UpdateOpts) ToRuleUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "firewall_rule") -} - -// Update allows firewall policies to be updated. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToRuleUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete will permanently delete a particular firewall rule based on its -// unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go deleted file mode 100644 index 1af03e573d9..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go +++ /dev/null @@ -1,99 +0,0 @@ -package rules - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Rule represents a firewall rule. 
-type Rule struct { - ID string `json:"id"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - Protocol string `json:"protocol"` - Action string `json:"action"` - IPVersion int `json:"ip_version,omitempty"` - SourceIPAddress string `json:"source_ip_address,omitempty"` - DestinationIPAddress string `json:"destination_ip_address,omitempty"` - SourcePort string `json:"source_port,omitempty"` - DestinationPort string `json:"destination_port,omitempty"` - Shared bool `json:"shared,omitempty"` - Enabled bool `json:"enabled,omitempty"` - PolicyID string `json:"firewall_policy_id"` - Position int `json:"position"` - TenantID string `json:"tenant_id"` -} - -// RulePage is the page returned by a pager when traversing over a -// collection of firewall rules. -type RulePage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of firewall rules has -// reached the end of a page and the pager seeks to traverse over a new one. -// In order to do this, it needs to construct the next page's URL. -func (r RulePage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"firewall_rules_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a RulePage struct is empty. -func (r RulePage) IsEmpty() (bool, error) { - is, err := ExtractRules(r) - return len(is) == 0, err -} - -// ExtractRules accepts a Page struct, specifically a RulePage struct, -// and extracts the elements into a slice of Rule structs. In other words, -// a generic collection is mapped into a relevant slice. 
-func ExtractRules(r pagination.Page) ([]Rule, error) { - var s struct { - Rules []Rule `json:"firewall_rules"` - } - err := (r.(RulePage)).ExtractInto(&s) - return s.Rules, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a firewall rule. -func (r commonResult) Extract() (*Rule, error) { - var s struct { - Rule *Rule `json:"firewall_rule"` - } - err := r.ExtractInto(&s) - return s.Rule, err -} - -// GetResult represents the result of a get operation. Call its Extract method -// to interpret it as a Rule. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Rule. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its ExtractErr -// method to determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Rule. 
-type CreateResult struct { - commonResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go deleted file mode 100644 index 79654be73e2..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go +++ /dev/null @@ -1,16 +0,0 @@ -package rules - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "fw" - resourcePath = "firewall_rules" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go deleted file mode 100644 index bf5ec6807cc..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -package floatingips enables management and retrieval of Floating IPs from the -OpenStack Networking service. 
- -Example to List Floating IPs - - listOpts := floatingips.ListOpts{ - FloatingNetworkID: "a6917946-38ab-4ffd-a55a-26c0980ce5ee", - } - - allPages, err := floatingips.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allFIPs, err := floatingips.ExtractFloatingIPs(allPages) - if err != nil { - panic(err) - } - - for _, fip := range allFIPs { - fmt.Printf("%+v\n", fip) - } - -Example to Create a Floating IP - - createOpts := floatingips.CreateOpts{ - FloatingNetworkID: "a6917946-38ab-4ffd-a55a-26c0980ce5ee", - } - - fip, err := floatingips.Create(networkingClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Floating IP - - fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7" - portID := "76d0a61b-b8e5-490c-9892-4cf674f2bec8" - - updateOpts := floatingips.UpdateOpts{ - PortID: &portID, - } - - fip, err := floatingips.Update(networkingClient, fipID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Disassociate a Floating IP with a Port - - fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7" - - updateOpts := floatingips.UpdateOpts{ - PortID: nil, - } - - fip, err := floatingips.Update(networkingClient, fipID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Floating IP - - fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7" - err := floatingips.Delete(networkClient, fipID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package floatingips diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go deleted file mode 100644 index 1c8a8b2f13f..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go +++ /dev/null @@ -1,147 +0,0 @@ -package floatingips - -import ( - "github.com/gophercloud/gophercloud" - 
"github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the floating IP attributes you want to see returned. SortKey allows you to -// sort by a particular network attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - ID string `q:"id"` - FloatingNetworkID string `q:"floating_network_id"` - PortID string `q:"port_id"` - FixedIP string `q:"fixed_ip_address"` - FloatingIP string `q:"floating_ip_address"` - TenantID string `q:"tenant_id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` - RouterID string `q:"router_id"` - Status string `q:"status"` -} - -// List returns a Pager which allows you to iterate over a collection of -// floating IP resources. It accepts a ListOpts struct, which allows you to -// filter and sort the returned collection for greater efficiency. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return FloatingIPPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToFloatingIPCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new floating IP -// resource. The only required fields are FloatingNetworkID and PortID which -// refer to the external network and internal port respectively. 
-type CreateOpts struct { - FloatingNetworkID string `json:"floating_network_id" required:"true"` - FloatingIP string `json:"floating_ip_address,omitempty"` - PortID string `json:"port_id,omitempty"` - FixedIP string `json:"fixed_ip_address,omitempty"` - TenantID string `json:"tenant_id,omitempty"` -} - -// ToFloatingIPCreateMap allows CreateOpts to satisfy the CreateOptsBuilder -// interface -func (opts CreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "floatingip") -} - -// Create accepts a CreateOpts struct and uses the values provided to create a -// new floating IP resource. You can create floating IPs on external networks -// only. If you provide a FloatingNetworkID which refers to a network that is -// not external (i.e. its `router:external' attribute is False), the operation -// will fail and return a 400 error. -// -// If you do not specify a FloatingIP address value, the operation will -// automatically allocate an available address for the new resource. If you do -// choose to specify one, it must fall within the subnet range for the external -// network - otherwise the operation returns a 400 error. If the FloatingIP -// address is already in use, the operation returns a 409 error code. -// -// You can associate the new resource with an internal port by using the PortID -// field. If you specify a PortID that is not valid, the operation will fail and -// return 404 error code. -// -// You must also configure an IP address for the port associated with the PortID -// you have provided - this is what the FixedIP refers to: an IP fixed to a -// port. Because a port might be associated with multiple IP addresses, you can -// use the FixedIP field to associate a particular IP address rather than have -// the API assume for you. If you specify an IP address that is not valid, the -// operation will fail and return a 400 error code. 
If the PortID and FixedIP -// are already associated with another resource, the operation will fail and -// returns a 409 error code. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToFloatingIPCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular floating IP resource based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToFloatingIPUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains the values used when updating a floating IP resource. The -// only value that can be updated is which internal port the floating IP is -// linked to. To associate the floating IP with a new internal port, provide its -// ID. To disassociate the floating IP from all ports, provide an empty string. -type UpdateOpts struct { - PortID *string `json:"port_id"` -} - -// ToFloatingIPUpdateMap allows UpdateOpts to satisfy the UpdateOptsBuilder -// interface -func (opts UpdateOpts) ToFloatingIPUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "floatingip") -} - -// Update allows floating IP resources to be updated. Currently, the only way to -// "update" a floating IP is to associate it with a new internal port, or -// disassociated it from all ports. See UpdateOpts for instructions of how to -// do this. 
-func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToFloatingIPUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete will permanently delete a particular floating IP resource. Please -// ensure this is what you want - you can also disassociate the IP from existing -// internal ports. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go deleted file mode 100644 index f1af23d4a60..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go +++ /dev/null @@ -1,116 +0,0 @@ -package floatingips - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// FloatingIP represents a floating IP resource. A floating IP is an external -// IP address that is mapped to an internal port and, optionally, a specific -// IP address on a private network. In other words, it enables access to an -// instance on a private network from an external network. For this reason, -// floating IPs can only be defined on networks where the `router:external' -// attribute (provided by the external network extension) is set to True. -type FloatingIP struct { - // ID is the unique identifier for the floating IP instance. - ID string `json:"id"` - - // FloatingNetworkID is the UUID of the external network where the floating - // IP is to be created. - FloatingNetworkID string `json:"floating_network_id"` - - // FloatingIP is the address of the floating IP on the external network. 
- FloatingIP string `json:"floating_ip_address"` - - // PortID is the UUID of the port on an internal network that is associated - // with the floating IP. - PortID string `json:"port_id"` - - // FixedIP is the specific IP address of the internal port which should be - // associated with the floating IP. - FixedIP string `json:"fixed_ip_address"` - - // TenantID is the Owner of the floating IP. Only admin users can specify a - // tenant identifier other than its own. - TenantID string `json:"tenant_id"` - - // Status is the condition of the API resource. - Status string `json:"status"` - - // RouterID is the ID of the router used for this floating IP. - RouterID string `json:"router_id"` -} - -type commonResult struct { - gophercloud.Result -} - -// Extract will extract a FloatingIP resource from a result. -func (r commonResult) Extract() (*FloatingIP, error) { - var s struct { - FloatingIP *FloatingIP `json:"floatingip"` - } - err := r.ExtractInto(&s) - return s.FloatingIP, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a FloatingIP. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a FloatingIP. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a FloatingIP. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of an update operation. Call its -// ExtractErr method to determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// FloatingIPPage is the page returned by a pager when traversing over a -// collection of floating IPs. 
-type FloatingIPPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of floating IPs has -// reached the end of a page and the pager seeks to traverse over a new one. -// In order to do this, it needs to construct the next page's URL. -func (r FloatingIPPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"floatingips_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a FloatingIPPage struct is empty. -func (r FloatingIPPage) IsEmpty() (bool, error) { - is, err := ExtractFloatingIPs(r) - return len(is) == 0, err -} - -// ExtractFloatingIPs accepts a Page struct, specifically a FloatingIPPage -// struct, and extracts the elements into a slice of FloatingIP structs. In -// other words, a generic collection is mapped into a relevant slice. -func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) { - var s struct { - FloatingIPs []FloatingIP `json:"floatingips"` - } - err := (r.(FloatingIPPage)).ExtractInto(&s) - return s.FloatingIPs, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go deleted file mode 100644 index 1318a184caa..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go +++ /dev/null @@ -1,13 +0,0 @@ -package floatingips - -import "github.com/gophercloud/gophercloud" - -const resourcePath = "floatingips" - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go deleted file mode 100644 index 6ede7f5e171..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Package routers enables management and retrieval of Routers from the OpenStack -Networking service. - -Example to List Routers - - listOpts := routers.ListOpts{} - allPages, err := routers.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allRouters, err := routers.ExtractRouters(allPages) - if err != nil { - panic(err) - } - - for _, router := range allRoutes { - fmt.Printf("%+v\n", router) - } - -Example to Create a Router - - iTrue := true - gwi := routers.GatewayInfo{ - NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b", - } - - createOpts := routers.CreateOpts{ - Name: "router_1", - AdminStateUp: &iTrue, - GatewayInfo: &gwi, - } - - router, err := routers.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Router - - routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" - - routes := []routers.Route{{ - DestinationCIDR: "40.0.1.0/24", - NextHop: "10.1.0.10", - }} - - updateOpts := routers.UpdateOpts{ - Name: "new_name", - Routes: routes, - } - - router, err := routers.Update(networkClient, routerID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Remove all Routes from a Router - - routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" - - routes := []routers.Route{} - - updateOpts := routers.UpdateOpts{ - Routes: routes, - } - - router, err := routers.Update(networkClient, routerID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Router - - routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" - err := routers.Delete(networkClient, routerID).ExtractErr() - if err != nil { - panic(err) - } - -Example to Add an Interface to a Router - - routerID 
:= "4e8e5957-649f-477b-9e5b-f1f75b21c03c" - - intOpts := routers.AddInterfaceOpts{ - SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1", - } - - interface, err := routers.AddInterface(networkClient, routerID, intOpts).Extract() - if err != nil { - panic(err) - } - -Example to Remove an Interface from a Router - - routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" - - intOpts := routers.RemoveInterfaceOpts{ - SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1", - } - - interface, err := routers.RemoveInterface(networkClient, routerID, intOpts).Extract() - if err != nil { - panic(err) - } -*/ -package routers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go deleted file mode 100644 index 6799d200b7a..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go +++ /dev/null @@ -1,223 +0,0 @@ -package routers - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the floating IP attributes you want to see returned. SortKey allows you to -// sort by a particular network attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - ID string `q:"id"` - Name string `q:"name"` - AdminStateUp *bool `q:"admin_state_up"` - Distributed *bool `q:"distributed"` - Status string `q:"status"` - TenantID string `q:"tenant_id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// List returns a Pager which allows you to iterate over a collection of -// routers. 
It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those routers that are owned by the -// tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return RouterPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToRouterCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new router. There are -// no required values. -type CreateOpts struct { - Name string `json:"name,omitempty"` - AdminStateUp *bool `json:"admin_state_up,omitempty"` - Distributed *bool `json:"distributed,omitempty"` - TenantID string `json:"tenant_id,omitempty"` - GatewayInfo *GatewayInfo `json:"external_gateway_info,omitempty"` -} - -// ToRouterCreateMap builds a create request body from CreateOpts. -func (opts CreateOpts) ToRouterCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "router") -} - -// Create accepts a CreateOpts struct and uses the values to create a new -// logical router. When it is created, the router does not have an internal -// interface - it is not associated to any subnet. -// -// You can optionally specify an external gateway for a router using the -// GatewayInfo struct. The external gateway for the router must be plugged into -// an external network (it is external if its `router:external' field is set to -// true). 
-func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToRouterCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular router based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToRouterUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains the values used when updating a router. -type UpdateOpts struct { - Name string `json:"name,omitempty"` - AdminStateUp *bool `json:"admin_state_up,omitempty"` - Distributed *bool `json:"distributed,omitempty"` - GatewayInfo *GatewayInfo `json:"external_gateway_info,omitempty"` - Routes []Route `json:"routes"` -} - -// ToRouterUpdateMap builds an update body based on UpdateOpts. -func (opts UpdateOpts) ToRouterUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "router") -} - -// Update allows routers to be updated. You can update the name, administrative -// state, and the external gateway. For more information about how to set the -// external gateway for a router, see Create. This operation does not enable -// the update of router interfaces. To do this, use the AddInterface and -// RemoveInterface functions. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToRouterUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete will permanently delete a particular router based on its unique ID. 
-func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} - -// AddInterfaceOptsBuilder allows extensions to add additional parameters to -// the AddInterface request. -type AddInterfaceOptsBuilder interface { - ToRouterAddInterfaceMap() (map[string]interface{}, error) -} - -// AddInterfaceOpts represents the options for adding an interface to a router. -type AddInterfaceOpts struct { - SubnetID string `json:"subnet_id,omitempty" xor:"PortID"` - PortID string `json:"port_id,omitempty" xor:"SubnetID"` -} - -// ToRouterAddInterfaceMap builds a request body from AddInterfaceOpts. -func (opts AddInterfaceOpts) ToRouterAddInterfaceMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "") -} - -// AddInterface attaches a subnet to an internal router interface. You must -// specify either a SubnetID or PortID in the request body. If you specify both, -// the operation will fail and an error will be returned. -// -// If you specify a SubnetID, the gateway IP address for that particular subnet -// is used to create the router interface. Alternatively, if you specify a -// PortID, the IP address associated with the port is used to create the router -// interface. -// -// If you reference a port that is associated with multiple IP addresses, or -// if the port is associated with zero IP addresses, the operation will fail and -// a 400 Bad Request error will be returned. -// -// If you reference a port already in use, the operation will fail and a 409 -// Conflict error will be returned. -// -// The PortID that is returned after using Extract() on the result of this -// operation can either be the same PortID passed in or, on the other hand, the -// identifier of a new port created by this operation. After the operation -// completes, the device ID of the port is set to the router ID, and the -// device owner attribute is set to `network:router_interface'. 
-func AddInterface(c *gophercloud.ServiceClient, id string, opts AddInterfaceOptsBuilder) (r InterfaceResult) { - b, err := opts.ToRouterAddInterfaceMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(addInterfaceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// RemoveInterfaceOptsBuilder allows extensions to add additional parameters to -// the RemoveInterface request. -type RemoveInterfaceOptsBuilder interface { - ToRouterRemoveInterfaceMap() (map[string]interface{}, error) -} - -// RemoveInterfaceOpts represents options for removing an interface from -// a router. -type RemoveInterfaceOpts struct { - SubnetID string `json:"subnet_id,omitempty" or:"PortID"` - PortID string `json:"port_id,omitempty" or:"SubnetID"` -} - -// ToRouterRemoveInterfaceMap builds a request body based on -// RemoveInterfaceOpts. -func (opts RemoveInterfaceOpts) ToRouterRemoveInterfaceMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "") -} - -// RemoveInterface removes an internal router interface, which detaches a -// subnet from the router. You must specify either a SubnetID or PortID, since -// these values are used to identify the router interface to remove. -// -// Unlike AddInterface, you can also specify both a SubnetID and PortID. If you -// choose to specify both, the subnet ID must correspond to the subnet ID of -// the first IP address on the port specified by the port ID. Otherwise, the -// operation will fail and return a 409 Conflict error. -// -// If the router, subnet or port which are referenced do not exist or are not -// visible to you, the operation will fail and a 404 Not Found error will be -// returned. After this operation completes, the port connecting the router -// with the subnet is removed from the subnet for the network. 
-func RemoveInterface(c *gophercloud.ServiceClient, id string, opts RemoveInterfaceOptsBuilder) (r InterfaceResult) { - b, err := opts.ToRouterRemoveInterfaceMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(removeInterfaceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go deleted file mode 100644 index e7c27dc332e..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go +++ /dev/null @@ -1,167 +0,0 @@ -package routers - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// GatewayInfo represents the information of an external gateway for any -// particular network router. -type GatewayInfo struct { - NetworkID string `json:"network_id"` - ExternalFixedIPs []ExternalFixedIP `json:"external_fixed_ips,omitempty"` -} - -// ExternalFixedIP is the IP address and subnet ID of the external gateway of a -// router. -type ExternalFixedIP struct { - IPAddress string `json:"ip_address"` - SubnetID string `json:"subnet_id"` -} - -// Route is a possible route in a router. -type Route struct { - NextHop string `json:"nexthop"` - DestinationCIDR string `json:"destination"` -} - -// Router represents a Neutron router. A router is a logical entity that -// forwards packets across internal subnets and NATs (network address -// translation) them on external networks through an appropriate gateway. -// -// A router has an interface for each subnet with which it is associated. By -// default, the IP address of such interface is the subnet's gateway IP. Also, -// whenever a router is associated with a subnet, a port for that router -// interface is added to the subnet's network. 
-type Router struct { - // Status indicates whether or not a router is currently operational. - Status string `json:"status"` - - // GateayInfo provides information on external gateway for the router. - GatewayInfo GatewayInfo `json:"external_gateway_info"` - - // AdminStateUp is the administrative state of the router. - AdminStateUp bool `json:"admin_state_up"` - - // Distributed is whether router is disitrubted or not. - Distributed bool `json:"distributed"` - - // Name is the human readable name for the router. It does not have to be - // unique. - Name string `json:"name"` - - // ID is the unique identifier for the router. - ID string `json:"id"` - - // TenantID is the owner of the router. Only admin users can specify a tenant - // identifier other than its own. - TenantID string `json:"tenant_id"` - - // Routes are a collection of static routes that the router will host. - Routes []Route `json:"routes"` -} - -// RouterPage is the page returned by a pager when traversing over a -// collection of routers. -type RouterPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of routers has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r RouterPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"routers_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a RouterPage struct is empty. -func (r RouterPage) IsEmpty() (bool, error) { - is, err := ExtractRouters(r) - return len(is) == 0, err -} - -// ExtractRouters accepts a Page struct, specifically a RouterPage struct, -// and extracts the elements into a slice of Router structs. In other words, -// a generic collection is mapped into a relevant slice. 
-func ExtractRouters(r pagination.Page) ([]Router, error) { - var s struct { - Routers []Router `json:"routers"` - } - err := (r.(RouterPage)).ExtractInto(&s) - return s.Routers, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a router. -func (r commonResult) Extract() (*Router, error) { - var s struct { - Router *Router `json:"router"` - } - err := r.ExtractInto(&s) - return s.Router, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Router. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Router. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Router. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its ExtractErr -// method to determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// InterfaceInfo represents information about a particular router interface. As -// mentioned above, in order for a router to forward to a subnet, it needs an -// interface. -type InterfaceInfo struct { - // SubnetID is the ID of the subnet which this interface is associated with. - SubnetID string `json:"subnet_id"` - - // PortID is the ID of the port that is a part of the subnet. - PortID string `json:"port_id"` - - // ID is the UUID of the interface. - ID string `json:"id"` - - // TenantID is the owner of the interface. - TenantID string `json:"tenant_id"` -} - -// InterfaceResult represents the result of interface operations, such as -// AddInterface() and RemoveInterface(). Call its Extract method to interpret -// the result as a InterfaceInfo. 
-type InterfaceResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts an information struct. -func (r InterfaceResult) Extract() (*InterfaceInfo, error) { - var s InterfaceInfo - err := r.ExtractInto(&s) - return &s, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go deleted file mode 100644 index f9e9da32117..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go +++ /dev/null @@ -1,21 +0,0 @@ -package routers - -import "github.com/gophercloud/gophercloud" - -const resourcePath = "routers" - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(resourcePath, id) -} - -func addInterfaceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(resourcePath, id, "add_router_interface") -} - -func removeInterfaceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(resourcePath, id, "remove_router_interface") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go deleted file mode 100644 index bad3324b5ed..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Package members provides information and interaction with Members of the -Load Balancer as a Service extension for the OpenStack Networking service. 
- -Example to List Members - - listOpts := members.ListOpts{ - ProtocolPort: 80, - } - - allPages, err := members.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allMembers, err := members.ExtractMembers(allPages) - if err != nil { - panic(err) - } - - for _, member := range allMembers { - fmt.Printf("%+v\n", member) - } - -Example to Create a Member - - createOpts := members.CreateOpts{ - Address: "192.168.2.14", - ProtocolPort: 80, - PoolID: "0b266a12-0fdf-4434-bd11-649d84e54bd5" - } - - member, err := members.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Member - - memberID := "46592c54-03f7-40ef-9cdf-b1fcf2775ddf" - - updateOpts := members.UpdateOpts{ - AdminStateUp: gophercloud.Disabled, - } - - member, err := members.Update(networkClient, memberID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Member - - memberID := "46592c54-03f7-40ef-9cdf-b1fcf2775ddf" - err := members.Delete(networkClient, memberID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package members diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go deleted file mode 100644 index 1a312884440..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go +++ /dev/null @@ -1,124 +0,0 @@ -package members - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the floating IP attributes you want to see returned. SortKey allows you to -// sort by a particular network attribute. SortDir sets the direction, and is -// either `asc' or `desc'. 
Marker and Limit are used for pagination. -type ListOpts struct { - Status string `q:"status"` - Weight int `q:"weight"` - AdminStateUp *bool `q:"admin_state_up"` - TenantID string `q:"tenant_id"` - PoolID string `q:"pool_id"` - Address string `q:"address"` - ProtocolPort int `q:"protocol_port"` - ID string `q:"id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// List returns a Pager which allows you to iterate over a collection of -// members. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those members that are owned by the -// tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return MemberPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToLBMemberCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new pool member. -type CreateOpts struct { - // Address is the IP address of the member. - Address string `json:"address" required:"true"` - - // ProtocolPort is the port on which the application is hosted. - ProtocolPort int `json:"protocol_port" required:"true"` - - // PoolID is the pool to which this member will belong. - PoolID string `json:"pool_id" required:"true"` - - // TenantID is only required if the caller has an admin role and wants - // to create a pool for another tenant. 
- TenantID string `json:"tenant_id,omitempty"` -} - -// ToLBMemberCreateMap builds a request body from CreateOpts. -func (opts CreateOpts) ToLBMemberCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "member") -} - -// Create accepts a CreateOpts struct and uses the values to create a new -// load balancer pool member. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToLBMemberCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular pool member based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToLBMemberUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains the values used when updating a pool member. -type UpdateOpts struct { - // The administrative state of the member, which is up (true) or down (false). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToLBMemberUpdateMap builds a request body from UpdateOpts. -func (opts UpdateOpts) ToLBMemberUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "member") -} - -// Update allows members to be updated. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToLBMemberUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201, 202}, - }) - return -} - -// Delete will permanently delete a particular member based on its unique ID. 
-func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go deleted file mode 100644 index 804dbe8445f..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go +++ /dev/null @@ -1,109 +0,0 @@ -package members - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Member represents the application running on a backend server. -type Member struct { - // Status is the status of the member. Indicates whether the member - // is operational. - Status string - - // Weight is the weight of member. - Weight int - - // AdminStateUp is the administrative state of the member, which is up - // (true) or down (false). - AdminStateUp bool `json:"admin_state_up"` - - // TenantID is the owner of the member. - TenantID string `json:"tenant_id"` - - // PoolID is the pool to which the member belongs. - PoolID string `json:"pool_id"` - - // Address is the IP address of the member. - Address string - - // ProtocolPort is the port on which the application is hosted. - ProtocolPort int `json:"protocol_port"` - - // ID is the unique ID for the member. - ID string -} - -// MemberPage is the page returned by a pager when traversing over a -// collection of pool members. -type MemberPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of members has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. 
-func (r MemberPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"members_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a MemberPage struct is empty. -func (r MemberPage) IsEmpty() (bool, error) { - is, err := ExtractMembers(r) - return len(is) == 0, err -} - -// ExtractMembers accepts a Page struct, specifically a MemberPage struct, -// and extracts the elements into a slice of Member structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractMembers(r pagination.Page) ([]Member, error) { - var s struct { - Members []Member `json:"members"` - } - err := (r.(MemberPage)).ExtractInto(&s) - return s.Members, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a member. -func (r commonResult) Extract() (*Member, error) { - var s struct { - Member *Member `json:"member"` - } - err := r.ExtractInto(&s) - return s.Member, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Member. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Member. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Member. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the result succeeded or failed. 
-type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go deleted file mode 100644 index e2248f81f45..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go +++ /dev/null @@ -1,16 +0,0 @@ -package members - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lb" - resourcePath = "members" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go deleted file mode 100644 index b5c0f29f054..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Package monitors provides information and interaction with the Monitors -of the Load Balancer as a Service extension for the OpenStack Networking -Service. 
- -Example to List Monitors - - listOpts: monitors.ListOpts{ - Type: "HTTP", - } - - allPages, err := monitors.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allMonitors, err := monitors.ExtractMonitors(allPages) - if err != nil { - panic(err) - } - - for _, monitor := range allMonitors { - fmt.Printf("%+v\n", monitor) - } - -Example to Create a Monitor - - createOpts := monitors.CreateOpts{ - Type: "HTTP", - Delay: 20, - Timeout: 20, - MaxRetries: 5, - URLPath: "/check", - ExpectedCodes: "200-299", - } - - monitor, err := monitors.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Monitor - - monitorID := "681aed03-aadb-43ae-aead-b9016375650a" - - updateOpts := monitors.UpdateOpts{ - Timeout: 30, - } - - monitor, err := monitors.Update(networkClient, monitorID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Member - - monitorID := "681aed03-aadb-43ae-aead-b9016375650a" - err := monitors.Delete(networkClient, monitorID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package monitors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go deleted file mode 100644 index 9ed0c769c90..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go +++ /dev/null @@ -1,227 +0,0 @@ -package monitors - -import ( - "fmt" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the floating IP attributes you want to see returned. SortKey allows you to -// sort by a particular network attribute. 
SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - ID string `q:"id"` - TenantID string `q:"tenant_id"` - Type string `q:"type"` - Delay int `q:"delay"` - Timeout int `q:"timeout"` - MaxRetries int `q:"max_retries"` - HTTPMethod string `q:"http_method"` - URLPath string `q:"url_path"` - ExpectedCodes string `q:"expected_codes"` - AdminStateUp *bool `q:"admin_state_up"` - Status string `q:"status"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// List returns a Pager which allows you to iterate over a collection of -// monitors. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those monitors that are owned by the -// tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return MonitorPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// MonitorType is the type for all the types of LB monitors. -type MonitorType string - -// Constants that represent approved monitoring types. -const ( - TypePING MonitorType = "PING" - TypeTCP MonitorType = "TCP" - TypeHTTP MonitorType = "HTTP" - TypeHTTPS MonitorType = "HTTPS" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToLBMonitorCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new health monitor. 
-type CreateOpts struct { - // MonitorType is the type of probe, which is PING, TCP, HTTP, or HTTPS, - // that is sent by the load balancer to verify the member state. - Type MonitorType `json:"type" required:"true"` - - // Delay is the time, in seconds, between sending probes to members. - Delay int `json:"delay" required:"true"` - - // Timeout is the maximum number of seconds for a monitor to wait for a ping - // reply before it times out. The value must be less than the delay value. - Timeout int `json:"timeout" required:"true"` - - // MaxRetries is the number of permissible ping failures before changing the - // member's status to INACTIVE. Must be a number between 1 and 10. - MaxRetries int `json:"max_retries" required:"true"` - - // URLPath is the URI path that will be accessed if monitor type - // is HTTP or HTTPS. Required for HTTP(S) types. - URLPath string `json:"url_path,omitempty"` - - // HTTPMethod is the HTTP method used for requests by the monitor. If this - // attribute is not specified, it defaults to "GET". Required for HTTP(S) - // types. - HTTPMethod string `json:"http_method,omitempty"` - - // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor - // You can either specify a single status like "200", or a range like - // "200-202". Required for HTTP(S) types. - ExpectedCodes string `json:"expected_codes,omitempty"` - - // TenantID is only required if the caller has an admin role and wants - // to create a pool for another tenant. - TenantID string `json:"tenant_id,omitempty"` - - // AdminStateUp denotes whether the monitor is administratively up or down. - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToLBMonitorCreateMap builds a request body from CreateOpts. 
-func (opts CreateOpts) ToLBMonitorCreateMap() (map[string]interface{}, error) { - if opts.Type == TypeHTTP || opts.Type == TypeHTTPS { - if opts.URLPath == "" { - err := gophercloud.ErrMissingInput{} - err.Argument = "monitors.CreateOpts.URLPath" - return nil, err - } - if opts.ExpectedCodes == "" { - err := gophercloud.ErrMissingInput{} - err.Argument = "monitors.CreateOpts.ExpectedCodes" - return nil, err - } - } - if opts.Delay < opts.Timeout { - err := gophercloud.ErrInvalidInput{} - err.Argument = "monitors.CreateOpts.Delay/monitors.CreateOpts.Timeout" - err.Info = "Delay must be greater than or equal to timeout" - return nil, err - } - return gophercloud.BuildRequestBody(opts, "health_monitor") -} - -// Create is an operation which provisions a new health monitor. There are -// different types of monitor you can provision: PING, TCP or HTTP(S). Below -// are examples of how to create each one. -// -// Here is an example config struct to use when creating a PING or TCP monitor: -// -// CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3} -// CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3} -// -// Here is an example config struct to use when creating a HTTP(S) monitor: -// -// CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3, -// HttpMethod: "HEAD", ExpectedCodes: "200"} -// -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToLBMonitorCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular health monitor based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. 
-type UpdateOptsBuilder interface { - ToLBMonitorUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains all the values needed to update an existing monitor. -// Attributes not listed here but appear in CreateOpts are immutable and cannot -// be updated. -type UpdateOpts struct { - // Delay is the time, in seconds, between sending probes to members. - Delay int `json:"delay,omitempty"` - - // Timeout is the maximum number of seconds for a monitor to wait for a ping - // reply before it times out. The value must be less than the delay value. - Timeout int `json:"timeout,omitempty"` - - // MaxRetries is the number of permissible ping failures before changing the - // member's status to INACTIVE. Must be a number between 1 and 10. - MaxRetries int `json:"max_retries,omitempty"` - - // URLPath is the URI path that will be accessed if monitor type - // is HTTP or HTTPS. - URLPath string `json:"url_path,omitempty"` - - // HTTPMethod is the HTTP method used for requests by the monitor. If this - // attribute is not specified, it defaults to "GET". - HTTPMethod string `json:"http_method,omitempty"` - - // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor - // You can either specify a single status like "200", or a range like - // "200-202". - ExpectedCodes string `json:"expected_codes,omitempty"` - - // AdminStateUp denotes whether the monitor is administratively up or down. - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToLBMonitorUpdateMap builds a request body from UpdateOpts. 
-func (opts UpdateOpts) ToLBMonitorUpdateMap() (map[string]interface{}, error) { - if opts.Delay > 0 && opts.Timeout > 0 && opts.Delay < opts.Timeout { - err := gophercloud.ErrInvalidInput{} - err.Argument = "monitors.CreateOpts.Delay/monitors.CreateOpts.Timeout" - err.Value = fmt.Sprintf("%d/%d", opts.Delay, opts.Timeout) - err.Info = "Delay must be greater than or equal to timeout" - return nil, err - } - return gophercloud.BuildRequestBody(opts, "health_monitor") -} - -// Update is an operation which modifies the attributes of the specified -// monitor. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToLBMonitorUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - return -} - -// Delete will permanently delete a particular monitor based on its unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go deleted file mode 100644 index cc99f7cced7..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go +++ /dev/null @@ -1,141 +0,0 @@ -package monitors - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Monitor represents a load balancer health monitor. A health monitor is used -// to determine whether or not back-end members of the VIP's pool are usable -// for processing a request. A pool can have several health monitors associated -// with it. There are different types of health monitors supported: -// -// PING: used to ping the members using ICMP. 
-// TCP: used to connect to the members using TCP. -// HTTP: used to send an HTTP request to the member. -// HTTPS: used to send a secure HTTP request to the member. -// -// When a pool has several monitors associated with it, each member of the pool -// is monitored by all these monitors. If any monitor declares the member as -// unhealthy, then the member status is changed to INACTIVE and the member -// won't participate in its pool's load balancing. In other words, ALL monitors -// must declare the member to be healthy for it to stay ACTIVE. -type Monitor struct { - // ID is the unique ID for the Monitor. - ID string - - // Name is the monitor name. Does not have to be unique. - Name string - - // TenantID is the owner of the Monitor. - TenantID string `json:"tenant_id"` - - // Type is the type of probe sent by the load balancer to verify the member - // state, which is PING, TCP, HTTP, or HTTPS. - Type string - - // Delay is the time, in seconds, between sending probes to members. - Delay int - - // Timeout is the maximum number of seconds for a monitor to wait for a - // connection to be established before it times out. This value must be less - // than the delay value. - Timeout int - - // MaxRetries is the number of allowed connection failures before changing the - // status of the member to INACTIVE. A valid value is from 1 to 10. - MaxRetries int `json:"max_retries"` - - // HTTPMethod is the HTTP method that the monitor uses for requests. - HTTPMethod string `json:"http_method"` - - // URLPath is the HTTP path of the request sent by the monitor to test the - // health of a member. Must be a string beginning with a forward slash (/). - URLPath string `json:"url_path"` - - // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor. - ExpectedCodes string `json:"expected_codes"` - - // AdminStateUp is the administrative state of the health monitor, which is up - // (true) or down (false). 
- AdminStateUp bool `json:"admin_state_up"` - - // Status is the status of the health monitor. Indicates whether the health - // monitor is operational. - Status string -} - -// MonitorPage is the page returned by a pager when traversing over a -// collection of health monitors. -type MonitorPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of monitors has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r MonitorPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"health_monitors_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a PoolPage struct is empty. -func (r MonitorPage) IsEmpty() (bool, error) { - is, err := ExtractMonitors(r) - return len(is) == 0, err -} - -// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct, -// and extracts the elements into a slice of Monitor structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractMonitors(r pagination.Page) ([]Monitor, error) { - var s struct { - Monitors []Monitor `json:"health_monitors"` - } - err := (r.(MonitorPage)).ExtractInto(&s) - return s.Monitors, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a monitor. -func (r commonResult) Extract() (*Monitor, error) { - var s struct { - Monitor *Monitor `json:"health_monitor"` - } - err := r.ExtractInto(&s) - return s.Monitor, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Monitor. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Monitor. 
-type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Monitor. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its Extract -// method to determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go deleted file mode 100644 index e9d90fcc569..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go +++ /dev/null @@ -1,16 +0,0 @@ -package monitors - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lb" - resourcePath = "health_monitors" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go deleted file mode 100644 index 25c4204dc93..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Package pools provides information and interaction with the Pools of the -Load Balancing as a Service extension for the OpenStack Networking service. 
- -Example to List Pools - - listOpts := pools.ListOpts{ - SubnetID: "d9bd223b-f1a9-4f98-953b-df977b0f902d", - } - - allPages, err := pools.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allPools, err := pools.ExtractPools(allPages) - if err != nil { - panic(err) - } - - for _, pool := range allPools { - fmt.Printf("%+v\n", pool) - } - -Example to Create a Pool - - createOpts := pools.CreateOpts{ - LBMethod: pools.LBMethodRoundRobin, - Protocol: "HTTP", - Name: "Example pool", - SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", - Provider: "haproxy", - } - - pool, err := pools.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Pool - - poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" - - updateOpts := pools.UpdateOpts{ - LBMethod: pools.LBMethodLeastConnections, - } - - pool, err := pools.Update(networkClient, poolID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Pool - - poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" - err := pools.Delete(networkClient, poolID).ExtractErr() - if err != nil { - panic(err) - } - -Example to Associate a Monitor to a Pool - - poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" - monitorID := "8bbfbe1c-6faa-4d97-abdb-0df6c90df70b" - - pool, err := pools.AssociateMonitor(networkClient, poolID, monitorID).Extract() - if err != nil { - panic(err) - } - -Example to Disassociate a Monitor from a Pool - - poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" - monitorID := "8bbfbe1c-6faa-4d97-abdb-0df6c90df70b" - - pool, err := pools.DisassociateMonitor(networkClient, poolID, monitorID).Extract() - if err != nil { - panic(err) - } -*/ -package pools diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go deleted file mode 100644 index b3593548d37..00000000000 --- 
a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go +++ /dev/null @@ -1,175 +0,0 @@ -package pools - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the floating IP attributes you want to see returned. SortKey allows you to -// sort by a particular network attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - Status string `q:"status"` - LBMethod string `q:"lb_method"` - Protocol string `q:"protocol"` - SubnetID string `q:"subnet_id"` - TenantID string `q:"tenant_id"` - AdminStateUp *bool `q:"admin_state_up"` - Name string `q:"name"` - ID string `q:"id"` - VIPID string `q:"vip_id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// List returns a Pager which allows you to iterate over a collection of -// pools. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those pools that are owned by the -// tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return PoolPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// LBMethod is a type used for possible load balancing methods. -type LBMethod string - -// LBProtocol is a type used for possible load balancing protocols. 
-type LBProtocol string - -// Supported attributes for create/update operations. -const ( - LBMethodRoundRobin LBMethod = "ROUND_ROBIN" - LBMethodLeastConnections LBMethod = "LEAST_CONNECTIONS" - - ProtocolTCP LBProtocol = "TCP" - ProtocolHTTP LBProtocol = "HTTP" - ProtocolHTTPS LBProtocol = "HTTPS" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToLBPoolCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new pool. -type CreateOpts struct { - // Name of the pool. - Name string `json:"name" required:"true"` - - // Protocol used by the pool members, you can use either - // ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS. - Protocol LBProtocol `json:"protocol" required:"true"` - - // TenantID is only required if the caller has an admin role and wants - // to create a pool for another tenant. - TenantID string `json:"tenant_id,omitempty"` - - // SubnetID is the network on which the members of the pool will be located. - // Only members that are on this network can be added to the pool. - SubnetID string `json:"subnet_id,omitempty"` - - // LBMethod is the algorithm used to distribute load between the members of - // the pool. The current specification supports LBMethodRoundRobin and - // LBMethodLeastConnections as valid values for this attribute. - LBMethod LBMethod `json:"lb_method" required:"true"` - - // Provider of the pool. - Provider string `json:"provider,omitempty"` -} - -// ToLBPoolCreateMap builds a request body based on CreateOpts. -func (opts CreateOpts) ToLBPoolCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "pool") -} - -// Create accepts a CreateOptsBuilder and uses the values to create a new -// load balancer pool. 
-func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToLBPoolCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular pool based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters ot the -// Update request. -type UpdateOptsBuilder interface { - ToLBPoolUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains the values used when updating a pool. -type UpdateOpts struct { - // Name of the pool. - Name string `json:"name,omitempty"` - - // LBMethod is the algorithm used to distribute load between the members of - // the pool. The current specification supports LBMethodRoundRobin and - // LBMethodLeastConnections as valid values for this attribute. - LBMethod LBMethod `json:"lb_method,omitempty"` -} - -// ToLBPoolUpdateMap builds a request body based on UpdateOpts. -func (opts UpdateOpts) ToLBPoolUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "pool") -} - -// Update allows pools to be updated. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToLBPoolUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete will permanently delete a particular pool based on its unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} - -// AssociateMonitor will associate a health monitor with a particular pool. 
-// Once associated, the health monitor will start monitoring the members of the -// pool and will deactivate these members if they are deemed unhealthy. A -// member can be deactivated (status set to INACTIVE) if any of health monitors -// finds it unhealthy. -func AssociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) (r AssociateResult) { - b := map[string]interface{}{"health_monitor": map[string]string{"id": monitorID}} - _, r.Err = c.Post(associateURL(c, poolID), b, &r.Body, nil) - return -} - -// DisassociateMonitor will disassociate a health monitor with a particular -// pool. When dissociation is successful, the health monitor will no longer -// check for the health of the members of the pool. -func DisassociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) (r AssociateResult) { - _, r.Err = c.Delete(disassociateURL(c, poolID, monitorID), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go deleted file mode 100644 index c2bae82d56e..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go +++ /dev/null @@ -1,137 +0,0 @@ -package pools - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Pool represents a logical set of devices, such as web servers, that you -// group together to receive and process traffic. The load balancing function -// chooses a member of the pool according to the configured load balancing -// method to handle the new requests or connections received on the VIP address. -// There is only one pool per virtual IP. -type Pool struct { - // Status of the pool. Indicates whether the pool is operational. 
- Status string - - // LBMethod is the load-balancer algorithm, which is round-robin, - // least-connections, and so on. This value, which must be supported, is - // dependent on the provider. - LBMethod string `json:"lb_method"` - - // Protocol of the pool, which is TCP, HTTP, or HTTPS. - Protocol string - - // Description for the pool. - Description string - - // MonitorIDs are the IDs of associated monitors which check the health of - // the pool members. - MonitorIDs []string `json:"health_monitors"` - - // SubnetID is the network on which the members of the pool will be located. - // Only members that are on this network can be added to the pool. - SubnetID string `json:"subnet_id"` - - // TenantID is the owner of the pool. - TenantID string `json:"tenant_id"` - - // AdminStateUp is the administrative state of the pool, which is up - // (true) or down (false). - AdminStateUp bool `json:"admin_state_up"` - - // Name of the pool. - Name string - - // MemberIDs is the list of member IDs that belong to the pool. - MemberIDs []string `json:"members"` - - // ID is the unique ID for the pool. - ID string - - // VIPID is the ID of the virtual IP associated with this pool. - VIPID string `json:"vip_id"` - - // The provider. - Provider string -} - -// PoolPage is the page returned by a pager when traversing over a -// collection of pools. -type PoolPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of pools has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r PoolPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"pools_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a PoolPage struct is empty. 
-func (r PoolPage) IsEmpty() (bool, error) { - is, err := ExtractPools(r) - return len(is) == 0, err -} - -// ExtractPools accepts a Page struct, specifically a PoolPage struct, -// and extracts the elements into a slice of Router structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractPools(r pagination.Page) ([]Pool, error) { - var s struct { - Pools []Pool `json:"pools"` - } - err := (r.(PoolPage)).ExtractInto(&s) - return s.Pools, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a router. -func (r commonResult) Extract() (*Pool, error) { - var s struct { - Pool *Pool `json:"pool"` - } - err := r.ExtractInto(&s) - return s.Pool, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Pool. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Pool. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Pool. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to interpret it as a Pool. -type DeleteResult struct { - gophercloud.ErrResult -} - -// AssociateResult represents the result of an association operation. Call its Extract -// method to interpret it as a Pool. 
-type AssociateResult struct { - commonResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go deleted file mode 100644 index fe3601bbec9..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go +++ /dev/null @@ -1,25 +0,0 @@ -package pools - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lb" - resourcePath = "pools" - monitorPath = "health_monitors" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} - -func associateURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id, monitorPath) -} - -func disassociateURL(c *gophercloud.ServiceClient, poolID, monitorID string) string { - return c.ServiceURL(rootPath, resourcePath, poolID, monitorPath, monitorID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go deleted file mode 100644 index 7fd861044b6..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Package vips provides information and interaction with the Virtual IPs of the -Load Balancing as a Service extension for the OpenStack Networking service. 
- -Example to List Virtual IPs - - listOpts := vips.ListOpts{ - SubnetID: "d9bd223b-f1a9-4f98-953b-df977b0f902d", - } - - allPages, err := vips.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allVIPs, err := vips.ExtractVIPs(allPages) - if err != nil { - panic(err) - } - - for _, vip := range allVIPs { - fmt.Printf("%+v\n", vip) - } - -Example to Create a Virtual IP - - createOpts := vips.CreateOpts{ - Protocol: "HTTP", - Name: "NewVip", - AdminStateUp: gophercloud.Enabled, - SubnetID: "8032909d-47a1-4715-90af-5153ffe39861", - PoolID: "61b1f87a-7a21-4ad3-9dda-7f81d249944f", - ProtocolPort: 80, - Persistence: &vips.SessionPersistence{Type: "SOURCE_IP"}, - } - - vip, err := vips.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Virtual IP - - vipID := "93f1bad4-0423-40a8-afac-3fc541839912" - - i1000 := 1000 - updateOpts := vips.UpdateOpts{ - ConnLimit: &i1000, - Persistence: &vips.SessionPersistence{Type: "SOURCE_IP"}, - } - - vip, err := vips.Update(networkClient, vipID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Virtual IP - - vipID := "93f1bad4-0423-40a8-afac-3fc541839912" - err := vips.Delete(networkClient, vipID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package vips diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go deleted file mode 100644 index 53b81bfdb92..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go +++ /dev/null @@ -1,180 +0,0 @@ -package vips - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. 
Filtering is achieved by passing in struct field values that map to -// the floating IP attributes you want to see returned. SortKey allows you to -// sort by a particular network attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - ID string `q:"id"` - Name string `q:"name"` - AdminStateUp *bool `q:"admin_state_up"` - Status string `q:"status"` - TenantID string `q:"tenant_id"` - SubnetID string `q:"subnet_id"` - Address string `q:"address"` - PortID string `q:"port_id"` - Protocol string `q:"protocol"` - ProtocolPort int `q:"protocol_port"` - ConnectionLimit int `q:"connection_limit"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// List returns a Pager which allows you to iterate over a collection of -// Virtual IPs. It accepts a ListOpts struct, which allows you to filter and -// sort the returned collection for greater efficiency. -// -// Default policy settings return only those virtual IPs that are owned by the -// tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return VIPPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create Request. -type CreateOptsBuilder interface { - ToVIPCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new virtual IP. -type CreateOpts struct { - // Name is the human-readable name for the VIP. Does not have to be unique. 
- Name string `json:"name" required:"true"` - - // SubnetID is the network on which to allocate the VIP's address. A tenant - // can only create VIPs on networks authorized by policy (e.g. networks that - // belong to them or networks that are shared). - SubnetID string `json:"subnet_id" required:"true"` - - // Protocol - can either be TCP, HTTP or HTTPS. - Protocol string `json:"protocol" required:"true"` - - // ProtocolPort is the port on which to listen for client traffic. - ProtocolPort int `json:"protocol_port" required:"true"` - - // PoolID is the ID of the pool with which the VIP is associated. - PoolID string `json:"pool_id" required:"true"` - - // TenantID is only required if the caller has an admin role and wants - // to create a pool for another tenant. - TenantID string `json:"tenant_id,omitempty"` - - // Address is the IP address of the VIP. - Address string `json:"address,omitempty"` - - // Description is the human-readable description for the VIP. - Description string `json:"description,omitempty"` - - // Persistence is the the of session persistence to use. - // Omit this field to prevent session persistence. - Persistence *SessionPersistence `json:"session_persistence,omitempty"` - - // ConnLimit is the maximum number of connections allowed for the VIP. - ConnLimit *int `json:"connection_limit,omitempty"` - - // AdminStateUp is the administrative state of the VIP. A valid value is - // true (UP) or false (DOWN). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToVIPCreateMap builds a request body from CreateOpts. -func (opts CreateOpts) ToVIPCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "vip") -} - -// Create is an operation which provisions a new virtual IP based on the -// configuration defined in the CreateOpts struct. Once the request is -// validated and progress has started on the provisioning process, a -// CreateResult will be returned. 
-// -// Please note that the PoolID should refer to a pool that is not already -// associated with another vip. If the pool is already used by another vip, -// then the operation will fail with a 409 Conflict error will be returned. -// -// Users with an admin role can create VIPs on behalf of other tenants by -// specifying a TenantID attribute different than their own. -func Create(c *gophercloud.ServiceClient, opts CreateOpts) (r CreateResult) { - b, err := opts.ToVIPCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular virtual IP based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToVIPUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains all the values needed to update an existing virtual IP. -// Attributes not listed here but appear in CreateOpts are immutable and cannot -// be updated. -type UpdateOpts struct { - // Name is the human-readable name for the VIP. Does not have to be unique. - Name *string `json:"name,omitempty"` - - // PoolID is the ID of the pool with which the VIP is associated. - PoolID *string `json:"pool_id,omitempty"` - - // Description is the human-readable description for the VIP. - Description *string `json:"description,omitempty"` - - // Persistence is the the of session persistence to use. - // Omit this field to prevent session persistence. - Persistence *SessionPersistence `json:"session_persistence,omitempty"` - - // ConnLimit is the maximum number of connections allowed for the VIP. - ConnLimit *int `json:"connection_limit,omitempty"` - - // AdminStateUp is the administrative state of the VIP. A valid value is - // true (UP) or false (DOWN). 
- AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToVIPUpdateMap builds a request body based on UpdateOpts. -func (opts UpdateOpts) ToVIPUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "vip") -} - -// Update is an operation which modifies the attributes of the specified VIP. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToVIPUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - return -} - -// Delete will permanently delete a particular virtual IP based on its unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go deleted file mode 100644 index cb0994a7b25..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go +++ /dev/null @@ -1,156 +0,0 @@ -package vips - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// SessionPersistence represents the session persistence feature of the load -// balancing service. It attempts to force connections or requests in the same -// session to be processed by the same member as long as it is ative. Three -// types of persistence are supported: -// -// SOURCE_IP: With this mode, all connections originating from the same source -// IP address, will be handled by the same member of the pool. -// HTTP_COOKIE: With this persistence mode, the load balancing function will -// create a cookie on the first request from a client. 
Subsequent -// requests containing the same cookie value will be handled by -// the same member of the pool. -// APP_COOKIE: With this persistence mode, the load balancing function will -// rely on a cookie established by the backend application. All -// requests carrying the same cookie value will be handled by the -// same member of the pool. -type SessionPersistence struct { - // Type is the type of persistence mode. - Type string `json:"type"` - - // CookieName is the name of cookie if persistence mode is set appropriately. - CookieName string `json:"cookie_name,omitempty"` -} - -// VirtualIP is the primary load balancing configuration object that specifies -// the virtual IP address and port on which client traffic is received, as well -// as other details such as the load balancing method to be use, protocol, etc. -// This entity is sometimes known in LB products under the name of a "virtual -// server", a "vserver" or a "listener". -type VirtualIP struct { - // ID is the unique ID for the VIP. - ID string `json:"id"` - - // TenantID is the owner of the VIP. - TenantID string `json:"tenant_id"` - - // Name is the human-readable name for the VIP. Does not have to be unique. - Name string `json:"name"` - - // Description is the human-readable description for the VIP. - Description string `json:"description"` - - // SubnetID is the ID of the subnet on which to allocate the VIP address. - SubnetID string `json:"subnet_id"` - - // Address is the IP address of the VIP. - Address string `json:"address"` - - // Protocol of the VIP address. A valid value is TCP, HTTP, or HTTPS. - Protocol string `json:"protocol"` - - // ProtocolPort is the port on which to listen to client traffic that is - // associated with the VIP address. A valid value is from 0 to 65535. - ProtocolPort int `json:"protocol_port"` - - // PoolID is the ID of the pool with which the VIP is associated. 
- PoolID string `json:"pool_id"` - - // PortID is the ID of the port which belongs to the load balancer. - PortID string `json:"port_id"` - - // Persistence indicates whether connections in the same session will be - // processed by the same pool member or not. - Persistence SessionPersistence `json:"session_persistence"` - - // ConnLimit is the maximum number of connections allowed for the VIP. - // Default is -1, meaning no limit. - ConnLimit int `json:"connection_limit"` - - // AdminStateUp is the administrative state of the VIP. A valid value is - // true (UP) or false (DOWN). - AdminStateUp bool `json:"admin_state_up"` - - // Status is the status of the VIP. Indicates whether the VIP is operational. - Status string `json:"status"` -} - -// VIPPage is the page returned by a pager when traversing over a -// collection of virtual IPs. -type VIPPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of routers has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r VIPPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"vips_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a VIPPage struct is empty. -func (r VIPPage) IsEmpty() (bool, error) { - is, err := ExtractVIPs(r) - return len(is) == 0, err -} - -// ExtractVIPs accepts a Page struct, specifically a VIPPage struct, -// and extracts the elements into a slice of VirtualIP structs. In other words, -// a generic collection is mapped into a relevant slice. 
-func ExtractVIPs(r pagination.Page) ([]VirtualIP, error) { - var s struct { - VIPs []VirtualIP `json:"vips"` - } - err := (r.(VIPPage)).ExtractInto(&s) - return s.VIPs, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a VirtualIP. -func (r commonResult) Extract() (*VirtualIP, error) { - var s struct { - VirtualIP *VirtualIP `json:"vip" json:"vip"` - } - err := r.ExtractInto(&s) - return s.VirtualIP, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a VirtualIP -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a VirtualIP -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a VirtualIP -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the request succeeded or failed. 
-type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go deleted file mode 100644 index 584a1cf680c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go +++ /dev/null @@ -1,16 +0,0 @@ -package vips - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lb" - resourcePath = "vips" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go deleted file mode 100644 index 108cdb03d8b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Package listeners provides information and interaction with Listeners of the -LBaaS v2 extension for the OpenStack Networking service. 
- -Example to List Listeners - - listOpts := listeners.ListOpts{ - LoadbalancerID : "ca430f80-1737-4712-8dc6-3f640d55594b", - } - - allPages, err := listeners.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allListeners, err := listeners.ExtractListeners(allPages) - if err != nil { - panic(err) - } - - for _, listener := range allListeners { - fmt.Printf("%+v\n", listener) - } - -Example to Create a Listener - - createOpts := listeners.CreateOpts{ - Protocol: "TCP", - Name: "db", - LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab", - AdminStateUp: gophercloud.Enabled, - DefaultPoolID: "41efe233-7591-43c5-9cf7-923964759f9e", - ProtocolPort: 3306, - } - - listener, err := listeners.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Listener - - listenerID := "d67d56a6-4a86-4688-a282-f46444705c64" - - i1001 := 1001 - updateOpts := listeners.UpdateOpts{ - ConnLimit: &i1001, - } - - listener, err := listeners.Update(networkClient, listenerID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Listener - - listenerID := "d67d56a6-4a86-4688-a282-f46444705c64" - err := listeners.Delete(networkClient, listenerID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package listeners diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go deleted file mode 100644 index 625748fd256..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go +++ /dev/null @@ -1,194 +0,0 @@ -package listeners - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Type Protocol represents a listener protocol. -type Protocol string - -// Supported attributes for create/update operations. 
-const ( - ProtocolTCP Protocol = "TCP" - ProtocolHTTP Protocol = "HTTP" - ProtocolHTTPS Protocol = "HTTPS" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToListenerListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the floating IP attributes you want to see returned. SortKey allows you to -// sort by a particular listener attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - ID string `q:"id"` - Name string `q:"name"` - AdminStateUp *bool `q:"admin_state_up"` - TenantID string `q:"tenant_id"` - LoadbalancerID string `q:"loadbalancer_id"` - DefaultPoolID string `q:"default_pool_id"` - Protocol string `q:"protocol"` - ProtocolPort int `q:"protocol_port"` - ConnectionLimit int `q:"connection_limit"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// ToListenerListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToListenerListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns a Pager which allows you to iterate over a collection of -// listeners. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those listeners that are owned by the -// tenant who submits the request, unless an admin user submits the request. 
-func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := rootURL(c) - if opts != nil { - query, err := opts.ToListenerListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return ListenerPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToListenerCreateMap() (map[string]interface{}, error) -} - -// CreateOpts represents options for creating a listener. -type CreateOpts struct { - // The load balancer on which to provision this listener. - LoadbalancerID string `json:"loadbalancer_id" required:"true"` - - // The protocol - can either be TCP, HTTP or HTTPS. - Protocol Protocol `json:"protocol" required:"true"` - - // The port on which to listen for client traffic. - ProtocolPort int `json:"protocol_port" required:"true"` - - // TenantID is only required if the caller has an admin role and wants - // to create a pool for another tenant. - TenantID string `json:"tenant_id,omitempty"` - - // Human-readable name for the Listener. Does not have to be unique. - Name string `json:"name,omitempty"` - - // The ID of the default pool with which the Listener is associated. - DefaultPoolID string `json:"default_pool_id,omitempty"` - - // Human-readable description for the Listener. - Description string `json:"description,omitempty"` - - // The maximum number of connections allowed for the Listener. - ConnLimit *int `json:"connection_limit,omitempty"` - - // A reference to a Barbican container of TLS secrets. - DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"` - - // A list of references to TLS secrets. - SniContainerRefs []string `json:"sni_container_refs,omitempty"` - - // The administrative state of the Listener. A valid value is true (UP) - // or false (DOWN). 
- AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToListenerCreateMap builds a request body from CreateOpts. -func (opts CreateOpts) ToListenerCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "listener") -} - -// Create is an operation which provisions a new Listeners based on the -// configuration defined in the CreateOpts struct. Once the request is -// validated and progress has started on the provisioning process, a -// CreateResult will be returned. -// -// Users with an admin role can create Listeners on behalf of other tenants by -// specifying a TenantID attribute different than their own. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToListenerCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular Listeners based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToListenerUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts represents options for updating a Listener. -type UpdateOpts struct { - // Human-readable name for the Listener. Does not have to be unique. - Name string `json:"name,omitempty"` - - // Human-readable description for the Listener. - Description string `json:"description,omitempty"` - - // The maximum number of connections allowed for the Listener. - ConnLimit *int `json:"connection_limit,omitempty"` - - // A reference to a Barbican container of TLS secrets. - DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"` - - // A list of references to TLS secrets. - SniContainerRefs []string `json:"sni_container_refs,omitempty"` - - // The administrative state of the Listener. 
A valid value is true (UP) - // or false (DOWN). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToListenerUpdateMap builds a request body from UpdateOpts. -func (opts UpdateOpts) ToListenerUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "listener") -} - -// Update is an operation which modifies the attributes of the specified -// Listener. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { - b, err := opts.ToListenerUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - return -} - -// Delete will permanently delete a particular Listeners based on its unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go deleted file mode 100644 index e0c134ed51b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go +++ /dev/null @@ -1,131 +0,0 @@ -package listeners - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" - "github.com/gophercloud/gophercloud/pagination" -) - -type LoadBalancerID struct { - ID string `json:"id"` -} - -// Listener is the primary load balancing configuration object that specifies -// the loadbalancer and port on which client traffic is received, as well -// as other details such as the load balancing method to be use, protocol, etc. -type Listener struct { - // The unique ID for the Listener. - ID string `json:"id"` - - // Owner of the Listener. 
- TenantID string `json:"tenant_id"` - - // Human-readable name for the Listener. Does not have to be unique. - Name string `json:"name"` - - // Human-readable description for the Listener. - Description string `json:"description"` - - // The protocol to loadbalance. A valid value is TCP, HTTP, or HTTPS. - Protocol string `json:"protocol"` - - // The port on which to listen to client traffic that is associated with the - // Loadbalancer. A valid value is from 0 to 65535. - ProtocolPort int `json:"protocol_port"` - - // The UUID of default pool. Must have compatible protocol with listener. - DefaultPoolID string `json:"default_pool_id"` - - // A list of load balancer IDs. - Loadbalancers []LoadBalancerID `json:"loadbalancers"` - - // The maximum number of connections allowed for the Loadbalancer. - // Default is -1, meaning no limit. - ConnLimit int `json:"connection_limit"` - - // The list of references to TLS secrets. - SniContainerRefs []string `json:"sni_container_refs"` - - // A reference to a Barbican container of TLS secrets. - DefaultTlsContainerRef string `json:"default_tls_container_ref"` - - // The administrative state of the Listener. A valid value is true (UP) or false (DOWN). - AdminStateUp bool `json:"admin_state_up"` - - // Pools are the pools which are part of this listener. - Pools []pools.Pool `json:"pools"` -} - -// ListenerPage is the page returned by a pager when traversing over a -// collection of listeners. -type ListenerPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of listeners has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. 
-func (r ListenerPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"listeners_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a ListenerPage struct is empty. -func (r ListenerPage) IsEmpty() (bool, error) { - is, err := ExtractListeners(r) - return len(is) == 0, err -} - -// ExtractListeners accepts a Page struct, specifically a ListenerPage struct, -// and extracts the elements into a slice of Listener structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractListeners(r pagination.Page) ([]Listener, error) { - var s struct { - Listeners []Listener `json:"listeners"` - } - err := (r.(ListenerPage)).ExtractInto(&s) - return s.Listeners, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a listener. -func (r commonResult) Extract() (*Listener, error) { - var s struct { - Listener *Listener `json:"listener"` - } - err := r.ExtractInto(&s) - return s.Listener, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Listener. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Listener. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Listener. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the request succeeded or failed. 
-type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go deleted file mode 100644 index 02fb1eb39ec..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go +++ /dev/null @@ -1,16 +0,0 @@ -package listeners - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lbaas" - resourcePath = "listeners" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go deleted file mode 100644 index eea43391a8c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Package loadbalancers provides information and interaction with Load Balancers -of the LBaaS v2 extension for the OpenStack Networking service. 
- -Example to List Load Balancers - - listOpts := loadbalancers.ListOpts{ - Provider: "haproxy", - } - - allPages, err := loadbalancers.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allLoadbalancers, err := loadbalancers.ExtractLoadBalancers(allPages) - if err != nil { - panic(err) - } - - for _, lb := range allLoadbalancers { - fmt.Printf("%+v\n", lb) - } - -Example to Create a Load Balancer - - createOpts := loadbalancers.CreateOpts{ - Name: "db_lb", - AdminStateUp: gophercloud.Enabled, - VipSubnetID: "9cedb85d-0759-4898-8a4b-fa5a5ea10086", - VipAddress: "10.30.176.48", - Flavor: "medium", - Provider: "haproxy", - } - - lb, err := loadbalancers.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Load Balancer - - lbID := "d67d56a6-4a86-4688-a282-f46444705c64" - - i1001 := 1001 - updateOpts := loadbalancers.UpdateOpts{ - Name: "new-name", - } - - lb, err := loadbalancers.Update(networkClient, lbID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Load Balancers - - lbID := "d67d56a6-4a86-4688-a282-f46444705c64" - err := loadbalancers.Delete(networkClient, lbID).ExtractErr() - if err != nil { - panic(err) - } - -Example to Get the Status of a Load Balancer - - lbID := "d67d56a6-4a86-4688-a282-f46444705c64" - status, err := loadbalancers.GetStatuses(networkClient, LBID).Extract() - if err != nil { - panic(err) - } -*/ -package loadbalancers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go deleted file mode 100644 index 839776dd28b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go +++ /dev/null @@ -1,177 +0,0 @@ -package loadbalancers - -import ( - "github.com/gophercloud/gophercloud" - 
"github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToLoadBalancerListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the Loadbalancer attributes you want to see returned. SortKey allows you to -// sort by a particular attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - Description string `q:"description"` - AdminStateUp *bool `q:"admin_state_up"` - TenantID string `q:"tenant_id"` - ProvisioningStatus string `q:"provisioning_status"` - VipAddress string `q:"vip_address"` - VipPortID string `q:"vip_port_id"` - VipSubnetID string `q:"vip_subnet_id"` - ID string `q:"id"` - OperatingStatus string `q:"operating_status"` - Name string `q:"name"` - Flavor string `q:"flavor"` - Provider string `q:"provider"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// ToLoadbalancerListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToLoadBalancerListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns a Pager which allows you to iterate over a collection of -// load balancers. It accepts a ListOpts struct, which allows you to filter -// and sort the returned collection for greater efficiency. -// -// Default policy settings return only those load balancers that are owned by -// the tenant who submits the request, unless an admin user submits the request. 
-func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := rootURL(c) - if opts != nil { - query, err := opts.ToLoadBalancerListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return LoadBalancerPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToLoadBalancerCreateMap() (map[string]interface{}, error) -} - -// CreateOpts is the common options struct used in this package's Create -// operation. -type CreateOpts struct { - // Human-readable name for the Loadbalancer. Does not have to be unique. - Name string `json:"name,omitempty"` - - // Human-readable description for the Loadbalancer. - Description string `json:"description,omitempty"` - - // The network on which to allocate the Loadbalancer's address. A tenant can - // only create Loadbalancers on networks authorized by policy (e.g. networks - // that belong to them or networks that are shared). - VipSubnetID string `json:"vip_subnet_id" required:"true"` - - // The UUID of the tenant who owns the Loadbalancer. Only administrative users - // can specify a tenant UUID other than their own. - TenantID string `json:"tenant_id,omitempty"` - - // The IP address of the Loadbalancer. - VipAddress string `json:"vip_address,omitempty"` - - // The administrative state of the Loadbalancer. A valid value is true (UP) - // or false (DOWN). - AdminStateUp *bool `json:"admin_state_up,omitempty"` - - // The UUID of a flavor. - Flavor string `json:"flavor,omitempty"` - - // The name of the provider. - Provider string `json:"provider,omitempty"` -} - -// ToLoadBalancerCreateMap builds a request body from CreateOpts. 
-func (opts CreateOpts) ToLoadBalancerCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "loadbalancer") -} - -// Create is an operation which provisions a new loadbalancer based on the -// configuration defined in the CreateOpts struct. Once the request is -// validated and progress has started on the provisioning process, a -// CreateResult will be returned. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToLoadBalancerCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular Loadbalancer based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToLoadBalancerUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts is the common options struct used in this package's Update -// operation. -type UpdateOpts struct { - // Human-readable name for the Loadbalancer. Does not have to be unique. - Name string `json:"name,omitempty"` - - // Human-readable description for the Loadbalancer. - Description string `json:"description,omitempty"` - - // The administrative state of the Loadbalancer. A valid value is true (UP) - // or false (DOWN). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToLoadBalancerUpdateMap builds a request body from UpdateOpts. -func (opts UpdateOpts) ToLoadBalancerUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "loadbalancer") -} - -// Update is an operation which modifies the attributes of the specified -// LoadBalancer. 
-func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { - b, err := opts.ToLoadBalancerUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - return -} - -// Delete will permanently delete a particular LoadBalancer based on its -// unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} - -// GetStatuses will return the status of a particular LoadBalancer. -func GetStatuses(c *gophercloud.ServiceClient, id string) (r GetStatusesResult) { - _, r.Err = c.Get(statusRootURL(c, id), &r.Body, nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go deleted file mode 100644 index 9f8f19d7c56..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go +++ /dev/null @@ -1,149 +0,0 @@ -package loadbalancers - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" - "github.com/gophercloud/gophercloud/pagination" -) - -// LoadBalancer is the primary load balancing configuration object that -// specifies the virtual IP address on which client traffic is received, as well -// as other details such as the load balancing method to be use, protocol, etc. -type LoadBalancer struct { - // Human-readable description for the Loadbalancer. - Description string `json:"description"` - - // The administrative state of the Loadbalancer. - // A valid value is true (UP) or false (DOWN). - AdminStateUp bool `json:"admin_state_up"` - - // Owner of the LoadBalancer. 
- TenantID string `json:"tenant_id"` - - // The provisioning status of the LoadBalancer. - // This value is ACTIVE, PENDING_CREATE or ERROR. - ProvisioningStatus string `json:"provisioning_status"` - - // The IP address of the Loadbalancer. - VipAddress string `json:"vip_address"` - - // The UUID of the port associated with the IP address. - VipPortID string `json:"vip_port_id"` - - // The UUID of the subnet on which to allocate the virtual IP for the - // Loadbalancer address. - VipSubnetID string `json:"vip_subnet_id"` - - // The unique ID for the LoadBalancer. - ID string `json:"id"` - - // The operating status of the LoadBalancer. This value is ONLINE or OFFLINE. - OperatingStatus string `json:"operating_status"` - - // Human-readable name for the LoadBalancer. Does not have to be unique. - Name string `json:"name"` - - // The UUID of a flavor if set. - Flavor string `json:"flavor"` - - // The name of the provider. - Provider string `json:"provider"` - - // Listeners are the listeners related to this Loadbalancer. - Listeners []listeners.Listener `json:"listeners"` -} - -// StatusTree represents the status of a loadbalancer. -type StatusTree struct { - Loadbalancer *LoadBalancer `json:"loadbalancer"` -} - -// LoadBalancerPage is the page returned by a pager when traversing over a -// collection of load balancers. -type LoadBalancerPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of load balancers has -// reached the end of a page and the pager seeks to traverse over a new one. -// In order to do this, it needs to construct the next page's URL. -func (r LoadBalancerPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"loadbalancers_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a LoadBalancerPage struct is empty. 
-func (p LoadBalancerPage) IsEmpty() (bool, error) { - is, err := ExtractLoadBalancers(p) - return len(is) == 0, err -} - -// ExtractLoadBalancers accepts a Page struct, specifically a LoadbalancerPage -// struct, and extracts the elements into a slice of LoadBalancer structs. In -// other words, a generic collection is mapped into a relevant slice. -func ExtractLoadBalancers(r pagination.Page) ([]LoadBalancer, error) { - var s struct { - LoadBalancers []LoadBalancer `json:"loadbalancers"` - } - err := (r.(LoadBalancerPage)).ExtractInto(&s) - return s.LoadBalancers, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a loadbalancer. -func (r commonResult) Extract() (*LoadBalancer, error) { - var s struct { - LoadBalancer *LoadBalancer `json:"loadbalancer"` - } - err := r.ExtractInto(&s) - return s.LoadBalancer, err -} - -// GetStatusesResult represents the result of a GetStatuses operation. -// Call its Extract method to interpret it as a StatusTree. -type GetStatusesResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts the status of -// a Loadbalancer. -func (r GetStatusesResult) Extract() (*StatusTree, error) { - var s struct { - Statuses *StatusTree `json:"statuses"` - } - err := r.ExtractInto(&s) - return s.Statuses, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a LoadBalancer. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a LoadBalancer. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a LoadBalancer. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. 
Call its -// ExtractErr method to determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go deleted file mode 100644 index 73cf5dc126a..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go +++ /dev/null @@ -1,21 +0,0 @@ -package loadbalancers - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lbaas" - resourcePath = "loadbalancers" - statusPath = "statuses" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} - -func statusRootURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id, statusPath) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go deleted file mode 100644 index 6ed8c8fb5ff..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Package monitors provides information and interaction with Monitors -of the LBaaS v2 extension for the OpenStack Networking service. 
- -Example to List Monitors - - listOpts := monitors.ListOpts{ - PoolID: "c79a4468-d788-410c-bf79-9a8ef6354852", - } - - allPages, err := monitors.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allMonitors, err := monitors.ExtractMonitors(allPages) - if err != nil { - panic(err) - } - - for _, monitor := range allMonitors { - fmt.Printf("%+v\n", monitor) - } - -Example to Create a Monitor - - createOpts := monitors.CreateOpts{ - Type: "HTTP", - Name: "db", - PoolID: "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d", - Delay: 20, - Timeout: 10, - MaxRetries: 5, - URLPath: "/check", - ExpectedCodes: "200-299", - } - - monitor, err := monitors.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Monitor - - monitorID := "d67d56a6-4a86-4688-a282-f46444705c64" - - updateOpts := monitors.UpdateOpts{ - Name: "NewHealthmonitorName", - Delay: 3, - Timeout: 20, - MaxRetries: 10, - URLPath: "/another_check", - ExpectedCodes: "301", - } - - monitor, err := monitors.Update(networkClient, monitorID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Monitor - - monitorID := "d67d56a6-4a86-4688-a282-f46444705c64" - err := monitors.Delete(networkClient, monitorID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package monitors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go deleted file mode 100644 index 6d9ab8ba79b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go +++ /dev/null @@ -1,252 +0,0 @@ -package monitors - -import ( - "fmt" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. 
-type ListOptsBuilder interface { - ToMonitorListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the Monitor attributes you want to see returned. SortKey allows you to -// sort by a particular Monitor attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - ID string `q:"id"` - Name string `q:"name"` - TenantID string `q:"tenant_id"` - PoolID string `q:"pool_id"` - Type string `q:"type"` - Delay int `q:"delay"` - Timeout int `q:"timeout"` - MaxRetries int `q:"max_retries"` - HTTPMethod string `q:"http_method"` - URLPath string `q:"url_path"` - ExpectedCodes string `q:"expected_codes"` - AdminStateUp *bool `q:"admin_state_up"` - Status string `q:"status"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// ToMonitorListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToMonitorListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return "", err - } - return q.String(), nil -} - -// List returns a Pager which allows you to iterate over a collection of -// health monitors. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those health monitors that are owned by the -// tenant who submits the request, unless an admin user submits the request. 
-func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := rootURL(c) - if opts != nil { - query, err := opts.ToMonitorListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return MonitorPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// Constants that represent approved monitoring types. -const ( - TypePING = "PING" - TypeTCP = "TCP" - TypeHTTP = "HTTP" - TypeHTTPS = "HTTPS" -) - -var ( - errDelayMustGETimeout = fmt.Errorf("Delay must be greater than or equal to timeout") -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// List request. -type CreateOptsBuilder interface { - ToMonitorCreateMap() (map[string]interface{}, error) -} - -// CreateOpts is the common options struct used in this package's Create -// operation. -type CreateOpts struct { - // The Pool to Monitor. - PoolID string `json:"pool_id" required:"true"` - - // The type of probe, which is PING, TCP, HTTP, or HTTPS, that is - // sent by the load balancer to verify the member state. - Type string `json:"type" required:"true"` - - // The time, in seconds, between sending probes to members. - Delay int `json:"delay" required:"true"` - - // Maximum number of seconds for a Monitor to wait for a ping reply - // before it times out. The value must be less than the delay value. - Timeout int `json:"timeout" required:"true"` - - // Number of permissible ping failures before changing the member's - // status to INACTIVE. Must be a number between 1 and 10. - MaxRetries int `json:"max_retries" required:"true"` - - // URI path that will be accessed if Monitor type is HTTP or HTTPS. - // Required for HTTP(S) types. - URLPath string `json:"url_path,omitempty"` - - // The HTTP method used for requests by the Monitor. If this attribute - // is not specified, it defaults to "GET". Required for HTTP(S) types. 
- HTTPMethod string `json:"http_method,omitempty"` - - // Expected HTTP codes for a passing HTTP(S) Monitor. You can either specify - // a single status like "200", or a range like "200-202". Required for HTTP(S) - // types. - ExpectedCodes string `json:"expected_codes,omitempty"` - - // The UUID of the tenant who owns the Monitor. Only administrative users - // can specify a tenant UUID other than their own. - TenantID string `json:"tenant_id,omitempty"` - - // The Name of the Monitor. - Name string `json:"name,omitempty"` - - // The administrative state of the Monitor. A valid value is true (UP) - // or false (DOWN). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToMonitorCreateMap builds a request body from CreateOpts. -func (opts CreateOpts) ToMonitorCreateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "healthmonitor") - if err != nil { - return nil, err - } - - switch opts.Type { - case TypeHTTP, TypeHTTPS: - switch opts.URLPath { - case "": - return nil, fmt.Errorf("URLPath must be provided for HTTP and HTTPS") - } - switch opts.ExpectedCodes { - case "": - return nil, fmt.Errorf("ExpectedCodes must be provided for HTTP and HTTPS") - } - } - - return b, nil -} - -/* - Create is an operation which provisions a new Health Monitor. There are - different types of Monitor you can provision: PING, TCP or HTTP(S). Below - are examples of how to create each one. 
- - Here is an example config struct to use when creating a PING or TCP Monitor: - - CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3} - CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3} - - Here is an example config struct to use when creating a HTTP(S) Monitor: - - CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3, - HttpMethod: "HEAD", ExpectedCodes: "200", PoolID: "2c946bfc-1804-43ab-a2ff-58f6a762b505"} -*/ -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToMonitorCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular Health Monitor based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToMonitorUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts is the common options struct used in this package's Update -// operation. -type UpdateOpts struct { - // The time, in seconds, between sending probes to members. - Delay int `json:"delay,omitempty"` - - // Maximum number of seconds for a Monitor to wait for a ping reply - // before it times out. The value must be less than the delay value. - Timeout int `json:"timeout,omitempty"` - - // Number of permissible ping failures before changing the member's - // status to INACTIVE. Must be a number between 1 and 10. - MaxRetries int `json:"max_retries,omitempty"` - - // URI path that will be accessed if Monitor type is HTTP or HTTPS. - // Required for HTTP(S) types. - URLPath string `json:"url_path,omitempty"` - - // The HTTP method used for requests by the Monitor. If this attribute - // is not specified, it defaults to "GET". Required for HTTP(S) types. 
- HTTPMethod string `json:"http_method,omitempty"` - - // Expected HTTP codes for a passing HTTP(S) Monitor. You can either specify - // a single status like "200", or a range like "200-202". Required for HTTP(S) - // types. - ExpectedCodes string `json:"expected_codes,omitempty"` - - // The Name of the Monitor. - Name string `json:"name,omitempty"` - - // The administrative state of the Monitor. A valid value is true (UP) - // or false (DOWN). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToMonitorUpdateMap builds a request body from UpdateOpts. -func (opts UpdateOpts) ToMonitorUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "healthmonitor") -} - -// Update is an operation which modifies the attributes of the specified -// Monitor. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToMonitorUpdateMap() - if err != nil { - r.Err = err - return - } - - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - return -} - -// Delete will permanently delete a particular Monitor based on its unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go deleted file mode 100644 index ea832cc5d07..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go +++ /dev/null @@ -1,149 +0,0 @@ -package monitors - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -type PoolID struct { - ID string `json:"id"` -} - -// Monitor represents a load balancer health monitor. 
A health monitor is used -// to determine whether or not back-end members of the VIP's pool are usable -// for processing a request. A pool can have several health monitors associated -// with it. There are different types of health monitors supported: -// -// PING: used to ping the members using ICMP. -// TCP: used to connect to the members using TCP. -// HTTP: used to send an HTTP request to the member. -// HTTPS: used to send a secure HTTP request to the member. -// -// When a pool has several monitors associated with it, each member of the pool -// is monitored by all these monitors. If any monitor declares the member as -// unhealthy, then the member status is changed to INACTIVE and the member -// won't participate in its pool's load balancing. In other words, ALL monitors -// must declare the member to be healthy for it to stay ACTIVE. -type Monitor struct { - // The unique ID for the Monitor. - ID string `json:"id"` - - // The Name of the Monitor. - Name string `json:"name"` - - // TenantID is the owner of the Monitor. - TenantID string `json:"tenant_id"` - - // The type of probe sent by the load balancer to verify the member state, - // which is PING, TCP, HTTP, or HTTPS. - Type string `json:"type"` - - // The time, in seconds, between sending probes to members. - Delay int `json:"delay"` - - // The maximum number of seconds for a monitor to wait for a connection to be - // established before it times out. This value must be less than the delay - // value. - Timeout int `json:"timeout"` - - // Number of allowed connection failures before changing the status of the - // member to INACTIVE. A valid value is from 1 to 10. - MaxRetries int `json:"max_retries"` - - // The HTTP method that the monitor uses for requests. - HTTPMethod string `json:"http_method"` - - // The HTTP path of the request sent by the monitor to test the health of a - // member. Must be a string beginning with a forward slash (/). 
- URLPath string `json:"url_path" ` - - // Expected HTTP codes for a passing HTTP(S) monitor. - ExpectedCodes string `json:"expected_codes"` - - // The administrative state of the health monitor, which is up (true) or - // down (false). - AdminStateUp bool `json:"admin_state_up"` - - // The status of the health monitor. Indicates whether the health monitor is - // operational. - Status string `json:"status"` - - // List of pools that are associated with the health monitor. - Pools []PoolID `json:"pools"` -} - -// MonitorPage is the page returned by a pager when traversing over a -// collection of health monitors. -type MonitorPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of monitors has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r MonitorPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"healthmonitors_links"` - } - - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a MonitorPage struct is empty. -func (r MonitorPage) IsEmpty() (bool, error) { - is, err := ExtractMonitors(r) - return len(is) == 0, err -} - -// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct, -// and extracts the elements into a slice of Monitor structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractMonitors(r pagination.Page) ([]Monitor, error) { - var s struct { - Monitors []Monitor `json:"healthmonitors"` - } - err := (r.(MonitorPage)).ExtractInto(&s) - return s.Monitors, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a monitor. 
-func (r commonResult) Extract() (*Monitor, error) { - var s struct { - Monitor *Monitor `json:"healthmonitor"` - } - err := r.ExtractInto(&s) - return s.Monitor, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Monitor. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Monitor. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Monitor. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the result succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go deleted file mode 100644 index a222e52a93d..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go +++ /dev/null @@ -1,16 +0,0 @@ -package monitors - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lbaas" - resourcePath = "healthmonitors" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go deleted file mode 100644 index 2d57ed43938..00000000000 --- 
a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Package pools provides information and interaction with Pools and -Members of the LBaaS v2 extension for the OpenStack Networking service. - -Example to List Pools - - listOpts := pools.ListOpts{ - LoadbalancerID: "c79a4468-d788-410c-bf79-9a8ef6354852", - } - - allPages, err := pools.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allPools, err := pools.ExtractMonitors(allPages) - if err != nil { - panic(err) - } - - for _, pools := range allPools { - fmt.Printf("%+v\n", pool) - } - -Example to Create a Pool - - createOpts := pools.CreateOpts{ - LBMethod: pools.LBMethodRoundRobin, - Protocol: "HTTP", - Name: "Example pool", - LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab", - } - - pool, err := pools.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Pool - - poolID := "d67d56a6-4a86-4688-a282-f46444705c64" - - updateOpts := pools.UpdateOpts{ - Name: "new-name", - } - - pool, err := pools.Update(networkClient, poolID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Pool - - poolID := "d67d56a6-4a86-4688-a282-f46444705c64" - err := pools.Delete(networkClient, poolID).ExtractErr() - if err != nil { - panic(err) - } - -Example to List Pool Members - - poolID := "d67d56a6-4a86-4688-a282-f46444705c64" - - listOpts := pools.ListMemberOpts{ - ProtocolPort: 80, - } - - allPages, err := pools.ListMembers(networkClient, poolID, listOpts).AllPages() - if err != nil { - panic(err) - } - - allMembers, err := pools.ExtractMembers(allPages) - if err != nil { - panic(err) - } - - for _, member := allMembers { - fmt.Printf("%+v\n", member) - } - -Example to Create a Member - - poolID := "d67d56a6-4a86-4688-a282-f46444705c64" - - createOpts := pools.CreateMemberOpts{ - Name: "db", - SubnetID: 
"1981f108-3c48-48d2-b908-30f7d28532c9", - Address: "10.0.2.11", - ProtocolPort: 80, - Weight: 10, - } - - member, err := pools.CreateMember(networkClient, poolID, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Member - - poolID := "d67d56a6-4a86-4688-a282-f46444705c64" - memberID := "64dba99f-8af8-4200-8882-e32a0660f23e" - - updateOpts := pools.UpdateMemberOpts{ - Name: "new-name", - Weight: 4, - } - - member, err := pools.UpdateMember(networkClient, poolID, memberID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Member - - poolID := "d67d56a6-4a86-4688-a282-f46444705c64" - memberID := "64dba99f-8af8-4200-8882-e32a0660f23e" - - err := pools.DeleteMember(networkClient, poolID, memberID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package pools diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go deleted file mode 100644 index 2173ee81711..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go +++ /dev/null @@ -1,347 +0,0 @@ -package pools - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToPoolListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the Pool attributes you want to see returned. SortKey allows you to -// sort by a particular Pool attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. 
-type ListOpts struct { - LBMethod string `q:"lb_algorithm"` - Protocol string `q:"protocol"` - TenantID string `q:"tenant_id"` - AdminStateUp *bool `q:"admin_state_up"` - Name string `q:"name"` - ID string `q:"id"` - LoadbalancerID string `q:"loadbalancer_id"` - ListenerID string `q:"listener_id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// ToPoolListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToPoolListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns a Pager which allows you to iterate over a collection of -// pools. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those pools that are owned by the -// tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := rootURL(c) - if opts != nil { - query, err := opts.ToPoolListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return PoolPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -type LBMethod string -type Protocol string - -// Supported attributes for create/update operations. -const ( - LBMethodRoundRobin LBMethod = "ROUND_ROBIN" - LBMethodLeastConnections LBMethod = "LEAST_CONNECTIONS" - LBMethodSourceIp LBMethod = "SOURCE_IP" - - ProtocolTCP Protocol = "TCP" - ProtocolHTTP Protocol = "HTTP" - ProtocolHTTPS Protocol = "HTTPS" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. 
-type CreateOptsBuilder interface { - ToPoolCreateMap() (map[string]interface{}, error) -} - -// CreateOpts is the common options struct used in this package's Create -// operation. -type CreateOpts struct { - // The algorithm used to distribute load between the members of the pool. The - // current specification supports LBMethodRoundRobin, LBMethodLeastConnections - // and LBMethodSourceIp as valid values for this attribute. - LBMethod LBMethod `json:"lb_algorithm" required:"true"` - - // The protocol used by the pool members, you can use either - // ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS. - Protocol Protocol `json:"protocol" required:"true"` - - // The Loadbalancer on which the members of the pool will be associated with. - // Note: one of LoadbalancerID or ListenerID must be provided. - LoadbalancerID string `json:"loadbalancer_id,omitempty" xor:"ListenerID"` - - // The Listener on which the members of the pool will be associated with. - // Note: one of LoadbalancerID or ListenerID must be provided. - ListenerID string `json:"listener_id,omitempty" xor:"LoadbalancerID"` - - // The UUID of the tenant who owns the Pool. Only administrative users - // can specify a tenant UUID other than their own. - TenantID string `json:"tenant_id,omitempty"` - - // Name of the pool. - Name string `json:"name,omitempty"` - - // Human-readable description for the pool. - Description string `json:"description,omitempty"` - - // Persistence is the session persistence of the pool. - // Omit this field to prevent session persistence. - Persistence *SessionPersistence `json:"session_persistence,omitempty"` - - // The administrative state of the Pool. A valid value is true (UP) - // or false (DOWN). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToPoolCreateMap builds a request body from CreateOpts. 
-func (opts CreateOpts) ToPoolCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "pool") -} - -// Create accepts a CreateOpts struct and uses the values to create a new -// load balancer pool. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToPoolCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular pool based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToPoolUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts is the common options struct used in this package's Update -// operation. -type UpdateOpts struct { - // Name of the pool. - Name string `json:"name,omitempty"` - - // Human-readable description for the pool. - Description string `json:"description,omitempty"` - - // The algorithm used to distribute load between the members of the pool. The - // current specification supports LBMethodRoundRobin, LBMethodLeastConnections - // and LBMethodSourceIp as valid values for this attribute. - LBMethod LBMethod `json:"lb_algorithm,omitempty"` - - // The administrative state of the Pool. A valid value is true (UP) - // or false (DOWN). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToPoolUpdateMap builds a request body from UpdateOpts. -func (opts UpdateOpts) ToPoolUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "pool") -} - -// Update allows pools to be updated. 
-func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToPoolUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete will permanently delete a particular pool based on its unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} - -// ListMemberOptsBuilder allows extensions to add additional parameters to the -// ListMembers request. -type ListMembersOptsBuilder interface { - ToMembersListQuery() (string, error) -} - -// ListMembersOpts allows the filtering and sorting of paginated collections -// through the API. Filtering is achieved by passing in struct field values -// that map to the Member attributes you want to see returned. SortKey allows -// you to sort by a particular Member attribute. SortDir sets the direction, -// and is either `asc' or `desc'. Marker and Limit are used for pagination. -type ListMembersOpts struct { - Name string `q:"name"` - Weight int `q:"weight"` - AdminStateUp *bool `q:"admin_state_up"` - TenantID string `q:"tenant_id"` - Address string `q:"address"` - ProtocolPort int `q:"protocol_port"` - ID string `q:"id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// ToMemberListQuery formats a ListOpts into a query string. -func (opts ListMembersOpts) ToMembersListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// ListMembers returns a Pager which allows you to iterate over a collection of -// members. It accepts a ListMembersOptsBuilder, which allows you to filter and -// sort the returned collection for greater efficiency. 
-// -// Default policy settings return only those members that are owned by the -// tenant who submits the request, unless an admin user submits the request. -func ListMembers(c *gophercloud.ServiceClient, poolID string, opts ListMembersOptsBuilder) pagination.Pager { - url := memberRootURL(c, poolID) - if opts != nil { - query, err := opts.ToMembersListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return MemberPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateMemberOptsBuilder allows extensions to add additional parameters to the -// CreateMember request. -type CreateMemberOptsBuilder interface { - ToMemberCreateMap() (map[string]interface{}, error) -} - -// CreateMemberOpts is the common options struct used in this package's CreateMember -// operation. -type CreateMemberOpts struct { - // The IP address of the member to receive traffic from the load balancer. - Address string `json:"address" required:"true"` - - // The port on which to listen for client traffic. - ProtocolPort int `json:"protocol_port" required:"true"` - - // Name of the Member. - Name string `json:"name,omitempty"` - - // The UUID of the tenant who owns the Member. Only administrative users - // can specify a tenant UUID other than their own. - TenantID string `json:"tenant_id,omitempty"` - - // A positive integer value that indicates the relative portion of traffic - // that this member should receive from the pool. For example, a member with - // a weight of 10 receives five times as much traffic as a member with a - // weight of 2. - Weight int `json:"weight,omitempty"` - - // If you omit this parameter, LBaaS uses the vip_subnet_id parameter value - // for the subnet UUID. - SubnetID string `json:"subnet_id,omitempty"` - - // The administrative state of the Pool. A valid value is true (UP) - // or false (DOWN). 
- AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToMemberCreateMap builds a request body from CreateMemberOpts. -func (opts CreateMemberOpts) ToMemberCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "member") -} - -// CreateMember will create and associate a Member with a particular Pool. -func CreateMember(c *gophercloud.ServiceClient, poolID string, opts CreateMemberOpts) (r CreateMemberResult) { - b, err := opts.ToMemberCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(memberRootURL(c, poolID), b, &r.Body, nil) - return -} - -// GetMember retrieves a particular Pool Member based on its unique ID. -func GetMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r GetMemberResult) { - _, r.Err = c.Get(memberResourceURL(c, poolID, memberID), &r.Body, nil) - return -} - -// UpdateMemberOptsBuilder allows extensions to add additional parameters to the -// List request. -type UpdateMemberOptsBuilder interface { - ToMemberUpdateMap() (map[string]interface{}, error) -} - -// UpdateMemberOpts is the common options struct used in this package's Update -// operation. -type UpdateMemberOpts struct { - // Name of the Member. - Name string `json:"name,omitempty"` - - // A positive integer value that indicates the relative portion of traffic - // that this member should receive from the pool. For example, a member with - // a weight of 10 receives five times as much traffic as a member with a - // weight of 2. - Weight int `json:"weight,omitempty"` - - // The administrative state of the Pool. A valid value is true (UP) - // or false (DOWN). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToMemberUpdateMap builds a request body from UpdateMemberOpts. -func (opts UpdateMemberOpts) ToMemberUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "member") -} - -// Update allows Member to be updated. 
-func UpdateMember(c *gophercloud.ServiceClient, poolID string, memberID string, opts UpdateMemberOptsBuilder) (r UpdateMemberResult) { - b, err := opts.ToMemberUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(memberResourceURL(c, poolID, memberID), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201, 202}, - }) - return -} - -// DisassociateMember will remove and disassociate a Member from a particular -// Pool. -func DeleteMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r DeleteMemberResult) { - _, r.Err = c.Delete(memberResourceURL(c, poolID, memberID), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go deleted file mode 100644 index 56790fff990..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go +++ /dev/null @@ -1,273 +0,0 @@ -package pools - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" - "github.com/gophercloud/gophercloud/pagination" -) - -// SessionPersistence represents the session persistence feature of the load -// balancing service. It attempts to force connections or requests in the same -// session to be processed by the same member as long as it is ative. Three -// types of persistence are supported: -// -// SOURCE_IP: With this mode, all connections originating from the same source -// IP address, will be handled by the same Member of the Pool. -// HTTP_COOKIE: With this persistence mode, the load balancing function will -// create a cookie on the first request from a client. Subsequent -// requests containing the same cookie value will be handled by -// the same Member of the Pool. 
-// APP_COOKIE: With this persistence mode, the load balancing function will -// rely on a cookie established by the backend application. All -// requests carrying the same cookie value will be handled by the -// same Member of the Pool. -type SessionPersistence struct { - // The type of persistence mode. - Type string `json:"type"` - - // Name of cookie if persistence mode is set appropriately. - CookieName string `json:"cookie_name,omitempty"` -} - -// LoadBalancerID represents a load balancer. -type LoadBalancerID struct { - ID string `json:"id"` -} - -// ListenerID represents a listener. -type ListenerID struct { - ID string `json:"id"` -} - -// Pool represents a logical set of devices, such as web servers, that you -// group together to receive and process traffic. The load balancing function -// chooses a Member of the Pool according to the configured load balancing -// method to handle the new requests or connections received on the VIP address. -type Pool struct { - // The load-balancer algorithm, which is round-robin, least-connections, and - // so on. This value, which must be supported, is dependent on the provider. - // Round-robin must be supported. - LBMethod string `json:"lb_algorithm"` - - // The protocol of the Pool, which is TCP, HTTP, or HTTPS. - Protocol string `json:"protocol"` - - // Description for the Pool. - Description string `json:"description"` - - // A list of listeners objects IDs. - Listeners []ListenerID `json:"listeners"` //[]map[string]interface{} - - // A list of member objects IDs. - Members []Member `json:"members"` - - // The ID of associated health monitor. - MonitorID string `json:"healthmonitor_id"` - - // The network on which the members of the Pool will be located. Only members - // that are on this network can be added to the Pool. - SubnetID string `json:"subnet_id"` - - // Owner of the Pool. - TenantID string `json:"tenant_id"` - - // The administrative state of the Pool, which is up (true) or down (false). 
- AdminStateUp bool `json:"admin_state_up"` - - // Pool name. Does not have to be unique. - Name string `json:"name"` - - // The unique ID for the Pool. - ID string `json:"id"` - - // A list of load balancer objects IDs. - Loadbalancers []LoadBalancerID `json:"loadbalancers"` - - // Indicates whether connections in the same session will be processed by the - // same Pool member or not. - Persistence SessionPersistence `json:"session_persistence"` - - // The load balancer provider. - Provider string `json:"provider"` - - // The Monitor associated with this Pool. - Monitor monitors.Monitor `json:"healthmonitor"` -} - -// PoolPage is the page returned by a pager when traversing over a -// collection of pools. -type PoolPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of pools has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r PoolPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"pools_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a PoolPage struct is empty. -func (r PoolPage) IsEmpty() (bool, error) { - is, err := ExtractPools(r) - return len(is) == 0, err -} - -// ExtractPools accepts a Page struct, specifically a PoolPage struct, -// and extracts the elements into a slice of Pool structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractPools(r pagination.Page) ([]Pool, error) { - var s struct { - Pools []Pool `json:"pools"` - } - err := (r.(PoolPage)).ExtractInto(&s) - return s.Pools, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a pool. 
-func (r commonResult) Extract() (*Pool, error) { - var s struct { - Pool *Pool `json:"pool"` - } - err := r.ExtractInto(&s) - return s.Pool, err -} - -// CreateResult represents the result of a Create operation. Call its Extract -// method to interpret the result as a Pool. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a Get operation. Call its Extract -// method to interpret the result as a Pool. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an Update operation. Call its Extract -// method to interpret the result as a Pool. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a Delete operation. Call its -// ExtractErr method to determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// Member represents the application running on a backend server. -type Member struct { - // Name of the Member. - Name string `json:"name"` - - // Weight of Member. - Weight int `json:"weight"` - - // The administrative state of the member, which is up (true) or down (false). - AdminStateUp bool `json:"admin_state_up"` - - // Owner of the Member. - TenantID string `json:"tenant_id"` - - // Parameter value for the subnet UUID. - SubnetID string `json:"subnet_id"` - - // The Pool to which the Member belongs. - PoolID string `json:"pool_id"` - - // The IP address of the Member. - Address string `json:"address"` - - // The port on which the application is hosted. - ProtocolPort int `json:"protocol_port"` - - // The unique ID for the Member. - ID string `json:"id"` -} - -// MemberPage is the page returned by a pager when traversing over a -// collection of Members in a Pool. -type MemberPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of members has reached -// the end of a page and the pager seeks to traverse over a new one. 
In order -// to do this, it needs to construct the next page's URL. -func (r MemberPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"members_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a MemberPage struct is empty. -func (r MemberPage) IsEmpty() (bool, error) { - is, err := ExtractMembers(r) - return len(is) == 0, err -} - -// ExtractMembers accepts a Page struct, specifically a MemberPage struct, -// and extracts the elements into a slice of Members structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractMembers(r pagination.Page) ([]Member, error) { - var s struct { - Members []Member `json:"members"` - } - err := (r.(MemberPage)).ExtractInto(&s) - return s.Members, err -} - -type commonMemberResult struct { - gophercloud.Result -} - -// ExtractMember is a function that accepts a result and extracts a member. -func (r commonMemberResult) Extract() (*Member, error) { - var s struct { - Member *Member `json:"member"` - } - err := r.ExtractInto(&s) - return s.Member, err -} - -// CreateMemberResult represents the result of a CreateMember operation. -// Call its Extract method to interpret it as a Member. -type CreateMemberResult struct { - commonMemberResult -} - -// GetMemberResult represents the result of a GetMember operation. -// Call its Extract method to interpret it as a Member. -type GetMemberResult struct { - commonMemberResult -} - -// UpdateMemberResult represents the result of an UpdateMember operation. -// Call its Extract method to interpret it as a Member. -type UpdateMemberResult struct { - commonMemberResult -} - -// DeleteMemberResult represents the result of a DeleteMember operation. -// Call its ExtractErr method to determine if the request succeeded or failed. 
-type DeleteMemberResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go deleted file mode 100644 index bceca67707f..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go +++ /dev/null @@ -1,25 +0,0 @@ -package pools - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lbaas" - resourcePath = "pools" - memberPath = "members" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} - -func memberRootURL(c *gophercloud.ServiceClient, poolId string) string { - return c.ServiceURL(rootPath, resourcePath, poolId, memberPath) -} - -func memberResourceURL(c *gophercloud.ServiceClient, poolID string, memeberID string) string { - return c.ServiceURL(rootPath, resourcePath, poolID, memberPath, memeberID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go deleted file mode 100644 index ddc44175a79..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Package provider gives access to the provider Neutron plugin, allowing -network extended attributes. The provider extended attributes for networks -enable administrative users to specify how network objects map to the -underlying networking infrastructure. These extended attributes also appear -when administrative users query networks. - -For more information about extended attributes, see the NetworkExtAttrs -struct. 
The actual semantics of these attributes depend on the technology -back end of the particular plug-in. See the plug-in documentation and the -OpenStack Cloud Administrator Guide to understand which values should be -specific for each of these attributes when OpenStack Networking is deployed -with a particular plug-in. The examples shown in this chapter refer to the -Open vSwitch plug-in. - -The default policy settings enable only users with administrative rights to -specify these parameters in requests and to see their values in responses. By -default, the provider network extension attributes are completely hidden from -regular tenants. As a rule of thumb, if these attributes are not visible in a -GET /networks/ operation, this implies the user submitting the -request is not authorized to view or manipulate provider network attributes. - -Example to List Networks with Provider Information - - type NetworkWithProvider { - networks.Network - provider.NetworkProviderExt - } - - var allNetworks []NetworkWithProvider - - allPages, err := networks.List(networkClient, nil).AllPages() - if err != nil { - panic(err) - } - - err = networks.ExtractNetworksInto(allPages, &allNetworks) - if err != nil { - panic(err) - } - - for _, network := range allNetworks { - fmt.Printf("%+v\n", network) - } - -Example to Create a Provider Network - - segments := []provider.Segment{ - provider.Segment{ - NetworkType: "vxlan", - PhysicalNetwork: "br-ex", - SegmentationID: 615, - }, - } - - iTrue := true - networkCreateOpts := networks.CreateOpts{ - Name: "provider-network", - AdminStateUp: &iTrue, - Shared: &iTrue, - } - - createOpts : provider.CreateOptsExt{ - CreateOptsBuilder: networkCreateOpts, - Segments: segments, - } - - network, err := networks.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } -*/ -package provider diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go deleted file mode 100644 index 32c27970a4c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go +++ /dev/null @@ -1,28 +0,0 @@ -package provider - -import ( - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" -) - -// CreateOptsExt adds a Segments option to the base Network CreateOpts. -type CreateOptsExt struct { - networks.CreateOptsBuilder - Segments []Segment `json:"segments,omitempty"` -} - -// ToNetworkCreateMap adds segments to the base network creation options. -func (opts CreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) { - base, err := opts.CreateOptsBuilder.ToNetworkCreateMap() - if err != nil { - return nil, err - } - - if opts.Segments == nil { - return base, nil - } - - providerMap := base["network"].(map[string]interface{}) - providerMap["segments"] = opts.Segments - - return base, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go deleted file mode 100644 index 9babd2ab6ed..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go +++ /dev/null @@ -1,62 +0,0 @@ -package provider - -import ( - "encoding/json" - "strconv" -) - -// NetworkProviderExt represents an extended form of a Network with additional -// fields. -type NetworkProviderExt struct { - // Specifies the nature of the physical network mapped to this network - // resource. Examples are flat, vlan, or gre. - NetworkType string `json:"provider:network_type"` - - // Identifies the physical network on top of which this network object is - // being implemented. 
The OpenStack Networking API does not expose any - // facility for retrieving the list of available physical networks. As an - // example, in the Open vSwitch plug-in this is a symbolic name which is - // then mapped to specific bridges on each compute host through the Open - // vSwitch plug-in configuration file. - PhysicalNetwork string `json:"provider:physical_network"` - - // Identifies an isolated segment on the physical network; the nature of the - // segment depends on the segmentation model defined by network_type. For - // instance, if network_type is vlan, then this is a vlan identifier; - // otherwise, if network_type is gre, then this will be a gre key. - SegmentationID string `json:"-"` - - // Segments is an array of Segment which defines multiple physical bindings - // to logical networks. - Segments []Segment `json:"segments"` -} - -// Segment defines a physical binding to a logical network. -type Segment struct { - PhysicalNetwork string `json:"provider:physical_network"` - NetworkType string `json:"provider:network_type"` - SegmentationID int `json:"provider:segmentation_id"` -} - -func (r *NetworkProviderExt) UnmarshalJSON(b []byte) error { - type tmp NetworkProviderExt - var networkProviderExt struct { - tmp - SegmentationID interface{} `json:"provider:segmentation_id"` - } - - if err := json.Unmarshal(b, &networkProviderExt); err != nil { - return err - } - - *r = NetworkProviderExt(networkProviderExt.tmp) - - switch t := networkProviderExt.SegmentationID.(type) { - case float64: - r.SegmentationID = strconv.FormatFloat(t, 'f', -1, 64) - case string: - r.SegmentationID = string(t) - } - - return nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go deleted file mode 100644 index 7d8bbcaacba..00000000000 --- 
a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Package groups provides information and interaction with Security Groups -for the OpenStack Networking service. - -Example to List Security Groups - - listOpts := groups.ListOpts{ - TenantID: "966b3c7d36a24facaf20b7e458bf2192", - } - - allPages, err := groups.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allGroups, err := groups.ExtractGroups(allPages) - if err != nil { - panic(err) - } - - for _, group := range allGroups { - fmt.Printf("%+v\n", group) - } - -Example to Create a Security Group - - createOpts := groups.CreateOpts{ - Name: "group_name", - Description: "A Security Group", - } - - group, err := groups.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Security Group - - groupID := "37d94f8a-d136-465c-ae46-144f0d8ef141" - - updateOpts := groups.UpdateOpts{ - Name: "new_name", - } - - group, err := groups.Update(networkClient, groupID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Security Group - - groupID := "37d94f8a-d136-465c-ae46-144f0d8ef141" - err := groups.Delete(networkClient, groupID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package groups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go deleted file mode 100644 index 0a7ef79cf66..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go +++ /dev/null @@ -1,151 +0,0 @@ -package groups - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. 
Filtering is achieved by passing in struct field values that map to -// the group attributes you want to see returned. SortKey allows you to -// sort by a particular network attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - ID string `q:"id"` - Name string `q:"name"` - TenantID string `q:"tenant_id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// List returns a Pager which allows you to iterate over a collection of -// security groups. It accepts a ListOpts struct, which allows you to filter -// and sort the returned collection for greater efficiency. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return SecGroupPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToSecGroupCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new security group. -type CreateOpts struct { - // Human-readable name for the Security Group. Does not have to be unique. - Name string `json:"name" required:"true"` - - // The UUID of the tenant who owns the Group. Only administrative users - // can specify a tenant UUID other than their own. - TenantID string `json:"tenant_id,omitempty"` - - // Describes the security group. - Description string `json:"description,omitempty"` -} - -// ToSecGroupCreateMap builds a request body from CreateOpts. 
-func (opts CreateOpts) ToSecGroupCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "security_group") -} - -// Create is an operation which provisions a new security group with default -// security group rules for the IPv4 and IPv6 ether types. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToSecGroupCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToSecGroupUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains all the values needed to update an existing security -// group. -type UpdateOpts struct { - // Human-readable name for the Security Group. Does not have to be unique. - Name string `json:"name,omitempty"` - - // Describes the security group. - Description string `json:"description,omitempty"` -} - -// ToSecGroupUpdateMap builds a request body from UpdateOpts. -func (opts UpdateOpts) ToSecGroupUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "security_group") -} - -// Update is an operation which updates an existing security group. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToSecGroupUpdateMap() - if err != nil { - r.Err = err - return - } - - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Get retrieves a particular security group based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// Delete will permanently delete a particular security group based on its -// unique ID. 
-func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} - -// IDFromName is a convenience function that returns a security group's ID, -// given its name. -func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - count := 0 - id := "" - pages, err := List(client, ListOpts{}).AllPages() - if err != nil { - return "", err - } - - all, err := ExtractGroups(pages) - if err != nil { - return "", err - } - - for _, s := range all { - if s.Name == name { - count++ - id = s.ID - } - } - - switch count { - case 0: - return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "security group"} - case 1: - return id, nil - default: - return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "security group"} - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go deleted file mode 100644 index 8a8e0ffcfdd..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go +++ /dev/null @@ -1,102 +0,0 @@ -package groups - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" - "github.com/gophercloud/gophercloud/pagination" -) - -// SecGroup represents a container for security group rules. -type SecGroup struct { - // The UUID for the security group. - ID string - - // Human-readable name for the security group. Might not be unique. - // Cannot be named "default" as that is automatically created for a tenant. - Name string - - // The security group description. - Description string - - // A slice of security group rules that dictate the permitted behaviour for - // traffic entering and leaving the group. 
- Rules []rules.SecGroupRule `json:"security_group_rules"` - - // Owner of the security group. - TenantID string `json:"tenant_id"` -} - -// SecGroupPage is the page returned by a pager when traversing over a -// collection of security groups. -type SecGroupPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of security groups has -// reached the end of a page and the pager seeks to traverse over a new one. In -// order to do this, it needs to construct the next page's URL. -func (r SecGroupPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"security_groups_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a SecGroupPage struct is empty. -func (r SecGroupPage) IsEmpty() (bool, error) { - is, err := ExtractGroups(r) - return len(is) == 0, err -} - -// ExtractGroups accepts a Page struct, specifically a SecGroupPage struct, -// and extracts the elements into a slice of SecGroup structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractGroups(r pagination.Page) ([]SecGroup, error) { - var s struct { - SecGroups []SecGroup `json:"security_groups"` - } - err := (r.(SecGroupPage)).ExtractInto(&s) - return s.SecGroups, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a security group. -func (r commonResult) Extract() (*SecGroup, error) { - var s struct { - SecGroup *SecGroup `json:"security_group"` - } - err := r.ExtractInto(&s) - return s.SecGroup, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a SecGroup. -type CreateResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a SecGroup. 
-type UpdateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a SecGroup. -type GetResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go deleted file mode 100644 index 104cbcc558d..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go +++ /dev/null @@ -1,13 +0,0 @@ -package groups - -import "github.com/gophercloud/gophercloud" - -const rootPath = "security-groups" - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go deleted file mode 100644 index bf66dc8b40e..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Package rules provides information and interaction with Security Group Rules -for the OpenStack Networking service. 
- -Example to List Security Groups Rules - - listOpts := rules.ListOpts{ - Protocol: "tcp", - } - - allPages, err := rules.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allRules, err := rules.ExtractRules(allPages) - if err != nil { - panic(err) - } - - for _, rule := range allRules { - fmt.Printf("%+v\n", rule) - } - -Example to Create a Security Group Rule - - createOpts := rules.CreateOpts{ - Direction: "ingress", - PortRangeMin: 80, - EtherType: rules.EtherType4, - PortRangeMax: 80, - Protocol: "tcp", - RemoteGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", - SecGroupID: "a7734e61-b545-452d-a3cd-0189cbd9747a", - } - - rule, err := rules.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Security Group Rule - - ruleID := "37d94f8a-d136-465c-ae46-144f0d8ef141" - err := rules.Delete(networkClient, ruleID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package rules diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go deleted file mode 100644 index 197710fc4c2..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go +++ /dev/null @@ -1,154 +0,0 @@ -package rules - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the security group rule attributes you want to see returned. SortKey allows -// you to sort by a particular network attribute. SortDir sets the direction, -// and is either `asc' or `desc'. Marker and Limit are used for pagination. 
-type ListOpts struct { - Direction string `q:"direction"` - EtherType string `q:"ethertype"` - ID string `q:"id"` - PortRangeMax int `q:"port_range_max"` - PortRangeMin int `q:"port_range_min"` - Protocol string `q:"protocol"` - RemoteGroupID string `q:"remote_group_id"` - RemoteIPPrefix string `q:"remote_ip_prefix"` - SecGroupID string `q:"security_group_id"` - TenantID string `q:"tenant_id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// List returns a Pager which allows you to iterate over a collection of -// security group rules. It accepts a ListOpts struct, which allows you to filter -// and sort the returned collection for greater efficiency. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return SecGroupRulePage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -type RuleDirection string -type RuleProtocol string -type RuleEtherType string - -// Constants useful for CreateOpts -const ( - DirIngress RuleDirection = "ingress" - DirEgress RuleDirection = "egress" - EtherType4 RuleEtherType = "IPv4" - EtherType6 RuleEtherType = "IPv6" - ProtocolAH RuleProtocol = "ah" - ProtocolDCCP RuleProtocol = "dccp" - ProtocolEGP RuleProtocol = "egp" - ProtocolESP RuleProtocol = "esp" - ProtocolGRE RuleProtocol = "gre" - ProtocolICMP RuleProtocol = "icmp" - ProtocolIGMP RuleProtocol = "igmp" - ProtocolIPv6Encap RuleProtocol = "ipv6-encap" - ProtocolIPv6Frag RuleProtocol = "ipv6-frag" - ProtocolIPv6ICMP RuleProtocol = "ipv6-icmp" - ProtocolIPv6NoNxt RuleProtocol = "ipv6-nonxt" - ProtocolIPv6Opts RuleProtocol = "ipv6-opts" - ProtocolIPv6Route RuleProtocol = "ipv6-route" - ProtocolOSPF RuleProtocol = "ospf" - ProtocolPGM RuleProtocol = "pgm" - 
ProtocolRSVP RuleProtocol = "rsvp" - ProtocolSCTP RuleProtocol = "sctp" - ProtocolTCP RuleProtocol = "tcp" - ProtocolUDP RuleProtocol = "udp" - ProtocolUDPLite RuleProtocol = "udplite" - ProtocolVRRP RuleProtocol = "vrrp" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToSecGroupRuleCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new security group -// rule. -type CreateOpts struct { - // Must be either "ingress" or "egress": the direction in which the security - // group rule is applied. - Direction RuleDirection `json:"direction" required:"true"` - - // Must be "IPv4" or "IPv6", and addresses represented in CIDR must match the - // ingress or egress rules. - EtherType RuleEtherType `json:"ethertype" required:"true"` - - // The security group ID to associate with this security group rule. - SecGroupID string `json:"security_group_id" required:"true"` - - // The maximum port number in the range that is matched by the security group - // rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If - // the protocol is ICMP, this value must be an ICMP type. - PortRangeMax int `json:"port_range_max,omitempty"` - - // The minimum port number in the range that is matched by the security group - // rule. If the protocol is TCP or UDP, this value must be less than or equal - // to the value of the PortRangeMax attribute. If the protocol is ICMP, this - // value must be an ICMP type. - PortRangeMin int `json:"port_range_min,omitempty"` - - // The protocol that is matched by the security group rule. Valid values are - // "tcp", "udp", "icmp" or an empty string. - Protocol RuleProtocol `json:"protocol,omitempty"` - - // The remote group ID to be associated with this security group rule. You can - // specify either RemoteGroupID or RemoteIPPrefix. 
- RemoteGroupID string `json:"remote_group_id,omitempty"` - - // The remote IP prefix to be associated with this security group rule. You can - // specify either RemoteGroupID or RemoteIPPrefix. This attribute matches the - // specified IP prefix as the source IP address of the IP packet. - RemoteIPPrefix string `json:"remote_ip_prefix,omitempty"` - - // The UUID of the tenant who owns the Rule. Only administrative users - // can specify a tenant UUID other than their own. - TenantID string `json:"tenant_id,omitempty"` -} - -// ToSecGroupRuleCreateMap builds a request body from CreateOpts. -func (opts CreateOpts) ToSecGroupRuleCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "security_group_rule") -} - -// Create is an operation which adds a new security group rule and associates it -// with an existing security group (whose ID is specified in CreateOpts). -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToSecGroupRuleCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular security group rule based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// Delete will permanently delete a particular security group rule based on its -// unique ID. 
-func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go deleted file mode 100644 index 0d8c43f8eda..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go +++ /dev/null @@ -1,121 +0,0 @@ -package rules - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// SecGroupRule represents a rule to dictate the behaviour of incoming or -// outgoing traffic for a particular security group. -type SecGroupRule struct { - // The UUID for this security group rule. - ID string - - // The direction in which the security group rule is applied. The only values - // allowed are "ingress" or "egress". For a compute instance, an ingress - // security group rule is applied to incoming (ingress) traffic for that - // instance. An egress rule is applied to traffic leaving the instance. - Direction string - - // Must be IPv4 or IPv6, and addresses represented in CIDR must match the - // ingress or egress rules. - EtherType string `json:"ethertype"` - - // The security group ID to associate with this security group rule. - SecGroupID string `json:"security_group_id"` - - // The minimum port number in the range that is matched by the security group - // rule. If the protocol is TCP or UDP, this value must be less than or equal - // to the value of the PortRangeMax attribute. If the protocol is ICMP, this - // value must be an ICMP type. - PortRangeMin int `json:"port_range_min"` - - // The maximum port number in the range that is matched by the security group - // rule. The PortRangeMin attribute constrains the PortRangeMax attribute. 
If - // the protocol is ICMP, this value must be an ICMP type. - PortRangeMax int `json:"port_range_max"` - - // The protocol that is matched by the security group rule. Valid values are - // "tcp", "udp", "icmp" or an empty string. - Protocol string - - // The remote group ID to be associated with this security group rule. You - // can specify either RemoteGroupID or RemoteIPPrefix. - RemoteGroupID string `json:"remote_group_id"` - - // The remote IP prefix to be associated with this security group rule. You - // can specify either RemoteGroupID or RemoteIPPrefix . This attribute - // matches the specified IP prefix as the source IP address of the IP packet. - RemoteIPPrefix string `json:"remote_ip_prefix"` - - // The owner of this security group rule. - TenantID string `json:"tenant_id"` -} - -// SecGroupRulePage is the page returned by a pager when traversing over a -// collection of security group rules. -type SecGroupRulePage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of security group rules has -// reached the end of a page and the pager seeks to traverse over a new one. In -// order to do this, it needs to construct the next page's URL. -func (r SecGroupRulePage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"security_group_rules_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a SecGroupRulePage struct is empty. -func (r SecGroupRulePage) IsEmpty() (bool, error) { - is, err := ExtractRules(r) - return len(is) == 0, err -} - -// ExtractRules accepts a Page struct, specifically a SecGroupRulePage struct, -// and extracts the elements into a slice of SecGroupRule structs. In other words, -// a generic collection is mapped into a relevant slice. 
-func ExtractRules(r pagination.Page) ([]SecGroupRule, error) { - var s struct { - SecGroupRules []SecGroupRule `json:"security_group_rules"` - } - err := (r.(SecGroupRulePage)).ExtractInto(&s) - return s.SecGroupRules, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a security rule. -func (r commonResult) Extract() (*SecGroupRule, error) { - var s struct { - SecGroupRule *SecGroupRule `json:"security_group_rule"` - } - err := r.ExtractInto(&s) - return s.SecGroupRule, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a SecGroupRule. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a SecGroupRule. -type GetResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the request succeeded or failed. 
-type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go deleted file mode 100644 index a5ede0e89b9..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go +++ /dev/null @@ -1,13 +0,0 @@ -package rules - -import "github.com/gophercloud/gophercloud" - -const rootPath = "security-group-rules" - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go deleted file mode 100644 index e768b71f820..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Package networks contains functionality for working with Neutron network -resources. A network is an isolated virtual layer-2 broadcast domain that is -typically reserved for the tenant who created it (unless you configure the -network to be shared). Tenants can create multiple networks until the -thresholds per-tenant quota is reached. - -In the v2.0 Networking API, the network is the main entity. Ports and subnets -are always associated with a network. 
- -Example to List Networks - - listOpts := networks.ListOpts{ - TenantID: "a99e9b4e620e4db09a2dfb6e42a01e66", - } - - allPages, err := networks.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allNetworks, err := networks.ExtractNetworks(allPages) - if err != nil { - panic(err) - } - - for _, network := range allNetworks { - fmt.Printf("%+v", network) - } - -Example to Create a Network - - iTrue := true - createOpts := networks.CreateOpts{ - Name: "network_1", - AdminStateUp: &iTrue, - } - - network, err := networks.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Network - - networkID := "484cda0e-106f-4f4b-bb3f-d413710bbe78" - - updateOpts := networks.UpdateOpts{ - Name: "new_name", - } - - network, err := networks.Update(networkClient, networkID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Network - - networkID := "484cda0e-106f-4f4b-bb3f-d413710bbe78" - err := networks.Delete(networkClient, networkID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package networks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go deleted file mode 100644 index 5b61b247192..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go +++ /dev/null @@ -1,165 +0,0 @@ -package networks - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToNetworkListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the network attributes you want to see returned. 
SortKey allows you to sort -// by a particular network attribute. SortDir sets the direction, and is either -// `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - Status string `q:"status"` - Name string `q:"name"` - AdminStateUp *bool `q:"admin_state_up"` - TenantID string `q:"tenant_id"` - Shared *bool `q:"shared"` - ID string `q:"id"` - Marker string `q:"marker"` - Limit int `q:"limit"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// ToNetworkListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToNetworkListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns a Pager which allows you to iterate over a collection of -// networks. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(c) - if opts != nil { - query, err := opts.ToNetworkListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return NetworkPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// Get retrieves a specific network based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(getURL(c, id), &r.Body, nil) - return -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToNetworkCreateMap() (map[string]interface{}, error) -} - -// CreateOpts represents options used to create a network. 
-type CreateOpts struct { - AdminStateUp *bool `json:"admin_state_up,omitempty"` - Name string `json:"name,omitempty"` - Shared *bool `json:"shared,omitempty"` - TenantID string `json:"tenant_id,omitempty"` -} - -// ToNetworkCreateMap builds a request body from CreateOpts. -func (opts CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "network") -} - -// Create accepts a CreateOpts struct and creates a new network using the values -// provided. This operation does not actually require a request body, i.e. the -// CreateOpts struct argument can be empty. -// -// The tenant ID that is contained in the URI is the tenant that creates the -// network. An admin user, however, has the option of specifying another tenant -// ID in the CreateOpts struct. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToNetworkCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(createURL(c), b, &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToNetworkUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts represents options used to update a network. -type UpdateOpts struct { - AdminStateUp *bool `json:"admin_state_up,omitempty"` - Name string `json:"name,omitempty"` - Shared *bool `json:"shared,omitempty"` -} - -// ToNetworkUpdateMap builds a request body from UpdateOpts. -func (opts UpdateOpts) ToNetworkUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "network") -} - -// Update accepts a UpdateOpts struct and updates an existing network using the -// values provided. For more information, see the Create function. 
-func Update(c *gophercloud.ServiceClient, networkID string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToNetworkUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(updateURL(c, networkID), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - }) - return -} - -// Delete accepts a unique ID and deletes the network associated with it. -func Delete(c *gophercloud.ServiceClient, networkID string) (r DeleteResult) { - _, r.Err = c.Delete(deleteURL(c, networkID), nil) - return -} - -// IDFromName is a convenience function that returns a network's ID, given -// its name. -func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - count := 0 - id := "" - pages, err := List(client, nil).AllPages() - if err != nil { - return "", err - } - - all, err := ExtractNetworks(pages) - if err != nil { - return "", err - } - - for _, s := range all { - if s.Name == name { - count++ - id = s.ID - } - } - - switch count { - case 0: - return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "network"} - case 1: - return id, nil - default: - return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "network"} - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go deleted file mode 100644 index ffd0259f1d2..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go +++ /dev/null @@ -1,111 +0,0 @@ -package networks - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a network resource. 
-func (r commonResult) Extract() (*Network, error) { - var s Network - err := r.ExtractInto(&s) - return &s, err -} - -func (r commonResult) ExtractInto(v interface{}) error { - return r.Result.ExtractIntoStructPtr(v, "network") -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Network. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Network. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Network. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// Network represents, well, a network. -type Network struct { - // UUID for the network - ID string `json:"id"` - - // Human-readable name for the network. Might not be unique. - Name string `json:"name"` - - // The administrative state of network. If false (down), the network does not - // forward packets. - AdminStateUp bool `json:"admin_state_up"` - - // Indicates whether network is currently operational. Possible values include - // `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional - // values. - Status string `json:"status"` - - // Subnets associated with this network. - Subnets []string `json:"subnets"` - - // Owner of network. - TenantID string `json:"tenant_id"` - - // Specifies whether the network resource can be accessed by any tenant. - Shared bool `json:"shared"` -} - -// NetworkPage is the page returned by a pager when traversing over a -// collection of networks. 
-type NetworkPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of networks has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r NetworkPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"networks_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a NetworkPage struct is empty. -func (r NetworkPage) IsEmpty() (bool, error) { - is, err := ExtractNetworks(r) - return len(is) == 0, err -} - -// ExtractNetworks accepts a Page struct, specifically a NetworkPage struct, -// and extracts the elements into a slice of Network structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractNetworks(r pagination.Page) ([]Network, error) { - var s []Network - err := ExtractNetworksInto(r, &s) - return s, err -} - -func ExtractNetworksInto(r pagination.Page, v interface{}) error { - return r.(NetworkPage).Result.ExtractIntoSlicePtr(v, "networks") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go deleted file mode 100644 index 4a8fb1dc7d3..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go +++ /dev/null @@ -1,31 +0,0 @@ -package networks - -import "github.com/gophercloud/gophercloud" - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("networks", id) -} - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("networks") -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return resourceURL(c, id) -} - -func listURL(c *gophercloud.ServiceClient) string { - return rootURL(c) -} - -func createURL(c 
*gophercloud.ServiceClient) string { - return rootURL(c) -} - -func updateURL(c *gophercloud.ServiceClient, id string) string { - return resourceURL(c, id) -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return resourceURL(c, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go deleted file mode 100644 index cfb1774fb4b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Package ports contains functionality for working with Neutron port resources. - -A port represents a virtual switch port on a logical network switch. Virtual -instances attach their interfaces into ports. The logical port also defines -the MAC address and the IP address(es) to be assigned to the interfaces -plugged into them. When IP addresses are associated to a port, this also -implies the port is associated with a subnet, as the IP address was taken -from the allocation pool for a specific subnet. 
- -Example to List Ports - - listOpts := ports.ListOpts{ - DeviceID: "b0b89efe-82f8-461d-958b-adbf80f50c7d", - } - - allPages, err := ports.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allPorts, err := ports.ExtractPorts(allPages) - if err != nil { - panic(err) - } - - for _, port := range allPorts { - fmt.Printf("%+v\n", port) - } - -Example to Create a Port - - createOtps := ports.CreateOpts{ - Name: "private-port", - AdminStateUp: &asu, - NetworkID: "a87cc70a-3e15-4acf-8205-9b711a3531b7", - FixedIPs: []ports.IP{ - {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, - }, - SecurityGroups: &[]string{"foo"}, - AllowedAddressPairs: []ports.AddressPair{ - {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, - }, - } - - port, err := ports.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Port - - portID := "c34bae2b-7641-49b6-bf6d-d8e473620ed8" - - updateOpts := ports.UpdateOpts{ - Name: "new_name", - SecurityGroups: &[]string{}, - } - - port, err := ports.Update(networkClient, portID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Port - - portID := "c34bae2b-7641-49b6-bf6d-d8e473620ed8" - err := ports.Delete(networkClient, portID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package ports diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go deleted file mode 100644 index fd1e9725764..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go +++ /dev/null @@ -1,177 +0,0 @@ -package ports - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. 
-type ListOptsBuilder interface { - ToPortListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the port attributes you want to see returned. SortKey allows you to sort -// by a particular port attribute. SortDir sets the direction, and is either -// `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - Status string `q:"status"` - Name string `q:"name"` - AdminStateUp *bool `q:"admin_state_up"` - NetworkID string `q:"network_id"` - TenantID string `q:"tenant_id"` - DeviceOwner string `q:"device_owner"` - MACAddress string `q:"mac_address"` - ID string `q:"id"` - DeviceID string `q:"device_id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// ToPortListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToPortListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns a Pager which allows you to iterate over a collection of -// ports. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those ports that are owned by the tenant -// who submits the request, unless the request is submitted by a user with -// administrative rights. -func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(c) - if opts != nil { - query, err := opts.ToPortListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return PortPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// Get retrieves a specific port based on its unique ID. 
-func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(getURL(c, id), &r.Body, nil) - return -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToPortCreateMap() (map[string]interface{}, error) -} - -// CreateOpts represents the attributes used when creating a new port. -type CreateOpts struct { - NetworkID string `json:"network_id" required:"true"` - Name string `json:"name,omitempty"` - AdminStateUp *bool `json:"admin_state_up,omitempty"` - MACAddress string `json:"mac_address,omitempty"` - FixedIPs interface{} `json:"fixed_ips,omitempty"` - DeviceID string `json:"device_id,omitempty"` - DeviceOwner string `json:"device_owner,omitempty"` - TenantID string `json:"tenant_id,omitempty"` - SecurityGroups *[]string `json:"security_groups,omitempty"` - AllowedAddressPairs []AddressPair `json:"allowed_address_pairs,omitempty"` -} - -// ToPortCreateMap builds a request body from CreateOpts. -func (opts CreateOpts) ToPortCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "port") -} - -// Create accepts a CreateOpts struct and creates a new network using the values -// provided. You must remember to provide a NetworkID value. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToPortCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(createURL(c), b, &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToPortUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts represents the attributes used when updating an existing port. 
-type UpdateOpts struct { - Name string `json:"name,omitempty"` - AdminStateUp *bool `json:"admin_state_up,omitempty"` - FixedIPs interface{} `json:"fixed_ips,omitempty"` - DeviceID string `json:"device_id,omitempty"` - DeviceOwner string `json:"device_owner,omitempty"` - SecurityGroups *[]string `json:"security_groups,omitempty"` - AllowedAddressPairs *[]AddressPair `json:"allowed_address_pairs,omitempty"` -} - -// ToPortUpdateMap builds a request body from UpdateOpts. -func (opts UpdateOpts) ToPortUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "port") -} - -// Update accepts a UpdateOpts struct and updates an existing port using the -// values provided. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToPortUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(updateURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - }) - return -} - -// Delete accepts a unique ID and deletes the port associated with it. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(deleteURL(c, id), nil) - return -} - -// IDFromName is a convenience function that returns a port's ID, -// given its name. 
-func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - count := 0 - id := "" - pages, err := List(client, nil).AllPages() - if err != nil { - return "", err - } - - all, err := ExtractPorts(pages) - if err != nil { - return "", err - } - - for _, s := range all { - if s.Name == name { - count++ - id = s.ID - } - } - - switch count { - case 0: - return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "port"} - case 1: - return id, nil - default: - return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "port"} - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go deleted file mode 100644 index c50da6dfffd..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go +++ /dev/null @@ -1,136 +0,0 @@ -package ports - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a port resource. -func (r commonResult) Extract() (*Port, error) { - var s struct { - Port *Port `json:"port"` - } - err := r.ExtractInto(&s) - return s.Port, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Port. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Port. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Port. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the request succeeded or failed. 
-type DeleteResult struct { - gophercloud.ErrResult -} - -// IP is a sub-struct that represents an individual IP. -type IP struct { - SubnetID string `json:"subnet_id"` - IPAddress string `json:"ip_address,omitempty"` -} - -// AddressPair contains the IP Address and the MAC address. -type AddressPair struct { - IPAddress string `json:"ip_address,omitempty"` - MACAddress string `json:"mac_address,omitempty"` -} - -// Port represents a Neutron port. See package documentation for a top-level -// description of what this is. -type Port struct { - // UUID for the port. - ID string `json:"id"` - - // Network that this port is associated with. - NetworkID string `json:"network_id"` - - // Human-readable name for the port. Might not be unique. - Name string `json:"name"` - - // Administrative state of port. If false (down), port does not forward - // packets. - AdminStateUp bool `json:"admin_state_up"` - - // Indicates whether network is currently operational. Possible values include - // `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional - // values. - Status string `json:"status"` - - // Mac address to use on this port. - MACAddress string `json:"mac_address"` - - // Specifies IP addresses for the port thus associating the port itself with - // the subnets where the IP addresses are picked from - FixedIPs []IP `json:"fixed_ips"` - - // Owner of network. - TenantID string `json:"tenant_id"` - - // Identifies the entity (e.g.: dhcp agent) using this port. - DeviceOwner string `json:"device_owner"` - - // Specifies the IDs of any security groups associated with a port. - SecurityGroups []string `json:"security_groups"` - - // Identifies the device (e.g., virtual server) using this port. 
- DeviceID string `json:"device_id"` - - // Identifies the list of IP addresses the port will recognize/accept - AllowedAddressPairs []AddressPair `json:"allowed_address_pairs"` -} - -// PortPage is the page returned by a pager when traversing over a collection -// of network ports. -type PortPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of ports has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r PortPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"ports_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a PortPage struct is empty. -func (r PortPage) IsEmpty() (bool, error) { - is, err := ExtractPorts(r) - return len(is) == 0, err -} - -// ExtractPorts accepts a Page struct, specifically a PortPage struct, -// and extracts the elements into a slice of Port structs. In other words, -// a generic collection is mapped into a relevant slice. 
-func ExtractPorts(r pagination.Page) ([]Port, error) { - var s struct { - Ports []Port `json:"ports"` - } - err := (r.(PortPage)).ExtractInto(&s) - return s.Ports, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go deleted file mode 100644 index 600d6f2fd95..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go +++ /dev/null @@ -1,31 +0,0 @@ -package ports - -import "github.com/gophercloud/gophercloud" - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("ports", id) -} - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("ports") -} - -func listURL(c *gophercloud.ServiceClient) string { - return rootURL(c) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return resourceURL(c, id) -} - -func createURL(c *gophercloud.ServiceClient) string { - return rootURL(c) -} - -func updateURL(c *gophercloud.ServiceClient, id string) string { - return resourceURL(c, id) -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return resourceURL(c, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go deleted file mode 100644 index d0ed8dff06a..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Package subnets contains functionality for working with Neutron subnet -resources. A subnet represents an IP address block that can be used to -assign IP addresses to virtual instances. Each subnet must have a CIDR and -must be associated with a network. IPs can either be selected from the whole -subnet CIDR or from allocation pools specified by the user. 
- -A subnet can also have a gateway, a list of DNS name servers, and host routes. -This information is pushed to instances whose interfaces are associated with -the subnet. - -Example to List Subnets - - listOpts := subnets.ListOpts{ - IPVersion: 4, - } - - allPages, err := subnets.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allSubnets, err := subnets.ExtractSubnets(allPages) - if err != nil { - panic(err) - } - - for _, subnet := range allSubnets { - fmt.Printf("%+v\n", subnet) - } - -Example to Create a Subnet With Specified Gateway - - var gatewayIP = "192.168.199.1" - createOpts := subnets.CreateOpts{ - NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", - IPVersion: 4, - CIDR: "192.168.199.0/24", - GatewayIP: &gatewayIP, - AllocationPools: []subnets.AllocationPool{ - { - Start: "192.168.199.2", - End: "192.168.199.254", - }, - }, - DNSNameservers: []string{"foo"}, - } - - subnet, err := subnets.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Create a Subnet With No Gateway - - var noGateway = "" - - createOpts := subnets.CreateOpts{ - NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23", - IPVersion: 4, - CIDR: "192.168.1.0/24", - GatewayIP: &noGateway, - AllocationPools: []subnets.AllocationPool{ - { - Start: "192.168.1.2", - End: "192.168.1.254", - }, - }, - DNSNameservers: []string{}, - } - - subnet, err := subnets.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Create a Subnet With a Default Gateway - - createOpts := subnets.CreateOpts{ - NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23", - IPVersion: 4, - CIDR: "192.168.1.0/24", - AllocationPools: []subnets.AllocationPool{ - { - Start: "192.168.1.2", - End: "192.168.1.254", - }, - }, - DNSNameservers: []string{}, - } - - subnet, err := subnets.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Subnet - - subnetID := 
"db77d064-e34f-4d06-b060-f21e28a61c23" - - updateOpts := subnets.UpdateOpts{ - Name: "new_name", - DNSNameservers: []string{"8.8.8.8}, - } - - subnet, err := subnets.Update(networkClient, subnetID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Remove a Gateway From a Subnet - - var noGateway = "" - subnetID := "db77d064-e34f-4d06-b060-f21e28a61c23" - - updateOpts := subnets.UpdateOpts{ - GatewayIP: &noGateway, - } - - subnet, err := subnets.Update(networkClient, subnetID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Subnet - - subnetID := "db77d064-e34f-4d06-b060-f21e28a61c23" - err := subnets.Delete(networkClient, subnetID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package subnets diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go deleted file mode 100644 index c2e74eff440..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go +++ /dev/null @@ -1,231 +0,0 @@ -package subnets - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToSubnetListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the subnet attributes you want to see returned. SortKey allows you to sort -// by a particular subnet attribute. SortDir sets the direction, and is either -// `asc' or `desc'. Marker and Limit are used for pagination. 
-type ListOpts struct { - Name string `q:"name"` - EnableDHCP *bool `q:"enable_dhcp"` - NetworkID string `q:"network_id"` - TenantID string `q:"tenant_id"` - IPVersion int `q:"ip_version"` - GatewayIP string `q:"gateway_ip"` - CIDR string `q:"cidr"` - ID string `q:"id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// ToSubnetListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToSubnetListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns a Pager which allows you to iterate over a collection of -// subnets. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those subnets that are owned by the tenant -// who submits the request, unless the request is submitted by a user with -// administrative rights. -func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(c) - if opts != nil { - query, err := opts.ToSubnetListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - return SubnetPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// Get retrieves a specific subnet based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(getURL(c, id), &r.Body, nil) - return -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// List request. -type CreateOptsBuilder interface { - ToSubnetCreateMap() (map[string]interface{}, error) -} - -// CreateOpts represents the attributes used when creating a new subnet. -type CreateOpts struct { - // NetworkID is the UUID of the network the subnet will be associated with. 
- NetworkID string `json:"network_id" required:"true"` - - // CIDR is the address CIDR of the subnet. - CIDR string `json:"cidr" required:"true"` - - // Name is a human-readable name of the subnet. - Name string `json:"name,omitempty"` - - // The UUID of the tenant who owns the Subnet. Only administrative users - // can specify a tenant UUID other than their own. - TenantID string `json:"tenant_id,omitempty"` - - // AllocationPools are IP Address pools that will be available for DHCP. - AllocationPools []AllocationPool `json:"allocation_pools,omitempty"` - - // GatewayIP sets gateway information for the subnet. Setting to nil will - // cause a default gateway to automatically be created. Setting to an empty - // string will cause the subnet to be created with no gateway. Setting to - // an explicit address will set that address as the gateway. - GatewayIP *string `json:"gateway_ip,omitempty"` - - // IPVersion is the IP version for the subnet. - IPVersion gophercloud.IPVersion `json:"ip_version,omitempty"` - - // EnableDHCP will either enable to disable the DHCP service. - EnableDHCP *bool `json:"enable_dhcp,omitempty"` - - // DNSNameservers are the nameservers to be set via DHCP. - DNSNameservers []string `json:"dns_nameservers,omitempty"` - - // HostRoutes are any static host routes to be set via DHCP. - HostRoutes []HostRoute `json:"host_routes,omitempty"` -} - -// ToSubnetCreateMap builds a request body from CreateOpts. -func (opts CreateOpts) ToSubnetCreateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "subnet") - if err != nil { - return nil, err - } - - if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" { - m["gateway_ip"] = nil - } - - return b, nil -} - -// Create accepts a CreateOpts struct and creates a new subnet using the values -// provided. You must remember to provide a valid NetworkID, CIDR and IP -// version. 
-func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToSubnetCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(createURL(c), b, &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToSubnetUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts represents the attributes used when updating an existing subnet. -type UpdateOpts struct { - // Name is a human-readable name of the subnet. - Name string `json:"name,omitempty"` - - // AllocationPools are IP Address pools that will be available for DHCP. - AllocationPools []AllocationPool `json:"allocation_pools,omitempty"` - - // GatewayIP sets gateway information for the subnet. Setting to nil will - // cause a default gateway to automatically be created. Setting to an empty - // string will cause the subnet to be created with no gateway. Setting to - // an explicit address will set that address as the gateway. - GatewayIP *string `json:"gateway_ip,omitempty"` - - // DNSNameservers are the nameservers to be set via DHCP. - DNSNameservers []string `json:"dns_nameservers,omitempty"` - - // HostRoutes are any static host routes to be set via DHCP. - HostRoutes []HostRoute `json:"host_routes,omitempty"` - - // EnableDHCP will either enable to disable the DHCP service. - EnableDHCP *bool `json:"enable_dhcp,omitempty"` -} - -// ToSubnetUpdateMap builds a request body from UpdateOpts. -func (opts UpdateOpts) ToSubnetUpdateMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "subnet") - if err != nil { - return nil, err - } - - if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" { - m["gateway_ip"] = nil - } - - return b, nil -} - -// Update accepts a UpdateOpts struct and updates an existing subnet using the -// values provided. 
-func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToSubnetUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(updateURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - }) - return -} - -// Delete accepts a unique ID and deletes the subnet associated with it. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(deleteURL(c, id), nil) - return -} - -// IDFromName is a convenience function that returns a subnet's ID, -// given its name. -func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - count := 0 - id := "" - pages, err := List(client, nil).AllPages() - if err != nil { - return "", err - } - - all, err := ExtractSubnets(pages) - if err != nil { - return "", err - } - - for _, s := range all { - if s.Name == name { - count++ - id = s.ID - } - } - - switch count { - case 0: - return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "subnet"} - case 1: - return id, nil - default: - return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "subnet"} - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go deleted file mode 100644 index ade8abc699c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go +++ /dev/null @@ -1,133 +0,0 @@ -package subnets - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a subnet resource. 
-func (r commonResult) Extract() (*Subnet, error) { - var s struct { - Subnet *Subnet `json:"subnet"` - } - err := r.ExtractInto(&s) - return s.Subnet, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Subnet. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Subnet. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Subnet. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// AllocationPool represents a sub-range of cidr available for dynamic -// allocation to ports, e.g. {Start: "10.0.0.2", End: "10.0.0.254"} -type AllocationPool struct { - Start string `json:"start"` - End string `json:"end"` -} - -// HostRoute represents a route that should be used by devices with IPs from -// a subnet (not including local subnet route). -type HostRoute struct { - DestinationCIDR string `json:"destination"` - NextHop string `json:"nexthop"` -} - -// Subnet represents a subnet. See package documentation for a top-level -// description of what this is. -type Subnet struct { - // UUID representing the subnet. - ID string `json:"id"` - - // UUID of the parent network. - NetworkID string `json:"network_id"` - - // Human-readable name for the subnet. Might not be unique. - Name string `json:"name"` - - // IP version, either `4' or `6'. - IPVersion int `json:"ip_version"` - - // CIDR representing IP range for this subnet, based on IP version. - CIDR string `json:"cidr"` - - // Default gateway used by devices in this subnet. - GatewayIP string `json:"gateway_ip"` - - // DNS name servers used by hosts in this subnet. 
- DNSNameservers []string `json:"dns_nameservers"` - - // Sub-ranges of CIDR available for dynamic allocation to ports. - // See AllocationPool. - AllocationPools []AllocationPool `json:"allocation_pools"` - - // Routes that should be used by devices with IPs from this subnet - // (not including local subnet route). - HostRoutes []HostRoute `json:"host_routes"` - - // Specifies whether DHCP is enabled for this subnet or not. - EnableDHCP bool `json:"enable_dhcp"` - - // Owner of network. - TenantID string `json:"tenant_id"` -} - -// SubnetPage is the page returned by a pager when traversing over a collection -// of subnets. -type SubnetPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of subnets has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r SubnetPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"subnets_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a SubnetPage struct is empty. -func (r SubnetPage) IsEmpty() (bool, error) { - is, err := ExtractSubnets(r) - return len(is) == 0, err -} - -// ExtractSubnets accepts a Page struct, specifically a SubnetPage struct, -// and extracts the elements into a slice of Subnet structs. In other words, -// a generic collection is mapped into a relevant slice. 
-func ExtractSubnets(r pagination.Page) ([]Subnet, error) { - var s struct { - Subnets []Subnet `json:"subnets"` - } - err := (r.(SubnetPage)).ExtractInto(&s) - return s.Subnets, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go deleted file mode 100644 index 7a4f2f7dd4c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go +++ /dev/null @@ -1,31 +0,0 @@ -package subnets - -import "github.com/gophercloud/gophercloud" - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("subnets", id) -} - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("subnets") -} - -func listURL(c *gophercloud.ServiceClient) string { - return rootURL(c) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return resourceURL(c, id) -} - -func createURL(c *gophercloud.ServiceClient) string { - return rootURL(c) -} - -func updateURL(c *gophercloud.ServiceClient, id string) string { - return resourceURL(c, id) -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return resourceURL(c, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go deleted file mode 100644 index 0fa1c083a26..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Package accounts contains functionality for working with Object Storage -account resources. An account is the top-level resource the object storage -hierarchy: containers belong to accounts, objects belong to containers. - -Another way of thinking of an account is like a namespace for all your -resources. 
It is synonymous with a project or tenant in other OpenStack -services. - -Example to Get an Account - - account, err := accounts.Get(objectStorageClient, nil).Extract() - fmt.Printf("%+v\n", account) - -Example to Update an Account - - metadata := map[string]string{ - "some": "metadata", - } - - updateOpts := accounts.UpdateOpts{ - Metadata: metadata, - } - - updateResult, err := accounts.Update(objectStorageClient, updateOpts).Extract() - fmt.Printf("%+v\n", updateResult) - -*/ -package accounts diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go deleted file mode 100644 index df215878534..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go +++ /dev/null @@ -1,100 +0,0 @@ -package accounts - -import "github.com/gophercloud/gophercloud" - -// GetOptsBuilder allows extensions to add additional headers to the Get -// request. -type GetOptsBuilder interface { - ToAccountGetMap() (map[string]string, error) -} - -// GetOpts is a structure that contains parameters for getting an account's -// metadata. -type GetOpts struct { - Newest bool `h:"X-Newest"` -} - -// ToAccountGetMap formats a GetOpts into a map[string]string of headers. -func (opts GetOpts) ToAccountGetMap() (map[string]string, error) { - return gophercloud.BuildHeaders(opts) -} - -// Get is a function that retrieves an account's metadata. To extract just the -// custom metadata, call the ExtractMetadata method on the GetResult. To extract -// all the headers that are returned (including the metadata), call the -// Extract method on the GetResult. 
-func Get(c *gophercloud.ServiceClient, opts GetOptsBuilder) (r GetResult) { - h := make(map[string]string) - if opts != nil { - headers, err := opts.ToAccountGetMap() - if err != nil { - r.Err = err - return - } - for k, v := range headers { - h[k] = v - } - } - resp, err := c.Request("HEAD", getURL(c), &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{204}, - }) - if resp != nil { - r.Header = resp.Header - } - r.Err = err - return -} - -// UpdateOptsBuilder allows extensions to add additional headers to the Update -// request. -type UpdateOptsBuilder interface { - ToAccountUpdateMap() (map[string]string, error) -} - -// UpdateOpts is a structure that contains parameters for updating, creating, or -// deleting an account's metadata. -type UpdateOpts struct { - Metadata map[string]string - ContentType string `h:"Content-Type"` - DetectContentType bool `h:"X-Detect-Content-Type"` - TempURLKey string `h:"X-Account-Meta-Temp-URL-Key"` - TempURLKey2 string `h:"X-Account-Meta-Temp-URL-Key-2"` -} - -// ToAccountUpdateMap formats an UpdateOpts into a map[string]string of headers. -func (opts UpdateOpts) ToAccountUpdateMap() (map[string]string, error) { - headers, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, err - } - for k, v := range opts.Metadata { - headers["X-Account-Meta-"+k] = v - } - return headers, err -} - -// Update is a function that creates, updates, or deletes an account's metadata. -// To extract the headers returned, call the Extract method on the UpdateResult. 
-func Update(c *gophercloud.ServiceClient, opts UpdateOptsBuilder) (r UpdateResult) { - h := make(map[string]string) - if opts != nil { - headers, err := opts.ToAccountUpdateMap() - if err != nil { - r.Err = err - return - } - for k, v := range headers { - h[k] = v - } - } - resp, err := c.Request("POST", updateURL(c), &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{201, 202, 204}, - }) - if resp != nil { - r.Header = resp.Header - } - r.Err = err - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go deleted file mode 100644 index bf5dc846fc3..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go +++ /dev/null @@ -1,167 +0,0 @@ -package accounts - -import ( - "encoding/json" - "strconv" - "strings" - "time" - - "github.com/gophercloud/gophercloud" -) - -// UpdateResult is returned from a call to the Update function. -type UpdateResult struct { - gophercloud.HeaderResult -} - -// UpdateHeader represents the headers returned in the response from an Update -// request. -type UpdateHeader struct { - ContentLength int64 `json:"-"` - ContentType string `json:"Content-Type"` - TransID string `json:"X-Trans-Id"` - Date time.Time `json:"-"` -} - -func (r *UpdateHeader) UnmarshalJSON(b []byte) error { - type tmp UpdateHeader - var s struct { - tmp - ContentLength string `json:"Content-Length"` - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = UpdateHeader(s.tmp) - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - r.Date = time.Time(s.Date) - - return err -} - -// Extract will return a struct of headers returned from a call to Get. 
To -// obtain a map of headers, call the Extract method on the GetResult. -func (r UpdateResult) Extract() (*UpdateHeader, error) { - var s *UpdateHeader - err := r.ExtractInto(&s) - return s, err -} - -// GetHeader represents the headers returned in the response from a Get request. -type GetHeader struct { - BytesUsed int64 `json:"-"` - ContainerCount int64 `json:"-"` - ContentLength int64 `json:"-"` - ObjectCount int64 `json:"-"` - ContentType string `json:"Content-Type"` - TransID string `json:"X-Trans-Id"` - TempURLKey string `json:"X-Account-Meta-Temp-URL-Key"` - TempURLKey2 string `json:"X-Account-Meta-Temp-URL-Key-2"` - Date time.Time `json:"-"` -} - -func (r *GetHeader) UnmarshalJSON(b []byte) error { - type tmp GetHeader - var s struct { - tmp - BytesUsed string `json:"X-Account-Bytes-Used"` - ContentLength string `json:"Content-Length"` - ContainerCount string `json:"X-Account-Container-Count"` - ObjectCount string `json:"X-Account-Object-Count"` - Date string `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = GetHeader(s.tmp) - - switch s.BytesUsed { - case "": - r.BytesUsed = 0 - default: - r.BytesUsed, err = strconv.ParseInt(s.BytesUsed, 10, 64) - if err != nil { - return err - } - } - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - switch s.ObjectCount { - case "": - r.ObjectCount = 0 - default: - r.ObjectCount, err = strconv.ParseInt(s.ObjectCount, 10, 64) - if err != nil { - return err - } - } - - switch s.ContainerCount { - case "": - r.ContainerCount = 0 - default: - r.ContainerCount, err = strconv.ParseInt(s.ContainerCount, 10, 64) - if err != nil { - return err - } - } - - if s.Date != "" { - r.Date, err = time.Parse(time.RFC1123, s.Date) - } - - return err -} - -// GetResult is returned from a call to the Get function. 
-type GetResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Get. -func (r GetResult) Extract() (*GetHeader, error) { - var s *GetHeader - err := r.ExtractInto(&s) - return s, err -} - -// ExtractMetadata is a function that takes a GetResult (of type *http.Response) -// and returns the custom metatdata associated with the account. -func (r GetResult) ExtractMetadata() (map[string]string, error) { - if r.Err != nil { - return nil, r.Err - } - - metadata := make(map[string]string) - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Account-Meta-") { - key := strings.TrimPrefix(k, "X-Account-Meta-") - metadata[key] = v[0] - } - } - return metadata, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go deleted file mode 100644 index 71540b1daf3..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go +++ /dev/null @@ -1,11 +0,0 @@ -package accounts - -import "github.com/gophercloud/gophercloud" - -func getURL(c *gophercloud.ServiceClient) string { - return c.Endpoint -} - -func updateURL(c *gophercloud.ServiceClient) string { - return getURL(c) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go deleted file mode 100644 index 1ac8504de72..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Package containers contains functionality for working with Object Storage -container resources. A container serves as a logical namespace for objects -that are placed inside it - an object with the same name in two different -containers represents two different objects. 
- -In addition to containing objects, you can also use the container to control -access to objects by using an access control list (ACL). - -Example to List Containers - - listOpts := containers.ListOpts{ - Full: true, - } - - allPages, err := containers.List(objectStorageClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allContainers, err := containers.ExtractInfo(allPages) - if err != nil { - panic(err) - } - - for _, container := range allContainers { - fmt.Printf("%+v\n", container) - } - -Example to List Only Container Names - - listOpts := containers.ListOpts{ - Full: false, - } - - allPages, err := containers.List(objectStorageClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allContainers, err := containers.ExtractNames(allPages) - if err != nil { - panic(err) - } - - for _, container := range allContainers { - fmt.Printf("%+v\n", container) - } - -Example to Create a Container - - createOpts := containers.CreateOpts{ - ContentType: "application/json", - Metadata: map[string]string{ - "foo": "bar", - }, - } - - container, err := containers.Create(objectStorageClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Container - - containerName := "my_container" - - updateOpts := containers.UpdateOpts{ - Metadata: map[string]string{ - "bar": "baz", - }, - } - - container, err := containers.Update(objectStorageClient, containerName, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Container - - containerName := "my_container" - - container, err := containers.Delete(objectStorageClient, containerName).Extract() - if err != nil { - panic(err) - } -*/ -package containers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go deleted file mode 100644 index ecb76075b11..00000000000 --- 
a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go +++ /dev/null @@ -1,191 +0,0 @@ -package containers - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the List -// request. -type ListOptsBuilder interface { - ToContainerListParams() (bool, string, error) -} - -// ListOpts is a structure that holds options for listing containers. -type ListOpts struct { - Full bool - Limit int `q:"limit"` - Marker string `q:"marker"` - EndMarker string `q:"end_marker"` - Format string `q:"format"` - Prefix string `q:"prefix"` - Delimiter string `q:"delimiter"` -} - -// ToContainerListParams formats a ListOpts into a query string and boolean -// representing whether to list complete information for each container. -func (opts ListOpts) ToContainerListParams() (bool, string, error) { - q, err := gophercloud.BuildQueryString(opts) - return opts.Full, q.String(), err -} - -// List is a function that retrieves containers associated with the account as -// well as account metadata. It returns a pager which can be iterated with the -// EachPage function. 
-func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} - - url := listURL(c) - if opts != nil { - full, query, err := opts.ToContainerListParams() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - - if full { - headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} - } - } - - pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - p := ContainerPage{pagination.MarkerPageBase{PageResult: r}} - p.MarkerPageBase.Owner = p - return p - }) - pager.Headers = headers - return pager -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToContainerCreateMap() (map[string]string, error) -} - -// CreateOpts is a structure that holds parameters for creating a container. -type CreateOpts struct { - Metadata map[string]string - ContainerRead string `h:"X-Container-Read"` - ContainerSyncTo string `h:"X-Container-Sync-To"` - ContainerSyncKey string `h:"X-Container-Sync-Key"` - ContainerWrite string `h:"X-Container-Write"` - ContentType string `h:"Content-Type"` - DetectContentType bool `h:"X-Detect-Content-Type"` - IfNoneMatch string `h:"If-None-Match"` - VersionsLocation string `h:"X-Versions-Location"` -} - -// ToContainerCreateMap formats a CreateOpts into a map of headers. -func (opts CreateOpts) ToContainerCreateMap() (map[string]string, error) { - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, err - } - for k, v := range opts.Metadata { - h["X-Container-Meta-"+k] = v - } - return h, nil -} - -// Create is a function that creates a new container. 
-func Create(c *gophercloud.ServiceClient, containerName string, opts CreateOptsBuilder) (r CreateResult) { - h := make(map[string]string) - if opts != nil { - headers, err := opts.ToContainerCreateMap() - if err != nil { - r.Err = err - return - } - for k, v := range headers { - h[k] = v - } - } - resp, err := c.Request("PUT", createURL(c, containerName), &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{201, 202, 204}, - }) - if resp != nil { - r.Header = resp.Header - } - r.Err = err - return -} - -// Delete is a function that deletes a container. -func Delete(c *gophercloud.ServiceClient, containerName string) (r DeleteResult) { - _, r.Err = c.Delete(deleteURL(c, containerName), nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToContainerUpdateMap() (map[string]string, error) -} - -// UpdateOpts is a structure that holds parameters for updating, creating, or -// deleting a container's metadata. -type UpdateOpts struct { - Metadata map[string]string - ContainerRead string `h:"X-Container-Read"` - ContainerSyncTo string `h:"X-Container-Sync-To"` - ContainerSyncKey string `h:"X-Container-Sync-Key"` - ContainerWrite string `h:"X-Container-Write"` - ContentType string `h:"Content-Type"` - DetectContentType bool `h:"X-Detect-Content-Type"` - RemoveVersionsLocation string `h:"X-Remove-Versions-Location"` - VersionsLocation string `h:"X-Versions-Location"` -} - -// ToContainerUpdateMap formats a UpdateOpts into a map of headers. -func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) { - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, err - } - for k, v := range opts.Metadata { - h["X-Container-Meta-"+k] = v - } - return h, nil -} - -// Update is a function that creates, updates, or deletes a container's -// metadata. 
-func Update(c *gophercloud.ServiceClient, containerName string, opts UpdateOptsBuilder) (r UpdateResult) { - h := make(map[string]string) - if opts != nil { - headers, err := opts.ToContainerUpdateMap() - if err != nil { - r.Err = err - return - } - - for k, v := range headers { - h[k] = v - } - } - resp, err := c.Request("POST", updateURL(c, containerName), &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{201, 202, 204}, - }) - if resp != nil { - r.Header = resp.Header - } - r.Err = err - return -} - -// Get is a function that retrieves the metadata of a container. To extract just -// the custom metadata, pass the GetResult response to the ExtractMetadata -// function. -func Get(c *gophercloud.ServiceClient, containerName string) (r GetResult) { - resp, err := c.Request("HEAD", getURL(c, containerName), &gophercloud.RequestOpts{ - OkCodes: []int{200, 204}, - }) - if resp != nil { - r.Header = resp.Header - } - r.Err = err - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go deleted file mode 100644 index 87682c885b3..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go +++ /dev/null @@ -1,342 +0,0 @@ -package containers - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Container represents a container resource. -type Container struct { - // The total number of bytes stored in the container. - Bytes int64 `json:"bytes"` - - // The total number of objects stored in the container. - Count int64 `json:"count"` - - // The name of the container. - Name string `json:"name"` -} - -// ContainerPage is the page returned by a pager when traversing over a -// collection of containers. 
-type ContainerPage struct { - pagination.MarkerPageBase -} - -//IsEmpty returns true if a ListResult contains no container names. -func (r ContainerPage) IsEmpty() (bool, error) { - names, err := ExtractNames(r) - return len(names) == 0, err -} - -// LastMarker returns the last container name in a ListResult. -func (r ContainerPage) LastMarker() (string, error) { - names, err := ExtractNames(r) - if err != nil { - return "", err - } - if len(names) == 0 { - return "", nil - } - return names[len(names)-1], nil -} - -// ExtractInfo is a function that takes a ListResult and returns the -// containers' information. -func ExtractInfo(r pagination.Page) ([]Container, error) { - var s []Container - err := (r.(ContainerPage)).ExtractInto(&s) - return s, err -} - -// ExtractNames is a function that takes a ListResult and returns the -// containers' names. -func ExtractNames(page pagination.Page) ([]string, error) { - casted := page.(ContainerPage) - ct := casted.Header.Get("Content-Type") - - switch { - case strings.HasPrefix(ct, "application/json"): - parsed, err := ExtractInfo(page) - if err != nil { - return nil, err - } - - names := make([]string, 0, len(parsed)) - for _, container := range parsed { - names = append(names, container.Name) - } - return names, nil - case strings.HasPrefix(ct, "text/plain"): - names := make([]string, 0, 50) - - body := string(page.(ContainerPage).Body.([]uint8)) - for _, name := range strings.Split(body, "\n") { - if len(name) > 0 { - names = append(names, name) - } - } - - return names, nil - default: - return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) - } -} - -// GetHeader represents the headers returned in the response from a Get request. 
-type GetHeader struct { - AcceptRanges string `json:"Accept-Ranges"` - BytesUsed int64 `json:"-"` - ContentLength int64 `json:"-"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - ObjectCount int64 `json:"-"` - Read []string `json:"-"` - TransID string `json:"X-Trans-Id"` - VersionsLocation string `json:"X-Versions-Location"` - Write []string `json:"-"` -} - -func (r *GetHeader) UnmarshalJSON(b []byte) error { - type tmp GetHeader - var s struct { - tmp - BytesUsed string `json:"X-Container-Bytes-Used"` - ContentLength string `json:"Content-Length"` - ObjectCount string `json:"X-Container-Object-Count"` - Write string `json:"X-Container-Write"` - Read string `json:"X-Container-Read"` - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = GetHeader(s.tmp) - - switch s.BytesUsed { - case "": - r.BytesUsed = 0 - default: - r.BytesUsed, err = strconv.ParseInt(s.BytesUsed, 10, 64) - if err != nil { - return err - } - } - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - switch s.ObjectCount { - case "": - r.ObjectCount = 0 - default: - r.ObjectCount, err = strconv.ParseInt(s.ObjectCount, 10, 64) - if err != nil { - return err - } - } - - r.Read = strings.Split(s.Read, ",") - r.Write = strings.Split(s.Write, ",") - - r.Date = time.Time(s.Date) - - return err -} - -// GetResult represents the result of a get operation. -type GetResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Get. -func (r GetResult) Extract() (*GetHeader, error) { - var s *GetHeader - err := r.ExtractInto(&s) - return s, err -} - -// ExtractMetadata is a function that takes a GetResult (of type *http.Response) -// and returns the custom metadata associated with the container. 
-func (r GetResult) ExtractMetadata() (map[string]string, error) { - if r.Err != nil { - return nil, r.Err - } - metadata := make(map[string]string) - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Container-Meta-") { - key := strings.TrimPrefix(k, "X-Container-Meta-") - metadata[key] = v[0] - } - } - return metadata, nil -} - -// CreateHeader represents the headers returned in the response from a Create -// request. -type CreateHeader struct { - ContentLength int64 `json:"-"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *CreateHeader) UnmarshalJSON(b []byte) error { - type tmp CreateHeader - var s struct { - tmp - ContentLength string `json:"Content-Length"` - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = CreateHeader(s.tmp) - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - r.Date = time.Time(s.Date) - - return err -} - -// CreateResult represents the result of a create operation. To extract the -// the headers from the HTTP response, call its Extract method. -type CreateResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Create. -// To extract the headers from the HTTP response, call its Extract method. -func (r CreateResult) Extract() (*CreateHeader, error) { - var s *CreateHeader - err := r.ExtractInto(&s) - return s, err -} - -// UpdateHeader represents the headers returned in the response from a Update -// request. 
-type UpdateHeader struct { - ContentLength int64 `json:"-"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *UpdateHeader) UnmarshalJSON(b []byte) error { - type tmp UpdateHeader - var s struct { - tmp - ContentLength string `json:"Content-Length"` - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = UpdateHeader(s.tmp) - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - r.Date = time.Time(s.Date) - - return err -} - -// UpdateResult represents the result of an update operation. To extract the -// the headers from the HTTP response, call its Extract method. -type UpdateResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Update. -func (r UpdateResult) Extract() (*UpdateHeader, error) { - var s *UpdateHeader - err := r.ExtractInto(&s) - return s, err -} - -// DeleteHeader represents the headers returned in the response from a Delete -// request. -type DeleteHeader struct { - ContentLength int64 `json:"-"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *DeleteHeader) UnmarshalJSON(b []byte) error { - type tmp DeleteHeader - var s struct { - tmp - ContentLength string `json:"Content-Length"` - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = DeleteHeader(s.tmp) - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - r.Date = time.Time(s.Date) - - return err -} - -// DeleteResult represents the result of a delete operation. 
To extract the -// the headers from the HTTP response, call its Extract method. -type DeleteResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Delete. -func (r DeleteResult) Extract() (*DeleteHeader, error) { - var s *DeleteHeader - err := r.ExtractInto(&s) - return s, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go deleted file mode 100644 index 9b380470dd7..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go +++ /dev/null @@ -1,23 +0,0 @@ -package containers - -import "github.com/gophercloud/gophercloud" - -func listURL(c *gophercloud.ServiceClient) string { - return c.Endpoint -} - -func createURL(c *gophercloud.ServiceClient, container string) string { - return c.ServiceURL(container) -} - -func getURL(c *gophercloud.ServiceClient, container string) string { - return createURL(c, container) -} - -func deleteURL(c *gophercloud.ServiceClient, container string) string { - return createURL(c, container) -} - -func updateURL(c *gophercloud.ServiceClient, container string) string { - return createURL(c, container) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go deleted file mode 100644 index 1e02430fb42..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Package objects contains functionality for working with Object Storage -object resources. An object is a resource that represents and contains data -- such as documents, images, and so on. You can also store custom metadata -with an object. 
- -Example to List Objects - - containerName := "my_container" - - listOpts := objects.ListOpts{ - Full: true, - } - - allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages() - if err != nil { - panic(err) - } - - allObjects, err := objects.ExtractInfo(allPages) - if err != nil { - panic(err) - } - - for _, object := range allObjects { - fmt.Printf("%+v\n", object) - } - -Example to List Object Names - - containerName := "my_container" - - listOpts := objects.ListOpts{ - Full: false, - } - - allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages() - if err != nil { - panic(err) - } - - allObjects, err := objects.ExtractNames(allPages) - if err != nil { - panic(err) - } - - for _, object := range allObjects { - fmt.Printf("%+v\n", object) - } - -Example to Create an Object - - content := "some object content" - objectName := "my_object" - containerName := "my_container" - - createOpts := objects.CreateOpts{ - ContentType: "text/plain" - Content: strings.NewReader(content), - } - - object, err := objects.Create(objectStorageClient, containerName, objectName, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Copy an Object - - objectName := "my_object" - containerName := "my_container" - - copyOpts := objects.CopyOpts{ - Destination: "/newContainer/newObject", - } - - object, err := objects.Copy(objectStorageClient, containerName, objectName, copyOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete an Object - - objectName := "my_object" - containerName := "my_container" - - object, err := objects.Delete(objectStorageClient, containerName, objectName).Extract() - if err != nil { - panic(err) - } - -Example to Download an Object's Data - - objectName := "my_object" - containerName := "my_container" - - object := objects.Download(objectStorageClient, containerName, objectName, nil) - content, err := object.ExtractContent() - if err != nil { - panic(err) - } -*/ -package 
objects diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go deleted file mode 100644 index 5c4ae44d317..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package objects - -import "github.com/gophercloud/gophercloud" - -// ErrWrongChecksum is the error when the checksum generated for an object -// doesn't match the ETAG header. -type ErrWrongChecksum struct { - gophercloud.BaseError -} - -func (e ErrWrongChecksum) Error() string { - return "Local checksum does not match API ETag header" -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go deleted file mode 100644 index f67bfd15904..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go +++ /dev/null @@ -1,461 +0,0 @@ -package objects - -import ( - "bytes" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "fmt" - "io" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the List -// request. -type ListOptsBuilder interface { - ToObjectListParams() (bool, string, error) -} - -// ListOpts is a structure that holds parameters for listing objects. -type ListOpts struct { - // Full is a true/false value that represents the amount of object information - // returned. If Full is set to true, then the content-type, number of bytes, - // hash date last modified, and name are returned. If set to false or not set, - // then only the object names are returned. 
- Full bool - Limit int `q:"limit"` - Marker string `q:"marker"` - EndMarker string `q:"end_marker"` - Format string `q:"format"` - Prefix string `q:"prefix"` - Delimiter string `q:"delimiter"` - Path string `q:"path"` -} - -// ToObjectListParams formats a ListOpts into a query string and boolean -// representing whether to list complete information for each object. -func (opts ListOpts) ToObjectListParams() (bool, string, error) { - q, err := gophercloud.BuildQueryString(opts) - return opts.Full, q.String(), err -} - -// List is a function that retrieves all objects in a container. It also returns -// the details for the container. To extract only the object information or names, -// pass the ListResult response to the ExtractInfo or ExtractNames function, -// respectively. -func List(c *gophercloud.ServiceClient, containerName string, opts ListOptsBuilder) pagination.Pager { - headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} - - url := listURL(c, containerName) - if opts != nil { - full, query, err := opts.ToObjectListParams() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - - if full { - headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} - } - } - - pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - p := ObjectPage{pagination.MarkerPageBase{PageResult: r}} - p.MarkerPageBase.Owner = p - return p - }) - pager.Headers = headers - return pager -} - -// DownloadOptsBuilder allows extensions to add additional parameters to the -// Download request. -type DownloadOptsBuilder interface { - ToObjectDownloadParams() (map[string]string, string, error) -} - -// DownloadOpts is a structure that holds parameters for downloading an object. 
-type DownloadOpts struct { - IfMatch string `h:"If-Match"` - IfModifiedSince time.Time `h:"If-Modified-Since"` - IfNoneMatch string `h:"If-None-Match"` - IfUnmodifiedSince time.Time `h:"If-Unmodified-Since"` - Range string `h:"Range"` - Expires string `q:"expires"` - MultipartManifest string `q:"multipart-manifest"` - Signature string `q:"signature"` -} - -// ToObjectDownloadParams formats a DownloadOpts into a query string and map of -// headers. -func (opts DownloadOpts) ToObjectDownloadParams() (map[string]string, string, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return nil, "", err - } - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, q.String(), err - } - return h, q.String(), nil -} - -// Download is a function that retrieves the content and metadata for an object. -// To extract just the content, pass the DownloadResult response to the -// ExtractContent function. -func Download(c *gophercloud.ServiceClient, containerName, objectName string, opts DownloadOptsBuilder) (r DownloadResult) { - url := downloadURL(c, containerName, objectName) - h := make(map[string]string) - if opts != nil { - headers, query, err := opts.ToObjectDownloadParams() - if err != nil { - r.Err = err - return - } - for k, v := range headers { - h[k] = v - } - url += query - } - - resp, err := c.Get(url, nil, &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{200, 304}, - }) - if resp != nil { - r.Header = resp.Header - r.Body = resp.Body - } - r.Err = err - return -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToObjectCreateParams() (io.Reader, map[string]string, string, error) -} - -// CreateOpts is a structure that holds parameters for creating an object. 
-type CreateOpts struct { - Content io.Reader - Metadata map[string]string - CacheControl string `h:"Cache-Control"` - ContentDisposition string `h:"Content-Disposition"` - ContentEncoding string `h:"Content-Encoding"` - ContentLength int64 `h:"Content-Length"` - ContentType string `h:"Content-Type"` - CopyFrom string `h:"X-Copy-From"` - DeleteAfter int `h:"X-Delete-After"` - DeleteAt int `h:"X-Delete-At"` - DetectContentType string `h:"X-Detect-Content-Type"` - ETag string `h:"ETag"` - IfNoneMatch string `h:"If-None-Match"` - ObjectManifest string `h:"X-Object-Manifest"` - TransferEncoding string `h:"Transfer-Encoding"` - Expires string `q:"expires"` - MultipartManifest string `q:"multipart-manifest"` - Signature string `q:"signature"` -} - -// ToObjectCreateParams formats a CreateOpts into a query string and map of -// headers. -func (opts CreateOpts) ToObjectCreateParams() (io.Reader, map[string]string, string, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return nil, nil, "", err - } - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, nil, "", err - } - - for k, v := range opts.Metadata { - h["X-Object-Meta-"+k] = v - } - - hash := md5.New() - buf := bytes.NewBuffer([]byte{}) - _, err = io.Copy(io.MultiWriter(hash, buf), opts.Content) - if err != nil { - return nil, nil, "", err - } - localChecksum := fmt.Sprintf("%x", hash.Sum(nil)) - h["ETag"] = localChecksum - - return buf, h, q.String(), nil -} - -// Create is a function that creates a new object or replaces an existing -// object. If the returned response's ETag header fails to match the local -// checksum, the failed request will automatically be retried up to a maximum -// of 3 times. 
-func Create(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateOptsBuilder) (r CreateResult) { - url := createURL(c, containerName, objectName) - h := make(map[string]string) - var b io.Reader - if opts != nil { - tmpB, headers, query, err := opts.ToObjectCreateParams() - if err != nil { - r.Err = err - return - } - for k, v := range headers { - h[k] = v - } - url += query - b = tmpB - } - - resp, err := c.Put(url, nil, nil, &gophercloud.RequestOpts{ - RawBody: b, - MoreHeaders: h, - }) - r.Err = err - if resp != nil { - r.Header = resp.Header - } - return -} - -// CopyOptsBuilder allows extensions to add additional parameters to the -// Copy request. -type CopyOptsBuilder interface { - ToObjectCopyMap() (map[string]string, error) -} - -// CopyOpts is a structure that holds parameters for copying one object to -// another. -type CopyOpts struct { - Metadata map[string]string - ContentDisposition string `h:"Content-Disposition"` - ContentEncoding string `h:"Content-Encoding"` - ContentType string `h:"Content-Type"` - Destination string `h:"Destination" required:"true"` -} - -// ToObjectCopyMap formats a CopyOpts into a map of headers. -func (opts CopyOpts) ToObjectCopyMap() (map[string]string, error) { - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, err - } - for k, v := range opts.Metadata { - h["X-Object-Meta-"+k] = v - } - return h, nil -} - -// Copy is a function that copies one object to another. 
-func Copy(c *gophercloud.ServiceClient, containerName, objectName string, opts CopyOptsBuilder) (r CopyResult) { - h := make(map[string]string) - headers, err := opts.ToObjectCopyMap() - if err != nil { - r.Err = err - return - } - - for k, v := range headers { - h[k] = v - } - - url := copyURL(c, containerName, objectName) - resp, err := c.Request("COPY", url, &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{201}, - }) - if resp != nil { - r.Header = resp.Header - } - r.Err = err - return -} - -// DeleteOptsBuilder allows extensions to add additional parameters to the -// Delete request. -type DeleteOptsBuilder interface { - ToObjectDeleteQuery() (string, error) -} - -// DeleteOpts is a structure that holds parameters for deleting an object. -type DeleteOpts struct { - MultipartManifest string `q:"multipart-manifest"` -} - -// ToObjectDeleteQuery formats a DeleteOpts into a query string. -func (opts DeleteOpts) ToObjectDeleteQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// Delete is a function that deletes an object. -func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts DeleteOptsBuilder) (r DeleteResult) { - url := deleteURL(c, containerName, objectName) - if opts != nil { - query, err := opts.ToObjectDeleteQuery() - if err != nil { - r.Err = err - return - } - url += query - } - resp, err := c.Delete(url, nil) - if resp != nil { - r.Header = resp.Header - } - r.Err = err - return -} - -// GetOptsBuilder allows extensions to add additional parameters to the -// Get request. -type GetOptsBuilder interface { - ToObjectGetQuery() (string, error) -} - -// GetOpts is a structure that holds parameters for getting an object's -// metadata. -type GetOpts struct { - Expires string `q:"expires"` - Signature string `q:"signature"` -} - -// ToObjectGetQuery formats a GetOpts into a query string. 
-func (opts GetOpts) ToObjectGetQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// Get is a function that retrieves the metadata of an object. To extract just -// the custom metadata, pass the GetResult response to the ExtractMetadata -// function. -func Get(c *gophercloud.ServiceClient, containerName, objectName string, opts GetOptsBuilder) (r GetResult) { - url := getURL(c, containerName, objectName) - if opts != nil { - query, err := opts.ToObjectGetQuery() - if err != nil { - r.Err = err - return - } - url += query - } - resp, err := c.Request("HEAD", url, &gophercloud.RequestOpts{ - OkCodes: []int{200, 204}, - }) - if resp != nil { - r.Header = resp.Header - } - r.Err = err - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToObjectUpdateMap() (map[string]string, error) -} - -// UpdateOpts is a structure that holds parameters for updating, creating, or -// deleting an object's metadata. -type UpdateOpts struct { - Metadata map[string]string - ContentDisposition string `h:"Content-Disposition"` - ContentEncoding string `h:"Content-Encoding"` - ContentType string `h:"Content-Type"` - DeleteAfter int `h:"X-Delete-After"` - DeleteAt int `h:"X-Delete-At"` - DetectContentType bool `h:"X-Detect-Content-Type"` -} - -// ToObjectUpdateMap formats a UpdateOpts into a map of headers. -func (opts UpdateOpts) ToObjectUpdateMap() (map[string]string, error) { - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, err - } - for k, v := range opts.Metadata { - h["X-Object-Meta-"+k] = v - } - return h, nil -} - -// Update is a function that creates, updates, or deletes an object's metadata. 
-func Update(c *gophercloud.ServiceClient, containerName, objectName string, opts UpdateOptsBuilder) (r UpdateResult) { - h := make(map[string]string) - if opts != nil { - headers, err := opts.ToObjectUpdateMap() - if err != nil { - r.Err = err - return - } - - for k, v := range headers { - h[k] = v - } - } - url := updateURL(c, containerName, objectName) - resp, err := c.Post(url, nil, nil, &gophercloud.RequestOpts{ - MoreHeaders: h, - }) - if resp != nil { - r.Header = resp.Header - } - r.Err = err - return -} - -// HTTPMethod represents an HTTP method string (e.g. "GET"). -type HTTPMethod string - -var ( - // GET represents an HTTP "GET" method. - GET HTTPMethod = "GET" - - // POST represents an HTTP "POST" method. - POST HTTPMethod = "POST" -) - -// CreateTempURLOpts are options for creating a temporary URL for an object. -type CreateTempURLOpts struct { - // (REQUIRED) Method is the HTTP method to allow for users of the temp URL. - // Valid values are "GET" and "POST". - Method HTTPMethod - - // (REQUIRED) TTL is the number of seconds the temp URL should be active. - TTL int - - // (Optional) Split is the string on which to split the object URL. Since only - // the object path is used in the hash, the object URL needs to be parsed. If - // empty, the default OpenStack URL split point will be used ("/v1/"). - Split string -} - -// CreateTempURL is a function for creating a temporary URL for an object. It -// allows users to have "GET" or "POST" access to a particular tenant's object -// for a limited amount of time. 
-func CreateTempURL(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateTempURLOpts) (string, error) { - if opts.Split == "" { - opts.Split = "/v1/" - } - duration := time.Duration(opts.TTL) * time.Second - expiry := time.Now().Add(duration).Unix() - getHeader, err := accounts.Get(c, nil).Extract() - if err != nil { - return "", err - } - secretKey := []byte(getHeader.TempURLKey) - url := getURL(c, containerName, objectName) - splitPath := strings.Split(url, opts.Split) - baseURL, objectPath := splitPath[0], splitPath[1] - objectPath = opts.Split + objectPath - body := fmt.Sprintf("%s\n%d\n%s", opts.Method, expiry, objectPath) - hash := hmac.New(sha1.New, secretKey) - hash.Write([]byte(body)) - hexsum := fmt.Sprintf("%x", hash.Sum(nil)) - return fmt.Sprintf("%s%s?temp_url_sig=%s&temp_url_expires=%d", baseURL, objectPath, hexsum, expiry), nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go deleted file mode 100644 index f19b8f4aa55..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go +++ /dev/null @@ -1,496 +0,0 @@ -package objects - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "strconv" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Object is a structure that holds information related to a storage object. -type Object struct { - // Bytes is the total number of bytes that comprise the object. - Bytes int64 `json:"bytes"` - - // ContentType is the content type of the object. - ContentType string `json:"content_type"` - - // Hash represents the MD5 checksum value of the object's content. - Hash string `json:"hash"` - - // LastModified is the time the object was last modified, represented - // as a string. 
- LastModified time.Time `json:"-"` - - // Name is the unique name for the object. - Name string `json:"name"` -} - -func (r *Object) UnmarshalJSON(b []byte) error { - type tmp Object - var s *struct { - tmp - LastModified gophercloud.JSONRFC3339MilliNoZ `json:"last_modified"` - } - - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = Object(s.tmp) - - r.LastModified = time.Time(s.LastModified) - - return nil - -} - -// ObjectPage is a single page of objects that is returned from a call to the -// List function. -type ObjectPage struct { - pagination.MarkerPageBase -} - -// IsEmpty returns true if a ListResult contains no object names. -func (r ObjectPage) IsEmpty() (bool, error) { - names, err := ExtractNames(r) - return len(names) == 0, err -} - -// LastMarker returns the last object name in a ListResult. -func (r ObjectPage) LastMarker() (string, error) { - names, err := ExtractNames(r) - if err != nil { - return "", err - } - if len(names) == 0 { - return "", nil - } - return names[len(names)-1], nil -} - -// ExtractInfo is a function that takes a page of objects and returns their -// full information. -func ExtractInfo(r pagination.Page) ([]Object, error) { - var s []Object - err := (r.(ObjectPage)).ExtractInto(&s) - return s, err -} - -// ExtractNames is a function that takes a page of objects and returns only -// their names. 
-func ExtractNames(r pagination.Page) ([]string, error) { - casted := r.(ObjectPage) - ct := casted.Header.Get("Content-Type") - switch { - case strings.HasPrefix(ct, "application/json"): - parsed, err := ExtractInfo(r) - if err != nil { - return nil, err - } - - names := make([]string, 0, len(parsed)) - for _, object := range parsed { - names = append(names, object.Name) - } - - return names, nil - case strings.HasPrefix(ct, "text/plain"): - names := make([]string, 0, 50) - - body := string(r.(ObjectPage).Body.([]uint8)) - for _, name := range strings.Split(body, "\n") { - if len(name) > 0 { - names = append(names, name) - } - } - - return names, nil - case strings.HasPrefix(ct, "text/html"): - return []string{}, nil - default: - return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) - } -} - -// DownloadHeader represents the headers returned in the response from a -// Download request. -type DownloadHeader struct { - AcceptRanges string `json:"Accept-Ranges"` - ContentDisposition string `json:"Content-Disposition"` - ContentEncoding string `json:"Content-Encoding"` - ContentLength int64 `json:"-"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - DeleteAt time.Time `json:"-"` - ETag string `json:"Etag"` - LastModified time.Time `json:"-"` - ObjectManifest string `json:"X-Object-Manifest"` - StaticLargeObject bool `json:"X-Static-Large-Object"` - TransID string `json:"X-Trans-Id"` -} - -func (r *DownloadHeader) UnmarshalJSON(b []byte) error { - type tmp DownloadHeader - var s struct { - tmp - ContentLength string `json:"Content-Length"` - Date gophercloud.JSONRFC1123 `json:"Date"` - DeleteAt gophercloud.JSONUnix `json:"X-Delete-At"` - LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = DownloadHeader(s.tmp) - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = 
strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - r.Date = time.Time(s.Date) - r.DeleteAt = time.Time(s.DeleteAt) - r.LastModified = time.Time(s.LastModified) - - return nil -} - -// DownloadResult is a *http.Response that is returned from a call to the -// Download function. -type DownloadResult struct { - gophercloud.HeaderResult - Body io.ReadCloser -} - -// Extract will return a struct of headers returned from a call to Download. -func (r DownloadResult) Extract() (*DownloadHeader, error) { - var s *DownloadHeader - err := r.ExtractInto(&s) - return s, err -} - -// ExtractContent is a function that takes a DownloadResult's io.Reader body -// and reads all available data into a slice of bytes. Please be aware that due -// the nature of io.Reader is forward-only - meaning that it can only be read -// once and not rewound. You can recreate a reader from the output of this -// function by using bytes.NewReader(downloadBytes) -func (r *DownloadResult) ExtractContent() ([]byte, error) { - if r.Err != nil { - return nil, r.Err - } - defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) - if err != nil { - return nil, err - } - r.Body.Close() - return body, nil -} - -// GetHeader represents the headers returned in the response from a Get request. 
-type GetHeader struct { - ContentDisposition string `json:"Content-Disposition"` - ContentEncoding string `json:"Content-Encoding"` - ContentLength int64 `json:"-"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - DeleteAt time.Time `json:"-"` - ETag string `json:"Etag"` - LastModified time.Time `json:"-"` - ObjectManifest string `json:"X-Object-Manifest"` - StaticLargeObject bool `json:"X-Static-Large-Object"` - TransID string `json:"X-Trans-Id"` -} - -func (r *GetHeader) UnmarshalJSON(b []byte) error { - type tmp GetHeader - var s struct { - tmp - ContentLength string `json:"Content-Length"` - Date gophercloud.JSONRFC1123 `json:"Date"` - DeleteAt gophercloud.JSONUnix `json:"X-Delete-At"` - LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = GetHeader(s.tmp) - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - r.Date = time.Time(s.Date) - r.DeleteAt = time.Time(s.DeleteAt) - r.LastModified = time.Time(s.LastModified) - - return nil -} - -// GetResult is a *http.Response that is returned from a call to the Get -// function. -type GetResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Get. -func (r GetResult) Extract() (*GetHeader, error) { - var s *GetHeader - err := r.ExtractInto(&s) - return s, err -} - -// ExtractMetadata is a function that takes a GetResult (of type *http.Response) -// and returns the custom metadata associated with the object. 
-func (r GetResult) ExtractMetadata() (map[string]string, error) { - if r.Err != nil { - return nil, r.Err - } - metadata := make(map[string]string) - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Object-Meta-") { - key := strings.TrimPrefix(k, "X-Object-Meta-") - metadata[key] = v[0] - } - } - return metadata, nil -} - -// CreateHeader represents the headers returned in the response from a -// Create request. -type CreateHeader struct { - ContentLength int64 `json:"-"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - ETag string `json:"Etag"` - LastModified time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *CreateHeader) UnmarshalJSON(b []byte) error { - type tmp CreateHeader - var s struct { - tmp - ContentLength string `json:"Content-Length"` - Date gophercloud.JSONRFC1123 `json:"Date"` - LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = CreateHeader(s.tmp) - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - r.Date = time.Time(s.Date) - r.LastModified = time.Time(s.LastModified) - - return nil -} - -// CreateResult represents the result of a create operation. -type CreateResult struct { - checksum string - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Create. -func (r CreateResult) Extract() (*CreateHeader, error) { - //if r.Header.Get("ETag") != fmt.Sprintf("%x", localChecksum) { - // return nil, ErrWrongChecksum{} - //} - var s *CreateHeader - err := r.ExtractInto(&s) - return s, err -} - -// UpdateHeader represents the headers returned in the response from a -// Update request. 
-type UpdateHeader struct { - ContentLength int64 `json:"-"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *UpdateHeader) UnmarshalJSON(b []byte) error { - type tmp UpdateHeader - var s struct { - tmp - ContentLength string `json:"Content-Length"` - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = UpdateHeader(s.tmp) - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - r.Date = time.Time(s.Date) - - return nil -} - -// UpdateResult represents the result of an update operation. -type UpdateResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Update. -func (r UpdateResult) Extract() (*UpdateHeader, error) { - var s *UpdateHeader - err := r.ExtractInto(&s) - return s, err -} - -// DeleteHeader represents the headers returned in the response from a -// Delete request. -type DeleteHeader struct { - ContentLength int64 `json:"Content-Length"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *DeleteHeader) UnmarshalJSON(b []byte) error { - type tmp DeleteHeader - var s struct { - tmp - ContentLength string `json:"Content-Length"` - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = DeleteHeader(s.tmp) - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - r.Date = time.Time(s.Date) - - return nil -} - -// DeleteResult represents the result of a delete operation. 
-type DeleteResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Delete. -func (r DeleteResult) Extract() (*DeleteHeader, error) { - var s *DeleteHeader - err := r.ExtractInto(&s) - return s, err -} - -// CopyHeader represents the headers returned in the response from a -// Copy request. -type CopyHeader struct { - ContentLength int64 `json:"-"` - ContentType string `json:"Content-Type"` - CopiedFrom string `json:"X-Copied-From"` - CopiedFromLastModified time.Time `json:"-"` - Date time.Time `json:"-"` - ETag string `json:"Etag"` - LastModified time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *CopyHeader) UnmarshalJSON(b []byte) error { - type tmp CopyHeader - var s struct { - tmp - ContentLength string `json:"Content-Length"` - CopiedFromLastModified gophercloud.JSONRFC1123 `json:"X-Copied-From-Last-Modified"` - Date gophercloud.JSONRFC1123 `json:"Date"` - LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = CopyHeader(s.tmp) - - switch s.ContentLength { - case "": - r.ContentLength = 0 - default: - r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) - if err != nil { - return err - } - } - - r.Date = time.Time(s.Date) - r.CopiedFromLastModified = time.Time(s.CopiedFromLastModified) - r.LastModified = time.Time(s.LastModified) - - return nil -} - -// CopyResult represents the result of a copy operation. -type CopyResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Copy. 
-func (r CopyResult) Extract() (*CopyHeader, error) { - var s *CopyHeader - err := r.ExtractInto(&s) - return s, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go deleted file mode 100644 index b3ac304b742..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go +++ /dev/null @@ -1,33 +0,0 @@ -package objects - -import ( - "github.com/gophercloud/gophercloud" -) - -func listURL(c *gophercloud.ServiceClient, container string) string { - return c.ServiceURL(container) -} - -func copyURL(c *gophercloud.ServiceClient, container, object string) string { - return c.ServiceURL(container, object) -} - -func createURL(c *gophercloud.ServiceClient, container, object string) string { - return copyURL(c, container, object) -} - -func getURL(c *gophercloud.ServiceClient, container, object string) string { - return copyURL(c, container, object) -} - -func deleteURL(c *gophercloud.ServiceClient, container, object string) string { - return copyURL(c, container, object) -} - -func downloadURL(c *gophercloud.ServiceClient, container, object string) string { - return copyURL(c, container, object) -} - -func updateURL(c *gophercloud.ServiceClient, container, object string) string { - return copyURL(c, container, object) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go deleted file mode 100644 index 989dc4ece2b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -/* -Package swauth implements Swift's built-in authentication. 
- -Example to Authenticate with swauth - - authOpts := swauth.AuthOpts{ - User: "project:user", - Key: "password", - } - - swiftClient, err := swauth.NewObjectStorageV1(providerClient, authOpts) - if err != nil { - panic(err) - } -*/ -package swauth diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go deleted file mode 100644 index 29bdcbcf76f..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go +++ /dev/null @@ -1,70 +0,0 @@ -package swauth - -import "github.com/gophercloud/gophercloud" - -// AuthOptsBuilder describes struct types that can be accepted by the Auth call. -type AuthOptsBuilder interface { - ToAuthOptsMap() (map[string]string, error) -} - -// AuthOpts specifies an authentication request. -type AuthOpts struct { - // User is an Swauth-based username in username:tenant format. - User string `h:"X-Auth-User" required:"true"` - - // Key is a secret/password to authenticate the User with. - Key string `h:"X-Auth-Key" required:"true"` -} - -// ToAuthOptsMap formats an AuthOpts structure into a request body. -func (opts AuthOpts) ToAuthOptsMap() (map[string]string, error) { - return gophercloud.BuildHeaders(opts) -} - -// Auth performs an authentication request for a Swauth-based user. 
-func Auth(c *gophercloud.ProviderClient, opts AuthOptsBuilder) (r GetAuthResult) { - h := make(map[string]string) - - if opts != nil { - headers, err := opts.ToAuthOptsMap() - if err != nil { - r.Err = err - return - } - - for k, v := range headers { - h[k] = v - } - } - - resp, err := c.Request("GET", getURL(c), &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{200}, - }) - - if resp != nil { - r.Header = resp.Header - } - - r.Err = err - - return r -} - -// NewObjectStorageV1 creates a Swauth-authenticated *gophercloud.ServiceClient -// client that can issue ObjectStorage-based API calls. -func NewObjectStorageV1(pc *gophercloud.ProviderClient, authOpts AuthOpts) (*gophercloud.ServiceClient, error) { - auth, err := Auth(pc, authOpts).Extract() - if err != nil { - return nil, err - } - - swiftClient := &gophercloud.ServiceClient{ - ProviderClient: pc, - Endpoint: gophercloud.NormalizeURL(auth.StorageURL), - } - - swiftClient.TokenID = auth.Token - - return swiftClient, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go deleted file mode 100644 index f442f472550..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go +++ /dev/null @@ -1,27 +0,0 @@ -package swauth - -import ( - "github.com/gophercloud/gophercloud" -) - -// GetAuthResult contains the response from the Auth request. Call its Extract -// method to interpret it as an AuthResult. -type GetAuthResult struct { - gophercloud.HeaderResult -} - -// AuthResult contains the authentication information from a Swauth -// authentication request. -type AuthResult struct { - Token string `json:"X-Auth-Token"` - StorageURL string `json:"X-Storage-Url"` - CDNURL string `json:"X-CDN-Management-Url"` -} - -// Extract is a method that attempts to interpret any Swauth authentication -// response as a AuthResult struct. 
-func (r GetAuthResult) Extract() (*AuthResult, error) { - var s *AuthResult - err := r.ExtractInto(&s) - return s, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go deleted file mode 100644 index a30cabd60e5..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go +++ /dev/null @@ -1,7 +0,0 @@ -package swauth - -import "github.com/gophercloud/gophercloud" - -func getURL(c *gophercloud.ProviderClient) string { - return c.IdentityBase + "auth/v1.0" -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go deleted file mode 100644 index 27da19f91a8..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go +++ /dev/null @@ -1,111 +0,0 @@ -package utils - -import ( - "fmt" - "strings" - - "github.com/gophercloud/gophercloud" -) - -// Version is a supported API version, corresponding to a vN package within the appropriate service. -type Version struct { - ID string - Suffix string - Priority int -} - -var goodStatus = map[string]bool{ - "current": true, - "supported": true, - "stable": true, -} - -// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's -// published versions. -// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint. 
-func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { - type linkResp struct { - Href string `json:"href"` - Rel string `json:"rel"` - } - - type valueResp struct { - ID string `json:"id"` - Status string `json:"status"` - Links []linkResp `json:"links"` - } - - type versionsResp struct { - Values []valueResp `json:"values"` - } - - type response struct { - Versions versionsResp `json:"versions"` - } - - normalize := func(endpoint string) string { - if !strings.HasSuffix(endpoint, "/") { - return endpoint + "/" - } - return endpoint - } - identityEndpoint := normalize(client.IdentityEndpoint) - - // If a full endpoint is specified, check version suffixes for a match first. - for _, v := range recognized { - if strings.HasSuffix(identityEndpoint, v.Suffix) { - return v, identityEndpoint, nil - } - } - - var resp response - _, err := client.Request("GET", client.IdentityBase, &gophercloud.RequestOpts{ - JSONResponse: &resp, - OkCodes: []int{200, 300}, - }) - - if err != nil { - return nil, "", err - } - - var highest *Version - var endpoint string - - for _, value := range resp.Versions.Values { - href := "" - for _, link := range value.Links { - if link.Rel == "self" { - href = normalize(link.Href) - } - } - - for _, version := range recognized { - if strings.Contains(value.ID, version.ID) { - // Prefer a version that exactly matches the provided endpoint. - if href == identityEndpoint { - if href == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) - } - return version, href, nil - } - - // Otherwise, find the highest-priority version with a whitelisted status. 
- if goodStatus[strings.ToLower(value.Status)] { - if highest == nil || version.Priority > highest.Priority { - highest = version - endpoint = href - } - } - } - } - } - - if highest == nil { - return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) - } - if endpoint == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) - } - - return highest, endpoint, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/http.go b/vendor/github.com/gophercloud/gophercloud/pagination/http.go deleted file mode 100644 index 757295c423a..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/http.go +++ /dev/null @@ -1,60 +0,0 @@ -package pagination - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/gophercloud/gophercloud" -) - -// PageResult stores the HTTP response that returned the current page of results. -type PageResult struct { - gophercloud.Result - url.URL -} - -// PageResultFrom parses an HTTP response as JSON and returns a PageResult containing the -// results, interpreting it as JSON if the content type indicates. -func PageResultFrom(resp *http.Response) (PageResult, error) { - var parsedBody interface{} - - defer resp.Body.Close() - rawBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PageResult{}, err - } - - if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { - err = json.Unmarshal(rawBody, &parsedBody) - if err != nil { - return PageResult{}, err - } - } else { - parsedBody = rawBody - } - - return PageResultFromParsed(resp, parsedBody), err -} - -// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its -// body parsed as JSON (and closed). 
-func PageResultFromParsed(resp *http.Response, body interface{}) PageResult { - return PageResult{ - Result: gophercloud.Result{ - Body: body, - Header: resp.Header, - }, - URL: *resp.Request.URL, - } -} - -// Request performs an HTTP request and extracts the http.Response from the result. -func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) { - return client.Get(url, nil, &gophercloud.RequestOpts{ - MoreHeaders: headers, - OkCodes: []int{200, 204, 300}, - }) -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/linked.go b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go deleted file mode 100644 index 3656fb7f8f4..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/linked.go +++ /dev/null @@ -1,92 +0,0 @@ -package pagination - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" -) - -// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result. -type LinkedPageBase struct { - PageResult - - // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer. - // If any link along the path is missing, an empty URL will be returned. - // If any link results in an unexpected value type, an error will be returned. - // When left as "nil", []string{"links", "next"} will be used as a default. - LinkPath []string -} - -// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present. -// It assumes that the links are available in a "links" element of the top-level response object. -// If this is not the case, override NextPageURL on your result type. 
-func (current LinkedPageBase) NextPageURL() (string, error) { - var path []string - var key string - - if current.LinkPath == nil { - path = []string{"links", "next"} - } else { - path = current.LinkPath - } - - submap, ok := current.Body.(map[string]interface{}) - if !ok { - err := gophercloud.ErrUnexpectedType{} - err.Expected = "map[string]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return "", err - } - - for { - key, path = path[0], path[1:len(path)] - - value, ok := submap[key] - if !ok { - return "", nil - } - - if len(path) > 0 { - submap, ok = value.(map[string]interface{}) - if !ok { - err := gophercloud.ErrUnexpectedType{} - err.Expected = "map[string]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) - return "", err - } - } else { - if value == nil { - // Actual null element. - return "", nil - } - - url, ok := value.(string) - if !ok { - err := gophercloud.ErrUnexpectedType{} - err.Expected = "string" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) - return "", err - } - - return url, nil - } - } -} - -// IsEmpty satisifies the IsEmpty method of the Page interface -func (current LinkedPageBase) IsEmpty() (bool, error) { - if b, ok := current.Body.([]interface{}); ok { - return len(b) == 0, nil - } - err := gophercloud.ErrUnexpectedType{} - err.Expected = "[]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return true, err -} - -// GetBody returns the linked page's body. This method is needed to satisfy the -// Page interface. 
-func (current LinkedPageBase) GetBody() interface{} { - return current.Body -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/marker.go b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go deleted file mode 100644 index 52e53bae850..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/marker.go +++ /dev/null @@ -1,58 +0,0 @@ -package pagination - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" -) - -// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager. -// For convenience, embed the MarkedPageBase struct. -type MarkerPage interface { - Page - - // LastMarker returns the last "marker" value on this page. - LastMarker() (string, error) -} - -// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters. -type MarkerPageBase struct { - PageResult - - // Owner is a reference to the embedding struct. - Owner MarkerPage -} - -// NextPageURL generates the URL for the page of results after this one. -func (current MarkerPageBase) NextPageURL() (string, error) { - currentURL := current.URL - - mark, err := current.Owner.LastMarker() - if err != nil { - return "", err - } - - q := currentURL.Query() - q.Set("marker", mark) - currentURL.RawQuery = q.Encode() - - return currentURL.String(), nil -} - -// IsEmpty satisifies the IsEmpty method of the Page interface -func (current MarkerPageBase) IsEmpty() (bool, error) { - if b, ok := current.Body.([]interface{}); ok { - return len(b) == 0, nil - } - err := gophercloud.ErrUnexpectedType{} - err.Expected = "[]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return true, err -} - -// GetBody returns the linked page's body. This method is needed to satisfy the -// Page interface. 
-func (current MarkerPageBase) GetBody() interface{} { - return current.Body -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go deleted file mode 100644 index 6f1609ef2e3..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go +++ /dev/null @@ -1,238 +0,0 @@ -package pagination - -import ( - "errors" - "fmt" - "net/http" - "reflect" - "strings" - - "github.com/gophercloud/gophercloud" -) - -var ( - // ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist. - ErrPageNotAvailable = errors.New("The requested page does not exist.") -) - -// Page must be satisfied by the result type of any resource collection. -// It allows clients to interact with the resource uniformly, regardless of whether or not or how it's paginated. -// Generally, rather than implementing this interface directly, implementors should embed one of the concrete PageBase structs, -// instead. -// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type -// will need to implement. -type Page interface { - - // NextPageURL generates the URL for the page of data that follows this collection. - // Return "" if no such page exists. - NextPageURL() (string, error) - - // IsEmpty returns true if this Page has no items in it. - IsEmpty() (bool, error) - - // GetBody returns the Page Body. This is used in the `AllPages` method. - GetBody() interface{} -} - -// Pager knows how to advance through a specific resource collection, one page at a time. -type Pager struct { - client *gophercloud.ServiceClient - - initialURL string - - createPage func(r PageResult) Page - - Err error - - // Headers supplies additional HTTP headers to populate on each paged request. - Headers map[string]string -} - -// NewPager constructs a manually-configured pager. 
-// Supply the URL for the first page, a function that requests a specific page given a URL, and a function that counts a page. -func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager { - return Pager{ - client: client, - initialURL: initialURL, - createPage: createPage, - } -} - -// WithPageCreator returns a new Pager that substitutes a different page creation function. This is -// useful for overriding List functions in delegation. -func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager { - return Pager{ - client: p.client, - initialURL: p.initialURL, - createPage: createPage, - } -} - -func (p Pager) fetchNextPage(url string) (Page, error) { - resp, err := Request(p.client, p.Headers, url) - if err != nil { - return nil, err - } - - remembered, err := PageResultFrom(resp) - if err != nil { - return nil, err - } - - return p.createPage(remembered), nil -} - -// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function. -// Return "false" from the handler to prematurely stop iterating. -func (p Pager) EachPage(handler func(Page) (bool, error)) error { - if p.Err != nil { - return p.Err - } - currentURL := p.initialURL - for { - currentPage, err := p.fetchNextPage(currentURL) - if err != nil { - return err - } - - empty, err := currentPage.IsEmpty() - if err != nil { - return err - } - if empty { - return nil - } - - ok, err := handler(currentPage) - if err != nil { - return err - } - if !ok { - return nil - } - - currentURL, err = currentPage.NextPageURL() - if err != nil { - return err - } - if currentURL == "" { - return nil - } - } -} - -// AllPages returns all the pages from a `List` operation in a single page, -// allowing the user to retrieve all the pages at once. -func (p Pager) AllPages() (Page, error) { - // pagesSlice holds all the pages until they get converted into as Page Body. 
- var pagesSlice []interface{} - // body will contain the final concatenated Page body. - var body reflect.Value - - // Grab a test page to ascertain the page body type. - testPage, err := p.fetchNextPage(p.initialURL) - if err != nil { - return nil, err - } - // Store the page type so we can use reflection to create a new mega-page of - // that type. - pageType := reflect.TypeOf(testPage) - - // if it's a single page, just return the testPage (first page) - if _, found := pageType.FieldByName("SinglePageBase"); found { - return testPage, nil - } - - // Switch on the page body type. Recognized types are `map[string]interface{}`, - // `[]byte`, and `[]interface{}`. - switch pb := testPage.GetBody().(type) { - case map[string]interface{}: - // key is the map key for the page body if the body type is `map[string]interface{}`. - var key string - // Iterate over the pages to concatenate the bodies. - err = p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().(map[string]interface{}) - for k, v := range b { - // If it's a linked page, we don't want the `links`, we want the other one. - if !strings.HasSuffix(k, "links") { - // check the field's type. we only want []interface{} (which is really []map[string]interface{}) - switch vt := v.(type) { - case []interface{}: - key = k - pagesSlice = append(pagesSlice, vt...) - } - } - } - return true, nil - }) - if err != nil { - return nil, err - } - // Set body to value of type `map[string]interface{}` - body = reflect.MakeMap(reflect.MapOf(reflect.TypeOf(key), reflect.TypeOf(pagesSlice))) - body.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(pagesSlice)) - case []byte: - // Iterate over the pages to concatenate the bodies. 
- err = p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().([]byte) - pagesSlice = append(pagesSlice, b) - // seperate pages with a comma - pagesSlice = append(pagesSlice, []byte{10}) - return true, nil - }) - if err != nil { - return nil, err - } - if len(pagesSlice) > 0 { - // Remove the trailing comma. - pagesSlice = pagesSlice[:len(pagesSlice)-1] - } - var b []byte - // Combine the slice of slices in to a single slice. - for _, slice := range pagesSlice { - b = append(b, slice.([]byte)...) - } - // Set body to value of type `bytes`. - body = reflect.New(reflect.TypeOf(b)).Elem() - body.SetBytes(b) - case []interface{}: - // Iterate over the pages to concatenate the bodies. - err = p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().([]interface{}) - pagesSlice = append(pagesSlice, b...) - return true, nil - }) - if err != nil { - return nil, err - } - // Set body to value of type `[]interface{}` - body = reflect.MakeSlice(reflect.TypeOf(pagesSlice), len(pagesSlice), len(pagesSlice)) - for i, s := range pagesSlice { - body.Index(i).Set(reflect.ValueOf(s)) - } - default: - err := gophercloud.ErrUnexpectedType{} - err.Expected = "map[string]interface{}/[]byte/[]interface{}" - err.Actual = fmt.Sprintf("%T", pb) - return nil, err - } - - // Each `Extract*` function is expecting a specific type of page coming back, - // otherwise the type assertion in those functions will fail. pageType is needed - // to create a type in this method that has the same type that the `Extract*` - // function is expecting and set the Body of that object to the concatenated - // pages. - page := reflect.New(pageType) - // Set the page body to be the concatenated pages. - page.Elem().FieldByName("Body").Set(body) - // Set any additional headers that were pass along. The `objectstorage` pacakge, - // for example, passes a Content-Type header. 
- h := make(http.Header) - for k, v := range p.Headers { - h.Add(k, v) - } - page.Elem().FieldByName("Header").Set(reflect.ValueOf(h)) - // Type assert the page to a Page interface so that the type assertion in the - // `Extract*` methods will work. - return page.Elem().Interface().(Page), err -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go deleted file mode 100644 index 912daea3642..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs. -*/ -package pagination diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/single.go b/vendor/github.com/gophercloud/gophercloud/pagination/single.go deleted file mode 100644 index 4251d6491ef..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/single.go +++ /dev/null @@ -1,33 +0,0 @@ -package pagination - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" -) - -// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once. -type SinglePageBase PageResult - -// NextPageURL always returns "" to indicate that there are no more pages to return. -func (current SinglePageBase) NextPageURL() (string, error) { - return "", nil -} - -// IsEmpty satisifies the IsEmpty method of the Page interface -func (current SinglePageBase) IsEmpty() (bool, error) { - if b, ok := current.Body.([]interface{}); ok { - return len(b) == 0, nil - } - err := gophercloud.ErrUnexpectedType{} - err.Expected = "[]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return true, err -} - -// GetBody returns the single page's body. This method is needed to satisfy the -// Page interface. 
-func (current SinglePageBase) GetBody() interface{} { - return current.Body -} diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go deleted file mode 100644 index 6afc8f8b721..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/params.go +++ /dev/null @@ -1,463 +0,0 @@ -package gophercloud - -import ( - "encoding/json" - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -/* -BuildRequestBody builds a map[string]interface from the given `struct`. If -parent is not an empty string, the final map[string]interface returned will -encapsulate the built one. For example: - - disk := 1 - createOpts := flavors.CreateOpts{ - ID: "1", - Name: "m1.tiny", - Disk: &disk, - RAM: 512, - VCPUs: 1, - RxTxFactor: 1.0, - } - - body, err := gophercloud.BuildRequestBody(createOpts, "flavor") - -The above example can be run as-is, however it is recommended to look at how -BuildRequestBody is used within Gophercloud to more fully understand how it -fits within the request process as a whole rather than use it directly as shown -above. 
-*/ -func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - optsMap := make(map[string]interface{}) - if optsValue.Kind() == reflect.Struct { - //fmt.Printf("optsValue.Kind() is a reflect.Struct: %+v\n", optsValue.Kind()) - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - - if f.Name != strings.Title(f.Name) { - //fmt.Printf("Skipping field: %s...\n", f.Name) - continue - } - - //fmt.Printf("Starting on field: %s...\n", f.Name) - - zero := isZero(v) - //fmt.Printf("v is zero?: %v\n", zero) - - // if the field has a required tag that's set to "true" - if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - //fmt.Printf("Checking required field [%s]:\n\tv: %+v\n\tisZero:%v\n", f.Name, v.Interface(), zero) - // if the field's value is zero, return a missing-argument error - if zero { - // if the field has a 'required' tag, it can't have a zero-value - err := ErrMissingInput{} - err.Argument = f.Name - return nil, err - } - } - - if xorTag := f.Tag.Get("xor"); xorTag != "" { - //fmt.Printf("Checking `xor` tag for field [%s] with value %+v:\n\txorTag: %s\n", f.Name, v, xorTag) - xorField := optsValue.FieldByName(xorTag) - var xorFieldIsZero bool - if reflect.ValueOf(xorField.Interface()) == reflect.Zero(xorField.Type()) { - xorFieldIsZero = true - } else { - if xorField.Kind() == reflect.Ptr { - xorField = xorField.Elem() - } - xorFieldIsZero = isZero(xorField) - } - if !(zero != xorFieldIsZero) { - err := ErrMissingInput{} - err.Argument = fmt.Sprintf("%s/%s", f.Name, xorTag) - err.Info = fmt.Sprintf("Exactly one of %s and %s must be provided", f.Name, xorTag) - return nil, err - } - } - - if orTag := f.Tag.Get("or"); orTag != "" { - //fmt.Printf("Checking 
`or` tag for field with:\n\tname: %+v\n\torTag:%s\n", f.Name, orTag) - //fmt.Printf("field is zero?: %v\n", zero) - if zero { - orField := optsValue.FieldByName(orTag) - var orFieldIsZero bool - if reflect.ValueOf(orField.Interface()) == reflect.Zero(orField.Type()) { - orFieldIsZero = true - } else { - if orField.Kind() == reflect.Ptr { - orField = orField.Elem() - } - orFieldIsZero = isZero(orField) - } - if orFieldIsZero { - err := ErrMissingInput{} - err.Argument = fmt.Sprintf("%s/%s", f.Name, orTag) - err.Info = fmt.Sprintf("At least one of %s and %s must be provided", f.Name, orTag) - return nil, err - } - } - } - - if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { - if zero { - //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) - if jsonTag := f.Tag.Get("json"); jsonTag != "" { - jsonTagPieces := strings.Split(jsonTag, ",") - if len(jsonTagPieces) > 1 && jsonTagPieces[1] == "omitempty" { - if v.CanSet() { - if !v.IsNil() { - if v.Kind() == reflect.Ptr { - v.Set(reflect.Zero(v.Type())) - } - } - //fmt.Printf("value after change: %+v\n", optsValue.Field(i)) - } - } - } - continue - } - - //fmt.Printf("Calling BuildRequestBody with:\n\tv: %+v\n\tf.Name:%s\n", v.Interface(), f.Name) - _, err := BuildRequestBody(v.Interface(), f.Name) - if err != nil { - return nil, err - } - } - } - - //fmt.Printf("opts: %+v \n", opts) - - b, err := json.Marshal(opts) - if err != nil { - return nil, err - } - - //fmt.Printf("string(b): %s\n", string(b)) - - err = json.Unmarshal(b, &optsMap) - if err != nil { - return nil, err - } - - //fmt.Printf("optsMap: %+v\n", optsMap) - - if parent != "" { - optsMap = map[string]interface{}{parent: optsMap} - } - //fmt.Printf("optsMap after parent added: %+v\n", optsMap) - return optsMap, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. 
- return nil, fmt.Errorf("Options type is not a struct.") -} - -// EnabledState is a convenience type, mostly used in Create and Update -// operations. Because the zero value of a bool is FALSE, we need to use a -// pointer instead to indicate zero-ness. -type EnabledState *bool - -// Convenience vars for EnabledState values. -var ( - iTrue = true - iFalse = false - - Enabled EnabledState = &iTrue - Disabled EnabledState = &iFalse -) - -// IPVersion is a type for the possible IP address versions. Valid instances -// are IPv4 and IPv6 -type IPVersion int - -const ( - // IPv4 is used for IP version 4 addresses - IPv4 IPVersion = 4 - // IPv6 is used for IP version 6 addresses - IPv6 IPVersion = 6 -) - -// IntToPointer is a function for converting integers into integer pointers. -// This is useful when passing in options to operations. -func IntToPointer(i int) *int { - return &i -} - -/* -MaybeString is an internal function to be used by request methods in individual -resource packages. - -It takes a string that might be a zero value and returns either a pointer to its -address or nil. This is useful for allowing users to conveniently omit values -from an options struct by leaving them zeroed, but still pass nil to the JSON -serializer so they'll be omitted from the request body. -*/ -func MaybeString(original string) *string { - if original != "" { - return &original - } - return nil -} - -/* -MaybeInt is an internal function to be used by request methods in individual -resource packages. - -Like MaybeString, it accepts an int that may or may not be a zero value, and -returns either a pointer to its address or nil. It's intended to hint that the -JSON serializer should omit its field. 
-*/ -func MaybeInt(original int) *int { - if original != 0 { - return &original - } - return nil -} - -/* -func isUnderlyingStructZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Ptr: - return isUnderlyingStructZero(v.Elem()) - default: - return isZero(v) - } -} -*/ - -var t time.Time - -func isZero(v reflect.Value) bool { - //fmt.Printf("\n\nchecking isZero for value: %+v\n", v) - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - return true - } - return false - case reflect.Func, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - z := true - for i := 0; i < v.Len(); i++ { - z = z && isZero(v.Index(i)) - } - return z - case reflect.Struct: - if v.Type() == reflect.TypeOf(t) { - if v.Interface().(time.Time).IsZero() { - return true - } - return false - } - z := true - for i := 0; i < v.NumField(); i++ { - z = z && isZero(v.Field(i)) - } - return z - } - // Compare other types directly: - z := reflect.Zero(v.Type()) - //fmt.Printf("zero type for value: %+v\n\n\n", z) - return v.Interface() == z.Interface() -} - -/* -BuildQueryString is an internal function to be used by request methods in -individual resource packages. - -It accepts a tagged structure and expands it into a URL struct. Field names are -converted into query parameters based on a "q" tag. For example: - - type struct Something { - Bar string `q:"x_bar"` - Baz int `q:"lorem_ipsum"` - } - - instance := Something{ - Bar: "AAA", - Baz: "BBB", - } - -will be converted into "?x_bar=AAA&lorem_ipsum=BBB". - -The struct's fields may be strings, integers, or boolean values. Fields left at -their type's zero value will be omitted from the query. 
-*/ -func BuildQueryString(opts interface{}) (*url.URL, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - params := url.Values{} - - if optsValue.Kind() == reflect.Struct { - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - qTag := f.Tag.Get("q") - - // if the field has a 'q' tag, it goes in the query string - if qTag != "" { - tags := strings.Split(qTag, ",") - - // if the field is set, add it to the slice of query pieces - if !isZero(v) { - loop: - switch v.Kind() { - case reflect.Ptr: - v = v.Elem() - goto loop - case reflect.String: - params.Add(tags[0], v.String()) - case reflect.Int: - params.Add(tags[0], strconv.FormatInt(v.Int(), 10)) - case reflect.Bool: - params.Add(tags[0], strconv.FormatBool(v.Bool())) - case reflect.Slice: - switch v.Type().Elem() { - case reflect.TypeOf(0): - for i := 0; i < v.Len(); i++ { - params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10)) - } - default: - for i := 0; i < v.Len(); i++ { - params.Add(tags[0], v.Index(i).String()) - } - } - } - } else { - // Otherwise, the field is not set. - if len(tags) == 2 && tags[1] == "required" { - // And the field is required. Return an error. - return nil, fmt.Errorf("Required query parameter [%s] not set.", f.Name) - } - } - } - } - - return &url.URL{RawQuery: params.Encode()}, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. - return nil, fmt.Errorf("Options type is not a struct.") -} - -/* -BuildHeaders is an internal function to be used by request methods in -individual resource packages. - -It accepts an arbitrary tagged structure and produces a string map that's -suitable for use as the HTTP headers of an outgoing request. Field names are -mapped to header names based in "h" tags. 
- - type struct Something { - Bar string `h:"x_bar"` - Baz int `h:"lorem_ipsum"` - } - - instance := Something{ - Bar: "AAA", - Baz: "BBB", - } - -will be converted into: - - map[string]string{ - "x_bar": "AAA", - "lorem_ipsum": "BBB", - } - -Untagged fields and fields left at their zero values are skipped. Integers, -booleans and string values are supported. -*/ -func BuildHeaders(opts interface{}) (map[string]string, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - optsMap := make(map[string]string) - if optsValue.Kind() == reflect.Struct { - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - hTag := f.Tag.Get("h") - - // if the field has a 'h' tag, it goes in the header - if hTag != "" { - tags := strings.Split(hTag, ",") - - // if the field is set, add it to the slice of query pieces - if !isZero(v) { - switch v.Kind() { - case reflect.String: - optsMap[tags[0]] = v.String() - case reflect.Int: - optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10) - case reflect.Bool: - optsMap[tags[0]] = strconv.FormatBool(v.Bool()) - } - } else { - // Otherwise, the field is not set. - if len(tags) == 2 && tags[1] == "required" { - // And the field is required. Return an error. - return optsMap, fmt.Errorf("Required header not set.") - } - } - } - - } - return optsMap, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. - return optsMap, fmt.Errorf("Options type is not a struct.") -} - -// IDSliceToQueryString takes a slice of elements and converts them into a query -// string. For example, if name=foo and slice=[]int{20, 40, 60}, then the -// result would be `?name=20&name=40&name=60' -func IDSliceToQueryString(name string, ids []int) string { - str := "" - for k, v := range ids { - if k == 0 { - str += "?" 
- } else { - str += "&" - } - str += fmt.Sprintf("%s=%s", name, strconv.Itoa(v)) - } - return str -} - -// IntWithinRange returns TRUE if an integer falls within a defined range, and -// FALSE if not. -func IntWithinRange(val, min, max int) bool { - return val > min && val < max -} diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go deleted file mode 100644 index f88682381dd..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ /dev/null @@ -1,307 +0,0 @@ -package gophercloud - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// DefaultUserAgent is the default User-Agent string set in the request header. -const DefaultUserAgent = "gophercloud/2.0.0" - -// UserAgent represents a User-Agent header. -type UserAgent struct { - // prepend is the slice of User-Agent strings to prepend to DefaultUserAgent. - // All the strings to prepend are accumulated and prepended in the Join method. - prepend []string -} - -// Prepend prepends a user-defined string to the default User-Agent string. Users -// may pass in one or more strings to prepend. -func (ua *UserAgent) Prepend(s ...string) { - ua.prepend = append(s, ua.prepend...) -} - -// Join concatenates all the user-defined User-Agend strings with the default -// Gophercloud User-Agent string. -func (ua *UserAgent) Join() string { - uaSlice := append(ua.prepend, DefaultUserAgent) - return strings.Join(uaSlice, " ") -} - -// ProviderClient stores details that are required to interact with any -// services within a specific provider's API. -// -// Generally, you acquire a ProviderClient by calling the NewClient method in -// the appropriate provider's child package, providing whatever authentication -// credentials are required. 
-type ProviderClient struct { - // IdentityBase is the base URL used for a particular provider's identity - // service - it will be used when issuing authenticatation requests. It - // should point to the root resource of the identity service, not a specific - // identity version. - IdentityBase string - - // IdentityEndpoint is the identity endpoint. This may be a specific version - // of the identity service. If this is the case, this endpoint is used rather - // than querying versions first. - IdentityEndpoint string - - // TokenID is the ID of the most recently issued valid token. - TokenID string - - // EndpointLocator describes how this provider discovers the endpoints for - // its constituent services. - EndpointLocator EndpointLocator - - // HTTPClient allows users to interject arbitrary http, https, or other transit behaviors. - HTTPClient http.Client - - // UserAgent represents the User-Agent header in the HTTP request. - UserAgent UserAgent - - // ReauthFunc is the function used to re-authenticate the user if the request - // fails with a 401 HTTP response code. This a needed because there may be multiple - // authentication functions for different Identity service versions. - ReauthFunc func() error - - Debug bool -} - -// AuthenticatedHeaders returns a map of HTTP headers that are common for all -// authenticated service requests. -func (client *ProviderClient) AuthenticatedHeaders() map[string]string { - if client.TokenID == "" { - return map[string]string{} - } - return map[string]string{"X-Auth-Token": client.TokenID} -} - -// RequestOpts customizes the behavior of the provider.Request() method. -type RequestOpts struct { - // JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The - // content type of the request will default to "application/json" unless overridden by MoreHeaders. - // It's an error to specify both a JSONBody and a RawBody. 
- JSONBody interface{} - // RawBody contains an io.Reader that will be consumed by the request directly. No content-type - // will be set unless one is provided explicitly by MoreHeaders. - RawBody io.Reader - // JSONResponse, if provided, will be populated with the contents of the response body parsed as - // JSON. - JSONResponse interface{} - // OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If - // the response has a different code, an error will be returned. - OkCodes []int - // MoreHeaders specifies additional HTTP headers to be provide on the request. If a header is - // provided with a blank value (""), that header will be *omitted* instead: use this to suppress - // the default Accept header or an inferred Content-Type, for example. - MoreHeaders map[string]string - // ErrorContext specifies the resource error type to return if an error is encountered. - // This lets resources override default error messages based on the response status code. - ErrorContext error -} - -var applicationJSON = "application/json" - -// Request performs an HTTP request using the ProviderClient's current HTTPClient. An authentication -// header will automatically be provided. -func (client *ProviderClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { - var body io.Reader - var contentType *string - - // Derive the content body by either encoding an arbitrary object as JSON, or by taking a provided - // io.ReadSeeker as-is. Default the content-type to application/json. - if options.JSONBody != nil { - if options.RawBody != nil { - panic("Please provide only one of JSONBody or RawBody to gophercloud.Request().") - } - - rendered, err := json.Marshal(options.JSONBody) - if err != nil { - return nil, err - } - - body = bytes.NewReader(rendered) - contentType = &applicationJSON - } - - if options.RawBody != nil { - body = options.RawBody - } - - // Construct the http.Request. 
- req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - - // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to - // modify or omit any header. - if contentType != nil { - req.Header.Set("Content-Type", *contentType) - } - req.Header.Set("Accept", applicationJSON) - - for k, v := range client.AuthenticatedHeaders() { - req.Header.Add(k, v) - } - - // Set the User-Agent header - req.Header.Set("User-Agent", client.UserAgent.Join()) - - if options.MoreHeaders != nil { - for k, v := range options.MoreHeaders { - if v != "" { - req.Header.Set(k, v) - } else { - req.Header.Del(k) - } - } - } - - // Set connection parameter to close the connection immediately when we've got the response - req.Close = true - - // Issue the request. - resp, err := client.HTTPClient.Do(req) - if err != nil { - return nil, err - } - - // Allow default OkCodes if none explicitly set - if options.OkCodes == nil { - options.OkCodes = defaultOkCodes(method) - } - - // Validate the HTTP response status. 
- var ok bool - for _, code := range options.OkCodes { - if resp.StatusCode == code { - ok = true - break - } - } - - if !ok { - body, _ := ioutil.ReadAll(resp.Body) - resp.Body.Close() - //pc := make([]uintptr, 1) - //runtime.Callers(2, pc) - //f := runtime.FuncForPC(pc[0]) - respErr := ErrUnexpectedResponseCode{ - URL: url, - Method: method, - Expected: options.OkCodes, - Actual: resp.StatusCode, - Body: body, - } - //respErr.Function = "gophercloud.ProviderClient.Request" - - errType := options.ErrorContext - switch resp.StatusCode { - case http.StatusBadRequest: - err = ErrDefault400{respErr} - if error400er, ok := errType.(Err400er); ok { - err = error400er.Error400(respErr) - } - case http.StatusUnauthorized: - if client.ReauthFunc != nil { - err = client.ReauthFunc() - if err != nil { - e := &ErrUnableToReauthenticate{} - e.ErrOriginal = respErr - return nil, e - } - if options.RawBody != nil { - if seeker, ok := options.RawBody.(io.Seeker); ok { - seeker.Seek(0, 0) - } - } - resp, err = client.Request(method, url, options) - if err != nil { - switch err.(type) { - case *ErrUnexpectedResponseCode: - e := &ErrErrorAfterReauthentication{} - e.ErrOriginal = err.(*ErrUnexpectedResponseCode) - return nil, e - default: - e := &ErrErrorAfterReauthentication{} - e.ErrOriginal = err - return nil, e - } - } - return resp, nil - } - err = ErrDefault401{respErr} - if error401er, ok := errType.(Err401er); ok { - err = error401er.Error401(respErr) - } - case http.StatusNotFound: - err = ErrDefault404{respErr} - if error404er, ok := errType.(Err404er); ok { - err = error404er.Error404(respErr) - } - case http.StatusMethodNotAllowed: - err = ErrDefault405{respErr} - if error405er, ok := errType.(Err405er); ok { - err = error405er.Error405(respErr) - } - case http.StatusRequestTimeout: - err = ErrDefault408{respErr} - if error408er, ok := errType.(Err408er); ok { - err = error408er.Error408(respErr) - } - case 429: - err = ErrDefault429{respErr} - if error429er, ok := 
errType.(Err429er); ok { - err = error429er.Error429(respErr) - } - case http.StatusInternalServerError: - err = ErrDefault500{respErr} - if error500er, ok := errType.(Err500er); ok { - err = error500er.Error500(respErr) - } - case http.StatusServiceUnavailable: - err = ErrDefault503{respErr} - if error503er, ok := errType.(Err503er); ok { - err = error503er.Error503(respErr) - } - } - - if err == nil { - err = respErr - } - - return resp, err - } - - // Parse the response body as JSON, if requested to do so. - if options.JSONResponse != nil { - defer resp.Body.Close() - if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil { - return nil, err - } - } - - return resp, nil -} - -func defaultOkCodes(method string) []int { - switch { - case method == "GET": - return []int{200} - case method == "POST": - return []int{201, 202} - case method == "PUT": - return []int{201, 202} - case method == "PATCH": - return []int{200, 204} - case method == "DELETE": - return []int{202, 204} - } - - return []int{} -} diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go deleted file mode 100644 index e64feee19ed..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/results.go +++ /dev/null @@ -1,382 +0,0 @@ -package gophercloud - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "reflect" - "strconv" - "time" -) - -/* -Result is an internal type to be used by individual resource packages, but its -methods will be available on a wide variety of user-facing embedding types. - -It acts as a base struct that other Result types, returned from request -functions, can embed for convenience. All Results capture basic information -from the HTTP transaction that was performed, including the response body, -HTTP headers, and any errors that happened. 
- -Generally, each Result type will have an Extract method that can be used to -further interpret the result's payload in a specific context. Extensions or -providers can then provide additional extraction functions to pull out -provider- or extension-specific information as well. -*/ -type Result struct { - // Body is the payload of the HTTP response from the server. In most cases, - // this will be the deserialized JSON structure. - Body interface{} - - // Header contains the HTTP header structure from the original response. - Header http.Header - - // Err is an error that occurred during the operation. It's deferred until - // extraction to make it easier to chain the Extract call. - Err error -} - -// ExtractInto allows users to provide an object into which `Extract` will extract -// the `Result.Body`. This would be useful for OpenStack providers that have -// different fields in the response object than OpenStack proper. -func (r Result) ExtractInto(to interface{}) error { - if r.Err != nil { - return r.Err - } - - if reader, ok := r.Body.(io.Reader); ok { - if readCloser, ok := reader.(io.Closer); ok { - defer readCloser.Close() - } - return json.NewDecoder(reader).Decode(to) - } - - b, err := json.Marshal(r.Body) - if err != nil { - return err - } - err = json.Unmarshal(b, to) - - return err -} - -func (r Result) extractIntoPtr(to interface{}, label string) error { - if label == "" { - return r.ExtractInto(&to) - } - - var m map[string]interface{} - err := r.ExtractInto(&m) - if err != nil { - return err - } - - b, err := json.Marshal(m[label]) - if err != nil { - return err - } - - toValue := reflect.ValueOf(to) - if toValue.Kind() == reflect.Ptr { - toValue = toValue.Elem() - } - - switch toValue.Kind() { - case reflect.Slice: - typeOfV := toValue.Type().Elem() - if typeOfV.Kind() == reflect.Struct { - if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { - newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) - newType := 
reflect.New(typeOfV).Elem() - - for _, v := range m[label].([]interface{}) { - b, err := json.Marshal(v) - if err != nil { - return err - } - - for i := 0; i < newType.NumField(); i++ { - s := newType.Field(i).Addr().Interface() - err = json.NewDecoder(bytes.NewReader(b)).Decode(s) - if err != nil { - return err - } - } - newSlice = reflect.Append(newSlice, newType) - } - toValue.Set(newSlice) - } - } - case reflect.Struct: - typeOfV := toValue.Type() - if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { - for i := 0; i < toValue.NumField(); i++ { - toField := toValue.Field(i) - if toField.Kind() == reflect.Struct { - s := toField.Addr().Interface() - err = json.NewDecoder(bytes.NewReader(b)).Decode(s) - if err != nil { - return err - } - } - } - } - } - - err = json.Unmarshal(b, &to) - return err -} - -// ExtractIntoStructPtr will unmarshal the Result (r) into the provided -// interface{} (to). -// -// NOTE: For internal use only -// -// `to` must be a pointer to an underlying struct type -// -// If provided, `label` will be filtered out of the response -// body prior to `r` being unmarshalled into `to`. -func (r Result) ExtractIntoStructPtr(to interface{}, label string) error { - if r.Err != nil { - return r.Err - } - - t := reflect.TypeOf(to) - if k := t.Kind(); k != reflect.Ptr { - return fmt.Errorf("Expected pointer, got %v", k) - } - switch t.Elem().Kind() { - case reflect.Struct: - return r.extractIntoPtr(to, label) - default: - return fmt.Errorf("Expected pointer to struct, got: %v", t) - } -} - -// ExtractIntoSlicePtr will unmarshal the Result (r) into the provided -// interface{} (to). -// -// NOTE: For internal use only -// -// `to` must be a pointer to an underlying slice type -// -// If provided, `label` will be filtered out of the response -// body prior to `r` being unmarshalled into `to`. 
-func (r Result) ExtractIntoSlicePtr(to interface{}, label string) error { - if r.Err != nil { - return r.Err - } - - t := reflect.TypeOf(to) - if k := t.Kind(); k != reflect.Ptr { - return fmt.Errorf("Expected pointer, got %v", k) - } - switch t.Elem().Kind() { - case reflect.Slice: - return r.extractIntoPtr(to, label) - default: - return fmt.Errorf("Expected pointer to slice, got: %v", t) - } -} - -// PrettyPrintJSON creates a string containing the full response body as -// pretty-printed JSON. It's useful for capturing test fixtures and for -// debugging extraction bugs. If you include its output in an issue related to -// a buggy extraction function, we will all love you forever. -func (r Result) PrettyPrintJSON() string { - pretty, err := json.MarshalIndent(r.Body, "", " ") - if err != nil { - panic(err.Error()) - } - return string(pretty) -} - -// ErrResult is an internal type to be used by individual resource packages, but -// its methods will be available on a wide variety of user-facing embedding -// types. -// -// It represents results that only contain a potential error and -// nothing else. Usually, if the operation executed successfully, the Err field -// will be nil; otherwise it will be stocked with a relevant error. Use the -// ExtractErr method -// to cleanly pull it out. -type ErrResult struct { - Result -} - -// ExtractErr is a function that extracts error information, or nil, from a result. -func (r ErrResult) ExtractErr() error { - return r.Err -} - -/* -HeaderResult is an internal type to be used by individual resource packages, but -its methods will be available on a wide variety of user-facing embedding types. - -It represents a result that only contains an error (possibly nil) and an -http.Header. This is used, for example, by the objectstorage packages in -openstack, because most of the operations don't return response bodies, but do -have relevant information in headers. 
-*/ -type HeaderResult struct { - Result -} - -// ExtractInto allows users to provide an object into which `Extract` will -// extract the http.Header headers of the result. -func (r HeaderResult) ExtractInto(to interface{}) error { - if r.Err != nil { - return r.Err - } - - tmpHeaderMap := map[string]string{} - for k, v := range r.Header { - if len(v) > 0 { - tmpHeaderMap[k] = v[0] - } - } - - b, err := json.Marshal(tmpHeaderMap) - if err != nil { - return err - } - err = json.Unmarshal(b, to) - - return err -} - -// RFC3339Milli describes a common time format used by some API responses. -const RFC3339Milli = "2006-01-02T15:04:05.999999Z" - -type JSONRFC3339Milli time.Time - -func (jt *JSONRFC3339Milli) UnmarshalJSON(data []byte) error { - b := bytes.NewBuffer(data) - dec := json.NewDecoder(b) - var s string - if err := dec.Decode(&s); err != nil { - return err - } - t, err := time.Parse(RFC3339Milli, s) - if err != nil { - return err - } - *jt = JSONRFC3339Milli(t) - return nil -} - -const RFC3339MilliNoZ = "2006-01-02T15:04:05.999999" - -type JSONRFC3339MilliNoZ time.Time - -func (jt *JSONRFC3339MilliNoZ) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339MilliNoZ, s) - if err != nil { - return err - } - *jt = JSONRFC3339MilliNoZ(t) - return nil -} - -type JSONRFC1123 time.Time - -func (jt *JSONRFC1123) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - return err - } - *jt = JSONRFC1123(t) - return nil -} - -type JSONUnix time.Time - -func (jt *JSONUnix) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - unix, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return err - } - 
t = time.Unix(unix, 0) - *jt = JSONUnix(t) - return nil -} - -// RFC3339NoZ is the time format used in Heat (Orchestration). -const RFC3339NoZ = "2006-01-02T15:04:05" - -type JSONRFC3339NoZ time.Time - -func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339NoZ, s) - if err != nil { - return err - } - *jt = JSONRFC3339NoZ(t) - return nil -} - -/* -Link is an internal type to be used in packages of collection resources that are -paginated in a certain way. - -It's a response substructure common to many paginated collection results that is -used to point to related pages. Usually, the one we care about is the one with -Rel field set to "next". -*/ -type Link struct { - Href string `json:"href"` - Rel string `json:"rel"` -} - -/* -ExtractNextURL is an internal function useful for packages of collection -resources that are paginated in a certain way. - -It attempts to extract the "next" URL from slice of Link structs, or -"" if no such URL is present. -*/ -func ExtractNextURL(links []Link) (string, error) { - var url string - - for _, l := range links { - if l.Rel == "next" { - url = l.Href - } - } - - if url == "" { - return "", nil - } - - return url, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go deleted file mode 100644 index 1160fefa7c2..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/service_client.go +++ /dev/null @@ -1,122 +0,0 @@ -package gophercloud - -import ( - "io" - "net/http" - "strings" -) - -// ServiceClient stores details required to interact with a specific service API implemented by a provider. -// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient. 
-type ServiceClient struct { - // ProviderClient is a reference to the provider that implements this service. - *ProviderClient - - // Endpoint is the base URL of the service's API, acquired from a service catalog. - // It MUST end with a /. - Endpoint string - - // ResourceBase is the base URL shared by the resources within a service's API. It should include - // the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used - // as-is, instead. - ResourceBase string - - // This is the service client type (e.g. compute, sharev2). - // NOTE: FOR INTERNAL USE ONLY. DO NOT SET. GOPHERCLOUD WILL SET THIS. - // It is only exported because it gets set in a different package. - Type string - - // The microversion of the service to use. Set this to use a particular microversion. - Microversion string -} - -// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. -func (client *ServiceClient) ResourceBaseURL() string { - if client.ResourceBase != "" { - return client.ResourceBase - } - return client.Endpoint -} - -// ServiceURL constructs a URL for a resource belonging to this provider. -func (client *ServiceClient) ServiceURL(parts ...string) string { - return client.ResourceBaseURL() + strings.Join(parts, "/") -} - -func (client *ServiceClient) initReqOpts(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) { - if v, ok := (JSONBody).(io.Reader); ok { - opts.RawBody = v - } else if JSONBody != nil { - opts.JSONBody = JSONBody - } - - if JSONResponse != nil { - opts.JSONResponse = JSONResponse - } - - if opts.MoreHeaders == nil { - opts.MoreHeaders = make(map[string]string) - } - - if client.Microversion != "" { - client.setMicroversionHeader(opts) - } -} - -// Get calls `Request` with the "GET" HTTP verb. 
-func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, nil, JSONResponse, opts) - return client.Request("GET", url, opts) -} - -// Post calls `Request` with the "POST" HTTP verb. -func (client *ServiceClient) Post(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("POST", url, opts) -} - -// Put calls `Request` with the "PUT" HTTP verb. -func (client *ServiceClient) Put(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("PUT", url, opts) -} - -// Patch calls `Request` with the "PATCH" HTTP verb. -func (client *ServiceClient) Patch(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("PATCH", url, opts) -} - -// Delete calls `Request` with the "DELETE" HTTP verb. 
-func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, nil, nil, opts) - return client.Request("DELETE", url, opts) -} - -func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { - switch client.Type { - case "compute": - opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion - case "sharev2": - opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion - } - - if client.Type != "" { - opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/util.go b/vendor/github.com/gophercloud/gophercloud/util.go deleted file mode 100644 index 68f9a5d3eca..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/util.go +++ /dev/null @@ -1,102 +0,0 @@ -package gophercloud - -import ( - "fmt" - "net/url" - "path/filepath" - "strings" - "time" -) - -// WaitFor polls a predicate function, once per second, up to a timeout limit. -// This is useful to wait for a resource to transition to a certain state. -// To handle situations when the predicate might hang indefinitely, the -// predicate will be prematurely cancelled after the timeout. -// Resource packages will wrap this in a more convenient function that's -// specific to a certain resource, but it can also be useful on its own. -func WaitFor(timeout int, predicate func() (bool, error)) error { - type WaitForResult struct { - Success bool - Error error - } - - start := time.Now().Unix() - - for { - // If a timeout is set, and that's been exceeded, shut it down. 
- if timeout >= 0 && time.Now().Unix()-start >= int64(timeout) { - return fmt.Errorf("A timeout occurred") - } - - time.Sleep(1 * time.Second) - - var result WaitForResult - ch := make(chan bool, 1) - go func() { - defer close(ch) - satisfied, err := predicate() - result.Success = satisfied - result.Error = err - }() - - select { - case <-ch: - if result.Error != nil { - return result.Error - } - if result.Success { - return nil - } - // If the predicate has not finished by the timeout, cancel it. - case <-time.After(time.Duration(timeout) * time.Second): - return fmt.Errorf("A timeout occurred") - } - } -} - -// NormalizeURL is an internal function to be used by provider clients. -// -// It ensures that each endpoint URL has a closing `/`, as expected by -// ServiceClient's methods. -func NormalizeURL(url string) string { - if !strings.HasSuffix(url, "/") { - return url + "/" - } - return url -} - -// NormalizePathURL is used to convert rawPath to a fqdn, using basePath as -// a reference in the filesystem, if necessary. basePath is assumed to contain -// either '.' when first used, or the file:// type fqdn of the parent resource. -// e.g. 
myFavScript.yaml => file://opt/lib/myFavScript.yaml -func NormalizePathURL(basePath, rawPath string) (string, error) { - u, err := url.Parse(rawPath) - if err != nil { - return "", err - } - // if a scheme is defined, it must be a fqdn already - if u.Scheme != "" { - return u.String(), nil - } - // if basePath is a url, then child resources are assumed to be relative to it - bu, err := url.Parse(basePath) - if err != nil { - return "", err - } - var basePathSys, absPathSys string - if bu.Scheme != "" { - basePathSys = filepath.FromSlash(bu.Path) - absPathSys = filepath.Join(basePathSys, rawPath) - bu.Path = filepath.ToSlash(absPathSys) - return bu.String(), nil - } - - absPathSys = filepath.Join(basePath, rawPath) - u.Path = filepath.ToSlash(absPathSys) - if err != nil { - return "", err - } - u.Scheme = "file" - return u.String(), nil - -} diff --git a/vendor/github.com/hashicorp/consul/LICENSE b/vendor/github.com/hashicorp/consul/LICENSE deleted file mode 100644 index c33dcc7c928..00000000000 --- a/vendor/github.com/hashicorp/consul/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. 
- - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. 
You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. 
- diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md deleted file mode 100644 index 7e64988f42d..00000000000 --- a/vendor/github.com/hashicorp/consul/api/README.md +++ /dev/null @@ -1,43 +0,0 @@ -Consul API client -================= - -This package provides the `api` package which attempts to -provide programmatic access to the full Consul API. - -Currently, all of the Consul APIs included in version 0.6.0 are supported. - -Documentation -============= - -The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api) - -Usage -===== - -Below is an example of using the Consul client: - -```go -// Get a new client -client, err := api.NewClient(api.DefaultConfig()) -if err != nil { - panic(err) -} - -// Get a handle to the KV API -kv := client.KV() - -// PUT a new KV pair -p := &api.KVPair{Key: "foo", Value: []byte("test")} -_, err = kv.Put(p, nil) -if err != nil { - panic(err) -} - -// Lookup the pair -pair, _, err := kv.Get("foo", nil) -if err != nil { - panic(err) -} -fmt.Printf("KV: %v", pair) - -``` diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go deleted file mode 100644 index 6ea0a752e58..00000000000 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ /dev/null @@ -1,193 +0,0 @@ -package api - -import ( - "time" -) - -const ( - // ACLCLientType is the client type token - ACLClientType = "client" - - // ACLManagementType is the management type token - ACLManagementType = "management" -) - -// ACLEntry is used to represent an ACL entry -type ACLEntry struct { - CreateIndex uint64 - ModifyIndex uint64 - ID string - Name string - Type string - Rules string -} - -// ACLReplicationStatus is used to represent the status of ACL replication. 
-type ACLReplicationStatus struct { - Enabled bool - Running bool - SourceDatacenter string - ReplicatedIndex uint64 - LastSuccess time.Time - LastError time.Time -} - -// ACL can be used to query the ACL endpoints -type ACL struct { - c *Client -} - -// ACL returns a handle to the ACL endpoints -func (c *Client) ACL() *ACL { - return &ACL{c} -} - -// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster -// to get the first management token. -func (a *ACL) Bootstrap() (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/bootstrap") - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Create is used to generate a new token with the given parameters -func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/create") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update is used to update the rules of an existing token -func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/update") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Destroy is used to destroy a given ACL token ID -func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) - r.setWriteOptions(q) - rtt, 
resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Clone is used to return a new token cloned from an existing one -func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Info is used to query for information about an ACL token -func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/info/"+id) - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to get all the ACL tokens -func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/list") - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Replication returns the status of the ACL replication process in the datacenter -func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { - r := a.c.newRequest("GET", 
"/v1/acl/replication") - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries *ACLReplicationStatus - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go deleted file mode 100644 index 533b2455782..00000000000 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ /dev/null @@ -1,621 +0,0 @@ -package api - -import ( - "bufio" - "fmt" -) - -// AgentCheck represents a check known to the agent -type AgentCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string -} - -// AgentService represents a service known to the agent -type AgentService struct { - ID string - Service string - Tags []string - Port int - Address string - EnableTagOverride bool - CreateIndex uint64 - ModifyIndex uint64 -} - -// AgentMember represents a cluster member known to the agent -type AgentMember struct { - Name string - Addr string - Port uint16 - Tags map[string]string - Status int - ProtocolMin uint8 - ProtocolMax uint8 - ProtocolCur uint8 - DelegateMin uint8 - DelegateMax uint8 - DelegateCur uint8 -} - -// AllSegments is used to select for all segments in MembersOpts. -const AllSegments = "_all" - -// MembersOpts is used for querying member information. -type MembersOpts struct { - // WAN is whether to show members from the WAN. - WAN bool - - // Segment is the LAN segment to show members for. Setting this to the - // AllSegments value above will show members in all segments. 
- Segment string -} - -// AgentServiceRegistration is used to register a new service -type AgentServiceRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Tags []string `json:",omitempty"` - Port int `json:",omitempty"` - Address string `json:",omitempty"` - EnableTagOverride bool `json:",omitempty"` - Check *AgentServiceCheck - Checks AgentServiceChecks -} - -// AgentCheckRegistration is used to register a new check -type AgentCheckRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Notes string `json:",omitempty"` - ServiceID string `json:",omitempty"` - AgentServiceCheck -} - -// AgentServiceCheck is used to define a node or service level check -type AgentServiceCheck struct { - Args []string `json:"ScriptArgs,omitempty"` - Script string `json:",omitempty"` // Deprecated, use Args. - DockerContainerID string `json:",omitempty"` - Shell string `json:",omitempty"` // Only supported for Docker. - Interval string `json:",omitempty"` - Timeout string `json:",omitempty"` - TTL string `json:",omitempty"` - HTTP string `json:",omitempty"` - Header map[string][]string `json:",omitempty"` - Method string `json:",omitempty"` - TCP string `json:",omitempty"` - Status string `json:",omitempty"` - Notes string `json:",omitempty"` - TLSSkipVerify bool `json:",omitempty"` - - // In Consul 0.7 and later, checks that are associated with a service - // may also contain this optional DeregisterCriticalServiceAfter field, - // which is a timeout in the same Go time format as Interval and TTL. If - // a check is in the critical state for more than this configured value, - // then its associated service (and all of its associated checks) will - // automatically be deregistered. - DeregisterCriticalServiceAfter string `json:",omitempty"` -} -type AgentServiceChecks []*AgentServiceCheck - -// AgentToken is used when updating ACL tokens for an agent. 
-type AgentToken struct { - Token string -} - -// Metrics info is used to store different types of metric values from the agent. -type MetricsInfo struct { - Timestamp string - Gauges []GaugeValue - Points []PointValue - Counters []SampledValue - Samples []SampledValue -} - -// GaugeValue stores one value that is updated as time goes on, such as -// the amount of memory allocated. -type GaugeValue struct { - Name string - Value float32 - Labels map[string]string -} - -// PointValue holds a series of points for a metric. -type PointValue struct { - Name string - Points []float32 -} - -// SampledValue stores info about a metric that is incremented over time, -// such as the number of requests to an HTTP endpoint. -type SampledValue struct { - Name string - Count int - Sum float64 - Min float64 - Max float64 - Mean float64 - Stddev float64 - Labels map[string]string -} - -// Agent can be used to query the Agent endpoints -type Agent struct { - c *Client - - // cache the node name - nodeName string -} - -// Agent returns a handle to the agent endpoints -func (c *Client) Agent() *Agent { - return &Agent{c: c} -} - -// Self is used to query the agent we are speaking to for -// information about itself -func (a *Agent) Self() (map[string]map[string]interface{}, error) { - r := a.c.newRequest("GET", "/v1/agent/self") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]map[string]interface{} - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Metrics is used to query the agent we are speaking to for -// its current internal metric data -func (a *Agent) Metrics() (*MetricsInfo, error) { - r := a.c.newRequest("GET", "/v1/agent/metrics") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out *MetricsInfo - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return 
out, nil -} - -// Reload triggers a configuration reload for the agent we are connected to. -func (a *Agent) Reload() error { - r := a.c.newRequest("PUT", "/v1/agent/reload") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// NodeName is used to get the node name of the agent -func (a *Agent) NodeName() (string, error) { - if a.nodeName != "" { - return a.nodeName, nil - } - info, err := a.Self() - if err != nil { - return "", err - } - name := info["Config"]["NodeName"].(string) - a.nodeName = name - return name, nil -} - -// Checks returns the locally registered checks -func (a *Agent) Checks() (map[string]*AgentCheck, error) { - r := a.c.newRequest("GET", "/v1/agent/checks") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentCheck - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Services returns the locally registered services -func (a *Agent) Services() (map[string]*AgentService, error) { - r := a.c.newRequest("GET", "/v1/agent/services") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentService - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Members returns the known gossip members. The WAN -// flag can be used to query a server for WAN members. 
-func (a *Agent) Members(wan bool) ([]*AgentMember, error) { - r := a.c.newRequest("GET", "/v1/agent/members") - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*AgentMember - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// MembersOpts returns the known gossip members and can be passed -// additional options for WAN/segment filtering. -func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) { - r := a.c.newRequest("GET", "/v1/agent/members") - r.params.Set("segment", opts.Segment) - if opts.WAN { - r.params.Set("wan", "1") - } - - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*AgentMember - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// ServiceRegister is used to register a new service with -// the local agent -func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/service/register") - r.obj = service - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ServiceDeregister is used to deregister a service with -// the local agent -func (a *Agent) ServiceDeregister(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// PassTTL is used to set a TTL check to the passing state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. 
-func (a *Agent) PassTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "pass") -} - -// WarnTTL is used to set a TTL check to the warning state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) WarnTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "warn") -} - -// FailTTL is used to set a TTL check to the failing state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) FailTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "fail") -} - -// updateTTL is used to update the TTL of a check. This is the internal -// method that uses the old API that's present in Consul versions prior to -// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed -// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, -// but keep the old Pass/Warn/Fail methods using the old API under the hood. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 and the server endpoints will -// be removed in 0.9. -func (a *Agent) updateTTL(checkID, note, status string) error { - switch status { - case "pass": - case "warn": - case "fail": - default: - return fmt.Errorf("Invalid status: %s", status) - } - endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) - r := a.c.newRequest("PUT", endpoint) - r.params.Set("note", note) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// checkUpdate is the payload for a PUT for a check update. 
-type checkUpdate struct { - // Status is one of the api.Health* states: HealthPassing - // ("passing"), HealthWarning ("warning"), or HealthCritical - // ("critical"). - Status string - - // Output is the information to post to the UI for operators as the - // output of the process that decided to hit the TTL check. This is - // different from the note field that's associated with the check - // itself. - Output string -} - -// UpdateTTL is used to update the TTL of a check. This uses the newer API -// that was introduced in Consul 0.6.4 and later. We translate the old status -// strings for compatibility (though a newer version of Consul will still be -// required to use this API). -func (a *Agent) UpdateTTL(checkID, output, status string) error { - switch status { - case "pass", HealthPassing: - status = HealthPassing - case "warn", HealthWarning: - status = HealthWarning - case "fail", HealthCritical: - status = HealthCritical - default: - return fmt.Errorf("Invalid status: %s", status) - } - - endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) - r := a.c.newRequest("PUT", endpoint) - r.obj = &checkUpdate{ - Status: status, - Output: output, - } - - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// CheckRegister is used to register a new check with -// the local agent -func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/check/register") - r.obj = check - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// CheckDeregister is used to deregister a check with -// the local agent -func (a *Agent) CheckDeregister(checkID string) error { - r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Join is used to instruct the agent to 
attempt a join to -// another cluster member -func (a *Agent) Join(addr string, wan bool) error { - r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Leave is used to have the agent gracefully leave the cluster and shutdown -func (a *Agent) Leave() error { - r := a.c.newRequest("PUT", "/v1/agent/leave") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ForceLeave is used to have the agent eject a failed node -func (a *Agent) ForceLeave(node string) error { - r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// EnableServiceMaintenance toggles service maintenance mode on -// for the given service ID. -func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableServiceMaintenance toggles service maintenance mode off -// for the given service ID. -func (a *Agent) DisableServiceMaintenance(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// EnableNodeMaintenance toggles node maintenance mode on for the -// agent we are connected to. 
-func (a *Agent) EnableNodeMaintenance(reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableNodeMaintenance toggles node maintenance mode off for the -// agent we are connected to. -func (a *Agent) DisableNodeMaintenance() error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Monitor returns a channel which will receive streaming logs from the agent -// Providing a non-nil stopCh can be used to close the connection and stop the -// log stream. An empty string will be sent down the given channel when there's -// nothing left to stream, after which the caller should close the stopCh. -func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { - r := a.c.newRequest("GET", "/v1/agent/monitor") - r.setQueryOptions(q) - if loglevel != "" { - r.params.Add("loglevel", loglevel) - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - - logCh := make(chan string, 64) - go func() { - defer resp.Body.Close() - - scanner := bufio.NewScanner(resp.Body) - for { - select { - case <-stopCh: - close(logCh) - return - default: - } - if scanner.Scan() { - // An empty string signals to the caller that - // the scan is done, so make sure we only emit - // that when the scanner says it's done, not if - // we happen to ingest an empty line. - if text := scanner.Text(); text != "" { - logCh <- text - } else { - logCh <- " " - } - } else { - logCh <- "" - } - } - }() - - return logCh, nil -} - -// UpdateACLToken updates the agent's "acl_token". See updateToken for more -// details. 
-func (c *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return c.updateToken("acl_token", token, q) -} - -// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken -// for more details. -func (c *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) { - return c.updateToken("acl_agent_token", token, q) -} - -// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See -// updateToken for more details. -func (c *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) { - return c.updateToken("acl_agent_master_token", token, q) -} - -// UpdateACLReplicationToken updates the agent's "acl_replication_token". See -// updateToken for more details. -func (c *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) { - return c.updateToken("acl_replication_token", token, q) -} - -// updateToken can be used to update an agent's ACL token after the agent has -// started. The tokens are not persisted, so will need to be updated again if -// the agent is restarted. 
-func (c *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target)) - r.setWriteOptions(q) - r.obj = &AgentToken{Token: token} - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go deleted file mode 100644 index 97a524b5ee5..00000000000 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ /dev/null @@ -1,781 +0,0 @@ -package api - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-rootcerts" -) - -const ( - // HTTPAddrEnvName defines an environment variable name which sets - // the HTTP address if there is no -http-addr specified. - HTTPAddrEnvName = "CONSUL_HTTP_ADDR" - - // HTTPTokenEnvName defines an environment variable name which sets - // the HTTP token. - HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" - - // HTTPAuthEnvName defines an environment variable name which sets - // the HTTP authentication header. - HTTPAuthEnvName = "CONSUL_HTTP_AUTH" - - // HTTPSSLEnvName defines an environment variable name which sets - // whether or not to use HTTPS. - HTTPSSLEnvName = "CONSUL_HTTP_SSL" - - // HTTPCAFile defines an environment variable name which sets the - // CA file to use for talking to Consul over TLS. - HTTPCAFile = "CONSUL_CACERT" - - // HTTPCAPath defines an environment variable name which sets the - // path to a directory of CA certs to use for talking to Consul over TLS. - HTTPCAPath = "CONSUL_CAPATH" - - // HTTPClientCert defines an environment variable name which sets the - // client cert file to use for talking to Consul over TLS. 
- HTTPClientCert = "CONSUL_CLIENT_CERT" - - // HTTPClientKey defines an environment variable name which sets the - // client key file to use for talking to Consul over TLS. - HTTPClientKey = "CONSUL_CLIENT_KEY" - - // HTTPTLSServerName defines an environment variable name which sets the - // server name to use as the SNI host when connecting via TLS - HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME" - - // HTTPSSLVerifyEnvName defines an environment variable name which sets - // whether or not to disable certificate checking. - HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" -) - -// QueryOptions are used to parameterize a query -type QueryOptions struct { - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // AllowStale allows any Consul server (non-leader) to service - // a read. This allows for lower latency and higher throughput - AllowStale bool - - // RequireConsistent forces the read to be fully consistent. - // This is more expensive but prevents ever performing a stale - // read. - RequireConsistent bool - - // WaitIndex is used to enable a blocking query. Waits - // until the timeout or the next index is reached - WaitIndex uint64 - - // WaitTime is used to bound the duration of a wait. - // Defaults to that of the Config, but can be overridden. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string - - // Near is used to provide a node name that will sort the results - // in ascending order based on the estimated round trip time from - // that node. Setting this to "_agent" will use the agent's node - // for the sort. - Near string - - // NodeMeta is used to filter results by nodes with the given - // metadata key/value pairs. Currently, only one key/value pair can - // be provided for filtering. 
- NodeMeta map[string]string - - // RelayFactor is used in keyring operations to cause reponses to be - // relayed back to the sender through N other random nodes. Must be - // a value from 0 to 5 (inclusive). - RelayFactor uint8 - - // ctx is an optional context pass through to the underlying HTTP - // request layer. Use Context() and WithContext() to manage this. - ctx context.Context -} - -func (o *QueryOptions) Context() context.Context { - if o != nil && o.ctx != nil { - return o.ctx - } - return context.Background() -} - -func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { - o2 := new(QueryOptions) - if o != nil { - *o2 = *o - } - o2.ctx = ctx - return o2 -} - -// WriteOptions are used to parameterize a write -type WriteOptions struct { - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string - - // RelayFactor is used in keyring operations to cause reponses to be - // relayed back to the sender through N other random nodes. Must be - // a value from 0 to 5 (inclusive). - RelayFactor uint8 - - // ctx is an optional context pass through to the underlying HTTP - // request layer. Use Context() and WithContext() to manage this. - ctx context.Context -} - -func (o *WriteOptions) Context() context.Context { - if o != nil && o.ctx != nil { - return o.ctx - } - return context.Background() -} - -func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { - o2 := new(WriteOptions) - if o != nil { - *o2 = *o - } - o2.ctx = ctx - return o2 -} - -// QueryMeta is used to return meta data about a query -type QueryMeta struct { - // LastIndex. 
This can be used as a WaitIndex to perform - // a blocking query - LastIndex uint64 - - // Time of last contact from the leader for the - // server servicing the request - LastContact time.Duration - - // Is there a known leader - KnownLeader bool - - // How long did the request take - RequestTime time.Duration - - // Is address translation enabled for HTTP responses on this agent - AddressTranslationEnabled bool -} - -// WriteMeta is used to return meta data about a write -type WriteMeta struct { - // How long did the request take - RequestTime time.Duration -} - -// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication -type HttpBasicAuth struct { - // Username to use for HTTP Basic Authentication - Username string - - // Password to use for HTTP Basic Authentication - Password string -} - -// Config is used to configure the creation of a client -type Config struct { - // Address is the address of the Consul server - Address string - - // Scheme is the URI scheme for the Consul server - Scheme string - - // Datacenter to use. If not provided, the default agent datacenter is used. - Datacenter string - - // Transport is the Transport to use for the http client. - Transport *http.Transport - - // HttpClient is the client to use. Default will be - // used if not provided. - HttpClient *http.Client - - // HttpAuth is the auth info to use for http access. - HttpAuth *HttpBasicAuth - - // WaitTime limits how long a Watch will block. If not provided, - // the agent default values will be used. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string - - TLSConfig TLSConfig -} - -// TLSConfig is used to generate a TLSClientConfig that's useful for talking to -// Consul using TLS. -type TLSConfig struct { - // Address is the optional address of the Consul server. 
The port, if any - // will be removed from here and this will be set to the ServerName of the - // resulting config. - Address string - - // CAFile is the optional path to the CA certificate used for Consul - // communication, defaults to the system bundle if not specified. - CAFile string - - // CAPath is the optional path to a directory of CA certificates to use for - // Consul communication, defaults to the system bundle if not specified. - CAPath string - - // CertFile is the optional path to the certificate for Consul - // communication. If this is set then you need to also set KeyFile. - CertFile string - - // KeyFile is the optional path to the private key for Consul communication. - // If this is set then you need to also set CertFile. - KeyFile string - - // InsecureSkipVerify if set to true will disable TLS host verification. - InsecureSkipVerify bool -} - -// DefaultConfig returns a default configuration for the client. By default this -// will pool and reuse idle connections to Consul. If you have a long-lived -// client object, this is the desired behavior and should make the most efficient -// use of the connections to Consul. If you don't reuse a client object , which -// is not recommended, then you may notice idle connections building up over -// time. To avoid this, use the DefaultNonPooledConfig() instead. -func DefaultConfig() *Config { - return defaultConfig(cleanhttp.DefaultPooledTransport) -} - -// DefaultNonPooledConfig returns a default configuration for the client which -// does not pool connections. This isn't a recommended configuration because it -// will reconnect to Consul on every request, but this is useful to avoid the -// accumulation of idle connections if you make many client objects during the -// lifetime of your application. 
-func DefaultNonPooledConfig() *Config { - return defaultConfig(cleanhttp.DefaultTransport) -} - -// defaultConfig returns the default configuration for the client, using the -// given function to make the transport. -func defaultConfig(transportFn func() *http.Transport) *Config { - config := &Config{ - Address: "127.0.0.1:8500", - Scheme: "http", - Transport: transportFn(), - } - - if addr := os.Getenv(HTTPAddrEnvName); addr != "" { - config.Address = addr - } - - if token := os.Getenv(HTTPTokenEnvName); token != "" { - config.Token = token - } - - if auth := os.Getenv(HTTPAuthEnvName); auth != "" { - var username, password string - if strings.Contains(auth, ":") { - split := strings.SplitN(auth, ":", 2) - username = split[0] - password = split[1] - } else { - username = auth - } - - config.HttpAuth = &HttpBasicAuth{ - Username: username, - Password: password, - } - } - - if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" { - enabled, err := strconv.ParseBool(ssl) - if err != nil { - log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err) - } - - if enabled { - config.Scheme = "https" - } - } - - if v := os.Getenv(HTTPTLSServerName); v != "" { - config.TLSConfig.Address = v - } - if v := os.Getenv(HTTPCAFile); v != "" { - config.TLSConfig.CAFile = v - } - if v := os.Getenv(HTTPCAPath); v != "" { - config.TLSConfig.CAPath = v - } - if v := os.Getenv(HTTPClientCert); v != "" { - config.TLSConfig.CertFile = v - } - if v := os.Getenv(HTTPClientKey); v != "" { - config.TLSConfig.KeyFile = v - } - if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" { - doVerify, err := strconv.ParseBool(v) - if err != nil { - log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err) - } - if !doVerify { - config.TLSConfig.InsecureSkipVerify = true - } - } - - return config -} - -// TLSConfig is used to generate a TLSClientConfig that's useful for talking to -// Consul using TLS. 
-func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { - tlsClientConfig := &tls.Config{ - InsecureSkipVerify: tlsConfig.InsecureSkipVerify, - } - - if tlsConfig.Address != "" { - server := tlsConfig.Address - hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") - if hasPort { - var err error - server, _, err = net.SplitHostPort(server) - if err != nil { - return nil, err - } - } - tlsClientConfig.ServerName = server - } - - if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) - if err != nil { - return nil, err - } - tlsClientConfig.Certificates = []tls.Certificate{tlsCert} - } - - rootConfig := &rootcerts.Config{ - CAFile: tlsConfig.CAFile, - CAPath: tlsConfig.CAPath, - } - if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil { - return nil, err - } - - return tlsClientConfig, nil -} - -// Client provides a client to the Consul API -type Client struct { - config Config -} - -// NewClient returns a new client -func NewClient(config *Config) (*Client, error) { - // bootstrap the config - defConfig := DefaultConfig() - - if len(config.Address) == 0 { - config.Address = defConfig.Address - } - - if len(config.Scheme) == 0 { - config.Scheme = defConfig.Scheme - } - - if config.Transport == nil { - config.Transport = defConfig.Transport - } - - if config.TLSConfig.Address == "" { - config.TLSConfig.Address = defConfig.TLSConfig.Address - } - - if config.TLSConfig.CAFile == "" { - config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile - } - - if config.TLSConfig.CAPath == "" { - config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath - } - - if config.TLSConfig.CertFile == "" { - config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile - } - - if config.TLSConfig.KeyFile == "" { - config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile - } - - if !config.TLSConfig.InsecureSkipVerify { - config.TLSConfig.InsecureSkipVerify = 
defConfig.TLSConfig.InsecureSkipVerify - } - - if config.HttpClient == nil { - var err error - config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) - if err != nil { - return nil, err - } - } - - parts := strings.SplitN(config.Address, "://", 2) - if len(parts) == 2 { - switch parts[0] { - case "http": - config.Scheme = "http" - case "https": - config.Scheme = "https" - case "unix": - trans := cleanhttp.DefaultTransport() - trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", parts[1]) - } - config.HttpClient = &http.Client{ - Transport: trans, - } - default: - return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) - } - config.Address = parts[1] - } - - if config.Token == "" { - config.Token = defConfig.Token - } - - return &Client{config: *config}, nil -} - -// NewHttpClient returns an http client configured with the given Transport and TLS -// config. -func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { - client := &http.Client{ - Transport: transport, - } - - if transport.TLSClientConfig == nil { - tlsClientConfig, err := SetupTLSConfig(&tlsConf) - - if err != nil { - return nil, err - } - - transport.TLSClientConfig = tlsClientConfig - } - - return client, nil -} - -// request is used to help build up a request -type request struct { - config *Config - method string - url *url.URL - params url.Values - body io.Reader - header http.Header - obj interface{} - ctx context.Context -} - -// setQueryOptions is used to annotate the request with -// additional query options -func (r *request) setQueryOptions(q *QueryOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.AllowStale { - r.params.Set("stale", "") - } - if q.RequireConsistent { - r.params.Set("consistent", "") - } - if q.WaitIndex != 0 { - r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) - } - if q.WaitTime != 0 { - 
r.params.Set("wait", durToMsec(q.WaitTime)) - } - if q.Token != "" { - r.header.Set("X-Consul-Token", q.Token) - } - if q.Near != "" { - r.params.Set("near", q.Near) - } - if len(q.NodeMeta) > 0 { - for key, value := range q.NodeMeta { - r.params.Add("node-meta", key+":"+value) - } - } - if q.RelayFactor != 0 { - r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) - } - r.ctx = q.ctx -} - -// durToMsec converts a duration to a millisecond specified string. If the -// user selected a positive value that rounds to 0 ms, then we will use 1 ms -// so they get a short delay, otherwise Consul will translate the 0 ms into -// a huge default delay. -func durToMsec(dur time.Duration) string { - ms := dur / time.Millisecond - if dur > 0 && ms == 0 { - ms = 1 - } - return fmt.Sprintf("%dms", ms) -} - -// serverError is a string we look for to detect 500 errors. -const serverError = "Unexpected response code: 500" - -// IsRetryableError returns true for 500 errors from the Consul servers, and -// network connection errors. These are usually retryable at a later time. -// This applies to reads but NOT to writes. This may return true for errors -// on writes that may have still gone through, so do not use this to retry -// any write operations. -func IsRetryableError(err error) bool { - if err == nil { - return false - } - - if _, ok := err.(net.Error); ok { - return true - } - - // TODO (slackpad) - Make a real error type here instead of using - // a string check. 
- return strings.Contains(err.Error(), serverError) -} - -// setWriteOptions is used to annotate the request with -// additional write options -func (r *request) setWriteOptions(q *WriteOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.Token != "" { - r.header.Set("X-Consul-Token", q.Token) - } - if q.RelayFactor != 0 { - r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) - } - r.ctx = q.ctx -} - -// toHTTP converts the request to an HTTP request -func (r *request) toHTTP() (*http.Request, error) { - // Encode the query parameters - r.url.RawQuery = r.params.Encode() - - // Check if we should encode the body - if r.body == nil && r.obj != nil { - b, err := encodeBody(r.obj) - if err != nil { - return nil, err - } - r.body = b - } - - // Create the HTTP request - req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) - if err != nil { - return nil, err - } - - req.URL.Host = r.url.Host - req.URL.Scheme = r.url.Scheme - req.Host = r.url.Host - req.Header = r.header - - // Setup auth - if r.config.HttpAuth != nil { - req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) - } - if r.ctx != nil { - return req.WithContext(r.ctx), nil - } else { - return req, nil - } -} - -// newRequest is used to create a new request -func (c *Client) newRequest(method, path string) *request { - r := &request{ - config: &c.config, - method: method, - url: &url.URL{ - Scheme: c.config.Scheme, - Host: c.config.Address, - Path: path, - }, - params: make(map[string][]string), - header: make(http.Header), - } - if c.config.Datacenter != "" { - r.params.Set("dc", c.config.Datacenter) - } - if c.config.WaitTime != 0 { - r.params.Set("wait", durToMsec(r.config.WaitTime)) - } - if c.config.Token != "" { - r.header.Set("X-Consul-Token", r.config.Token) - } - return r -} - -// doRequest runs a request with our client -func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { 
- req, err := r.toHTTP() - if err != nil { - return 0, nil, err - } - start := time.Now() - resp, err := c.config.HttpClient.Do(req) - diff := time.Since(start) - return diff, resp, err -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. -func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - r := c.newRequest("GET", endpoint) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := decodeBody(resp, out); err != nil { - return nil, err - } - return qm, nil -} - -// write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Consul conventions. -func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - r := c.newRequest("PUT", endpoint) - r.setWriteOptions(q) - r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - if out != nil { - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - } else if _, err := ioutil.ReadAll(resp.Body); err != nil { - return nil, err - } - return wm, nil -} - -// parseQueryMeta is used to help parse query meta-data -func parseQueryMeta(resp *http.Response, q *QueryMeta) error { - header := resp.Header - - // Parse the X-Consul-Index - index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) - } - q.LastIndex = index - - // Parse the X-Consul-LastContact - last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) - } - 
q.LastContact = time.Duration(last) * time.Millisecond - - // Parse the X-Consul-KnownLeader - switch header.Get("X-Consul-KnownLeader") { - case "true": - q.KnownLeader = true - default: - q.KnownLeader = false - } - - // Parse X-Consul-Translate-Addresses - switch header.Get("X-Consul-Translate-Addresses") { - case "true": - q.AddressTranslationEnabled = true - default: - q.AddressTranslationEnabled = false - } - - return nil -} - -// decodeBody is used to JSON decode a body -func decodeBody(resp *http.Response, out interface{}) error { - dec := json.NewDecoder(resp.Body) - return dec.Decode(out) -} - -// encodeBody is used to encode a request body -func encodeBody(obj interface{}) (io.Reader, error) { - buf := bytes.NewBuffer(nil) - enc := json.NewEncoder(buf) - if err := enc.Encode(obj); err != nil { - return nil, err - } - return buf, nil -} - -// requireOK is used to wrap doRequest and check for a 200 -func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { - if e != nil { - if resp != nil { - resp.Body.Close() - } - return d, nil, e - } - if resp.StatusCode != 200 { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - resp.Body.Close() - return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) - } - return d, resp, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go deleted file mode 100644 index babfc9a1df4..00000000000 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ /dev/null @@ -1,198 +0,0 @@ -package api - -type Node struct { - ID string - Node string - Address string - Datacenter string - TaggedAddresses map[string]string - Meta map[string]string - CreateIndex uint64 - ModifyIndex uint64 -} - -type CatalogService struct { - ID string - Node string - Address string - Datacenter string - TaggedAddresses map[string]string - NodeMeta map[string]string - ServiceID string - ServiceName string - 
ServiceAddress string - ServiceTags []string - ServicePort int - ServiceEnableTagOverride bool - CreateIndex uint64 - ModifyIndex uint64 -} - -type CatalogNode struct { - Node *Node - Services map[string]*AgentService -} - -type CatalogRegistration struct { - ID string - Node string - Address string - TaggedAddresses map[string]string - NodeMeta map[string]string - Datacenter string - Service *AgentService - Check *AgentCheck -} - -type CatalogDeregistration struct { - Node string - Address string // Obsolete. - Datacenter string - ServiceID string - CheckID string -} - -// Catalog can be used to query the Catalog endpoints -type Catalog struct { - c *Client -} - -// Catalog returns a handle to the catalog endpoints -func (c *Client) Catalog() *Catalog { - return &Catalog{c} -} - -func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/register") - r.setWriteOptions(q) - r.obj = reg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/deregister") - r.setWriteOptions(q) - r.obj = dereg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -// Datacenters is used to query for all the known datacenters -func (c *Catalog) Datacenters() ([]string, error) { - r := c.c.newRequest("GET", "/v1/catalog/datacenters") - _, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []string - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Nodes is used to query all the known nodes -func (c *Catalog) Nodes(q 
*QueryOptions) ([]*Node, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/nodes") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*Node - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Services is used to query for all known services -func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/services") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out map[string][]string - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query catalog entries for a given service -func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/service/"+service) - r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) - } - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*CatalogService - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Node is used to query for service information about a single node -func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - 
parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out *CatalogNode - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go deleted file mode 100644 index 90214e392ce..00000000000 --- a/vendor/github.com/hashicorp/consul/api/coordinate.go +++ /dev/null @@ -1,68 +0,0 @@ -package api - -import ( - "github.com/hashicorp/serf/coordinate" -) - -// CoordinateEntry represents a node and its associated network coordinate. -type CoordinateEntry struct { - Node string - Segment string - Coord *coordinate.Coordinate -} - -// CoordinateDatacenterMap has the coordinates for servers in a given datacenter -// and area. Network coordinates are only compatible within the same area. -type CoordinateDatacenterMap struct { - Datacenter string - AreaID string - Coordinates []CoordinateEntry -} - -// Coordinate can be used to query the coordinate endpoints -type Coordinate struct { - c *Client -} - -// Coordinate returns a handle to the coordinate endpoints -func (c *Client) Coordinate() *Coordinate { - return &Coordinate{c} -} - -// Datacenters is used to return the coordinates of all the servers in the WAN -// pool. -func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) { - r := c.c.newRequest("GET", "/v1/coordinate/datacenters") - _, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*CoordinateDatacenterMap - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Nodes is used to return the coordinates of all the nodes in the LAN pool. 
-func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/coordinate/nodes") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*CoordinateEntry - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go deleted file mode 100644 index 85b5b069b03..00000000000 --- a/vendor/github.com/hashicorp/consul/api/event.go +++ /dev/null @@ -1,104 +0,0 @@ -package api - -import ( - "bytes" - "strconv" -) - -// Event can be used to query the Event endpoints -type Event struct { - c *Client -} - -// UserEvent represents an event that was fired by the user -type UserEvent struct { - ID string - Name string - Payload []byte - NodeFilter string - ServiceFilter string - TagFilter string - Version int - LTime uint64 -} - -// Event returns a handle to the event endpoints -func (c *Client) Event() *Event { - return &Event{c} -} - -// Fire is used to fire a new user event. Only the Name, Payload and Filters -// are respected. This returns the ID or an associated error. Cross DC requests -// are supported. 
-func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) { - r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name) - r.setWriteOptions(q) - if params.NodeFilter != "" { - r.params.Set("node", params.NodeFilter) - } - if params.ServiceFilter != "" { - r.params.Set("service", params.ServiceFilter) - } - if params.TagFilter != "" { - r.params.Set("tag", params.TagFilter) - } - if params.Payload != nil { - r.body = bytes.NewReader(params.Payload) - } - - rtt, resp, err := requireOK(e.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out UserEvent - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// List is used to get the most recent events an agent has received. -// This list can be optionally filtered by the name. This endpoint supports -// quasi-blocking queries. The index is not monotonic, nor does it provide provide -// LastContact or KnownLeader. -func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) { - r := e.c.newRequest("GET", "/v1/event/list") - r.setQueryOptions(q) - if name != "" { - r.params.Set("name", name) - } - rtt, resp, err := requireOK(e.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*UserEvent - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// IDToIndex is a bit of a hack. This simulates the index generation to -// convert an event ID into a WaitIndex. 
-func (e *Event) IDToIndex(uuid string) uint64 { - lower := uuid[0:8] + uuid[9:13] + uuid[14:18] - upper := uuid[19:23] + uuid[24:36] - lowVal, err := strconv.ParseUint(lower, 16, 64) - if err != nil { - panic("Failed to convert " + lower) - } - highVal, err := strconv.ParseUint(upper, 16, 64) - if err != nil { - panic("Failed to convert " + upper) - } - return lowVal ^ highVal -} diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go deleted file mode 100644 index 38c105fdb93..00000000000 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ /dev/null @@ -1,200 +0,0 @@ -package api - -import ( - "fmt" - "strings" -) - -const ( - // HealthAny is special, and is used as a wild card, - // not as a specific state. - HealthAny = "any" - HealthPassing = "passing" - HealthWarning = "warning" - HealthCritical = "critical" - HealthMaint = "maintenance" -) - -const ( - // NodeMaint is the special key set by a node in maintenance mode. - NodeMaint = "_node_maintenance" - - // ServiceMaintPrefix is the prefix for a service in maintenance mode. - ServiceMaintPrefix = "_service_maintenance:" -) - -// HealthCheck is used to represent a single check -type HealthCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string - ServiceTags []string -} - -// HealthChecks is a collection of HealthCheck structs. -type HealthChecks []*HealthCheck - -// AggregatedStatus returns the "best" status for the list of health checks. 
-// Because a given entry may have many service and node-level health checks -// attached, this function determines the best representative of the status as -// as single string using the following heuristic: -// -// maintenance > critical > warning > passing -// -func (c HealthChecks) AggregatedStatus() string { - var passing, warning, critical, maintenance bool - for _, check := range c { - id := string(check.CheckID) - if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) { - maintenance = true - continue - } - - switch check.Status { - case HealthPassing: - passing = true - case HealthWarning: - warning = true - case HealthCritical: - critical = true - default: - return "" - } - } - - switch { - case maintenance: - return HealthMaint - case critical: - return HealthCritical - case warning: - return HealthWarning - case passing: - return HealthPassing - default: - return HealthPassing - } -} - -// ServiceEntry is used for the health service endpoint -type ServiceEntry struct { - Node *Node - Service *AgentService - Checks HealthChecks -} - -// Health can be used to query the Health endpoints -type Health struct { - c *Client -} - -// Health returns a handle to the health endpoints -func (c *Client) Health() *Health { - return &Health{c} -} - -// Node is used to query for checks belonging to a given node -func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out HealthChecks - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Checks is used to return the checks associated with a service -func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { - r := 
h.c.newRequest("GET", "/v1/health/checks/"+service) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out HealthChecks - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query health information along with service info -// for a given service. It can optionally do server-side filtering on a tag -// or nodes with passing health checks only. -func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/service/"+service) - r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) - } - if passingOnly { - r.params.Set(HealthPassing, "1") - } - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*ServiceEntry - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// State is used to retrieve all the checks in a given state. -// The wildcard "any" state can also be used for all checks. 
-func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { - switch state { - case HealthAny: - case HealthWarning: - case HealthCritical: - case HealthPassing: - default: - return nil, nil, fmt.Errorf("Unsupported state: %v", state) - } - r := h.c.newRequest("GET", "/v1/health/state/"+state) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out HealthChecks - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go deleted file mode 100644 index 97f51568559..00000000000 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ /dev/null @@ -1,420 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "io" - "net/http" - "strconv" - "strings" -) - -// KVPair is used to represent a single K/V entry -type KVPair struct { - // Key is the name of the key. It is also part of the URL path when accessed - // via the API. - Key string - - // CreateIndex holds the index corresponding the creation of this KVPair. This - // is a read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 - - // LockIndex holds the index corresponding to a lock on this key, if any. This - // is a read-only field. - LockIndex uint64 - - // Flags are any user-defined flags on the key. It is up to the implementer - // to check these values, since Consul does not treat them specially. - Flags uint64 - - // Value is the value for the key. This can be any value, but it will be - // base64 encoded upon transport. - Value []byte - - // Session is a string representing the ID of the session. 
Any other - // interactions with this key over the same session must specify the same - // session ID. - Session string -} - -// KVPairs is a list of KVPair objects -type KVPairs []*KVPair - -// KVOp constants give possible operations available in a KVTxn. -type KVOp string - -const ( - KVSet KVOp = "set" - KVDelete KVOp = "delete" - KVDeleteCAS KVOp = "delete-cas" - KVDeleteTree KVOp = "delete-tree" - KVCAS KVOp = "cas" - KVLock KVOp = "lock" - KVUnlock KVOp = "unlock" - KVGet KVOp = "get" - KVGetTree KVOp = "get-tree" - KVCheckSession KVOp = "check-session" - KVCheckIndex KVOp = "check-index" - KVCheckNotExists KVOp = "check-not-exists" -) - -// KVTxnOp defines a single operation inside a transaction. -type KVTxnOp struct { - Verb KVOp - Key string - Value []byte - Flags uint64 - Index uint64 - Session string -} - -// KVTxnOps defines a set of operations to be performed inside a single -// transaction. -type KVTxnOps []*KVTxnOp - -// KVTxnResponse has the outcome of a transaction. -type KVTxnResponse struct { - Results []*KVPair - Errors TxnErrors -} - -// KV is used to manipulate the K/V API -type KV struct { - c *Client -} - -// KV is used to return a handle to the K/V apis -func (c *Client) KV() *KV { - return &KV{c} -} - -// Get is used to lookup a single key. The returned pointer -// to the KVPair will be nil if the key does not exist. 
-func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { - resp, qm, err := k.getInternal(key, nil, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to lookup all keys under a prefix -func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { - resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Keys is used to list all the keys under a prefix. Optionally, -// a separator can be used to limit the responses. 
-func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { - params := map[string]string{"keys": ""} - if separator != "" { - params["separator"] = separator - } - resp, qm, err := k.getInternal(prefix, params, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []string - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { - r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/")) - r.setQueryOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == 404 { - resp.Body.Close() - return nil, qm, nil - } else if resp.StatusCode != 200 { - resp.Body.Close() - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - return resp, qm, nil -} - -// Put is used to write a new value. Only the -// Key, Flags and Value is respected. -func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { - params := make(map[string]string, 1) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - _, wm, err := k.put(p.Key, params, p.Value, q) - return wm, err -} - -// CAS is used for a Check-And-Set operation. The Key, -// ModifyIndex, Flags and Value are respected. Returns true -// on success or false on failures. 
-func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) - return k.put(p.Key, params, p.Value, q) -} - -// Acquire is used for a lock acquisition operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["acquire"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -// Release is used for a lock release operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["release"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { - if len(key) > 0 && key[0] == '/' { - return false, nil, fmt.Errorf("Invalid key. 
Key must not begin with a '/': %s", key) - } - - r := k.c.newRequest("PUT", "/v1/kv/"+key) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - r.body = bytes.NewReader(body) - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - return res, qm, nil -} - -// Delete is used to delete a single key -func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(key, nil, w) - return qm, err -} - -// DeleteCAS is used for a Delete Check-And-Set operation. The Key -// and ModifyIndex are respected. Returns true on success or false on failures. -func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := map[string]string{ - "cas": strconv.FormatUint(p.ModifyIndex, 10), - } - return k.deleteInternal(p.Key, params, q) -} - -// DeleteTree is used to delete all keys under a prefix -func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) - return qm, err -} - -func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { - r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := 
strings.Contains(buf.String(), "true") - return res, qm, nil -} - -// TxnOp is the internal format we send to Consul. It's not specific to KV, -// though currently only KV operations are supported. -type TxnOp struct { - KV *KVTxnOp -} - -// TxnOps is a list of transaction operations. -type TxnOps []*TxnOp - -// TxnResult is the internal format we receive from Consul. -type TxnResult struct { - KV *KVPair -} - -// TxnResults is a list of TxnResult objects. -type TxnResults []*TxnResult - -// TxnError is used to return information about an operation in a transaction. -type TxnError struct { - OpIndex int - What string -} - -// TxnErrors is a list of TxnError objects. -type TxnErrors []*TxnError - -// TxnResponse is the internal format we receive from Consul. -type TxnResponse struct { - Results TxnResults - Errors TxnErrors -} - -// Txn is used to apply multiple KV operations in a single, atomic transaction. -// -// Note that Go will perform the required base64 encoding on the values -// automatically because the type is a byte slice. Transactions are defined as a -// list of operations to perform, using the KVOp constants and KVTxnOp structure -// to define operations. If any operation fails, none of the changes are applied -// to the state store. Note that this hides the internal raw transaction interface -// and munges the input and output types into KV-specific ones for ease of use. -// If there are more non-KV operations in the future we may break out a new -// transaction API client, but it will be easy to keep this KV-specific variant -// supported. -// -// Even though this is generally a write operation, we take a QueryOptions input -// and return a QueryMeta output. If the transaction contains only read ops, then -// Consul will fast-path it to a different endpoint internally which supports -// consistency controls, but not blocking. 
If there are write operations then -// the request will always be routed through raft and any consistency settings -// will be ignored. -// -// Here's an example: -// -// ops := KVTxnOps{ -// &KVTxnOp{ -// Verb: KVLock, -// Key: "test/lock", -// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e", -// Value: []byte("hello"), -// }, -// &KVTxnOp{ -// Verb: KVGet, -// Key: "another/key", -// }, -// } -// ok, response, _, err := kv.Txn(&ops, nil) -// -// If there is a problem making the transaction request then an error will be -// returned. Otherwise, the ok value will be true if the transaction succeeded -// or false if it was rolled back. The response is a structured return value which -// will have the outcome of the transaction. Its Results member will have entries -// for each operation. Deleted keys will have a nil entry in the, and to save -// space, the Value of each key in the Results will be nil unless the operation -// is a KVGet. If the transaction was rolled back, the Errors member will have -// entries referencing the index of the operation that failed along with an error -// message. -func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) { - r := k.c.newRequest("PUT", "/v1/txn") - r.setQueryOptions(q) - - // Convert into the internal format since this is an all-KV txn. - ops := make(TxnOps, 0, len(txn)) - for _, kvOp := range txn { - ops = append(ops, &TxnOp{KV: kvOp}) - } - r.obj = ops - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return false, nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict { - var txnResp TxnResponse - if err := decodeBody(resp, &txnResp); err != nil { - return false, nil, nil, err - } - - // Convert from the internal format. 
- kvResp := KVTxnResponse{ - Errors: txnResp.Errors, - } - for _, result := range txnResp.Results { - kvResp.Results = append(kvResp.Results, result.KV) - } - return resp.StatusCode == http.StatusOK, &kvResp, qm, nil - } - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, nil, fmt.Errorf("Failed to read response: %v", err) - } - return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String()) -} diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go deleted file mode 100644 index 41f72e7d23a..00000000000 --- a/vendor/github.com/hashicorp/consul/api/lock.go +++ /dev/null @@ -1,385 +0,0 @@ -package api - -import ( - "fmt" - "sync" - "time" -) - -const ( - // DefaultLockSessionName is the Session Name we assign if none is provided - DefaultLockSessionName = "Consul API Lock" - - // DefaultLockSessionTTL is the default session TTL if no Session is provided - // when creating a new Lock. This is used because we do not have another - // other check to depend upon. - DefaultLockSessionTTL = "15s" - - // DefaultLockWaitTime is how long we block for at a time to check if lock - // acquisition is possible. This affects the minimum time it takes to cancel - // a Lock acquisition. - DefaultLockWaitTime = 15 * time.Second - - // DefaultLockRetryTime is how long we wait after a failed lock acquisition - // before attempting to do the lock again. This is so that once a lock-delay - // is in effect, we do not hot loop retrying the acquisition. - DefaultLockRetryTime = 5 * time.Second - - // DefaultMonitorRetryTime is how long we wait after a failed monitor check - // of a lock (500 response code). This allows the monitor to ride out brief - // periods of unavailability, subject to the MonitorRetries setting in the - // lock options which is by default set to 0, disabling this feature. This - // affects locks and semaphores. 
- DefaultMonitorRetryTime = 2 * time.Second - - // LockFlagValue is a magic flag we set to indicate a key - // is being used for a lock. It is used to detect a potential - // conflict with a semaphore. - LockFlagValue = 0x2ddccbc058a50c18 -) - -var ( - // ErrLockHeld is returned if we attempt to double lock - ErrLockHeld = fmt.Errorf("Lock already held") - - // ErrLockNotHeld is returned if we attempt to unlock a lock - // that we do not hold. - ErrLockNotHeld = fmt.Errorf("Lock not held") - - // ErrLockInUse is returned if we attempt to destroy a lock - // that is in use. - ErrLockInUse = fmt.Errorf("Lock in use") - - // ErrLockConflict is returned if the flags on a key - // used for a lock do not match expectation - ErrLockConflict = fmt.Errorf("Existing key does not match lock use") -) - -// Lock is used to implement client-side leader election. It is follows the -// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html. -type Lock struct { - c *Client - opts *LockOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// LockOptions is used to parameterize the Lock behavior. 
-type LockOptions struct { - Key string // Must be set and have write permissions - Value []byte // Optional, value to associate with the lock - Session string // Optional, created if not specified - SessionOpts *SessionEntry // Optional, options to use when creating a session - SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) - SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) - MonitorRetries int // Optional, defaults to 0 which means no retries - MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime - LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime - LockTryOnce bool // Optional, defaults to false which means try forever -} - -// LockKey returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockKey(key string) (*Lock, error) { - opts := &LockOptions{ - Key: key, - } - return c.LockOpts(opts) -} - -// LockOpts returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { - if opts.Key == "" { - return nil, fmt.Errorf("missing key") - } - if opts.SessionName == "" { - opts.SessionName = DefaultLockSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultLockSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - if opts.MonitorRetryTime == 0 { - opts.MonitorRetryTime = DefaultMonitorRetryTime - } - if opts.LockWaitTime == 0 { - opts.LockWaitTime = DefaultLockWaitTime - } - l := &Lock{ - c: c, - opts: opts, - } - return l, nil -} - -// Lock attempts to acquire the lock and blocks while doing so. -// Providing a non-nil stopCh can be used to abort the lock attempt. 
-// Returns a channel that is closed if our lock is lost or an error. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the lock is held until Unlock() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the lock being lost. -func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return nil, ErrLockHeld - } - - // Check if we need to create a session first - l.lockSession = l.opts.Session - if l.lockSession == "" { - s, err := l.createSession() - if err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } - - l.sessionRenew = make(chan struct{}) - l.lockSession = s - session := l.c.Session() - go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !l.isHeld { - close(l.sessionRenew) - l.sessionRenew = nil - } - }() - } - - // Setup the query options - kv := l.c.KV() - qOpts := &QueryOptions{ - WaitTime: l.opts.LockWaitTime, - } - - start := time.Now() - attempts := 0 -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Handle the one-shot mode. 
- if l.opts.LockTryOnce && attempts > 0 { - elapsed := time.Since(start) - if elapsed > qOpts.WaitTime { - return nil, nil - } - - qOpts.WaitTime -= elapsed - } - attempts++ - - // Look for an existing lock, blocking until not taken - pair, meta, err := kv.Get(l.opts.Key, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read lock: %v", err) - } - if pair != nil && pair.Flags != LockFlagValue { - return nil, ErrLockConflict - } - locked := false - if pair != nil && pair.Session == l.lockSession { - goto HELD - } - if pair != nil && pair.Session != "" { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Try to acquire the lock - pair = l.lockEntry(l.lockSession) - locked, _, err = kv.Acquire(pair, nil) - if err != nil { - return nil, fmt.Errorf("failed to acquire lock: %v", err) - } - - // Handle the case of not getting the lock - if !locked { - // Determine why the lock failed - qOpts.WaitIndex = 0 - pair, meta, err = kv.Get(l.opts.Key, qOpts) - if pair != nil && pair.Session != "" { - //If the session is not null, this means that a wait can safely happen - //using a long poll - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } else { - // If the session is empty and the lock failed to acquire, then it means - // a lock-delay is in effect and a timed wait must be used - select { - case <-time.After(DefaultLockRetryTime): - goto WAIT - case <-stopCh: - return nil, nil - } - } - } - -HELD: - // Watch to ensure we maintain leadership - leaderCh := make(chan struct{}) - go l.monitorLock(l.lockSession, leaderCh) - - // Set that we own the lock - l.isHeld = true - - // Locked! All done - return leaderCh, nil -} - -// Unlock released the lock. It is an error to call this -// if the lock is not currently held. 
-func (l *Lock) Unlock() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Ensure the lock is actually held - if !l.isHeld { - return ErrLockNotHeld - } - - // Set that we no longer own the lock - l.isHeld = false - - // Stop the session renew - if l.sessionRenew != nil { - defer func() { - close(l.sessionRenew) - l.sessionRenew = nil - }() - } - - // Get the lock entry, and clear the lock session - lockEnt := l.lockEntry(l.lockSession) - l.lockSession = "" - - // Release the lock explicitly - kv := l.c.KV() - _, _, err := kv.Release(lockEnt, nil) - if err != nil { - return fmt.Errorf("failed to release lock: %v", err) - } - return nil -} - -// Destroy is used to cleanup the lock entry. It is not necessary -// to invoke. It will fail if the lock is in use. -func (l *Lock) Destroy() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return ErrLockHeld - } - - // Look for an existing lock - kv := l.c.KV() - pair, _, err := kv.Get(l.opts.Key, nil) - if err != nil { - return fmt.Errorf("failed to read lock: %v", err) - } - - // Nothing to do if the lock does not exist - if pair == nil { - return nil - } - - // Check for possible flag conflict - if pair.Flags != LockFlagValue { - return ErrLockConflict - } - - // Check if it is in use - if pair.Session != "" { - return ErrLockInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(pair, nil) - if err != nil { - return fmt.Errorf("failed to remove lock: %v", err) - } - if !didRemove { - return ErrLockInUse - } - return nil -} - -// createSession is used to create a new managed session -func (l *Lock) createSession() (string, error) { - session := l.c.Session() - se := l.opts.SessionOpts - if se == nil { - se = &SessionEntry{ - Name: l.opts.SessionName, - TTL: l.opts.SessionTTL, - } - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, 
nil -} - -// lockEntry returns a formatted KVPair for the lock -func (l *Lock) lockEntry(session string) *KVPair { - return &KVPair{ - Key: l.opts.Key, - Value: l.opts.Value, - Session: session, - Flags: LockFlagValue, - } -} - -// monitorLock is a long running routine to monitor a lock ownership -// It closes the stopCh if we lose our leadership. -func (l *Lock) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := l.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - retries := l.opts.MonitorRetries -RETRY: - pair, meta, err := kv.Get(l.opts.Key, opts) - if err != nil { - // If configured we can try to ride out a brief Consul unavailability - // by doing retries. Note that we have to attempt the retry in a non- - // blocking fashion so that we have a clean place to reset the retry - // counter if service is restored. - if retries > 0 && IsRetryableError(err) { - time.Sleep(l.opts.MonitorRetryTime) - retries-- - opts.WaitIndex = 0 - goto RETRY - } - return - } - if pair != nil && pair.Session == session { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go deleted file mode 100644 index 079e2248663..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator.go +++ /dev/null @@ -1,11 +0,0 @@ -package api - -// Operator can be used to perform low-level operator tasks for Consul. -type Operator struct { - c *Client -} - -// Operator returns a handle to the operator endpoints. 
-func (c *Client) Operator() *Operator { - return &Operator{c} -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go deleted file mode 100644 index a630b694cd5..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_area.go +++ /dev/null @@ -1,193 +0,0 @@ -// The /v1/operator/area endpoints are available only in Consul Enterprise and -// interact with its network area subsystem. Network areas are used to link -// together Consul servers in different Consul datacenters. With network areas, -// Consul datacenters can be linked together in ways other than a fully-connected -// mesh, as is required for Consul's WAN. -package api - -import ( - "net" - "time" -) - -// Area defines a network area. -type Area struct { - // ID is this identifier for an area (a UUID). This must be left empty - // when creating a new area. - ID string - - // PeerDatacenter is the peer Consul datacenter that will make up the - // other side of this network area. Network areas always involve a pair - // of datacenters: the datacenter where the area was created, and the - // peer datacenter. This is required. - PeerDatacenter string - - // RetryJoin specifies the address of Consul servers to join to, such as - // an IPs or hostnames with an optional port number. This is optional. - RetryJoin []string - - // UseTLS specifies whether gossip over this area should be encrypted with TLS - // if possible. - UseTLS bool -} - -// AreaJoinResponse is returned when a join occurs and gives the result for each -// address. -type AreaJoinResponse struct { - // The address that was joined. - Address string - - // Whether or not the join was a success. - Joined bool - - // If we couldn't join, this is the message with information. - Error string -} - -// SerfMember is a generic structure for reporting information about members in -// a Serf cluster. 
This is only used by the area endpoints right now, but this -// could be expanded to other endpoints in the future. -type SerfMember struct { - // ID is the node identifier (a UUID). - ID string - - // Name is the node name. - Name string - - // Addr has the IP address. - Addr net.IP - - // Port is the RPC port. - Port uint16 - - // Datacenter is the DC name. - Datacenter string - - // Role is "client", "server", or "unknown". - Role string - - // Build has the version of the Consul agent. - Build string - - // Protocol is the protocol of the Consul agent. - Protocol int - - // Status is the Serf health status "none", "alive", "leaving", "left", - // or "failed". - Status string - - // RTT is the estimated round trip time from the server handling the - // request to the this member. This will be negative if no RTT estimate - // is available. - RTT time.Duration -} - -// AreaCreate will create a new network area. The ID in the given structure must -// be empty and a generated ID will be returned on success. -func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) { - r := op.c.newRequest("POST", "/v1/operator/area") - r.setWriteOptions(q) - r.obj = area - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// AreaUpdate will update the configuration of the network area with the given ID. 
-func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) { - r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID) - r.setWriteOptions(q) - r.obj = area - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// AreaGet returns a single network area. -func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { - var out []*Area - qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// AreaList returns all the available network areas. -func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { - var out []*Area - qm, err := op.c.query("/v1/operator/area", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// AreaDelete deletes the given network area. -func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { - r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) - r.setWriteOptions(q) - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} - -// AreaJoin attempts to join the given set of join addresses to the given -// network area. See the Area structure for details about join addresses. 
-func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { - r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") - r.setWriteOptions(q) - r.obj = addresses - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out []*AreaJoinResponse - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, wm, nil -} - -// AreaMembers lists the Serf information about the members in the given area. -func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) { - var out []*SerfMember - qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go deleted file mode 100644 index b179406dc12..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go +++ /dev/null @@ -1,219 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// AutopilotConfiguration is used for querying/setting the Autopilot configuration. -// Autopilot helps manage operator tasks related to Consul servers like removing -// failed servers from the Raft quorum. -type AutopilotConfiguration struct { - // CleanupDeadServers controls whether to remove dead servers from the Raft - // peer list when a new server joins - CleanupDeadServers bool - - // LastContactThreshold is the limit on the amount of time a server can go - // without leader contact before being considered unhealthy. - LastContactThreshold *ReadableDuration - - // MaxTrailingLogs is the amount of entries in the Raft Log that a server can - // be behind before being considered unhealthy. 
- MaxTrailingLogs uint64 - - // ServerStabilizationTime is the minimum amount of time a server must be - // in a stable, healthy state before it can be added to the cluster. Only - // applicable with Raft protocol version 3 or higher. - ServerStabilizationTime *ReadableDuration - - // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating - // servers into zones for redundancy. If left blank, this feature will be disabled. - RedundancyZoneTag string - - // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration - // strategy of waiting until enough newer-versioned servers have been added to the - // cluster before promoting them to voters. - DisableUpgradeMigration bool - - // (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when - // performing upgrade migrations. If left blank, the Consul version will be used. - UpgradeVersionTag string - - // CreateIndex holds the index corresponding the creation of this configuration. - // This is a read-only field. - CreateIndex uint64 - - // ModifyIndex will be set to the index of the last update when retrieving the - // Autopilot configuration. Resubmitting a configuration with - // AutopilotCASConfiguration will perform a check-and-set operation which ensures - // there hasn't been a subsequent update since the configuration was retrieved. - ModifyIndex uint64 -} - -// ServerHealth is the health (from the leader's point of view) of a server. -type ServerHealth struct { - // ID is the raft ID of the server. - ID string - - // Name is the node name of the server. - Name string - - // Address is the address of the server. - Address string - - // The status of the SerfHealth check for the server. - SerfStatus string - - // Version is the Consul version of the server. - Version string - - // Leader is whether this server is currently the leader. - Leader bool - - // LastContact is the time since this node's last contact with the leader. 
- LastContact *ReadableDuration - - // LastTerm is the highest leader term this server has a record of in its Raft log. - LastTerm uint64 - - // LastIndex is the last log index this server has a record of in its Raft log. - LastIndex uint64 - - // Healthy is whether or not the server is healthy according to the current - // Autopilot config. - Healthy bool - - // Voter is whether this is a voting server. - Voter bool - - // StableSince is the last time this server's Healthy value changed. - StableSince time.Time -} - -// OperatorHealthReply is a representation of the overall health of the cluster -type OperatorHealthReply struct { - // Healthy is true if all the servers in the cluster are healthy. - Healthy bool - - // FailureTolerance is the number of healthy servers that could be lost without - // an outage occurring. - FailureTolerance int - - // Servers holds the health of each server. - Servers []ServerHealth -} - -// ReadableDuration is a duration type that is serialized to JSON in human readable format. 
-type ReadableDuration time.Duration - -func NewReadableDuration(dur time.Duration) *ReadableDuration { - d := ReadableDuration(dur) - return &d -} - -func (d *ReadableDuration) String() string { - return d.Duration().String() -} - -func (d *ReadableDuration) Duration() time.Duration { - if d == nil { - return time.Duration(0) - } - return time.Duration(*d) -} - -func (d *ReadableDuration) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil -} - -func (d *ReadableDuration) UnmarshalJSON(raw []byte) error { - if d == nil { - return fmt.Errorf("cannot unmarshal to nil pointer") - } - - str := string(raw) - if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' { - return fmt.Errorf("must be enclosed with quotes: %s", str) - } - dur, err := time.ParseDuration(str[1 : len(str)-1]) - if err != nil { - return err - } - *d = ReadableDuration(dur) - return nil -} - -// AutopilotGetConfiguration is used to query the current Autopilot configuration. -func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) { - r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out AutopilotConfiguration - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - - return &out, nil -} - -// AutopilotSetConfiguration is used to set the current Autopilot configuration. -func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error { - r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") - r.setWriteOptions(q) - r.obj = conf - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// AutopilotCASConfiguration is used to perform a Check-And-Set update on the -// Autopilot configuration. 
The ModifyIndex value will be respected. Returns -// true on success or false on failures. -func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) { - r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") - r.setWriteOptions(q) - r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10)) - r.obj = conf - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return false, err - } - defer resp.Body.Close() - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - - return res, nil -} - -// AutopilotServerHealth -func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) { - r := op.c.newRequest("GET", "/v1/operator/autopilot/health") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out OperatorHealthReply - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go deleted file mode 100644 index 6b614296cea..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_keyring.go +++ /dev/null @@ -1,86 +0,0 @@ -package api - -// keyringRequest is used for performing Keyring operations -type keyringRequest struct { - Key string -} - -// KeyringResponse is returned when listing the gossip encryption keys -type KeyringResponse struct { - // Whether this response is for a WAN ring - WAN bool - - // The datacenter name this request corresponds to - Datacenter string - - // Segment has the network segment this request corresponds to. 
- Segment string - - // A map of the encryption keys to the number of nodes they're installed on - Keys map[string]int - - // The total number of nodes in this ring - NumNodes int -} - -// KeyringInstall is used to install a new gossip encryption key into the cluster -func (op *Operator) KeyringInstall(key string, q *WriteOptions) error { - r := op.c.newRequest("POST", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// KeyringList is used to list the gossip keys installed in the cluster -func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) { - r := op.c.newRequest("GET", "/v1/operator/keyring") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*KeyringResponse - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// KeyringRemove is used to remove a gossip encryption key from the cluster -func (op *Operator) KeyringRemove(key string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// KeyringUse is used to change the active gossip encryption key -func (op *Operator) KeyringUse(key string, q *WriteOptions) error { - r := op.c.newRequest("PUT", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go deleted file mode 100644 index a9844df2dd3..00000000000 --- 
a/vendor/github.com/hashicorp/consul/api/operator_raft.go +++ /dev/null @@ -1,89 +0,0 @@ -package api - -// RaftServer has information about a server in the Raft configuration. -type RaftServer struct { - // ID is the unique ID for the server. These are currently the same - // as the address, but they will be changed to a real GUID in a future - // release of Consul. - ID string - - // Node is the node name of the server, as known by Consul, or this - // will be set to "(unknown)" otherwise. - Node string - - // Address is the IP:port of the server, used for Raft communications. - Address string - - // Leader is true if this server is the current cluster leader. - Leader bool - - // Protocol version is the raft protocol version used by the server - ProtocolVersion string - - // Voter is true if this server has a vote in the cluster. This might - // be false if the server is staging and still coming online, or if - // it's a non-voting server, which will be added in a future release of - // Consul. - Voter bool -} - -// RaftConfiguration is returned when querying for the current Raft configuration. -type RaftConfiguration struct { - // Servers has the list of servers in the Raft configuration. - Servers []*RaftServer - - // Index has the Raft index of this configuration. - Index uint64 -} - -// RaftGetConfiguration is used to query the current Raft peer set. -func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { - r := op.c.newRequest("GET", "/v1/operator/raft/configuration") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out RaftConfiguration - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} - -// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft -// quorum but no longer known to Serf or the catalog) by address in the form of -// "IP:port". 
-func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") - r.setWriteOptions(q) - - r.params.Set("address", string(address)) - - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - - resp.Body.Close() - return nil -} - -// RaftRemovePeerByID is used to kick a stale peer (one that it in the Raft -// quorum but no longer known to Serf or the catalog) by ID. -func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") - r.setWriteOptions(q) - - r.params.Set("id", string(id)) - - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - - resp.Body.Close() - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go deleted file mode 100644 index 92b05d3c03b..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_segment.go +++ /dev/null @@ -1,11 +0,0 @@ -package api - -// SegmentList returns all the available LAN segments. -func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) { - var out []string - qm, err := op.c.query("/v1/operator/segment", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go deleted file mode 100644 index ff210de3f00..00000000000 --- a/vendor/github.com/hashicorp/consul/api/prepared_query.go +++ /dev/null @@ -1,198 +0,0 @@ -package api - -// QueryDatacenterOptions sets options about how we fail over if there are no -// healthy nodes in the local datacenter. -type QueryDatacenterOptions struct { - // NearestN is set to the number of remote datacenters to try, based on - // network coordinates. 
- NearestN int - - // Datacenters is a fixed list of datacenters to try after NearestN. We - // never try a datacenter multiple times, so those are subtracted from - // this list before proceeding. - Datacenters []string -} - -// QueryDNSOptions controls settings when query results are served over DNS. -type QueryDNSOptions struct { - // TTL is the time to live for the served DNS results. - TTL string -} - -// ServiceQuery is used to query for a set of healthy nodes offering a specific -// service. -type ServiceQuery struct { - // Service is the service to query. - Service string - - // Near allows baking in the name of a node to automatically distance- - // sort from. The magic "_agent" value is supported, which sorts near - // the agent which initiated the request by default. - Near string - - // Failover controls what we do if there are no healthy nodes in the - // local datacenter. - Failover QueryDatacenterOptions - - // If OnlyPassing is true then we will only include nodes with passing - // health checks (critical AND warning checks will cause a node to be - // discarded) - OnlyPassing bool - - // Tags are a set of required and/or disallowed tags. If a tag is in - // this list it must be present. If the tag is preceded with "!" then - // it is disallowed. - Tags []string - - // NodeMeta is a map of required node metadata fields. If a key/value - // pair is in this map it must be present on the node in order for the - // service entry to be returned. - NodeMeta map[string]string -} - -// QueryTemplate carries the arguments for creating a templated query. -type QueryTemplate struct { - // Type specifies the type of the query template. Currently only - // "name_prefix_match" is supported. This field is required. - Type string - - // Regexp allows specifying a regex pattern to match against the name - // of the query being executed. - Regexp string -} - -// PrepatedQueryDefinition defines a complete prepared query. 
-type PreparedQueryDefinition struct { - // ID is this UUID-based ID for the query, always generated by Consul. - ID string - - // Name is an optional friendly name for the query supplied by the - // user. NOTE - if this feature is used then it will reduce the security - // of any read ACL associated with this query/service since this name - // can be used to locate nodes with supplying any ACL. - Name string - - // Session is an optional session to tie this query's lifetime to. If - // this is omitted then the query will not expire. - Session string - - // Token is the ACL token used when the query was created, and it is - // used when a query is subsequently executed. This token, or a token - // with management privileges, must be used to change the query later. - Token string - - // Service defines a service query (leaving things open for other types - // later). - Service ServiceQuery - - // DNS has options that control how the results of this query are - // served over DNS. - DNS QueryDNSOptions - - // Template is used to pass through the arguments for creating a - // prepared query with an attached template. If a template is given, - // interpolations are possible in other struct fields. - Template QueryTemplate -} - -// PreparedQueryExecuteResponse has the results of executing a query. -type PreparedQueryExecuteResponse struct { - // Service is the service that was queried. - Service string - - // Nodes has the nodes that were output by the query. - Nodes []ServiceEntry - - // DNS has the options for serving these results over DNS. - DNS QueryDNSOptions - - // Datacenter is the datacenter that these results came from. - Datacenter string - - // Failovers is a count of how many times we had to query a remote - // datacenter. - Failovers int -} - -// PreparedQuery can be used to query the prepared query endpoints. -type PreparedQuery struct { - c *Client -} - -// PreparedQuery returns a handle to the prepared query endpoints. 
-func (c *Client) PreparedQuery() *PreparedQuery { - return &PreparedQuery{c} -} - -// Create makes a new prepared query. The ID of the new query is returned. -func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { - r := c.c.newRequest("POST", "/v1/query") - r.setWriteOptions(q) - r.obj = query - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update makes updates to an existing prepared query. -func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { - return c.c.write("/v1/query/"+query.ID, query, nil, q) -} - -// List is used to fetch all the prepared queries (always requires a management -// token). -func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { - var out []*PreparedQueryDefinition - qm, err := c.c.query("/v1/query", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Get is used to fetch a specific prepared query. -func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { - var out []*PreparedQueryDefinition - qm, err := c.c.query("/v1/query/"+queryID, &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Delete is used to delete a specific prepared query. 
-func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("DELETE", "/v1/query/"+queryID) - r.setWriteOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} - -// Execute is used to execute a specific prepared query. You can execute using -// a query ID or name. -func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) { - var out *PreparedQueryExecuteResponse - qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go deleted file mode 100644 index 745a208c99d..00000000000 --- a/vendor/github.com/hashicorp/consul/api/raw.go +++ /dev/null @@ -1,24 +0,0 @@ -package api - -// Raw can be used to do raw queries against custom endpoints -type Raw struct { - c *Client -} - -// Raw returns a handle to query endpoints -func (c *Client) Raw() *Raw { - return &Raw{c} -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. -func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - return raw.c.query(endpoint, out, q) -} - -// Write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Consul conventions. 
-func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - return raw.c.write(endpoint, in, out, q) -} diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go deleted file mode 100644 index d0c57417788..00000000000 --- a/vendor/github.com/hashicorp/consul/api/semaphore.go +++ /dev/null @@ -1,513 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "path" - "sync" - "time" -) - -const ( - // DefaultSemaphoreSessionName is the Session Name we assign if none is provided - DefaultSemaphoreSessionName = "Consul API Semaphore" - - // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided - // when creating a new Semaphore. This is used because we do not have another - // other check to depend upon. - DefaultSemaphoreSessionTTL = "15s" - - // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore - // acquisition is possible. This affects the minimum time it takes to cancel - // a Semaphore acquisition. - DefaultSemaphoreWaitTime = 15 * time.Second - - // DefaultSemaphoreKey is the key used within the prefix to - // use for coordination between all the contenders. - DefaultSemaphoreKey = ".lock" - - // SemaphoreFlagValue is a magic flag we set to indicate a key - // is being used for a semaphore. It is used to detect a potential - // conflict with a lock. - SemaphoreFlagValue = 0xe0f69a2baa414de0 -) - -var ( - // ErrSemaphoreHeld is returned if we attempt to double lock - ErrSemaphoreHeld = fmt.Errorf("Semaphore already held") - - // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore - // that we do not hold. - ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held") - - // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore - // that is in use. 
- ErrSemaphoreInUse = fmt.Errorf("Semaphore in use") - - // ErrSemaphoreConflict is returned if the flags on a key - // used for a semaphore do not match expectation - ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use") -) - -// Semaphore is used to implement a distributed semaphore -// using the Consul KV primitives. -type Semaphore struct { - c *Client - opts *SemaphoreOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// SemaphoreOptions is used to parameterize the Semaphore -type SemaphoreOptions struct { - Prefix string // Must be set and have write permissions - Limit int // Must be set, and be positive - Value []byte // Optional, value to associate with the contender entry - Session string // Optional, created if not specified - SessionName string // Optional, defaults to DefaultLockSessionName - SessionTTL string // Optional, defaults to DefaultLockSessionTTL - MonitorRetries int // Optional, defaults to 0 which means no retries - MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime - SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime - SemaphoreTryOnce bool // Optional, defaults to false which means try forever -} - -// semaphoreLock is written under the DefaultSemaphoreKey and -// is used to coordinate between all the contenders. -type semaphoreLock struct { - // Limit is the integer limit of holders. This is used to - // verify that all the holders agree on the value. - Limit int - - // Holders is a list of all the semaphore holders. - // It maps the session ID to true. It is used as a set effectively. - Holders map[string]bool -} - -// SemaphorePrefix is used to created a Semaphore which will operate -// at the given KV prefix and uses the given limit for the semaphore. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. 
-func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) { - opts := &SemaphoreOptions{ - Prefix: prefix, - Limit: limit, - } - return c.SemaphoreOpts(opts) -} - -// SemaphoreOpts is used to create a Semaphore with the given options. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. If a Session is not provided, one will be created. -func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) { - if opts.Prefix == "" { - return nil, fmt.Errorf("missing prefix") - } - if opts.Limit <= 0 { - return nil, fmt.Errorf("semaphore limit must be positive") - } - if opts.SessionName == "" { - opts.SessionName = DefaultSemaphoreSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultSemaphoreSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - if opts.MonitorRetryTime == 0 { - opts.MonitorRetryTime = DefaultMonitorRetryTime - } - if opts.SemaphoreWaitTime == 0 { - opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime - } - s := &Semaphore{ - c: c, - opts: opts, - } - return s, nil -} - -// Acquire attempts to reserve a slot in the semaphore, blocking until -// success, interrupted via the stopCh or an error is encountered. -// Providing a non-nil stopCh can be used to abort the attempt. -// On success, a channel is returned that represents our slot. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the slot is held until Release() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the session being lost. 
-func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return nil, ErrSemaphoreHeld - } - - // Check if we need to create a session first - s.lockSession = s.opts.Session - if s.lockSession == "" { - sess, err := s.createSession() - if err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } - - s.sessionRenew = make(chan struct{}) - s.lockSession = sess - session := s.c.Session() - go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !s.isHeld { - close(s.sessionRenew) - s.sessionRenew = nil - } - }() - } - - // Create the contender entry - kv := s.c.KV() - made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) - if err != nil || !made { - return nil, fmt.Errorf("failed to make contender entry: %v", err) - } - - // Setup the query options - qOpts := &QueryOptions{ - WaitTime: s.opts.SemaphoreWaitTime, - } - - start := time.Now() - attempts := 0 -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Handle the one-shot mode. 
- if s.opts.SemaphoreTryOnce && attempts > 0 { - elapsed := time.Since(start) - if elapsed > qOpts.WaitTime { - return nil, nil - } - - qOpts.WaitTime -= elapsed - } - attempts++ - - // Read the prefix - pairs, meta, err := kv.List(s.opts.Prefix, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read prefix: %v", err) - } - - // Decode the lock - lockPair := s.findLock(pairs) - if lockPair.Flags != SemaphoreFlagValue { - return nil, ErrSemaphoreConflict - } - lock, err := s.decodeLock(lockPair) - if err != nil { - return nil, err - } - - // Verify we agree with the limit - if lock.Limit != s.opts.Limit { - return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", - lock.Limit, s.opts.Limit) - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if the lock is held - if len(lock.Holders) >= lock.Limit { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Create a new lock with us as a holder - lock.Holders[s.lockSession] = true - newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) - if err != nil { - return nil, err - } - - // Attempt the acquisition - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return nil, fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - // Update failed, could have been a race with another contender, - // retry the operation - goto WAIT - } - - // Watch to ensure we maintain ownership of the slot - lockCh := make(chan struct{}) - go s.monitorLock(s.lockSession, lockCh) - - // Set that we own the lock - s.isHeld = true - - // Acquired! All done - return lockCh, nil -} - -// Release is used to voluntarily give up our semaphore slot. It is -// an error to call this if the semaphore has not been acquired. 
-func (s *Semaphore) Release() error { - // Hold the lock as we try to release - s.l.Lock() - defer s.l.Unlock() - - // Ensure the lock is actually held - if !s.isHeld { - return ErrSemaphoreNotHeld - } - - // Set that we no longer own the lock - s.isHeld = false - - // Stop the session renew - if s.sessionRenew != nil { - defer func() { - close(s.sessionRenew) - s.sessionRenew = nil - }() - } - - // Get and clear the lock session - lockSession := s.lockSession - s.lockSession = "" - - // Remove ourselves as a lock holder - kv := s.c.KV() - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) -READ: - pair, _, err := kv.Get(key, nil) - if err != nil { - return err - } - if pair == nil { - pair = &KVPair{} - } - lock, err := s.decodeLock(pair) - if err != nil { - return err - } - - // Create a new lock without us as a holder - if _, ok := lock.Holders[lockSession]; ok { - delete(lock.Holders, lockSession) - newLock, err := s.encodeLock(lock, pair.ModifyIndex) - if err != nil { - return err - } - - // Swap the locks - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - goto READ - } - } - - // Destroy the contender entry - contenderKey := path.Join(s.opts.Prefix, lockSession) - if _, err := kv.Delete(contenderKey, nil); err != nil { - return err - } - return nil -} - -// Destroy is used to cleanup the semaphore entry. It is not necessary -// to invoke. It will fail if the semaphore is in use. 
-func (s *Semaphore) Destroy() error { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return ErrSemaphoreHeld - } - - // List for the semaphore - kv := s.c.KV() - pairs, _, err := kv.List(s.opts.Prefix, nil) - if err != nil { - return fmt.Errorf("failed to read prefix: %v", err) - } - - // Find the lock pair, bail if it doesn't exist - lockPair := s.findLock(pairs) - if lockPair.ModifyIndex == 0 { - return nil - } - if lockPair.Flags != SemaphoreFlagValue { - return ErrSemaphoreConflict - } - - // Decode the lock - lock, err := s.decodeLock(lockPair) - if err != nil { - return err - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if there are any holders - if len(lock.Holders) > 0 { - return ErrSemaphoreInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(lockPair, nil) - if err != nil { - return fmt.Errorf("failed to remove semaphore: %v", err) - } - if !didRemove { - return ErrSemaphoreInUse - } - return nil -} - -// createSession is used to create a new managed session -func (s *Semaphore) createSession() (string, error) { - session := s.c.Session() - se := &SessionEntry{ - Name: s.opts.SessionName, - TTL: s.opts.SessionTTL, - Behavior: SessionBehaviorDelete, - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, nil -} - -// contenderEntry returns a formatted KVPair for the contender -func (s *Semaphore) contenderEntry(session string) *KVPair { - return &KVPair{ - Key: path.Join(s.opts.Prefix, session), - Value: s.opts.Value, - Session: session, - Flags: SemaphoreFlagValue, - } -} - -// findLock is used to find the KV Pair which is used for coordination -func (s *Semaphore) findLock(pairs KVPairs) *KVPair { - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) - for _, pair := range pairs { - if pair.Key == key { - return pair - } - } - return &KVPair{Flags: SemaphoreFlagValue} -} 
- -// decodeLock is used to decode a semaphoreLock from an -// entry in Consul -func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { - // Handle if there is no lock - if pair == nil || pair.Value == nil { - return &semaphoreLock{ - Limit: s.opts.Limit, - Holders: make(map[string]bool), - }, nil - } - - l := &semaphoreLock{} - if err := json.Unmarshal(pair.Value, l); err != nil { - return nil, fmt.Errorf("lock decoding failed: %v", err) - } - return l, nil -} - -// encodeLock is used to encode a semaphoreLock into a KVPair -// that can be PUT -func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { - enc, err := json.Marshal(l) - if err != nil { - return nil, fmt.Errorf("lock encoding failed: %v", err) - } - pair := &KVPair{ - Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), - Value: enc, - Flags: SemaphoreFlagValue, - ModifyIndex: oldIndex, - } - return pair, nil -} - -// pruneDeadHolders is used to remove all the dead lock holders -func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { - // Gather all the live holders - alive := make(map[string]struct{}, len(pairs)) - for _, pair := range pairs { - if pair.Session != "" { - alive[pair.Session] = struct{}{} - } - } - - // Remove any holders that are dead - for holder := range lock.Holders { - if _, ok := alive[holder]; !ok { - delete(lock.Holders, holder) - } - } -} - -// monitorLock is a long running routine to monitor a semaphore ownership -// It closes the stopCh if we lose our slot. -func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := s.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - retries := s.opts.MonitorRetries -RETRY: - pairs, meta, err := kv.List(s.opts.Prefix, opts) - if err != nil { - // If configured we can try to ride out a brief Consul unavailability - // by doing retries. 
Note that we have to attempt the retry in a non- - // blocking fashion so that we have a clean place to reset the retry - // counter if service is restored. - if retries > 0 && IsRetryableError(err) { - time.Sleep(s.opts.MonitorRetryTime) - retries-- - opts.WaitIndex = 0 - goto RETRY - } - return - } - lockPair := s.findLock(pairs) - lock, err := s.decodeLock(lockPair) - if err != nil { - return - } - s.pruneDeadHolders(lock, pairs) - if _, ok := lock.Holders[session]; ok { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go deleted file mode 100644 index 1613f11a60c..00000000000 --- a/vendor/github.com/hashicorp/consul/api/session.go +++ /dev/null @@ -1,224 +0,0 @@ -package api - -import ( - "errors" - "fmt" - "time" -) - -const ( - // SessionBehaviorRelease is the default behavior and causes - // all associated locks to be released on session invalidation. - SessionBehaviorRelease = "release" - - // SessionBehaviorDelete is new in Consul 0.5 and changes the - // behavior to delete all associated locks on session invalidation. - // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. - SessionBehaviorDelete = "delete" -) - -var ErrSessionExpired = errors.New("session expired") - -// SessionEntry represents a session in consul -type SessionEntry struct { - CreateIndex uint64 - ID string - Name string - Node string - Checks []string - LockDelay time.Duration - Behavior string - TTL string -} - -// Session can be used to query the Session endpoints -type Session struct { - c *Client -} - -// Session returns a handle to the session endpoints -func (c *Client) Session() *Session { - return &Session{c} -} - -// CreateNoChecks is like Create but is used specifically to create -// a session with no associated health checks. 
-func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - body := make(map[string]interface{}) - body["Checks"] = []string{} - if se != nil { - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(body, q) - -} - -// Create makes a new session. Providing a session entry can -// customize the session. It can also be nil to use defaults. -func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - var obj interface{} - if se != nil { - body := make(map[string]interface{}) - obj = body - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if len(se.Checks) > 0 { - body["Checks"] = se.Checks - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(obj, q) -} - -func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { - var out struct{ ID string } - wm, err := s.c.write("/v1/session/create", obj, &out, q) - if err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Destroy invalidates a given session -func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Renew renews the TTL on a given session -func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { - r := s.c.newRequest("PUT", "/v1/session/renew/"+id) - r.setWriteOptions(q) - rtt, resp, err := s.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer 
resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - - if resp.StatusCode == 404 { - return nil, wm, nil - } else if resp.StatusCode != 200 { - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - - var entries []*SessionEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, fmt.Errorf("Failed to read response: %v", err) - } - if len(entries) > 0 { - return entries[0], wm, nil - } - return nil, wm, nil -} - -// RenewPeriodic is used to periodically invoke Session.Renew on a -// session until a doneCh is closed. This is meant to be used in a long running -// goroutine to ensure a session stays valid. -func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error { - ctx := q.Context() - - ttl, err := time.ParseDuration(initialTTL) - if err != nil { - return err - } - - waitDur := ttl / 2 - lastRenewTime := time.Now() - var lastErr error - for { - if time.Since(lastRenewTime) > ttl { - return lastErr - } - select { - case <-time.After(waitDur): - entry, _, err := s.Renew(id, q) - if err != nil { - waitDur = time.Second - lastErr = err - continue - } - if entry == nil { - return ErrSessionExpired - } - - // Handle the server updating the TTL - ttl, _ = time.ParseDuration(entry.TTL) - waitDur = ttl / 2 - lastRenewTime = time.Now() - - case <-doneCh: - // Attempt a session destroy - s.Destroy(id, q) - return nil - - case <-ctx.Done(): - // Bail immediately since attempting the destroy would - // use the canceled context in q, which would just bail. 
- return ctx.Err() - } - } -} - -// Info looks up a single session -func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/info/"+id, &entries, q) - if err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List gets sessions for a node -func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/node/"+node, &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// List gets all active sessions -func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/list", &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go deleted file mode 100644 index e902377dd5c..00000000000 --- a/vendor/github.com/hashicorp/consul/api/snapshot.go +++ /dev/null @@ -1,47 +0,0 @@ -package api - -import ( - "io" -) - -// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of -// Consul's internal state and restore snapshots for disaster recovery. -type Snapshot struct { - c *Client -} - -// Snapshot returns a handle that exposes the snapshot endpoints. -func (c *Client) Snapshot() *Snapshot { - return &Snapshot{c} -} - -// Save requests a new snapshot and provides an io.ReadCloser with the snapshot -// data to save. If this doesn't return an error, then it's the responsibility -// of the caller to close it. Only a subset of the QueryOptions are supported: -// Datacenter, AllowStale, and Token. 
-func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) { - r := s.c.newRequest("GET", "/v1/snapshot") - r.setQueryOptions(q) - - rtt, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - return resp.Body, qm, nil -} - -// Restore streams in an existing snapshot and attempts to restore it. -func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { - r := s.c.newRequest("PUT", "/v1/snapshot") - r.body = in - r.setWriteOptions(q) - _, _, err := requireOK(s.c.doRequest(r)) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go deleted file mode 100644 index 74ef61a678f..00000000000 --- a/vendor/github.com/hashicorp/consul/api/status.go +++ /dev/null @@ -1,43 +0,0 @@ -package api - -// Status can be used to query the Status endpoints -type Status struct { - c *Client -} - -// Status returns a handle to the status endpoints -func (c *Client) Status() *Status { - return &Status{c} -} - -// Leader is used to query for a known leader -func (s *Status) Leader() (string, error) { - r := s.c.newRequest("GET", "/v1/status/leader") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return "", err - } - defer resp.Body.Close() - - var leader string - if err := decodeBody(resp, &leader); err != nil { - return "", err - } - return leader, nil -} - -// Peers is used to query for a known raft peers -func (s *Status) Peers() ([]string, error) { - r := s.c.newRequest("GET", "/v1/status/peers") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var peers []string - if err := decodeBody(resp, &peers); err != nil { - return nil, err - } - return peers, nil -} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE 
b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE deleted file mode 100644 index e87a115e462..00000000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. 
- -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. 
Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. 
Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. 
Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. 
Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. 
- -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile deleted file mode 100644 index da17640e644..00000000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -default: test - -test: - go vet ./... - go test -race ./... - -updatedeps: - go get -f -t -u ./... - go get -f -u ./... - -.PHONY: default test updatedeps diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md deleted file mode 100644 index 0d6f9ed40af..00000000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/README.md +++ /dev/null @@ -1,43 +0,0 @@ -go-retryablehttp -================ - -[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis] -[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] - -[travis]: http://travis-ci.org/hashicorp/go-retryablehttp -[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp - -The `retryablehttp` package provides a familiar HTTP client interface with -automatic retries and exponential backoff. It is a thin wrapper over the -standard `net/http` client library and exposes nearly the same public API. This -makes `retryablehttp` very easy to drop into existing programs. - -`retryablehttp` performs automatic retries under certain conditions. Mainly, if -an error is returned by the client (connection errors, etc.), or if a 500-range -response code is received, then a retry is invoked after a wait period. -Otherwise, the response is returned and left to the caller to interpret. - -The main difference from `net/http` is that requests which take a request body -(POST/PUT et. 
al) require an `io.ReadSeeker` to be provided. This enables the -request body to be "rewound" if the initial request fails so that the full -request can be attempted again. - -Example Use -=========== - -Using this library should look almost identical to what you would do with -`net/http`. The most simple example of a GET request is shown below: - -```go -resp, err := retryablehttp.Get("/foo") -if err != nil { - panic(err) -} -``` - -The returned response object is an `*http.Response`, the same thing you would -usually get from `net/http`. Had the request failed one or more times, the above -call would block and retry with exponential backoff. - -For more usage and examples see the -[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp). diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go deleted file mode 100644 index 2ecd1ae8828..00000000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ /dev/null @@ -1,311 +0,0 @@ -// The retryablehttp package provides a familiar HTTP client interface with -// automatic retries and exponential backoff. It is a thin wrapper over the -// standard net/http client library and exposes nearly the same public API. -// This makes retryablehttp very easy to drop into existing programs. -// -// retryablehttp performs automatic retries under certain conditions. Mainly, if -// an error is returned by the client (connection errors etc), or if a 500-range -// response is received, then a retry is invoked. Otherwise, the response is -// returned and left to the caller to interpret. -// -// The main difference from net/http is that requests which take a request body -// (POST/PUT et. al) require an io.ReadSeeker to be provided. This enables the -// request body to be "rewound" if the initial request fails so that the full -// request can be attempted again. 
-package retryablehttp - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "math" - "net/http" - "net/url" - "os" - "strings" - "time" - - "github.com/hashicorp/go-cleanhttp" -) - -var ( - // Default retry configuration - defaultRetryWaitMin = 1 * time.Second - defaultRetryWaitMax = 30 * time.Second - defaultRetryMax = 4 - - // defaultClient is used for performing requests without explicitly making - // a new client. It is purposely private to avoid modifications. - defaultClient = NewClient() - - // We need to consume response bodies to maintain http connections, but - // limit the size we consume to respReadLimit. - respReadLimit = int64(4096) -) - -// LenReader is an interface implemented by many in-memory io.Reader's. Used -// for automatically sending the right Content-Length header when possible. -type LenReader interface { - Len() int -} - -// Request wraps the metadata needed to create HTTP requests. -type Request struct { - // body is a seekable reader over the request body payload. This is - // used to rewind the request data in between retries. - body io.ReadSeeker - - // Embed an HTTP request directly. This makes a *Request act exactly - // like an *http.Request so that all meta methods are supported. - *http.Request -} - -// NewRequest creates a new wrapped request. -func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) { - // Wrap the body in a noop ReadCloser if non-nil. This prevents the - // reader from being closed by the HTTP client. - var rcBody io.ReadCloser - if body != nil { - rcBody = ioutil.NopCloser(body) - } - - // Make the request with the noop-closer for the body. - httpReq, err := http.NewRequest(method, url, rcBody) - if err != nil { - return nil, err - } - - // Check if we can set the Content-Length automatically. - if lr, ok := body.(LenReader); ok { - httpReq.ContentLength = int64(lr.Len()) - } - - return &Request{body, httpReq}, nil -} - -// RequestLogHook allows a function to run before each retry. 
The HTTP -// request which will be made, and the retry number (0 for the initial -// request) are available to users. The internal logger is exposed to -// consumers. -type RequestLogHook func(*log.Logger, *http.Request, int) - -// ResponseLogHook is like RequestLogHook, but allows running a function -// on each HTTP response. This function will be invoked at the end of -// every HTTP request executed, regardless of whether a subsequent retry -// needs to be performed or not. If the response body is read or closed -// from this method, this will affect the response returned from Do(). -type ResponseLogHook func(*log.Logger, *http.Response) - -// CheckRetry specifies a policy for handling retries. It is called -// following each request with the response and error values returned by -// the http.Client. If CheckRetry returns false, the Client stops retrying -// and returns the response to the caller. If CheckRetry returns an error, -// that error value is returned in lieu of the error from the request. The -// Client will close any response body when retrying, but if the retry is -// aborted it is up to the CheckResponse callback to properly close any -// response body before returning. -type CheckRetry func(resp *http.Response, err error) (bool, error) - -// Backoff specifies a policy for how long to wait between retries. -// It is called after a failing request to determine the amount of time -// that should pass before trying again. -type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration - -// Client is used to make HTTP requests. It adds additional functionality -// like automatic retries to tolerate minor outages. -type Client struct { - HTTPClient *http.Client // Internal HTTP client. - Logger *log.Logger // Customer logger instance. 
- - RetryWaitMin time.Duration // Minimum time to wait - RetryWaitMax time.Duration // Maximum time to wait - RetryMax int // Maximum number of retries - - // RequestLogHook allows a user-supplied function to be called - // before each retry. - RequestLogHook RequestLogHook - - // ResponseLogHook allows a user-supplied function to be called - // with the response from each HTTP request executed. - ResponseLogHook ResponseLogHook - - // CheckRetry specifies the policy for handling retries, and is called - // after each request. The default policy is DefaultRetryPolicy. - CheckRetry CheckRetry - - // Backoff specifies the policy for how long to wait between retries - Backoff Backoff -} - -// NewClient creates a new Client with default settings. -func NewClient() *Client { - return &Client{ - HTTPClient: cleanhttp.DefaultClient(), - Logger: log.New(os.Stderr, "", log.LstdFlags), - RetryWaitMin: defaultRetryWaitMin, - RetryWaitMax: defaultRetryWaitMax, - RetryMax: defaultRetryMax, - CheckRetry: DefaultRetryPolicy, - Backoff: DefaultBackoff, - } -} - -// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which -// will retry on connection errors and server errors. -func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) { - if err != nil { - return true, err - } - // Check the response code. We retry on 500-range responses to allow - // the server time to recover, as 500's are typically not permanent - // errors and may relate to outages on the server side. This will catch - // invalid response codes as well, like 0 and 999. - if resp.StatusCode == 0 || resp.StatusCode >= 500 { - return true, nil - } - - return false, nil -} - -// DefaultBackoff provides a default callback for Client.Backoff which -// will perform exponential backoff based on the attempt number and limited -// by the provided minimum and maximum durations. 
-func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { - mult := math.Pow(2, float64(attemptNum)) * float64(min) - sleep := time.Duration(mult) - if float64(sleep) != mult || sleep > max { - sleep = max - } - return sleep -} - -// Do wraps calling an HTTP method with retries. -func (c *Client) Do(req *Request) (*http.Response, error) { - c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL) - - for i := 0; ; i++ { - var code int // HTTP response code - - // Always rewind the request body when non-nil. - if req.body != nil { - if _, err := req.body.Seek(0, 0); err != nil { - return nil, fmt.Errorf("failed to seek body: %v", err) - } - } - - if c.RequestLogHook != nil { - c.RequestLogHook(c.Logger, req.Request, i) - } - - // Attempt the request - resp, err := c.HTTPClient.Do(req.Request) - - // Check if we should continue with retries. - checkOK, checkErr := c.CheckRetry(resp, err) - - if err != nil { - c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) - } else { - // Call this here to maintain the behavior of logging all requests, - // even if CheckRetry signals to stop. - if c.ResponseLogHook != nil { - // Call the response logger function if provided. - c.ResponseLogHook(c.Logger, resp) - } - } - - // Now decide if we should continue. - if !checkOK { - if checkErr != nil { - err = checkErr - } - return resp, err - } - - // We're going to retry, consume any response to reuse the connection. 
- if err == nil { - c.drainBody(resp.Body) - } - - remain := c.RetryMax - i - if remain == 0 { - break - } - wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) - desc := fmt.Sprintf("%s %s", req.Method, req.URL) - if code > 0 { - desc = fmt.Sprintf("%s (status: %d)", desc, code) - } - c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) - time.Sleep(wait) - } - - // Return an error if we fall out of the retry loop - return nil, fmt.Errorf("%s %s giving up after %d attempts", - req.Method, req.URL, c.RetryMax+1) -} - -// Try to read the response body so we can reuse this connection. -func (c *Client) drainBody(body io.ReadCloser) { - defer body.Close() - _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) - if err != nil { - c.Logger.Printf("[ERR] error reading response body: %v", err) - } -} - -// Get is a shortcut for doing a GET request without making a new client. -func Get(url string) (*http.Response, error) { - return defaultClient.Get(url) -} - -// Get is a convenience helper for doing simple GET requests. -func (c *Client) Get(url string) (*http.Response, error) { - req, err := NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return c.Do(req) -} - -// Head is a shortcut for doing a HEAD request without making a new client. -func Head(url string) (*http.Response, error) { - return defaultClient.Head(url) -} - -// Head is a convenience method for doing simple HEAD requests. -func (c *Client) Head(url string) (*http.Response, error) { - req, err := NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return c.Do(req) -} - -// Post is a shortcut for doing a POST request without making a new client. -func Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { - return defaultClient.Post(url, bodyType, body) -} - -// Post is a convenience method for doing simple POST requests. 
-func (c *Client) Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { - req, err := NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return c.Do(req) -} - -// PostForm is a shortcut to perform a POST with form data without creating -// a new client. -func PostForm(url string, data url.Values) (*http.Response, error) { - return defaultClient.PostForm(url, data) -} - -// PostForm is a convenience method for doing simple POST operations using -// pre-filled url.Values form data. -func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { - return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE deleted file mode 100644 index c33dcc7c928..00000000000 --- a/vendor/github.com/hashicorp/serf/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. 
- - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. 
You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. 
- diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go deleted file mode 100644 index 403ec780142..00000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/client.go +++ /dev/null @@ -1,232 +0,0 @@ -package coordinate - -import ( - "fmt" - "math" - "sort" - "sync" - "time" -) - -// Client manages the estimated network coordinate for a given node, and adjusts -// it as the node observes round trip times and estimated coordinates from other -// nodes. The core algorithm is based on Vivaldi, see the documentation for Config -// for more details. -type Client struct { - // coord is the current estimate of the client's network coordinate. - coord *Coordinate - - // origin is a coordinate sitting at the origin. - origin *Coordinate - - // config contains the tuning parameters that govern the performance of - // the algorithm. - config *Config - - // adjustmentIndex is the current index into the adjustmentSamples slice. - adjustmentIndex uint - - // adjustment is used to store samples for the adjustment calculation. - adjustmentSamples []float64 - - // latencyFilterSamples is used to store the last several RTT samples, - // keyed by node name. We will use the config's LatencyFilterSamples - // value to determine how many samples we keep, per node. - latencyFilterSamples map[string][]float64 - - // stats is used to record events that occur when updating coordinates. - stats ClientStats - - // mutex enables safe concurrent access to the client. - mutex sync.RWMutex -} - -// ClientStats is used to record events that occur when updating coordinates. -type ClientStats struct { - // Resets is incremented any time we reset our local coordinate because - // our calculations have resulted in an invalid state. - Resets int -} - -// NewClient creates a new Client and verifies the configuration is valid. 
-func NewClient(config *Config) (*Client, error) { - if !(config.Dimensionality > 0) { - return nil, fmt.Errorf("dimensionality must be >0") - } - - return &Client{ - coord: NewCoordinate(config), - origin: NewCoordinate(config), - config: config, - adjustmentIndex: 0, - adjustmentSamples: make([]float64, config.AdjustmentWindowSize), - latencyFilterSamples: make(map[string][]float64), - }, nil -} - -// GetCoordinate returns a copy of the coordinate for this client. -func (c *Client) GetCoordinate() *Coordinate { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return c.coord.Clone() -} - -// SetCoordinate forces the client's coordinate to a known state. -func (c *Client) SetCoordinate(coord *Coordinate) error { - c.mutex.Lock() - defer c.mutex.Unlock() - - if err := c.checkCoordinate(coord); err != nil { - return err - } - - c.coord = coord.Clone() - return nil -} - -// ForgetNode removes any client state for the given node. -func (c *Client) ForgetNode(node string) { - c.mutex.Lock() - defer c.mutex.Unlock() - - delete(c.latencyFilterSamples, node) -} - -// Stats returns a copy of stats for the client. -func (c *Client) Stats() ClientStats { - c.mutex.Lock() - defer c.mutex.Unlock() - - return c.stats -} - -// checkCoordinate returns an error if the coordinate isn't compatible with -// this client, or if the coordinate itself isn't valid. This assumes the mutex -// has been locked already. -func (c *Client) checkCoordinate(coord *Coordinate) error { - if !c.coord.IsCompatibleWith(coord) { - return fmt.Errorf("dimensions aren't compatible") - } - - if !coord.IsValid() { - return fmt.Errorf("coordinate is invalid") - } - - return nil -} - -// latencyFilter applies a simple moving median filter with a new sample for -// a node. This assumes that the mutex has been locked already. 
-func (c *Client) latencyFilter(node string, rttSeconds float64) float64 { - samples, ok := c.latencyFilterSamples[node] - if !ok { - samples = make([]float64, 0, c.config.LatencyFilterSize) - } - - // Add the new sample and trim the list, if needed. - samples = append(samples, rttSeconds) - if len(samples) > int(c.config.LatencyFilterSize) { - samples = samples[1:] - } - c.latencyFilterSamples[node] = samples - - // Sort a copy of the samples and return the median. - sorted := make([]float64, len(samples)) - copy(sorted, samples) - sort.Float64s(sorted) - return sorted[len(sorted)/2] -} - -// updateVivialdi updates the Vivaldi portion of the client's coordinate. This -// assumes that the mutex has been locked already. -func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) { - const zeroThreshold = 1.0e-6 - - dist := c.coord.DistanceTo(other).Seconds() - if rttSeconds < zeroThreshold { - rttSeconds = zeroThreshold - } - wrongness := math.Abs(dist-rttSeconds) / rttSeconds - - totalError := c.coord.Error + other.Error - if totalError < zeroThreshold { - totalError = zeroThreshold - } - weight := c.coord.Error / totalError - - c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight) - if c.coord.Error > c.config.VivaldiErrorMax { - c.coord.Error = c.config.VivaldiErrorMax - } - - delta := c.config.VivaldiCC * weight - force := delta * (rttSeconds - dist) - c.coord = c.coord.ApplyForce(c.config, force, other) -} - -// updateAdjustment updates the adjustment portion of the client's coordinate, if -// the feature is enabled. This assumes that the mutex has been locked already. -func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) { - if c.config.AdjustmentWindowSize == 0 { - return - } - - // Note that the existing adjustment factors don't figure in to this - // calculation so we use the raw distance here. 
- dist := c.coord.rawDistanceTo(other) - c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist - c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize - - sum := 0.0 - for _, sample := range c.adjustmentSamples { - sum += sample - } - c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize)) -} - -// updateGravity applies a small amount of gravity to pull coordinates towards -// the center of the coordinate system to combat drift. This assumes that the -// mutex is locked already. -func (c *Client) updateGravity() { - dist := c.origin.DistanceTo(c.coord).Seconds() - force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0) - c.coord = c.coord.ApplyForce(c.config, force, c.origin) -} - -// Update takes other, a coordinate for another node, and rtt, a round trip -// time observation for a ping to that node, and updates the estimated position of -// the client's coordinate. Returns the updated coordinate. -func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) { - c.mutex.Lock() - defer c.mutex.Unlock() - - if err := c.checkCoordinate(other); err != nil { - return nil, err - } - - const maxRTT = 10 * time.Second - if rtt <= 0 || rtt > maxRTT { - return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v ", rtt, maxRTT) - } - - rttSeconds := c.latencyFilter(node, rtt.Seconds()) - c.updateVivaldi(other, rttSeconds) - c.updateAdjustment(other, rttSeconds) - c.updateGravity() - if !c.coord.IsValid() { - c.stats.Resets++ - c.coord = NewCoordinate(c.config) - } - - return c.coord.Clone(), nil -} - -// DistanceTo returns the estimated RTT from the client's coordinate to other, the -// coordinate for another node. 
-func (c *Client) DistanceTo(other *Coordinate) time.Duration { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return c.coord.DistanceTo(other) -} diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go deleted file mode 100644 index b85a8ab7b00..00000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/config.go +++ /dev/null @@ -1,70 +0,0 @@ -package coordinate - -// Config is used to set the parameters of the Vivaldi-based coordinate mapping -// algorithm. -// -// The following references are called out at various points in the documentation -// here: -// -// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system." -// ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004. -// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates -// in the Wild." NSDI. Vol. 7. 2007. -// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for -// host-based network coordinate systems." Networking, IEEE/ACM Transactions -// on 18.1 (2010): 27-40. -type Config struct { - // The dimensionality of the coordinate system. As discussed in [2], more - // dimensions improves the accuracy of the estimates up to a point. Per [2] - // we chose 8 dimensions plus a non-Euclidean height. - Dimensionality uint - - // VivaldiErrorMax is the default error value when a node hasn't yet made - // any observations. It also serves as an upper limit on the error value in - // case observations cause the error value to increase without bound. - VivaldiErrorMax float64 - - // VivaldiCE is a tuning factor that controls the maximum impact an - // observation can have on a node's confidence. See [1] for more details. - VivaldiCE float64 - - // VivaldiCC is a tuning factor that controls the maximum impact an - // observation can have on a node's coordinate. See [1] for more details. 
- VivaldiCC float64 - - // AdjustmentWindowSize is a tuning factor that determines how many samples - // we retain to calculate the adjustment factor as discussed in [3]. Setting - // this to zero disables this feature. - AdjustmentWindowSize uint - - // HeightMin is the minimum value of the height parameter. Since this - // always must be positive, it will introduce a small amount error, so - // the chosen value should be relatively small compared to "normal" - // coordinates. - HeightMin float64 - - // LatencyFilterSamples is the maximum number of samples that are retained - // per node, in order to compute a median. The intent is to ride out blips - // but still keep the delay low, since our time to probe any given node is - // pretty infrequent. See [2] for more details. - LatencyFilterSize uint - - // GravityRho is a tuning factor that sets how much gravity has an effect - // to try to re-center coordinates. See [2] for more details. - GravityRho float64 -} - -// DefaultConfig returns a Config that has some default values suitable for -// basic testing of the algorithm, but not tuned to any particular type of cluster. -func DefaultConfig() *Config { - return &Config{ - Dimensionality: 8, - VivaldiErrorMax: 1.5, - VivaldiCE: 0.25, - VivaldiCC: 0.25, - AdjustmentWindowSize: 20, - HeightMin: 10.0e-6, - LatencyFilterSize: 3, - GravityRho: 150.0, - } -} diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go deleted file mode 100644 index fbe792c90d4..00000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go +++ /dev/null @@ -1,203 +0,0 @@ -package coordinate - -import ( - "math" - "math/rand" - "time" -) - -// Coordinate is a specialized structure for holding network coordinates for the -// Vivaldi-based coordinate mapping algorithm. All of the fields should be public -// to enable this to be serialized. All values in here are in units of seconds. 
-type Coordinate struct { - // Vec is the Euclidean portion of the coordinate. This is used along - // with the other fields to provide an overall distance estimate. The - // units here are seconds. - Vec []float64 - - // Err reflects the confidence in the given coordinate and is updated - // dynamically by the Vivaldi Client. This is dimensionless. - Error float64 - - // Adjustment is a distance offset computed based on a calculation over - // observations from all other nodes over a fixed window and is updated - // dynamically by the Vivaldi Client. The units here are seconds. - Adjustment float64 - - // Height is a distance offset that accounts for non-Euclidean effects - // which model the access links from nodes to the core Internet. The access - // links are usually set by bandwidth and congestion, and the core links - // usually follow distance based on geography. - Height float64 -} - -const ( - // secondsToNanoseconds is used to convert float seconds to nanoseconds. - secondsToNanoseconds = 1.0e9 - - // zeroThreshold is used to decide if two coordinates are on top of each - // other. - zeroThreshold = 1.0e-6 -) - -// ErrDimensionalityConflict will be panic-d if you try to perform operations -// with incompatible dimensions. -type DimensionalityConflictError struct{} - -// Adds the error interface. -func (e DimensionalityConflictError) Error() string { - return "coordinate dimensionality does not match" -} - -// NewCoordinate creates a new coordinate at the origin, using the given config -// to supply key initial values. -func NewCoordinate(config *Config) *Coordinate { - return &Coordinate{ - Vec: make([]float64, config.Dimensionality), - Error: config.VivaldiErrorMax, - Adjustment: 0.0, - Height: config.HeightMin, - } -} - -// Clone creates an independent copy of this coordinate. 
-func (c *Coordinate) Clone() *Coordinate { - vec := make([]float64, len(c.Vec)) - copy(vec, c.Vec) - return &Coordinate{ - Vec: vec, - Error: c.Error, - Adjustment: c.Adjustment, - Height: c.Height, - } -} - -// componentIsValid returns false if a floating point value is a NaN or an -// infinity. -func componentIsValid(f float64) bool { - return !math.IsInf(f, 0) && !math.IsNaN(f) -} - -// IsValid returns false if any component of a coordinate isn't valid, per the -// componentIsValid() helper above. -func (c *Coordinate) IsValid() bool { - for i := range c.Vec { - if !componentIsValid(c.Vec[i]) { - return false - } - } - - return componentIsValid(c.Error) && - componentIsValid(c.Adjustment) && - componentIsValid(c.Height) -} - -// IsCompatibleWith checks to see if the two coordinates are compatible -// dimensionally. If this returns true then you are guaranteed to not get -// any runtime errors operating on them. -func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool { - return len(c.Vec) == len(other.Vec) -} - -// ApplyForce returns the result of applying the force from the direction of the -// other coordinate. -func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate { - if !c.IsCompatibleWith(other) { - panic(DimensionalityConflictError{}) - } - - ret := c.Clone() - unit, mag := unitVectorAt(c.Vec, other.Vec) - ret.Vec = add(ret.Vec, mul(unit, force)) - if mag > zeroThreshold { - ret.Height = (ret.Height+other.Height)*force/mag + ret.Height - ret.Height = math.Max(ret.Height, config.HeightMin) - } - return ret -} - -// DistanceTo returns the distance between this coordinate and the other -// coordinate, including adjustments. 
-func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration { - if !c.IsCompatibleWith(other) { - panic(DimensionalityConflictError{}) - } - - dist := c.rawDistanceTo(other) - adjustedDist := dist + c.Adjustment + other.Adjustment - if adjustedDist > 0.0 { - dist = adjustedDist - } - return time.Duration(dist * secondsToNanoseconds) -} - -// rawDistanceTo returns the Vivaldi distance between this coordinate and the -// other coordinate in seconds, not including adjustments. This assumes the -// dimensions have already been checked to be compatible. -func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { - return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height -} - -// add returns the sum of vec1 and vec2. This assumes the dimensions have -// already been checked to be compatible. -func add(vec1 []float64, vec2 []float64) []float64 { - ret := make([]float64, len(vec1)) - for i := range ret { - ret[i] = vec1[i] + vec2[i] - } - return ret -} - -// diff returns the difference between the vec1 and vec2. This assumes the -// dimensions have already been checked to be compatible. -func diff(vec1 []float64, vec2 []float64) []float64 { - ret := make([]float64, len(vec1)) - for i := range ret { - ret[i] = vec1[i] - vec2[i] - } - return ret -} - -// mul returns vec multiplied by a scalar factor. -func mul(vec []float64, factor float64) []float64 { - ret := make([]float64, len(vec)) - for i := range vec { - ret[i] = vec[i] * factor - } - return ret -} - -// magnitude computes the magnitude of the vec. -func magnitude(vec []float64) float64 { - sum := 0.0 - for i := range vec { - sum += vec[i] * vec[i] - } - return math.Sqrt(sum) -} - -// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two -// positions are the same then a random unit vector is returned. We also return -// the distance between the points for use in the later height calculation. 
-func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) { - ret := diff(vec1, vec2) - - // If the coordinates aren't on top of each other we can normalize. - if mag := magnitude(ret); mag > zeroThreshold { - return mul(ret, 1.0/mag), mag - } - - // Otherwise, just return a random unit vector. - for i := range ret { - ret[i] = rand.Float64() - 0.5 - } - if mag := magnitude(ret); mag > zeroThreshold { - return mul(ret, 1.0/mag), 0.0 - } - - // And finally just give up and make a unit vector along the first - // dimension. This should be exceedingly rare. - ret = make([]float64, len(ret)) - ret[0] = 1.0 - return ret, 0.0 -} diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go deleted file mode 100644 index 6fb033c0cd3..00000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/phantom.go +++ /dev/null @@ -1,187 +0,0 @@ -package coordinate - -import ( - "fmt" - "math" - "math/rand" - "time" -) - -// GenerateClients returns a slice with nodes number of clients, all with the -// given config. -func GenerateClients(nodes int, config *Config) ([]*Client, error) { - clients := make([]*Client, nodes) - for i, _ := range clients { - client, err := NewClient(config) - if err != nil { - return nil, err - } - - clients[i] = client - } - return clients, nil -} - -// GenerateLine returns a truth matrix as if all the nodes are in a straight linke -// with the given spacing between them. -func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration { - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - rtt := time.Duration(j-i) * spacing - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional -// grid with the given spacing between them. 
-func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration { - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - n := int(math.Sqrt(float64(nodes))) - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - x1, y1 := float64(i%n), float64(i/n) - x2, y2 := float64(j%n), float64(j/n) - dx, dy := x2-x1, y2-y1 - dist := math.Sqrt(dx*dx + dy*dy) - rtt := time.Duration(dist * float64(spacing)) - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// GenerateSplit returns a truth matrix as if half the nodes are close together in -// one location and half the nodes are close together in another. The lan factor -// is used to separate the nodes locally and the wan factor represents the split -// between the two sides. -func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration { - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - split := nodes / 2 - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - rtt := lan - if (i <= split && j > split) || (i > split && j <= split) { - rtt += wan - } - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed -// around a circle with the given radius. The first node is at the "center" of the -// circle because it's equidistant from all the other nodes, but we place it at -// double the radius, so it should show up above all the other nodes in height. 
-func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - var rtt time.Duration - if i == 0 { - rtt = 2 * radius - } else { - t1 := 2.0 * math.Pi * float64(i) / float64(nodes) - x1, y1 := math.Cos(t1), math.Sin(t1) - t2 := 2.0 * math.Pi * float64(j) / float64(nodes) - x2, y2 := math.Cos(t2), math.Sin(t2) - dx, dy := x2-x1, y2-y1 - dist := math.Sqrt(dx*dx + dy*dy) - rtt = time.Duration(dist * float64(radius)) - } - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// GenerateRandom returns a truth matrix for a set of nodes with normally -// distributed delays, with the given mean and deviation. The RNG is re-seeded -// so you always get the same matrix for a given size. -func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { - rand.Seed(1) - - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() - rtt := time.Duration(rttSeconds * secondsToNanoseconds) - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// Simulate runs the given number of cycles using the given list of clients and -// truth matrix. On each cycle, each client will pick a random node and observe -// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for -// each simulation run to get deterministic results (for this algorithm and the -// underlying algorithm which will use random numbers for position vectors when -// starting out with everything at the origin). 
-func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { - rand.Seed(1) - - nodes := len(clients) - for cycle := 0; cycle < cycles; cycle++ { - for i, _ := range clients { - if j := rand.Intn(nodes); j != i { - c := clients[j].GetCoordinate() - rtt := truth[i][j] - node := fmt.Sprintf("node_%d", j) - clients[i].Update(node, c, rtt) - } - } - } -} - -// Stats is returned from the Evaluate function with a summary of the algorithm -// performance. -type Stats struct { - ErrorMax float64 - ErrorAvg float64 -} - -// Evaluate uses the coordinates of the given clients to calculate estimated -// distances and compares them with the given truth matrix, returning summary -// stats. -func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) { - nodes := len(clients) - count := 0 - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds() - actual := truth[i][j].Seconds() - error := math.Abs(est-actual) / actual - stats.ErrorMax = math.Max(stats.ErrorMax, error) - stats.ErrorAvg += error - count += 1 - } - } - - stats.ErrorAvg /= float64(count) - fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax) - return -} diff --git a/vendor/github.com/hashicorp/terraform/backend/atlas/backend.go b/vendor/github.com/hashicorp/terraform/backend/atlas/backend.go deleted file mode 100644 index 660327ae025..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/atlas/backend.go +++ /dev/null @@ -1,163 +0,0 @@ -package atlas - -import ( - "context" - "fmt" - "net/url" - "os" - "strings" - "sync" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" - "github.com/mitchellh/colorstring" -) - -// Backend is an implementation of EnhancedBackend that performs all 
operations -// in Atlas. State must currently also be stored in Atlas, although it is worth -// investigating in the future if state storage can be external as well. -type Backend struct { - // CLI and Colorize control the CLI output. If CLI is nil then no CLI - // output will be done. If CLIColor is nil then no coloring will be done. - CLI cli.Ui - CLIColor *colorstring.Colorize - - // ContextOpts are the base context options to set when initializing a - // Terraform context. Many of these will be overridden or merged by - // Operation. See Operation for more details. - ContextOpts *terraform.ContextOpts - - //--------------------------------------------------------------- - // Internal fields, do not set - //--------------------------------------------------------------- - // stateClient is the legacy state client, setup in Configure - stateClient *stateClient - - // schema is the schema for configuration, set by init - schema *schema.Backend - once sync.Once - - // opLock locks operations - opLock sync.Mutex -} - -func (b *Backend) Input( - ui terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - b.once.Do(b.init) - return b.schema.Input(ui, c) -} - -func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) { - b.once.Do(b.init) - return b.schema.Validate(c) -} - -func (b *Backend) Configure(c *terraform.ResourceConfig) error { - b.once.Do(b.init) - return b.schema.Configure(c) -} - -func (b *Backend) States() ([]string, error) { - return nil, backend.ErrNamedStatesNotSupported -} - -func (b *Backend) DeleteState(name string) error { - return backend.ErrNamedStatesNotSupported -} - -func (b *Backend) State(name string) (state.State, error) { - if name != backend.DefaultStateName { - return nil, backend.ErrNamedStatesNotSupported - } - - return &remote.State{Client: b.stateClient}, nil -} - -// Colorize returns the Colorize structure that can be used for colorizing -// output. 
This is gauranteed to always return a non-nil value and so is useful -// as a helper to wrap any potentially colored strings. -func (b *Backend) Colorize() *colorstring.Colorize { - if b.CLIColor != nil { - return b.CLIColor - } - - return &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: true, - } -} - -func (b *Backend) init() { - b.schema = &schema.Backend{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: schemaDescriptions["name"], - }, - - "access_token": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: schemaDescriptions["access_token"], - DefaultFunc: schema.EnvDefaultFunc("ATLAS_TOKEN", nil), - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: schemaDescriptions["address"], - DefaultFunc: schema.EnvDefaultFunc("ATLAS_ADDRESS", defaultAtlasServer), - }, - }, - - ConfigureFunc: b.schemaConfigure, - } -} - -func (b *Backend) schemaConfigure(ctx context.Context) error { - d := schema.FromContextBackendConfig(ctx) - - // Parse the address - addr := d.Get("address").(string) - addrUrl, err := url.Parse(addr) - if err != nil { - return fmt.Errorf("Error parsing 'address': %s", err) - } - - // Parse the org/env - name := d.Get("name").(string) - parts := strings.Split(name, "/") - if len(parts) != 2 { - return fmt.Errorf("malformed name '%s', expected format '/'", name) - } - org := parts[0] - env := parts[1] - - // Setup the client - b.stateClient = &stateClient{ - Server: addr, - ServerURL: addrUrl, - AccessToken: d.Get("access_token").(string), - User: org, - Name: env, - - // This is optionally set during Atlas Terraform runs. - RunId: os.Getenv("ATLAS_RUN_ID"), - } - - return nil -} - -var schemaDescriptions = map[string]string{ - "name": "Full name of the environment in Atlas, such as 'hashicorp/myenv'", - "access_token": "Access token to use to access Atlas. 
If ATLAS_TOKEN is set then\n" + - "this will override any saved value for this.", - "address": "Address to your Atlas installation. This defaults to the publicly\n" + - "hosted version at 'https://atlas.hashicorp.com/'. This address\n" + - "should contain the full HTTP scheme to use.", -} diff --git a/vendor/github.com/hashicorp/terraform/backend/atlas/cli.go b/vendor/github.com/hashicorp/terraform/backend/atlas/cli.go deleted file mode 100644 index 5b3656eff3f..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/atlas/cli.go +++ /dev/null @@ -1,13 +0,0 @@ -package atlas - -import ( - "github.com/hashicorp/terraform/backend" -) - -// backend.CLI impl. -func (b *Backend) CLIInit(opts *backend.CLIOpts) error { - b.CLI = opts.CLI - b.CLIColor = opts.CLIColor - b.ContextOpts = opts.ContextOpts - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/atlas/state_client.go b/vendor/github.com/hashicorp/terraform/backend/atlas/state_client.go deleted file mode 100644 index e49cb719241..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/atlas/state_client.go +++ /dev/null @@ -1,319 +0,0 @@ -package atlas - -import ( - "bytes" - "crypto/md5" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "fmt" - "io" - "log" - "net/http" - "net/url" - "os" - "path" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-retryablehttp" - "github.com/hashicorp/go-rootcerts" - "github.com/hashicorp/terraform/state/remote" - "github.com/hashicorp/terraform/terraform" -) - -const ( - // defaultAtlasServer is used when no address is given - defaultAtlasServer = "https://atlas.hashicorp.com/" - atlasTokenHeader = "X-Atlas-Token" -) - -// AtlasClient implements the Client interface for an Atlas compatible server. 
-type stateClient struct { - Server string - ServerURL *url.URL - User string - Name string - AccessToken string - RunId string - HTTPClient *retryablehttp.Client - - conflictHandlingAttempted bool -} - -func (c *stateClient) Get() (*remote.Payload, error) { - // Make the HTTP request - req, err := retryablehttp.NewRequest("GET", c.url().String(), nil) - if err != nil { - return nil, fmt.Errorf("Failed to make HTTP request: %v", err) - } - - req.Header.Set(atlasTokenHeader, c.AccessToken) - - // Request the url - client, err := c.http() - if err != nil { - return nil, err - } - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - // Handle the common status codes - switch resp.StatusCode { - case http.StatusOK: - // Handled after - case http.StatusNoContent: - return nil, nil - case http.StatusNotFound: - return nil, nil - case http.StatusUnauthorized: - return nil, fmt.Errorf("HTTP remote state endpoint requires auth") - case http.StatusForbidden: - return nil, fmt.Errorf("HTTP remote state endpoint invalid auth") - case http.StatusInternalServerError: - return nil, fmt.Errorf("HTTP remote state internal server error") - default: - return nil, fmt.Errorf( - "Unexpected HTTP response code: %d\n\nBody: %s", - resp.StatusCode, c.readBody(resp.Body)) - } - - // Read in the body - buf := bytes.NewBuffer(nil) - if _, err := io.Copy(buf, resp.Body); err != nil { - return nil, fmt.Errorf("Failed to read remote state: %v", err) - } - - // Create the payload - payload := &remote.Payload{ - Data: buf.Bytes(), - } - - if len(payload.Data) == 0 { - return nil, nil - } - - // Check for the MD5 - if raw := resp.Header.Get("Content-MD5"); raw != "" { - md5, err := base64.StdEncoding.DecodeString(raw) - if err != nil { - return nil, fmt.Errorf("Failed to decode Content-MD5 '%s': %v", raw, err) - } - - payload.MD5 = md5 - } else { - // Generate the MD5 - hash := md5.Sum(payload.Data) - payload.MD5 = hash[:] - } - - return payload, nil -} 
- -func (c *stateClient) Put(state []byte) error { - // Get the target URL - base := c.url() - - // Generate the MD5 - hash := md5.Sum(state) - b64 := base64.StdEncoding.EncodeToString(hash[:]) - - // Make the HTTP client and request - req, err := retryablehttp.NewRequest("PUT", base.String(), bytes.NewReader(state)) - if err != nil { - return fmt.Errorf("Failed to make HTTP request: %v", err) - } - - // Prepare the request - req.Header.Set(atlasTokenHeader, c.AccessToken) - req.Header.Set("Content-MD5", b64) - req.Header.Set("Content-Type", "application/json") - req.ContentLength = int64(len(state)) - - // Make the request - client, err := c.http() - if err != nil { - return err - } - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("Failed to upload state: %v", err) - } - defer resp.Body.Close() - - // Handle the error codes - switch resp.StatusCode { - case http.StatusOK: - return nil - case http.StatusConflict: - return c.handleConflict(c.readBody(resp.Body), state) - default: - return fmt.Errorf( - "HTTP error: %d\n\nBody: %s", - resp.StatusCode, c.readBody(resp.Body)) - } -} - -func (c *stateClient) Delete() error { - // Make the HTTP request - req, err := retryablehttp.NewRequest("DELETE", c.url().String(), nil) - if err != nil { - return fmt.Errorf("Failed to make HTTP request: %v", err) - } - req.Header.Set(atlasTokenHeader, c.AccessToken) - - // Make the request - client, err := c.http() - if err != nil { - return err - } - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("Failed to delete state: %v", err) - } - defer resp.Body.Close() - - // Handle the error codes - switch resp.StatusCode { - case http.StatusOK: - return nil - case http.StatusNoContent: - return nil - case http.StatusNotFound: - return nil - default: - return fmt.Errorf( - "HTTP error: %d\n\nBody: %s", - resp.StatusCode, c.readBody(resp.Body)) - } -} - -func (c *stateClient) readBody(b io.Reader) string { - var buf bytes.Buffer - if _, err := 
io.Copy(&buf, b); err != nil { - return fmt.Sprintf("Error reading body: %s", err) - } - - result := buf.String() - if result == "" { - result = "" - } - - return result -} - -func (c *stateClient) url() *url.URL { - values := url.Values{} - - values.Add("atlas_run_id", c.RunId) - - return &url.URL{ - Scheme: c.ServerURL.Scheme, - Host: c.ServerURL.Host, - Path: path.Join("api/v1/terraform/state", c.User, c.Name), - RawQuery: values.Encode(), - } -} - -func (c *stateClient) http() (*retryablehttp.Client, error) { - if c.HTTPClient != nil { - return c.HTTPClient, nil - } - tlsConfig := &tls.Config{} - err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ - CAFile: os.Getenv("ATLAS_CAFILE"), - CAPath: os.Getenv("ATLAS_CAPATH"), - }) - if err != nil { - return nil, err - } - rc := retryablehttp.NewClient() - - rc.CheckRetry = func(resp *http.Response, err error) (bool, error) { - if err != nil { - // don't bother retrying if the certs don't match - if err, ok := err.(*url.Error); ok { - if _, ok := err.Err.(x509.UnknownAuthorityError); ok { - return false, nil - } - } - // continue retrying - return true, nil - } - return retryablehttp.DefaultRetryPolicy(resp, err) - } - - t := cleanhttp.DefaultTransport() - t.TLSClientConfig = tlsConfig - rc.HTTPClient.Transport = t - - c.HTTPClient = rc - return rc, nil -} - -// Atlas returns an HTTP 409 - Conflict if the pushed state reports the same -// Serial number but the checksum of the raw content differs. This can -// sometimes happen when Terraform changes state representation internally -// between versions in a way that's semantically neutral but affects the JSON -// output and therefore the checksum. 
-// -// Here we detect and handle this situation by ticking the serial and retrying -// iff for the previous state and the proposed state: -// -// * the serials match -// * the parsed states are Equal (semantically equivalent) -// -// In other words, in this situation Terraform can override Atlas's detected -// conflict by asserting that the state it is pushing is indeed correct. -func (c *stateClient) handleConflict(msg string, state []byte) error { - log.Printf("[DEBUG] Handling Atlas conflict response: %s", msg) - - if c.conflictHandlingAttempted { - log.Printf("[DEBUG] Already attempted conflict resolution; returning conflict.") - } else { - c.conflictHandlingAttempted = true - log.Printf("[DEBUG] Atlas reported conflict, checking for equivalent states.") - - payload, err := c.Get() - if err != nil { - return conflictHandlingError(err) - } - - currentState, err := terraform.ReadState(bytes.NewReader(payload.Data)) - if err != nil { - return conflictHandlingError(err) - } - - proposedState, err := terraform.ReadState(bytes.NewReader(state)) - if err != nil { - return conflictHandlingError(err) - } - - if statesAreEquivalent(currentState, proposedState) { - log.Printf("[DEBUG] States are equivalent, incrementing serial and retrying.") - proposedState.Serial++ - var buf bytes.Buffer - if err := terraform.WriteState(proposedState, &buf); err != nil { - return conflictHandlingError(err) - - } - return c.Put(buf.Bytes()) - } else { - log.Printf("[DEBUG] States are not equivalent, returning conflict.") - } - } - - return fmt.Errorf( - "Atlas detected a remote state conflict.\n\nMessage: %s", msg) -} - -func conflictHandlingError(err error) error { - return fmt.Errorf( - "Error while handling a conflict response from Atlas: %s", err) -} - -func statesAreEquivalent(current, proposed *terraform.State) bool { - return current.Serial == proposed.Serial && current.Equal(proposed) -} diff --git a/vendor/github.com/hashicorp/terraform/backend/backend.go 
b/vendor/github.com/hashicorp/terraform/backend/backend.go deleted file mode 100644 index efa42d4b6fa..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/backend.go +++ /dev/null @@ -1,169 +0,0 @@ -// Package backend provides interfaces that the CLI uses to interact with -// Terraform. A backend provides the abstraction that allows the same CLI -// to simultaneously support both local and remote operations for seamlessly -// using Terraform in a team environment. -package backend - -import ( - "context" - "errors" - "time" - - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -// This is the name of the default, initial state that every backend -// must have. This state cannot be deleted. -const DefaultStateName = "default" - -// Error value to return when a named state operation isn't supported. -// This must be returned rather than a custom error so that the Terraform -// CLI can detect it and handle it appropriately. -var ErrNamedStatesNotSupported = errors.New("named states not supported") - -// Backend is the minimal interface that must be implemented to enable Terraform. -type Backend interface { - // Ask for input and configure the backend. Similar to - // terraform.ResourceProvider. - Input(terraform.UIInput, *terraform.ResourceConfig) (*terraform.ResourceConfig, error) - Validate(*terraform.ResourceConfig) ([]string, []error) - Configure(*terraform.ResourceConfig) error - - // State returns the current state for this environment. This state may - // not be loaded locally: the proper APIs should be called on state.State - // to load the state. If the state.State is a state.Locker, it's up to the - // caller to call Lock and Unlock as needed. - // - // If the named state doesn't exist it will be created. The "default" state - // is always assumed to exist. - State(name string) (state.State, error) - - // DeleteState removes the named state if it exists. 
It is an error - // to delete the default state. - // - // DeleteState does not prevent deleting a state that is in use. It is the - // responsibility of the caller to hold a Lock on the state when calling - // this method. - DeleteState(name string) error - - // States returns a list of configured named states. - States() ([]string, error) -} - -// Enhanced implements additional behavior on top of a normal backend. -// -// Enhanced backends allow customizing the behavior of Terraform operations. -// This allows Terraform to potentially run operations remotely, load -// configurations from external sources, etc. -type Enhanced interface { - Backend - - // Operation performs a Terraform operation such as refresh, plan, apply. - // It is up to the implementation to determine what "performing" means. - // This DOES NOT BLOCK. The context returned as part of RunningOperation - // should be used to block for completion. - // If the state used in the operation can be locked, it is the - // responsibility of the Backend to lock the state for the duration of the - // running operation. - Operation(context.Context, *Operation) (*RunningOperation, error) -} - -// Local implements additional behavior on a Backend that allows local -// operations in addition to remote operations. -// -// This enables more behaviors of Terraform that require more data such -// as `console`, `import`, `graph`. These require direct access to -// configurations, variables, and more. Not all backends may support this -// so we separate it out into its own optional interface. -type Local interface { - // Context returns a runnable terraform Context. The operation parameter - // doesn't need a Type set but it needs other options set such as Module. - Context(*Operation) (*terraform.Context, state.State, error) -} - -// An operation represents an operation for Terraform to execute. -// -// Note that not all fields are supported by all backends and can result -// in an error if set. 
All backend implementations should show user-friendly -// errors explaining any incorrectly set values. For example, the local -// backend doesn't support a PlanId being set. -// -// The operation options are purposely designed to have maximal compatibility -// between Terraform and Terraform Servers (a commercial product offered by -// HashiCorp). Therefore, it isn't expected that other implementation support -// every possible option. The struct here is generalized in order to allow -// even partial implementations to exist in the open, without walling off -// remote functionality 100% behind a commercial wall. Anyone can implement -// against this interface and have Terraform interact with it just as it -// would with HashiCorp-provided Terraform Servers. -type Operation struct { - // Type is the operation to perform. - Type OperationType - - // PlanId is an opaque value that backends can use to execute a specific - // plan for an apply operation. - // - // PlanOutBackend is the backend to store with the plan. This is the - // backend that will be used when applying the plan. - PlanId string - PlanRefresh bool // PlanRefresh will do a refresh before a plan - PlanOutPath string // PlanOutPath is the path to save the plan - PlanOutBackend *terraform.BackendState - - // Module settings specify the root module to use for operations. - Module *module.Tree - - // Plan is a plan that was passed as an argument. This is valid for - // plan and apply arguments but may not work for all backends. - Plan *terraform.Plan - - // The options below are more self-explanatory and affect the runtime - // behavior of the operation. - Destroy bool - Targets []string - Variables map[string]interface{} - AutoApprove bool - DestroyForce bool - - // Input/output/control options. - UIIn terraform.UIInput - UIOut terraform.UIOutput - - // If LockState is true, the Operation must Lock any - // state.Lockers for its duration, and Unlock when complete. 
- LockState bool - - // The duration to retry obtaining a State lock. - StateLockTimeout time.Duration - - // Workspace is the name of the workspace that this operation should run - // in, which controls which named state is used. - Workspace string -} - -// RunningOperation is the result of starting an operation. -type RunningOperation struct { - // Context should be used to track Done and Err for errors. - // - // For implementers of a backend, this context should not wrap the - // passed in context. Otherwise, canceling the parent context will - // immediately mark this context as "done" but those aren't the semantics - // we want: we want this context to be done only when the operation itself - // is fully done. - context.Context - - // Err is the error of the operation. This is populated after - // the operation has completed. - Err error - - // PlanEmpty is populated after a Plan operation completes without error - // to note whether a plan is empty or has changes. - PlanEmpty bool - - // State is the final state after the operation completed. Persisting - // this state is managed by the backend. This should only be read - // after the operation completes to avoid read/write races. - State *terraform.State -} diff --git a/vendor/github.com/hashicorp/terraform/backend/cli.go b/vendor/github.com/hashicorp/terraform/backend/cli.go deleted file mode 100644 index 40a66e69859..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/cli.go +++ /dev/null @@ -1,83 +0,0 @@ -package backend - -import ( - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" - "github.com/mitchellh/colorstring" -) - -// CLI is an optional interface that can be implemented to be initialized -// with information from the Terraform CLI. If this is implemented, this -// initialization function will be called with data to help interact better -// with a CLI. 
-// -// This interface was created to improve backend interaction with the -// official Terraform CLI while making it optional for API users to have -// to provide full CLI interaction to every backend. -// -// If you're implementing a Backend, it is acceptable to require CLI -// initialization. In this case, your backend should be coded to error -// on other methods (such as State, Operation) if CLI initialization was not -// done with all required fields. -type CLI interface { - Backend - - // CLIIinit is called once with options. The options passed to this - // function may not be modified after calling this since they can be - // read/written at any time by the Backend implementation. - // - // This may be called before or after Configure is called, so if settings - // here affect configurable settings, care should be taken to handle - // whether they should be overwritten or not. - CLIInit(*CLIOpts) error -} - -// CLIOpts are the options passed into CLIInit for the CLI interface. -// -// These options represent the functionality the CLI exposes and often -// maps to meta-flags available on every CLI (such as -input). -// -// When implementing a backend, it isn't expected that every option applies. -// Your backend should be documented clearly to explain to end users what -// options have an affect and what won't. In some cases, it may even make sense -// to error in your backend when an option is set so that users don't make -// a critically incorrect assumption about behavior. -type CLIOpts struct { - // CLI and Colorize control the CLI output. If CLI is nil then no CLI - // output will be done. If CLIColor is nil then no coloring will be done. - CLI cli.Ui - CLIColor *colorstring.Colorize - - // StatePath is the local path where state is read from. - // - // StateOutPath is the local path where the state will be written. - // If this is empty, it will default to StatePath. - // - // StateBackupPath is the local path where a backup file will be written. 
- // If this is empty, no backup will be taken. - StatePath string - StateOutPath string - StateBackupPath string - - // ContextOpts are the base context options to set when initializing a - // Terraform context. Many of these will be overridden or merged by - // Operation. See Operation for more details. - ContextOpts *terraform.ContextOpts - - // Input will ask for necessary input prior to performing any operations. - // - // Validation will perform validation prior to running an operation. The - // variable naming doesn't match the style of others since we have a func - // Validate. - Input bool - Validation bool - - // RunningInAutomation indicates that commands are being run by an - // automated system rather than directly at a command prompt. - // - // This is a hint not to produce messages that expect that a user can - // run a follow-up command, perhaps because Terraform is running in - // some sort of workflow automation tool that abstracts away the - // exact commands that are being run. - RunningInAutomation bool -} diff --git a/vendor/github.com/hashicorp/terraform/backend/init/init.go b/vendor/github.com/hashicorp/terraform/backend/init/init.go deleted file mode 100644 index d499968c742..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/init/init.go +++ /dev/null @@ -1,124 +0,0 @@ -// Package init contains the list of backends that can be initialized and -// basic helper functions for initializing those backends. 
-package init - -import ( - "sync" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/terraform" - - backendatlas "github.com/hashicorp/terraform/backend/atlas" - backendlegacy "github.com/hashicorp/terraform/backend/legacy" - backendlocal "github.com/hashicorp/terraform/backend/local" - backendAzure "github.com/hashicorp/terraform/backend/remote-state/azure" - backendconsul "github.com/hashicorp/terraform/backend/remote-state/consul" - backendetcdv3 "github.com/hashicorp/terraform/backend/remote-state/etcdv3" - backendGCS "github.com/hashicorp/terraform/backend/remote-state/gcs" - backendinmem "github.com/hashicorp/terraform/backend/remote-state/inmem" - backendManta "github.com/hashicorp/terraform/backend/remote-state/manta" - backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3" - backendSwift "github.com/hashicorp/terraform/backend/remote-state/swift" -) - -// backends is the list of available backends. This is a global variable -// because backends are currently hardcoded into Terraform and can't be -// modified without recompilation. -// -// To read an available backend, use the Backend function. This ensures -// safe concurrent read access to the list of built-in backends. -// -// Backends are hardcoded into Terraform because the API for backends uses -// complex structures and supporting that over the plugin system is currently -// prohibitively difficult. For those wanting to implement a custom backend, -// they can do so with recompilation. -var backends map[string]func() backend.Backend -var backendsLock sync.Mutex - -func init() { - // Our hardcoded backends. We don't need to acquire a lock here - // since init() code is serial and can't spawn goroutines. 
- backends = map[string]func() backend.Backend{ - "atlas": func() backend.Backend { return &backendatlas.Backend{} }, - "local": func() backend.Backend { return &backendlocal.Local{} }, - "consul": func() backend.Backend { return backendconsul.New() }, - "inmem": func() backend.Backend { return backendinmem.New() }, - "swift": func() backend.Backend { return backendSwift.New() }, - "s3": func() backend.Backend { return backendS3.New() }, - "azure": deprecateBackend(backendAzure.New(), - `Warning: "azure" name is deprecated, please use "azurerm"`), - "azurerm": func() backend.Backend { return backendAzure.New() }, - "etcdv3": func() backend.Backend { return backendetcdv3.New() }, - "gcs": func() backend.Backend { return backendGCS.New() }, - "manta": func() backend.Backend { return backendManta.New() }, - } - - // Add the legacy remote backends that haven't yet been convertd to - // the new backend API. - backendlegacy.Init(backends) -} - -// Backend returns the initialization factory for the given backend, or -// nil if none exists. -func Backend(name string) func() backend.Backend { - backendsLock.Lock() - defer backendsLock.Unlock() - return backends[name] -} - -// Set sets a new backend in the list of backends. If f is nil then the -// backend will be removed from the map. If this backend already exists -// then it will be overwritten. -// -// This method sets this backend globally and care should be taken to do -// this only before Terraform is executing to prevent odd behavior of backends -// changing mid-execution. -func Set(name string, f func() backend.Backend) { - backendsLock.Lock() - defer backendsLock.Unlock() - - if f == nil { - delete(backends, name) - return - } - - backends[name] = f -} - -// deprecatedBackendShim is used to wrap a backend and inject a deprecation -// warning into the Validate method. -type deprecatedBackendShim struct { - backend.Backend - Message string -} - -// Validate the Backend then add the deprecation warning. 
-func (b deprecatedBackendShim) Validate(c *terraform.ResourceConfig) ([]string, []error) { - warns, errs := b.Backend.Validate(c) - warns = append(warns, b.Message) - return warns, errs -} - -// DeprecateBackend can be used to wrap a backend to retrun a deprecation -// warning during validation. -func deprecateBackend(b backend.Backend, message string) func() backend.Backend { - // Since a Backend wrapped by deprecatedBackendShim can no longer be - // asserted as an Enhanced or Local backend, disallow those types here - // entirely. If something other than a basic backend.Backend needs to be - // deprecated, we can add that functionality to schema.Backend or the - // backend itself. - if _, ok := b.(backend.Enhanced); ok { - panic("cannot use DeprecateBackend on an Enhanced Backend") - } - - if _, ok := b.(backend.Local); ok { - panic("cannot use DeprecateBackend on a Local Backend") - } - - return func() backend.Backend { - return deprecatedBackendShim{ - Backend: b, - Message: message, - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/backend/legacy/backend.go b/vendor/github.com/hashicorp/terraform/backend/legacy/backend.go deleted file mode 100644 index a8b0cad9fb5..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/legacy/backend.go +++ /dev/null @@ -1,75 +0,0 @@ -package legacy - -import ( - "fmt" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/mapstructure" -) - -// Backend is an implementation of backend.Backend for legacy remote state -// clients. -type Backend struct { - // Type is the type of remote state client to support - Type string - - // client is set after Configure is called and client is initialized. 
- client remote.Client -} - -func (b *Backend) Input( - ui terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - // Return the config as-is, legacy doesn't support input - return c, nil -} - -func (b *Backend) Validate(*terraform.ResourceConfig) ([]string, []error) { - // No validation was supported for old clients - return nil, nil -} - -func (b *Backend) Configure(c *terraform.ResourceConfig) error { - // Legacy remote state was only map[string]string config - var conf map[string]string - if err := mapstructure.Decode(c.Raw, &conf); err != nil { - return fmt.Errorf( - "Failed to decode %q configuration: %s\n\n"+ - "This backend expects all configuration keys and values to be\n"+ - "strings. Please verify your configuration and try again.", - b.Type, err) - } - - client, err := remote.NewClient(b.Type, conf) - if err != nil { - return fmt.Errorf( - "Failed to configure remote backend %q: %s", - b.Type, err) - } - - // Set our client - b.client = client - return nil -} - -func (b *Backend) State(name string) (state.State, error) { - if name != backend.DefaultStateName { - return nil, backend.ErrNamedStatesNotSupported - } - - if b.client == nil { - panic("State called with nil remote state client") - } - - return &remote.State{Client: b.client}, nil -} - -func (b *Backend) States() ([]string, error) { - return nil, backend.ErrNamedStatesNotSupported -} - -func (b *Backend) DeleteState(string) error { - return backend.ErrNamedStatesNotSupported -} diff --git a/vendor/github.com/hashicorp/terraform/backend/legacy/legacy.go b/vendor/github.com/hashicorp/terraform/backend/legacy/legacy.go deleted file mode 100644 index be3163bd53a..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/legacy/legacy.go +++ /dev/null @@ -1,28 +0,0 @@ -// Package legacy contains a backend implementation that can be used -// with the legacy remote state clients. 
-package legacy - -import ( - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/state/remote" -) - -// Init updates the backend/init package map of initializers to support -// all the remote state types. -// -// If a type is already in the map, it will not be added. This will allow -// us to slowly convert the legacy types to first-class backends. -func Init(m map[string]func() backend.Backend) { - for k, _ := range remote.BuiltinClients { - if _, ok := m[k]; !ok { - // Copy the "k" value since the variable "k" is reused for - // each key (address doesn't change). - typ := k - - // Build the factory function to return a backend of typ - m[k] = func() backend.Backend { - return &Backend{Type: typ} - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend.go b/vendor/github.com/hashicorp/terraform/backend/local/backend.go deleted file mode 100644 index 6b119c2972b..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/local/backend.go +++ /dev/null @@ -1,421 +0,0 @@ -package local - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "sync" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" - "github.com/mitchellh/colorstring" -) - -const ( - DefaultWorkspaceDir = "terraform.tfstate.d" - DefaultWorkspaceFile = "environment" - DefaultStateFilename = "terraform.tfstate" - DefaultBackupExtension = ".backup" -) - -// Local is an implementation of EnhancedBackend that performs all operations -// locally. This is the "default" backend and implements normal Terraform -// behavior as it is well known. -type Local struct { - // CLI and Colorize control the CLI output. If CLI is nil then no CLI - // output will be done. If CLIColor is nil then no coloring will be done. 
- CLI cli.Ui - CLIColor *colorstring.Colorize - - // The State* paths are set from the backend config, and may be left blank - // to use the defaults. If the actual paths for the local backend state are - // needed, use the StatePaths method. - // - // StatePath is the local path where state is read from. - // - // StateOutPath is the local path where the state will be written. - // If this is empty, it will default to StatePath. - // - // StateBackupPath is the local path where a backup file will be written. - // Set this to "-" to disable state backup. - // - // StateWorkspaceDir is the path to the folder containing data for - // non-default workspaces. This defaults to DefaultWorkspaceDir if not set. - StatePath string - StateOutPath string - StateBackupPath string - StateWorkspaceDir string - - // We only want to create a single instance of a local state, so store them - // here as they're loaded. - states map[string]state.State - - // Terraform context. Many of these will be overridden or merged by - // Operation. See Operation for more details. - ContextOpts *terraform.ContextOpts - - // OpInput will ask for necessary input prior to performing any operations. - // - // OpValidation will perform validation prior to running an operation. The - // variable naming doesn't match the style of others since we have a func - // Validate. - OpInput bool - OpValidation bool - - // Backend, if non-nil, will use this backend for non-enhanced behavior. - // This allows local behavior with remote state storage. It is a way to - // "upgrade" a non-enhanced backend to an enhanced backend with typical - // behavior. - // - // If this is nil, local performs normal state loading and storage. - Backend backend.Backend - - // RunningInAutomation indicates that commands are being run by an - // automated system rather than directly at a command prompt. 
- // - // This is a hint not to produce messages that expect that a user can - // run a follow-up command, perhaps because Terraform is running in - // some sort of workflow automation tool that abstracts away the - // exact commands that are being run. - RunningInAutomation bool - - schema *schema.Backend - opLock sync.Mutex - once sync.Once -} - -func (b *Local) Input( - ui terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - b.once.Do(b.init) - - f := b.schema.Input - if b.Backend != nil { - f = b.Backend.Input - } - - return f(ui, c) -} - -func (b *Local) Validate(c *terraform.ResourceConfig) ([]string, []error) { - b.once.Do(b.init) - - f := b.schema.Validate - if b.Backend != nil { - f = b.Backend.Validate - } - - return f(c) -} - -func (b *Local) Configure(c *terraform.ResourceConfig) error { - b.once.Do(b.init) - - f := b.schema.Configure - if b.Backend != nil { - f = b.Backend.Configure - } - - return f(c) -} - -func (b *Local) States() ([]string, error) { - // If we have a backend handling state, defer to that. - if b.Backend != nil { - return b.Backend.States() - } - - // the listing always start with "default" - envs := []string{backend.DefaultStateName} - - entries, err := ioutil.ReadDir(b.stateWorkspaceDir()) - // no error if there's no envs configured - if os.IsNotExist(err) { - return envs, nil - } - if err != nil { - return nil, err - } - - var listed []string - for _, entry := range entries { - if entry.IsDir() { - listed = append(listed, filepath.Base(entry.Name())) - } - } - - sort.Strings(listed) - envs = append(envs, listed...) - - return envs, nil -} - -// DeleteState removes a named state. -// The "default" state cannot be removed. -func (b *Local) DeleteState(name string) error { - // If we have a backend handling state, defer to that. 
- if b.Backend != nil { - return b.Backend.DeleteState(name) - } - - if name == "" { - return errors.New("empty state name") - } - - if name == backend.DefaultStateName { - return errors.New("cannot delete default state") - } - - delete(b.states, name) - return os.RemoveAll(filepath.Join(b.stateWorkspaceDir(), name)) -} - -func (b *Local) State(name string) (state.State, error) { - statePath, stateOutPath, backupPath := b.StatePaths(name) - - // If we have a backend handling state, delegate to that. - if b.Backend != nil { - return b.Backend.State(name) - } - - if s, ok := b.states[name]; ok { - return s, nil - } - - if err := b.createState(name); err != nil { - return nil, err - } - - // Otherwise, we need to load the state. - var s state.State = &state.LocalState{ - Path: statePath, - PathOut: stateOutPath, - } - - // If we are backing up the state, wrap it - if backupPath != "" { - s = &state.BackupState{ - Real: s, - Path: backupPath, - } - } - - if b.states == nil { - b.states = map[string]state.State{} - } - b.states[name] = s - return s, nil -} - -// Operation implements backend.Enhanced -// -// This will initialize an in-memory terraform.Context to perform the -// operation within this process. -// -// The given operation parameter will be merged with the ContextOpts on -// the structure with the following rules. If a rule isn't specified and the -// name conflicts, assume that the field is overwritten if set. -func (b *Local) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) { - // Determine the function to call for our operation - var f func(context.Context, *backend.Operation, *backend.RunningOperation) - switch op.Type { - case backend.OperationTypeRefresh: - f = b.opRefresh - case backend.OperationTypePlan: - f = b.opPlan - case backend.OperationTypeApply: - f = b.opApply - default: - return nil, fmt.Errorf( - "Unsupported operation type: %s\n\n"+ - "This is a bug in Terraform and should be reported. 
The local backend\n"+ - "is built-in to Terraform and should always support all operations.", - op.Type) - } - - // Lock - b.opLock.Lock() - - // Build our running operation - runningCtx, runningCtxCancel := context.WithCancel(context.Background()) - runningOp := &backend.RunningOperation{Context: runningCtx} - - // Do it - go func() { - defer b.opLock.Unlock() - defer runningCtxCancel() - f(ctx, op, runningOp) - }() - - // Return - return runningOp, nil -} - -// Colorize returns the Colorize structure that can be used for colorizing -// output. This is gauranteed to always return a non-nil value and so is useful -// as a helper to wrap any potentially colored strings. -func (b *Local) Colorize() *colorstring.Colorize { - if b.CLIColor != nil { - return b.CLIColor - } - - return &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: true, - } -} - -func (b *Local) init() { - b.schema = &schema.Backend{ - Schema: map[string]*schema.Schema{ - "path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "", - }, - - "workspace_dir": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "", - }, - - "environment_dir": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "", - ConflictsWith: []string{"workspace_dir"}, - - Deprecated: "workspace_dir should be used instead, with the same meaning", - }, - }, - - ConfigureFunc: b.schemaConfigure, - } -} - -func (b *Local) schemaConfigure(ctx context.Context) error { - d := schema.FromContextBackendConfig(ctx) - - // Set the path if it is set - pathRaw, ok := d.GetOk("path") - if ok { - path := pathRaw.(string) - if path == "" { - return fmt.Errorf("configured path is empty") - } - - b.StatePath = path - b.StateOutPath = path - } - - if raw, ok := d.GetOk("workspace_dir"); ok { - path := raw.(string) - if path != "" { - b.StateWorkspaceDir = path - } - } - - // Legacy name, which ConflictsWith workspace_dir - if raw, ok := d.GetOk("environment_dir"); ok 
{ - path := raw.(string) - if path != "" { - b.StateWorkspaceDir = path - } - } - - return nil -} - -// StatePaths returns the StatePath, StateOutPath, and StateBackupPath as -// configured from the CLI. -func (b *Local) StatePaths(name string) (string, string, string) { - statePath := b.StatePath - stateOutPath := b.StateOutPath - backupPath := b.StateBackupPath - - if name == "" { - name = backend.DefaultStateName - } - - if name == backend.DefaultStateName { - if statePath == "" { - statePath = DefaultStateFilename - } - } else { - statePath = filepath.Join(b.stateWorkspaceDir(), name, DefaultStateFilename) - } - - if stateOutPath == "" { - stateOutPath = statePath - } - - switch backupPath { - case "-": - backupPath = "" - case "": - backupPath = stateOutPath + DefaultBackupExtension - } - - return statePath, stateOutPath, backupPath -} - -// this only ensures that the named directory exists -func (b *Local) createState(name string) error { - if name == backend.DefaultStateName { - return nil - } - - stateDir := filepath.Join(b.stateWorkspaceDir(), name) - s, err := os.Stat(stateDir) - if err == nil && s.IsDir() { - // no need to check for os.IsNotExist, since that is covered by os.MkdirAll - // which will catch the other possible errors as well. - return nil - } - - err = os.MkdirAll(stateDir, 0755) - if err != nil { - return err - } - - return nil -} - -// stateWorkspaceDir returns the directory where state environments are stored. -func (b *Local) stateWorkspaceDir() string { - if b.StateWorkspaceDir != "" { - return b.StateWorkspaceDir - } - - return DefaultWorkspaceDir -} - -func (b *Local) pluginInitRequired(providerErr *terraform.ResourceProviderError) { - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - strings.TrimSpace(errPluginInit)+"\n", - providerErr))) -} - -// this relies on multierror to format the plugin errors below the copy -const errPluginInit = ` -[reset][bold][yellow]Plugin reinitialization required. 
Please run "terraform init".[reset] -[yellow]Reason: Could not satisfy plugin requirements. - -Plugins are external binaries that Terraform uses to access and manipulate -resources. The configuration provided requires plugins which can't be located, -don't satisfy the version constraints, or are otherwise incompatible. - -[reset][red]%s - -[reset][yellow]Terraform automatically discovers provider requirements from your -configuration, including providers used in child modules. To see the -requirements and constraints from each module, run "terraform providers". -` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go deleted file mode 100644 index 9789e0b7c4a..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go +++ /dev/null @@ -1,327 +0,0 @@ -package local - -import ( - "bytes" - "context" - "errors" - "fmt" - "log" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/command/format" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -func (b *Local) opApply( - ctx context.Context, - op *backend.Operation, - runningOp *backend.RunningOperation) { - log.Printf("[INFO] backend/local: starting Apply operation") - - // If we have a nil module at this point, then set it to an empty tree - // to avoid any potential crashes. - if op.Plan == nil && op.Module == nil && !op.Destroy { - runningOp.Err = fmt.Errorf(strings.TrimSpace(applyErrNoConfig)) - return - } - - // If we have a nil module at this point, then set it to an empty tree - // to avoid any potential crashes. 
- if op.Module == nil { - op.Module = module.NewEmptyTree() - } - - // Setup our count hook that keeps track of resource changes - countHook := new(CountHook) - stateHook := new(StateHook) - if b.ContextOpts == nil { - b.ContextOpts = new(terraform.ContextOpts) - } - old := b.ContextOpts.Hooks - defer func() { b.ContextOpts.Hooks = old }() - b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook, stateHook) - - // Get our context - tfCtx, opState, err := b.context(op) - if err != nil { - runningOp.Err = err - return - } - - if op.LockState { - lockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout) - defer cancel() - - lockInfo := state.NewLockInfo() - lockInfo.Operation = op.Type.String() - lockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize()) - if err != nil { - runningOp.Err = errwrap.Wrapf("Error locking state: {{err}}", err) - return - } - - defer func() { - if err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil { - runningOp.Err = multierror.Append(runningOp.Err, err) - } - }() - } - - // Setup the state - runningOp.State = tfCtx.State() - - // If we weren't given a plan, then we refresh/plan - if op.Plan == nil { - // If we're refreshing before apply, perform that - if op.PlanRefresh { - log.Printf("[INFO] backend/local: apply calling Refresh") - _, err := tfCtx.Refresh() - if err != nil { - runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", err) - return - } - } - - // Perform the plan - log.Printf("[INFO] backend/local: apply calling Plan") - plan, err := tfCtx.Plan() - if err != nil { - runningOp.Err = errwrap.Wrapf("Error running plan: {{err}}", err) - return - } - - dispPlan := format.NewPlan(plan) - trivialPlan := dispPlan.Empty() - hasUI := op.UIOut != nil && op.UIIn != nil - mustConfirm := hasUI && ((op.Destroy && !op.DestroyForce) || (!op.Destroy && !op.AutoApprove && !trivialPlan)) - if mustConfirm { - var desc, query string - if op.Destroy { - // Default destroy message - 
desc = "Terraform will destroy all your managed infrastructure, as shown above.\n" + - "There is no undo. Only 'yes' will be accepted to confirm." - query = "Do you really want to destroy?" - } else { - desc = "Terraform will perform the actions described above.\n" + - "Only 'yes' will be accepted to approve." - query = "Do you want to perform these actions?" - } - - if !trivialPlan { - // Display the plan of what we are going to apply/destroy. - b.renderPlan(dispPlan) - b.CLI.Output("") - } - - v, err := op.UIIn.Input(&terraform.InputOpts{ - Id: "approve", - Query: query, - Description: desc, - }) - if err != nil { - runningOp.Err = errwrap.Wrapf("Error asking for approval: {{err}}", err) - return - } - if v != "yes" { - if op.Destroy { - runningOp.Err = errors.New("Destroy cancelled.") - } else { - runningOp.Err = errors.New("Apply cancelled.") - } - return - } - } - } - - // Setup our hook for continuous state updates - stateHook.State = opState - - // Start the apply in a goroutine so that we can be interrupted. - var applyState *terraform.State - var applyErr error - doneCh := make(chan struct{}) - go func() { - defer close(doneCh) - _, applyErr = tfCtx.Apply() - // we always want the state, even if apply failed - applyState = tfCtx.State() - }() - - // Wait for the apply to finish or for us to be interrupted so - // we can handle it properly. - err = nil - select { - case <-ctx.Done(): - if b.CLI != nil { - b.CLI.Output("stopping apply operation...") - } - - // try to force a PersistState just in case the process is terminated - // before we can complete. - if err := opState.PersistState(); err != nil { - // We can't error out from here, but warn the user if there was an error. - // If this isn't transient, we will catch it again below, and - // attempt to save the state another way. 
- if b.CLI != nil { - b.CLI.Error(fmt.Sprintf(earlyStateWriteErrorFmt, err)) - } - } - - // Stop execution - go tfCtx.Stop() - - // Wait for completion still - <-doneCh - case <-doneCh: - } - - // Store the final state - runningOp.State = applyState - - // Persist the state - if err := opState.WriteState(applyState); err != nil { - runningOp.Err = b.backupStateForError(applyState, err) - return - } - if err := opState.PersistState(); err != nil { - runningOp.Err = b.backupStateForError(applyState, err) - return - } - - if applyErr != nil { - runningOp.Err = fmt.Errorf( - "Error applying plan:\n\n"+ - "%s\n\n"+ - "Terraform does not automatically rollback in the face of errors.\n"+ - "Instead, your Terraform state file has been partially updated with\n"+ - "any resources that successfully completed. Please address the error\n"+ - "above and apply again to incrementally change your infrastructure.", - multierror.Flatten(applyErr)) - return - } - - // If we have a UI, output the results - if b.CLI != nil { - if op.Destroy { - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "[reset][bold][green]\n"+ - "Destroy complete! Resources: %d destroyed.", - countHook.Removed))) - } else { - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "[reset][bold][green]\n"+ - "Apply complete! Resources: %d added, %d changed, %d destroyed.", - countHook.Added, - countHook.Changed, - countHook.Removed))) - } - - // only show the state file help message if the state is local. - if (countHook.Added > 0 || countHook.Changed > 0) && b.StateOutPath != "" { - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "[reset]\n"+ - "The state of your infrastructure has been saved to the path\n"+ - "below. This state is required to modify and destroy your\n"+ - "infrastructure, so keep it safe. 
To inspect the complete state\n"+ - "use the `terraform show` command.\n\n"+ - "State path: %s", - b.StateOutPath))) - } - } -} - -// backupStateForError is called in a scenario where we're unable to persist the -// state for some reason, and will attempt to save a backup copy of the state -// to local disk to help the user recover. This is a "last ditch effort" sort -// of thing, so we really don't want to end up in this codepath; we should do -// everything we possibly can to get the state saved _somewhere_. -func (b *Local) backupStateForError(applyState *terraform.State, err error) error { - b.CLI.Error(fmt.Sprintf("Failed to save state: %s\n", err)) - - local := &state.LocalState{Path: "errored.tfstate"} - writeErr := local.WriteState(applyState) - if writeErr != nil { - b.CLI.Error(fmt.Sprintf( - "Also failed to create local state file for recovery: %s\n\n", writeErr, - )) - // To avoid leaving the user with no state at all, our last resort - // is to print the JSON state out onto the terminal. This is an awful - // UX, so we should definitely avoid doing this if at all possible, - // but at least the user has _some_ path to recover if we end up - // here for some reason. - stateBuf := new(bytes.Buffer) - jsonErr := terraform.WriteState(applyState, stateBuf) - if jsonErr != nil { - b.CLI.Error(fmt.Sprintf( - "Also failed to JSON-serialize the state to print it: %s\n\n", jsonErr, - )) - return errors.New(stateWriteFatalError) - } - - b.CLI.Output(stateBuf.String()) - - return errors.New(stateWriteConsoleFallbackError) - } - - return errors.New(stateWriteBackedUpError) -} - -const applyErrNoConfig = ` -No configuration files found! - -Apply requires configuration to be present. Applying without a configuration -would mark everything for destruction, which is normally not what is desired. -If you would like to destroy everything, please run 'terraform destroy' instead -which does not require any configuration files. 
-` - -const stateWriteBackedUpError = `Failed to persist state to backend. - -The error shown above has prevented Terraform from writing the updated state -to the configured backend. To allow for recovery, the state has been written -to the file "errored.tfstate" in the current working directory. - -Running "terraform apply" again at this point will create a forked state, -making it harder to recover. - -To retry writing this state, use the following command: - terraform state push errored.tfstate -` - -const stateWriteConsoleFallbackError = `Failed to persist state to backend. - -The errors shown above prevented Terraform from writing the updated state to -the configured backend and from creating a local backup file. As a fallback, -the raw state data is printed above as a JSON object. - -To retry writing this state, copy the state data (from the first { to the -last } inclusive) and save it into a local file called errored.tfstate, then -run the following command: - terraform state push errored.tfstate -` - -const stateWriteFatalError = `Failed to save state after apply. - -A catastrophic error has prevented Terraform from persisting the state file -or creating a backup. Unfortunately this means that the record of any resources -created during this apply has been lost, and such resources may exist outside -of Terraform's management. - -For resources that support import, it is possible to recover by manually -importing each resource using its id from the target system. - -This is a serious bug in Terraform and should be reported. -` - -const earlyStateWriteErrorFmt = `Error saving current state: %s - -Terraform encountered an error attempting to save the state before canceling -the current operation. Once the operation is complete another attempt will be -made to save the final state. 
-` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go deleted file mode 100644 index aa056a1a16d..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go +++ /dev/null @@ -1,133 +0,0 @@ -package local - -import ( - "errors" - "log" - - "github.com/hashicorp/terraform/command/format" - - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -// backend.Local implementation. -func (b *Local) Context(op *backend.Operation) (*terraform.Context, state.State, error) { - // Make sure the type is invalid. We use this as a way to know not - // to ask for input/validate. - op.Type = backend.OperationTypeInvalid - - return b.context(op) -} - -func (b *Local) context(op *backend.Operation) (*terraform.Context, state.State, error) { - // Get the state. - s, err := b.State(op.Workspace) - if err != nil { - return nil, nil, errwrap.Wrapf("Error loading state: {{err}}", err) - } - - if err := s.RefreshState(); err != nil { - return nil, nil, errwrap.Wrapf("Error loading state: {{err}}", err) - } - - // Initialize our context options - var opts terraform.ContextOpts - if v := b.ContextOpts; v != nil { - opts = *v - } - - // Copy set options from the operation - opts.Destroy = op.Destroy - opts.Module = op.Module - opts.Targets = op.Targets - opts.UIInput = op.UIIn - if op.Variables != nil { - opts.Variables = op.Variables - } - - // Load our state - // By the time we get here, the backend creation code in "command" took - // care of making s.State() return a state compatible with our plan, - // if any, so we can safely pass this value in both the plan context - // and new context cases below. 
- opts.State = s.State() - - // Build the context - var tfCtx *terraform.Context - if op.Plan != nil { - tfCtx, err = op.Plan.Context(&opts) - } else { - tfCtx, err = terraform.NewContext(&opts) - } - - // any errors resolving plugins returns this - if rpe, ok := err.(*terraform.ResourceProviderError); ok { - b.pluginInitRequired(rpe) - // we wrote the full UI error here, so return a generic error for flow - // control in the command. - return nil, nil, errors.New("error satisfying plugin requirements") - } - - if err != nil { - return nil, nil, err - } - - // If we have an operation, then we automatically do the input/validate - // here since every option requires this. - if op.Type != backend.OperationTypeInvalid { - // If input asking is enabled, then do that - if op.Plan == nil && b.OpInput { - mode := terraform.InputModeProvider - mode |= terraform.InputModeVar - mode |= terraform.InputModeVarUnset - - if err := tfCtx.Input(mode); err != nil { - return nil, nil, errwrap.Wrapf("Error asking for user input: {{err}}", err) - } - } - - // If validation is enabled, validate - if b.OpValidation { - diags := tfCtx.Validate() - if len(diags) > 0 { - if diags.HasErrors() { - // If there are warnings _and_ errors then we'll take this - // path and return them all together in this error. - return nil, nil, diags.Err() - } - - // For now we can't propagate warnings any further without - // printing them directly to the UI, so we'll need to - // format them here ourselves. - for _, diag := range diags { - if diag.Severity() != tfdiags.Warning { - continue - } - if b.CLI != nil { - b.CLI.Warn(format.Diagnostic(diag, b.Colorize(), 72)) - } else { - desc := diag.Description() - log.Printf("[WARN] backend/local: %s", desc.Summary) - } - } - - // Make a newline before continuing - b.CLI.Output("") - } - } - } - - return tfCtx, s, nil -} - -const validateWarnHeader = ` -There are warnings related to your configuration. 
If no errors occurred, -Terraform will continue despite these warnings. It is a good idea to resolve -these warnings in the near future. - -Warnings: -` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go deleted file mode 100644 index 380ce17421a..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go +++ /dev/null @@ -1,264 +0,0 @@ -package local - -import ( - "bytes" - "context" - "fmt" - "log" - "os" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/command/format" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -func (b *Local) opPlan( - ctx context.Context, - op *backend.Operation, - runningOp *backend.RunningOperation) { - log.Printf("[INFO] backend/local: starting Plan operation") - - if b.CLI != nil && op.Plan != nil { - b.CLI.Output(b.Colorize().Color( - "[reset][bold][yellow]" + - "The plan command received a saved plan file as input. This command\n" + - "will output the saved plan. This will not modify the already-existing\n" + - "plan. If you wish to generate a new plan, please pass in a configuration\n" + - "directory as an argument.\n\n")) - } - - // A local plan requires either a plan or a module - if op.Plan == nil && op.Module == nil && !op.Destroy { - runningOp.Err = fmt.Errorf(strings.TrimSpace(planErrNoConfig)) - return - } - - // If we have a nil module at this point, then set it to an empty tree - // to avoid any potential crashes. 
- if op.Module == nil { - op.Module = module.NewEmptyTree() - } - - // Setup our count hook that keeps track of resource changes - countHook := new(CountHook) - if b.ContextOpts == nil { - b.ContextOpts = new(terraform.ContextOpts) - } - old := b.ContextOpts.Hooks - defer func() { b.ContextOpts.Hooks = old }() - b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook) - - // Get our context - tfCtx, opState, err := b.context(op) - if err != nil { - runningOp.Err = err - return - } - - if op.LockState { - lockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout) - defer cancel() - - lockInfo := state.NewLockInfo() - lockInfo.Operation = op.Type.String() - lockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize()) - if err != nil { - runningOp.Err = errwrap.Wrapf("Error locking state: {{err}}", err) - return - } - - defer func() { - if err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil { - runningOp.Err = multierror.Append(runningOp.Err, err) - } - }() - } - - // Setup the state - runningOp.State = tfCtx.State() - - // If we're refreshing before plan, perform that - if op.PlanRefresh { - log.Printf("[INFO] backend/local: plan calling Refresh") - - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(planRefreshing) + "\n")) - } - - _, err := tfCtx.Refresh() - if err != nil { - runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", err) - return - } - if b.CLI != nil { - b.CLI.Output("\n------------------------------------------------------------------------") - } - } - - // Perform the plan in a goroutine so we can be interrupted - var plan *terraform.Plan - var planErr error - doneCh := make(chan struct{}) - go func() { - defer close(doneCh) - log.Printf("[INFO] backend/local: plan calling Plan") - plan, planErr = tfCtx.Plan() - }() - - select { - case <-ctx.Done(): - if b.CLI != nil { - b.CLI.Output("stopping plan operation...") - } - - // Stop execution - go tfCtx.Stop() - - // Wait for 
completion still - <-doneCh - case <-doneCh: - } - - if planErr != nil { - runningOp.Err = errwrap.Wrapf("Error running plan: {{err}}", planErr) - return - } - // Record state - runningOp.PlanEmpty = plan.Diff.Empty() - - // Save the plan to disk - if path := op.PlanOutPath; path != "" { - // Write the backend if we have one - plan.Backend = op.PlanOutBackend - - // This works around a bug (#12871) which is no longer possible to - // trigger but will exist for already corrupted upgrades. - if plan.Backend != nil && plan.State != nil { - plan.State.Remote = nil - } - - log.Printf("[INFO] backend/local: writing plan output to: %s", path) - f, err := os.Create(path) - if err == nil { - err = terraform.WritePlan(plan, f) - } - f.Close() - if err != nil { - runningOp.Err = fmt.Errorf("Error writing plan file: %s", err) - return - } - } - - // Perform some output tasks if we have a CLI to output to. - if b.CLI != nil { - dispPlan := format.NewPlan(plan) - if dispPlan.Empty() { - b.CLI.Output("\n" + b.Colorize().Color(strings.TrimSpace(planNoChanges))) - return - } - - b.renderPlan(dispPlan) - - // Give the user some next-steps, unless we're running in an automation - // tool which is presumed to provide its own UI for further actions. 
- if !b.RunningInAutomation { - - b.CLI.Output("\n------------------------------------------------------------------------") - - if path := op.PlanOutPath; path == "" { - b.CLI.Output(fmt.Sprintf( - "\n" + strings.TrimSpace(planHeaderNoOutput) + "\n", - )) - } else { - b.CLI.Output(fmt.Sprintf( - "\n"+strings.TrimSpace(planHeaderYesOutput)+"\n", - path, path, - )) - } - - } - } -} - -func (b *Local) renderPlan(dispPlan *format.Plan) { - - headerBuf := &bytes.Buffer{} - fmt.Fprintf(headerBuf, "\n%s\n", strings.TrimSpace(planHeaderIntro)) - counts := dispPlan.ActionCounts() - if counts[terraform.DiffCreate] > 0 { - fmt.Fprintf(headerBuf, "%s create\n", format.DiffActionSymbol(terraform.DiffCreate)) - } - if counts[terraform.DiffUpdate] > 0 { - fmt.Fprintf(headerBuf, "%s update in-place\n", format.DiffActionSymbol(terraform.DiffUpdate)) - } - if counts[terraform.DiffDestroy] > 0 { - fmt.Fprintf(headerBuf, "%s destroy\n", format.DiffActionSymbol(terraform.DiffDestroy)) - } - if counts[terraform.DiffDestroyCreate] > 0 { - fmt.Fprintf(headerBuf, "%s destroy and then create replacement\n", format.DiffActionSymbol(terraform.DiffDestroyCreate)) - } - if counts[terraform.DiffRefresh] > 0 { - fmt.Fprintf(headerBuf, "%s read (data resources)\n", format.DiffActionSymbol(terraform.DiffRefresh)) - } - - b.CLI.Output(b.Colorize().Color(headerBuf.String())) - - b.CLI.Output("Terraform will perform the following actions:\n") - - b.CLI.Output(dispPlan.Format(b.Colorize())) - - stats := dispPlan.Stats() - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "[reset][bold]Plan:[reset] "+ - "%d to add, %d to change, %d to destroy.", - stats.ToAdd, stats.ToChange, stats.ToDestroy, - ))) -} - -const planErrNoConfig = ` -No configuration files found! - -Plan requires configuration to be present. Planning without a configuration -would mark everything for destruction, which is normally not what is desired. 
-If you would like to destroy everything, please run plan with the "-destroy" -flag or create a single empty configuration file. Otherwise, please create -a Terraform configuration file in the path being executed and try again. -` - -const planHeaderIntro = ` -An execution plan has been generated and is shown below. -Resource actions are indicated with the following symbols: -` - -const planHeaderNoOutput = ` -Note: You didn't specify an "-out" parameter to save this plan, so Terraform -can't guarantee that exactly these actions will be performed if -"terraform apply" is subsequently run. -` - -const planHeaderYesOutput = ` -This plan was saved to: %s - -To perform exactly these actions, run the following command to apply: - terraform apply %q -` - -const planNoChanges = ` -[reset][bold][green]No changes. Infrastructure is up-to-date.[reset][green] - -This means that Terraform did not detect any differences between your -configuration and real physical resources that exist. As a result, no -actions need to be performed. -` - -const planRefreshing = ` -[reset][bold]Refreshing Terraform state in-memory prior to plan...[reset] -The refreshed state will be used to calculate this plan, but will not be -persisted to local or remote state storage. 
-` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go deleted file mode 100644 index 0cf50b759ee..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go +++ /dev/null @@ -1,131 +0,0 @@ -package local - -import ( - "context" - "fmt" - "log" - "os" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -func (b *Local) opRefresh( - ctx context.Context, - op *backend.Operation, - runningOp *backend.RunningOperation) { - // Check if our state exists if we're performing a refresh operation. We - // only do this if we're managing state with this backend. - if b.Backend == nil { - if _, err := os.Stat(b.StatePath); err != nil { - if os.IsNotExist(err) { - err = nil - } - - if err != nil { - runningOp.Err = fmt.Errorf( - "There was an error reading the Terraform state that is needed\n"+ - "for refreshing. The path and error are shown below.\n\n"+ - "Path: %s\n\nError: %s", - b.StatePath, err) - return - } - } - } - - // If we have no config module given to use, create an empty tree to - // avoid crashes when Terraform.Context is initialized. 
- if op.Module == nil { - op.Module = module.NewEmptyTree() - } - - // Get our context - tfCtx, opState, err := b.context(op) - if err != nil { - runningOp.Err = err - return - } - - if op.LockState { - lockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout) - defer cancel() - - lockInfo := state.NewLockInfo() - lockInfo.Operation = op.Type.String() - lockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize()) - if err != nil { - runningOp.Err = errwrap.Wrapf("Error locking state: {{err}}", err) - return - } - - defer func() { - if err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil { - runningOp.Err = multierror.Append(runningOp.Err, err) - } - }() - } - - // Set our state - runningOp.State = opState.State() - if runningOp.State.Empty() || !runningOp.State.HasResources() { - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color( - strings.TrimSpace(refreshNoState) + "\n")) - } - } - - // Perform the refresh in a goroutine so we can be interrupted - var newState *terraform.State - var refreshErr error - doneCh := make(chan struct{}) - go func() { - defer close(doneCh) - newState, err = tfCtx.Refresh() - log.Printf("[INFO] backend/local: plan calling Plan") - }() - - select { - case <-ctx.Done(): - if b.CLI != nil { - b.CLI.Output("stopping refresh operation...") - } - - // Stop execution - go tfCtx.Stop() - - // Wait for completion still - <-doneCh - case <-doneCh: - } - - // write the resulting state to the running op - runningOp.State = newState - if refreshErr != nil { - runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", refreshErr) - return - } - - // Write and persist the state - if err := opState.WriteState(newState); err != nil { - runningOp.Err = errwrap.Wrapf("Error writing state: {{err}}", err) - return - } - if err := opState.PersistState(); err != nil { - runningOp.Err = errwrap.Wrapf("Error saving state: {{err}}", err) - return - } -} - -const refreshNoState = ` -[reset][bold][yellow]Empty or 
non-existent state file.[reset][yellow] - -Refresh will do nothing. Refresh does not error or return an erroneous -exit status because many automation scripts use refresh, plan, then apply -and may not have a state file yet for the first run. -` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/cli.go b/vendor/github.com/hashicorp/terraform/backend/local/cli.go deleted file mode 100644 index f9edfd44968..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/local/cli.go +++ /dev/null @@ -1,24 +0,0 @@ -package local - -import ( - "github.com/hashicorp/terraform/backend" -) - -// backend.CLI impl. -func (b *Local) CLIInit(opts *backend.CLIOpts) error { - b.CLI = opts.CLI - b.CLIColor = opts.CLIColor - b.ContextOpts = opts.ContextOpts - b.OpInput = opts.Input - b.OpValidation = opts.Validation - b.RunningInAutomation = opts.RunningInAutomation - - // Only configure state paths if we didn't do so via the configure func. - if b.StatePath == "" { - b.StatePath = opts.StatePath - b.StateOutPath = opts.StateOutPath - b.StateBackupPath = opts.StateBackupPath - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go b/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go deleted file mode 100644 index 507bab917a4..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT. 
- -package local - -import "strconv" - -const _countHookAction_name = "countHookActionAddcountHookActionChangecountHookActionRemove" - -var _countHookAction_index = [...]uint8{0, 18, 39, 60} - -func (i countHookAction) String() string { - if i >= countHookAction(len(_countHookAction_index)-1) { - return "countHookAction(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _countHookAction_name[_countHookAction_index[i]:_countHookAction_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/hook_count.go b/vendor/github.com/hashicorp/terraform/backend/local/hook_count.go deleted file mode 100644 index 4708159dc0c..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/local/hook_count.go +++ /dev/null @@ -1,115 +0,0 @@ -package local - -import ( - "strings" - "sync" - - "github.com/hashicorp/terraform/terraform" -) - -// CountHook is a hook that counts the number of resources -// added, removed, changed during the course of an apply. -type CountHook struct { - Added int - Changed int - Removed int - - ToAdd int - ToChange int - ToRemove int - ToRemoveAndAdd int - - pending map[string]countHookAction - - sync.Mutex - terraform.NilHook -} - -func (h *CountHook) Reset() { - h.Lock() - defer h.Unlock() - - h.pending = nil - h.Added = 0 - h.Changed = 0 - h.Removed = 0 -} - -func (h *CountHook) PreApply( - n *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (terraform.HookAction, error) { - h.Lock() - defer h.Unlock() - - if d.Empty() { - return terraform.HookActionContinue, nil - } - - if h.pending == nil { - h.pending = make(map[string]countHookAction) - } - - action := countHookActionChange - if d.GetDestroy() { - action = countHookActionRemove - } else if s.ID == "" { - action = countHookActionAdd - } - - h.pending[n.HumanId()] = action - - return terraform.HookActionContinue, nil -} - -func (h *CountHook) PostApply( - n *terraform.InstanceInfo, - s *terraform.InstanceState, - e error) 
(terraform.HookAction, error) { - h.Lock() - defer h.Unlock() - - if h.pending != nil { - if a, ok := h.pending[n.HumanId()]; ok { - delete(h.pending, n.HumanId()) - - if e == nil { - switch a { - case countHookActionAdd: - h.Added += 1 - case countHookActionChange: - h.Changed += 1 - case countHookActionRemove: - h.Removed += 1 - } - } - } - } - - return terraform.HookActionContinue, nil -} - -func (h *CountHook) PostDiff( - n *terraform.InstanceInfo, d *terraform.InstanceDiff) ( - terraform.HookAction, error) { - h.Lock() - defer h.Unlock() - - // We don't count anything for data sources - if strings.HasPrefix(n.Id, "data.") { - return terraform.HookActionContinue, nil - } - - switch d.ChangeType() { - case terraform.DiffDestroyCreate: - h.ToRemoveAndAdd += 1 - case terraform.DiffCreate: - h.ToAdd += 1 - case terraform.DiffDestroy: - h.ToRemove += 1 - case terraform.DiffUpdate: - h.ToChange += 1 - } - - return terraform.HookActionContinue, nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/hook_count_action.go b/vendor/github.com/hashicorp/terraform/backend/local/hook_count_action.go deleted file mode 100644 index 9a28464c2fd..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/local/hook_count_action.go +++ /dev/null @@ -1,11 +0,0 @@ -package local - -//go:generate stringer -type=countHookAction hook_count_action.go - -type countHookAction byte - -const ( - countHookActionAdd countHookAction = iota - countHookActionChange - countHookActionRemove -) diff --git a/vendor/github.com/hashicorp/terraform/backend/local/hook_state.go b/vendor/github.com/hashicorp/terraform/backend/local/hook_state.go deleted file mode 100644 index 5483c4344ff..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/local/hook_state.go +++ /dev/null @@ -1,33 +0,0 @@ -package local - -import ( - "sync" - - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -// StateHook is a hook that continuously updates the 
state by calling -// WriteState on a state.State. -type StateHook struct { - terraform.NilHook - sync.Mutex - - State state.State -} - -func (h *StateHook) PostStateUpdate( - s *terraform.State) (terraform.HookAction, error) { - h.Lock() - defer h.Unlock() - - if h.State != nil { - // Write the new state - if err := h.State.WriteState(s); err != nil { - return terraform.HookActionHalt, err - } - } - - // Continue forth - return terraform.HookActionContinue, nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/testing.go b/vendor/github.com/hashicorp/terraform/backend/local/testing.go deleted file mode 100644 index beb88508e11..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/local/testing.go +++ /dev/null @@ -1,100 +0,0 @@ -package local - -import ( - "io/ioutil" - "path/filepath" - "testing" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -// TestLocal returns a configured Local struct with temporary paths and -// in-memory ContextOpts. -// -// No operations will be called on the returned value, so you can still set -// public fields without any locks. -func TestLocal(t *testing.T) *Local { - tempDir := testTempDir(t) - return &Local{ - StatePath: filepath.Join(tempDir, "state.tfstate"), - StateOutPath: filepath.Join(tempDir, "state.tfstate"), - StateBackupPath: filepath.Join(tempDir, "state.tfstate.bak"), - StateWorkspaceDir: filepath.Join(tempDir, "state.tfstate.d"), - ContextOpts: &terraform.ContextOpts{}, - } -} - -// TestLocalProvider modifies the ContextOpts of the *Local parameter to -// have a provider with the given name. 
-func TestLocalProvider(t *testing.T, b *Local, name string) *terraform.MockResourceProvider { - // Build a mock resource provider for in-memory operations - p := new(terraform.MockResourceProvider) - p.DiffReturn = &terraform.InstanceDiff{} - p.RefreshFn = func( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - return s, nil - } - p.ResourcesReturn = []terraform.ResourceType{ - terraform.ResourceType{ - Name: "test_instance", - }, - } - - // Initialize the opts - if b.ContextOpts == nil { - b.ContextOpts = &terraform.ContextOpts{} - } - - // Setup our provider - b.ContextOpts.ProviderResolver = terraform.ResourceProviderResolverFixed( - map[string]terraform.ResourceProviderFactory{ - name: terraform.ResourceProviderFactoryFixed(p), - }, - ) - - return p -} - -// TestNewLocalSingle is a factory for creating a TestLocalSingleState. -// This function matches the signature required for backend/init. -func TestNewLocalSingle() backend.Backend { - return &TestLocalSingleState{} -} - -// TestLocalSingleState is a backend implementation that wraps Local -// and modifies it to only support single states (returns -// ErrNamedStatesNotSupported for multi-state operations). -// -// This isn't an actual use case, this is exported just to provide a -// easy way to test that behavior. 
-type TestLocalSingleState struct { - Local -} - -func (b *TestLocalSingleState) State(name string) (state.State, error) { - if name != backend.DefaultStateName { - return nil, backend.ErrNamedStatesNotSupported - } - - return b.Local.State(name) -} - -func (b *TestLocalSingleState) States() ([]string, error) { - return nil, backend.ErrNamedStatesNotSupported -} - -func (b *TestLocalSingleState) DeleteState(string) error { - return backend.ErrNamedStatesNotSupported -} - -func testTempDir(t *testing.T) string { - d, err := ioutil.TempDir("", "tf") - if err != nil { - t.Fatalf("err: %s", err) - } - - return d -} diff --git a/vendor/github.com/hashicorp/terraform/backend/nil.go b/vendor/github.com/hashicorp/terraform/backend/nil.go deleted file mode 100644 index e2f11e91d15..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/nil.go +++ /dev/null @@ -1,39 +0,0 @@ -package backend - -import ( - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -// Nil is a no-op implementation of Backend. -// -// This is useful to embed within another struct to implement all of the -// backend interface for testing. 
-type Nil struct{} - -func (Nil) Input( - ui terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - return c, nil -} - -func (Nil) Validate(*terraform.ResourceConfig) ([]string, []error) { - return nil, nil -} - -func (Nil) Configure(*terraform.ResourceConfig) error { - return nil -} - -func (Nil) State(string) (state.State, error) { - // We have to return a non-nil state to adhere to the interface - return &state.InmemState{}, nil -} - -func (Nil) DeleteState(string) error { - return nil -} - -func (Nil) States() ([]string, error) { - return []string{DefaultStateName}, nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/operation_type.go b/vendor/github.com/hashicorp/terraform/backend/operation_type.go deleted file mode 100644 index 1739dc7fc4f..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/operation_type.go +++ /dev/null @@ -1,14 +0,0 @@ -package backend - -//go:generate stringer -type=OperationType operation_type.go - -// OperationType is an enum used with Operation to specify the operation -// type to perform for Terraform. -type OperationType uint - -const ( - OperationTypeInvalid OperationType = iota - OperationTypeRefresh - OperationTypePlan - OperationTypeApply -) diff --git a/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go b/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go deleted file mode 100644 index 16b7b381941..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=OperationType operation_type.go"; DO NOT EDIT. 
- -package backend - -import "strconv" - -const _OperationType_name = "OperationTypeInvalidOperationTypeRefreshOperationTypePlanOperationTypeApply" - -var _OperationType_index = [...]uint8{0, 20, 40, 57, 75} - -func (i OperationType) String() string { - if i >= OperationType(len(_OperationType_index)-1) { - return "OperationType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _OperationType_name[_OperationType_index[i]:_OperationType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go deleted file mode 100644 index 38e6de5daa4..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go +++ /dev/null @@ -1,225 +0,0 @@ -package azure - -import ( - "context" - "fmt" - - armStorage "github.com/Azure/azure-sdk-for-go/arm/storage" - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" -) - -// New creates a new backend for S3 remote state. 
-func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "storage_account_name": { - Type: schema.TypeString, - Required: true, - Description: "The name of the storage account.", - }, - - "container_name": { - Type: schema.TypeString, - Required: true, - Description: "The container name.", - }, - - "key": { - Type: schema.TypeString, - Required: true, - Description: "The blob key.", - }, - - "environment": { - Type: schema.TypeString, - Optional: true, - Description: "The Azure cloud environment.", - DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", ""), - }, - - "access_key": { - Type: schema.TypeString, - Optional: true, - Description: "The access key.", - DefaultFunc: schema.EnvDefaultFunc("ARM_ACCESS_KEY", ""), - }, - - "resource_group_name": { - Type: schema.TypeString, - Optional: true, - Description: "The resource group name.", - }, - - "arm_subscription_id": { - Type: schema.TypeString, - Optional: true, - Description: "The Subscription ID.", - DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""), - }, - - "arm_client_id": { - Type: schema.TypeString, - Optional: true, - Description: "The Client ID.", - DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""), - }, - - "arm_client_secret": { - Type: schema.TypeString, - Optional: true, - Description: "The Client Secret.", - DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""), - }, - - "arm_tenant_id": { - Type: schema.TypeString, - Optional: true, - Description: "The Tenant ID.", - DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""), - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -type Backend struct { - *schema.Backend - - // The fields below are set from configure - blobClient storage.BlobStorageClient - - containerName string - keyName string - leaseID string -} - -type BackendConfig struct { - AccessKey string - Environment string - ClientID string - ClientSecret string - 
ResourceGroupName string - StorageAccountName string - SubscriptionID string - TenantID string -} - -func (b *Backend) configure(ctx context.Context) error { - if b.containerName != "" { - return nil - } - - // Grab the resource data - data := schema.FromContextBackendConfig(ctx) - - b.containerName = data.Get("container_name").(string) - b.keyName = data.Get("key").(string) - - config := BackendConfig{ - AccessKey: data.Get("access_key").(string), - ClientID: data.Get("arm_client_id").(string), - ClientSecret: data.Get("arm_client_secret").(string), - Environment: data.Get("environment").(string), - ResourceGroupName: data.Get("resource_group_name").(string), - StorageAccountName: data.Get("storage_account_name").(string), - SubscriptionID: data.Get("arm_subscription_id").(string), - TenantID: data.Get("arm_tenant_id").(string), - } - - blobClient, err := getBlobClient(config) - if err != nil { - return err - } - b.blobClient = blobClient - - return nil -} - -func getBlobClient(config BackendConfig) (storage.BlobStorageClient, error) { - var client storage.BlobStorageClient - - env, err := getAzureEnvironment(config.Environment) - if err != nil { - return client, err - } - - accessKey, err := getAccessKey(config, env) - if err != nil { - return client, err - } - - storageClient, err := storage.NewClient(config.StorageAccountName, accessKey, env.StorageEndpointSuffix, - storage.DefaultAPIVersion, true) - if err != nil { - return client, fmt.Errorf("Error creating storage client for storage account %q: %s", config.StorageAccountName, err) - } - - client = storageClient.GetBlobService() - return client, nil -} - -func getAccessKey(config BackendConfig, env azure.Environment) (string, error) { - if config.AccessKey != "" { - return config.AccessKey, nil - } - - rgOk := config.ResourceGroupName != "" - subOk := config.SubscriptionID != "" - clientIDOk := config.ClientID != "" - clientSecretOK := config.ClientSecret != "" - tenantIDOk := config.TenantID != "" - if !rgOk 
|| !subOk || !clientIDOk || !clientSecretOK || !tenantIDOk { - return "", fmt.Errorf("resource_group_name and credentials must be provided when access_key is absent") - } - - oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID) - if err != nil { - return "", err - } - - spt, err := adal.NewServicePrincipalToken(*oauthConfig, config.ClientID, config.ClientSecret, env.ResourceManagerEndpoint) - if err != nil { - return "", err - } - - accountsClient := armStorage.NewAccountsClientWithBaseURI(env.ResourceManagerEndpoint, config.SubscriptionID) - accountsClient.Authorizer = autorest.NewBearerAuthorizer(spt) - - keys, err := accountsClient.ListKeys(config.ResourceGroupName, config.StorageAccountName) - if err != nil { - return "", fmt.Errorf("Error retrieving keys for storage account %q: %s", config.StorageAccountName, err) - } - - if keys.Keys == nil { - return "", fmt.Errorf("Nil key returned for storage account %q", config.StorageAccountName) - } - - accessKeys := *keys.Keys - return *accessKeys[0].Value, nil -} - -func getAzureEnvironment(environment string) (azure.Environment, error) { - if environment == "" { - return azure.PublicCloud, nil - } - - env, err := azure.EnvironmentFromName(environment) - if err != nil { - // try again with wrapped value to support readable values like german instead of AZUREGERMANCLOUD - var innerErr error - env, innerErr = azure.EnvironmentFromName(fmt.Sprintf("AZURE%sCLOUD", environment)) - if innerErr != nil { - return env, fmt.Errorf("invalid 'environment' configuration: %s", err) - } - } - - return env, nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend_state.go deleted file mode 100644 index c7bc02755f0..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend_state.go +++ /dev/null @@ -1,141 +0,0 @@ -package azure - -import ( - "fmt" - 
"sort" - "strings" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "github.com/hashicorp/terraform/terraform" -) - -const ( - // This will be used as directory name, the odd looking colon is simply to - // reduce the chance of name conflicts with existing objects. - keyEnvPrefix = "env:" -) - -func (b *Backend) States() ([]string, error) { - prefix := b.keyName + keyEnvPrefix - params := storage.ListBlobsParameters{ - Prefix: prefix, - } - - container := b.blobClient.GetContainerReference(b.containerName) - resp, err := container.ListBlobs(params) - if err != nil { - return nil, err - } - - envs := map[string]struct{}{} - for _, obj := range resp.Blobs { - key := obj.Name - if strings.HasPrefix(key, prefix) { - name := strings.TrimPrefix(key, prefix) - // we store the state in a key, not a directory - if strings.Contains(name, "/") { - continue - } - - envs[name] = struct{}{} - } - } - - result := []string{backend.DefaultStateName} - for name := range envs { - result = append(result, name) - } - sort.Strings(result[1:]) - return result, nil -} - -func (b *Backend) DeleteState(name string) error { - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - containerReference := b.blobClient.GetContainerReference(b.containerName) - blobReference := containerReference.GetBlobReference(b.path(name)) - options := &storage.DeleteBlobOptions{} - - return blobReference.Delete(options) -} - -func (b *Backend) State(name string) (state.State, error) { - client := &RemoteClient{ - blobClient: b.blobClient, - containerName: b.containerName, - keyName: b.path(name), - } - - stateMgr := &remote.State{Client: client} - - //if this isn't the default state name, we need to create the object so - //it's listed by States. 
- if name != backend.DefaultStateName { - // take a lock on this state while we write it - lockInfo := state.NewLockInfo() - lockInfo.Operation = "init" - lockId, err := client.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("failed to lock azure state: %s", err) - } - - // Local helper function so we can call it multiple places - lockUnlock := func(parent error) error { - if err := stateMgr.Unlock(lockId); err != nil { - return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) - } - return parent - } - - // Grab the value - if err := stateMgr.RefreshState(); err != nil { - err = lockUnlock(err) - return nil, err - } - - // If we have no state, we have to create an empty state - if v := stateMgr.State(); v == nil { - if err := stateMgr.WriteState(terraform.NewState()); err != nil { - err = lockUnlock(err) - return nil, err - } - if err := stateMgr.PersistState(); err != nil { - err = lockUnlock(err) - return nil, err - } - } - - // Unlock, the state should now be initialized - if err := lockUnlock(nil); err != nil { - return nil, err - } - - } - - return stateMgr, nil -} - -func (b *Backend) client() *RemoteClient { - return &RemoteClient{} -} - -func (b *Backend) path(name string) string { - if name == backend.DefaultStateName { - return b.keyName - } - - return b.keyName + keyEnvPrefix + name -} - -const errStateUnlock = ` -Error unlocking Azure state. Lock ID: %s - -Error: %s - -You may have to force-unlock this state in order to use it again. 
-` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/client.go deleted file mode 100644 index 52999579b81..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/client.go +++ /dev/null @@ -1,271 +0,0 @@ -package azure - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "log" - - "encoding/base64" - "github.com/Azure/azure-sdk-for-go/storage" - multierror "github.com/hashicorp/go-multierror" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "github.com/hashicorp/terraform/terraform" -) - -const ( - leaseHeader = "x-ms-lease-id" - // Must be lower case - lockInfoMetaKey = "terraformlockid" -) - -type RemoteClient struct { - blobClient storage.BlobStorageClient - containerName string - keyName string - leaseID string -} - -func (c *RemoteClient) Get() (*remote.Payload, error) { - containerReference := c.blobClient.GetContainerReference(c.containerName) - blobReference := containerReference.GetBlobReference(c.keyName) - options := &storage.GetBlobOptions{} - - if c.leaseID != "" { - options.LeaseID = c.leaseID - } - - blob, err := blobReference.Get(options) - if err != nil { - if storErr, ok := err.(storage.AzureStorageServiceError); ok { - if storErr.Code == "BlobNotFound" { - return nil, nil - } - } - return nil, err - } - - defer blob.Close() - - buf := bytes.NewBuffer(nil) - if _, err := io.Copy(buf, blob); err != nil { - return nil, fmt.Errorf("Failed to read remote state: %s", err) - } - - payload := &remote.Payload{ - Data: buf.Bytes(), - } - - // If there was no data, then return nil - if len(payload.Data) == 0 { - return nil, nil - } - - return payload, nil -} - -func (c *RemoteClient) Put(data []byte) error { - getOptions := &storage.GetBlobMetadataOptions{} - setOptions := &storage.SetBlobPropertiesOptions{} - putOptions := 
&storage.PutBlobOptions{} - - containerReference := c.blobClient.GetContainerReference(c.containerName) - blobReference := containerReference.GetBlobReference(c.keyName) - - blobReference.Properties.ContentType = "application/json" - blobReference.Properties.ContentLength = int64(len(data)) - - if c.leaseID != "" { - getOptions.LeaseID = c.leaseID - setOptions.LeaseID = c.leaseID - putOptions.LeaseID = c.leaseID - } - - exists, err := blobReference.Exists() - if err != nil { - return err - } - - if exists { - err = blobReference.GetMetadata(getOptions) - if err != nil { - return err - } - } - - reader := bytes.NewReader(data) - - err = blobReference.CreateBlockBlobFromReader(reader, putOptions) - if err != nil { - return err - } - - return blobReference.SetProperties(setOptions) -} - -func (c *RemoteClient) Delete() error { - containerReference := c.blobClient.GetContainerReference(c.containerName) - blobReference := containerReference.GetBlobReference(c.keyName) - options := &storage.DeleteBlobOptions{} - - if c.leaseID != "" { - options.LeaseID = c.leaseID - } - - return blobReference.Delete(options) -} - -func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { - stateName := fmt.Sprintf("%s/%s", c.containerName, c.keyName) - info.Path = stateName - - if info.ID == "" { - lockID, err := uuid.GenerateUUID() - if err != nil { - return "", err - } - - info.ID = lockID - } - - getLockInfoErr := func(err error) error { - lockInfo, infoErr := c.getLockInfo() - if infoErr != nil { - err = multierror.Append(err, infoErr) - } - - return &state.LockError{ - Err: err, - Info: lockInfo, - } - } - - containerReference := c.blobClient.GetContainerReference(c.containerName) - blobReference := containerReference.GetBlobReference(c.keyName) - leaseID, err := blobReference.AcquireLease(-1, info.ID, &storage.LeaseOptions{}) - if err != nil { - if storErr, ok := err.(storage.AzureStorageServiceError); ok && storErr.Code != "BlobNotFound" { - return "", 
getLockInfoErr(err) - } - - // failed to lock as there was no state blob, write empty state - stateMgr := &remote.State{Client: c} - - // ensure state is actually empty - if err := stateMgr.RefreshState(); err != nil { - return "", fmt.Errorf("Failed to refresh state before writing empty state for locking: %s", err) - } - - log.Print("[DEBUG] Could not lock as state blob did not exist, creating with empty state") - - if v := stateMgr.State(); v == nil { - if err := stateMgr.WriteState(terraform.NewState()); err != nil { - return "", fmt.Errorf("Failed to write empty state for locking: %s", err) - } - if err := stateMgr.PersistState(); err != nil { - return "", fmt.Errorf("Failed to persist empty state for locking: %s", err) - } - } - - leaseID, err = blobReference.AcquireLease(-1, info.ID, &storage.LeaseOptions{}) - if err != nil { - return "", getLockInfoErr(err) - } - } - - info.ID = leaseID - c.leaseID = leaseID - - if err := c.writeLockInfo(info); err != nil { - return "", err - } - - return info.ID, nil -} - -func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { - containerReference := c.blobClient.GetContainerReference(c.containerName) - blobReference := containerReference.GetBlobReference(c.keyName) - err := blobReference.GetMetadata(&storage.GetBlobMetadataOptions{}) - if err != nil { - return nil, err - } - - raw := blobReference.Metadata[lockInfoMetaKey] - if raw == "" { - return nil, fmt.Errorf("blob metadata %q was empty", lockInfoMetaKey) - } - - data, err := base64.StdEncoding.DecodeString(raw) - if err != nil { - return nil, err - } - - lockInfo := &state.LockInfo{} - err = json.Unmarshal(data, lockInfo) - if err != nil { - return nil, err - } - - return lockInfo, nil -} - -// writes info to blob meta data, deletes metadata entry if info is nil -func (c *RemoteClient) writeLockInfo(info *state.LockInfo) error { - containerReference := c.blobClient.GetContainerReference(c.containerName) - blobReference := 
containerReference.GetBlobReference(c.keyName) - err := blobReference.GetMetadata(&storage.GetBlobMetadataOptions{ - LeaseID: c.leaseID, - }) - if err != nil { - return err - } - - if info == nil { - delete(blobReference.Metadata, lockInfoMetaKey) - } else { - value := base64.StdEncoding.EncodeToString(info.Marshal()) - blobReference.Metadata[lockInfoMetaKey] = value - } - - opts := &storage.SetBlobMetadataOptions{ - LeaseID: c.leaseID, - } - return blobReference.SetMetadata(opts) -} - -func (c *RemoteClient) Unlock(id string) error { - lockErr := &state.LockError{} - - lockInfo, err := c.getLockInfo() - if err != nil { - lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) - return lockErr - } - lockErr.Info = lockInfo - - if lockInfo.ID != id { - lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) - return lockErr - } - - if err := c.writeLockInfo(nil); err != nil { - lockErr.Err = fmt.Errorf("failed to delete lock info from metadata: %s", err) - return lockErr - } - - containerReference := c.blobClient.GetContainerReference(c.containerName) - blobReference := containerReference.GetBlobReference(c.keyName) - err = blobReference.ReleaseLease(id, &storage.LeaseOptions{}) - if err != nil { - lockErr.Err = err - return lockErr - } - - c.leaseID = "" - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend.go deleted file mode 100644 index 271a60b6341..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend.go +++ /dev/null @@ -1,180 +0,0 @@ -package consul - -import ( - "context" - "net" - "strings" - "time" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" -) - -// New creates a new backend for Consul remote state. 
-func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Path to store state in Consul", - }, - - "access_token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Access token for a Consul ACL", - Default: "", // To prevent input - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Address to the Consul Cluster", - Default: "", // To prevent input - }, - - "scheme": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Scheme to communicate to Consul with", - Default: "", // To prevent input - }, - - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Datacenter to communicate with", - Default: "", // To prevent input - }, - - "http_auth": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "HTTP Auth in the format of 'username:password'", - Default: "", // To prevent input - }, - - "gzip": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Description: "Compress the state data using gzip", - Default: false, - }, - - "lock": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Description: "Lock state access", - Default: true, - }, - - "ca_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "A path to a PEM-encoded certificate authority used to verify the remote agent's certificate.", - DefaultFunc: schema.EnvDefaultFunc("CONSUL_CACERT", ""), - }, - - "cert_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "A path to a PEM-encoded certificate provided to the remote agent; requires use of key_file.", - DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_CERT", ""), - }, - - "key_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "A path to a PEM-encoded private key, required if 
cert_file is specified.", - DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_KEY", ""), - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -type Backend struct { - *schema.Backend - - // The fields below are set from configure - client *consulapi.Client - configData *schema.ResourceData - lock bool -} - -func (b *Backend) configure(ctx context.Context) error { - // Grab the resource data - b.configData = schema.FromContextBackendConfig(ctx) - - // Store the lock information - b.lock = b.configData.Get("lock").(bool) - - data := b.configData - - // Configure the client - config := consulapi.DefaultConfig() - - // replace the default Transport Dialer to reduce the KeepAlive - config.Transport.DialContext = dialContext - - if v, ok := data.GetOk("access_token"); ok && v.(string) != "" { - config.Token = v.(string) - } - if v, ok := data.GetOk("address"); ok && v.(string) != "" { - config.Address = v.(string) - } - if v, ok := data.GetOk("scheme"); ok && v.(string) != "" { - config.Scheme = v.(string) - } - if v, ok := data.GetOk("datacenter"); ok && v.(string) != "" { - config.Datacenter = v.(string) - } - - if v, ok := data.GetOk("ca_file"); ok && v.(string) != "" { - config.TLSConfig.CAFile = v.(string) - } - if v, ok := data.GetOk("cert_file"); ok && v.(string) != "" { - config.TLSConfig.CertFile = v.(string) - } - if v, ok := data.GetOk("key_file"); ok && v.(string) != "" { - config.TLSConfig.KeyFile = v.(string) - } - - if v, ok := data.GetOk("http_auth"); ok && v.(string) != "" { - auth := v.(string) - - var username, password string - if strings.Contains(auth, ":") { - split := strings.SplitN(auth, ":", 2) - username = split[0] - password = split[1] - } else { - username = auth - } - - config.HttpAuth = &consulapi.HttpBasicAuth{ - Username: username, - Password: password, - } - } - - client, err := consulapi.NewClient(config) - if err != nil { - return err - } - - b.client = client - return 
nil -} - -// dialContext is the DialContext function for the consul client transport. -// This is stored in a package var to inject a different dialer for tests. -var dialContext = (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 17 * time.Second, -}).DialContext diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend_state.go deleted file mode 100644 index 95010aa0e6a..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend_state.go +++ /dev/null @@ -1,155 +0,0 @@ -package consul - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "github.com/hashicorp/terraform/terraform" -) - -const ( - keyEnvPrefix = "-env:" -) - -func (b *Backend) States() ([]string, error) { - // List our raw path - prefix := b.configData.Get("path").(string) + keyEnvPrefix - keys, _, err := b.client.KV().Keys(prefix, "/", nil) - if err != nil { - return nil, err - } - - // Find the envs, we use a map since we can get duplicates with - // path suffixes. - envs := map[string]struct{}{} - for _, key := range keys { - // Consul should ensure this but it doesn't hurt to check again - if strings.HasPrefix(key, prefix) { - key = strings.TrimPrefix(key, prefix) - - // Ignore anything with a "/" in it since we store the state - // directly in a key not a directory. 
- if idx := strings.IndexRune(key, '/'); idx >= 0 { - continue - } - - envs[key] = struct{}{} - } - } - - result := make([]string, 1, len(envs)+1) - result[0] = backend.DefaultStateName - for k, _ := range envs { - result = append(result, k) - } - - return result, nil -} - -func (b *Backend) DeleteState(name string) error { - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - // Determine the path of the data - path := b.path(name) - - // Delete it. We just delete it without any locking since - // the DeleteState API is documented as such. - _, err := b.client.KV().Delete(path, nil) - return err -} - -func (b *Backend) State(name string) (state.State, error) { - // Determine the path of the data - path := b.path(name) - - // Determine whether to gzip or not - gzip := b.configData.Get("gzip").(bool) - - // Build the state client - var stateMgr state.State = &remote.State{ - Client: &RemoteClient{ - Client: b.client, - Path: path, - GZip: gzip, - lockState: b.lock, - }, - } - - // If we're not locking, disable it - if !b.lock { - stateMgr = &state.LockDisabled{Inner: stateMgr} - } - - // the default state always exists - if name == backend.DefaultStateName { - return stateMgr, nil - } - - // Grab a lock, we use this to write an empty state if one doesn't - // exist already. We have to write an empty state as a sentinel value - // so States() knows it exists. 
- lockInfo := state.NewLockInfo() - lockInfo.Operation = "init" - lockId, err := stateMgr.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("failed to lock state in Consul: %s", err) - } - - // Local helper function so we can call it multiple places - lockUnlock := func(parent error) error { - if err := stateMgr.Unlock(lockId); err != nil { - return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) - } - - return parent - } - - // Grab the value - if err := stateMgr.RefreshState(); err != nil { - err = lockUnlock(err) - return nil, err - } - - // If we have no state, we have to create an empty state - if v := stateMgr.State(); v == nil { - if err := stateMgr.WriteState(terraform.NewState()); err != nil { - err = lockUnlock(err) - return nil, err - } - if err := stateMgr.PersistState(); err != nil { - err = lockUnlock(err) - return nil, err - } - } - - // Unlock, the state should now be initialized - if err := lockUnlock(nil); err != nil { - return nil, err - } - - return stateMgr, nil -} - -func (b *Backend) path(name string) string { - path := b.configData.Get("path").(string) - if name != backend.DefaultStateName { - path += fmt.Sprintf("%s%s", keyEnvPrefix, name) - } - - return path -} - -const errStateUnlock = ` -Error unlocking Consul state. Lock ID: %s - -Error: %s - -You may have to force-unlock this state in order to use it again. -The Consul backend acquires a lock during initialization to ensure -the minimum required key/values are prepared. 
-` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/client.go deleted file mode 100644 index bd37712f3fe..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/client.go +++ /dev/null @@ -1,468 +0,0 @@ -package consul - -import ( - "bytes" - "compress/gzip" - "context" - "crypto/md5" - "encoding/json" - "errors" - "fmt" - "log" - "sync" - "time" - - consulapi "github.com/hashicorp/consul/api" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" -) - -const ( - lockSuffix = "/.lock" - lockInfoSuffix = "/.lockinfo" - - // The Session TTL associated with this lock. - lockSessionTTL = "15s" - - // the delay time from when a session is lost to when the - // lock is released by the server - lockDelay = 5 * time.Second - // interval between attempts to reacquire a lost lock - lockReacquireInterval = 2 * time.Second -) - -var lostLockErr = errors.New("consul lock was lost") - -// RemoteClient is a remote client that stores data in Consul. -type RemoteClient struct { - Client *consulapi.Client - Path string - GZip bool - - mu sync.Mutex - // lockState is true if we're using locks - lockState bool - - // The index of the last state we wrote. - // If this is > 0, Put will perform a CAS to ensure that the state wasn't - // changed during the operation. This is important even with locks, because - // if the client loses the lock for some reason, then reacquires it, we - // need to make sure that the state was not modified. - modifyIndex uint64 - - consulLock *consulapi.Lock - lockCh <-chan struct{} - - info *state.LockInfo - - // cancel our goroutine which is monitoring the lock to automatically - // reacquire it when possible. 
- monitorCancel context.CancelFunc - monitorWG sync.WaitGroup - - // sessionCancel cancels the Context use for session.RenewPeriodic, and is - // called when unlocking, or before creating a new lock if the lock is - // lost. - sessionCancel context.CancelFunc -} - -func (c *RemoteClient) Get() (*remote.Payload, error) { - c.mu.Lock() - defer c.mu.Unlock() - - pair, _, err := c.Client.KV().Get(c.Path, nil) - if err != nil { - return nil, err - } - if pair == nil { - return nil, nil - } - - c.modifyIndex = pair.ModifyIndex - - payload := pair.Value - // If the payload starts with 0x1f, it's gzip, not json - if len(pair.Value) >= 1 && pair.Value[0] == '\x1f' { - if data, err := uncompressState(pair.Value); err == nil { - payload = data - } else { - return nil, err - } - } - - md5 := md5.Sum(pair.Value) - return &remote.Payload{ - Data: payload, - MD5: md5[:], - }, nil -} - -func (c *RemoteClient) Put(data []byte) error { - c.mu.Lock() - defer c.mu.Unlock() - - payload := data - if c.GZip { - if compressedState, err := compressState(data); err == nil { - payload = compressedState - } else { - return err - } - } - - kv := c.Client.KV() - - // default to doing a CAS - verb := consulapi.KVCAS - - // Assume a 0 index doesn't need a CAS for now, since we are either - // creating a new state or purposely overwriting one. - if c.modifyIndex == 0 { - verb = consulapi.KVSet - } - - // KV.Put doesn't return the new index, so we use a single operation - // transaction to get the new index with a single request. 
- txOps := consulapi.KVTxnOps{ - &consulapi.KVTxnOp{ - Verb: verb, - Key: c.Path, - Value: payload, - Index: c.modifyIndex, - }, - } - - ok, resp, _, err := kv.Txn(txOps, nil) - if err != nil { - return err - } - - // transaction was rolled back - if !ok { - return fmt.Errorf("consul CAS failed with transaction errors: %v", resp.Errors) - } - - if len(resp.Results) != 1 { - // this probably shouldn't happen - return fmt.Errorf("expected on 1 response value, got: %d", len(resp.Results)) - } - - c.modifyIndex = resp.Results[0].ModifyIndex - return nil -} - -func (c *RemoteClient) Delete() error { - c.mu.Lock() - defer c.mu.Unlock() - - kv := c.Client.KV() - _, err := kv.Delete(c.Path, nil) - return err -} - -func (c *RemoteClient) putLockInfo(info *state.LockInfo) error { - info.Path = c.Path - info.Created = time.Now().UTC() - - kv := c.Client.KV() - _, err := kv.Put(&consulapi.KVPair{ - Key: c.Path + lockInfoSuffix, - Value: info.Marshal(), - }, nil) - - return err -} - -func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { - path := c.Path + lockInfoSuffix - pair, _, err := c.Client.KV().Get(path, nil) - if err != nil { - return nil, err - } - if pair == nil { - return nil, nil - } - - li := &state.LockInfo{} - err = json.Unmarshal(pair.Value, li) - if err != nil { - return nil, fmt.Errorf("error unmarshaling lock info: %s", err) - } - - return li, nil -} - -func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { - c.mu.Lock() - defer c.mu.Unlock() - - if !c.lockState { - return "", nil - } - - c.info = info - - // These checks only are to ensure we strictly follow the specification. - // Terraform shouldn't ever re-lock, so provide errors for the 2 possible - // states if this is called. - select { - case <-c.lockCh: - // We had a lock, but lost it. 
- return "", errors.New("lost consul lock, cannot re-lock") - default: - if c.lockCh != nil { - // we have an active lock already - return "", fmt.Errorf("state %q already locked", c.Path) - } - } - - return c.lock() -} - -// the lock implementation. -// Only to be called while holding Client.mu -func (c *RemoteClient) lock() (string, error) { - // We create a new session here, so it can be canceled when the lock is - // lost or unlocked. - lockSession, err := c.createSession() - if err != nil { - return "", err - } - - // store the session ID for correlation with consul logs - c.info.Info = "consul session: " + lockSession - - opts := &consulapi.LockOptions{ - Key: c.Path + lockSuffix, - Session: lockSession, - - // only wait briefly, so terraform has the choice to fail fast or - // retry as needed. - LockWaitTime: time.Second, - LockTryOnce: true, - - // Don't let the lock monitor give up right away, as it's possible the - // session is still OK. While the session is refreshed at a rate of - // TTL/2, the lock monitor is an idle blocking request and is more - // susceptible to being closed by a lower network layer. - MonitorRetries: 5, - // - // The delay between lock monitor retries. - // While the session has a 15s TTL plus a 5s wait period on a lost - // lock, if we can't get our lock back in 10+ seconds something is - // wrong so we're going to drop the session and start over. 
- MonitorRetryTime: 2 * time.Second, - } - - c.consulLock, err = c.Client.LockOpts(opts) - if err != nil { - return "", err - } - - lockErr := &state.LockError{} - - lockCh, err := c.consulLock.Lock(make(chan struct{})) - if err != nil { - lockErr.Err = err - return "", lockErr - } - - if lockCh == nil { - lockInfo, e := c.getLockInfo() - if e != nil { - lockErr.Err = e - return "", lockErr - } - - lockErr.Info = lockInfo - - return "", lockErr - } - - c.lockCh = lockCh - - err = c.putLockInfo(c.info) - if err != nil { - if unlockErr := c.unlock(c.info.ID); unlockErr != nil { - err = multierror.Append(err, unlockErr) - } - - return "", err - } - - // Start a goroutine to monitor the lock state. - // If we lose the lock to due communication issues with the consul agent, - // attempt to immediately reacquire the lock. Put will verify the integrity - // of the state by using a CAS operation. - ctx, cancel := context.WithCancel(context.Background()) - c.monitorCancel = cancel - c.monitorWG.Add(1) - go func() { - defer c.monitorWG.Done() - select { - case <-c.lockCh: - log.Println("[ERROR] lost consul lock") - for { - c.mu.Lock() - // We lost our lock, so we need to cancel the session too. - // The CancelFunc is only replaced while holding Client.mu, so - // this is safe to call here. This will be replaced by the - // lock() call below. - c.sessionCancel() - - c.consulLock = nil - _, err := c.lock() - c.mu.Unlock() - - if err != nil { - // We failed to get the lock, keep trying as long as - // terraform is running. There may be changes in progress, - // so there's no use in aborting. Either we eventually - // reacquire the lock, or a Put will fail on a CAS. - log.Printf("[ERROR] could not reacquire lock: %s", err) - time.Sleep(lockReacquireInterval) - - select { - case <-ctx.Done(): - return - default: - } - continue - } - - // if the error was nil, the new lock started a new copy of - // this goroutine. 
- return - } - - case <-ctx.Done(): - return - } - }() - - if testLockHook != nil { - testLockHook() - } - - return c.info.ID, nil -} - -// called after a lock is acquired -var testLockHook func() - -func (c *RemoteClient) createSession() (string, error) { - // create the context first. Even if the session creation fails, we assume - // that the CancelFunc is always callable. - ctx, cancel := context.WithCancel(context.Background()) - c.sessionCancel = cancel - - session := c.Client.Session() - se := &consulapi.SessionEntry{ - Name: consulapi.DefaultLockSessionName, - TTL: lockSessionTTL, - LockDelay: lockDelay, - } - - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - - log.Println("[INFO] created consul lock session", id) - - // keep the session renewed - go session.RenewPeriodic(lockSessionTTL, id, nil, ctx.Done()) - - return id, nil -} - -func (c *RemoteClient) Unlock(id string) error { - c.mu.Lock() - defer c.mu.Unlock() - - if !c.lockState { - return nil - } - - return c.unlock(id) -} - -// the unlock implementation. -// Only to be called while holding Client.mu -func (c *RemoteClient) unlock(id string) error { - // this doesn't use the lock id, because the lock is tied to the consul client. - if c.consulLock == nil || c.lockCh == nil { - return nil - } - - // cancel our monitoring goroutine - c.monitorCancel() - - defer func() { - c.consulLock = nil - - // The consul session is only used for this single lock, so cancel it - // after we unlock. - // The session is only created and replaced holding Client.mu, so the - // CancelFunc must be non-nil. 
- c.sessionCancel() - }() - - select { - case <-c.lockCh: - return lostLockErr - default: - } - - kv := c.Client.KV() - - var errs error - - if _, err := kv.Delete(c.Path+lockInfoSuffix, nil); err != nil { - errs = multierror.Append(errs, err) - } - - if err := c.consulLock.Unlock(); err != nil { - errs = multierror.Append(errs, err) - } - - // the monitoring goroutine may be in a select on the lockCh, so we need to - // wait for it to return before changing the value. - c.monitorWG.Wait() - c.lockCh = nil - - // This is only cleanup, and will fail if the lock was immediately taken by - // another client, so we don't report an error to the user here. - c.consulLock.Destroy() - - return errs -} - -func compressState(data []byte) ([]byte, error) { - b := new(bytes.Buffer) - gz := gzip.NewWriter(b) - if _, err := gz.Write(data); err != nil { - return nil, err - } - if err := gz.Flush(); err != nil { - return nil, err - } - if err := gz.Close(); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -func uncompressState(data []byte) ([]byte, error) { - b := new(bytes.Buffer) - gz, err := gzip.NewReader(bytes.NewReader(data)) - if err != nil { - return nil, err - } - b.ReadFrom(gz) - if err := gz.Close(); err != nil { - return nil, err - } - return b.Bytes(), nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend.go deleted file mode 100644 index fb3f5e20295..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend.go +++ /dev/null @@ -1,157 +0,0 @@ -package etcd - -import ( - "context" - - etcdv3 "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/pkg/transport" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - endpointsKey = "endpoints" - usernameKey = "username" - usernameEnvVarName = "ETCDV3_USERNAME" - passwordKey = "password" - 
passwordEnvVarName = "ETCDV3_PASSWORD" - prefixKey = "prefix" - lockKey = "lock" - cacertPathKey = "cacert_path" - certPathKey = "cert_path" - keyPathKey = "key_path" -) - -func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - endpointsKey: &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - MinItems: 1, - Required: true, - Description: "Endpoints for the etcd cluster.", - }, - - usernameKey: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Username used to connect to the etcd cluster.", - DefaultFunc: schema.EnvDefaultFunc(usernameEnvVarName, ""), - }, - - passwordKey: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Password used to connect to the etcd cluster.", - DefaultFunc: schema.EnvDefaultFunc(passwordEnvVarName, ""), - }, - - prefixKey: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "An optional prefix to be added to keys when to storing state in etcd.", - Default: "", - }, - - lockKey: &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Description: "Whether to lock state access.", - Default: true, - }, - - cacertPathKey: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The path to a PEM-encoded CA bundle with which to verify certificates of TLS-enabled etcd servers.", - Default: "", - }, - - certPathKey: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The path to a PEM-encoded certificate to provide to etcd for secure client identification.", - Default: "", - }, - - keyPathKey: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The path to a PEM-encoded key to provide to etcd for secure client identification.", - Default: "", - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -type Backend struct { - *schema.Backend - - // The 
fields below are set from configure. - client *etcdv3.Client - data *schema.ResourceData - lock bool - prefix string -} - -func (b *Backend) configure(ctx context.Context) error { - var err error - // Grab the resource data. - b.data = schema.FromContextBackendConfig(ctx) - // Store the lock information. - b.lock = b.data.Get(lockKey).(bool) - // Store the prefix information. - b.prefix = b.data.Get(prefixKey).(string) - // Initialize a client to test config. - b.client, err = b.rawClient() - // Return err, if any. - return err -} - -func (b *Backend) rawClient() (*etcdv3.Client, error) { - config := etcdv3.Config{} - tlsInfo := transport.TLSInfo{} - - if v, ok := b.data.GetOk(endpointsKey); ok { - config.Endpoints = retrieveEndpoints(v) - } - if v, ok := b.data.GetOk(usernameKey); ok && v.(string) != "" { - config.Username = v.(string) - } - if v, ok := b.data.GetOk(passwordKey); ok && v.(string) != "" { - config.Password = v.(string) - } - if v, ok := b.data.GetOk(cacertPathKey); ok && v.(string) != "" { - tlsInfo.TrustedCAFile = v.(string) - } - if v, ok := b.data.GetOk(certPathKey); ok && v.(string) != "" { - tlsInfo.CertFile = v.(string) - } - if v, ok := b.data.GetOk(keyPathKey); ok && v.(string) != "" { - tlsInfo.KeyFile = v.(string) - } - - if tlsCfg, err := tlsInfo.ClientConfig(); err != nil { - return nil, err - } else if !tlsInfo.Empty() { - config.TLS = tlsCfg // Assign TLS configuration only if it valid and non-empty. 
- } - - return etcdv3.New(config) -} - -func retrieveEndpoints(v interface{}) []string { - var endpoints []string - list := v.([]interface{}) - for _, ep := range list { - endpoints = append(endpoints, ep.(string)) - } - return endpoints -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend_state.go deleted file mode 100644 index 4c9f78a9b51..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend_state.go +++ /dev/null @@ -1,103 +0,0 @@ -package etcd - -import ( - "context" - "fmt" - "sort" - "strings" - - etcdv3 "github.com/coreos/etcd/clientv3" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "github.com/hashicorp/terraform/terraform" -) - -func (b *Backend) States() ([]string, error) { - res, err := b.client.Get(context.TODO(), b.prefix, etcdv3.WithPrefix(), etcdv3.WithKeysOnly()) - if err != nil { - return nil, err - } - - result := make([]string, 1, len(res.Kvs)+1) - result[0] = backend.DefaultStateName - for _, kv := range res.Kvs { - result = append(result, strings.TrimPrefix(string(kv.Key), b.prefix)) - } - sort.Strings(result[1:]) - - return result, nil -} - -func (b *Backend) DeleteState(name string) error { - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("Can't delete default state.") - } - - key := b.determineKey(name) - - _, err := b.client.Delete(context.TODO(), key) - return err -} - -func (b *Backend) State(name string) (state.State, error) { - var stateMgr state.State = &remote.State{ - Client: &RemoteClient{ - Client: b.client, - DoLock: b.lock, - Key: b.determineKey(name), - }, - } - - if !b.lock { - stateMgr = &state.LockDisabled{Inner: stateMgr} - } - - lockInfo := state.NewLockInfo() - lockInfo.Operation = "init" - lockId, err := stateMgr.Lock(lockInfo) - if err != nil { - return 
nil, fmt.Errorf("Failed to lock state in etcd: %s.", err) - } - - lockUnlock := func(parent error) error { - if err := stateMgr.Unlock(lockId); err != nil { - return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) - } - return parent - } - - if err := stateMgr.RefreshState(); err != nil { - err = lockUnlock(err) - return nil, err - } - - if v := stateMgr.State(); v == nil { - if err := stateMgr.WriteState(terraform.NewState()); err != nil { - err = lockUnlock(err) - return nil, err - } - if err := stateMgr.PersistState(); err != nil { - err = lockUnlock(err) - return nil, err - } - } - - if err := lockUnlock(nil); err != nil { - return nil, err - } - - return stateMgr, nil -} - -func (b *Backend) determineKey(name string) string { - return b.prefix + name -} - -const errStateUnlock = ` -Error unlocking etcd state. Lock ID: %s - -Error: %s - -You may have to force-unlock this state in order to use it again. -` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/client.go deleted file mode 100644 index 155e8d8c16c..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/client.go +++ /dev/null @@ -1,211 +0,0 @@ -package etcd - -import ( - "context" - "crypto/md5" - "encoding/json" - "fmt" - "sync" - "time" - - etcdv3 "github.com/coreos/etcd/clientv3" - etcdv3sync "github.com/coreos/etcd/clientv3/concurrency" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" -) - -const ( - lockAcquireTimeout = 2 * time.Second - lockInfoSuffix = ".lockinfo" -) - -// RemoteClient is a remote client that will store data in etcd. 
-type RemoteClient struct { - Client *etcdv3.Client - DoLock bool - Key string - - etcdMutex *etcdv3sync.Mutex - etcdSession *etcdv3sync.Session - info *state.LockInfo - mu sync.Mutex - modRevision int64 -} - -func (c *RemoteClient) Get() (*remote.Payload, error) { - c.mu.Lock() - defer c.mu.Unlock() - - res, err := c.Client.KV.Get(context.TODO(), c.Key) - if err != nil { - return nil, err - } - if res.Count == 0 { - return nil, nil - } - if res.Count >= 2 { - return nil, fmt.Errorf("Expected a single result but got %d.", res.Count) - } - - c.modRevision = res.Kvs[0].ModRevision - - payload := res.Kvs[0].Value - md5 := md5.Sum(payload) - - return &remote.Payload{ - Data: payload, - MD5: md5[:], - }, nil -} - -func (c *RemoteClient) Put(data []byte) error { - c.mu.Lock() - defer c.mu.Unlock() - - res, err := etcdv3.NewKV(c.Client).Txn(context.TODO()).If( - etcdv3.Compare(etcdv3.ModRevision(c.Key), "=", c.modRevision), - ).Then( - etcdv3.OpPut(c.Key, string(data)), - etcdv3.OpGet(c.Key), - ).Commit() - - if err != nil { - return err - } - if !res.Succeeded { - return fmt.Errorf("The transaction did not succeed.") - } - if len(res.Responses) != 2 { - return fmt.Errorf("Expected two responses but got %d.", len(res.Responses)) - } - - c.modRevision = res.Responses[1].GetResponseRange().Kvs[0].ModRevision - return nil -} - -func (c *RemoteClient) Delete() error { - c.mu.Lock() - defer c.mu.Unlock() - - _, err := c.Client.KV.Delete(context.TODO(), c.Key) - return err -} - -func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { - c.mu.Lock() - defer c.mu.Unlock() - - if !c.DoLock { - return "", nil - } - if c.etcdSession != nil { - return "", fmt.Errorf("state %q already locked", c.Key) - } - - c.info = info - return c.lock() -} - -func (c *RemoteClient) Unlock(id string) error { - c.mu.Lock() - defer c.mu.Unlock() - - if !c.DoLock { - return nil - } - - return c.unlock(id) -} - -func (c *RemoteClient) deleteLockInfo(info *state.LockInfo) error { - res, err := 
c.Client.KV.Delete(context.TODO(), c.Key+lockInfoSuffix) - if err != nil { - return err - } - if res.Deleted == 0 { - return fmt.Errorf("No keys deleted for %s when deleting lock info.", c.Key+lockInfoSuffix) - } - return nil -} - -func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { - res, err := c.Client.KV.Get(context.TODO(), c.Key+lockInfoSuffix) - if err != nil { - return nil, err - } - if res.Count == 0 { - return nil, nil - } - - li := &state.LockInfo{} - err = json.Unmarshal(res.Kvs[0].Value, li) - if err != nil { - return nil, fmt.Errorf("Error unmarshaling lock info: %s.", err) - } - - return li, nil -} - -func (c *RemoteClient) putLockInfo(info *state.LockInfo) error { - c.info.Path = c.etcdMutex.Key() - c.info.Created = time.Now().UTC() - - _, err := c.Client.KV.Put(context.TODO(), c.Key+lockInfoSuffix, string(c.info.Marshal())) - return err -} - -func (c *RemoteClient) lock() (string, error) { - session, err := etcdv3sync.NewSession(c.Client) - if err != nil { - return "", nil - } - - ctx, cancel := context.WithTimeout(context.TODO(), lockAcquireTimeout) - defer cancel() - - mutex := etcdv3sync.NewMutex(session, c.Key) - if err1 := mutex.Lock(ctx); err1 != nil { - lockInfo, err2 := c.getLockInfo() - if err2 != nil { - return "", &state.LockError{Err: err2} - } - return "", &state.LockError{Info: lockInfo, Err: err1} - } - - c.etcdMutex = mutex - c.etcdSession = session - - err = c.putLockInfo(c.info) - if err != nil { - if unlockErr := c.unlock(c.info.ID); unlockErr != nil { - err = multierror.Append(err, unlockErr) - } - return "", err - } - - return c.info.ID, nil -} - -func (c *RemoteClient) unlock(id string) error { - if c.etcdMutex == nil { - return nil - } - - var errs error - - if err := c.deleteLockInfo(c.info); err != nil { - errs = multierror.Append(errs, err) - } - if err := c.etcdMutex.Unlock(context.TODO()); err != nil { - errs = multierror.Append(errs, err) - } - if err := c.etcdSession.Close(); err != nil { - errs = 
multierror.Append(errs, err) - } - - c.etcdMutex = nil - c.etcdSession = nil - - return errs -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go deleted file mode 100644 index 12e8d43ed7f..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go +++ /dev/null @@ -1,166 +0,0 @@ -// Package gcs implements remote storage of state on Google Cloud Storage (GCS). -package gcs - -import ( - "context" - "encoding/json" - "fmt" - "os" - "strings" - - "cloud.google.com/go/storage" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/pathorcontents" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "golang.org/x/oauth2/jwt" - "google.golang.org/api/option" -) - -// gcsBackend implements "backend".Backend for GCS. -// Input(), Validate() and Configure() are implemented by embedding *schema.Backend. -// State(), DeleteState() and States() are implemented explicitly. 
-type gcsBackend struct { - *schema.Backend - - storageClient *storage.Client - storageContext context.Context - - bucketName string - prefix string - defaultStateFile string - - projectID string - region string -} - -func New() backend.Backend { - be := &gcsBackend{} - be.Backend = &schema.Backend{ - ConfigureFunc: be.configure, - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "The name of the Google Cloud Storage bucket", - }, - - "path": { - Type: schema.TypeString, - Optional: true, - Description: "Path of the default state file", - Deprecated: "Use the \"prefix\" option instead", - }, - - "prefix": { - Type: schema.TypeString, - Optional: true, - Description: "The directory where state files will be saved inside the bucket", - }, - - "credentials": { - Type: schema.TypeString, - Optional: true, - Description: "Google Cloud JSON Account Key", - Default: "", - }, - - "project": { - Type: schema.TypeString, - Optional: true, - Description: "Google Cloud Project ID", - Default: "", - }, - - "region": { - Type: schema.TypeString, - Optional: true, - Description: "Region / location in which to create the bucket", - Default: "", - }, - }, - } - - return be -} - -func (b *gcsBackend) configure(ctx context.Context) error { - if b.storageClient != nil { - return nil - } - - // ctx is a background context with the backend config added. - // Since no context is passed to remoteClient.Get(), .Lock(), etc. but - // one is required for calling the GCP API, we're holding on to this - // context here and re-use it later. 
- b.storageContext = ctx - - data := schema.FromContextBackendConfig(b.storageContext) - - b.bucketName = data.Get("bucket").(string) - b.prefix = strings.TrimLeft(data.Get("prefix").(string), "/") - if b.prefix != "" && !strings.HasSuffix(b.prefix, "/") { - b.prefix = b.prefix + "/" - } - - b.defaultStateFile = strings.TrimLeft(data.Get("path").(string), "/") - - b.projectID = data.Get("project").(string) - if id := os.Getenv("GOOGLE_PROJECT"); b.projectID == "" && id != "" { - b.projectID = id - } - b.region = data.Get("region").(string) - if r := os.Getenv("GOOGLE_REGION"); b.projectID == "" && r != "" { - b.region = r - } - - var opts []option.ClientOption - - creds := data.Get("credentials").(string) - if creds == "" { - creds = os.Getenv("GOOGLE_CREDENTIALS") - } - - if creds != "" { - var account accountFile - - // to mirror how the provider works, we accept the file path or the contents - contents, _, err := pathorcontents.Read(creds) - if err != nil { - return fmt.Errorf("Error loading credentials: %s", err) - } - - if err := json.Unmarshal([]byte(contents), &account); err != nil { - return fmt.Errorf("Error parsing credentials '%s': %s", contents, err) - } - - conf := jwt.Config{ - Email: account.ClientEmail, - PrivateKey: []byte(account.PrivateKey), - Scopes: []string{storage.ScopeReadWrite}, - TokenURL: "https://accounts.google.com/o/oauth2/token", - } - - opts = append(opts, option.WithHTTPClient(conf.Client(ctx))) - } else { - opts = append(opts, option.WithScopes(storage.ScopeReadWrite)) - } - - opts = append(opts, option.WithUserAgent(terraform.UserAgentString())) - client, err := storage.NewClient(b.storageContext, opts...) - if err != nil { - return fmt.Errorf("storage.NewClient() failed: %v", err) - } - - b.storageClient = client - - return nil -} - -// accountFile represents the structure of the account file JSON file. 
-type accountFile struct { - PrivateKeyId string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - ClientEmail string `json:"client_email"` - ClientId string `json:"client_id"` -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend_state.go deleted file mode 100644 index eddcbcbac37..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend_state.go +++ /dev/null @@ -1,158 +0,0 @@ -package gcs - -import ( - "fmt" - "path" - "sort" - "strings" - - "cloud.google.com/go/storage" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/iterator" -) - -const ( - stateFileSuffix = ".tfstate" - lockFileSuffix = ".tflock" -) - -// States returns a list of names for the states found on GCS. The default -// state is always returned as the first element in the slice. -func (b *gcsBackend) States() ([]string, error) { - states := []string{backend.DefaultStateName} - - bucket := b.storageClient.Bucket(b.bucketName) - objs := bucket.Objects(b.storageContext, &storage.Query{ - Delimiter: "/", - Prefix: b.prefix, - }) - for { - attrs, err := objs.Next() - if err == iterator.Done { - break - } - if err != nil { - return nil, fmt.Errorf("querying Cloud Storage failed: %v", err) - } - - name := path.Base(attrs.Name) - if !strings.HasSuffix(name, stateFileSuffix) { - continue - } - st := strings.TrimSuffix(name, stateFileSuffix) - - if st != backend.DefaultStateName { - states = append(states, st) - } - } - - sort.Strings(states[1:]) - return states, nil -} - -// DeleteState deletes the named state. The "default" state cannot be deleted. 
-func (b *gcsBackend) DeleteState(name string) error { - if name == backend.DefaultStateName { - return fmt.Errorf("cowardly refusing to delete the %q state", name) - } - - c, err := b.client(name) - if err != nil { - return err - } - - return c.Delete() -} - -// client returns a remoteClient for the named state. -func (b *gcsBackend) client(name string) (*remoteClient, error) { - if name == "" { - return nil, fmt.Errorf("%q is not a valid state name", name) - } - - return &remoteClient{ - storageContext: b.storageContext, - storageClient: b.storageClient, - bucketName: b.bucketName, - stateFilePath: b.stateFile(name), - lockFilePath: b.lockFile(name), - }, nil -} - -// State reads and returns the named state from GCS. If the named state does -// not yet exist, a new state file is created. -func (b *gcsBackend) State(name string) (state.State, error) { - c, err := b.client(name) - if err != nil { - return nil, err - } - - st := &remote.State{Client: c} - - // Grab the value - if err := st.RefreshState(); err != nil { - return nil, err - } - - // If we have no state, we have to create an empty state - if v := st.State(); v == nil { - - lockInfo := state.NewLockInfo() - lockInfo.Operation = "init" - lockID, err := st.Lock(lockInfo) - if err != nil { - return nil, err - } - - // Local helper function so we can call it multiple places - unlock := func(baseErr error) error { - if err := st.Unlock(lockID); err != nil { - const unlockErrMsg = `%v - Additionally, unlocking the state file on Google Cloud Storage failed: - - Error message: %q - Lock ID (gen): %v - Lock file URL: %v - - You may have to force-unlock this state in order to use it again. 
- The GCloud backend acquires a lock during initialization to ensure - the initial state file is created.` - return fmt.Errorf(unlockErrMsg, baseErr, err.Error(), lockID, c.lockFileURL()) - } - - return baseErr - } - - if err := st.WriteState(terraform.NewState()); err != nil { - return nil, unlock(err) - } - if err := st.PersistState(); err != nil { - return nil, unlock(err) - } - - // Unlock, the state should now be initialized - if err := unlock(nil); err != nil { - return nil, err - } - - } - - return st, nil -} - -func (b *gcsBackend) stateFile(name string) string { - if name == backend.DefaultStateName && b.defaultStateFile != "" { - return b.defaultStateFile - } - return path.Join(b.prefix, name+stateFileSuffix) -} - -func (b *gcsBackend) lockFile(name string) string { - if name == backend.DefaultStateName && b.defaultStateFile != "" { - return strings.TrimSuffix(b.defaultStateFile, stateFileSuffix) + lockFileSuffix - } - return path.Join(b.prefix, name+lockFileSuffix) -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/client.go deleted file mode 100644 index a392c969f9b..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/client.go +++ /dev/null @@ -1,168 +0,0 @@ -package gcs - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "strconv" - - "cloud.google.com/go/storage" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "golang.org/x/net/context" -) - -// remoteClient is used by "state/remote".State to read and write -// blobs representing state. 
-// Implements "state/remote".ClientLocker -type remoteClient struct { - storageContext context.Context - storageClient *storage.Client - bucketName string - stateFilePath string - lockFilePath string -} - -func (c *remoteClient) Get() (payload *remote.Payload, err error) { - stateFileReader, err := c.stateFile().NewReader(c.storageContext) - if err != nil { - if err == storage.ErrObjectNotExist { - return nil, nil - } else { - return nil, fmt.Errorf("Failed to open state file at %v: %v", c.stateFileURL(), err) - } - } - defer stateFileReader.Close() - - stateFileContents, err := ioutil.ReadAll(stateFileReader) - if err != nil { - return nil, fmt.Errorf("Failed to read state file from %v: %v", c.stateFileURL(), err) - } - - stateFileAttrs, err := c.stateFile().Attrs(c.storageContext) - if err != nil { - return nil, fmt.Errorf("Failed to read state file attrs from %v: %v", c.stateFileURL(), err) - } - - result := &remote.Payload{ - Data: stateFileContents, - MD5: stateFileAttrs.MD5, - } - - return result, nil -} - -func (c *remoteClient) Put(data []byte) error { - err := func() error { - stateFileWriter := c.stateFile().NewWriter(c.storageContext) - if _, err := stateFileWriter.Write(data); err != nil { - return err - } - return stateFileWriter.Close() - }() - if err != nil { - return fmt.Errorf("Failed to upload state to %v: %v", c.stateFileURL(), err) - } - - return nil -} - -func (c *remoteClient) Delete() error { - if err := c.stateFile().Delete(c.storageContext); err != nil { - return fmt.Errorf("Failed to delete state file %v: %v", c.stateFileURL(), err) - } - - return nil -} - -// Lock writes to a lock file, ensuring file creation. Returns the generation -// number, which must be passed to Unlock(). 
-func (c *remoteClient) Lock(info *state.LockInfo) (string, error) { - infoJson, err := json.Marshal(info) - if err != nil { - return "", err - } - - lockFile := c.lockFile() - w := lockFile.If(storage.Conditions{DoesNotExist: true}).NewWriter(c.storageContext) - err = func() error { - if _, err := w.Write(infoJson); err != nil { - return err - } - return w.Close() - }() - if err != nil { - return "", c.lockError(fmt.Errorf("writing %q failed: %v", c.lockFileURL(), err)) - } - - info.ID = strconv.FormatInt(w.Attrs().Generation, 10) - info.Path = c.lockFileURL() - - return info.ID, nil -} - -func (c *remoteClient) Unlock(id string) error { - gen, err := strconv.ParseInt(id, 10, 64) - if err != nil { - return err - } - - if err := c.lockFile().If(storage.Conditions{GenerationMatch: gen}).Delete(c.storageContext); err != nil { - return c.lockError(err) - } - - return nil -} - -func (c *remoteClient) lockError(err error) *state.LockError { - lockErr := &state.LockError{ - Err: err, - } - - info, infoErr := c.lockInfo() - if infoErr != nil { - lockErr.Err = multierror.Append(lockErr.Err, infoErr) - } else { - lockErr.Info = info - } - return lockErr -} - -// lockInfo reads the lock file, parses its contents and returns the parsed -// LockInfo struct. 
-func (c *remoteClient) lockInfo() (*state.LockInfo, error) { - r, err := c.lockFile().NewReader(c.storageContext) - if err != nil { - return nil, err - } - defer r.Close() - - rawData, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - info := &state.LockInfo{} - if err := json.Unmarshal(rawData, info); err != nil { - return nil, err - } - - return info, nil -} - -func (c *remoteClient) stateFile() *storage.ObjectHandle { - return c.storageClient.Bucket(c.bucketName).Object(c.stateFilePath) -} - -func (c *remoteClient) stateFileURL() string { - return fmt.Sprintf("gs://%v/%v", c.bucketName, c.stateFilePath) -} - -func (c *remoteClient) lockFile() *storage.ObjectHandle { - return c.storageClient.Bucket(c.bucketName).Object(c.lockFilePath) -} - -func (c *remoteClient) lockFileURL() string { - return fmt.Sprintf("gs://%v/%v", c.bucketName, c.lockFilePath) -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/backend.go deleted file mode 100644 index 5eab8d0c6be..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/backend.go +++ /dev/null @@ -1,208 +0,0 @@ -package inmem - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - "time" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "github.com/hashicorp/terraform/terraform" -) - -// we keep the states and locks in package-level variables, so that they can be -// accessed from multiple instances of the backend. This better emulates -// backend instances accessing a single remote data store. -var ( - states stateMap - locks lockMap -) - -func init() { - Reset() -} - -// Reset clears out all existing state and lock data. -// This is used to initialize the package during init, as well as between -// tests. 
-func Reset() { - states = stateMap{ - m: map[string]*remote.State{}, - } - - locks = lockMap{ - m: map[string]*state.LockInfo{}, - } -} - -// New creates a new backend for Inmem remote state. -func New() backend.Backend { - // Set the schema - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "lock_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "initializes the state in a locked configuration", - }, - }, - } - backend := &Backend{Backend: s} - backend.Backend.ConfigureFunc = backend.configure - return backend -} - -type Backend struct { - *schema.Backend -} - -func (b *Backend) configure(ctx context.Context) error { - states.Lock() - defer states.Unlock() - - defaultClient := &RemoteClient{ - Name: backend.DefaultStateName, - } - - states.m[backend.DefaultStateName] = &remote.State{ - Client: defaultClient, - } - - // set the default client lock info per the test config - data := schema.FromContextBackendConfig(ctx) - if v, ok := data.GetOk("lock_id"); ok && v.(string) != "" { - info := state.NewLockInfo() - info.ID = v.(string) - info.Operation = "test" - info.Info = "test config" - - locks.lock(backend.DefaultStateName, info) - } - - return nil -} - -func (b *Backend) States() ([]string, error) { - states.Lock() - defer states.Unlock() - - var workspaces []string - - for s := range states.m { - workspaces = append(workspaces, s) - } - - sort.Strings(workspaces) - return workspaces, nil -} - -func (b *Backend) DeleteState(name string) error { - states.Lock() - defer states.Unlock() - - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - delete(states.m, name) - return nil -} - -func (b *Backend) State(name string) (state.State, error) { - states.Lock() - defer states.Unlock() - - s := states.m[name] - if s == nil { - s = &remote.State{ - Client: &RemoteClient{ - Name: name, - }, - } - states.m[name] = s - - // to most closely replicate other implementations, we 
are going to - // take a lock and create a new state if it doesn't exist. - lockInfo := state.NewLockInfo() - lockInfo.Operation = "init" - lockID, err := s.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("failed to lock inmem state: %s", err) - } - defer s.Unlock(lockID) - - // If we have no state, we have to create an empty state - if v := s.State(); v == nil { - if err := s.WriteState(terraform.NewState()); err != nil { - return nil, err - } - if err := s.PersistState(); err != nil { - return nil, err - } - } - } - - return s, nil -} - -type stateMap struct { - sync.Mutex - m map[string]*remote.State -} - -// Global level locks for inmem backends. -type lockMap struct { - sync.Mutex - m map[string]*state.LockInfo -} - -func (l *lockMap) lock(name string, info *state.LockInfo) (string, error) { - l.Lock() - defer l.Unlock() - - lockInfo := l.m[name] - if lockInfo != nil { - lockErr := &state.LockError{ - Info: lockInfo, - } - - lockErr.Err = errors.New("state locked") - // make a copy of the lock info to avoid any testing shenanigans - *lockErr.Info = *lockInfo - return "", lockErr - } - - info.Created = time.Now().UTC() - l.m[name] = info - - return info.ID, nil -} - -func (l *lockMap) unlock(name, id string) error { - l.Lock() - defer l.Unlock() - - lockInfo := l.m[name] - - if lockInfo == nil { - return errors.New("state not locked") - } - - lockErr := &state.LockError{ - Info: &state.LockInfo{}, - } - - if id != lockInfo.ID { - lockErr.Err = errors.New("invalid lock id") - *lockErr.Info = *lockInfo - return lockErr - } - - delete(l.m, name) - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/client.go deleted file mode 100644 index 51c8d7251b9..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/client.go +++ /dev/null @@ -1,47 +0,0 @@ -package inmem - -import ( - "crypto/md5" - - 
"github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" -) - -// RemoteClient is a remote client that stores data in memory for testing. -type RemoteClient struct { - Data []byte - MD5 []byte - Name string -} - -func (c *RemoteClient) Get() (*remote.Payload, error) { - if c.Data == nil { - return nil, nil - } - - return &remote.Payload{ - Data: c.Data, - MD5: c.MD5, - }, nil -} - -func (c *RemoteClient) Put(data []byte) error { - md5 := md5.Sum(data) - - c.Data = data - c.MD5 = md5[:] - return nil -} - -func (c *RemoteClient) Delete() error { - c.Data = nil - c.MD5 = nil - return nil -} - -func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { - return locks.lock(c.Name, info) -} -func (c *RemoteClient) Unlock(id string) error { - return locks.unlock(c.Name, id) -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend.go deleted file mode 100644 index 221fddaa777..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend.go +++ /dev/null @@ -1,177 +0,0 @@ -package manta - -import ( - "context" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "os" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" - triton "github.com/joyent/triton-go" - "github.com/joyent/triton-go/authentication" - "github.com/joyent/triton-go/storage" -) - -func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "account": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_ACCOUNT", "SDC_ACCOUNT"}, ""), - }, - - "url": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{"MANTA_URL"}, "https://us-east.manta.joyent.com"), - }, - - "key_material": { - Type: 
schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_KEY_MATERIAL", "SDC_KEY_MATERIAL"}, ""), - }, - - "key_id": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_KEY_ID", "SDC_KEY_ID"}, ""), - }, - - "insecure_skip_tls_verify": { - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TRITON_SKIP_TLS_VERIFY", ""), - }, - - "path": { - Type: schema.TypeString, - Required: true, - }, - - "objectName": { - Type: schema.TypeString, - Optional: true, - Default: "terraform.tfstate", - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -type Backend struct { - *schema.Backend - data *schema.ResourceData - - // The fields below are set from configure - storageClient *storage.StorageClient - path string - objectName string -} - -type BackendConfig struct { - AccountId string - KeyId string - AccountUrl string - KeyMaterial string - SkipTls bool -} - -func (b *Backend) configure(ctx context.Context) error { - if b.path != "" { - return nil - } - - data := schema.FromContextBackendConfig(ctx) - - config := &BackendConfig{ - AccountId: data.Get("account").(string), - AccountUrl: data.Get("url").(string), - KeyId: data.Get("key_id").(string), - SkipTls: data.Get("insecure_skip_tls_verify").(bool), - } - - if v, ok := data.GetOk("key_material"); ok { - config.KeyMaterial = v.(string) - } - - b.path = data.Get("path").(string) - b.objectName = data.Get("objectName").(string) - - var validationError *multierror.Error - - if data.Get("account").(string) == "" { - validationError = multierror.Append(validationError, errors.New("`Account` must be configured for the Triton provider")) - } - if data.Get("key_id").(string) == "" { - validationError = multierror.Append(validationError, errors.New("`Key ID` must be configured for the Triton provider")) - } - if b.path == "" { - validationError = 
multierror.Append(validationError, errors.New("`Path` must be configured for the Triton provider")) - } - - if validationError != nil { - return validationError - } - - var signer authentication.Signer - var err error - - if config.KeyMaterial == "" { - signer, err = authentication.NewSSHAgentSigner(config.KeyId, config.AccountId) - if err != nil { - return errwrap.Wrapf("Error Creating SSH Agent Signer: {{err}}", err) - } - } else { - var keyBytes []byte - if _, err = os.Stat(config.KeyMaterial); err == nil { - keyBytes, err = ioutil.ReadFile(config.KeyMaterial) - if err != nil { - return fmt.Errorf("Error reading key material from %s: %s", - config.KeyMaterial, err) - } - block, _ := pem.Decode(keyBytes) - if block == nil { - return fmt.Errorf( - "Failed to read key material '%s': no key found", config.KeyMaterial) - } - - if block.Headers["Proc-Type"] == "4,ENCRYPTED" { - return fmt.Errorf( - "Failed to read key '%s': password protected keys are\n"+ - "not currently supported. Please decrypt the key prior to use.", config.KeyMaterial) - } - - } else { - keyBytes = []byte(config.KeyMaterial) - } - - signer, err = authentication.NewPrivateKeySigner(config.KeyId, keyBytes, config.AccountId) - if err != nil { - return errwrap.Wrapf("Error Creating SSH Private Key Signer: {{err}}", err) - } - } - - clientConfig := &triton.ClientConfig{ - MantaURL: config.AccountUrl, - AccountName: config.AccountId, - Signers: []authentication.Signer{signer}, - } - triton, err := storage.NewClient(clientConfig) - if err != nil { - return err - } - - b.storageClient = triton - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend_state.go deleted file mode 100644 index 3909573ef25..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend_state.go +++ /dev/null @@ -1,144 +0,0 @@ -package manta - -import ( - "context" - 
"errors" - "fmt" - "path" - "sort" - "strings" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "github.com/hashicorp/terraform/terraform" - "github.com/joyent/triton-go/storage" -) - -func (b *Backend) States() ([]string, error) { - result := []string{backend.DefaultStateName} - - objs, err := b.storageClient.Dir().List(context.Background(), &storage.ListDirectoryInput{ - DirectoryName: path.Join(mantaDefaultRootStore, b.path), - }) - if err != nil { - if strings.Contains(err.Error(), "ResourceNotFound") { - return result, nil - } - return nil, err - } - - for _, obj := range objs.Entries { - if obj.Type == "directory" && obj.Name != "" { - result = append(result, obj.Name) - } - } - - sort.Strings(result[1:]) - return result, nil -} - -func (b *Backend) DeleteState(name string) error { - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - //firstly we need to delete the state file - err := b.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{ - ObjectPath: path.Join(mantaDefaultRootStore, b.statePath(name), b.objectName), - }) - if err != nil { - return err - } - - //then we need to delete the state folder - err = b.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{ - ObjectPath: path.Join(mantaDefaultRootStore, b.statePath(name)), - }) - if err != nil { - return err - } - - return nil -} - -func (b *Backend) State(name string) (state.State, error) { - if name == "" { - return nil, errors.New("missing state name") - } - - client := &RemoteClient{ - storageClient: b.storageClient, - directoryName: b.statePath(name), - keyName: b.objectName, - } - - stateMgr := &remote.State{Client: client} - - //if this isn't the default state name, we need to create the object so - //it's listed by States. 
- if name != backend.DefaultStateName { - - // take a lock on this state while we write it - lockInfo := state.NewLockInfo() - lockInfo.Operation = "init" - lockId, err := client.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("failed to lock manta state: %s", err) - } - - // Local helper function so we can call it multiple places - lockUnlock := func(parent error) error { - if err := stateMgr.Unlock(lockId); err != nil { - return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) - } - return parent - } - - // Grab the value - if err := stateMgr.RefreshState(); err != nil { - err = lockUnlock(err) - return nil, err - } - - // If we have no state, we have to create an empty state - if v := stateMgr.State(); v == nil { - if err := stateMgr.WriteState(terraform.NewState()); err != nil { - err = lockUnlock(err) - return nil, err - } - if err := stateMgr.PersistState(); err != nil { - err = lockUnlock(err) - return nil, err - } - } - - // Unlock, the state should now be initialized - if err := lockUnlock(nil); err != nil { - return nil, err - } - - } - - return stateMgr, nil -} - -func (b *Backend) client() *RemoteClient { - return &RemoteClient{} -} - -func (b *Backend) statePath(name string) string { - if name == backend.DefaultStateName { - return b.path - } - - return path.Join(b.path, name) -} - -const errStateUnlock = ` -Error unlocking Manta state. Lock ID: %s - -Error: %s - -You may have to force-unlock this state in order to use it again. 
-` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/client.go deleted file mode 100644 index 7743801c977..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/client.go +++ /dev/null @@ -1,201 +0,0 @@ -package manta - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "log" - "path" - - "strings" - - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "github.com/joyent/triton-go/storage" -) - -const ( - mantaDefaultRootStore = "/stor" - lockFileName = "tflock" -) - -type RemoteClient struct { - storageClient *storage.StorageClient - directoryName string - keyName string - statePath string -} - -func (c *RemoteClient) Get() (*remote.Payload, error) { - output, err := c.storageClient.Objects().Get(context.Background(), &storage.GetObjectInput{ - ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName), - }) - if err != nil { - if strings.Contains(err.Error(), "ResourceNotFound") { - return nil, nil - } - return nil, err - } - defer output.ObjectReader.Close() - - buf := bytes.NewBuffer(nil) - if _, err := io.Copy(buf, output.ObjectReader); err != nil { - return nil, fmt.Errorf("Failed to read remote state: %s", err) - } - - payload := &remote.Payload{ - Data: buf.Bytes(), - } - - // If there was no data, then return nil - if len(payload.Data) == 0 { - return nil, nil - } - - return payload, nil - -} - -func (c *RemoteClient) Put(data []byte) error { - contentType := "application/json" - contentLength := int64(len(data)) - - params := &storage.PutObjectInput{ - ContentType: contentType, - ContentLength: uint64(contentLength), - ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName), - ObjectReader: bytes.NewReader(data), - } - - log.Printf("[DEBUG] Uploading remote state to Manta: %#v", params) - err := 
c.storageClient.Objects().Put(context.Background(), params) - if err != nil { - return err - } - - return nil -} - -func (c *RemoteClient) Delete() error { - err := c.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{ - ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName), - }) - - return err -} - -func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { - - //At Joyent, we want to make sure that the State directory exists before we interact with it - //We don't expect users to have to create it in advance - //The order of operations of Backend State as follows: - // * Get - if this doesn't exist then we continue as though it's new - // * Lock - we make sure that the state directory exists as it's the entrance to writing to Manta - // * Put - put the state up there - // * Unlock - unlock the directory - //We can always guarantee that the user can put their state in the specified location because of this - err := c.storageClient.Dir().Put(context.Background(), &storage.PutDirectoryInput{ - DirectoryName: path.Join(mantaDefaultRootStore, c.directoryName), - }) - if err != nil { - return "", err - } - - //firstly we want to check that a lock doesn't already exist - lockErr := &state.LockError{} - lockInfo, err := c.getLockInfo() - if err != nil { - if !strings.Contains(err.Error(), "ResourceNotFound") { - lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) - return "", lockErr - } - } - - if lockInfo != nil { - lockErr := &state.LockError{ - Err: fmt.Errorf("A lock is already acquired"), - Info: lockInfo, - } - return "", lockErr - } - - info.Path = path.Join(c.directoryName, lockFileName) - - if info.ID == "" { - lockID, err := uuid.GenerateUUID() - if err != nil { - return "", err - } - - info.ID = lockID - } - - data := info.Marshal() - - contentType := "application/json" - contentLength := int64(len(data)) - - params := &storage.PutObjectInput{ - ContentType: contentType, - ContentLength: 
uint64(contentLength), - ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName), - ObjectReader: bytes.NewReader(data), - } - - log.Printf("[DEBUG] Creating manta state lock: %#v", params) - err = c.storageClient.Objects().Put(context.Background(), params) - if err != nil { - return "", err - } - - return info.ID, nil -} - -func (c *RemoteClient) Unlock(id string) error { - lockErr := &state.LockError{} - - lockInfo, err := c.getLockInfo() - if err != nil { - lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) - return lockErr - } - lockErr.Info = lockInfo - - if lockInfo.ID != id { - lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) - return lockErr - } - - err = c.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{ - ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName), - }) - - return err -} - -func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { - output, err := c.storageClient.Objects().Get(context.Background(), &storage.GetObjectInput{ - ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName), - }) - if err != nil { - return nil, err - } - - defer output.ObjectReader.Close() - - buf := bytes.NewBuffer(nil) - if _, err := io.Copy(buf, output.ObjectReader); err != nil { - return nil, fmt.Errorf("Failed to read lock info: %s", err) - } - - lockInfo := &state.LockInfo{} - err = json.Unmarshal(buf.Bytes(), lockInfo) - if err != nil { - return nil, err - } - - return lockInfo, nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go deleted file mode 100644 index f5607e62397..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go +++ /dev/null @@ -1,267 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/service/dynamodb" - 
"github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" - - terraformAWS "github.com/terraform-providers/terraform-provider-aws/aws" -) - -// New creates a new backend for S3 remote state. -func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "The name of the S3 bucket", - }, - - "key": { - Type: schema.TypeString, - Required: true, - Description: "The path to the state file inside the bucket", - ValidateFunc: func(v interface{}, s string) ([]string, []error) { - // s3 will strip leading slashes from an object, so while this will - // technically be accepted by s3, it will break our workspace hierarchy. - if strings.HasPrefix(v.(string), "/") { - return nil, []error{fmt.Errorf("key must not start with '/'")} - } - return nil, nil - }, - }, - - "region": { - Type: schema.TypeString, - Required: true, - Description: "The region of the S3 bucket.", - DefaultFunc: schema.EnvDefaultFunc("AWS_DEFAULT_REGION", nil), - }, - - "endpoint": { - Type: schema.TypeString, - Optional: true, - Description: "A custom endpoint for the S3 API", - DefaultFunc: schema.EnvDefaultFunc("AWS_S3_ENDPOINT", ""), - }, - - "encrypt": { - Type: schema.TypeBool, - Optional: true, - Description: "Whether to enable server side encryption of the state file", - Default: false, - }, - - "acl": { - Type: schema.TypeString, - Optional: true, - Description: "Canned ACL to be applied to the state file", - Default: "", - }, - - "access_key": { - Type: schema.TypeString, - Optional: true, - Description: "AWS access key", - Default: "", - }, - - "secret_key": { - Type: schema.TypeString, - Optional: true, - Description: "AWS secret key", - Default: "", - }, - - "kms_key_id": { - Type: schema.TypeString, - Optional: true, - Description: "The ARN of a KMS Key to use for encrypting the state", - Default: "", - }, - - 
"lock_table": { - Type: schema.TypeString, - Optional: true, - Description: "DynamoDB table for state locking", - Default: "", - Deprecated: "please use the dynamodb_table attribute", - }, - - "dynamodb_table": { - Type: schema.TypeString, - Optional: true, - Description: "DynamoDB table for state locking and consistency", - Default: "", - }, - - "profile": { - Type: schema.TypeString, - Optional: true, - Description: "AWS profile name", - Default: "", - }, - - "shared_credentials_file": { - Type: schema.TypeString, - Optional: true, - Description: "Path to a shared credentials file", - Default: "", - }, - - "token": { - Type: schema.TypeString, - Optional: true, - Description: "MFA token", - Default: "", - }, - - "skip_credentials_validation": { - Type: schema.TypeBool, - Optional: true, - Description: "Skip the credentials validation via STS API.", - Default: false, - }, - - "skip_get_ec2_platforms": { - Type: schema.TypeBool, - Optional: true, - Description: "Skip getting the supported EC2 platforms.", - Default: false, - }, - - "skip_region_validation": { - Type: schema.TypeBool, - Optional: true, - Description: "Skip static validation of region name.", - Default: false, - }, - - "skip_requesting_account_id": { - Type: schema.TypeBool, - Optional: true, - Description: "Skip requesting the account ID.", - Default: false, - }, - - "skip_metadata_api_check": { - Type: schema.TypeBool, - Optional: true, - Description: "Skip the AWS Metadata API check.", - Default: false, - }, - - "role_arn": { - Type: schema.TypeString, - Optional: true, - Description: "The role to be assumed", - Default: "", - }, - - "session_name": { - Type: schema.TypeString, - Optional: true, - Description: "The session name to use when assuming the role.", - Default: "", - }, - - "external_id": { - Type: schema.TypeString, - Optional: true, - Description: "The external ID to use when assuming the role", - Default: "", - }, - - "assume_role_policy": { - Type: schema.TypeString, - Optional: 
true, - Description: "The permissions applied when assuming a role.", - Default: "", - }, - - "workspace_key_prefix": { - Type: schema.TypeString, - Optional: true, - Description: "The prefix applied to the non-default state path inside the bucket", - Default: "env:", - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -type Backend struct { - *schema.Backend - - // The fields below are set from configure - s3Client *s3.S3 - dynClient *dynamodb.DynamoDB - - bucketName string - keyName string - serverSideEncryption bool - acl string - kmsKeyID string - ddbTable string - workspaceKeyPrefix string -} - -func (b *Backend) configure(ctx context.Context) error { - if b.s3Client != nil { - return nil - } - - // Grab the resource data - data := schema.FromContextBackendConfig(ctx) - - b.bucketName = data.Get("bucket").(string) - b.keyName = data.Get("key").(string) - b.serverSideEncryption = data.Get("encrypt").(bool) - b.acl = data.Get("acl").(string) - b.kmsKeyID = data.Get("kms_key_id").(string) - b.workspaceKeyPrefix = data.Get("workspace_key_prefix").(string) - - b.ddbTable = data.Get("dynamodb_table").(string) - if b.ddbTable == "" { - // try the depracted field - b.ddbTable = data.Get("lock_table").(string) - } - - cfg := &terraformAWS.Config{ - AccessKey: data.Get("access_key").(string), - AssumeRoleARN: data.Get("role_arn").(string), - AssumeRoleExternalID: data.Get("external_id").(string), - AssumeRolePolicy: data.Get("assume_role_policy").(string), - AssumeRoleSessionName: data.Get("session_name").(string), - CredsFilename: data.Get("shared_credentials_file").(string), - Profile: data.Get("profile").(string), - Region: data.Get("region").(string), - S3Endpoint: data.Get("endpoint").(string), - SecretKey: data.Get("secret_key").(string), - Token: data.Get("token").(string), - SkipCredsValidation: data.Get("skip_credentials_validation").(bool), - SkipGetEC2Platforms: 
data.Get("skip_get_ec2_platforms").(bool), - SkipRegionValidation: data.Get("skip_region_validation").(bool), - SkipRequestingAccountId: data.Get("skip_requesting_account_id").(bool), - SkipMetadataApiCheck: data.Get("skip_metadata_api_check").(bool), - } - - client, err := cfg.Client() - if err != nil { - return err - } - - b.s3Client = client.(*terraformAWS.AWSClient).S3() - b.dynClient = client.(*terraformAWS.AWSClient).DynamoDB() - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend_state.go deleted file mode 100644 index f38b199b043..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend_state.go +++ /dev/null @@ -1,189 +0,0 @@ -package s3 - -import ( - "errors" - "fmt" - "sort" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" - "github.com/hashicorp/terraform/terraform" -) - -func (b *Backend) States() ([]string, error) { - params := &s3.ListObjectsInput{ - Bucket: &b.bucketName, - Prefix: aws.String(b.workspaceKeyPrefix + "/"), - } - - resp, err := b.s3Client.ListObjects(params) - if err != nil { - return nil, err - } - - envs := []string{backend.DefaultStateName} - for _, obj := range resp.Contents { - env := b.keyEnv(*obj.Key) - if env != "" { - envs = append(envs, env) - } - } - - sort.Strings(envs[1:]) - return envs, nil -} - -// extract the env name from the S3 key -func (b *Backend) keyEnv(key string) string { - // we have 3 parts, the prefix, the env name, and the key name - parts := strings.SplitN(key, "/", 3) - if len(parts) < 3 { - // no env here - return "" - } - - // shouldn't happen since we listed by prefix - if parts[0] != b.workspaceKeyPrefix { - return "" - } - - // not our key, so don't include it in our listing - 
if parts[2] != b.keyName { - return "" - } - - return parts[1] -} - -func (b *Backend) DeleteState(name string) error { - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - client, err := b.remoteClient(name) - if err != nil { - return err - } - - return client.Delete() -} - -// get a remote client configured for this state -func (b *Backend) remoteClient(name string) (*RemoteClient, error) { - if name == "" { - return nil, errors.New("missing state name") - } - - client := &RemoteClient{ - s3Client: b.s3Client, - dynClient: b.dynClient, - bucketName: b.bucketName, - path: b.path(name), - serverSideEncryption: b.serverSideEncryption, - acl: b.acl, - kmsKeyID: b.kmsKeyID, - ddbTable: b.ddbTable, - } - - return client, nil -} - -func (b *Backend) State(name string) (state.State, error) { - client, err := b.remoteClient(name) - if err != nil { - return nil, err - } - - stateMgr := &remote.State{Client: client} - // Check to see if this state already exists. - // If we're trying to force-unlock a state, we can't take the lock before - // fetching the state. If the state doesn't exist, we have to assume this - // is a normal create operation, and take the lock at that point. - // - // If we need to force-unlock, but for some reason the state no longer - // exists, the user will have to use aws tools to manually fix the - // situation. - existing, err := b.States() - if err != nil { - return nil, err - } - - exists := false - for _, s := range existing { - if s == name { - exists = true - break - } - } - - // We need to create the object so it's listed by States. 
- if !exists { - // take a lock on this state while we write it - lockInfo := state.NewLockInfo() - lockInfo.Operation = "init" - lockId, err := client.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("failed to lock s3 state: %s", err) - } - - // Local helper function so we can call it multiple places - lockUnlock := func(parent error) error { - if err := stateMgr.Unlock(lockId); err != nil { - return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) - } - return parent - } - - // Grab the value - // This is to ensure that no one beat us to writing a state between - // the `exists` check and taking the lock. - if err := stateMgr.RefreshState(); err != nil { - err = lockUnlock(err) - return nil, err - } - - // If we have no state, we have to create an empty state - if v := stateMgr.State(); v == nil { - if err := stateMgr.WriteState(terraform.NewState()); err != nil { - err = lockUnlock(err) - return nil, err - } - if err := stateMgr.PersistState(); err != nil { - err = lockUnlock(err) - return nil, err - } - } - - // Unlock, the state should now be initialized - if err := lockUnlock(nil); err != nil { - return nil, err - } - - } - - return stateMgr, nil -} - -func (b *Backend) client() *RemoteClient { - return &RemoteClient{} -} - -func (b *Backend) path(name string) string { - if name == backend.DefaultStateName { - return b.keyName - } - - return strings.Join([]string{b.workspaceKeyPrefix, name, b.keyName}, "/") -} - -const errStateUnlock = ` -Error unlocking S3 state. Lock ID: %s - -Error: %s - -You may have to force-unlock this state in order to use it again. 
-` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/client.go deleted file mode 100644 index b9233145a2b..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/client.go +++ /dev/null @@ -1,416 +0,0 @@ -package s3 - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/s3" - multierror "github.com/hashicorp/go-multierror" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" -) - -// Store the last saved serial in dynamo with this suffix for consistency checks. -const ( - stateIDSuffix = "-md5" - s3ErrCodeInternalError = "InternalError" -) - -type RemoteClient struct { - s3Client *s3.S3 - dynClient *dynamodb.DynamoDB - bucketName string - path string - serverSideEncryption bool - acl string - kmsKeyID string - ddbTable string -} - -var ( - // The amount of time we will retry a state waiting for it to match the - // expected checksum. - consistencyRetryTimeout = 10 * time.Second - - // delay when polling the state - consistencyRetryPollInterval = 2 * time.Second -) - -// test hook called when checksums don't match -var testChecksumHook func() - -func (c *RemoteClient) Get() (payload *remote.Payload, err error) { - deadline := time.Now().Add(consistencyRetryTimeout) - - // If we have a checksum, and the returned payload doesn't match, we retry - // up until deadline. - for { - payload, err = c.get() - if err != nil { - return nil, err - } - - // If the remote state was manually removed the payload will be nil, - // but if there's still a digest entry for that state we will still try - // to compare the MD5 below. 
- var digest []byte - if payload != nil { - digest = payload.MD5 - } - - // verify that this state is what we expect - if expected, err := c.getMD5(); err != nil { - log.Printf("[WARNING] failed to fetch state md5: %s", err) - } else if len(expected) > 0 && !bytes.Equal(expected, digest) { - log.Printf("[WARNING] state md5 mismatch: expected '%x', got '%x'", expected, digest) - - if testChecksumHook != nil { - testChecksumHook() - } - - if time.Now().Before(deadline) { - time.Sleep(consistencyRetryPollInterval) - log.Println("[INFO] retrying S3 RemoteClient.Get...") - continue - } - - return nil, fmt.Errorf(errBadChecksumFmt, digest) - } - - break - } - - return payload, err -} - -func (c *RemoteClient) get() (*remote.Payload, error) { - var output *s3.GetObjectOutput - var err error - - // we immediately retry on an internal error, as those are usually transient - maxRetries := 2 - for retryCount := 0; ; retryCount++ { - output, err = c.s3Client.GetObject(&s3.GetObjectInput{ - Bucket: &c.bucketName, - Key: &c.path, - }) - - if err != nil { - if awserr, ok := err.(awserr.Error); ok { - switch awserr.Code() { - case s3.ErrCodeNoSuchKey: - return nil, nil - case s3ErrCodeInternalError: - if retryCount > maxRetries { - return nil, err - } - log.Println("[WARN] s3 internal error, retrying...") - continue - } - } - return nil, err - } - break - } - - defer output.Body.Close() - - buf := bytes.NewBuffer(nil) - if _, err := io.Copy(buf, output.Body); err != nil { - return nil, fmt.Errorf("Failed to read remote state: %s", err) - } - - sum := md5.Sum(buf.Bytes()) - payload := &remote.Payload{ - Data: buf.Bytes(), - MD5: sum[:], - } - - // If there was no data, then return nil - if len(payload.Data) == 0 { - return nil, nil - } - - return payload, nil -} - -func (c *RemoteClient) Put(data []byte) error { - contentType := "application/json" - contentLength := int64(len(data)) - - // we immediately retry on an internal error, as those are usually transient - maxRetries := 2 - 
for retryCount := 0; ; retryCount++ { - i := &s3.PutObjectInput{ - ContentType: &contentType, - ContentLength: &contentLength, - Body: bytes.NewReader(data), - Bucket: &c.bucketName, - Key: &c.path, - } - - if c.serverSideEncryption { - if c.kmsKeyID != "" { - i.SSEKMSKeyId = &c.kmsKeyID - i.ServerSideEncryption = aws.String("aws:kms") - } else { - i.ServerSideEncryption = aws.String("AES256") - } - } - - if c.acl != "" { - i.ACL = aws.String(c.acl) - } - - log.Printf("[DEBUG] Uploading remote state to S3: %#v", i) - - _, err := c.s3Client.PutObject(i) - if err != nil { - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() == s3ErrCodeInternalError { - if retryCount > maxRetries { - return fmt.Errorf("failed to upload state: %s", err) - } - log.Println("[WARN] s3 internal error, retrying...") - continue - } - } - return fmt.Errorf("failed to upload state: %s", err) - } - break - } - - sum := md5.Sum(data) - if err := c.putMD5(sum[:]); err != nil { - // if this errors out, we unfortunately have to error out altogether, - // since the next Get will inevitably fail. 
- return fmt.Errorf("failed to store state MD5: %s", err) - - } - - return nil -} - -func (c *RemoteClient) Delete() error { - _, err := c.s3Client.DeleteObject(&s3.DeleteObjectInput{ - Bucket: &c.bucketName, - Key: &c.path, - }) - - if err != nil { - return err - } - - if err := c.deleteMD5(); err != nil { - log.Printf("error deleting state md5: %s", err) - } - - return nil -} - -func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { - if c.ddbTable == "" { - return "", nil - } - - info.Path = c.lockPath() - - if info.ID == "" { - lockID, err := uuid.GenerateUUID() - if err != nil { - return "", err - } - - info.ID = lockID - } - - putParams := &dynamodb.PutItemInput{ - Item: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath())}, - "Info": {S: aws.String(string(info.Marshal()))}, - }, - TableName: aws.String(c.ddbTable), - ConditionExpression: aws.String("attribute_not_exists(LockID)"), - } - _, err := c.dynClient.PutItem(putParams) - - if err != nil { - lockInfo, infoErr := c.getLockInfo() - if infoErr != nil { - err = multierror.Append(err, infoErr) - } - - lockErr := &state.LockError{ - Err: err, - Info: lockInfo, - } - return "", lockErr - } - - return info.ID, nil -} - -func (c *RemoteClient) getMD5() ([]byte, error) { - if c.ddbTable == "" { - return nil, nil - } - - getParams := &dynamodb.GetItemInput{ - Key: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, - }, - ProjectionExpression: aws.String("LockID, Digest"), - TableName: aws.String(c.ddbTable), - ConsistentRead: aws.Bool(true), - } - - resp, err := c.dynClient.GetItem(getParams) - if err != nil { - return nil, err - } - - var val string - if v, ok := resp.Item["Digest"]; ok && v.S != nil { - val = *v.S - } - - sum, err := hex.DecodeString(val) - if err != nil || len(sum) != md5.Size { - return nil, errors.New("invalid md5") - } - - return sum, nil -} - -// store the hash of the state to that clients can check for 
stale state files. -func (c *RemoteClient) putMD5(sum []byte) error { - if c.ddbTable == "" { - return nil - } - - if len(sum) != md5.Size { - return errors.New("invalid payload md5") - } - - putParams := &dynamodb.PutItemInput{ - Item: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, - "Digest": {S: aws.String(hex.EncodeToString(sum))}, - }, - TableName: aws.String(c.ddbTable), - } - _, err := c.dynClient.PutItem(putParams) - if err != nil { - log.Printf("[WARNING] failed to record state serial in dynamodb: %s", err) - } - - return nil -} - -// remove the hash value for a deleted state -func (c *RemoteClient) deleteMD5() error { - if c.ddbTable == "" { - return nil - } - - params := &dynamodb.DeleteItemInput{ - Key: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, - }, - TableName: aws.String(c.ddbTable), - } - if _, err := c.dynClient.DeleteItem(params); err != nil { - return err - } - return nil -} - -func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { - getParams := &dynamodb.GetItemInput{ - Key: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath())}, - }, - ProjectionExpression: aws.String("LockID, Info"), - TableName: aws.String(c.ddbTable), - ConsistentRead: aws.Bool(true), - } - - resp, err := c.dynClient.GetItem(getParams) - if err != nil { - return nil, err - } - - var infoData string - if v, ok := resp.Item["Info"]; ok && v.S != nil { - infoData = *v.S - } - - lockInfo := &state.LockInfo{} - err = json.Unmarshal([]byte(infoData), lockInfo) - if err != nil { - return nil, err - } - - return lockInfo, nil -} - -func (c *RemoteClient) Unlock(id string) error { - if c.ddbTable == "" { - return nil - } - - lockErr := &state.LockError{} - - // TODO: store the path and lock ID in separate fields, and have proper - // projection expression only delete the lock if both match, rather than - // checking the ID from the info field first. 
- lockInfo, err := c.getLockInfo() - if err != nil { - lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) - return lockErr - } - lockErr.Info = lockInfo - - if lockInfo.ID != id { - lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) - return lockErr - } - - params := &dynamodb.DeleteItemInput{ - Key: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath())}, - }, - TableName: aws.String(c.ddbTable), - } - _, err = c.dynClient.DeleteItem(params) - - if err != nil { - lockErr.Err = err - return lockErr - } - return nil -} - -func (c *RemoteClient) lockPath() string { - return fmt.Sprintf("%s/%s", c.bucketName, c.path) -} - -const errBadChecksumFmt = `state data in S3 does not have the expected content. - -This may be caused by unusually long delays in S3 processing a previous state -update. Please wait for a minute or two and try again. If this problem -persists, and neither S3 nor DynamoDB are experiencing an outage, you may need -to manually verify the remote state and update the Digest value stored in the -DynamoDB table to the following value: %x -` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend.go deleted file mode 100644 index e1e4e95b298..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend.go +++ /dev/null @@ -1,325 +0,0 @@ -package swift - -import ( - "context" - "fmt" - "log" - "strconv" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" - tf_openstack "github.com/terraform-providers/terraform-provider-openstack/openstack" -) - -// New creates a new backend for Swift remote state. 
-func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "auth_url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_URL", nil), - Description: descriptions["auth_url"], - }, - - "user_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_USER_ID", ""), - Description: descriptions["user_name"], - }, - - "user_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_USERNAME", ""), - Description: descriptions["user_name"], - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_TENANT_ID", - "OS_PROJECT_ID", - }, ""), - Description: descriptions["tenant_id"], - }, - - "tenant_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_TENANT_NAME", - "OS_PROJECT_NAME", - }, ""), - Description: descriptions["tenant_name"], - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Sensitive: true, - DefaultFunc: schema.EnvDefaultFunc("OS_PASSWORD", ""), - Description: descriptions["password"], - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_TOKEN", ""), - Description: descriptions["token"], - }, - - "domain_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_USER_DOMAIN_ID", - "OS_PROJECT_DOMAIN_ID", - "OS_DOMAIN_ID", - }, ""), - Description: descriptions["domain_id"], - }, - - "domain_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_USER_DOMAIN_NAME", - "OS_PROJECT_DOMAIN_NAME", - "OS_DOMAIN_NAME", - "OS_DEFAULT_DOMAIN", - }, ""), - Description: descriptions["domain_name"], - 
}, - - "region_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - Description: descriptions["region_name"], - }, - - "insecure": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_INSECURE", ""), - Description: descriptions["insecure"], - }, - - "endpoint_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_ENDPOINT_TYPE", ""), - }, - - "cacert_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_CACERT", ""), - Description: descriptions["cacert_file"], - }, - - "cert": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_CERT", ""), - Description: descriptions["cert"], - }, - - "key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_KEY", ""), - Description: descriptions["key"], - }, - - "path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: descriptions["path"], - Deprecated: "Use container instead", - ConflictsWith: []string{"container"}, - }, - - "container": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: descriptions["container"], - }, - - "archive_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: descriptions["archive_path"], - Deprecated: "Use archive_container instead", - ConflictsWith: []string{"archive_container"}, - }, - - "archive_container": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: descriptions["archive_container"], - }, - - "expire_after": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: descriptions["expire_after"], - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -var descriptions 
map[string]string - -func init() { - descriptions = map[string]string{ - "auth_url": "The Identity authentication URL.", - - "user_name": "Username to login with.", - - "user_id": "User ID to login with.", - - "tenant_id": "The ID of the Tenant (Identity v2) or Project (Identity v3)\n" + - "to login with.", - - "tenant_name": "The name of the Tenant (Identity v2) or Project (Identity v3)\n" + - "to login with.", - - "password": "Password to login with.", - - "token": "Authentication token to use as an alternative to username/password.", - - "domain_id": "The ID of the Domain to scope to (Identity v3).", - - "domain_name": "The name of the Domain to scope to (Identity v3).", - - "region_name": "The name of the Region to use.", - - "insecure": "Trust self-signed certificates.", - - "cacert_file": "A Custom CA certificate.", - - "endpoint_type": "The catalog endpoint type to use.", - - "cert": "A client certificate to authenticate with.", - - "key": "A client private key to authenticate with.", - - "path": "Swift container path to use.", - - "container": "Swift container to create", - - "archive_path": "Swift container path to archive state to.", - - "archive_container": "Swift container to archive state to.", - - "expire_after": "Archive object expiry duration.", - } -} - -type Backend struct { - *schema.Backend - - // Fields below are set from configure - client *gophercloud.ServiceClient - archive bool - archiveContainer string - expireSecs int - container string -} - -func (b *Backend) configure(ctx context.Context) error { - if b.client != nil { - return nil - } - - // Grab the resource data - data := schema.FromContextBackendConfig(ctx) - - config := &tf_openstack.Config{ - CACertFile: data.Get("cacert_file").(string), - ClientCertFile: data.Get("cert").(string), - ClientKeyFile: data.Get("key").(string), - DomainID: data.Get("domain_id").(string), - DomainName: data.Get("domain_name").(string), - EndpointType: data.Get("endpoint_type").(string), - 
IdentityEndpoint: data.Get("auth_url").(string), - Insecure: data.Get("insecure").(bool), - Password: data.Get("password").(string), - Token: data.Get("token").(string), - TenantID: data.Get("tenant_id").(string), - TenantName: data.Get("tenant_name").(string), - Username: data.Get("user_name").(string), - UserID: data.Get("user_id").(string), - } - - if err := config.LoadAndValidate(); err != nil { - return err - } - - // Assign Container - b.container = data.Get("container").(string) - if b.container == "" { - // Check deprecated field - b.container = data.Get("path").(string) - } - - // Enable object archiving? - if archiveContainer, ok := data.GetOk("archive_container"); ok { - log.Printf("[DEBUG] Archive_container set, enabling object versioning") - b.archive = true - b.archiveContainer = archiveContainer.(string) - } else if archivePath, ok := data.GetOk("archive_path"); ok { - log.Printf("[DEBUG] Archive_path set, enabling object versioning") - b.archive = true - b.archiveContainer = archivePath.(string) - } - - // Enable object expiry? - if expireRaw, ok := data.GetOk("expire_after"); ok { - expire := expireRaw.(string) - log.Printf("[DEBUG] Requested that remote state expires after %s", expire) - - if strings.HasSuffix(expire, "d") { - log.Printf("[DEBUG] Got a days expire after duration. 
Converting to hours") - days, err := strconv.Atoi(expire[:len(expire)-1]) - if err != nil { - return fmt.Errorf("Error converting expire_after value %s to int: %s", expire, err) - } - - expire = fmt.Sprintf("%dh", days*24) - log.Printf("[DEBUG] Expire after %s hours", expire) - } - - expireDur, err := time.ParseDuration(expire) - if err != nil { - log.Printf("[DEBUG] Error parsing duration %s: %s", expire, err) - return fmt.Errorf("Error parsing expire_after duration '%s': %s", expire, err) - } - log.Printf("[DEBUG] Seconds duration = %d", int(expireDur.Seconds())) - b.expireSecs = int(expireDur.Seconds()) - } - - objClient, err := openstack.NewObjectStorageV1(config.OsClient, gophercloud.EndpointOpts{ - Region: data.Get("region_name").(string), - }) - if err != nil { - return err - } - - b.client = objClient - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend_state.go deleted file mode 100644 index b8ab9810742..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend_state.go +++ /dev/null @@ -1,31 +0,0 @@ -package swift - -import ( - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/state/remote" -) - -func (b *Backend) States() ([]string, error) { - return nil, backend.ErrNamedStatesNotSupported -} - -func (b *Backend) DeleteState(name string) error { - return backend.ErrNamedStatesNotSupported -} - -func (b *Backend) State(name string) (state.State, error) { - if name != backend.DefaultStateName { - return nil, backend.ErrNamedStatesNotSupported - } - - client := &RemoteClient{ - client: b.client, - container: b.container, - archive: b.archive, - archiveContainer: b.archiveContainer, - expireSecs: b.expireSecs, - } - - return &remote.State{Client: client}, nil -} diff --git 
a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/client.go deleted file mode 100644 index 1f8bf464998..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/client.go +++ /dev/null @@ -1,115 +0,0 @@ -package swift - -import ( - "bytes" - "crypto/md5" - "log" - "os" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" - - "github.com/hashicorp/terraform/state/remote" -) - -const ( - TFSTATE_NAME = "tfstate.tf" - TFSTATE_LOCK_NAME = "tfstate.lock" -) - -// RemoteClient implements the Client interface for an Openstack Swift server. -type RemoteClient struct { - client *gophercloud.ServiceClient - container string - archive bool - archiveContainer string - expireSecs int -} - -func (c *RemoteClient) Get() (*remote.Payload, error) { - log.Printf("[DEBUG] Getting object %s in container %s", TFSTATE_NAME, c.container) - result := objects.Download(c.client, c.container, TFSTATE_NAME, nil) - - // Extract any errors from result - _, err := result.Extract() - - // 404 response is to be expected if the object doesn't already exist! 
- if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Println("[DEBUG] Object doesn't exist to download.") - return nil, nil - } - - bytes, err := result.ExtractContent() - if err != nil { - return nil, err - } - - hash := md5.Sum(bytes) - payload := &remote.Payload{ - Data: bytes, - MD5: hash[:md5.Size], - } - - return payload, nil -} - -func (c *RemoteClient) Put(data []byte) error { - if err := c.ensureContainerExists(); err != nil { - return err - } - - log.Printf("[DEBUG] Putting object %s in container %s", TFSTATE_NAME, c.container) - reader := bytes.NewReader(data) - createOpts := objects.CreateOpts{ - Content: reader, - } - - if c.expireSecs != 0 { - log.Printf("[DEBUG] ExpireSecs = %d", c.expireSecs) - createOpts.DeleteAfter = c.expireSecs - } - - result := objects.Create(c.client, c.container, TFSTATE_NAME, createOpts) - - return result.Err -} - -func (c *RemoteClient) Delete() error { - log.Printf("[DEBUG] Deleting object %s in container %s", TFSTATE_NAME, c.container) - result := objects.Delete(c.client, c.container, TFSTATE_NAME, nil) - return result.Err -} - -func (c *RemoteClient) ensureContainerExists() error { - containerOpts := &containers.CreateOpts{} - - if c.archive { - log.Printf("[DEBUG] Creating archive container %s", c.archiveContainer) - result := containers.Create(c.client, c.archiveContainer, nil) - if result.Err != nil { - log.Printf("[DEBUG] Error creating archive container %s: %s", c.archiveContainer, result.Err) - return result.Err - } - - log.Printf("[DEBUG] Enabling Versioning on container %s", c.container) - containerOpts.VersionsLocation = c.archiveContainer - } - - log.Printf("[DEBUG] Creating container %s", c.container) - result := containers.Create(c.client, c.container, containerOpts) - if result.Err != nil { - return result.Err - } - - return nil -} - -func multiEnv(ks []string) string { - for _, k := range ks { - if v := os.Getenv(k); v != "" { - return v - } - } - return "" -} diff --git 
a/vendor/github.com/hashicorp/terraform/backend/testing.go b/vendor/github.com/hashicorp/terraform/backend/testing.go deleted file mode 100644 index a608b0c365c..00000000000 --- a/vendor/github.com/hashicorp/terraform/backend/testing.go +++ /dev/null @@ -1,321 +0,0 @@ -package backend - -import ( - "reflect" - "sort" - "testing" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -// TestBackendConfig validates and configures the backend with the -// given configuration. -func TestBackendConfig(t *testing.T, b Backend, c map[string]interface{}) Backend { - t.Helper() - - // Get the proper config structure - rc, err := config.NewRawConfig(c) - if err != nil { - t.Fatalf("bad: %s", err) - } - conf := terraform.NewResourceConfig(rc) - - // Validate - warns, errs := b.Validate(conf) - if len(warns) > 0 { - t.Fatalf("warnings: %s", warns) - } - if len(errs) > 0 { - t.Fatalf("errors: %s", errs) - } - - // Configure - if err := b.Configure(conf); err != nil { - t.Fatalf("err: %s", err) - } - - return b -} - -// TestBackend will test the functionality of a Backend. The backend is -// assumed to already be configured. This will test state functionality. -// If the backend reports it doesn't support multi-state by returning the -// error ErrNamedStatesNotSupported, then it will not test that. -// -// If you want to test locking, two backends must be given. If b2 is nil, -// then state locking won't be tested. 
-func TestBackend(t *testing.T, b1, b2 Backend) { - t.Helper() - - testBackendStates(t, b1) - - if b2 != nil { - testBackendStateLock(t, b1, b2) - } -} - -func testBackendStates(t *testing.T, b Backend) { - t.Helper() - - states, err := b.States() - if err == ErrNamedStatesNotSupported { - t.Logf("TestBackend: named states not supported in %T, skipping", b) - return - } - - // Test it starts with only the default - if len(states) != 1 || states[0] != DefaultStateName { - t.Fatalf("should only have default to start: %#v", states) - } - - // Create a couple states - foo, err := b.State("foo") - if err != nil { - t.Fatalf("error: %s", err) - } - if err := foo.RefreshState(); err != nil { - t.Fatalf("bad: %s", err) - } - if v := foo.State(); v.HasResources() { - t.Fatalf("should be empty: %s", v) - } - - bar, err := b.State("bar") - if err != nil { - t.Fatalf("error: %s", err) - } - if err := bar.RefreshState(); err != nil { - t.Fatalf("bad: %s", err) - } - if v := bar.State(); v.HasResources() { - t.Fatalf("should be empty: %s", v) - } - - // Verify they are distinct states that can be read back from storage - { - // start with a fresh state, and record the lineage being - // written to "bar" - barState := terraform.NewState() - - // creating the named state may have created a lineage, so use that if it exists. - if s := bar.State(); s != nil && s.Lineage != "" { - barState.Lineage = s.Lineage - } - barLineage := barState.Lineage - - // the foo lineage should be distinct from bar, and unchanged after - // modifying bar - fooState := terraform.NewState() - // creating the named state may have created a lineage, so use that if it exists. 
- if s := foo.State(); s != nil && s.Lineage != "" { - fooState.Lineage = s.Lineage - } - fooLineage := fooState.Lineage - - // write a known state to foo - if err := foo.WriteState(fooState); err != nil { - t.Fatal("error writing foo state:", err) - } - if err := foo.PersistState(); err != nil { - t.Fatal("error persisting foo state:", err) - } - - // write a distinct known state to bar - if err := bar.WriteState(barState); err != nil { - t.Fatalf("bad: %s", err) - } - if err := bar.PersistState(); err != nil { - t.Fatalf("bad: %s", err) - } - - // verify that foo is unchanged with the existing state manager - if err := foo.RefreshState(); err != nil { - t.Fatal("error refreshing foo:", err) - } - fooState = foo.State() - switch { - case fooState == nil: - t.Fatal("nil state read from foo") - case fooState.Lineage == barLineage: - t.Fatalf("bar lineage read from foo: %#v", fooState) - case fooState.Lineage != fooLineage: - t.Fatal("foo lineage alterred") - } - - // fetch foo again from the backend - foo, err = b.State("foo") - if err != nil { - t.Fatal("error re-fetching state:", err) - } - if err := foo.RefreshState(); err != nil { - t.Fatal("error refreshing foo:", err) - } - fooState = foo.State() - switch { - case fooState == nil: - t.Fatal("nil state read from foo") - case fooState.Lineage != fooLineage: - t.Fatal("incorrect state returned from backend") - } - - // fetch the bar again from the backend - bar, err = b.State("bar") - if err != nil { - t.Fatal("error re-fetching state:", err) - } - if err := bar.RefreshState(); err != nil { - t.Fatal("error refreshing bar:", err) - } - barState = bar.State() - switch { - case barState == nil: - t.Fatal("nil state read from bar") - case barState.Lineage != barLineage: - t.Fatal("incorrect state returned from backend") - } - } - - // Verify we can now list them - { - // we determined that named stated are supported earlier - states, err := b.States() - if err != nil { - t.Fatal(err) - } - - sort.Strings(states) - 
expected := []string{"bar", "default", "foo"} - if !reflect.DeepEqual(states, expected) { - t.Fatalf("bad: %#v", states) - } - } - - // Delete some states - if err := b.DeleteState("foo"); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the default state can't be deleted - if err := b.DeleteState(DefaultStateName); err == nil { - t.Fatal("expected error") - } - - // Create and delete the foo state again. - // Make sure that there are no leftover artifacts from a deleted state - // preventing re-creation. - foo, err = b.State("foo") - if err != nil { - t.Fatalf("error: %s", err) - } - if err := foo.RefreshState(); err != nil { - t.Fatalf("bad: %s", err) - } - if v := foo.State(); v.HasResources() { - t.Fatalf("should be empty: %s", v) - } - // and delete it again - if err := b.DeleteState("foo"); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify deletion - { - states, err := b.States() - if err == ErrNamedStatesNotSupported { - t.Logf("TestBackend: named states not supported in %T, skipping", b) - return - } - - sort.Strings(states) - expected := []string{"bar", "default"} - if !reflect.DeepEqual(states, expected) { - t.Fatalf("bad: %#v", states) - } - } -} - -func testBackendStateLock(t *testing.T, b1, b2 Backend) { - t.Helper() - - // Get the default state for each - b1StateMgr, err := b1.State(DefaultStateName) - if err != nil { - t.Fatalf("error: %s", err) - } - if err := b1StateMgr.RefreshState(); err != nil { - t.Fatalf("bad: %s", err) - } - - // Fast exit if this doesn't support locking at all - if _, ok := b1StateMgr.(state.Locker); !ok { - t.Logf("TestBackend: backend %T doesn't support state locking, not testing", b1) - return - } - - t.Logf("TestBackend: testing state locking for %T", b1) - - b2StateMgr, err := b2.State(DefaultStateName) - if err != nil { - t.Fatalf("error: %s", err) - } - if err := b2StateMgr.RefreshState(); err != nil { - t.Fatalf("bad: %s", err) - } - - // Reassign so its obvious whats happening - lockerA := 
b1StateMgr.(state.Locker) - lockerB := b2StateMgr.(state.Locker) - - infoA := state.NewLockInfo() - infoA.Operation = "test" - infoA.Who = "clientA" - - infoB := state.NewLockInfo() - infoB.Operation = "test" - infoB.Who = "clientB" - - lockIDA, err := lockerA.Lock(infoA) - if err != nil { - t.Fatal("unable to get initial lock:", err) - } - - // Make sure we can still get the state.State from another instance even - // when locked. This should only happen when a state is loaded via the - // backend, and as a remote state. - _, err = b2.State(DefaultStateName) - if err != nil { - t.Fatalf("failed to read locked state from another backend instance: %s", err) - } - - // If the lock ID is blank, assume locking is disabled - if lockIDA == "" { - t.Logf("TestBackend: %T: empty string returned for lock, assuming disabled", b1) - return - } - - _, err = lockerB.Lock(infoB) - if err == nil { - lockerA.Unlock(lockIDA) - t.Fatal("client B obtained lock while held by client A") - } - - if err := lockerA.Unlock(lockIDA); err != nil { - t.Fatal("error unlocking client A", err) - } - - lockIDB, err := lockerB.Lock(infoB) - if err != nil { - t.Fatal("unable to obtain lock from client B") - } - - if lockIDB == lockIDA { - t.Fatalf("duplicate lock IDs: %q", lockIDB) - } - - if err = lockerB.Unlock(lockIDB); err != nil { - t.Fatal("error unlocking client B:", err) - } - -} diff --git a/vendor/github.com/hashicorp/terraform/command/apply.go b/vendor/github.com/hashicorp/terraform/command/apply.go deleted file mode 100644 index c65b2df5178..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/apply.go +++ /dev/null @@ -1,398 +0,0 @@ -package command - -import ( - "bytes" - "context" - "fmt" - "os" - "sort" - "strings" - - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/go-getter" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/terraform" 
-) - -// ApplyCommand is a Command implementation that applies a Terraform -// configuration and actually builds or changes infrastructure. -type ApplyCommand struct { - Meta - - // If true, then this apply command will become the "destroy" - // command. It is just like apply but only processes a destroy. - Destroy bool -} - -func (c *ApplyCommand) Run(args []string) int { - var destroyForce, refresh, autoApprove bool - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - cmdName := "apply" - if c.Destroy { - cmdName = "destroy" - } - - cmdFlags := c.Meta.flagSet(cmdName) - if c.Destroy { - cmdFlags.BoolVar(&destroyForce, "force", false, "force") - } - cmdFlags.BoolVar(&refresh, "refresh", true, "refresh") - if !c.Destroy { - cmdFlags.BoolVar(&autoApprove, "auto-approve", false, "skip interactive approval of plan before applying") - } - cmdFlags.IntVar( - &c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism") - cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") - cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") - cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") - cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") - cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - // Get the args. The "maybeInit" flag tracks whether we may need to - // initialize the configuration from a remote path. This is true as long - // as we have an argument. 
- args = cmdFlags.Args() - maybeInit := len(args) == 1 - configPath, err := ModulePath(args) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Check for user-supplied plugin path - if c.pluginPath, err = c.loadPluginPath(); err != nil { - c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) - return 1 - } - - if !c.Destroy && maybeInit { - // We need the pwd for the getter operation below - pwd, err := os.Getwd() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error getting pwd: %s", err)) - return 1 - } - - // Do a detect to determine if we need to do an init + apply. - if detected, err := getter.Detect(configPath, pwd, getter.Detectors); err != nil { - c.Ui.Error(fmt.Sprintf( - "Invalid path: %s", err)) - return 1 - } else if !strings.HasPrefix(detected, "file") { - // If this isn't a file URL then we're doing an init + - // apply. - var init InitCommand - init.Meta = c.Meta - if code := init.Run([]string{detected}); code != 0 { - return code - } - - // Change the config path to be the cwd - configPath = pwd - } - } - - // Check if the path is a plan - plan, err := c.Plan(configPath) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - if c.Destroy && plan != nil { - c.Ui.Error(fmt.Sprintf( - "Destroy can't be called with a plan file.")) - return 1 - } - if plan != nil { - // Reset the config path for backend loading - configPath = "" - } - - var diags tfdiags.Diagnostics - - // Load the module if we don't have one yet (not running from plan) - var mod *module.Tree - if plan == nil { - var modDiags tfdiags.Diagnostics - mod, modDiags = c.Module(configPath) - diags = diags.Append(modDiags) - if modDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - } - - var conf *config.Config - if mod != nil { - conf = mod.Config() - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: conf, - Plan: plan, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // Build the 
operation - opReq := c.Operation() - opReq.Destroy = c.Destroy - opReq.Module = mod - opReq.Plan = plan - opReq.PlanRefresh = refresh - opReq.Type = backend.OperationTypeApply - opReq.AutoApprove = autoApprove - opReq.DestroyForce = destroyForce - - // Perform the operation - ctx, ctxCancel := context.WithCancel(context.Background()) - defer ctxCancel() - - op, err := b.Operation(ctx, opReq) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error starting operation: %s", err)) - return 1 - } - - // Wait for the operation to complete or an interrupt to occur - select { - case <-c.ShutdownCh: - // Cancel our context so we can start gracefully exiting - ctxCancel() - - // Notify the user - c.Ui.Output(outputInterrupt) - - // Still get the result, since there is still one - select { - case <-c.ShutdownCh: - c.Ui.Error( - "Two interrupts received. Exiting immediately. Note that data\n" + - "loss may have occurred.") - return 1 - case <-op.Done(): - } - case <-op.Done(): - if err := op.Err; err != nil { - diags = diags.Append(err) - } - } - - c.showDiagnostics(diags) - if diags.HasErrors() { - return 1 - } - - if !c.Destroy { - // Get the right module that we used. If we ran a plan, then use - // that module. - if plan != nil { - mod = plan.Module - } - - if outputs := outputsAsString(op.State, terraform.RootModulePath, mod.Config().Outputs, true); outputs != "" { - c.Ui.Output(c.Colorize().Color(outputs)) - } - } - - return 0 -} - -func (c *ApplyCommand) Help() string { - if c.Destroy { - return c.helpDestroy() - } - - return c.helpApply() -} - -func (c *ApplyCommand) Synopsis() string { - if c.Destroy { - return "Destroy Terraform-managed infrastructure" - } - - return "Builds or changes infrastructure" -} - -func (c *ApplyCommand) helpApply() string { - helpText := ` -Usage: terraform apply [options] [DIR-OR-PLAN] - - Builds or changes infrastructure according to Terraform configuration - files in DIR. 
- - By default, apply scans the current directory for the configuration - and applies the changes appropriately. However, a path to another - configuration or an execution plan can be provided. Execution plans can be - used to only execute a pre-determined set of actions. - -Options: - - -backup=path Path to backup the existing state file before - modifying. Defaults to the "-state-out" path with - ".backup" extension. Set to "-" to disable backup. - - -lock=true Lock the state file when locking is supported. - - -lock-timeout=0s Duration to retry a state lock. - - -auto-approve Skip interactive approval of plan before applying. - - -input=true Ask for input for variables if not directly set. - - -no-color If specified, output won't contain any color. - - -parallelism=n Limit the number of parallel resource operations. - Defaults to 10. - - -refresh=true Update state prior to checking for differences. This - has no effect if a plan file is given to apply. - - -state=path Path to read and save state (unless state-out - is specified). Defaults to "terraform.tfstate". - - -state-out=path Path to write state to that is different than - "-state". This can be used to preserve the old - state. - - -target=resource Resource to target. Operation will be limited to this - resource and its dependencies. This flag can be used - multiple times. - - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. - - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" or any ".auto.tfvars" - files are present, they will be automatically loaded. - - -` - return strings.TrimSpace(helpText) -} - -func (c *ApplyCommand) helpDestroy() string { - helpText := ` -Usage: terraform destroy [options] [DIR] - - Destroy Terraform-managed infrastructure. - -Options: - - -backup=path Path to backup the existing state file before - modifying. Defaults to the "-state-out" path with - ".backup" extension. 
Set to "-" to disable backup. - - -force Don't ask for input for destroy confirmation. - - -lock=true Lock the state file when locking is supported. - - -lock-timeout=0s Duration to retry a state lock. - - -no-color If specified, output won't contain any color. - - -parallelism=n Limit the number of concurrent operations. - Defaults to 10. - - -refresh=true Update state prior to checking for differences. This - has no effect if a plan file is given to apply. - - -state=path Path to read and save state (unless state-out - is specified). Defaults to "terraform.tfstate". - - -state-out=path Path to write state to that is different than - "-state". This can be used to preserve the old - state. - - -target=resource Resource to target. Operation will be limited to this - resource and its dependencies. This flag can be used - multiple times. - - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. - - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" or any ".auto.tfvars" - files are present, they will be automatically loaded. 
- - -` - return strings.TrimSpace(helpText) -} - -func outputsAsString(state *terraform.State, modPath []string, schema []*config.Output, includeHeader bool) string { - if state == nil { - return "" - } - - ms := state.ModuleByPath(modPath) - if ms == nil { - return "" - } - - outputs := ms.Outputs - outputBuf := new(bytes.Buffer) - if len(outputs) > 0 { - schemaMap := make(map[string]*config.Output) - if schema != nil { - for _, s := range schema { - schemaMap[s.Name] = s - } - } - - if includeHeader { - outputBuf.WriteString("[reset][bold][green]\nOutputs:\n\n") - } - - // Output the outputs in alphabetical order - keyLen := 0 - ks := make([]string, 0, len(outputs)) - for key, _ := range outputs { - ks = append(ks, key) - if len(key) > keyLen { - keyLen = len(key) - } - } - sort.Strings(ks) - - for _, k := range ks { - schema, ok := schemaMap[k] - if ok && schema.Sensitive { - outputBuf.WriteString(fmt.Sprintf("%s = \n", k)) - continue - } - - v := outputs[k] - switch typedV := v.Value.(type) { - case string: - outputBuf.WriteString(fmt.Sprintf("%s = %s\n", k, typedV)) - case []interface{}: - outputBuf.WriteString(formatListOutput("", k, typedV)) - outputBuf.WriteString("\n") - case map[string]interface{}: - outputBuf.WriteString(formatMapOutput("", k, typedV)) - outputBuf.WriteString("\n") - } - } - } - - return strings.TrimSpace(outputBuf.String()) -} - -const outputInterrupt = `Interrupt received. -Please wait for Terraform to exit or data loss may occur. -Gracefully shutting down...` diff --git a/vendor/github.com/hashicorp/terraform/command/autocomplete.go b/vendor/github.com/hashicorp/terraform/command/autocomplete.go deleted file mode 100644 index 1ce90e4c9fb..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/autocomplete.go +++ /dev/null @@ -1,67 +0,0 @@ -package command - -import ( - "github.com/posener/complete" -) - -// This file contains some re-usable predictors for auto-complete. 
The -// command-specific autocomplete configurations live within each command's -// own source file, as AutocompleteArgs and AutocompleteFlags methods on each -// Command implementation. - -// For completing the value of boolean flags like -foo false -var completePredictBoolean = complete.PredictSet("true", "false") - -// We don't currently have a real predictor for module sources, but -// we'll probably add one later. -var completePredictModuleSource = complete.PredictAnything - -type completePredictSequence []complete.Predictor - -func (s completePredictSequence) Predict(a complete.Args) []string { - // Only one level of command is stripped off the prefix of a.Completed - // here, so nested subcommands like "workspace new" will need to provide - // dummy entries (e.g. complete.PredictNothing) as placeholders for - // all but the first subcommand. For example, "workspace new" needs - // one placeholder for the argument "new". - idx := len(a.Completed) - if idx >= len(s) { - return nil - } - - return s[idx].Predict(a) -} - -func (m *Meta) completePredictWorkspaceName() complete.Predictor { - return complete.PredictFunc(func(a complete.Args) []string { - // There are lot of things that can fail in here, so if we encounter - // any error then we'll just return nothing and not support autocomplete - // until whatever error is fixed. (The user can't actually see the error - // here, but other commands should produce a user-visible error before - // too long.) - - // We assume here that we want to autocomplete for the current working - // directory, since we don't have enough context to know where to - // find any config path argument, and it might be _after_ the argument - // we're trying to complete here anyway. 
- configPath, err := ModulePath(nil) - if err != nil { - return nil - } - - cfg, err := m.Config(configPath) - if err != nil { - return nil - } - - b, err := m.Backend(&BackendOpts{ - Config: cfg, - }) - if err != nil { - return nil - } - - names, _ := b.States() - return names - }) -} diff --git a/vendor/github.com/hashicorp/terraform/command/cli_ui.go b/vendor/github.com/hashicorp/terraform/command/cli_ui.go deleted file mode 100644 index 7679b1ed8ec..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/cli_ui.go +++ /dev/null @@ -1,51 +0,0 @@ -package command - -import ( - "fmt" - - "github.com/mitchellh/cli" - "github.com/mitchellh/colorstring" -) - -// ColoredUi is a Ui implementation that colors its output according -// to the given color schemes for the given type of output. -type ColorizeUi struct { - Colorize *colorstring.Colorize - OutputColor string - InfoColor string - ErrorColor string - WarnColor string - Ui cli.Ui -} - -func (u *ColorizeUi) Ask(query string) (string, error) { - return u.Ui.Ask(u.colorize(query, u.OutputColor)) -} - -func (u *ColorizeUi) AskSecret(query string) (string, error) { - return u.Ui.AskSecret(u.colorize(query, u.OutputColor)) -} - -func (u *ColorizeUi) Output(message string) { - u.Ui.Output(u.colorize(message, u.OutputColor)) -} - -func (u *ColorizeUi) Info(message string) { - u.Ui.Info(u.colorize(message, u.InfoColor)) -} - -func (u *ColorizeUi) Error(message string) { - u.Ui.Error(u.colorize(message, u.ErrorColor)) -} - -func (u *ColorizeUi) Warn(message string) { - u.Ui.Warn(u.colorize(message, u.WarnColor)) -} - -func (u *ColorizeUi) colorize(message string, color string) string { - if color == "" { - return message - } - - return u.Colorize.Color(fmt.Sprintf("%s%s[reset]", color, message)) -} diff --git a/vendor/github.com/hashicorp/terraform/command/command.go b/vendor/github.com/hashicorp/terraform/command/command.go deleted file mode 100644 index 0cd11da0878..00000000000 --- 
a/vendor/github.com/hashicorp/terraform/command/command.go +++ /dev/null @@ -1,98 +0,0 @@ -package command - -import ( - "fmt" - "log" - "os" - "runtime" - - "github.com/hashicorp/terraform/terraform" -) - -// Set to true when we're testing -var test bool = false - -// DefaultDataDir is the default directory for storing local data. -const DefaultDataDir = ".terraform" - -// PluginPathFile is the name of the file in the data dir which stores the list -// of directories supplied by the user with the `-plugin-dir` flag during init. -const PluginPathFile = "plugin_path" - -// pluginMachineName is the directory name used in new plugin paths. -const pluginMachineName = runtime.GOOS + "_" + runtime.GOARCH - -// DefaultPluginVendorDir is the location in the config directory to look for -// user-added plugin binaries. Terraform only reads from this path if it -// exists, it is never created by terraform. -const DefaultPluginVendorDir = "terraform.d/plugins/" + pluginMachineName - -// DefaultStateFilename is the default filename used for the state file. -const DefaultStateFilename = "terraform.tfstate" - -// DefaultVarsFilename is the default filename used for vars -const DefaultVarsFilename = "terraform.tfvars" - -// DefaultBackupExtension is added to the state file to form the path -const DefaultBackupExtension = ".backup" - -// DefaultParallelism is the limit Terraform places on total parallel -// operations as it walks the dependency graph. -const DefaultParallelism = 10 - -// ErrUnsupportedLocalOp is the common error message shown for operations -// that require a backend.Local. -const ErrUnsupportedLocalOp = `The configured backend doesn't support this operation. - -The "backend" in Terraform defines how Terraform operates. The default -backend performs all operations locally on your machine. Your configuration -is configured to use a non-local backend. This backend doesn't support this -operation. 
- -If you want to use the state from the backend but force all other data -(configuration, variables, etc.) to come locally, you can force local -behavior with the "-local" flag. -` - -// ModulePath returns the path to the root module from the CLI args. -// -// This centralizes the logic for any commands that expect a module path -// on their CLI args. This will verify that only one argument is given -// and that it is a path to configuration. -// -// If your command accepts more than one arg, then change the slice bounds -// to pass validation. -func ModulePath(args []string) (string, error) { - // TODO: test - - if len(args) > 1 { - return "", fmt.Errorf("Too many command line arguments. Configuration path expected.") - } - - if len(args) == 0 { - path, err := os.Getwd() - if err != nil { - return "", fmt.Errorf("Error getting pwd: %s", err) - } - - return path, nil - } - - return args[0], nil -} - -func (m *Meta) validateContext(ctx *terraform.Context) bool { - log.Println("[INFO] Validating the context...") - diags := ctx.Validate() - log.Printf("[INFO] Validation result: %d diagnostics", len(diags)) - - if len(diags) > 0 { - m.Ui.Output( - "There are warnings and/or errors related to your configuration. 
Please\n" + - "fix these before continuing.\n") - - m.showDiagnostics(diags) - } - - return !diags.HasErrors() -} diff --git a/vendor/github.com/hashicorp/terraform/command/console.go b/vendor/github.com/hashicorp/terraform/command/console.go deleted file mode 100644 index cf7e15f61e1..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/console.go +++ /dev/null @@ -1,159 +0,0 @@ -package command - -import ( - "bufio" - "fmt" - "strings" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/helper/wrappedstreams" - "github.com/hashicorp/terraform/repl" - "github.com/hashicorp/terraform/tfdiags" - - "github.com/mitchellh/cli" -) - -// ConsoleCommand is a Command implementation that applies a Terraform -// configuration and actually builds or changes infrastructure. -type ConsoleCommand struct { - Meta -} - -func (c *ConsoleCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - cmdFlags := c.Meta.flagSet("console") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - configPath, err := ModulePath(cmdFlags.Args()) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - var diags tfdiags.Diagnostics - - // Load the module - mod, diags := c.Module(configPath) - if diags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - - var conf *config.Config - if mod != nil { - conf = mod.Config() - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: conf, - }) - - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // We require a local backend - local, ok := b.(backend.Local) - if !ok { - c.Ui.Error(ErrUnsupportedLocalOp) - return 1 - } - - // Build the operation - opReq := c.Operation() - opReq.Module = mod - - // Get the context 
- ctx, _, err := local.Context(opReq) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Setup the UI so we can output directly to stdout - ui := &cli.BasicUi{ - Writer: wrappedstreams.Stdout(), - ErrorWriter: wrappedstreams.Stderr(), - } - - // IO Loop - session := &repl.Session{ - Interpolater: ctx.Interpolater(), - } - - // Determine if stdin is a pipe. If so, we evaluate directly. - if c.StdinPiped() { - return c.modePiped(session, ui) - } - - return c.modeInteractive(session, ui) -} - -func (c *ConsoleCommand) modePiped(session *repl.Session, ui cli.Ui) int { - var lastResult string - scanner := bufio.NewScanner(wrappedstreams.Stdin()) - for scanner.Scan() { - // Handle it. If there is an error exit immediately - result, err := session.Handle(strings.TrimSpace(scanner.Text())) - if err != nil { - ui.Error(err.Error()) - return 1 - } - - // Store the last result - lastResult = result - } - - // Output the final result - ui.Output(lastResult) - - return 0 -} - -func (c *ConsoleCommand) Help() string { - helpText := ` -Usage: terraform console [options] [DIR] - - Starts an interactive console for experimenting with Terraform - interpolations. - - This will open an interactive console that you can use to type - interpolations into and inspect their values. This command loads the - current state. This lets you explore and test interpolations before - using them in future configurations. - - This command will never modify your state. - - DIR can be set to a directory with a Terraform state to load. By - default, this will default to the current working directory. - -Options: - - -state=path Path to read state. Defaults to "terraform.tfstate" - - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. - - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" or any ".auto.tfvars" - files are present, they will be automatically loaded. 
- - -` - return strings.TrimSpace(helpText) -} - -func (c *ConsoleCommand) Synopsis() string { - return "Interactive console for Terraform interpolations" -} diff --git a/vendor/github.com/hashicorp/terraform/command/console_interactive.go b/vendor/github.com/hashicorp/terraform/command/console_interactive.go deleted file mode 100644 index f963528bcd9..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/console_interactive.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build !solaris - -// The readline library we use doesn't currently support solaris so -// we just build tag it off. - -package command - -import ( - "fmt" - "io" - - "github.com/hashicorp/terraform/helper/wrappedreadline" - "github.com/hashicorp/terraform/repl" - - "github.com/chzyer/readline" - "github.com/mitchellh/cli" -) - -func (c *ConsoleCommand) modeInteractive(session *repl.Session, ui cli.Ui) int { - // Configure input - l, err := readline.NewEx(wrappedreadline.Override(&readline.Config{ - Prompt: "> ", - InterruptPrompt: "^C", - EOFPrompt: "exit", - HistorySearchFold: true, - })) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error initializing console: %s", - err)) - return 1 - } - defer l.Close() - - for { - // Read a line - line, err := l.Readline() - if err == readline.ErrInterrupt { - if len(line) == 0 { - break - } else { - continue - } - } else if err == io.EOF { - break - } - - out, err := session.Handle(line) - if err == repl.ErrSessionExit { - break - } - if err != nil { - ui.Error(err.Error()) - continue - } - - ui.Output(out) - } - - return 0 -} diff --git a/vendor/github.com/hashicorp/terraform/command/console_interactive_solaris.go b/vendor/github.com/hashicorp/terraform/command/console_interactive_solaris.go deleted file mode 100644 index ecb025d4f82..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/console_interactive_solaris.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build solaris - -package command - -import ( - "fmt" - - 
"github.com/hashicorp/terraform/repl" - "github.com/mitchellh/cli" -) - -func (c *ConsoleCommand) modeInteractive(session *repl.Session, ui cli.Ui) int { - ui.Error(fmt.Sprintf( - "The readline library Terraform currently uses for the interactive\n" + - "console is not supported by Solaris. Interactive mode is therefore\n" + - "not supported on Solaris currently.")) - return 1 -} diff --git a/vendor/github.com/hashicorp/terraform/command/debug_command.go b/vendor/github.com/hashicorp/terraform/command/debug_command.go deleted file mode 100644 index 7058553b9f5..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/debug_command.go +++ /dev/null @@ -1,30 +0,0 @@ -package command - -import ( - "strings" - - "github.com/mitchellh/cli" -) - -// DebugCommand is a Command implementation that just shows help for -// the subcommands nested below it. -type DebugCommand struct { - Meta -} - -func (c *DebugCommand) Run(args []string) int { - return cli.RunResultHelp -} - -func (c *DebugCommand) Help() string { - helpText := ` -Usage: terraform debug [options] [args] - - This command has subcommands for debug output management -` - return strings.TrimSpace(helpText) -} - -func (c *DebugCommand) Synopsis() string { - return "Debug output management (experimental)" -} diff --git a/vendor/github.com/hashicorp/terraform/command/debug_json2dot.go b/vendor/github.com/hashicorp/terraform/command/debug_json2dot.go deleted file mode 100644 index 64361bcf1ba..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/debug_json2dot.go +++ /dev/null @@ -1,66 +0,0 @@ -package command - -import ( - "fmt" - "os" - "strings" - - "github.com/hashicorp/terraform/dag" - "github.com/mitchellh/cli" -) - -// DebugJSON2DotCommand is a Command implementation that translates a json -// graph debug log to Dot format. 
-type DebugJSON2DotCommand struct { - Meta -} - -func (c *DebugJSON2DotCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - cmdFlags := c.Meta.flagSet("debug json2dot") - - if err := cmdFlags.Parse(args); err != nil { - return cli.RunResultHelp - } - - fileName := cmdFlags.Arg(0) - if fileName == "" { - return cli.RunResultHelp - } - - f, err := os.Open(fileName) - if err != nil { - c.Ui.Error(fmt.Sprintf(errInvalidLog, err)) - return cli.RunResultHelp - } - - dot, err := dag.JSON2Dot(f) - if err != nil { - c.Ui.Error(fmt.Sprintf(errInvalidLog, err)) - return cli.RunResultHelp - } - - c.Ui.Output(string(dot)) - return 0 -} - -func (c *DebugJSON2DotCommand) Help() string { - helpText := ` -Usage: terraform debug json2dot input.json - - Translate a graph debug file to dot format. - - This command takes a single json graph log file and converts it to a single - dot graph written to stdout. -` - return strings.TrimSpace(helpText) -} - -func (c *DebugJSON2DotCommand) Synopsis() string { - return "Convert json graph log to dot" -} - -const errInvalidLog = `Error parsing log file: %[1]s` diff --git a/vendor/github.com/hashicorp/terraform/command/flag_kv.go b/vendor/github.com/hashicorp/terraform/command/flag_kv.go deleted file mode 100644 index b084c5135d9..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/flag_kv.go +++ /dev/null @@ -1,43 +0,0 @@ -package command - -import ( - "fmt" - "strings" -) - -// FlagStringKV is a flag.Value implementation for parsing user variables -// from the command-line in the format of '-var key=value', where value is -// only ever a primitive. 
-type FlagStringKV map[string]string - -func (v *FlagStringKV) String() string { - return "" -} - -func (v *FlagStringKV) Set(raw string) error { - idx := strings.Index(raw, "=") - if idx == -1 { - return fmt.Errorf("No '=' value in arg: %s", raw) - } - - if *v == nil { - *v = make(map[string]string) - } - - key, value := raw[0:idx], raw[idx+1:] - (*v)[key] = value - return nil -} - -// FlagStringSlice is a flag.Value implementation for parsing targets from the -// command line, e.g. -target=aws_instance.foo -target=aws_vpc.bar -type FlagStringSlice []string - -func (v *FlagStringSlice) String() string { - return "" -} -func (v *FlagStringSlice) Set(raw string) error { - *v = append(*v, raw) - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/command/fmt.go b/vendor/github.com/hashicorp/terraform/command/fmt.go deleted file mode 100644 index 9fefd25178d..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/fmt.go +++ /dev/null @@ -1,126 +0,0 @@ -package command - -import ( - "bytes" - "flag" - "fmt" - "io" - "os" - "strings" - - "github.com/hashicorp/hcl/hcl/fmtcmd" - "github.com/mitchellh/cli" -) - -const ( - stdinArg = "-" - fileExtension = "tf" -) - -// FmtCommand is a Command implementation that rewrites Terraform config -// files to a canonical format and style. 
-type FmtCommand struct { - Meta - opts fmtcmd.Options - check bool - input io.Reader // STDIN if nil -} - -func (c *FmtCommand) Run(args []string) int { - if c.input == nil { - c.input = os.Stdin - } - - args, err := c.Meta.process(args, false) - if err != nil { - return 1 - } - - cmdFlags := flag.NewFlagSet("fmt", flag.ContinueOnError) - cmdFlags.BoolVar(&c.opts.List, "list", true, "list") - cmdFlags.BoolVar(&c.opts.Write, "write", true, "write") - cmdFlags.BoolVar(&c.opts.Diff, "diff", false, "diff") - cmdFlags.BoolVar(&c.check, "check", false, "check") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - args = cmdFlags.Args() - if len(args) > 1 { - c.Ui.Error("The fmt command expects at most one argument.") - cmdFlags.Usage() - return 1 - } - - var dirs []string - if len(args) == 0 { - dirs = []string{"."} - } else if args[0] == stdinArg { - c.opts.List = false - c.opts.Write = false - } else { - dirs = []string{args[0]} - } - - var output io.Writer - list := c.opts.List // preserve the original value of -list - if c.check { - // set to true so we can use the list output to check - // if the input needs formatting - c.opts.List = true - c.opts.Write = false - output = &bytes.Buffer{} - } else { - output = &cli.UiWriter{Ui: c.Ui} - } - - err = fmtcmd.Run(dirs, []string{fileExtension}, c.input, output, c.opts) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error running fmt: %s", err)) - return 2 - } - - if c.check { - buf := output.(*bytes.Buffer) - ok := buf.Len() == 0 - if list { - io.Copy(&cli.UiWriter{Ui: c.Ui}, buf) - } - if ok { - return 0 - } else { - return 3 - } - } - - return 0 -} - -func (c *FmtCommand) Help() string { - helpText := ` -Usage: terraform fmt [options] [DIR] - - Rewrites all Terraform configuration files to a canonical format. - - If DIR is not specified then the current working directory will be used. - If DIR is "-" then content will be read from STDIN. 
- -Options: - - -list=true List files whose formatting differs (always false if using STDIN) - - -write=true Write result to source file instead of STDOUT (always false if using STDIN or -check) - - -diff=false Display diffs of formatting changes - - -check=false Check if the input is formatted. Exit status will be 0 if all input is properly formatted and non-zero otherwise. - -` - return strings.TrimSpace(helpText) -} - -func (c *FmtCommand) Synopsis() string { - return "Rewrites config files to canonical format" -} diff --git a/vendor/github.com/hashicorp/terraform/command/get.go b/vendor/github.com/hashicorp/terraform/command/get.go deleted file mode 100644 index ba8729d79a8..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/get.go +++ /dev/null @@ -1,90 +0,0 @@ -package command - -import ( - "flag" - "fmt" - "strings" - - "github.com/hashicorp/terraform/config/module" -) - -// GetCommand is a Command implementation that takes a Terraform -// configuration and downloads all the modules. -type GetCommand struct { - Meta -} - -func (c *GetCommand) Run(args []string) int { - var update bool - - args, err := c.Meta.process(args, false) - if err != nil { - return 1 - } - - cmdFlags := flag.NewFlagSet("get", flag.ContinueOnError) - cmdFlags.BoolVar(&update, "update", false, "update") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - path, err := ModulePath(cmdFlags.Args()) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - mode := module.GetModeGet - if update { - mode = module.GetModeUpdate - } - - if err := getModules(&c.Meta, path, mode); err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - return 0 -} - -func (c *GetCommand) Help() string { - helpText := ` -Usage: terraform get [options] PATH - - Downloads and installs modules needed for the configuration given by - PATH. 
- - This recursively downloads all modules needed, such as modules - imported by modules imported by the root and so on. If a module is - already downloaded, it will not be redownloaded or checked for updates - unless the -update flag is specified. - -Options: - - -update=false If true, modules already downloaded will be checked - for updates and updated if necessary. - - -no-color If specified, output won't contain any color. - -` - return strings.TrimSpace(helpText) -} - -func (c *GetCommand) Synopsis() string { - return "Download and install modules for the configuration" -} - -func getModules(m *Meta, path string, mode module.GetMode) error { - mod, err := module.NewTreeModule("", path) - if err != nil { - return fmt.Errorf("Error loading configuration: %s", err) - } - - err = mod.Load(m.moduleStorage(m.DataDir(), mode)) - if err != nil { - return fmt.Errorf("Error loading modules: %s", err) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/command/graph.go b/vendor/github.com/hashicorp/terraform/command/graph.go deleted file mode 100644 index 7723043e8f4..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/graph.go +++ /dev/null @@ -1,199 +0,0 @@ -package command - -import ( - "flag" - "fmt" - "strings" - - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/terraform" -) - -// GraphCommand is a Command implementation that takes a Terraform -// configuration and outputs the dependency tree in graphical form. 
-type GraphCommand struct { - Meta -} - -func (c *GraphCommand) Run(args []string) int { - var moduleDepth int - var verbose bool - var drawCycles bool - var graphTypeStr string - - args, err := c.Meta.process(args, false) - if err != nil { - return 1 - } - - cmdFlags := flag.NewFlagSet("graph", flag.ContinueOnError) - c.addModuleDepthFlag(cmdFlags, &moduleDepth) - cmdFlags.BoolVar(&verbose, "verbose", false, "verbose") - cmdFlags.BoolVar(&drawCycles, "draw-cycles", false, "draw-cycles") - cmdFlags.StringVar(&graphTypeStr, "type", "", "type") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - configPath, err := ModulePath(cmdFlags.Args()) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Check if the path is a plan - plan, err := c.Plan(configPath) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - if plan != nil { - // Reset for backend loading - configPath = "" - } - - var diags tfdiags.Diagnostics - - // Load the module - var mod *module.Tree - if plan == nil { - var modDiags tfdiags.Diagnostics - mod, modDiags = c.Module(configPath) - diags = diags.Append(modDiags) - if modDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - } - - var conf *config.Config - if mod != nil { - conf = mod.Config() - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: conf, - Plan: plan, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // We require a local backend - local, ok := b.(backend.Local) - if !ok { - c.Ui.Error(ErrUnsupportedLocalOp) - return 1 - } - - // Building a graph may require config module to be present, even if it's - // empty. 
- if mod == nil && plan == nil { - mod = module.NewEmptyTree() - } - - // Build the operation - opReq := c.Operation() - opReq.Module = mod - opReq.Plan = plan - - // Get the context - ctx, _, err := local.Context(opReq) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Determine the graph type - graphType := terraform.GraphTypePlan - if plan != nil { - graphType = terraform.GraphTypeApply - } - - if graphTypeStr != "" { - v, ok := terraform.GraphTypeMap[graphTypeStr] - if !ok { - c.Ui.Error(fmt.Sprintf("Invalid graph type requested: %s", graphTypeStr)) - return 1 - } - - graphType = v - } - - // Skip validation during graph generation - we want to see the graph even if - // it is invalid for some reason. - g, err := ctx.Graph(graphType, &terraform.ContextGraphOpts{ - Verbose: verbose, - Validate: false, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error creating graph: %s", err)) - return 1 - } - - graphStr, err := terraform.GraphDot(g, &dag.DotOpts{ - DrawCycles: drawCycles, - MaxDepth: moduleDepth, - Verbose: verbose, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error converting graph: %s", err)) - return 1 - } - - if diags.HasErrors() { - // For this command we only show diagnostics if there are errors, - // because printing out naked warnings could upset a naive program - // consuming our dot output. - c.showDiagnostics(diags) - return 1 - } - - c.Ui.Output(graphStr) - - return 0 -} - -func (c *GraphCommand) Help() string { - helpText := ` -Usage: terraform graph [options] [DIR] - - Outputs the visual execution graph of Terraform resources according to - configuration files in DIR (or the current directory if omitted). - - The graph is outputted in DOT format. The typical program that can - read this format is GraphViz, but many web services are also available - to read this format. - - The -type flag can be used to control the type of graph shown. Terraform - creates different graphs for different operations. 
See the options below - for the list of types supported. The default type is "plan" if a - configuration is given, and "apply" if a plan file is passed as an - argument. - -Options: - - -draw-cycles Highlight any cycles in the graph with colored edges. - This helps when diagnosing cycle errors. - - -no-color If specified, output won't contain any color. - - -type=plan Type of graph to output. Can be: plan, plan-destroy, apply, - validate, input, refresh. - - -` - return strings.TrimSpace(helpText) -} - -func (c *GraphCommand) Synopsis() string { - return "Create a visual graph of Terraform resources" -} diff --git a/vendor/github.com/hashicorp/terraform/command/hcl_printer.go b/vendor/github.com/hashicorp/terraform/command/hcl_printer.go deleted file mode 100644 index 677e5c0aaaa..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/hcl_printer.go +++ /dev/null @@ -1,196 +0,0 @@ -package command - -// Marshal an object as an hcl value. -import ( - "bytes" - "fmt" - "regexp" - - "github.com/hashicorp/hcl/hcl/printer" -) - -// This will only work operate on []interface{}, map[string]interface{}, and -// primitive types. -func encodeHCL(i interface{}) ([]byte, error) { - state := &encodeState{} - err := state.encode(i) - if err != nil { - return nil, err - } - - hcl := state.Bytes() - if len(hcl) == 0 { - return hcl, nil - } - - // the HCL parser requires an assignment. Strip it off again later - fakeAssignment := append([]byte("X = "), hcl...) 
- - // use the real hcl parser to verify our output, and format it canonically - hcl, err = printer.Format(fakeAssignment) - if err != nil { - return nil, err - } - - // now strip that first assignment off - eq := regexp.MustCompile(`=\s+`).FindIndex(hcl) - - // strip of an extra \n if it's there - end := len(hcl) - if hcl[end-1] == '\n' { - end -= 1 - } - - return hcl[eq[1]:end], nil -} - -type encodeState struct { - bytes.Buffer -} - -func (e *encodeState) encode(i interface{}) error { - switch v := i.(type) { - case []interface{}: - return e.encodeList(v) - - case map[string]interface{}: - return e.encodeMap(v) - - case int, int8, int32, int64, uint8, uint32, uint64: - return e.encodeInt(i) - - case float32, float64: - return e.encodeFloat(i) - - case string: - return e.encodeString(v) - - case nil: - return nil - - default: - return fmt.Errorf("invalid type %T", i) - } - -} - -func (e *encodeState) encodeList(l []interface{}) error { - e.WriteString("[") - for i, v := range l { - err := e.encode(v) - if err != nil { - return err - } - if i < len(l)-1 { - e.WriteString(", ") - } - } - e.WriteString("]") - return nil -} - -func (e *encodeState) encodeMap(m map[string]interface{}) error { - e.WriteString("{\n") - for i, k := range sortedKeys(m) { - v := m[k] - - e.WriteString(fmt.Sprintf("%q = ", k)) - err := e.encode(v) - if err != nil { - return err - } - if i < len(m)-1 { - e.WriteString("\n") - } - } - e.WriteString("}") - return nil -} - -func (e *encodeState) encodeInt(i interface{}) error { - _, err := fmt.Fprintf(e, "%d", i) - return err -} - -func (e *encodeState) encodeFloat(f interface{}) error { - _, err := fmt.Fprintf(e, "%g", f) - return err -} - -func (e *encodeState) encodeString(s string) error { - e.Write(quoteHCLString(s)) - return nil -} - -// Quote an HCL string, which may contain interpolations. -// Since the string was already parsed from HCL, we have to assume the -// required characters are sanely escaped. 
All we need to do is escape double -// quotes in the string, unless they are in an interpolation block. -func quoteHCLString(s string) []byte { - out := make([]byte, 0, len(s)) - out = append(out, '"') - - // our parse states - var ( - outer = 1 // the starting state for the string - dollar = 2 // look for '{' in the next character - interp = 3 // inside an interpolation block - escape = 4 // take the next character and pop back to prev state - ) - - // we could have nested interpolations - state := stack{} - state.push(outer) - - for i := 0; i < len(s); i++ { - switch state.peek() { - case outer: - switch s[i] { - case '"': - out = append(out, '\\') - case '$': - state.push(dollar) - case '\\': - state.push(escape) - } - case dollar: - state.pop() - switch s[i] { - case '{': - state.push(interp) - case '\\': - state.push(escape) - } - case interp: - switch s[i] { - case '}': - state.pop() - } - case escape: - state.pop() - } - - out = append(out, s[i]) - } - - out = append(out, '"') - - return out -} - -type stack []int - -func (s *stack) push(i int) { - *s = append(*s, i) -} - -func (s *stack) pop() int { - last := len(*s) - 1 - i := (*s)[last] - *s = (*s)[:last] - return i -} - -func (s *stack) peek() int { - return (*s)[len(*s)-1] -} diff --git a/vendor/github.com/hashicorp/terraform/command/hook_ui.go b/vendor/github.com/hashicorp/terraform/command/hook_ui.go deleted file mode 100644 index 76b1ca59cff..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/hook_ui.go +++ /dev/null @@ -1,414 +0,0 @@ -package command - -import ( - "bufio" - "bytes" - "fmt" - "sort" - "strings" - "sync" - "time" - "unicode" - - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" - "github.com/mitchellh/colorstring" -) - -const defaultPeriodicUiTimer = 10 * time.Second -const maxIdLen = 80 - -type UiHook struct { - terraform.NilHook - - Colorize *colorstring.Colorize - Ui cli.Ui - PeriodicUiTimer time.Duration - - l sync.Mutex - once sync.Once - 
resources map[string]uiResourceState - ui cli.Ui -} - -// uiResourceState tracks the state of a single resource -type uiResourceState struct { - Name string - ResourceId string - Op uiResourceOp - Start time.Time - - DoneCh chan struct{} // To be used for cancellation - - done chan struct{} // used to coordinate tests -} - -// uiResourceOp is an enum for operations on a resource -type uiResourceOp byte - -const ( - uiResourceUnknown uiResourceOp = iota - uiResourceCreate - uiResourceModify - uiResourceDestroy -) - -func (h *UiHook) PreApply( - n *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (terraform.HookAction, error) { - h.once.Do(h.init) - - // if there's no diff, there's nothing to output - if d.Empty() { - return terraform.HookActionContinue, nil - } - - id := n.HumanId() - addr := n.ResourceAddress() - - op := uiResourceModify - if d.Destroy { - op = uiResourceDestroy - } else if s.ID == "" { - op = uiResourceCreate - } - - var operation string - switch op { - case uiResourceModify: - operation = "Modifying..." - case uiResourceDestroy: - operation = "Destroying..." - case uiResourceCreate: - operation = "Creating..." - case uiResourceUnknown: - return terraform.HookActionContinue, nil - } - - attrBuf := new(bytes.Buffer) - - // Get all the attributes that are changing, and sort them. Also - // determine the longest key so that we can align them all. 
- keyLen := 0 - - dAttrs := d.CopyAttributes() - keys := make([]string, 0, len(dAttrs)) - for key, _ := range dAttrs { - // Skip the ID since we do that specially - if key == "id" { - continue - } - - keys = append(keys, key) - if len(key) > keyLen { - keyLen = len(key) - } - } - sort.Strings(keys) - - // Go through and output each attribute - for _, attrK := range keys { - attrDiff, _ := d.GetAttribute(attrK) - - v := attrDiff.New - u := attrDiff.Old - if attrDiff.NewComputed { - v = "" - } - - if attrDiff.Sensitive { - u = "" - v = "" - } - - attrBuf.WriteString(fmt.Sprintf( - " %s:%s %#v => %#v\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, - v)) - } - - attrString := strings.TrimSpace(attrBuf.String()) - if attrString != "" { - attrString = "\n " + attrString - } - - var stateId, stateIdSuffix string - if s != nil && s.ID != "" { - stateId = s.ID - stateIdSuffix = fmt.Sprintf(" (ID: %s)", truncateId(s.ID, maxIdLen)) - } - - h.ui.Output(h.Colorize.Color(fmt.Sprintf( - "[reset][bold]%s: %s%s[reset]%s", - addr, - operation, - stateIdSuffix, - attrString))) - - uiState := uiResourceState{ - Name: id, - ResourceId: stateId, - Op: op, - Start: time.Now().Round(time.Second), - DoneCh: make(chan struct{}), - done: make(chan struct{}), - } - - h.l.Lock() - h.resources[id] = uiState - h.l.Unlock() - - // Start goroutine that shows progress - go h.stillApplying(uiState) - - return terraform.HookActionContinue, nil -} - -func (h *UiHook) stillApplying(state uiResourceState) { - defer close(state.done) - for { - select { - case <-state.DoneCh: - return - - case <-time.After(h.PeriodicUiTimer): - // Timer up, show status - } - - var msg string - switch state.Op { - case uiResourceModify: - msg = "Still modifying..." - case uiResourceDestroy: - msg = "Still destroying..." - case uiResourceCreate: - msg = "Still creating..." 
- case uiResourceUnknown: - return - } - - idSuffix := "" - if v := state.ResourceId; v != "" { - idSuffix = fmt.Sprintf("ID: %s, ", truncateId(v, maxIdLen)) - } - - h.ui.Output(h.Colorize.Color(fmt.Sprintf( - "[reset][bold]%s: %s (%s%s elapsed)[reset]", - state.Name, - msg, - idSuffix, - time.Now().Round(time.Second).Sub(state.Start), - ))) - } -} - -func (h *UiHook) PostApply( - n *terraform.InstanceInfo, - s *terraform.InstanceState, - applyerr error) (terraform.HookAction, error) { - - id := n.HumanId() - addr := n.ResourceAddress() - - h.l.Lock() - state := h.resources[id] - if state.DoneCh != nil { - close(state.DoneCh) - } - - delete(h.resources, id) - h.l.Unlock() - - var stateIdSuffix string - if s != nil && s.ID != "" { - stateIdSuffix = fmt.Sprintf(" (ID: %s)", truncateId(s.ID, maxIdLen)) - } - - var msg string - switch state.Op { - case uiResourceModify: - msg = "Modifications complete" - case uiResourceDestroy: - msg = "Destruction complete" - case uiResourceCreate: - msg = "Creation complete" - case uiResourceUnknown: - return terraform.HookActionContinue, nil - } - - if applyerr != nil { - // Errors are collected and printed in ApplyCommand, no need to duplicate - return terraform.HookActionContinue, nil - } - - colorized := h.Colorize.Color(fmt.Sprintf( - "[reset][bold]%s: %s after %s%s[reset]", - addr, msg, time.Now().Round(time.Second).Sub(state.Start), stateIdSuffix)) - - h.ui.Output(colorized) - - return terraform.HookActionContinue, nil -} - -func (h *UiHook) PreDiff( - n *terraform.InstanceInfo, - s *terraform.InstanceState) (terraform.HookAction, error) { - return terraform.HookActionContinue, nil -} - -func (h *UiHook) PreProvision( - n *terraform.InstanceInfo, - provId string) (terraform.HookAction, error) { - addr := n.ResourceAddress() - h.ui.Output(h.Colorize.Color(fmt.Sprintf( - "[reset][bold]%s: Provisioning with '%s'...[reset]", - addr, provId))) - return terraform.HookActionContinue, nil -} - -func (h *UiHook) ProvisionOutput( - n 
*terraform.InstanceInfo, - provId string, - msg string) { - addr := n.ResourceAddress() - var buf bytes.Buffer - buf.WriteString(h.Colorize.Color("[reset]")) - - prefix := fmt.Sprintf("%s (%s): ", addr, provId) - s := bufio.NewScanner(strings.NewReader(msg)) - s.Split(scanLines) - for s.Scan() { - line := strings.TrimRightFunc(s.Text(), unicode.IsSpace) - if line != "" { - buf.WriteString(fmt.Sprintf("%s%s\n", prefix, line)) - } - } - - h.ui.Output(strings.TrimSpace(buf.String())) -} - -func (h *UiHook) PreRefresh( - n *terraform.InstanceInfo, - s *terraform.InstanceState) (terraform.HookAction, error) { - h.once.Do(h.init) - - addr := n.ResourceAddress() - - var stateIdSuffix string - // Data resources refresh before they have ids, whereas managed - // resources are only refreshed when they have ids. - if s.ID != "" { - stateIdSuffix = fmt.Sprintf(" (ID: %s)", truncateId(s.ID, maxIdLen)) - } - - h.ui.Output(h.Colorize.Color(fmt.Sprintf( - "[reset][bold]%s: Refreshing state...%s", - addr, stateIdSuffix))) - return terraform.HookActionContinue, nil -} - -func (h *UiHook) PreImportState( - n *terraform.InstanceInfo, - id string) (terraform.HookAction, error) { - h.once.Do(h.init) - - addr := n.ResourceAddress() - h.ui.Output(h.Colorize.Color(fmt.Sprintf( - "[reset][bold]%s: Importing from ID %q...", - addr, id))) - return terraform.HookActionContinue, nil -} - -func (h *UiHook) PostImportState( - n *terraform.InstanceInfo, - s []*terraform.InstanceState) (terraform.HookAction, error) { - h.once.Do(h.init) - - addr := n.ResourceAddress() - h.ui.Output(h.Colorize.Color(fmt.Sprintf( - "[reset][bold][green]%s: Import complete!", addr))) - for _, s := range s { - h.ui.Output(h.Colorize.Color(fmt.Sprintf( - "[reset][green] Imported %s (ID: %s)", - s.Ephemeral.Type, s.ID))) - } - - return terraform.HookActionContinue, nil -} - -func (h *UiHook) init() { - if h.Colorize == nil { - panic("colorize not given") - } - if h.PeriodicUiTimer == 0 { - h.PeriodicUiTimer = 
defaultPeriodicUiTimer - } - - h.resources = make(map[string]uiResourceState) - - // Wrap the ui so that it is safe for concurrency regardless of the - // underlying reader/writer that is in place. - h.ui = &cli.ConcurrentUi{Ui: h.Ui} -} - -// scanLines is basically copied from the Go standard library except -// we've modified it to also fine `\r`. -func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - if i := bytes.IndexByte(data, '\n'); i >= 0 { - // We have a full newline-terminated line. - return i + 1, dropCR(data[0:i]), nil - } - if i := bytes.IndexByte(data, '\r'); i >= 0 { - // We have a full newline-terminated line. - return i + 1, dropCR(data[0:i]), nil - } - // If we're at EOF, we have a final, non-terminated line. Return it. - if atEOF { - return len(data), dropCR(data), nil - } - // Request more data. - return 0, nil, nil -} - -// dropCR drops a terminal \r from the data. -func dropCR(data []byte) []byte { - if len(data) > 0 && data[len(data)-1] == '\r' { - return data[0 : len(data)-1] - } - return data -} - -func truncateId(id string, maxLen int) string { - totalLength := len(id) - if totalLength <= maxLen { - return id - } - if maxLen < 5 { - // We don't shorten to less than 5 chars - // as that would be pointless with ... (3 chars) - maxLen = 5 - } - - dots := "..." 
- partLen := maxLen / 2 - - leftIdx := partLen - 1 - leftPart := id[0:leftIdx] - - rightIdx := totalLength - partLen - 1 - - overlap := maxLen - (partLen*2 + len(dots)) - if overlap < 0 { - rightIdx -= overlap - } - - rightPart := id[rightIdx:] - - return leftPart + dots + rightPart -} diff --git a/vendor/github.com/hashicorp/terraform/command/import.go b/vendor/github.com/hashicorp/terraform/command/import.go deleted file mode 100644 index cbaeec5f492..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/import.go +++ /dev/null @@ -1,347 +0,0 @@ -package command - -import ( - "fmt" - "log" - "os" - "strings" - - "github.com/hashicorp/hcl2/hcl" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/terraform/tfdiags" -) - -// ImportCommand is a cli.Command implementation that imports resources -// into the Terraform state. -type ImportCommand struct { - Meta -} - -func (c *ImportCommand) Run(args []string) int { - // Get the pwd since its our default -config flag value - pwd, err := os.Getwd() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error getting pwd: %s", err)) - return 1 - } - - var configPath string - args, err = c.Meta.process(args, true) - if err != nil { - return 1 - } - - cmdFlags := c.Meta.flagSet("import") - cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", 0, "parallelism") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") - cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") - cmdFlags.StringVar(&configPath, "config", pwd, "path") - cmdFlags.StringVar(&c.Meta.provider, "provider", "", "provider") - cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") - cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") - 
cmdFlags.BoolVar(&c.Meta.allowMissingConfig, "allow-missing-config", false, "allow missing config") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - args = cmdFlags.Args() - if len(args) != 2 { - c.Ui.Error("The import command expects two arguments.") - cmdFlags.Usage() - return 1 - } - - // Validate the provided resource address for syntax - addr, err := terraform.ParseResourceAddress(args[0]) - if err != nil { - c.Ui.Error(fmt.Sprintf(importCommandInvalidAddressFmt, err)) - return 1 - } - if !addr.HasResourceSpec() { - // module.foo target isn't allowed for import - c.Ui.Error(importCommandMissingResourceSpecMsg) - return 1 - } - if addr.Mode != config.ManagedResourceMode { - // can't import to a data resource address - c.Ui.Error(importCommandResourceModeMsg) - return 1 - } - - var diags tfdiags.Diagnostics - - // Load the module - var mod *module.Tree - if configPath != "" { - if empty, _ := config.IsEmptyDir(configPath); empty { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "No Terraform configuration files", - Detail: fmt.Sprintf( - "The directory %s does not contain any Terraform configuration files (.tf or .tf.json). To specify a different configuration directory, use the -config=\"...\" command line option.", - configPath, - ), - }) - c.showDiagnostics(diags) - return 1 - } - - var modDiags tfdiags.Diagnostics - mod, modDiags = c.Module(configPath) - diags = diags.Append(modDiags) - if modDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - } - - // Verify that the given address points to something that exists in config. - // This is to reduce the risk that a typo in the resource address will - // import something that Terraform will want to immediately destroy on - // the next plan, and generally acts as a reassurance of user intent. 
- targetMod := mod.Child(addr.Path) - if targetMod == nil { - modulePath := addr.WholeModuleAddress().String() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Import to non-existent module", - Detail: fmt.Sprintf( - "%s is not defined in the configuration. Please add configuration for this module before importing into it.", - modulePath, - ), - }) - c.showDiagnostics(diags) - return 1 - } - rcs := targetMod.Config().Resources - var rc *config.Resource - for _, thisRc := range rcs { - if addr.MatchesConfig(targetMod, thisRc) { - rc = thisRc - break - } - } - if !c.Meta.allowMissingConfig && rc == nil { - modulePath := addr.WholeModuleAddress().String() - if modulePath == "" { - modulePath = "the root module" - } - - c.showDiagnostics(diags) - - // This is not a diagnostic because currently our diagnostics printer - // doesn't support having a code example in the detail, and there's - // a code example in this message. - // TODO: Improve the diagnostics printer so we can use it for this - // message. - c.Ui.Error(fmt.Sprintf( - importCommandMissingResourceFmt, - addr, modulePath, addr.Type, addr.Name, - )) - return 1 - } - - // Check for user-supplied plugin path - if c.pluginPath, err = c.loadPluginPath(); err != nil { - c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) - return 1 - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: mod.Config(), - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // We require a backend.Local to build a context. - // This isn't necessarily a "local.Local" backend, which provides local - // operations, however that is the only current implementation. A - // "local.Local" backend also doesn't necessarily provide local state, as - // that may be delegated to a "remotestate.Backend". 
- local, ok := b.(backend.Local) - if !ok { - c.Ui.Error(ErrUnsupportedLocalOp) - return 1 - } - - // Build the operation - opReq := c.Operation() - opReq.Module = mod - - // Get the context - ctx, state, err := local.Context(opReq) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Perform the import. Note that as you can see it is possible for this - // API to import more than one resource at once. For now, we only allow - // one while we stabilize this feature. - newState, err := ctx.Import(&terraform.ImportOpts{ - Targets: []*terraform.ImportTarget{ - &terraform.ImportTarget{ - Addr: args[0], - ID: args[1], - Provider: c.Meta.provider, - }, - }, - }) - if err != nil { - diags = diags.Append(err) - c.showDiagnostics(diags) - return 1 - } - - // Persist the final state - log.Printf("[INFO] Writing state output to: %s", c.Meta.StateOutPath()) - if err := state.WriteState(newState); err != nil { - c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) - return 1 - } - if err := state.PersistState(); err != nil { - c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) - return 1 - } - - c.Ui.Output(c.Colorize().Color("[reset][green]\n" + importCommandSuccessMsg)) - - if c.Meta.allowMissingConfig && rc == nil { - c.Ui.Output(c.Colorize().Color("[reset][yellow]\n" + importCommandAllowMissingResourceMsg)) - } - - c.showDiagnostics(diags) - if diags.HasErrors() { - return 1 - } - - return 0 -} - -func (c *ImportCommand) Help() string { - helpText := ` -Usage: terraform import [options] ADDR ID - - Import existing infrastructure into your Terraform state. - - This will find and import the specified resource into your Terraform - state, allowing existing infrastructure to come under Terraform - management without having to be initially created by Terraform. - - The ADDR specified is the address to import the resource to. Please - see the documentation online for resource addresses. 
The ID is a - resource-specific ID to identify that resource being imported. Please - reference the documentation for the resource type you're importing to - determine the ID syntax to use. It typically matches directly to the ID - that the provider uses. - - The current implementation of Terraform import can only import resources - into the state. It does not generate configuration. A future version of - Terraform will also generate configuration. - - Because of this, prior to running terraform import it is necessary to write - a resource configuration block for the resource manually, to which the - imported object will be attached. - - This command will not modify your infrastructure, but it will make - network requests to inspect parts of your infrastructure relevant to - the resource being imported. - -Options: - - -backup=path Path to backup the existing state file before - modifying. Defaults to the "-state-out" path with - ".backup" extension. Set to "-" to disable backup. - - -config=path Path to a directory of Terraform configuration files - to use to configure the provider. Defaults to pwd. - If no config files are present, they must be provided - via the input prompts or env vars. - - -allow-missing-config Allow import when no resource configuration block exists. - - -input=true Ask for input for variables if not directly set. - - -lock=true Lock the state file when locking is supported. - - -lock-timeout=0s Duration to retry a state lock. - - -no-color If specified, output won't contain any color. - - -provider=provider Specific provider to use for import. This is used for - specifying aliases, such as "aws.eu". Defaults to the - normal provider prefix of the resource being imported. - - -state=PATH Path to the source state file. Defaults to the configured - backend, or "terraform.tfstate" - - -state-out=PATH Path to the destination state file to write to. If this - isn't specified, the source state file will be used. 
This - can be a new or existing path. - - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. This is only useful - with the "-config" flag. - - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" or any ".auto.tfvars" - files are present, they will be automatically loaded. - - -` - return strings.TrimSpace(helpText) -} - -func (c *ImportCommand) Synopsis() string { - return "Import existing infrastructure into Terraform" -} - -const importCommandInvalidAddressFmt = `Error: %s - -For information on valid syntax, see: -https://www.terraform.io/docs/internals/resource-addressing.html -` - -const importCommandMissingResourceSpecMsg = `Error: resource address must include a full resource spec - -For information on valid syntax, see: -https://www.terraform.io/docs/internals/resource-addressing.html -` - -const importCommandResourceModeMsg = `Error: resource address must refer to a managed resource. - -Data resources cannot be imported. -` - -const importCommandMissingResourceFmt = `[reset][bold][red]Error:[reset][bold] resource address %q does not exist in the configuration.[reset] - -Before importing this resource, please create its configuration in %s. For example: - -resource %q %q { - # (resource arguments) -} -` - -const importCommandSuccessMsg = `Import successful! - -The resources that were imported are shown above. These resources are now in -your Terraform state and will henceforth be managed by Terraform. -` - -const importCommandAllowMissingResourceMsg = `Import does not generate resource configuration, you must create a resource -configuration block that matches the current or desired state manually. - -If there is no matching resource configuration block for the imported -resource, Terraform will delete the resource on the next "terraform apply". -It is recommended that you run "terraform plan" to verify that the -configuration is correct and complete. 
-` diff --git a/vendor/github.com/hashicorp/terraform/command/init.go b/vendor/github.com/hashicorp/terraform/command/init.go deleted file mode 100644 index 66b1c7028a4..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/init.go +++ /dev/null @@ -1,676 +0,0 @@ -package command - -import ( - "fmt" - "log" - "os" - "sort" - "strings" - - "github.com/posener/complete" - - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/helper/variables" - "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/plugin/discovery" - "github.com/hashicorp/terraform/terraform" -) - -// InitCommand is a Command implementation that takes a Terraform -// module and clones it to the working directory. -type InitCommand struct { - Meta - - // getPlugins is for the -get-plugins flag - getPlugins bool - - // providerInstaller is used to download and install providers that - // aren't found locally. This uses a discovery.ProviderInstaller instance - // by default, but it can be overridden here as a way to mock fetching - // providers for tests. 
- providerInstaller discovery.Installer -} - -func (c *InitCommand) Run(args []string) int { - var flagFromModule string - var flagBackend, flagGet, flagUpgrade bool - var flagConfigExtra map[string]interface{} - var flagPluginPath FlagStringSlice - var flagVerifyPlugins bool - - args, err := c.Meta.process(args, false) - if err != nil { - return 1 - } - cmdFlags := c.flagSet("init") - cmdFlags.BoolVar(&flagBackend, "backend", true, "") - cmdFlags.Var((*variables.FlagAny)(&flagConfigExtra), "backend-config", "") - cmdFlags.StringVar(&flagFromModule, "from-module", "", "copy the source of the given module into the directory before init") - cmdFlags.BoolVar(&flagGet, "get", true, "") - cmdFlags.BoolVar(&c.getPlugins, "get-plugins", true, "") - cmdFlags.BoolVar(&c.forceInitCopy, "force-copy", false, "suppress prompts about copying state data") - cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") - cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") - cmdFlags.BoolVar(&c.reconfigure, "reconfigure", false, "reconfigure") - cmdFlags.BoolVar(&flagUpgrade, "upgrade", false, "") - cmdFlags.Var(&flagPluginPath, "plugin-dir", "plugin directory") - cmdFlags.BoolVar(&flagVerifyPlugins, "verify-plugins", true, "verify plugins") - - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - if len(flagPluginPath) > 0 { - c.pluginPath = flagPluginPath - c.getPlugins = false - } - - // set providerInstaller if we don't have a test version already - if c.providerInstaller == nil { - c.providerInstaller = &discovery.ProviderInstaller{ - Dir: c.pluginDir(), - Cache: c.pluginCache(), - PluginProtocolVersion: plugin.Handshake.ProtocolVersion, - SkipVerify: !flagVerifyPlugins, - Ui: c.Ui, - } - } - - // Validate the arg count - args = cmdFlags.Args() - if len(args) > 1 { - c.Ui.Error("The init command expects at most one argument.\n") - cmdFlags.Usage() - return 1 - } - - if err := 
c.storePluginPath(c.pluginPath); err != nil { - c.Ui.Error(fmt.Sprintf("Error saving -plugin-path values: %s", err)) - return 1 - } - - // Get our pwd. We don't always need it but always getting it is easier - // than the logic to determine if it is or isn't needed. - pwd, err := os.Getwd() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error getting pwd: %s", err)) - return 1 - } - - // If an argument is provided then it overrides our working directory. - path := pwd - if len(args) == 1 { - path = args[0] - } - - // This will track whether we outputted anything so that we know whether - // to output a newline before the success message - var header bool - - if flagFromModule != "" { - src := flagFromModule - - empty, err := config.IsEmptyDir(path) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error validating destination directory: %s", err)) - return 1 - } - if !empty { - c.Ui.Error(strings.TrimSpace(errInitCopyNotEmpty)) - return 1 - } - - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - "[reset][bold]Copying configuration[reset] from %q...", src, - ))) - header = true - - s := module.NewStorage("", c.Services, c.Credentials) - if err := s.GetModule(path, src); err != nil { - c.Ui.Error(fmt.Sprintf("Error copying source module: %s", err)) - return 1 - } - } - - // If our directory is empty, then we're done. We can't get or setup - // the backend with an empty directory. - if empty, err := config.IsEmptyDir(path); err != nil { - c.Ui.Error(fmt.Sprintf( - "Error checking configuration: %s", err)) - return 1 - } else if empty { - c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitEmpty))) - return 0 - } - - var back backend.Backend - - // If we're performing a get or loading the backend, then we perform - // some extra tasks. - if flagGet || flagBackend { - conf, err := c.Config(path) - if err != nil { - // Since this may be the user's first ever interaction with Terraform, - // we'll provide some additional context in this case. 
- c.Ui.Error(strings.TrimSpace(errInitConfigError)) - c.showDiagnostics(err) - return 1 - } - - // If we requested downloading modules and have modules in the config - if flagGet && len(conf.Modules) > 0 { - header = true - - getMode := module.GetModeGet - if flagUpgrade { - getMode = module.GetModeUpdate - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - "[reset][bold]Upgrading modules..."))) - } else { - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - "[reset][bold]Initializing modules..."))) - } - - if err := getModules(&c.Meta, path, getMode); err != nil { - c.Ui.Error(fmt.Sprintf( - "Error downloading modules: %s", err)) - return 1 - } - - } - - // If we're requesting backend configuration or looking for required - // plugins, load the backend - if flagBackend { - header = true - - // Only output that we're initializing a backend if we have - // something in the config. We can be UNSETTING a backend as well - // in which case we choose not to show this. - if conf.Terraform != nil && conf.Terraform.Backend != nil { - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - "\n[reset][bold]Initializing the backend..."))) - } - - opts := &BackendOpts{ - Config: conf, - ConfigExtra: flagConfigExtra, - Init: true, - } - if back, err = c.Backend(opts); err != nil { - c.Ui.Error(err.Error()) - return 1 - } - } - } - - if back == nil { - // If we didn't initialize a backend then we'll try to at least - // instantiate one. This might fail if it wasn't already initalized - // by a previous run, so we must still expect that "back" may be nil - // in code that follows. - back, err = c.Backend(nil) - if err != nil { - // This is fine. We'll proceed with no backend, then. - back = nil - } - } - - var state *terraform.State - - // If we have a functional backend (either just initialized or initialized - // on a previous run) we'll use the current state as a potential source - // of provider dependencies. 
- if back != nil { - sMgr, err := back.State(c.Workspace()) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error loading state: %s", err)) - return 1 - } - - if err := sMgr.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf( - "Error refreshing state: %s", err)) - return 1 - } - - state = sMgr.State() - } - - if v := os.Getenv(ProviderSkipVerifyEnvVar); v != "" { - c.ignorePluginChecksum = true - } - - // Now that we have loaded all modules, check the module tree for missing providers. - err = c.getProviders(path, state, flagUpgrade) - if err != nil { - // this function provides its own output - log.Printf("[ERROR] %s", err) - return 1 - } - - // If we outputted information, then we need to output a newline - // so that our success message is nicely spaced out from prior text. - if header { - c.Ui.Output("") - } - - c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccess))) - if !c.RunningInAutomation { - // If we're not running in an automation wrapper, give the user - // some more detailed next steps that are appropriate for interactive - // shell usage. - c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccessCLI))) - } - - return 0 -} - -// Load the complete module tree, and fetch any missing providers. -// This method outputs its own Ui. -func (c *InitCommand) getProviders(path string, state *terraform.State, upgrade bool) error { - mod, diags := c.Module(path) - if diags.HasErrors() { - c.showDiagnostics(diags) - return diags.Err() - } - - if err := terraform.CheckRequiredVersion(mod); err != nil { - diags = diags.Append(err) - c.showDiagnostics(diags) - return err - } - - var available discovery.PluginMetaSet - if upgrade { - // If we're in upgrade mode, we ignore any auto-installed plugins - // in "available", causing us to reinstall and possibly upgrade them. 
- available = c.providerPluginManuallyInstalledSet() - } else { - available = c.providerPluginSet() - } - - requirements := terraform.ModuleTreeDependencies(mod, state).AllPluginRequirements() - if len(requirements) == 0 { - // nothing to initialize - return nil - } - - c.Ui.Output(c.Colorize().Color( - "\n[reset][bold]Initializing provider plugins...", - )) - - missing := c.missingPlugins(available, requirements) - internal := c.internalProviders() - - var errs error - if c.getPlugins { - if len(missing) > 0 { - c.Ui.Output(fmt.Sprintf("- Checking for available provider plugins on %s...", - discovery.GetReleaseHost())) - } - - for provider, reqd := range missing { - if _, isInternal := internal[provider]; isInternal { - // Ignore internal providers; they are not eligible for - // installation. - continue - } - - _, err := c.providerInstaller.Get(provider, reqd.Versions) - - if err != nil { - switch err { - case discovery.ErrorNoSuchProvider: - c.Ui.Error(fmt.Sprintf(errProviderNotFound, provider, DefaultPluginVendorDir)) - case discovery.ErrorNoSuitableVersion: - if reqd.Versions.Unconstrained() { - // This should never happen, but might crop up if we catch - // the releases server in a weird state where the provider's - // directory is present but does not yet contain any - // versions. We'll treat it like ErrorNoSuchProvider, then. - c.Ui.Error(fmt.Sprintf(errProviderNotFound, provider, DefaultPluginVendorDir)) - } else { - c.Ui.Error(fmt.Sprintf(errProviderVersionsUnsuitable, provider, reqd.Versions)) - } - case discovery.ErrorNoVersionCompatible: - // FIXME: This error message is sub-awesome because we don't - // have enough information here to tell the user which versions - // we considered and which versions might be compatible. 
- constraint := reqd.Versions.String() - if constraint == "" { - constraint = "(any version)" - } - c.Ui.Error(fmt.Sprintf(errProviderIncompatible, provider, constraint)) - default: - c.Ui.Error(fmt.Sprintf(errProviderInstallError, provider, err.Error(), DefaultPluginVendorDir)) - } - - errs = multierror.Append(errs, err) - } - } - - if errs != nil { - return errs - } - } else if len(missing) > 0 { - // we have missing providers, but aren't going to try and download them - var lines []string - for provider, reqd := range missing { - if reqd.Versions.Unconstrained() { - lines = append(lines, fmt.Sprintf("* %s (any version)\n", provider)) - } else { - lines = append(lines, fmt.Sprintf("* %s (%s)\n", provider, reqd.Versions)) - } - errs = multierror.Append(errs, fmt.Errorf("missing provider %q", provider)) - } - sort.Strings(lines) - c.Ui.Error(fmt.Sprintf(errMissingProvidersNoInstall, strings.Join(lines, ""), DefaultPluginVendorDir)) - return errs - } - - // With all the providers downloaded, we'll generate our lock file - // that ensures the provider binaries remain unchanged until we init - // again. If anything changes, other commands that use providers will - // fail with an error instructing the user to re-run this command. - available = c.providerPluginSet() // re-discover to see newly-installed plugins - chosen := choosePlugins(available, internal, requirements) - digests := map[string][]byte{} - for name, meta := range chosen { - digest, err := meta.SHA256() - if err != nil { - c.Ui.Error(fmt.Sprintf("failed to read provider plugin %s: %s", meta.Path, err)) - return err - } - digests[name] = digest - if c.ignorePluginChecksum { - digests[name] = nil - } - } - err := c.providerPluginsLock().Write(digests) - if err != nil { - c.Ui.Error(fmt.Sprintf("failed to save provider manifest: %s", err)) - return err - } - - { - // Purge any auto-installed plugins that aren't being used. 
- purged, err := c.providerInstaller.PurgeUnused(chosen) - if err != nil { - // Failure to purge old plugins is not a fatal error - c.Ui.Warn(fmt.Sprintf("failed to purge unused plugins: %s", err)) - } - if purged != nil { - for meta := range purged { - log.Printf("[DEBUG] Purged unused %s plugin %s", meta.Name, meta.Path) - } - } - } - - // If any providers have "floating" versions (completely unconstrained) - // we'll suggest the user constrain with a pessimistic constraint to - // avoid implicitly adopting a later major release. - constraintSuggestions := make(map[string]discovery.ConstraintStr) - for name, meta := range chosen { - req := requirements[name] - if req == nil { - // should never happen, but we don't want to crash here, so we'll - // be cautious. - continue - } - - if req.Versions.Unconstrained() && meta.Version != discovery.VersionZero { - // meta.Version.MustParse is safe here because our "chosen" metas - // were already filtered for validity of versions. - constraintSuggestions[name] = meta.Version.MustParse().MinorUpgradeConstraintStr() - } - } - if len(constraintSuggestions) != 0 { - names := make([]string, 0, len(constraintSuggestions)) - for name := range constraintSuggestions { - names = append(names, name) - } - sort.Strings(names) - - c.Ui.Output(outputInitProvidersUnconstrained) - for _, name := range names { - c.Ui.Output(fmt.Sprintf("* provider.%s: version = %q", name, constraintSuggestions[name])) - } - } - - return nil -} - -func (c *InitCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictDirs("") -} - -func (c *InitCommand) AutocompleteFlags() complete.Flags { - return complete.Flags{ - "-backend": completePredictBoolean, - "-backend-config": complete.PredictFiles("*.tfvars"), // can also be key=value, but we can't "predict" that - "-force-copy": complete.PredictNothing, - "-from-module": completePredictModuleSource, - "-get": completePredictBoolean, - "-get-plugins": completePredictBoolean, - "-input": 
completePredictBoolean, - "-lock": completePredictBoolean, - "-lock-timeout": complete.PredictAnything, - "-no-color": complete.PredictNothing, - "-plugin-dir": complete.PredictDirs(""), - "-reconfigure": complete.PredictNothing, - "-upgrade": completePredictBoolean, - "-verify-plugins": completePredictBoolean, - } -} - -func (c *InitCommand) Help() string { - helpText := ` -Usage: terraform init [options] [DIR] - - Initialize a new or existing Terraform working directory by creating - initial files, loading any remote state, downloading modules, etc. - - This is the first command that should be run for any new or existing - Terraform configuration per machine. This sets up all the local data - necessary to run Terraform that is typically not committed to version - control. - - This command is always safe to run multiple times. Though subsequent runs - may give errors, this command will never delete your configuration or - state. Even so, if you have important information, please back it up prior - to running this command, just in case. - - If no arguments are given, the configuration in this working directory - is initialized. - -Options: - - -backend=true Configure the backend for this configuration. - - -backend-config=path This can be either a path to an HCL file with key/value - assignments (same format as terraform.tfvars) or a - 'key=value' format. This is merged with what is in the - configuration file. This can be specified multiple - times. The backend type must be in the configuration - itself. - - -force-copy Suppress prompts about copying state data. This is - equivalent to providing a "yes" to all confirmation - prompts. - - -from-module=SOURCE Copy the contents of the given module into the target - directory before initialization. - - -get=true Download any modules for this configuration. - - -get-plugins=true Download any missing plugins for this configuration. - - -input=true Ask for input if necessary. If false, will error if - input was required. 
- - -lock=true Lock the state file when locking is supported. - - -lock-timeout=0s Duration to retry a state lock. - - -no-color If specified, output won't contain any color. - - -plugin-dir Directory containing plugin binaries. This overrides all - default search paths for plugins, and prevents the - automatic installation of plugins. This flag can be used - multiple times. - - -reconfigure Reconfigure the backend, ignoring any saved - configuration. - - -upgrade=false If installing modules (-get) or plugins (-get-plugins), - ignore previously-downloaded objects and install the - latest version allowed within configured constraints. - - -verify-plugins=true Verify the authenticity and integrity of automatically - downloaded plugins. -` - return strings.TrimSpace(helpText) -} - -func (c *InitCommand) Synopsis() string { - return "Initialize a Terraform working directory" -} - -const errInitConfigError = ` -There are some problems with the configuration, described below. - -The Terraform configuration must be valid before initialization so that -Terraform can determine which modules and providers need to be installed. -` - -const errInitCopyNotEmpty = ` -The working directory already contains files. The -from-module option requires -an empty directory into which a copy of the referenced module will be placed. - -To initialize the configuration already in this working directory, omit the --from-module option. -` - -const outputInitEmpty = ` -[reset][bold]Terraform initialized in an empty directory![reset] - -The directory has no Terraform configuration files. You may begin working -with Terraform immediately by creating Terraform configuration files. -` - -const outputInitSuccess = ` -[reset][bold][green]Terraform has been successfully initialized![reset][green] -` - -const outputInitSuccessCLI = `[reset][green] -You may now begin working with Terraform. Try running "terraform plan" to see -any changes that are required for your infrastructure. 
All Terraform commands -should now work. - -If you ever set or change modules or backend configuration for Terraform, -rerun this command to reinitialize your working directory. If you forget, other -commands will detect it and remind you to do so if necessary. -` - -const outputInitProvidersUnconstrained = ` -The following providers do not have any version constraints in configuration, -so the latest version was installed. - -To prevent automatic upgrades to new major versions that may contain breaking -changes, it is recommended to add version = "..." constraints to the -corresponding provider blocks in configuration, with the constraint strings -suggested below. -` - -const errProviderNotFound = ` -[reset][bold][red]Provider %[1]q not available for installation.[reset][red] - -A provider named %[1]q could not be found in the official repository. - -This may result from mistyping the provider name, or the given provider may -be a third-party provider that cannot be installed automatically. - -In the latter case, the plugin must be installed manually by locating and -downloading a suitable distribution package and placing the plugin's executable -file in the following directory: - %[2]s - -Terraform detects necessary plugins by inspecting the configuration and state. -To view the provider versions requested by each module, run -"terraform providers". -` - -const errProviderVersionsUnsuitable = ` -[reset][bold][red]No provider %[1]q plugins meet the constraint %[2]q.[reset][red] - -The version constraint is derived from the "version" argument within the -provider %[1]q block in configuration. Child modules may also apply -provider version constraints. To view the provider versions requested by each -module in the current configuration, run "terraform providers". - -To proceed, the version constraints for this provider must be relaxed by -either adjusting or removing the "version" argument in the provider blocks -throughout the configuration. 
-` - -const errProviderIncompatible = ` -[reset][bold][red]No available provider %[1]q plugins are compatible with this Terraform version.[reset][red] - -From time to time, new Terraform major releases can change the requirements for -plugins such that older plugins become incompatible. - -Terraform checked all of the plugin versions matching the given constraint: - %[2]s - -Unfortunately, none of the suitable versions are compatible with this version -of Terraform. If you have recently upgraded Terraform, it may be necessary to -move to a newer major release of this provider. Alternatively, if you are -attempting to upgrade the provider to a new major version you may need to -also upgrade Terraform to support the new version. - -Consult the documentation for this provider for more information on -compatibility between provider versions and Terraform versions. -` - -const errProviderInstallError = ` -[reset][bold][red]Error installing provider %[1]q: %[2]s.[reset][red] - -Terraform analyses the configuration and state and automatically downloads -plugins for the providers used. However, when attempting to download this -plugin an unexpected error occured. - -This may be caused if for some reason Terraform is unable to reach the -plugin repository. The repository may be unreachable if access is blocked -by a firewall. - -If automatic installation is not possible or desirable in your environment, -you may alternatively manually install plugins by downloading a suitable -distribution package and placing the plugin's executable file in the -following directory: - %[3]s -` - -const errMissingProvidersNoInstall = ` -[reset][bold][red]Missing required providers.[reset][red] - -The following provider constraints are not met by the currently-installed -provider plugins: - -%[1]s -Terraform can automatically download and install plugins to meet the given -constraints, but this step was skipped due to the use of -get-plugins=false -and/or -plugin-dir on the command line. 
- -If automatic installation is not possible or desirable in your environment, -you may manually install plugins by downloading a suitable distribution package -and placing the plugin's executable file in one of the directories given in -by -plugin-dir on the command line, or in the following directory if custom -plugin directories are not set: - %[2]s -` diff --git a/vendor/github.com/hashicorp/terraform/command/internal_plugin.go b/vendor/github.com/hashicorp/terraform/command/internal_plugin.go deleted file mode 100644 index 01d8c77b93a..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/internal_plugin.go +++ /dev/null @@ -1,90 +0,0 @@ -package command - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/plugin" - "github.com/kardianos/osext" -) - -// InternalPluginCommand is a Command implementation that allows plugins to be -// compiled into the main Terraform binary and executed via a subcommand. -type InternalPluginCommand struct { - Meta -} - -const TFSPACE = "-TFSPACE-" - -// BuildPluginCommandString builds a special string for executing internal -// plugins. It has the following format: -// -// /path/to/terraform-TFSPACE-internal-plugin-TFSPACE-terraform-provider-aws -// -// We split the string on -TFSPACE- to build the command executor. The reason we -// use -TFSPACE- is so we can support spaces in the /path/to/terraform part. 
-func BuildPluginCommandString(pluginType, pluginName string) (string, error) { - terraformPath, err := osext.Executable() - if err != nil { - return "", err - } - parts := []string{terraformPath, "internal-plugin", pluginType, pluginName} - return strings.Join(parts, TFSPACE), nil -} - -func (c *InternalPluginCommand) Run(args []string) int { - if len(args) != 2 { - log.Printf("Wrong number of args; expected: terraform internal-plugin pluginType pluginName") - return 1 - } - - pluginType := args[0] - pluginName := args[1] - - log.SetPrefix(fmt.Sprintf("%s-%s (internal) ", pluginName, pluginType)) - - switch pluginType { - case "provider": - pluginFunc, found := InternalProviders[pluginName] - if !found { - log.Printf("[ERROR] Could not load provider: %s", pluginName) - return 1 - } - log.Printf("[INFO] Starting provider plugin %s", pluginName) - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: pluginFunc, - }) - case "provisioner": - pluginFunc, found := InternalProvisioners[pluginName] - if !found { - log.Printf("[ERROR] Could not load provisioner: %s", pluginName) - return 1 - } - log.Printf("[INFO] Starting provisioner plugin %s", pluginName) - plugin.Serve(&plugin.ServeOpts{ - ProvisionerFunc: pluginFunc, - }) - default: - log.Printf("[ERROR] Invalid plugin type %s", pluginType) - return 1 - } - - return 0 -} - -func (c *InternalPluginCommand) Help() string { - helpText := ` -Usage: terraform internal-plugin pluginType pluginName - - Runs an internally-compiled version of a plugin from the terraform binary. - - NOTE: this is an internal command and you should not call it yourself. 
-` - - return strings.TrimSpace(helpText) -} - -func (c *InternalPluginCommand) Synopsis() string { - return "internal plugin command" -} diff --git a/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go b/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go deleted file mode 100644 index 7993e9a548d..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go +++ /dev/null @@ -1,26 +0,0 @@ -// -// This file is automatically generated by scripts/generate-plugins.go -- Do not edit! -// -package command - -import ( - chefprovisioner "github.com/hashicorp/terraform/builtin/provisioners/chef" - fileprovisioner "github.com/hashicorp/terraform/builtin/provisioners/file" - habitatprovisioner "github.com/hashicorp/terraform/builtin/provisioners/habitat" - localexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/local-exec" - remoteexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/remote-exec" - saltmasterlessprovisioner "github.com/hashicorp/terraform/builtin/provisioners/salt-masterless" - - "github.com/hashicorp/terraform/plugin" -) - -var InternalProviders = map[string]plugin.ProviderFunc{} - -var InternalProvisioners = map[string]plugin.ProvisionerFunc{ - "chef": chefprovisioner.Provisioner, - "file": fileprovisioner.Provisioner, - "habitat": habitatprovisioner.Provisioner, - "local-exec": localexecprovisioner.Provisioner, - "remote-exec": remoteexecprovisioner.Provisioner, - "salt-masterless": saltmasterlessprovisioner.Provisioner, -} diff --git a/vendor/github.com/hashicorp/terraform/command/meta.go b/vendor/github.com/hashicorp/terraform/command/meta.go deleted file mode 100644 index 27f7765f95d..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/meta.go +++ /dev/null @@ -1,643 +0,0 @@ -package command - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - 
- "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/backend/local" - "github.com/hashicorp/terraform/command/format" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/helper/experiment" - "github.com/hashicorp/terraform/helper/variables" - "github.com/hashicorp/terraform/helper/wrappedstreams" - "github.com/hashicorp/terraform/svchost/auth" - "github.com/hashicorp/terraform/svchost/disco" - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/terraform/tfdiags" - "github.com/mitchellh/cli" - "github.com/mitchellh/colorstring" -) - -// Meta are the meta-options that are available on all or most commands. -type Meta struct { - // The exported fields below should be set by anyone using a - // command with a Meta field. These are expected to be set externally - // (not from within the command itself). - - Color bool // True if output should be colored - GlobalPluginDirs []string // Additional paths to search for plugins - PluginOverrides *PluginOverrides // legacy overrides from .terraformrc file - Ui cli.Ui // Ui for output - - // ExtraHooks are extra hooks to add to the context. - ExtraHooks []terraform.Hook - - // Services provides access to remote endpoint information for - // "terraform-native' services running at a specific user-facing hostname. - Services *disco.Disco - - // Credentials provides access to credentials for "terraform-native" - // services, which are accessed by a service hostname. - Credentials auth.CredentialsSource - - // RunningInAutomation indicates that commands are being run by an - // automated system rather than directly at a command prompt. - // - // This is a hint to various command routines that it may be confusing - // to print out messages that suggest running specific follow-up - // commands, since the user consuming the output will not be - // in a position to run such commands. 
- // - // The intended use-case of this flag is when Terraform is running in - // some sort of workflow orchestration tool which is abstracting away - // the specific commands being run. - RunningInAutomation bool - - // PluginCacheDir, if non-empty, enables caching of downloaded plugins - // into the given directory. - PluginCacheDir string - - // OverrideDataDir, if non-empty, overrides the return value of the - // DataDir method for situations where the local .terraform/ directory - // is not suitable, e.g. because of a read-only filesystem. - OverrideDataDir string - - // When this channel is closed, the command will be cancelled. - ShutdownCh <-chan struct{} - - //---------------------------------------------------------- - // Protected: commands can set these - //---------------------------------------------------------- - - // Modify the data directory location. This should be accessed through the - // DataDir method. - dataDir string - - // pluginPath is a user defined set of directories to look for plugins. - // This is set during init with the `-plugin-dir` flag, saved to a file in - // the data directory. - // This overrides all other search paths when discovering plugins. - pluginPath []string - - ignorePluginChecksum bool - - // Override certain behavior for tests within this package - testingOverrides *testingOverrides - - //---------------------------------------------------------- - // Private: do not set these - //---------------------------------------------------------- - - // backendState is the currently active backend state - backendState *terraform.BackendState - - // Variables for the context (private) - autoKey string - autoVariables map[string]interface{} - input bool - variables map[string]interface{} - - // Targets for this context (private) - targets []string - - // Internal fields - color bool - oldUi cli.Ui - - // The fields below are expected to be set by the command via - // command line flags. See the Apply command for an example. 
- // - // statePath is the path to the state file. If this is empty, then - // no state will be loaded. It is also okay for this to be a path to - // a file that doesn't exist; it is assumed that this means that there - // is simply no state. - // - // stateOutPath is used to override the output path for the state. - // If not provided, the StatePath is used causing the old state to - // be overriden. - // - // backupPath is used to backup the state file before writing a modified - // version. It defaults to stateOutPath + DefaultBackupExtension - // - // parallelism is used to control the number of concurrent operations - // allowed when walking the graph - // - // shadow is used to enable/disable the shadow graph - // - // provider is to specify specific resource providers - // - // stateLock is set to false to disable state locking - // - // stateLockTimeout is the optional duration to retry a state locks locks - // when it is already locked by another process. - // - // forceInitCopy suppresses confirmation for copying state data during - // init. - // - // reconfigure forces init to ignore any stored configuration. - statePath string - stateOutPath string - backupPath string - parallelism int - shadow bool - provider string - stateLock bool - stateLockTimeout time.Duration - forceInitCopy bool - reconfigure bool - - // errWriter is the write side of a pipe for the FlagSet output. We need to - // keep track of this to close previous pipes between tests. Normal - // operation never needs to close this. - errWriter *io.PipeWriter - // done chan to wait for the scanner goroutine - errScannerDone chan struct{} - - // Used with the import command to allow import of state when no matching config exists. 
- allowMissingConfig bool -} - -type PluginOverrides struct { - Providers map[string]string - Provisioners map[string]string -} - -type testingOverrides struct { - ProviderResolver terraform.ResourceProviderResolver - Provisioners map[string]terraform.ResourceProvisionerFactory -} - -// initStatePaths is used to initialize the default values for -// statePath, stateOutPath, and backupPath -func (m *Meta) initStatePaths() { - if m.statePath == "" { - m.statePath = DefaultStateFilename - } - if m.stateOutPath == "" { - m.stateOutPath = m.statePath - } - if m.backupPath == "" { - m.backupPath = m.stateOutPath + DefaultBackupExtension - } -} - -// StateOutPath returns the true output path for the state file -func (m *Meta) StateOutPath() string { - return m.stateOutPath -} - -// Colorize returns the colorization structure for a command. -func (m *Meta) Colorize() *colorstring.Colorize { - return &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: !m.color, - Reset: true, - } -} - -// DataDir returns the directory where local data will be stored. -// Defaults to DefaultDataDir in the current working directory. -func (m *Meta) DataDir() string { - if m.OverrideDataDir != "" { - return m.OverrideDataDir - } - return DefaultDataDir -} - -const ( - // InputModeEnvVar is the environment variable that, if set to "false" or - // "0", causes terraform commands to behave as if the `-input=false` flag was - // specified. - InputModeEnvVar = "TF_INPUT" -) - -// InputMode returns the type of input we should ask for in the form of -// terraform.InputMode which is passed directly to Context.Input. 
-func (m *Meta) InputMode() terraform.InputMode { - if test || !m.input { - return 0 - } - - if envVar := os.Getenv(InputModeEnvVar); envVar != "" { - if v, err := strconv.ParseBool(envVar); err == nil { - if !v { - return 0 - } - } - } - - var mode terraform.InputMode - mode |= terraform.InputModeProvider - mode |= terraform.InputModeVar - mode |= terraform.InputModeVarUnset - - return mode -} - -// UIInput returns a UIInput object to be used for asking for input. -func (m *Meta) UIInput() terraform.UIInput { - return &UIInput{ - Colorize: m.Colorize(), - } -} - -// StdinPiped returns true if the input is piped. -func (m *Meta) StdinPiped() bool { - fi, err := wrappedstreams.Stdin().Stat() - if err != nil { - // If there is an error, let's just say its not piped - return false - } - - return fi.Mode()&os.ModeNamedPipe != 0 -} - -const ( - ProviderSkipVerifyEnvVar = "TF_SKIP_PROVIDER_VERIFY" -) - -// contextOpts returns the options to use to initialize a Terraform -// context with the settings from this Meta. -func (m *Meta) contextOpts() *terraform.ContextOpts { - var opts terraform.ContextOpts - opts.Hooks = []terraform.Hook{m.uiHook(), &terraform.DebugHook{}} - opts.Hooks = append(opts.Hooks, m.ExtraHooks...) - - vs := make(map[string]interface{}) - for k, v := range opts.Variables { - vs[k] = v - } - for k, v := range m.autoVariables { - vs[k] = v - } - for k, v := range m.variables { - vs[k] = v - } - opts.Variables = vs - - opts.Targets = m.targets - opts.UIInput = m.UIInput() - opts.Parallelism = m.parallelism - opts.Shadow = m.shadow - - // If testingOverrides are set, we'll skip the plugin discovery process - // and just work with what we've been given, thus allowing the tests - // to provide mock providers and provisioners. 
- if m.testingOverrides != nil { - opts.ProviderResolver = m.testingOverrides.ProviderResolver - opts.Provisioners = m.testingOverrides.Provisioners - } else { - opts.ProviderResolver = m.providerResolver() - opts.Provisioners = m.provisionerFactories() - } - - opts.ProviderSHA256s = m.providerPluginsLock().Read() - if v := os.Getenv(ProviderSkipVerifyEnvVar); v != "" { - opts.SkipProviderVerify = true - } - - opts.Meta = &terraform.ContextMeta{ - Env: m.Workspace(), - } - - return &opts -} - -// flags adds the meta flags to the given FlagSet. -func (m *Meta) flagSet(n string) *flag.FlagSet { - f := flag.NewFlagSet(n, flag.ContinueOnError) - f.BoolVar(&m.input, "input", true, "input") - f.Var((*variables.Flag)(&m.variables), "var", "variables") - f.Var((*variables.FlagFile)(&m.variables), "var-file", "variable file") - f.Var((*FlagStringSlice)(&m.targets), "target", "resource to target") - - if m.autoKey != "" { - f.Var((*variables.FlagFile)(&m.autoVariables), m.autoKey, "variable file") - } - - // Advanced (don't need documentation, or unlikely to be set) - f.BoolVar(&m.shadow, "shadow", true, "shadow graph") - - // Experimental features - experiment.Flag(f) - - // Create an io.Writer that writes to our Ui properly for errors. - // This is kind of a hack, but it does the job. Basically: create - // a pipe, use a scanner to break it into lines, and output each line - // to the UI. Do this forever. - - // If a previous pipe exists, we need to close that first. - // This should only happen in testing. 
- if m.errWriter != nil { - m.errWriter.Close() - } - - if m.errScannerDone != nil { - <-m.errScannerDone - } - - errR, errW := io.Pipe() - errScanner := bufio.NewScanner(errR) - m.errWriter = errW - m.errScannerDone = make(chan struct{}) - go func() { - defer close(m.errScannerDone) - for errScanner.Scan() { - m.Ui.Error(errScanner.Text()) - } - }() - f.SetOutput(errW) - - // Set the default Usage to empty - f.Usage = func() {} - - // command that bypass locking will supply their own flag on this var, but - // set the initial meta value to true as a failsafe. - m.stateLock = true - - return f -} - -// moduleStorage returns the module.Storage implementation used to store -// modules for commands. -func (m *Meta) moduleStorage(root string, mode module.GetMode) *module.Storage { - s := module.NewStorage(filepath.Join(root, "modules"), m.Services, m.Credentials) - s.Ui = m.Ui - s.Mode = mode - return s -} - -// process will process the meta-parameters out of the arguments. This -// will potentially modify the args in-place. It will return the resulting -// slice. -// -// vars says whether or not we support variables. -func (m *Meta) process(args []string, vars bool) ([]string, error) { - // We do this so that we retain the ability to technically call - // process multiple times, even if we have no plans to do so - if m.oldUi != nil { - m.Ui = m.oldUi - } - - // Set colorization - m.color = m.Color - for i, v := range args { - if v == "-no-color" { - m.color = false - m.Color = false - args = append(args[:i], args[i+1:]...) - break - } - } - - // Set the UI - m.oldUi = m.Ui - m.Ui = &cli.ConcurrentUi{ - Ui: &ColorizeUi{ - Colorize: m.Colorize(), - ErrorColor: "[red]", - WarnColor: "[yellow]", - Ui: m.oldUi, - }, - } - - // If we support vars and the default var file exists, add it to - // the args... 
- m.autoKey = "" - if vars { - var preArgs []string - - if _, err := os.Stat(DefaultVarsFilename); err == nil { - m.autoKey = "var-file-default" - preArgs = append(preArgs, "-"+m.autoKey, DefaultVarsFilename) - } - - if _, err := os.Stat(DefaultVarsFilename + ".json"); err == nil { - m.autoKey = "var-file-default" - preArgs = append(preArgs, "-"+m.autoKey, DefaultVarsFilename+".json") - } - - wd, err := os.Getwd() - if err != nil { - return nil, err - } - - fis, err := ioutil.ReadDir(wd) - if err != nil { - return nil, err - } - - // make sure we add the files in order - sort.Slice(fis, func(i, j int) bool { - return fis[i].Name() < fis[j].Name() - }) - - for _, fi := range fis { - name := fi.Name() - // Ignore directories, non-var-files, and ignored files - if fi.IsDir() || !isAutoVarFile(name) || config.IsIgnoredFile(name) { - continue - } - - m.autoKey = "var-file-default" - preArgs = append(preArgs, "-"+m.autoKey, name) - } - - args = append(preArgs, args...) - } - - return args, nil -} - -// uiHook returns the UiHook to use with the context. -func (m *Meta) uiHook() *UiHook { - return &UiHook{ - Colorize: m.Colorize(), - Ui: m.Ui, - } -} - -// confirm asks a yes/no confirmation. -func (m *Meta) confirm(opts *terraform.InputOpts) (bool, error) { - if !m.Input() { - return false, errors.New("input is disabled") - } - for { - v, err := m.UIInput().Input(opts) - if err != nil { - return false, fmt.Errorf( - "Error asking for confirmation: %s", err) - } - - switch strings.ToLower(v) { - case "no": - return false, nil - case "yes": - return true, nil - } - } -} - -// showDiagnostics displays error and warning messages in the UI. -// -// "Diagnostics" here means the Diagnostics type from the tfdiag package, -// though as a convenience this function accepts anything that could be -// passed to the "Append" method on that type, converting it to Diagnostics -// before displaying it. 
-// -// Internally this function uses Diagnostics.Append, and so it will panic -// if given unsupported value types, just as Append does. -func (m *Meta) showDiagnostics(vals ...interface{}) { - var diags tfdiags.Diagnostics - diags = diags.Append(vals...) - - for _, diag := range diags { - // TODO: Actually measure the terminal width and pass it here. - // For now, we don't have easy access to the writer that - // ui.Error (etc) are writing to and thus can't interrogate - // to see if it's a terminal and what size it is. - msg := format.Diagnostic(diag, m.Colorize(), 78) - switch diag.Severity() { - case tfdiags.Error: - m.Ui.Error(msg) - case tfdiags.Warning: - m.Ui.Warn(msg) - default: - m.Ui.Output(msg) - } - } -} - -const ( - // ModuleDepthDefault is the default value for - // module depth, which can be overridden by flag - // or env var - ModuleDepthDefault = -1 - - // ModuleDepthEnvVar is the name of the environment variable that can be used to set module depth. - ModuleDepthEnvVar = "TF_MODULE_DEPTH" -) - -func (m *Meta) addModuleDepthFlag(flags *flag.FlagSet, moduleDepth *int) { - flags.IntVar(moduleDepth, "module-depth", ModuleDepthDefault, "module-depth") - if envVar := os.Getenv(ModuleDepthEnvVar); envVar != "" { - if md, err := strconv.Atoi(envVar); err == nil { - *moduleDepth = md - } - } -} - -// outputShadowError outputs the error from ctx.ShadowError. If the -// error is nil then nothing happens. If output is false then it isn't -// outputted to the user (you can define logic to guard against outputting). 
-func (m *Meta) outputShadowError(err error, output bool) bool { - // Do nothing if no error - if err == nil { - return false - } - - // If not outputting, do nothing - if !output { - return false - } - - // Write the shadow error output to a file - path := fmt.Sprintf("terraform-error-%d.log", time.Now().UTC().Unix()) - if err := ioutil.WriteFile(path, []byte(err.Error()), 0644); err != nil { - // If there is an error writing it, just let it go - log.Printf("[ERROR] Error writing shadow error: %s", err) - return false - } - - // Output! - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset][bold][yellow]\nExperimental feature failure! Please report a bug.\n\n"+ - "This is not an error. Your Terraform operation completed successfully.\n"+ - "Your real infrastructure is unaffected by this message.\n\n"+ - "[reset][yellow]While running, Terraform sometimes tests experimental features in the\n"+ - "background. These features cannot affect real state and never touch\n"+ - "real infrastructure. If the features work properly, you see nothing.\n"+ - "If the features fail, this message appears.\n\n"+ - "You can report an issue at: https://github.com/hashicorp/terraform/issues\n\n"+ - "The failure was written to %q. Please\n"+ - "double check this file contains no sensitive information and report\n"+ - "it with your issue.\n\n"+ - "This is not an error. Your terraform operation completed successfully\n"+ - "and your real infrastructure is unaffected by this message.", - path, - ))) - - return true -} - -// WorkspaceNameEnvVar is the name of the environment variable that can be used -// to set the name of the Terraform workspace, overriding the workspace chosen -// by `terraform workspace select`. -// -// Note that this environment variable is ignored by `terraform workspace new` -// and `terraform workspace delete`. 
-const WorkspaceNameEnvVar = "TF_WORKSPACE" - -// Workspace returns the name of the currently configured workspace, corresponding -// to the desired named state. -func (m *Meta) Workspace() string { - current, _ := m.WorkspaceOverridden() - return current -} - -// WorkspaceOverridden returns the name of the currently configured workspace, -// corresponding to the desired named state, as well as a bool saying whether -// this was set via the TF_WORKSPACE environment variable. -func (m *Meta) WorkspaceOverridden() (string, bool) { - if envVar := os.Getenv(WorkspaceNameEnvVar); envVar != "" { - return envVar, true - } - - envData, err := ioutil.ReadFile(filepath.Join(m.DataDir(), local.DefaultWorkspaceFile)) - current := string(bytes.TrimSpace(envData)) - if current == "" { - current = backend.DefaultStateName - } - - if err != nil && !os.IsNotExist(err) { - // always return the default if we can't get a workspace name - log.Printf("[ERROR] failed to read current workspace: %s", err) - } - - return current, false -} - -// SetWorkspace saves the given name as the current workspace in the local -// filesystem. 
-func (m *Meta) SetWorkspace(name string) error { - err := os.MkdirAll(m.DataDir(), 0755) - if err != nil { - return err - } - - err = ioutil.WriteFile(filepath.Join(m.DataDir(), local.DefaultWorkspaceFile), []byte(name), 0644) - if err != nil { - return err - } - return nil -} - -// isAutoVarFile determines if the file ends with .auto.tfvars or .auto.tfvars.json -func isAutoVarFile(path string) bool { - return strings.HasSuffix(path, ".auto.tfvars") || - strings.HasSuffix(path, ".auto.tfvars.json") -} diff --git a/vendor/github.com/hashicorp/terraform/command/meta_backend.go b/vendor/github.com/hashicorp/terraform/command/meta_backend.go deleted file mode 100644 index 4a52ef00ec3..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/meta_backend.go +++ /dev/null @@ -1,1740 +0,0 @@ -package command - -// This file contains all the Backend-related function calls on Meta, -// exported and private. - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "log" - "path/filepath" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/mapstructure" - - backendinit "github.com/hashicorp/terraform/backend/init" - backendlocal "github.com/hashicorp/terraform/backend/local" -) - -// BackendOpts are the options used to initialize a backend.Backend. -type BackendOpts struct { - // Module is the root module from which we will extract the terraform and - // backend configuration. - Config *config.Config - - // ConfigFile is a path to a file that contains configuration that - // is merged directly into the backend configuration when loaded - // from a file. - ConfigFile string - - // ConfigExtra is extra configuration to merge into the backend - // configuration after the extra file above. 
- ConfigExtra map[string]interface{} - - // Plan is a plan that is being used. If this is set, the backend - // configuration and output configuration will come from this plan. - Plan *terraform.Plan - - // Init should be set to true if initialization is allowed. If this is - // false, then any configuration that requires configuration will show - // an error asking the user to reinitialize. - Init bool - - // ForceLocal will force a purely local backend, including state. - // You probably don't want to set this. - ForceLocal bool -} - -// Backend initializes and returns the backend for this CLI session. -// -// The backend is used to perform the actual Terraform operations. This -// abstraction enables easily sliding in new Terraform behavior such as -// remote state storage, remote operations, etc. while allowing the CLI -// to remain mostly identical. -// -// This will initialize a new backend for each call, which can carry some -// overhead with it. Please reuse the returned value for optimal behavior. -// -// Only one backend should be used per Meta. This function is stateful -// and is unsafe to create multiple backends used at once. This function -// can be called multiple times with each backend being "live" (usable) -// one at a time. -func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, error) { - // If no opts are set, then initialize - if opts == nil { - opts = &BackendOpts{} - } - - // Initialize a backend from the config unless we're forcing a purely - // local operation. - var b backend.Backend - if !opts.ForceLocal { - var err error - - // If we have a plan then, we get the the backend from there. Otherwise, - // the backend comes from the configuration. 
- if opts.Plan != nil { - b, err = m.backendFromPlan(opts) - } else { - b, err = m.backendFromConfig(opts) - } - if err != nil { - return nil, err - } - - log.Printf("[INFO] command: backend initialized: %T", b) - } - - // Setup the CLI opts we pass into backends that support it - cliOpts := &backend.CLIOpts{ - CLI: m.Ui, - CLIColor: m.Colorize(), - StatePath: m.statePath, - StateOutPath: m.stateOutPath, - StateBackupPath: m.backupPath, - ContextOpts: m.contextOpts(), - Input: m.Input(), - RunningInAutomation: m.RunningInAutomation, - } - - // Don't validate if we have a plan. Validation is normally harmless here, - // but validation requires interpolation, and `file()` function calls may - // not have the original files in the current execution context. - cliOpts.Validation = opts.Plan == nil - - // If the backend supports CLI initialization, do it. - if cli, ok := b.(backend.CLI); ok { - if err := cli.CLIInit(cliOpts); err != nil { - return nil, fmt.Errorf( - "Error initializing backend %T: %s\n\n"+ - "This is a bug, please report it to the backend developer", - b, err) - } - } - - // If the result of loading the backend is an enhanced backend, - // then return that as-is. This works even if b == nil (it will be !ok). - if enhanced, ok := b.(backend.Enhanced); ok { - return enhanced, nil - } - - // We either have a non-enhanced backend or no backend configured at - // all. In either case, we use local as our enhanced backend and the - // non-enhanced (if any) as the state backend. - - if !opts.ForceLocal { - log.Printf("[INFO] command: backend %T is not enhanced, wrapping in local", b) - } - - // Build the local backend - local := &backendlocal.Local{Backend: b} - if err := local.CLIInit(cliOpts); err != nil { - // Local backend isn't allowed to fail. It would be a bug. - panic(err) - } - - return local, nil -} - -// IsLocalBackend returns true if the backend is a local backend. We use this -// for some checks that require a remote backend. 
-func (m *Meta) IsLocalBackend(b backend.Backend) bool { - // Is it a local backend? - bLocal, ok := b.(*backendlocal.Local) - - // If it is, does it not have an alternate state backend? - if ok { - ok = bLocal.Backend == nil - } - - return ok -} - -// Operation initializes a new backend.Operation struct. -// -// This prepares the operation. After calling this, the caller is expected -// to modify fields of the operation such as Sequence to specify what will -// be called. -func (m *Meta) Operation() *backend.Operation { - return &backend.Operation{ - PlanOutBackend: m.backendState, - Targets: m.targets, - UIIn: m.UIInput(), - UIOut: m.Ui, - Workspace: m.Workspace(), - LockState: m.stateLock, - StateLockTimeout: m.stateLockTimeout, - } -} - -// backendConfig returns the local configuration for the backend -func (m *Meta) backendConfig(opts *BackendOpts) (*config.Backend, error) { - if opts.Config == nil { - // check if the config was missing, or just not required - conf, err := m.Config(".") - if err != nil { - return nil, err - } - - if conf == nil { - log.Println("[INFO] command: no config, returning nil") - return nil, nil - } - - log.Println("[WARNING] BackendOpts.Config not set, but config found") - opts.Config = conf - } - - c := opts.Config - - // If there is no Terraform configuration block, no backend config - if c.Terraform == nil { - log.Println("[INFO] command: empty terraform config, returning nil") - return nil, nil - } - - // Get the configuration for the backend itself. - backend := c.Terraform.Backend - if backend == nil { - log.Println("[INFO] command: empty backend config, returning nil") - return nil, nil - } - - // If we have a config file set, load that and merge. 
- if opts.ConfigFile != "" { - log.Printf( - "[DEBUG] command: loading extra backend config from: %s", - opts.ConfigFile) - rc, err := m.backendConfigFile(opts.ConfigFile) - if err != nil { - return nil, fmt.Errorf( - "Error loading extra configuration file for backend: %s", err) - } - - // Merge in the configuration - backend.RawConfig = backend.RawConfig.Merge(rc) - } - - // If we have extra config values, merge that - if len(opts.ConfigExtra) > 0 { - log.Printf( - "[DEBUG] command: adding extra backend config from CLI") - rc, err := config.NewRawConfig(opts.ConfigExtra) - if err != nil { - return nil, fmt.Errorf( - "Error adding extra configuration file for backend: %s", err) - } - - // Merge in the configuration - backend.RawConfig = backend.RawConfig.Merge(rc) - } - - // Validate the backend early. We have to do this before the normal - // config validation pass since backend loading happens earlier. - if errs := backend.Validate(); len(errs) > 0 { - return nil, multierror.Append(nil, errs...) - } - - // Return the configuration which may or may not be set - return backend, nil -} - -// backendConfigFile loads the extra configuration to merge with the -// backend configuration from an extra file if specified by -// BackendOpts.ConfigFile. -func (m *Meta) backendConfigFile(path string) (*config.RawConfig, error) { - // Read the file - d, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - - // Parse it - hclRoot, err := hcl.Parse(string(d)) - if err != nil { - return nil, err - } - - // Decode it - var c map[string]interface{} - if err := hcl.DecodeObject(&c, hclRoot); err != nil { - return nil, err - } - - return config.NewRawConfig(c) -} - -// backendFromConfig returns the initialized (not configured) backend -// directly from the config/state.. -// -// This function handles any edge cases around backend config loading. For -// example: legacy remote state, new config changes, backend type changes, -// etc. 
-// -// This function may query the user for input unless input is disabled, in -// which case this function will error. -func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, error) { - // Get the local backend configuration. - c, err := m.backendConfig(opts) - if err != nil { - return nil, fmt.Errorf("Error loading backend config: %s", err) - } - - // cHash defaults to zero unless c is set - var cHash uint64 - if c != nil { - // We need to rehash to get the value since we may have merged the - // config with an extra ConfigFile. We don't do this when merging - // because we do want the ORIGINAL value on c so that we store - // that to not detect drift. This is covered in tests. - cHash = c.Rehash() - } - - // Get the path to where we store a local cache of backend configuration - // if we're using a remote backend. This may not yet exist which means - // we haven't used a non-local backend before. That is okay. - statePath := filepath.Join(m.DataDir(), DefaultStateFilename) - sMgr := &state.LocalState{Path: statePath} - if err := sMgr.RefreshState(); err != nil { - return nil, fmt.Errorf("Error loading state: %s", err) - } - - // Load the state, it must be non-nil for the tests below but can be empty - s := sMgr.State() - if s == nil { - log.Printf("[DEBUG] command: no data state file found for backend config") - s = terraform.NewState() - } - - // if we want to force reconfiguration of the backend, we set the backend - // state to nil on this copy. This will direct us through the correct - // configuration path in the switch statement below. - if m.reconfigure { - s.Backend = nil - } - - // Upon return, we want to set the state we're using in-memory so that - // we can access it for commands. 
- m.backendState = nil - defer func() { - if s := sMgr.State(); s != nil && !s.Backend.Empty() { - m.backendState = s.Backend - } - }() - - // This giant switch statement covers all eight possible combinations - // of state settings between: configuring new backends, saved (previously- - // configured) backends, and legacy remote state. - switch { - // No configuration set at all. Pure local state. - case c == nil && s.Remote.Empty() && s.Backend.Empty(): - return nil, nil - - // We're unsetting a backend (moving from backend => local) - case c == nil && s.Remote.Empty() && !s.Backend.Empty(): - if !opts.Init { - initReason := fmt.Sprintf( - "Unsetting the previously set backend %q", - s.Backend.Type) - m.backendInitRequired(initReason) - return nil, errBackendInitRequired - } - - return m.backend_c_r_S(c, sMgr, true) - - // We have a legacy remote state configuration but no new backend config - case c == nil && !s.Remote.Empty() && s.Backend.Empty(): - return m.backend_c_R_s(c, sMgr) - - // We have a legacy remote state configuration simultaneously with a - // saved backend configuration while at the same time disabling backend - // configuration. - // - // This is a naturally impossible case: Terraform will never put you - // in this state, though it is theoretically possible through manual edits - case c == nil && !s.Remote.Empty() && !s.Backend.Empty(): - if !opts.Init { - initReason := fmt.Sprintf( - "Unsetting the previously set backend %q", - s.Backend.Type) - m.backendInitRequired(initReason) - return nil, errBackendInitRequired - } - - return m.backend_c_R_S(c, sMgr) - - // Configuring a backend for the first time. 
- case c != nil && s.Remote.Empty() && s.Backend.Empty(): - if !opts.Init { - initReason := fmt.Sprintf( - "Initial configuration of the requested backend %q", - c.Type) - m.backendInitRequired(initReason) - return nil, errBackendInitRequired - } - - return m.backend_C_r_s(c, sMgr) - - // Potentially changing a backend configuration - case c != nil && s.Remote.Empty() && !s.Backend.Empty(): - // If our configuration is the same, then we're just initializing - // a previously configured remote backend. - if !s.Backend.Empty() { - hash := s.Backend.Hash - // on init we need an updated hash containing any extra options - // that were added after merging. - if opts.Init { - hash = s.Backend.Rehash() - } - if hash == cHash { - return m.backend_C_r_S_unchanged(c, sMgr) - } - } - - if !opts.Init { - initReason := fmt.Sprintf( - "Backend configuration changed for %q", - c.Type) - m.backendInitRequired(initReason) - return nil, errBackendInitRequired - } - - log.Printf( - "[WARN] command: backend config change! saved: %d, new: %d", - s.Backend.Hash, cHash) - return m.backend_C_r_S_changed(c, sMgr, true) - - // Configuring a backend for the first time while having legacy - // remote state. This is very possible if a Terraform user configures - // a backend prior to ever running Terraform on an old state. - case c != nil && !s.Remote.Empty() && s.Backend.Empty(): - if !opts.Init { - initReason := fmt.Sprintf( - "Initial configuration for backend %q", - c.Type) - m.backendInitRequired(initReason) - return nil, errBackendInitRequired - } - - return m.backend_C_R_s(c, sMgr) - - // Configuring a backend with both a legacy remote state set - // and a pre-existing backend saved. - case c != nil && !s.Remote.Empty() && !s.Backend.Empty(): - // If the hashes are the same, we have a legacy remote state with - // an unchanged stored backend state. 
- hash := s.Backend.Hash - if opts.Init { - hash = s.Backend.Rehash() - } - if hash == cHash { - if !opts.Init { - initReason := fmt.Sprintf( - "Legacy remote state found with configured backend %q", - c.Type) - m.backendInitRequired(initReason) - return nil, errBackendInitRequired - } - - return m.backend_C_R_S_unchanged(c, sMgr, true) - } - - if !opts.Init { - initReason := fmt.Sprintf( - "Reconfiguring the backend %q", - c.Type) - m.backendInitRequired(initReason) - return nil, errBackendInitRequired - } - - // We have change in all three - return m.backend_C_R_S_changed(c, sMgr) - default: - // This should be impossible since all state possibilties are - // tested above, but we need a default case anyways and we should - // protect against the scenario where a case is somehow removed. - return nil, fmt.Errorf( - "Unhandled backend configuration state. This is a bug. Please\n"+ - "report this error with the following information.\n\n"+ - "Config Nil: %v\n"+ - "Saved Backend Empty: %v\n"+ - "Legacy Remote Empty: %v\n", - c == nil, s.Backend.Empty(), s.Remote.Empty()) - } -} - -// backendFromPlan loads the backend from a given plan file. -func (m *Meta) backendFromPlan(opts *BackendOpts) (backend.Backend, error) { - // Precondition check - if opts.Plan == nil { - panic("plan should not be nil") - } - - // We currently don't allow "-state" to be specified. - if m.statePath != "" { - return nil, fmt.Errorf( - "State path cannot be specified with a plan file. The plan itself contains\n" + - "the state to use. If you wish to change that, please create a new plan\n" + - "and specify the state path when creating the plan.") - } - - planBackend := opts.Plan.Backend - planState := opts.Plan.State - if planState == nil { - // The state can be nil, we just have to make it empty for the logic - // in this function. 
- planState = terraform.NewState() - } - - // Validation only for non-local plans - local := planState.Remote.Empty() && planBackend.Empty() - if !local { - // We currently don't allow "-state-out" to be specified. - if m.stateOutPath != "" { - return nil, fmt.Errorf(strings.TrimSpace(errBackendPlanStateFlag)) - } - } - - /* - // Determine the path where we'd be writing state - path := DefaultStateFilename - if !planState.Remote.Empty() || !planBackend.Empty() { - path = filepath.Join(m.DataDir(), DefaultStateFilename) - } - - // If the path exists, then we need to verify we're writing the same - // state lineage. If the path doesn't exist that's okay. - _, err := os.Stat(path) - if err != nil && !os.IsNotExist(err) { - return nil, fmt.Errorf("Error checking state destination: %s", err) - } - if err == nil { - // The file exists, we need to read it and compare - if err := m.backendFromPlan_compareStates(state, path); err != nil { - return nil, err - } - } - */ - - // If we have a stateOutPath, we must also specify it as the - // input path so we can check it properly. We restore it after this - // function exits. 
- original := m.statePath - m.statePath = m.stateOutPath - defer func() { m.statePath = original }() - - var b backend.Backend - var err error - switch { - // No remote state at all, all local - case planState.Remote.Empty() && planBackend.Empty(): - log.Printf("[INFO] command: initializing local backend from plan (not set)") - - // Get the local backend - b, err = m.Backend(&BackendOpts{ForceLocal: true}) - - // New backend configuration set - case planState.Remote.Empty() && !planBackend.Empty(): - log.Printf( - "[INFO] command: initializing backend from plan: %s", - planBackend.Type) - - b, err = m.backendInitFromSaved(planBackend) - - // Legacy remote state set - case !planState.Remote.Empty() && planBackend.Empty(): - log.Printf( - "[INFO] command: initializing legacy remote backend from plan: %s", - planState.Remote.Type) - - // Write our current state to an inmemory state just so that we - // have it in the format of state.State - inmem := &state.InmemState{} - inmem.WriteState(planState) - - // Get the backend through the normal means of legacy state - b, err = m.backend_c_R_s(nil, inmem) - - // Both set, this can't happen in a plan. 
- case !planState.Remote.Empty() && !planBackend.Empty(): - return nil, fmt.Errorf(strings.TrimSpace(errBackendPlanBoth)) - } - - // If we had an error, return that - if err != nil { - return nil, err - } - - env := m.Workspace() - - // Get the state so we can determine the effect of using this plan - realMgr, err := b.State(env) - if err != nil { - return nil, fmt.Errorf("Error reading state: %s", err) - } - - if m.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), m.stateLockTimeout) - defer cancel() - - // Lock the state if we can - lockInfo := state.NewLockInfo() - lockInfo.Operation = "backend from plan" - - lockID, err := clistate.Lock(lockCtx, realMgr, lockInfo, m.Ui, m.Colorize()) - if err != nil { - return nil, fmt.Errorf("Error locking state: %s", err) - } - defer clistate.Unlock(realMgr, lockID, m.Ui, m.Colorize()) - } - - if err := realMgr.RefreshState(); err != nil { - return nil, fmt.Errorf("Error reading state: %s", err) - } - real := realMgr.State() - if real != nil { - // If they're not the same lineage, don't allow this - if !real.SameLineage(planState) { - return nil, fmt.Errorf(strings.TrimSpace(errBackendPlanLineageDiff)) - } - - // Compare ages - comp, err := real.CompareAges(planState) - if err != nil { - return nil, fmt.Errorf("Error comparing state ages for safety: %s", err) - } - switch comp { - case terraform.StateAgeEqual: - // State ages are equal, this is perfect - - case terraform.StateAgeReceiverOlder: - // Real state is somehow older, this is okay. - - case terraform.StateAgeReceiverNewer: - // If we have an older serial it is a problem but if we have a - // differing serial but are still identical, just let it through. - if real.Equal(planState) { - log.Printf( - "[WARN] command: state in plan has older serial, but Equal is true") - break - } - - // The real state is newer, this is not allowed. 
- return nil, fmt.Errorf( - strings.TrimSpace(errBackendPlanOlder), - planState.Serial, real.Serial) - } - } - - // Write the state - newState := opts.Plan.State.DeepCopy() - if newState != nil { - newState.Remote = nil - newState.Backend = nil - } - - // realMgr locked above - if err := realMgr.WriteState(newState); err != nil { - return nil, fmt.Errorf("Error writing state: %s", err) - } - if err := realMgr.PersistState(); err != nil { - return nil, fmt.Errorf("Error writing state: %s", err) - } - - return b, nil -} - -//------------------------------------------------------------------- -// Backend Config Scenarios -// -// The functions below cover handling all the various scenarios that -// can exist when loading a backend. They are named in the format of -// "backend_C_R_S" where C, R, S may be upper or lowercase. Lowercase -// means it is false, uppercase means it is true. The full set of eight -// possible cases is handled. -// -// The fields are: -// -// * C - Backend configuration is set and changed in TF files -// * R - Legacy remote state is set -// * S - Backend configuration is set in the state -// -//------------------------------------------------------------------- - -// Unconfiguring a backend (moving from backend => local). 
-func (m *Meta) backend_c_r_S( - c *config.Backend, sMgr state.State, output bool) (backend.Backend, error) { - s := sMgr.State() - - // Get the backend type for output - backendType := s.Backend.Type - - copy := m.forceInitCopy - if !copy { - var err error - // Confirm with the user that the copy should occur - copy, err = m.confirm(&terraform.InputOpts{ - Id: "backend-migrate-to-local", - Query: fmt.Sprintf("Do you want to copy the state from %q?", s.Backend.Type), - Description: fmt.Sprintf( - strings.TrimSpace(inputBackendMigrateLocal), s.Backend.Type), - }) - if err != nil { - return nil, fmt.Errorf( - "Error asking for state copy action: %s", err) - } - } - - // If we're copying, perform the migration - if copy { - // Grab a purely local backend to get the local state if it exists - localB, err := m.Backend(&BackendOpts{ForceLocal: true}) - if err != nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendLocalRead), err) - } - - // Initialize the configured backend - b, err := m.backend_C_r_S_unchanged(c, sMgr) - if err != nil { - return nil, fmt.Errorf( - strings.TrimSpace(errBackendSavedUnsetConfig), s.Backend.Type, err) - } - - // Perform the migration - err = m.backendMigrateState(&backendMigrateOpts{ - OneType: s.Backend.Type, - TwoType: "local", - One: b, - Two: localB, - }) - if err != nil { - return nil, err - } - } - - // Remove the stored metadata - s.Backend = nil - if err := sMgr.WriteState(s); err != nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendClearSaved), err) - } - if err := sMgr.PersistState(); err != nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendClearSaved), err) - } - - if output { - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset][green]\n\n"+ - strings.TrimSpace(successBackendUnset), backendType))) - } - - // Return no backend - return nil, nil -} - -// Legacy remote state -func (m *Meta) backend_c_R_s( - c *config.Backend, sMgr state.State) (backend.Backend, error) { - s := sMgr.State() - - // Warn 
the user - m.Ui.Warn(strings.TrimSpace(warnBackendLegacy) + "\n") - - // We need to convert the config to map[string]interface{} since that - // is what the backends expect. - var configMap map[string]interface{} - if err := mapstructure.Decode(s.Remote.Config, &configMap); err != nil { - return nil, fmt.Errorf("Error configuring remote state: %s", err) - } - - // Create the config - rawC, err := config.NewRawConfig(configMap) - if err != nil { - return nil, fmt.Errorf("Error configuring remote state: %s", err) - } - config := terraform.NewResourceConfig(rawC) - - // Get the backend - f := backendinit.Backend(s.Remote.Type) - if f == nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendLegacyUnknown), s.Remote.Type) - } - b := f() - - // Configure - if err := b.Configure(config); err != nil { - return nil, fmt.Errorf(errBackendLegacyConfig, err) - } - - return b, nil -} - -// Unsetting backend, saved backend, legacy remote state -func (m *Meta) backend_c_R_S( - c *config.Backend, sMgr state.State) (backend.Backend, error) { - // Notify the user - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset]%s\n\n", - strings.TrimSpace(outputBackendUnsetWithLegacy)))) - - // Get the backend type for later - backendType := sMgr.State().Backend.Type - - // First, perform the configured => local tranasition - if _, err := m.backend_c_r_S(c, sMgr, false); err != nil { - return nil, err - } - - // Grab a purely local backend - localB, err := m.Backend(&BackendOpts{ForceLocal: true}) - if err != nil { - return nil, fmt.Errorf(errBackendLocalRead, err) - } - - // Grab the state - s := sMgr.State() - - // Ask the user if they want to migrate their existing remote state - copy := m.forceInitCopy - if !copy { - copy, err = m.confirm(&terraform.InputOpts{ - Id: "backend-migrate-to-new", - Query: fmt.Sprintf( - "Do you want to copy the legacy remote state from %q?", - s.Remote.Type), - Description: strings.TrimSpace(inputBackendMigrateLegacyLocal), - }) - if err != nil { - 
return nil, fmt.Errorf( - "Error asking for state copy action: %s", err) - } - } - - // If the user wants a copy, copy! - if copy { - // Initialize the legacy backend - oldB, err := m.backendInitFromLegacy(s.Remote) - if err != nil { - return nil, err - } - - // Perform the migration - err = m.backendMigrateState(&backendMigrateOpts{ - OneType: s.Remote.Type, - TwoType: "local", - One: oldB, - Two: localB, - }) - if err != nil { - return nil, err - } - } - - // Unset the remote state - s = sMgr.State() - if s == nil { - s = terraform.NewState() - } - s.Remote = nil - if err := sMgr.WriteState(s); err != nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err) - } - if err := sMgr.PersistState(); err != nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err) - } - - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset][green]\n\n"+ - strings.TrimSpace(successBackendUnset), backendType))) - - return nil, nil -} - -// Configuring a backend for the first time with legacy remote state. -func (m *Meta) backend_C_R_s( - c *config.Backend, sMgr state.State) (backend.Backend, error) { - // Notify the user - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset]%s\n\n", - strings.TrimSpace(outputBackendConfigureWithLegacy)))) - - // First, configure the new backend - b, err := m.backendInitFromConfig(c) - if err != nil { - return nil, err - } - - // Next, save the new configuration. This will not overwrite our - // legacy remote state. We'll handle that after. 
- s := sMgr.State() - if s == nil { - s = terraform.NewState() - } - s.Backend = &terraform.BackendState{ - Type: c.Type, - Config: c.RawConfig.Raw, - Hash: c.Hash, - } - if err := sMgr.WriteState(s); err != nil { - return nil, fmt.Errorf(errBackendWriteSaved, err) - } - if err := sMgr.PersistState(); err != nil { - return nil, fmt.Errorf(errBackendWriteSaved, err) - } - - // I don't know how this is possible but if we don't have remote - // state config anymore somehow, just return the backend. This - // shouldn't be possible, though. - if s.Remote.Empty() { - return b, nil - } - - // Finally, ask the user if they want to copy the state from - // their old remote state location. - copy := m.forceInitCopy - if !copy { - copy, err = m.confirm(&terraform.InputOpts{ - Id: "backend-migrate-to-new", - Query: fmt.Sprintf( - "Do you want to copy the legacy remote state from %q?", - s.Remote.Type), - Description: strings.TrimSpace(inputBackendMigrateLegacy), - }) - if err != nil { - return nil, fmt.Errorf( - "Error asking for state copy action: %s", err) - } - } - - // If the user wants a copy, copy! - if copy { - // Initialize the legacy backend - oldB, err := m.backendInitFromLegacy(s.Remote) - if err != nil { - return nil, err - } - - // Perform the migration - err = m.backendMigrateState(&backendMigrateOpts{ - OneType: s.Remote.Type, - TwoType: c.Type, - One: oldB, - Two: b, - }) - if err != nil { - return nil, err - } - } - - // Unset the remote state - s = sMgr.State() - if s == nil { - s = terraform.NewState() - } - s.Remote = nil - if err := sMgr.WriteState(s); err != nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err) - } - if err := sMgr.PersistState(); err != nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err) - } - - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset][green]\n"+strings.TrimSpace(successBackendSet), s.Backend.Type))) - - return b, nil -} - -// Configuring a backend for the first time. 
-func (m *Meta) backend_C_r_s( - c *config.Backend, sMgr state.State) (backend.Backend, error) { - // Get the backend - b, err := m.backendInitFromConfig(c) - if err != nil { - return nil, err - } - - // Grab a purely local backend to get the local state if it exists - localB, err := m.Backend(&BackendOpts{ForceLocal: true}) - if err != nil { - return nil, fmt.Errorf(errBackendLocalRead, err) - } - - env := m.Workspace() - - localState, err := localB.State(env) - if err != nil { - return nil, fmt.Errorf(errBackendLocalRead, err) - } - if err := localState.RefreshState(); err != nil { - return nil, fmt.Errorf(errBackendLocalRead, err) - } - - // If the local state is not empty, we need to potentially do a - // state migration to the new backend (with user permission), unless the - // destination is also "local" - if localS := localState.State(); !localS.Empty() { - // Perform the migration - err = m.backendMigrateState(&backendMigrateOpts{ - OneType: "local", - TwoType: c.Type, - One: localB, - Two: b, - }) - if err != nil { - return nil, err - } - - // we usually remove the local state after migration to prevent - // confusion, but adding a default local backend block to the config - // can get us here too. Don't delete our state if the old and new paths - // are the same. - erase := true - if newLocalB, ok := b.(*backendlocal.Local); ok { - if localB, ok := localB.(*backendlocal.Local); ok { - if newLocalB.StatePath == localB.StatePath { - erase = false - } - } - } - - if erase { - // We always delete the local state, unless that was our new state too. 
- if err := localState.WriteState(nil); err != nil { - return nil, fmt.Errorf(errBackendMigrateLocalDelete, err) - } - if err := localState.PersistState(); err != nil { - return nil, fmt.Errorf(errBackendMigrateLocalDelete, err) - } - } - } - - if m.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), m.stateLockTimeout) - defer cancel() - - // Lock the state if we can - lockInfo := state.NewLockInfo() - lockInfo.Operation = "backend from config" - - lockID, err := clistate.Lock(lockCtx, sMgr, lockInfo, m.Ui, m.Colorize()) - if err != nil { - return nil, fmt.Errorf("Error locking state: %s", err) - } - defer clistate.Unlock(sMgr, lockID, m.Ui, m.Colorize()) - } - - // Store the metadata in our saved state location - s := sMgr.State() - if s == nil { - s = terraform.NewState() - } - s.Backend = &terraform.BackendState{ - Type: c.Type, - Config: c.RawConfig.Raw, - Hash: c.Hash, - } - - if err := sMgr.WriteState(s); err != nil { - return nil, fmt.Errorf(errBackendWriteSaved, err) - } - if err := sMgr.PersistState(); err != nil { - return nil, fmt.Errorf(errBackendWriteSaved, err) - } - - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset][green]\n"+strings.TrimSpace(successBackendSet), s.Backend.Type))) - - // Return the backend - return b, nil -} - -// Changing a previously saved backend. 
-func (m *Meta) backend_C_r_S_changed( - c *config.Backend, sMgr state.State, output bool) (backend.Backend, error) { - if output { - // Notify the user - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset]%s\n\n", - strings.TrimSpace(outputBackendReconfigure)))) - } - - // Get the old state - s := sMgr.State() - - // Get the backend - b, err := m.backendInitFromConfig(c) - if err != nil { - return nil, fmt.Errorf( - "Error initializing new backend: %s", err) - } - - // Check with the user if we want to migrate state - copy := m.forceInitCopy - if !copy { - copy, err = m.confirm(&terraform.InputOpts{ - Id: "backend-migrate-to-new", - Query: fmt.Sprintf("Do you want to copy the state from %q?", s.Backend.Type), - Description: strings.TrimSpace(fmt.Sprintf(inputBackendMigrateChange, s.Backend.Type, c.Type)), - }) - if err != nil { - return nil, fmt.Errorf( - "Error asking for state copy action: %s", err) - } - } - - // If we are, then we need to initialize the old backend and - // perform the copy. 
- if copy { - // Grab the existing backend - oldB, err := m.backend_C_r_S_unchanged(c, sMgr) - if err != nil { - return nil, fmt.Errorf( - "Error loading previously configured backend: %s", err) - } - - // Perform the migration - err = m.backendMigrateState(&backendMigrateOpts{ - OneType: s.Backend.Type, - TwoType: c.Type, - One: oldB, - Two: b, - }) - if err != nil { - return nil, err - } - } - - if m.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), m.stateLockTimeout) - defer cancel() - - // Lock the state if we can - lockInfo := state.NewLockInfo() - lockInfo.Operation = "backend from config" - - lockID, err := clistate.Lock(lockCtx, sMgr, lockInfo, m.Ui, m.Colorize()) - if err != nil { - return nil, fmt.Errorf("Error locking state: %s", err) - } - defer clistate.Unlock(sMgr, lockID, m.Ui, m.Colorize()) - } - - // Update the backend state - s = sMgr.State() - if s == nil { - s = terraform.NewState() - } - s.Backend = &terraform.BackendState{ - Type: c.Type, - Config: c.RawConfig.Raw, - Hash: c.Hash, - } - - if err := sMgr.WriteState(s); err != nil { - return nil, fmt.Errorf(errBackendWriteSaved, err) - } - if err := sMgr.PersistState(); err != nil { - return nil, fmt.Errorf(errBackendWriteSaved, err) - } - - if output { - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset][green]\n"+strings.TrimSpace(successBackendSet), s.Backend.Type))) - } - - return b, nil -} - -// Initiailizing an unchanged saved backend -func (m *Meta) backend_C_r_S_unchanged( - c *config.Backend, sMgr state.State) (backend.Backend, error) { - s := sMgr.State() - - // it's possible for a backend to be unchanged, and the config itself to - // have changed by moving a parameter from the config to `-backend-config` - // In this case we only need to update the Hash. - if c != nil && s.Backend.Hash != c.Hash { - s.Backend.Hash = c.Hash - if err := sMgr.WriteState(s); err != nil { - return nil, fmt.Errorf(errBackendWriteSaved, err) - } - } - - // Create the config. 
We do this from the backend state since this - // has the complete configuration data whereas the config itself - // may require input. - rawC, err := config.NewRawConfig(s.Backend.Config) - if err != nil { - return nil, fmt.Errorf("Error configuring backend: %s", err) - } - config := terraform.NewResourceConfig(rawC) - - // Get the backend - f := backendinit.Backend(s.Backend.Type) - if f == nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Backend.Type) - } - b := f() - - // Configure - if err := b.Configure(config); err != nil { - return nil, fmt.Errorf(errBackendSavedConfig, s.Backend.Type, err) - } - - return b, nil -} - -// Initiailizing a changed saved backend with legacy remote state. -func (m *Meta) backend_C_R_S_changed( - c *config.Backend, sMgr state.State) (backend.Backend, error) { - // Notify the user - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset]%s\n\n", - strings.TrimSpace(outputBackendSavedWithLegacyChanged)))) - - // Reconfigure the backend first - if _, err := m.backend_C_r_S_changed(c, sMgr, false); err != nil { - return nil, err - } - - // Handle the case where we have all set but unchanged - b, err := m.backend_C_R_S_unchanged(c, sMgr, false) - if err != nil { - return nil, err - } - - // Output success message - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset][green]\n\n"+ - strings.TrimSpace(successBackendReconfigureWithLegacy), c.Type))) - - return b, nil -} - -// Initiailizing an unchanged saved backend with legacy remote state. 
-func (m *Meta) backend_C_R_S_unchanged( - c *config.Backend, sMgr state.State, output bool) (backend.Backend, error) { - if output { - // Notify the user - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset]%s\n\n", - strings.TrimSpace(outputBackendSavedWithLegacy)))) - } - - // Load the backend from the state - s := sMgr.State() - b, err := m.backendInitFromSaved(s.Backend) - if err != nil { - return nil, err - } - - // Ask if the user wants to move their legacy remote state - copy := m.forceInitCopy - if !copy { - copy, err = m.confirm(&terraform.InputOpts{ - Id: "backend-migrate-to-new", - Query: fmt.Sprintf( - "Do you want to copy the legacy remote state from %q?", - s.Remote.Type), - Description: strings.TrimSpace(inputBackendMigrateLegacy), - }) - if err != nil { - return nil, fmt.Errorf( - "Error asking for state copy action: %s", err) - } - } - - // If the user wants a copy, copy! - if copy { - // Initialize the legacy backend - oldB, err := m.backendInitFromLegacy(s.Remote) - if err != nil { - return nil, err - } - - // Perform the migration - err = m.backendMigrateState(&backendMigrateOpts{ - OneType: s.Remote.Type, - TwoType: s.Backend.Type, - One: oldB, - Two: b, - }) - if err != nil { - return nil, err - } - } - - if m.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), m.stateLockTimeout) - defer cancel() - - // Lock the state if we can - lockInfo := state.NewLockInfo() - lockInfo.Operation = "backend from config" - - lockID, err := clistate.Lock(lockCtx, sMgr, lockInfo, m.Ui, m.Colorize()) - if err != nil { - return nil, fmt.Errorf("Error locking state: %s", err) - } - defer clistate.Unlock(sMgr, lockID, m.Ui, m.Colorize()) - } - - // Unset the remote state - s = sMgr.State() - if s == nil { - s = terraform.NewState() - } - s.Remote = nil - - if err := sMgr.WriteState(s); err != nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err) - } - if err := sMgr.PersistState(); err != nil { - return nil, 
fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err) - } - - if output { - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset][green]\n\n"+ - strings.TrimSpace(successBackendLegacyUnset), s.Backend.Type))) - } - - return b, nil -} - -//------------------------------------------------------------------- -// Reusable helper functions for backend management -//------------------------------------------------------------------- - -func (m *Meta) backendInitFromConfig(c *config.Backend) (backend.Backend, error) { - // Create the config. - config := terraform.NewResourceConfig(c.RawConfig) - - // Get the backend - f := backendinit.Backend(c.Type) - if f == nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendNewUnknown), c.Type) - } - b := f() - - // TODO: test - // Ask for input if we have input enabled - if m.Input() { - var err error - config, err = b.Input(m.UIInput(), config) - if err != nil { - return nil, fmt.Errorf( - "Error asking for input to configure the backend %q: %s", - c.Type, err) - } - } - - // Validate - warns, errs := b.Validate(config) - for _, warning := range warns { - // We just write warnings directly to the UI. This isn't great - // since we're a bit deep here to be pushing stuff out into the - // UI, but sufficient to let us print out deprecation warnings - // and the like. - m.Ui.Warn(warning) - } - if len(errs) > 0 { - return nil, fmt.Errorf( - "Error configuring the backend %q: %s", - c.Type, multierror.Append(nil, errs...)) - } - - // Configure - if err := b.Configure(config); err != nil { - return nil, fmt.Errorf(errBackendNewConfig, c.Type, err) - } - - return b, nil -} - -func (m *Meta) backendInitFromLegacy(s *terraform.RemoteState) (backend.Backend, error) { - // We need to convert the config to map[string]interface{} since that - // is what the backends expect. 
- var configMap map[string]interface{} - if err := mapstructure.Decode(s.Config, &configMap); err != nil { - return nil, fmt.Errorf("Error configuring remote state: %s", err) - } - - // Create the config - rawC, err := config.NewRawConfig(configMap) - if err != nil { - return nil, fmt.Errorf("Error configuring remote state: %s", err) - } - config := terraform.NewResourceConfig(rawC) - - // Get the backend - f := backendinit.Backend(s.Type) - if f == nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendLegacyUnknown), s.Type) - } - b := f() - - // Configure - if err := b.Configure(config); err != nil { - return nil, fmt.Errorf(errBackendLegacyConfig, err) - } - - return b, nil -} - -func (m *Meta) backendInitFromSaved(s *terraform.BackendState) (backend.Backend, error) { - // Create the config. We do this from the backend state since this - // has the complete configuration data whereas the config itself - // may require input. - rawC, err := config.NewRawConfig(s.Config) - if err != nil { - return nil, fmt.Errorf("Error configuring backend: %s", err) - } - config := terraform.NewResourceConfig(rawC) - - // Get the backend - f := backendinit.Backend(s.Type) - if f == nil { - return nil, fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Type) - } - b := f() - - // Configure - if err := b.Configure(config); err != nil { - return nil, fmt.Errorf(errBackendSavedConfig, s.Type, err) - } - - return b, nil -} - -func (m *Meta) backendInitRequired(reason string) { - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset]"+strings.TrimSpace(errBackendInit)+"\n", reason))) -} - -//------------------------------------------------------------------- -// Output constants and initialization code -//------------------------------------------------------------------- - -// errBackendInitRequired is the final error message shown when reinit -// is required for some reason. The error message includes the reason. 
-var errBackendInitRequired = errors.New( - "Initialization required. Please see the error message above.") - -const errBackendLegacyConfig = ` -One or more errors occurred while configuring the legacy remote state. -If fixing these errors requires changing your remote state configuration, -you must switch your configuration to the new remote backend configuration. -You can learn more about remote backends at the URL below: - -https://www.terraform.io/docs/backends/index.html - -The error(s) configuring the legacy remote state: - -%s -` - -const errBackendLegacyUnknown = ` -The legacy remote state type %q could not be found. - -Terraform 0.9.0 shipped with backwards compatibility for all built-in -legacy remote state types. This error may mean that you were using a -custom Terraform build that perhaps supported a different type of -remote state. - -Please check with the creator of the remote state above and try again. -` - -const errBackendLocalRead = ` -Error reading local state: %s - -Terraform is trying to read your local state to determine if there is -state to migrate to your newly configured backend. Terraform can't continue -without this check because that would risk losing state. Please resolve the -error above and try again. -` - -const errBackendMigrateLocalDelete = ` -Error deleting local state after migration: %s - -Your local state is deleted after successfully migrating it to the newly -configured backend. As part of the deletion process, a backup is made at -the standard backup path unless explicitly asked not to. To cleanly operate -with a backend, we must delete the local state file. Please resolve the -issue above and retry the command. -` - -const errBackendMigrateNew = ` -Error migrating local state to backend: %s - -Your local state remains intact and unmodified. Please resolve the error -above and try again. 
-` - -const errBackendNewConfig = ` -Error configuring the backend %q: %s - -Please update the configuration in your Terraform files to fix this error -then run this command again. -` - -const errBackendNewRead = ` -Error reading newly configured backend state: %s - -Terraform is trying to read the state from your newly configured backend -to determine the copy process for your existing state. Backends are expected -to not error even if there is no state yet written. Please resolve the -error above and try again. -` - -const errBackendNewUnknown = ` -The backend %q could not be found. - -This is the backend specified in your Terraform configuration file. -This error could be a simple typo in your configuration, but it can also -be caused by using a Terraform version that doesn't support the specified -backend type. Please check your configuration and your Terraform version. - -If you'd like to run Terraform and store state locally, you can fix this -error by removing the backend configuration from your configuration. -` - -const errBackendRemoteRead = ` -Error reading backend state: %s - -Terraform is trying to read the state from your configured backend to -determine if there is any migration steps necessary. Terraform can't continue -without this check because that would risk losing state. Please resolve the -error above and try again. -` - -const errBackendSavedConfig = ` -Error configuring the backend %q: %s - -Please update the configuration in your Terraform files to fix this error. -If you'd like to update the configuration interactively without storing -the values in your configuration, run "terraform init". -` - -const errBackendSavedUnsetConfig = ` -Error configuring the existing backend %q: %s - -Terraform must configure the existing backend in order to copy the state -from the existing backend, as requested. Please resolve the error and try -again. If you choose to not copy the existing state, Terraform will not -configure the backend. 
If the configuration is invalid, please update your -Terraform configuration with proper configuration for this backend first -before unsetting the backend. -` - -const errBackendSavedUnknown = ` -The backend %q could not be found. - -This is the backend that this Terraform environment is configured to use -both in your configuration and saved locally as your last-used backend. -If it isn't found, it could mean an alternate version of Terraform was -used with this configuration. Please use the proper version of Terraform that -contains support for this backend. - -If you'd like to force remove this backend, you must update your configuration -to not use the backend and run "terraform init" (or any other command) again. -` - -const errBackendClearLegacy = ` -Error clearing the legacy remote state configuration: %s - -Terraform completed configuring your backend. It is now safe to remove -the legacy remote state configuration, but an error occurred while trying -to do so. Please look at the error above, resolve it, and try again. -` - -const errBackendClearSaved = ` -Error clearing the backend configuration: %s - -Terraform removes the saved backend configuration when you're removing a -configured backend. This must be done so future Terraform runs know to not -use the backend configuration. Please look at the error above, resolve it, -and try again. -` - -const errBackendInit = ` -[reset][bold][yellow]Backend reinitialization required. Please run "terraform init".[reset] -[yellow]Reason: %s - -The "backend" is the interface that Terraform uses to store state, -perform operations, etc. If this message is showing up, it means that the -Terraform configuration you're using is using a custom configuration for -the Terraform backend. - -Changes to backend configurations require reinitialization. This allows -Terraform to setup the new configuration, copy existing state, etc. This is -only done during "terraform init". Please run that command now then try again. 
- -If the change reason above is incorrect, please verify your configuration -hasn't changed and try again. At this point, no changes to your existing -configuration or state have been made. -` - -const errBackendWriteSaved = ` -Error saving the backend configuration: %s - -Terraform saves the complete backend configuration in a local file for -configuring the backend on future operations. This cannot be disabled. Errors -are usually due to simple file permission errors. Please look at the error -above, resolve it, and try again. -` - -const errBackendPlanBoth = ` -The plan file contained both a legacy remote state and backend configuration. -This is not allowed. Please recreate the plan file with the latest version of -Terraform. -` - -const errBackendPlanLineageDiff = ` -The plan file contains a state with a differing lineage than the current -state. By continuing, your current state would be overwritten by the state -in the plan. Please either update the plan with the latest state or delete -your current state and try again. - -"Lineage" is a unique identifier generated only once on the creation of -a new, empty state. If these values differ, it means they were created new -at different times. Therefore, Terraform must assume that they're completely -different states. - -The most common cause of seeing this error is using a plan that was -created against a different state. Perhaps the plan is very old and the -state has since been recreated, or perhaps the plan was against a competely -different infrastructure. -` - -const errBackendPlanStateFlag = ` -The -state and -state-out flags cannot be set with a plan that has a remote -state. The plan itself contains the configuration for the remote backend to -store state. The state will be written there for consistency. - -If you wish to change this behavior, please create a plan from local state. -You may use the state flags with plans from local state to affect where -the final state is written. 
-` - -const errBackendPlanOlder = ` -This plan was created against an older state than is current. Please create -a new plan file against the latest state and try again. - -Terraform doesn't allow you to run plans that were created from older -states since it doesn't properly represent the latest changes Terraform -may have made, and can result in unsafe behavior. - -Plan Serial: %[1]d -Current Serial: %[2]d -` - -const inputBackendMigrateChange = ` -Would you like to copy the state from your prior backend %q to the -newly configured %q backend? If you're reconfiguring the same backend, -answering "yes" or "no" shouldn't make a difference. Please answer exactly -"yes" or "no". -` - -const inputBackendMigrateLegacy = ` -Terraform can copy the existing state in your legacy remote state -backend to your newly configured backend. Please answer "yes" or "no". -` - -const inputBackendMigrateLegacyLocal = ` -Terraform can copy the existing state in your legacy remote state -backend to your local state. Please answer "yes" or "no". -` - -const inputBackendMigrateLocal = ` -Terraform has detected you're unconfiguring your previously set backend. -Would you like to copy the state from %q to local state? Please answer -"yes" or "no". If you answer "no", you will start with a blank local state. -` - -const outputBackendConfigureWithLegacy = ` -[reset][bold]New backend configuration detected with legacy remote state![reset] - -Terraform has detected that you're attempting to configure a new backend. -At the same time, legacy remote state configuration was found. Terraform will -first configure the new backend, and then ask if you'd like to migrate -your remote state to the new backend. -` - -const outputBackendReconfigure = ` -[reset][bold]Backend configuration changed![reset] - -Terraform has detected that the configuration specified for the backend -has changed. Terraform will now reconfigure for this backend. 
If you didn't -intend to reconfigure your backend please undo any changes to the "backend" -section in your Terraform configuration. -` - -const outputBackendSavedWithLegacy = ` -[reset][bold]Legacy remote state was detected![reset] - -Terraform has detected you still have legacy remote state enabled while -also having a backend configured. Terraform will now ask if you want to -migrate your legacy remote state data to the configured backend. -` - -const outputBackendSavedWithLegacyChanged = ` -[reset][bold]Legacy remote state was detected while also changing your current backend!reset] - -Terraform has detected that you have legacy remote state, a configured -current backend, and you're attempting to reconfigure your backend. To handle -all of these changes, Terraform will first reconfigure your backend. After -this, Terraform will handle optionally copying your legacy remote state -into the newly configured backend. -` - -const outputBackendUnsetWithLegacy = ` -[reset][bold]Detected a request to unset the backend with legacy remote state present![reset] - -Terraform has detected that you're attempting to unset a previously configured -backend (by not having the "backend" configuration set in your Terraform files). -At the same time, legacy remote state was detected. To handle this complex -scenario, Terraform will first unset your configured backend, and then -ask you how to handle the legacy remote state. This will be multi-step -process. -` - -const successBackendLegacyUnset = ` -Terraform has successfully migrated from legacy remote state to your -configured backend (%q). -` - -const successBackendReconfigureWithLegacy = ` -Terraform has successfully reconfigured your backend and migrate -from legacy remote state to the new backend. -` - -const successBackendUnset = ` -Successfully unset the backend %q. Terraform will now operate locally. -` - -const successBackendSet = ` -Successfully configured the backend %q! 
Terraform will automatically -use this backend unless the backend configuration changes. -` - -const warnBackendLegacy = ` -Deprecation warning: This environment is configured to use legacy remote state. -Remote state changed significantly in Terraform 0.9. Please update your remote -state configuration to use the new 'backend' settings. For now, Terraform -will continue to use your existing settings. Legacy remote state support -will be removed in Terraform 0.11. - -You can find a guide for upgrading here: - -https://www.terraform.io/docs/backends/legacy-0-8.html -` diff --git a/vendor/github.com/hashicorp/terraform/command/meta_backend_migrate.go b/vendor/github.com/hashicorp/terraform/command/meta_backend_migrate.go deleted file mode 100644 index 552d70887aa..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/meta_backend_migrate.go +++ /dev/null @@ -1,520 +0,0 @@ -package command - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -// backendMigrateState handles migrating (copying) state from one backend -// to another. This function handles asking the user for confirmation -// as well as the copy itself. -// -// This function can handle all scenarios of state migration regardless -// of the existence of state in either backend. -// -// After migrating the state, the existing state in the first backend -// remains untouched. -// -// This will attempt to lock both states for the migration. -func (m *Meta) backendMigrateState(opts *backendMigrateOpts) error { - // We need to check what the named state status is. If we're converting - // from multi-state to single-state for example, we need to handle that. 
- var oneSingle, twoSingle bool - oneStates, err := opts.One.States() - if err == backend.ErrNamedStatesNotSupported { - oneSingle = true - err = nil - } - if err != nil { - return fmt.Errorf(strings.TrimSpace( - errMigrateLoadStates), opts.OneType, err) - } - - _, err = opts.Two.States() - if err == backend.ErrNamedStatesNotSupported { - twoSingle = true - err = nil - } - if err != nil { - return fmt.Errorf(strings.TrimSpace( - errMigrateLoadStates), opts.TwoType, err) - } - - // Setup defaults - opts.oneEnv = backend.DefaultStateName - opts.twoEnv = backend.DefaultStateName - opts.force = m.forceInitCopy - - // Determine migration behavior based on whether the source/destination - // supports multi-state. - switch { - // Single-state to single-state. This is the easiest case: we just - // copy the default state directly. - case oneSingle && twoSingle: - return m.backendMigrateState_s_s(opts) - - // Single-state to multi-state. This is easy since we just copy - // the default state and ignore the rest in the destination. - case oneSingle && !twoSingle: - return m.backendMigrateState_s_s(opts) - - // Multi-state to single-state. If the source has more than the default - // state this is complicated since we have to ask the user what to do. - case !oneSingle && twoSingle: - // If the source only has one state and it is the default, - // treat it as if it doesn't support multi-state. - if len(oneStates) == 1 && oneStates[0] == backend.DefaultStateName { - return m.backendMigrateState_s_s(opts) - } - - return m.backendMigrateState_S_s(opts) - - // Multi-state to multi-state. We merge the states together (migrating - // each from the source to the destination one by one). - case !oneSingle && !twoSingle: - // If the source only has one state and it is the default, - // treat it as if it doesn't support multi-state. 
- if len(oneStates) == 1 && oneStates[0] == backend.DefaultStateName { - return m.backendMigrateState_s_s(opts) - } - - return m.backendMigrateState_S_S(opts) - } - - return nil -} - -//------------------------------------------------------------------- -// State Migration Scenarios -// -// The functions below cover handling all the various scenarios that -// can exist when migrating state. They are named in an immediately not -// obvious format but is simple: -// -// Format: backendMigrateState_s1_s2[_suffix] -// -// When s1 or s2 is lower case, it means that it is a single state backend. -// When either is uppercase, it means that state is a multi-state backend. -// The suffix is used to disambiguate multiple cases with the same type of -// states. -// -//------------------------------------------------------------------- - -// Multi-state to multi-state. -func (m *Meta) backendMigrateState_S_S(opts *backendMigrateOpts) error { - // Ask the user if they want to migrate their existing remote state - migrate, err := m.confirm(&terraform.InputOpts{ - Id: "backend-migrate-multistate-to-multistate", - Query: fmt.Sprintf( - "Do you want to migrate all workspaces to %q?", - opts.TwoType), - Description: fmt.Sprintf( - strings.TrimSpace(inputBackendMigrateMultiToMulti), - opts.OneType, opts.TwoType), - }) - if err != nil { - return fmt.Errorf( - "Error asking for state migration action: %s", err) - } - if !migrate { - return fmt.Errorf("Migration aborted by user.") - } - - // Read all the states - oneStates, err := opts.One.States() - if err != nil { - return fmt.Errorf(strings.TrimSpace( - errMigrateLoadStates), opts.OneType, err) - } - - // Sort the states so they're always copied alphabetically - sort.Strings(oneStates) - - // Go through each and migrate - for _, name := range oneStates { - // Copy the same names - opts.oneEnv = name - opts.twoEnv = name - - // Force it, we confirmed above - opts.force = true - - // Perform the migration - if err := 
m.backendMigrateState_s_s(opts); err != nil { - return fmt.Errorf(strings.TrimSpace( - errMigrateMulti), name, opts.OneType, opts.TwoType, err) - } - } - - return nil -} - -// Multi-state to single state. -func (m *Meta) backendMigrateState_S_s(opts *backendMigrateOpts) error { - currentEnv := m.Workspace() - - migrate := opts.force - if !migrate { - var err error - // Ask the user if they want to migrate their existing remote state - migrate, err = m.confirm(&terraform.InputOpts{ - Id: "backend-migrate-multistate-to-single", - Query: fmt.Sprintf( - "Destination state %q doesn't support workspaces.\n"+ - "Do you want to copy only your current workspace?", - opts.TwoType), - Description: fmt.Sprintf( - strings.TrimSpace(inputBackendMigrateMultiToSingle), - opts.OneType, opts.TwoType, currentEnv), - }) - if err != nil { - return fmt.Errorf( - "Error asking for state migration action: %s", err) - } - } - - if !migrate { - return fmt.Errorf("Migration aborted by user.") - } - - // Copy the default state - opts.oneEnv = currentEnv - - // now switch back to the default env so we can acccess the new backend - m.SetWorkspace(backend.DefaultStateName) - - return m.backendMigrateState_s_s(opts) -} - -// Single state to single state, assumed default state name. -func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { - stateOne, err := opts.One.State(opts.oneEnv) - if err != nil { - return fmt.Errorf(strings.TrimSpace( - errMigrateSingleLoadDefault), opts.OneType, err) - } - if err := stateOne.RefreshState(); err != nil { - return fmt.Errorf(strings.TrimSpace( - errMigrateSingleLoadDefault), opts.OneType, err) - } - - stateTwo, err := opts.Two.State(opts.twoEnv) - if err != nil { - return fmt.Errorf(strings.TrimSpace( - errMigrateSingleLoadDefault), opts.TwoType, err) - } - if err := stateTwo.RefreshState(); err != nil { - return fmt.Errorf(strings.TrimSpace( - errMigrateSingleLoadDefault), opts.TwoType, err) - } - - // Check if we need migration at all. 
- // This is before taking a lock, because they may also correspond to the same lock. - one := stateOne.State() - two := stateTwo.State() - - // no reason to migrate if the state is already there - if one.Equal(two) { - // Equal isn't identical; it doesn't check lineage. - if one != nil && two != nil && one.Lineage == two.Lineage { - return nil - } - } - - if m.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), m.stateLockTimeout) - defer cancel() - - lockInfoOne := state.NewLockInfo() - lockInfoOne.Operation = "migration" - lockInfoOne.Info = "source state" - - lockIDOne, err := clistate.Lock(lockCtx, stateOne, lockInfoOne, m.Ui, m.Colorize()) - if err != nil { - return fmt.Errorf("Error locking source state: %s", err) - } - defer clistate.Unlock(stateOne, lockIDOne, m.Ui, m.Colorize()) - - lockInfoTwo := state.NewLockInfo() - lockInfoTwo.Operation = "migration" - lockInfoTwo.Info = "destination state" - - lockIDTwo, err := clistate.Lock(lockCtx, stateTwo, lockInfoTwo, m.Ui, m.Colorize()) - if err != nil { - return fmt.Errorf("Error locking destination state: %s", err) - } - defer clistate.Unlock(stateTwo, lockIDTwo, m.Ui, m.Colorize()) - - // We now own a lock, so double check that we have the version - // corresponding to the lock. - if err := stateOne.RefreshState(); err != nil { - return fmt.Errorf(strings.TrimSpace( - errMigrateSingleLoadDefault), opts.OneType, err) - } - if err := stateTwo.RefreshState(); err != nil { - return fmt.Errorf(strings.TrimSpace( - errMigrateSingleLoadDefault), opts.OneType, err) - } - - one = stateOne.State() - two = stateTwo.State() - } - - // Clear the legacy remote state in both cases. If we're at the migration - // step then this won't be used anymore. 
- if one != nil { - one.Remote = nil - } - if two != nil { - two.Remote = nil - } - - var confirmFunc func(state.State, state.State, *backendMigrateOpts) (bool, error) - switch { - // No migration necessary - case one.Empty() && two.Empty(): - return nil - - // No migration necessary if we're inheriting state. - case one.Empty() && !two.Empty(): - return nil - - // We have existing state moving into no state. Ask the user if - // they'd like to do this. - case !one.Empty() && two.Empty(): - confirmFunc = m.backendMigrateEmptyConfirm - - // Both states are non-empty, meaning we need to determine which - // state should be used and update accordingly. - case !one.Empty() && !two.Empty(): - confirmFunc = m.backendMigrateNonEmptyConfirm - } - - if confirmFunc == nil { - panic("confirmFunc must not be nil") - } - - if !opts.force { - // Abort if we can't ask for input. - if !m.input { - return errors.New("error asking for state migration action: input disabled") - } - - // Confirm with the user whether we want to copy state over - confirm, err := confirmFunc(stateOne, stateTwo, opts) - if err != nil { - return err - } - if !confirm { - return nil - } - } - - // Confirmed! Write. - if err := stateTwo.WriteState(one); err != nil { - return fmt.Errorf(strings.TrimSpace(errBackendStateCopy), - opts.OneType, opts.TwoType, err) - } - if err := stateTwo.PersistState(); err != nil { - return fmt.Errorf(strings.TrimSpace(errBackendStateCopy), - opts.OneType, opts.TwoType, err) - } - - // And we're done. 
- return nil -} - -func (m *Meta) backendMigrateEmptyConfirm(one, two state.State, opts *backendMigrateOpts) (bool, error) { - inputOpts := &terraform.InputOpts{ - Id: "backend-migrate-copy-to-empty", - Query: fmt.Sprintf( - "Do you want to copy state from %q to %q?", - opts.OneType, opts.TwoType), - Description: fmt.Sprintf( - strings.TrimSpace(inputBackendMigrateEmpty), - opts.OneType, opts.TwoType), - } - - // Confirm with the user that the copy should occur - for { - v, err := m.UIInput().Input(inputOpts) - if err != nil { - return false, fmt.Errorf( - "Error asking for state copy action: %s", err) - } - - switch strings.ToLower(v) { - case "no": - return false, nil - - case "yes": - return true, nil - } - } -} - -func (m *Meta) backendMigrateNonEmptyConfirm( - stateOne, stateTwo state.State, opts *backendMigrateOpts) (bool, error) { - // We need to grab both states so we can write them to a file - one := stateOne.State() - two := stateTwo.State() - - // Save both to a temporary - td, err := ioutil.TempDir("", "terraform") - if err != nil { - return false, fmt.Errorf("Error creating temporary directory: %s", err) - } - defer os.RemoveAll(td) - - // Helper to write the state - saveHelper := func(n, path string, s *terraform.State) error { - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - - return terraform.WriteState(s, f) - } - - // Write the states - onePath := filepath.Join(td, fmt.Sprintf("1-%s.tfstate", opts.OneType)) - twoPath := filepath.Join(td, fmt.Sprintf("2-%s.tfstate", opts.TwoType)) - if err := saveHelper(opts.OneType, onePath, one); err != nil { - return false, fmt.Errorf("Error saving temporary state: %s", err) - } - if err := saveHelper(opts.TwoType, twoPath, two); err != nil { - return false, fmt.Errorf("Error saving temporary state: %s", err) - } - - // Ask for confirmation - inputOpts := &terraform.InputOpts{ - Id: "backend-migrate-to-backend", - Query: fmt.Sprintf( - "Do you want to copy state from %q to %q?", 
- opts.OneType, opts.TwoType), - Description: fmt.Sprintf( - strings.TrimSpace(inputBackendMigrateNonEmpty), - opts.OneType, opts.TwoType, onePath, twoPath), - } - - // Confirm with the user that the copy should occur - for { - v, err := m.UIInput().Input(inputOpts) - if err != nil { - return false, fmt.Errorf( - "Error asking for state copy action: %s", err) - } - - switch strings.ToLower(v) { - case "no": - return false, nil - - case "yes": - return true, nil - } - } -} - -type backendMigrateOpts struct { - OneType, TwoType string - One, Two backend.Backend - - // Fields below are set internally when migrate is called - - oneEnv string // source env - twoEnv string // dest env - force bool // if true, won't ask for confirmation -} - -const errMigrateLoadStates = ` -Error inspecting state in %q: %s - -Prior to changing backends, Terraform inspects the source and destination -states to determine what kind of migration steps need to be taken, if any. -Terraform failed to load the states. The data in both the source and the -destination remain unmodified. Please resolve the above error and try again. -` - -const errMigrateSingleLoadDefault = ` -Error loading state from %q: %s - -Terraform failed to load the default state from %[1]q. -State migration cannot occur unless the state can be loaded. Backend -modification and state migration has been aborted. The state in both the -source and the destination remain unmodified. Please resolve the -above error and try again. -` - -const errMigrateMulti = ` -Error migrating the workspace %q from %q to %q: - -%s - -Terraform copies workspaces in alphabetical order. Any workspaces -alphabetically earlier than this one have been copied. Any workspaces -later than this haven't been modified in the destination. No workspaces -in the source state have been modified. - -Please resolve the error above and run the initialization command again. -This will attempt to copy (with permission) all workspaces again. 
-` - -const errBackendStateCopy = ` -Error copying state from %q to %q: %s - -The state in %[1]q remains intact and unmodified. Please resolve the -error above and try again. -` - -const inputBackendMigrateEmpty = ` -Pre-existing state was found in %q while migrating to %q. No existing -state was found in %[2]q. Do you want to copy the state from %[1]q to -%[2]q? Enter "yes" to copy and "no" to start with an empty state. -` - -const inputBackendMigrateNonEmpty = ` -Pre-existing state was found in %q while migrating to %q. An existing -non-empty state exists in %[2]q. The two states have been saved to temporary -files that will be removed after responding to this query. - -One (%[1]q): %[3]s -Two (%[2]q): %[4]s - -Do you want to copy the state from %[1]q to %[2]q? Enter "yes" to copy -and "no" to start with the existing state in %[2]q. -` - -const inputBackendMigrateMultiToSingle = ` -The existing backend %[1]q supports workspaces and you currently are -using more than one. The target backend %[2]q doesn't support workspaces. -If you continue, Terraform will offer to copy your current workspace -%[3]q to the default workspace in the target. Your existing workspaces -in the source backend won't be modified. If you want to switch workspaces, -back them up, or cancel altogether, answer "no" and Terraform will abort. -` - -const inputBackendMigrateMultiToMulti = ` -Both the existing backend %[1]q and the target backend %[2]q support -workspaces. When migrating between backends, Terraform will copy all -workspaces (with the same names). THIS WILL OVERWRITE any conflicting -states in the destination. - -Terraform initialization doesn't currently migrate only select workspaces. -If you want to migrate a select number of workspaces, you must manually -pull and push those states. - -If you answer "yes", Terraform will migrate all states. If you answer -"no", Terraform will abort. 
-` diff --git a/vendor/github.com/hashicorp/terraform/command/meta_new.go b/vendor/github.com/hashicorp/terraform/command/meta_new.go deleted file mode 100644 index 9a935a3ca00..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/meta_new.go +++ /dev/null @@ -1,175 +0,0 @@ -package command - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strconv" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/terraform/tfdiags" -) - -// NOTE: Temporary file until this branch is cleaned up. - -// Input returns whether or not input asking is enabled. -func (m *Meta) Input() bool { - if test || !m.input { - return false - } - - if envVar := os.Getenv(InputModeEnvVar); envVar != "" { - if v, err := strconv.ParseBool(envVar); err == nil && !v { - return false - } - } - - return true -} - -// Module loads the module tree for the given root path. -// -// It expects the modules to already be downloaded. This will never -// download any modules. -// -// The configuration is validated before returning, so the returned diagnostics -// may contain warnings and/or errors. If the diagnostics contains only -// warnings, the caller may treat the returned module.Tree as valid after -// presenting the warnings to the user. 
-func (m *Meta) Module(path string) (*module.Tree, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - mod, err := module.NewTreeModule("", path) - if err != nil { - // Check for the error where we have no config files - if errwrap.ContainsType(err, new(config.ErrNoConfigsFound)) { - return nil, nil - } - - diags = diags.Append(err) - return nil, diags - } - - err = mod.Load(m.moduleStorage(m.DataDir(), module.GetModeNone)) - if err != nil { - diags = diags.Append(errwrap.Wrapf("Error loading modules: {{err}}", err)) - return nil, diags - } - - diags = diags.Append(mod.Validate()) - - return mod, diags -} - -// Config loads the root config for the path specified. Path may be a directory -// or file. The absence of configuration is not an error and returns a nil Config. -func (m *Meta) Config(path string) (*config.Config, error) { - // If no explicit path was given then it is okay for there to be - // no backend configuration found. - emptyOk := path == "" - - // If we had no path set, it is an error. We can't initialize unset - if path == "" { - path = "." - } - - // Expand the path - if !filepath.IsAbs(path) { - var err error - path, err = filepath.Abs(path) - if err != nil { - return nil, fmt.Errorf( - "Error expanding path to backend config %q: %s", path, err) - } - } - - log.Printf("[DEBUG] command: loading backend config file: %s", path) - - // We first need to determine if we're loading a file or a directory. - fi, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) && emptyOk { - log.Printf( - "[INFO] command: backend config not found, returning nil: %s", - path) - return nil, nil - } - - return nil, err - } - - var f func(string) (*config.Config, error) = config.LoadFile - if fi.IsDir() { - f = config.LoadDir - } - - // Load the configuration - c, err := f(path) - if err != nil { - // Check for the error where we have no config files and return nil - // as the configuration type. 
- if errwrap.ContainsType(err, new(config.ErrNoConfigsFound)) { - log.Printf( - "[INFO] command: backend config not found, returning nil: %s", - path) - return nil, nil - } - - return nil, err - } - - return c, nil -} - -// Plan returns the plan for the given path. -// -// This only has an effect if the path itself looks like a plan. -// If error is nil and the plan is nil, then the path didn't look like -// a plan. -// -// Error will be non-nil if path looks like a plan and loading the plan -// failed. -func (m *Meta) Plan(path string) (*terraform.Plan, error) { - // Open the path no matter if its a directory or file - f, err := os.Open(path) - defer f.Close() - if err != nil { - return nil, fmt.Errorf( - "Failed to load Terraform configuration or plan: %s", err) - } - - // Stat it so we can check if its a directory - fi, err := f.Stat() - if err != nil { - return nil, fmt.Errorf( - "Failed to load Terraform configuration or plan: %s", err) - } - - // If this path is a directory, then it can't be a plan. Not an error. - if fi.IsDir() { - return nil, nil - } - - // Read the plan - p, err := terraform.ReadPlan(f) - if err != nil { - return nil, err - } - - // We do a validation here that seems odd but if any plan is given, - // we must not have set any extra variables. The plan itself contains - // the variables and those aren't overwritten. - if len(m.variables) > 0 { - return nil, fmt.Errorf( - "You can't set variables with the '-var' or '-var-file' flag\n" + - "when you're applying a plan file. The variables used when\n" + - "the plan was created will be used. 
If you wish to use different\n" + - "variable values, create a new plan file.") - } - - return p, nil -} diff --git a/vendor/github.com/hashicorp/terraform/command/output.go b/vendor/github.com/hashicorp/terraform/command/output.go deleted file mode 100644 index 8677858dac4..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/output.go +++ /dev/null @@ -1,292 +0,0 @@ -package command - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "sort" - "strings" -) - -// OutputCommand is a Command implementation that reads an output -// from a Terraform state and prints it. -type OutputCommand struct { - Meta -} - -func (c *OutputCommand) Run(args []string) int { - args, err := c.Meta.process(args, false) - if err != nil { - return 1 - } - - var module string - var jsonOutput bool - cmdFlags := flag.NewFlagSet("output", flag.ContinueOnError) - cmdFlags.BoolVar(&jsonOutput, "json", false, "json") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - cmdFlags.StringVar(&module, "module", "", "module") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - args = cmdFlags.Args() - if len(args) > 1 { - c.Ui.Error( - "The output command expects exactly one argument with the name\n" + - "of an output variable or no arguments to show all outputs.\n") - cmdFlags.Usage() - return 1 - } - - name := "" - if len(args) > 0 { - name = args[0] - } - - // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - env := c.Workspace() - - // Get the state - stateStore, err := b.State(env) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - if err := stateStore.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - if module == "" { - module = "root" - } else { - module = "root." 
+ module - } - - // Get the proper module we want to get outputs for - modPath := strings.Split(module, ".") - - state := stateStore.State() - mod := state.ModuleByPath(modPath) - if mod == nil { - c.Ui.Error(fmt.Sprintf( - "The module %s could not be found. There is nothing to output.", - module)) - return 1 - } - - if state.Empty() || len(mod.Outputs) == 0 { - c.Ui.Error( - "The state file either has no outputs defined, or all the defined\n" + - "outputs are empty. Please define an output in your configuration\n" + - "with the `output` keyword and run `terraform refresh` for it to\n" + - "become available. If you are using interpolation, please verify\n" + - "the interpolated value is not empty. You can use the \n" + - "`terraform console` command to assist.") - return 1 - } - - if name == "" { - if jsonOutput { - jsonOutputs, err := json.MarshalIndent(mod.Outputs, "", " ") - if err != nil { - return 1 - } - - c.Ui.Output(string(jsonOutputs)) - return 0 - } else { - c.Ui.Output(outputsAsString(state, modPath, nil, false)) - return 0 - } - } - - v, ok := mod.Outputs[name] - if !ok { - c.Ui.Error(fmt.Sprintf( - "The output variable requested could not be found in the state\n" + - "file. 
If you recently added this to your configuration, be\n" + - "sure to run `terraform apply`, since the state won't be updated\n" + - "with new output variables until that command is run.")) - return 1 - } - - if jsonOutput { - jsonOutputs, err := json.MarshalIndent(v, "", " ") - if err != nil { - return 1 - } - - c.Ui.Output(string(jsonOutputs)) - } else { - switch output := v.Value.(type) { - case string: - c.Ui.Output(output) - return 0 - case []interface{}: - c.Ui.Output(formatListOutput("", "", output)) - return 0 - case map[string]interface{}: - c.Ui.Output(formatMapOutput("", "", output)) - return 0 - default: - c.Ui.Error(fmt.Sprintf("Unknown output type: %T", v.Type)) - return 1 - } - } - - return 0 -} - -func formatNestedList(indent string, outputList []interface{}) string { - outputBuf := new(bytes.Buffer) - outputBuf.WriteString(fmt.Sprintf("%s[", indent)) - - lastIdx := len(outputList) - 1 - - for i, value := range outputList { - outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, " ", value)) - if i != lastIdx { - outputBuf.WriteString(",") - } - } - - outputBuf.WriteString(fmt.Sprintf("\n%s]", indent)) - return strings.TrimPrefix(outputBuf.String(), "\n") -} - -func formatListOutput(indent, outputName string, outputList []interface{}) string { - keyIndent := "" - - outputBuf := new(bytes.Buffer) - - if outputName != "" { - outputBuf.WriteString(fmt.Sprintf("%s%s = [", indent, outputName)) - keyIndent = " " - } - - lastIdx := len(outputList) - 1 - - for i, value := range outputList { - switch typedValue := value.(type) { - case string: - outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, keyIndent, value)) - case []interface{}: - outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent, - formatNestedList(indent+keyIndent, typedValue))) - case map[string]interface{}: - outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent, - formatNestedMap(indent+keyIndent, typedValue))) - } - - if lastIdx != i { - outputBuf.WriteString(",") - } - } - - if outputName != "" 
{ - if len(outputList) > 0 { - outputBuf.WriteString(fmt.Sprintf("\n%s]", indent)) - } else { - outputBuf.WriteString("]") - } - } - - return strings.TrimPrefix(outputBuf.String(), "\n") -} - -func formatNestedMap(indent string, outputMap map[string]interface{}) string { - ks := make([]string, 0, len(outputMap)) - for k, _ := range outputMap { - ks = append(ks, k) - } - sort.Strings(ks) - - outputBuf := new(bytes.Buffer) - outputBuf.WriteString(fmt.Sprintf("%s{", indent)) - - lastIdx := len(outputMap) - 1 - for i, k := range ks { - v := outputMap[k] - outputBuf.WriteString(fmt.Sprintf("\n%s%s = %v", indent+" ", k, v)) - - if lastIdx != i { - outputBuf.WriteString(",") - } - } - - outputBuf.WriteString(fmt.Sprintf("\n%s}", indent)) - - return strings.TrimPrefix(outputBuf.String(), "\n") -} - -func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string { - ks := make([]string, 0, len(outputMap)) - for k, _ := range outputMap { - ks = append(ks, k) - } - sort.Strings(ks) - - keyIndent := "" - - outputBuf := new(bytes.Buffer) - if outputName != "" { - outputBuf.WriteString(fmt.Sprintf("%s%s = {", indent, outputName)) - keyIndent = " " - } - - for _, k := range ks { - v := outputMap[k] - outputBuf.WriteString(fmt.Sprintf("\n%s%s%s = %v", indent, keyIndent, k, v)) - } - - if outputName != "" { - if len(outputMap) > 0 { - outputBuf.WriteString(fmt.Sprintf("\n%s}", indent)) - } else { - outputBuf.WriteString("}") - } - } - - return strings.TrimPrefix(outputBuf.String(), "\n") -} - -func (c *OutputCommand) Help() string { - helpText := ` -Usage: terraform output [options] [NAME] - - Reads an output variable from a Terraform state file and prints - the value. With no additional arguments, output will display all - the outputs for the root module. If NAME is not specified, all - outputs are printed. - -Options: - - -state=path Path to the state file to read. Defaults to - "terraform.tfstate". 
- - -no-color If specified, output won't contain any color. - - -module=name If specified, returns the outputs for a - specific module - - -json If specified, machine readable output will be - printed in JSON format - -` - return strings.TrimSpace(helpText) -} - -func (c *OutputCommand) Synopsis() string { - return "Read an output from a state file" -} diff --git a/vendor/github.com/hashicorp/terraform/command/plan.go b/vendor/github.com/hashicorp/terraform/command/plan.go deleted file mode 100644 index ec882b63937..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/plan.go +++ /dev/null @@ -1,217 +0,0 @@ -package command - -import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/tfdiags" -) - -// PlanCommand is a Command implementation that compares a Terraform -// configuration to an actual infrastructure and shows the differences. 
-type PlanCommand struct { - Meta -} - -func (c *PlanCommand) Run(args []string) int { - var destroy, refresh, detailed bool - var outPath string - var moduleDepth int - - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - cmdFlags := c.Meta.flagSet("plan") - cmdFlags.BoolVar(&destroy, "destroy", false, "destroy") - cmdFlags.BoolVar(&refresh, "refresh", true, "refresh") - c.addModuleDepthFlag(cmdFlags, &moduleDepth) - cmdFlags.StringVar(&outPath, "out", "", "path") - cmdFlags.IntVar( - &c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism") - cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") - cmdFlags.BoolVar(&detailed, "detailed-exitcode", false, "detailed-exitcode") - cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") - cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - configPath, err := ModulePath(cmdFlags.Args()) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Check for user-supplied plugin path - if c.pluginPath, err = c.loadPluginPath(); err != nil { - c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) - return 1 - } - - // Check if the path is a plan - plan, err := c.Plan(configPath) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - if plan != nil { - // Disable refreshing no matter what since we only want to show the plan - refresh = false - - // Set the config path to empty for backend loading - configPath = "" - } - - var diags tfdiags.Diagnostics - - // Load the module if we don't have one yet (not running from plan) - var mod *module.Tree - if plan == nil { - var modDiags tfdiags.Diagnostics - mod, modDiags = c.Module(configPath) - diags = diags.Append(modDiags) - if modDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - } - - var conf *config.Config - if mod != nil { - conf = mod.Config() - } 
- // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: conf, - Plan: plan, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // Build the operation - opReq := c.Operation() - opReq.Destroy = destroy - opReq.Module = mod - opReq.Plan = plan - opReq.PlanRefresh = refresh - opReq.PlanOutPath = outPath - opReq.Type = backend.OperationTypePlan - - // Perform the operation - ctx, ctxCancel := context.WithCancel(context.Background()) - defer ctxCancel() - - op, err := b.Operation(ctx, opReq) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error starting operation: %s", err)) - return 1 - } - - select { - case <-c.ShutdownCh: - // Cancel our context so we can start gracefully exiting - ctxCancel() - - // Notify the user - c.Ui.Output(outputInterrupt) - - // Still get the result, since there is still one - select { - case <-c.ShutdownCh: - c.Ui.Error( - "Two interrupts received. Exiting immediately") - return 1 - case <-op.Done(): - } - case <-op.Done(): - if err := op.Err; err != nil { - diags = diags.Append(err) - } - } - - c.showDiagnostics(diags) - if diags.HasErrors() { - return 1 - } - - if detailed && !op.PlanEmpty { - return 2 - } - - return 0 -} - -func (c *PlanCommand) Help() string { - helpText := ` -Usage: terraform plan [options] [DIR-OR-PLAN] - - Generates an execution plan for Terraform. - - This execution plan can be reviewed prior to running apply to get a - sense for what Terraform will do. Optionally, the plan can be saved to - a Terraform plan file, and apply can take this plan file to execute - this plan exactly. - - If a saved plan is passed as an argument, this command will output - the saved plan contents. It will not modify the given plan. - -Options: - - -destroy If set, a plan will be generated to destroy all resources - managed by the given configuration and state. - - -detailed-exitcode Return detailed exit codes when the command exits. 
This - will change the meaning of exit codes to: - 0 - Succeeded, diff is empty (no changes) - 1 - Errored - 2 - Succeeded, there is a diff - - -input=true Ask for input for variables if not directly set. - - -lock=true Lock the state file when locking is supported. - - -lock-timeout=0s Duration to retry a state lock. - - -module-depth=n Specifies the depth of modules to show in the output. - This does not affect the plan itself, only the output - shown. By default, this is -1, which will expand all. - - -no-color If specified, output won't contain any color. - - -out=path Write a plan file to the given path. This can be used as - input to the "apply" command. - - -parallelism=n Limit the number of concurrent operations. Defaults to 10. - - -refresh=true Update state prior to checking for differences. - - -state=statefile Path to a Terraform state file to use to look - up Terraform-managed resources. By default it will - use the state "terraform.tfstate" if it exists. - - -target=resource Resource to target. Operation will be limited to this - resource and its dependencies. This flag can be used - multiple times. - - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. - - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" or any ".auto.tfvars" - files are present, they will be automatically loaded. 
-` - return strings.TrimSpace(helpText) -} - -func (c *PlanCommand) Synopsis() string { - return "Generate and show an execution plan" -} diff --git a/vendor/github.com/hashicorp/terraform/command/plugins.go b/vendor/github.com/hashicorp/terraform/command/plugins.go deleted file mode 100644 index 5eba6b6988e..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/plugins.go +++ /dev/null @@ -1,381 +0,0 @@ -package command - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - - plugin "github.com/hashicorp/go-plugin" - terraformProvider "github.com/hashicorp/terraform/builtin/providers/terraform" - tfplugin "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/plugin/discovery" - "github.com/hashicorp/terraform/terraform" - "github.com/kardianos/osext" -) - -// multiVersionProviderResolver is an implementation of -// terraform.ResourceProviderResolver that matches the given version constraints -// against a set of versioned provider plugins to find the newest version of -// each that satisfies the given constraints. -type multiVersionProviderResolver struct { - Available discovery.PluginMetaSet - - // Internal is a map that overrides the usual plugin selection process - // for internal plugins. These plugins do not support version constraints - // (will produce an error if one is set). This should be used only in - // exceptional circumstances since it forces the provider's release - // schedule to be tied to that of Terraform Core. 
- Internal map[string]terraform.ResourceProviderFactory -} - -func choosePlugins(avail discovery.PluginMetaSet, internal map[string]terraform.ResourceProviderFactory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta { - candidates := avail.ConstrainVersions(reqd) - ret := map[string]discovery.PluginMeta{} - for name, metas := range candidates { - // If the provider is in our internal map then we ignore any - // discovered plugins for it since these are dealt with separately. - if _, isInternal := internal[name]; isInternal { - continue - } - - if len(metas) == 0 { - continue - } - ret[name] = metas.Newest() - } - return ret -} - -func (r *multiVersionProviderResolver) ResolveProviders( - reqd discovery.PluginRequirements, -) (map[string]terraform.ResourceProviderFactory, []error) { - factories := make(map[string]terraform.ResourceProviderFactory, len(reqd)) - var errs []error - - chosen := choosePlugins(r.Available, r.Internal, reqd) - for name, req := range reqd { - if factory, isInternal := r.Internal[name]; isInternal { - if !req.Versions.Unconstrained() { - errs = append(errs, fmt.Errorf("provider.%s: this provider is built in to Terraform and so it does not support version constraints", name)) - continue - } - factories[name] = factory - continue - } - - if newest, available := chosen[name]; available { - digest, err := newest.SHA256() - if err != nil { - errs = append(errs, fmt.Errorf("provider.%s: failed to load plugin to verify its signature: %s", name, err)) - continue - } - if !reqd[name].AcceptsSHA256(digest) { - errs = append(errs, fmt.Errorf("provider.%s: new or changed plugin executable", name)) - continue - } - - client := tfplugin.Client(newest) - factories[name] = providerFactory(client) - } else { - msg := fmt.Sprintf("provider.%s: no suitable version installed", name) - - required := req.Versions.String() - // no version is unconstrained - if required == "" { - required = "(any version)" - } - - foundVersions := []string{} - for 
meta := range r.Available.WithName(name) { - foundVersions = append(foundVersions, fmt.Sprintf("%q", meta.Version)) - } - - found := "none" - if len(foundVersions) > 0 { - found = strings.Join(foundVersions, ", ") - } - - msg += fmt.Sprintf("\n version requirements: %q\n versions installed: %s", required, found) - - errs = append(errs, errors.New(msg)) - } - } - - return factories, errs -} - -// store the user-supplied path for plugin discovery -func (m *Meta) storePluginPath(pluginPath []string) error { - if len(pluginPath) == 0 { - return nil - } - - js, err := json.MarshalIndent(pluginPath, "", " ") - if err != nil { - return err - } - - // if this fails, so will WriteFile - os.MkdirAll(m.DataDir(), 0755) - - return ioutil.WriteFile(filepath.Join(m.DataDir(), PluginPathFile), js, 0644) -} - -// Load the user-defined plugin search path into Meta.pluginPath if the file -// exists. -func (m *Meta) loadPluginPath() ([]string, error) { - js, err := ioutil.ReadFile(filepath.Join(m.DataDir(), PluginPathFile)) - if os.IsNotExist(err) { - return nil, nil - } - - if err != nil { - return nil, err - } - - var pluginPath []string - if err := json.Unmarshal(js, &pluginPath); err != nil { - return nil, err - } - - return pluginPath, nil -} - -// the default location for automatically installed plugins -func (m *Meta) pluginDir() string { - return filepath.Join(m.DataDir(), "plugins", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) -} - -// pluginDirs return a list of directories to search for plugins. -// -// Earlier entries in this slice get priority over later when multiple copies -// of the same plugin version are found, but newer versions always override -// older versions where both satisfy the provider version constraints. 
-func (m *Meta) pluginDirs(includeAutoInstalled bool) []string { - // user defined paths take precedence - if len(m.pluginPath) > 0 { - return m.pluginPath - } - - // When searching the following directories, earlier entries get precedence - // if the same plugin version is found twice, but newer versions will - // always get preference below regardless of where they are coming from. - // TODO: Add auto-install dir, default vendor dir and optional override - // vendor dir(s). - dirs := []string{"."} - - // Look in the same directory as the Terraform executable. - // If found, this replaces what we found in the config path. - exePath, err := osext.Executable() - if err != nil { - log.Printf("[ERROR] Error discovering exe directory: %s", err) - } else { - dirs = append(dirs, filepath.Dir(exePath)) - } - - // add the user vendor directory - dirs = append(dirs, DefaultPluginVendorDir) - - if includeAutoInstalled { - dirs = append(dirs, m.pluginDir()) - } - dirs = append(dirs, m.GlobalPluginDirs...) - - return dirs -} - -func (m *Meta) pluginCache() discovery.PluginCache { - dir := m.PluginCacheDir - if dir == "" { - return nil // cache disabled - } - - dir = filepath.Join(dir, pluginMachineName) - - return discovery.NewLocalPluginCache(dir) -} - -// providerPluginSet returns the set of valid providers that were discovered in -// the defined search paths. -func (m *Meta) providerPluginSet() discovery.PluginMetaSet { - plugins := discovery.FindPlugins("provider", m.pluginDirs(true)) - - // Add providers defined in the legacy .terraformrc, - if m.PluginOverrides != nil { - plugins = plugins.OverridePaths(m.PluginOverrides.Providers) - } - - plugins, _ = plugins.ValidateVersions() - - for p := range plugins { - log.Printf("[DEBUG] found valid plugin: %q", p.Name) - } - - return plugins -} - -// providerPluginAutoInstalledSet returns the set of providers that exist -// within the auto-install directory. 
-func (m *Meta) providerPluginAutoInstalledSet() discovery.PluginMetaSet { - plugins := discovery.FindPlugins("provider", []string{m.pluginDir()}) - plugins, _ = plugins.ValidateVersions() - - for p := range plugins { - log.Printf("[DEBUG] found valid plugin: %q", p.Name) - } - - return plugins -} - -// providerPluginManuallyInstalledSet returns the set of providers that exist -// in all locations *except* the auto-install directory. -func (m *Meta) providerPluginManuallyInstalledSet() discovery.PluginMetaSet { - plugins := discovery.FindPlugins("provider", m.pluginDirs(false)) - - // Add providers defined in the legacy .terraformrc, - if m.PluginOverrides != nil { - plugins = plugins.OverridePaths(m.PluginOverrides.Providers) - } - - plugins, _ = plugins.ValidateVersions() - - for p := range plugins { - log.Printf("[DEBUG] found valid plugin: %q", p.Name) - } - - return plugins -} - -func (m *Meta) providerResolver() terraform.ResourceProviderResolver { - return &multiVersionProviderResolver{ - Available: m.providerPluginSet(), - Internal: m.internalProviders(), - } -} - -func (m *Meta) internalProviders() map[string]terraform.ResourceProviderFactory { - return map[string]terraform.ResourceProviderFactory{ - "terraform": func() (terraform.ResourceProvider, error) { - return terraformProvider.Provider(), nil - }, - } -} - -// filter the requirements returning only the providers that we can't resolve -func (m *Meta) missingPlugins(avail discovery.PluginMetaSet, reqd discovery.PluginRequirements) discovery.PluginRequirements { - missing := make(discovery.PluginRequirements) - - for n, r := range reqd { - log.Printf("[DEBUG] plugin requirements: %q=%q", n, r.Versions) - } - - candidates := avail.ConstrainVersions(reqd) - - for name, versionSet := range reqd { - if metas := candidates[name]; metas.Count() == 0 { - missing[name] = versionSet - } - } - - return missing -} - -func (m *Meta) provisionerFactories() map[string]terraform.ResourceProvisionerFactory { - dirs := 
m.pluginDirs(true) - plugins := discovery.FindPlugins("provisioner", dirs) - plugins, _ = plugins.ValidateVersions() - - // For now our goal is to just find the latest version of each plugin - // we have on the system. All provisioners should be at version 0.0.0 - // currently, so there should actually only be one instance of each plugin - // name here, even though the discovery interface forces us to pretend - // that might not be true. - - factories := make(map[string]terraform.ResourceProvisionerFactory) - - // Wire up the internal provisioners first. These might be overridden - // by discovered provisioners below. - for name := range InternalProvisioners { - client, err := internalPluginClient("provisioner", name) - if err != nil { - log.Printf("[WARN] failed to build command line for internal plugin %q: %s", name, err) - continue - } - factories[name] = provisionerFactory(client) - } - - byName := plugins.ByName() - for name, metas := range byName { - // Since we validated versions above and we partitioned the sets - // by name, we're guaranteed that the metas in our set all have - // valid versions and that there's at least one meta. - newest := metas.Newest() - client := tfplugin.Client(newest) - factories[name] = provisionerFactory(client) - } - - return factories -} - -func internalPluginClient(kind, name string) (*plugin.Client, error) { - cmdLine, err := BuildPluginCommandString(kind, name) - if err != nil { - return nil, err - } - - // See the docstring for BuildPluginCommandString for why we need to do - // this split here. 
- cmdArgv := strings.Split(cmdLine, TFSPACE) - - cfg := &plugin.ClientConfig{ - Cmd: exec.Command(cmdArgv[0], cmdArgv[1:]...), - HandshakeConfig: tfplugin.Handshake, - Managed: true, - Plugins: tfplugin.PluginMap, - } - - return plugin.NewClient(cfg), nil -} - -func providerFactory(client *plugin.Client) terraform.ResourceProviderFactory { - return func() (terraform.ResourceProvider, error) { - // Request the RPC client so we can get the provider - // so we can build the actual RPC-implemented provider. - rpcClient, err := client.Client() - if err != nil { - return nil, err - } - - raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName) - if err != nil { - return nil, err - } - - return raw.(terraform.ResourceProvider), nil - } -} - -func provisionerFactory(client *plugin.Client) terraform.ResourceProvisionerFactory { - return func() (terraform.ResourceProvisioner, error) { - // Request the RPC client so we can get the provisioner - // so we can build the actual RPC-implemented provisioner. - rpcClient, err := client.Client() - if err != nil { - return nil, err - } - - raw, err := rpcClient.Dispense(tfplugin.ProvisionerPluginName) - if err != nil { - return nil, err - } - - return raw.(terraform.ResourceProvisioner), nil - } -} diff --git a/vendor/github.com/hashicorp/terraform/command/plugins_lock.go b/vendor/github.com/hashicorp/terraform/command/plugins_lock.go deleted file mode 100644 index a1dadf83d24..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/plugins_lock.go +++ /dev/null @@ -1,86 +0,0 @@ -package command - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" -) - -func (m *Meta) providerPluginsLock() *pluginSHA256LockFile { - return &pluginSHA256LockFile{ - Filename: filepath.Join(m.pluginDir(), "lock.json"), - } -} - -type pluginSHA256LockFile struct { - Filename string -} - -// Read loads the lock information from the file and returns it. 
If the file -// cannot be read, an empty map is returned to indicate that _no_ providers -// are acceptable, since the user must run "terraform init" to lock some -// providers before a context can be created. -func (pf *pluginSHA256LockFile) Read() map[string][]byte { - // Returning an empty map is different than nil because it causes - // us to reject all plugins as uninitialized, rather than applying no - // constraints at all. - // - // We don't surface any specific errors here because we want it to all - // roll up into our more-user-friendly error that appears when plugin - // constraint verification fails during context creation. - digests := make(map[string][]byte) - - buf, err := ioutil.ReadFile(pf.Filename) - if err != nil { - // This is expected if the user runs any context-using command before - // running "terraform init". - log.Printf("[INFO] Failed to read plugin lock file %s: %s", pf.Filename, err) - return digests - } - - var strDigests map[string]string - err = json.Unmarshal(buf, &strDigests) - if err != nil { - // This should never happen unless the user directly edits the file. - log.Printf("[WARNING] Plugin lock file %s failed to parse as JSON: %s", pf.Filename, err) - return digests - } - - for name, strDigest := range strDigests { - var digest []byte - _, err := fmt.Sscanf(strDigest, "%x", &digest) - if err == nil { - digests[name] = digest - } else { - // This should never happen unless the user directly edits the file. - log.Printf("[WARNING] Plugin lock file %s has invalid digest for %q", pf.Filename, name) - } - } - - return digests -} - -// Write persists lock information to disk, where it will be retrieved by -// future calls to Read. This entirely replaces any previous lock information, -// so the given map must be comprehensive. 
-func (pf *pluginSHA256LockFile) Write(digests map[string][]byte) error { - strDigests := map[string]string{} - for name, digest := range digests { - strDigests[name] = fmt.Sprintf("%x", digest) - } - - buf, err := json.MarshalIndent(strDigests, "", " ") - if err != nil { - // should never happen - return fmt.Errorf("failed to serialize plugin lock as JSON: %s", err) - } - - os.MkdirAll( - filepath.Dir(pf.Filename), os.ModePerm, - ) // ignore error since WriteFile below will generate a better one anyway - - return ioutil.WriteFile(pf.Filename, buf, os.ModePerm) -} diff --git a/vendor/github.com/hashicorp/terraform/command/providers.go b/vendor/github.com/hashicorp/terraform/command/providers.go deleted file mode 100644 index 49d43962ea3..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/providers.go +++ /dev/null @@ -1,133 +0,0 @@ -package command - -import ( - "fmt" - "sort" - - "github.com/hashicorp/terraform/moduledeps" - "github.com/hashicorp/terraform/terraform" - "github.com/xlab/treeprint" -) - -// ProvidersCommand is a Command implementation that prints out information -// about the providers used in the current configuration/state. 
-type ProvidersCommand struct { - Meta -} - -func (c *ProvidersCommand) Help() string { - return providersCommandHelp -} - -func (c *ProvidersCommand) Synopsis() string { - return "Prints a tree of the providers used in the configuration" -} - -func (c *ProvidersCommand) Run(args []string) int { - c.Meta.process(args, false) - - cmdFlags := c.Meta.flagSet("providers") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - configPath, err := ModulePath(cmdFlags.Args()) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Load the config - root, diags := c.Module(configPath) - if diags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - if root == nil { - c.Ui.Error(fmt.Sprintf( - "No configuration files found in the directory: %s\n\n"+ - "This command requires configuration to run.", - configPath)) - return 1 - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: root.Config(), - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // Get the state - env := c.Workspace() - state, err := b.State(env) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - if err := state.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - s := state.State() - - depTree := terraform.ModuleTreeDependencies(root, s) - depTree.SortDescendents() - - printRoot := treeprint.New() - providersCommandPopulateTreeNode(printRoot, depTree) - - c.Ui.Output(printRoot.String()) - - c.showDiagnostics(diags) - if diags.HasErrors() { - return 1 - } - - return 0 -} - -func providersCommandPopulateTreeNode(node treeprint.Tree, deps *moduledeps.Module) { - names := make([]string, 0, len(deps.Providers)) - for name := range deps.Providers { - names = append(names, string(name)) - } - sort.Strings(names) - - for _, name := range names { - dep := 
deps.Providers[moduledeps.ProviderInstance(name)] - versionsStr := dep.Constraints.String() - if versionsStr != "" { - versionsStr = " " + versionsStr - } - var reasonStr string - switch dep.Reason { - case moduledeps.ProviderDependencyInherited: - reasonStr = " (inherited)" - case moduledeps.ProviderDependencyFromState: - reasonStr = " (from state)" - } - node.AddNode(fmt.Sprintf("provider.%s%s%s", name, versionsStr, reasonStr)) - } - - for _, child := range deps.Children { - childNode := node.AddBranch(fmt.Sprintf("module.%s", child.Name)) - providersCommandPopulateTreeNode(childNode, child) - } -} - -const providersCommandHelp = ` -Usage: terraform providers [dir] - - Prints out a tree of modules in the referenced configuration annotated with - their provider requirements. - - This provides an overview of all of the provider requirements across all - referenced modules, as an aid to understanding why particular provider - plugins are needed and why particular versions are selected. - -` diff --git a/vendor/github.com/hashicorp/terraform/command/push.go b/vendor/github.com/hashicorp/terraform/command/push.go deleted file mode 100644 index 039696fd38d..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/push.go +++ /dev/null @@ -1,553 +0,0 @@ -package command - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/hashicorp/atlas-go/archive" - "github.com/hashicorp/atlas-go/v1" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/version" -) - -type PushCommand struct { - Meta - - // client is the client to use for the actual push operations. - // If this isn't set, then the Atlas client is used. This should - // really only be set for testing reasons (and is hence not exported). 
- client pushClient -} - -func (c *PushCommand) Run(args []string) int { - var atlasAddress, atlasToken string - var archiveVCS, moduleUpload bool - var name string - var overwrite []string - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - cmdFlags := c.Meta.flagSet("push") - cmdFlags.StringVar(&atlasAddress, "atlas-address", "", "") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - cmdFlags.StringVar(&atlasToken, "token", "", "") - cmdFlags.BoolVar(&moduleUpload, "upload-modules", true, "") - cmdFlags.StringVar(&name, "name", "", "") - cmdFlags.BoolVar(&archiveVCS, "vcs", true, "") - cmdFlags.Var((*FlagStringSlice)(&overwrite), "overwrite", "") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - // Make a map of the set values - overwriteMap := make(map[string]struct{}, len(overwrite)) - for _, v := range overwrite { - overwriteMap[v] = struct{}{} - } - - // This is a map of variables specifically from the CLI that we want to overwrite. - // We need this because there is a chance that the user is trying to modify - // a variable we don't see in our context, but which exists in this Terraform - // Enterprise workspace. - cliVars := make(map[string]string) - for k, v := range c.variables { - if _, ok := overwriteMap[k]; ok { - if val, ok := v.(string); ok { - cliVars[k] = val - } else { - c.Ui.Error(fmt.Sprintf("Error reading value for variable: %s", k)) - return 1 - } - } - } - - // Get the path to the configuration depending on the args. 
- configPath, err := ModulePath(cmdFlags.Args()) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Check if the path is a plan - plan, err := c.Plan(configPath) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - if plan != nil { - c.Ui.Error( - "A plan file cannot be given as the path to the configuration.\n" + - "A path to a module (directory with configuration) must be given.") - return 1 - } - - // Load the module - mod, diags := c.Module(configPath) - if diags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - if mod == nil { - c.Ui.Error(fmt.Sprintf( - "No configuration files found in the directory: %s\n\n"+ - "This command requires configuration to run.", - configPath)) - return 1 - } - - var conf *config.Config - if mod != nil { - conf = mod.Config() - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: conf, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // We require a non-local backend - if c.IsLocalBackend(b) { - c.Ui.Error( - "A remote backend is not enabled. For Atlas to run Terraform\n" + - "for you, remote state must be used and configured. Remote \n" + - "state via any backend is accepted, not just Atlas. 
To configure\n" + - "a backend, please see the documentation at the URL below:\n\n" + - "https://www.terraform.io/docs/state/remote.html") - return 1 - } - - // We require a local backend - local, ok := b.(backend.Local) - if !ok { - c.Ui.Error(ErrUnsupportedLocalOp) - return 1 - } - - // Build the operation - opReq := c.Operation() - opReq.Module = mod - opReq.Plan = plan - - // Get the context - ctx, _, err := local.Context(opReq) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Get the configuration - config := ctx.Module().Config() - if name == "" { - if config.Atlas == nil || config.Atlas.Name == "" { - c.Ui.Error( - "The name of this Terraform configuration in Atlas must be\n" + - "specified within your configuration or the command-line. To\n" + - "set it on the command-line, use the `-name` parameter.") - return 1 - } - name = config.Atlas.Name - } - - // Initialize the client if it isn't given. - if c.client == nil { - // Make sure to nil out our client so our token isn't sitting around - defer func() { c.client = nil }() - - // Initialize it to the default client, we set custom settings later - client := atlas.DefaultClient() - if atlasAddress != "" { - client, err = atlas.NewClient(atlasAddress) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error initializing Atlas client: %s", err)) - return 1 - } - } - - client.DefaultHeader.Set(version.Header, version.Version) - - if atlasToken != "" { - client.Token = atlasToken - } - - c.client = &atlasPushClient{Client: client} - } - - // Get the variables we already have in atlas - atlasVars, err := c.client.Get(name) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error looking up previously pushed configuration: %s", err)) - return 1 - } - - // Set remote variables in the context if we don't have a value here. These - // don't have to be correct, it just prevents the Input walk from prompting - // the user for input. 
- ctxVars := ctx.Variables() - atlasVarSentry := "ATLAS_78AC153CA649EAA44815DAD6CBD4816D" - for k, _ := range atlasVars { - if _, ok := ctxVars[k]; !ok { - ctx.SetVariable(k, atlasVarSentry) - } - } - - // Ask for input - if err := ctx.Input(c.InputMode()); err != nil { - c.Ui.Error(fmt.Sprintf( - "Error while asking for variable input:\n\n%s", err)) - return 1 - } - - // Now that we've gone through the input walk, we can be sure we have all - // the variables we're going to get. - // We are going to keep these separate from the atlas variables until - // upload, so we can notify the user which local variables we're sending. - serializedVars, err := tfVars(ctx.Variables()) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "An error has occurred while serializing the variables for uploading:\n"+ - "%s", err)) - return 1 - } - - // Get the absolute path for our data directory, since the Extra field - // value below needs to be absolute. - dataDirAbs, err := filepath.Abs(c.DataDir()) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error while expanding the data directory %q: %s", c.DataDir(), err)) - return 1 - } - - // Build the archiving options, which includes everything it can - // by default according to VCS rules but forcing the data directory. - archiveOpts := &archive.ArchiveOpts{ - VCS: archiveVCS, - Extra: map[string]string{ - DefaultDataDir: archive.ExtraEntryDir, - }, - } - - // Always store the state file in here so we can find state - statePathKey := fmt.Sprintf("%s/%s", DefaultDataDir, DefaultStateFilename) - archiveOpts.Extra[statePathKey] = filepath.Join(dataDirAbs, DefaultStateFilename) - if moduleUpload { - // If we're uploading modules, explicitly add that directory if exists. 
- moduleKey := fmt.Sprintf("%s/%s", DefaultDataDir, "modules") - moduleDir := filepath.Join(dataDirAbs, "modules") - _, err := os.Stat(moduleDir) - if err == nil { - archiveOpts.Extra[moduleKey] = filepath.Join(dataDirAbs, "modules") - } - if err != nil && !os.IsNotExist(err) { - c.Ui.Error(fmt.Sprintf( - "Error checking for module dir %q: %s", moduleDir, err)) - return 1 - } - } else { - // If we're not uploading modules, explicitly exclude add that - archiveOpts.Exclude = append( - archiveOpts.Exclude, - filepath.Join(c.DataDir(), "modules")) - } - - archiveR, err := archive.CreateArchive(configPath, archiveOpts) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "An error has occurred while archiving the module for uploading:\n"+ - "%s", err)) - return 1 - } - - // List of the vars we're uploading to display to the user. - // We always upload all vars from atlas, but only report them if they are overwritten. - var setVars []string - - // variables to upload - var uploadVars []atlas.TFVar - - // first add all the variables we want to send which have been serialized - // from the local context. - for _, sv := range serializedVars { - _, inOverwrite := overwriteMap[sv.Key] - _, inAtlas := atlasVars[sv.Key] - - // We have a variable that's not in atlas, so always send it. - if !inAtlas { - uploadVars = append(uploadVars, sv) - setVars = append(setVars, sv.Key) - } - - // We're overwriting an atlas variable. - // We also want to check that we - // don't send the dummy sentry value back to atlas. This could happen - // if it's specified as an overwrite on the cli, but we didn't set a - // new value. - if inAtlas && inOverwrite && sv.Value != atlasVarSentry { - uploadVars = append(uploadVars, sv) - setVars = append(setVars, sv.Key) - - // remove this value from the atlas vars, because we're going to - // send back the remainder regardless. - delete(atlasVars, sv.Key) - } - } - - // now send back all the existing atlas vars, inserting any overwrites from the cli. 
- for k, av := range atlasVars { - if v, ok := cliVars[k]; ok { - av.Value = v - setVars = append(setVars, k) - } - uploadVars = append(uploadVars, av) - } - - sort.Strings(setVars) - if len(setVars) > 0 { - c.Ui.Output( - "The following variables will be set or overwritten within Atlas from\n" + - "their local values. All other variables are already set within Atlas.\n" + - "If you want to modify the value of a variable, use the Atlas web\n" + - "interface or set it locally and use the -overwrite flag.\n\n") - for _, v := range setVars { - c.Ui.Output(fmt.Sprintf(" * %s", v)) - } - - // Newline - c.Ui.Output("") - } - - // Upsert! - opts := &pushUpsertOptions{ - Name: name, - Archive: archiveR, - Variables: ctx.Variables(), - TFVars: uploadVars, - } - - c.Ui.Output("Uploading Terraform configuration...") - vsn, err := c.client.Upsert(opts) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "An error occurred while uploading the module:\n\n%s", err)) - return 1 - } - - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - "[reset][bold][green]Configuration %q uploaded! (v%d)", - name, vsn))) - - c.showDiagnostics(diags) - if diags.HasErrors() { - return 1 - } - - return 0 -} - -func (c *PushCommand) Help() string { - helpText := ` -Usage: terraform push [options] [DIR] - - Upload this Terraform module to an Atlas server for remote - infrastructure management. - -Options: - - -atlas-address= An alternate address to an Atlas instance. Defaults - to https://atlas.hashicorp.com - - -upload-modules=true If true (default), then the modules are locked at - their current checkout and uploaded completely. This - prevents Atlas from running "terraform get". - - -name= Name of the configuration in Atlas. This can also - be set in the configuration itself. Format is - typically: "username/name". - - -token= Access token to use to upload. If blank or unspecified, - the ATLAS_TOKEN environmental variable will be used. - - -overwrite=foo Variable keys that should overwrite values in Atlas. 
- Otherwise, variables already set in Atlas will overwrite - local values. This flag can be repeated. - - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. - - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" or any ".auto.tfvars" - files are present, they will be automatically loaded. - - -vcs=true If true (default), push will upload only files - committed to your VCS, if detected. - - -no-color If specified, output won't contain any color. - -` - return strings.TrimSpace(helpText) -} - -func sortedKeys(m map[string]interface{}) []string { - var keys []string - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// build the set of TFVars for push -func tfVars(vars map[string]interface{}) ([]atlas.TFVar, error) { - var tfVars []atlas.TFVar - var err error - -RANGE: - for _, k := range sortedKeys(vars) { - v := vars[k] - - var hcl []byte - tfv := atlas.TFVar{Key: k} - - switch v := v.(type) { - case string: - tfv.Value = v - - default: - // everything that's not a string is now HCL encoded - hcl, err = encodeHCL(v) - if err != nil { - break RANGE - } - - tfv.Value = string(hcl) - tfv.IsHCL = true - } - - tfVars = append(tfVars, tfv) - } - - return tfVars, err -} - -func (c *PushCommand) Synopsis() string { - return "Upload this Terraform module to Atlas to run" -} - -// pushClient is implemented internally to control where pushes go. This is -// either to Atlas or a mock for testing. We still return a map to make it -// easier to check for variable existence when filtering the overrides. 
-type pushClient interface { - Get(string) (map[string]atlas.TFVar, error) - Upsert(*pushUpsertOptions) (int, error) -} - -type pushUpsertOptions struct { - Name string - Archive *archive.Archive - Variables map[string]interface{} - TFVars []atlas.TFVar -} - -type atlasPushClient struct { - Client *atlas.Client -} - -func (c *atlasPushClient) Get(name string) (map[string]atlas.TFVar, error) { - user, name, err := atlas.ParseSlug(name) - if err != nil { - return nil, err - } - - version, err := c.Client.TerraformConfigLatest(user, name) - if err != nil { - return nil, err - } - - variables := make(map[string]atlas.TFVar) - - if version == nil { - return variables, nil - } - - // Variables is superseded by TFVars - if version.TFVars == nil { - for k, v := range version.Variables { - variables[k] = atlas.TFVar{Key: k, Value: v} - } - } else { - for _, v := range version.TFVars { - variables[v.Key] = v - } - } - - return variables, nil -} - -func (c *atlasPushClient) Upsert(opts *pushUpsertOptions) (int, error) { - user, name, err := atlas.ParseSlug(opts.Name) - if err != nil { - return 0, err - } - - data := &atlas.TerraformConfigVersion{ - TFVars: opts.TFVars, - } - - version, err := c.Client.CreateTerraformConfigVersion( - user, name, data, opts.Archive, opts.Archive.Size) - if err != nil { - return 0, err - } - - return version, nil -} - -type mockPushClient struct { - File string - - GetCalled bool - GetName string - GetResult map[string]atlas.TFVar - GetError error - - UpsertCalled bool - UpsertOptions *pushUpsertOptions - UpsertVersion int - UpsertError error -} - -func (c *mockPushClient) Get(name string) (map[string]atlas.TFVar, error) { - c.GetCalled = true - c.GetName = name - return c.GetResult, c.GetError -} - -func (c *mockPushClient) Upsert(opts *pushUpsertOptions) (int, error) { - f, err := os.Create(c.File) - if err != nil { - return 0, err - } - defer f.Close() - - data := opts.Archive - size := opts.Archive.Size - if _, err := io.CopyN(f, data, 
size); err != nil { - return 0, err - } - - c.UpsertCalled = true - c.UpsertOptions = opts - return c.UpsertVersion, c.UpsertError -} diff --git a/vendor/github.com/hashicorp/terraform/command/refresh.go b/vendor/github.com/hashicorp/terraform/command/refresh.go deleted file mode 100644 index eec74281ce6..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/refresh.go +++ /dev/null @@ -1,152 +0,0 @@ -package command - -import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/terraform/tfdiags" -) - -// RefreshCommand is a cli.Command implementation that refreshes the state -// file. -type RefreshCommand struct { - Meta -} - -func (c *RefreshCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - cmdFlags := c.Meta.flagSet("refresh") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", 0, "parallelism") - cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") - cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") - cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") - cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - configPath, err := ModulePath(cmdFlags.Args()) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - var diags tfdiags.Diagnostics - - // Load the module - mod, diags := c.Module(configPath) - if diags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - - // Check for user-supplied plugin path - if c.pluginPath, err = c.loadPluginPath(); err != nil { - c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) - return 1 - } - - var conf *config.Config - if mod != nil { - conf 
= mod.Config() - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: conf, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // Build the operation - opReq := c.Operation() - opReq.Type = backend.OperationTypeRefresh - opReq.Module = mod - - // Perform the operation - op, err := b.Operation(context.Background(), opReq) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error starting operation: %s", err)) - return 1 - } - - // Wait for the operation to complete - <-op.Done() - if err := op.Err; err != nil { - diags = diags.Append(err) - } - - c.showDiagnostics(diags) - if diags.HasErrors() { - return 1 - } - - // Output the outputs - if outputs := outputsAsString(op.State, terraform.RootModulePath, nil, true); outputs != "" { - c.Ui.Output(c.Colorize().Color(outputs)) - } - - return 0 -} - -func (c *RefreshCommand) Help() string { - helpText := ` -Usage: terraform refresh [options] [dir] - - Update the state file of your infrastructure with metadata that matches - the physical resources they are tracking. - - This will not modify your infrastructure, but it can modify your - state file to update metadata. This metadata might cause new changes - to occur when you generate a plan or call apply next. - -Options: - - -backup=path Path to backup the existing state file before - modifying. Defaults to the "-state-out" path with - ".backup" extension. Set to "-" to disable backup. - - -input=true Ask for input for variables if not directly set. - - -lock=true Lock the state file when locking is supported. - - -lock-timeout=0s Duration to retry a state lock. - - -no-color If specified, output won't contain any color. - - -state=path Path to read and save state (unless state-out - is specified). Defaults to "terraform.tfstate". - - -state-out=path Path to write updated state file. By default, the - "-state" path will be used. - - -target=resource Resource to target. 
Operation will be limited to this - resource and its dependencies. This flag can be used - multiple times. - - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. - - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" or any ".auto.tfvars" - files are present, they will be automatically loaded. - -` - return strings.TrimSpace(helpText) -} - -func (c *RefreshCommand) Synopsis() string { - return "Update local state file against real resources" -} diff --git a/vendor/github.com/hashicorp/terraform/command/show.go b/vendor/github.com/hashicorp/terraform/command/show.go deleted file mode 100644 index d8d1c84efef..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/show.go +++ /dev/null @@ -1,146 +0,0 @@ -package command - -import ( - "flag" - "fmt" - "os" - "strings" - - "github.com/hashicorp/terraform/command/format" - "github.com/hashicorp/terraform/terraform" -) - -// ShowCommand is a Command implementation that reads and outputs the -// contents of a Terraform plan or state file. 
-type ShowCommand struct { - Meta -} - -func (c *ShowCommand) Run(args []string) int { - var moduleDepth int - - args, err := c.Meta.process(args, false) - if err != nil { - return 1 - } - - cmdFlags := flag.NewFlagSet("show", flag.ContinueOnError) - c.addModuleDepthFlag(cmdFlags, &moduleDepth) - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - args = cmdFlags.Args() - if len(args) > 1 { - c.Ui.Error( - "The show command expects at most one argument with the path\n" + - "to a Terraform state or plan file.\n") - cmdFlags.Usage() - return 1 - } - - var planErr, stateErr error - var path string - var plan *terraform.Plan - var state *terraform.State - if len(args) > 0 { - path = args[0] - f, err := os.Open(path) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error loading file: %s", err)) - return 1 - } - defer f.Close() - - plan, err = terraform.ReadPlan(f) - if err != nil { - if _, err := f.Seek(0, 0); err != nil { - c.Ui.Error(fmt.Sprintf("Error reading file: %s", err)) - return 1 - } - - plan = nil - planErr = err - } - if plan == nil { - state, err = terraform.ReadState(f) - if err != nil { - stateErr = err - } - } - } else { - // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - env := c.Workspace() - - // Get the state - stateStore, err := b.State(env) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - if err := stateStore.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - state = stateStore.State() - if state == nil { - c.Ui.Output("No state.") - return 0 - } - } - - if plan == nil && state == nil { - c.Ui.Error(fmt.Sprintf( - "Terraform couldn't read the given file as a state or plan file.\n"+ - "The errors while attempting to read the file as each format are\n"+ - "shown below.\n\n"+ - "State read error: 
%s\n\nPlan read error: %s", - stateErr, - planErr)) - return 1 - } - - if plan != nil { - dispPlan := format.NewPlan(plan) - c.Ui.Output(dispPlan.Format(c.Colorize())) - return 0 - } - - c.Ui.Output(format.State(&format.StateOpts{ - State: state, - Color: c.Colorize(), - ModuleDepth: moduleDepth, - })) - return 0 -} - -func (c *ShowCommand) Help() string { - helpText := ` -Usage: terraform show [options] [path] - - Reads and outputs a Terraform state or plan file in a human-readable - form. If no path is specified, the current state will be shown. - -Options: - - -module-depth=n Specifies the depth of modules to show in the output. - By default this is -1, which will expand all. - - -no-color If specified, output won't contain any color. - -` - return strings.TrimSpace(helpText) -} - -func (c *ShowCommand) Synopsis() string { - return "Inspect Terraform state or plan" -} diff --git a/vendor/github.com/hashicorp/terraform/command/state_command.go b/vendor/github.com/hashicorp/terraform/command/state_command.go deleted file mode 100644 index 7e3a6af1b8c..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/state_command.go +++ /dev/null @@ -1,40 +0,0 @@ -package command - -import ( - "strings" - - "github.com/mitchellh/cli" -) - -// StateCommand is a Command implementation that just shows help for -// the subcommands nested below it. -type StateCommand struct { - StateMeta -} - -func (c *StateCommand) Run(args []string) int { - return cli.RunResultHelp -} - -func (c *StateCommand) Help() string { - helpText := ` -Usage: terraform state [options] [args] - - This command has subcommands for advanced state management. - - These subcommands can be used to slice and dice the Terraform state. - This is sometimes necessary in advanced cases. For your safety, all - state management commands that modify the state create a timestamped - backup of the state prior to making modifications. 
- - The structure and output of the commands is specifically tailored to work - well with the common Unix utilities such as grep, awk, etc. We recommend - using those tools to perform more advanced state tasks. - -` - return strings.TrimSpace(helpText) -} - -func (c *StateCommand) Synopsis() string { - return "Advanced state management" -} diff --git a/vendor/github.com/hashicorp/terraform/command/state_list.go b/vendor/github.com/hashicorp/terraform/command/state_list.go deleted file mode 100644 index 211f5b3d082..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/state_list.go +++ /dev/null @@ -1,119 +0,0 @@ -package command - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" -) - -// StateListCommand is a Command implementation that lists the resources -// within a state file. -type StateListCommand struct { - Meta - StateMeta -} - -func (c *StateListCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - cmdFlags := c.Meta.flagSet("state list") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - if err := cmdFlags.Parse(args); err != nil { - return cli.RunResultHelp - } - args = cmdFlags.Args() - - // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - env := c.Workspace() - // Get the state - state, err := b.State(env) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - if err := state.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - stateReal := state.State() - if stateReal == nil { - c.Ui.Error(fmt.Sprintf(errStateNotFound)) - return 1 - } - - filter := &terraform.StateFilter{State: stateReal} - results, err := filter.Filter(args...) 
- if err != nil { - c.Ui.Error(fmt.Sprintf(errStateFilter, err)) - return cli.RunResultHelp - } - - for _, result := range results { - if _, ok := result.Value.(*terraform.InstanceState); ok { - c.Ui.Output(result.Address) - } - } - - return 0 -} - -func (c *StateListCommand) Help() string { - helpText := ` -Usage: terraform state list [options] [pattern...] - - List resources in the Terraform state. - - This command lists resources in the Terraform state. The pattern argument - can be used to filter the resources by resource or module. If no pattern - is given, all resources are listed. - - The pattern argument is meant to provide very simple filtering. For - advanced filtering, please use tools such as "grep". The output of this - command is designed to be friendly for this usage. - - The pattern argument accepts any resource targeting syntax. Please - refer to the documentation on resource targeting syntax for more - information. - -Options: - - -state=statefile Path to a Terraform state file to use to look - up Terraform-managed resources. By default it will - use the state "terraform.tfstate" if it exists. - -` - return strings.TrimSpace(helpText) -} - -func (c *StateListCommand) Synopsis() string { - return "List resources in the state" -} - -const errStateFilter = `Error filtering state: %[1]s - -Please ensure that all your addresses are formatted properly.` - -const errStateLoadingState = `Error loading the state: %[1]s - -Please ensure that your Terraform state exists and that you've -configured it properly. You can use the "-state" flag to point -Terraform at another state file.` - -const errStateNotFound = `No state file was found! - -State management commands require a state file. 
Run this command -in a directory where Terraform has been run or use the -state flag -to point the command to a specific state location.` diff --git a/vendor/github.com/hashicorp/terraform/command/state_meta.go b/vendor/github.com/hashicorp/terraform/command/state_meta.go deleted file mode 100644 index aa79e9d47e7..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/state_meta.go +++ /dev/null @@ -1,104 +0,0 @@ -package command - -import ( - "errors" - "fmt" - "time" - - backendlocal "github.com/hashicorp/terraform/backend/local" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -// StateMeta is the meta struct that should be embedded in state subcommands. -type StateMeta struct { - Meta -} - -// State returns the state for this meta. This gets the appropriate state from -// the backend, but changes the way that backups are done. This configures -// backups to be timestamped rather than just the original state path plus a -// backup path. -func (c *StateMeta) State() (state.State, error) { - var realState state.State - backupPath := c.backupPath - stateOutPath := c.statePath - - // use the specified state - if c.statePath != "" { - realState = &state.LocalState{ - Path: c.statePath, - } - } else { - // Load the backend - b, err := c.Backend(nil) - if err != nil { - return nil, err - } - - env := c.Workspace() - // Get the state - s, err := b.State(env) - if err != nil { - return nil, err - } - - // Get a local backend - localRaw, err := c.Backend(&BackendOpts{ForceLocal: true}) - if err != nil { - // This should never fail - panic(err) - } - localB := localRaw.(*backendlocal.Local) - _, stateOutPath, _ = localB.StatePaths(env) - if err != nil { - return nil, err - } - - realState = s - } - - // We always backup state commands, so set the back if none was specified - // (the default is "-", but some tests bypass the flag parsing). - if backupPath == "-" || backupPath == "" { - // Determine the backup path. 
stateOutPath is set to the resulting - // file where state is written (cached in the case of remote state) - backupPath = fmt.Sprintf( - "%s.%d%s", - stateOutPath, - time.Now().UTC().Unix(), - DefaultBackupExtension) - } - - // Wrap it for backups - realState = &state.BackupState{ - Real: realState, - Path: backupPath, - } - - return realState, nil -} - -// filterInstance filters a single instance out of filter results. -func (c *StateMeta) filterInstance(rs []*terraform.StateFilterResult) (*terraform.StateFilterResult, error) { - var result *terraform.StateFilterResult - for _, r := range rs { - if _, ok := r.Value.(*terraform.InstanceState); !ok { - continue - } - - if result != nil { - return nil, errors.New(errStateMultiple) - } - - result = r - } - - return result, nil -} - -const errStateMultiple = `Multiple instances found for the given pattern! - -This command requires that the pattern match exactly one instance -of a resource. To view the matched instances, use "terraform state list". -Please modify the pattern to match only a single instance.` diff --git a/vendor/github.com/hashicorp/terraform/command/state_mv.go b/vendor/github.com/hashicorp/terraform/command/state_mv.go deleted file mode 100644 index e2f89c9639a..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/state_mv.go +++ /dev/null @@ -1,242 +0,0 @@ -package command - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" -) - -// StateMvCommand is a Command implementation that shows a single resource. 
-type StateMvCommand struct { - StateMeta -} - -func (c *StateMvCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - // We create two metas to track the two states - var backupPathOut, statePathOut string - - cmdFlags := c.Meta.flagSet("state mv") - cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") - cmdFlags.StringVar(&c.statePath, "state", "", "path") - cmdFlags.StringVar(&backupPathOut, "backup-out", "-", "backup") - cmdFlags.StringVar(&statePathOut, "state-out", "", "path") - if err := cmdFlags.Parse(args); err != nil { - return cli.RunResultHelp - } - args = cmdFlags.Args() - if len(args) != 2 { - c.Ui.Error("Exactly two arguments expected.\n") - return cli.RunResultHelp - } - - // Read the from state - stateFrom, err := c.State() - if err != nil { - c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) - return 1 - } - - if err := stateFrom.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - stateFromReal := stateFrom.State() - if stateFromReal == nil { - c.Ui.Error(fmt.Sprintf(errStateNotFound)) - return 1 - } - - // Read the destination state - stateTo := stateFrom - stateToReal := stateFromReal - - if statePathOut != "" { - c.statePath = statePathOut - c.backupPath = backupPathOut - stateTo, err = c.State() - if err != nil { - c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) - return 1 - } - - if err := stateTo.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - stateToReal = stateTo.State() - if stateToReal == nil { - stateToReal = terraform.NewState() - } - } - - // Filter what we're moving - filter := &terraform.StateFilter{State: stateFromReal} - results, err := filter.Filter(args[0]) - if err != nil { - c.Ui.Error(fmt.Sprintf(errStateMv, err)) - return cli.RunResultHelp - } - if len(results) == 0 { - c.Ui.Output(fmt.Sprintf("Item to move doesn't exist: %s", args[0])) - return 
1 - } - - // Get the item to add to the state - add := c.addableResult(results) - - // Do the actual move - if err := stateFromReal.Remove(args[0]); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMv, err)) - return 1 - } - - if err := stateToReal.Add(args[0], args[1], add); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMv, err)) - return 1 - } - - // Write the new state - if err := stateTo.WriteState(stateToReal); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) - return 1 - } - - if err := stateTo.PersistState(); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) - return 1 - } - - // Write the old state if it is different - if stateTo != stateFrom { - if err := stateFrom.WriteState(stateFromReal); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) - return 1 - } - - if err := stateFrom.PersistState(); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) - return 1 - } - } - - c.Ui.Output(fmt.Sprintf( - "Moved %s to %s", args[0], args[1])) - return 0 -} - -// addableResult takes the result from a filter operation and returns what to -// call State.Add with. The reason we do this is because in the module case -// we must add the list of all modules returned versus just the root module. -func (c *StateMvCommand) addableResult(results []*terraform.StateFilterResult) interface{} { - switch v := results[0].Value.(type) { - case *terraform.ModuleState: - // If a module state then we should add the full list of modules - result := []*terraform.ModuleState{v} - if len(results) > 1 { - for _, r := range results[1:] { - if ms, ok := r.Value.(*terraform.ModuleState); ok { - result = append(result, ms) - } - } - } - - return result - - case *terraform.ResourceState: - // If a resource state with more than one result, it has a multi-count - // and we need to add all of them. 
- result := []*terraform.ResourceState{v} - if len(results) > 1 { - for _, r := range results[1:] { - rs, ok := r.Value.(*terraform.ResourceState) - if !ok { - continue - } - - if rs.Type == v.Type { - result = append(result, rs) - } - } - } - - // If we only have one item, add it directly - if len(result) == 1 { - return result[0] - } - - return result - - default: - // By default just add the first result - return v - } -} - -func (c *StateMvCommand) Help() string { - helpText := ` -Usage: terraform state mv [options] SOURCE DESTINATION - - This command will move an item matched by the address given to the - destination address. This command can also move to a destination address - in a completely different state file. - - This can be used for simple resource renaming, moving items to and from - a module, moving entire modules, and more. And because this command can also - move data to a completely new state, it can also be used for refactoring - one configuration into multiple separately managed Terraform configurations. - - This command will output a backup copy of the state prior to saving any - changes. The backup cannot be disabled. Due to the destructive nature - of this command, backups are required. - - If you're moving an item to a different state file, a backup will be created - for each state file. - -Options: - - -backup=PATH Path where Terraform should write the backup for the original - state. This can't be disabled. If not set, Terraform - will write it to the same path as the statefile with - a ".backup" extension. - - -backup-out=PATH Path where Terraform should write the backup for the destination - state. This can't be disabled. If not set, Terraform - will write it to the same path as the destination state - file with a backup extension. This only needs - to be specified if -state-out is set to a different path - than -state. - - -state=PATH Path to the source state file. 
Defaults to the configured - backend, or "terraform.tfstate" - - -state-out=PATH Path to the destination state file to write to. If this - isn't specified, the source state file will be used. This - can be a new or existing path. - -` - return strings.TrimSpace(helpText) -} - -func (c *StateMvCommand) Synopsis() string { - return "Move an item in the state" -} - -const errStateMv = `Error moving state: %[1]s - -Please ensure your addresses and state paths are valid. No -state was persisted. Your existing states are untouched.` - -const errStateMvPersist = `Error saving the state: %s - -The state wasn't saved properly. If the error happening after a partial -write occurred, a backup file will have been created. Otherwise, the state -is in the same state it was when the operation started.` diff --git a/vendor/github.com/hashicorp/terraform/command/state_pull.go b/vendor/github.com/hashicorp/terraform/command/state_pull.go deleted file mode 100644 index c010d2a5e39..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/state_pull.go +++ /dev/null @@ -1,83 +0,0 @@ -package command - -import ( - "bytes" - "fmt" - "strings" - - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" -) - -// StatePullCommand is a Command implementation that shows a single resource. 
-type StatePullCommand struct { - Meta - StateMeta -} - -func (c *StatePullCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - cmdFlags := c.Meta.flagSet("state pull") - if err := cmdFlags.Parse(args); err != nil { - return cli.RunResultHelp - } - args = cmdFlags.Args() - - // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // Get the state - env := c.Workspace() - state, err := b.State(env) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - if err := state.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - s := state.State() - if s == nil { - // Output on "error" so it shows up on stderr - c.Ui.Error("Empty state (no state)") - - return 0 - } - - var buf bytes.Buffer - if err := terraform.WriteState(s, &buf); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - c.Ui.Output(buf.String()) - return 0 -} - -func (c *StatePullCommand) Help() string { - helpText := ` -Usage: terraform state pull [options] - - Pull the state from its location and output it to stdout. - - This command "pulls" the current state and outputs it to stdout. - The primary use of this is for state stored remotely. This command - will still work with local state but is less useful for this. 
- -` - return strings.TrimSpace(helpText) -} - -func (c *StatePullCommand) Synopsis() string { - return "Pull current state and output to stdout" -} diff --git a/vendor/github.com/hashicorp/terraform/command/state_push.go b/vendor/github.com/hashicorp/terraform/command/state_push.go deleted file mode 100644 index 5b7d76978f9..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/state_push.go +++ /dev/null @@ -1,167 +0,0 @@ -package command - -import ( - "fmt" - "io" - "os" - "strings" - - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" -) - -// StatePushCommand is a Command implementation that shows a single resource. -type StatePushCommand struct { - Meta - StateMeta -} - -func (c *StatePushCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - var flagForce bool - cmdFlags := c.Meta.flagSet("state push") - cmdFlags.BoolVar(&flagForce, "force", false, "") - if err := cmdFlags.Parse(args); err != nil { - return cli.RunResultHelp - } - args = cmdFlags.Args() - - if len(args) != 1 { - c.Ui.Error("Exactly one argument expected: path to state to push") - return 1 - } - - // Determine our reader for the input state. This is the filepath - // or stdin if "-" is given. - var r io.Reader = os.Stdin - if args[0] != "-" { - f, err := os.Open(args[0]) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Note: we don't need to defer a Close here because we do a close - // automatically below directly after the read. - - r = f - } - - // Read the state - sourceState, err := terraform.ReadState(r) - if c, ok := r.(io.Closer); ok { - // Close the reader if possible right now since we're done with it. 
- c.Close() - } - if err != nil { - c.Ui.Error(fmt.Sprintf("Error reading source state %q: %s", args[0], err)) - return 1 - } - - // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // Get the state - env := c.Workspace() - state, err := b.State(env) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load destination state: %s", err)) - return 1 - } - if err := state.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load destination state: %s", err)) - return 1 - } - dstState := state.State() - - // If we're not forcing, then perform safety checks - if !flagForce && !dstState.Empty() { - if !dstState.SameLineage(sourceState) { - c.Ui.Error(strings.TrimSpace(errStatePushLineage)) - return 1 - } - - age, err := dstState.CompareAges(sourceState) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - if age == terraform.StateAgeReceiverNewer { - c.Ui.Error(strings.TrimSpace(errStatePushSerialNewer)) - return 1 - } - } - - // Overwrite it - if err := state.WriteState(sourceState); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) - return 1 - } - if err := state.PersistState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) - return 1 - } - - return 0 -} - -func (c *StatePushCommand) Help() string { - helpText := ` -Usage: terraform state push [options] PATH - - Update remote state from a local state file at PATH. - - This command "pushes" a local state and overwrites remote state - with a local state file. The command will protect you against writing - an older serial or a different state file lineage unless you specify the - "-force" flag. - - This command works with local state (it will overwrite the local - state), but is less useful for this use case. - - If PATH is "-", then this command will read the state to push from stdin. 
- Data from stdin is not streamed to the backend: it is loaded completely - (until pipe close), verified, and then pushed. - -Options: - - -force Write the state even if lineages don't match or the - remote serial is higher. - -` - return strings.TrimSpace(helpText) -} - -func (c *StatePushCommand) Synopsis() string { - return "Update remote state from a local state file" -} - -const errStatePushLineage = ` -The lineages do not match! The state will not be pushed. - -The "lineage" is a unique identifier given to a state on creation. It helps -protect Terraform from overwriting a seemingly unrelated state file since it -represents potentially losing real state. - -Please verify you're pushing the correct state. If you're sure you are, you -can force the behavior with the "-force" flag. -` - -const errStatePushSerialNewer = ` -The destination state has a higher serial number! The state will not be pushed. - -A higher serial could indicate that there is data in the destination state -that was not present when the source state was created. As a protection measure, -Terraform will not automatically overwrite this state. - -Please verify you're pushing the correct state. If you're sure you are, you -can force the behavior with the "-force" flag. -` diff --git a/vendor/github.com/hashicorp/terraform/command/state_rm.go b/vendor/github.com/hashicorp/terraform/command/state_rm.go deleted file mode 100644 index 53bb50d01f1..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/state_rm.go +++ /dev/null @@ -1,113 +0,0 @@ -package command - -import ( - "fmt" - "strings" - - "github.com/mitchellh/cli" -) - -// StateRmCommand is a Command implementation that shows a single resource. 
-type StateRmCommand struct { - StateMeta -} - -func (c *StateRmCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - cmdFlags := c.Meta.flagSet("state show") - cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") - cmdFlags.StringVar(&c.statePath, "state", "", "path") - if err := cmdFlags.Parse(args); err != nil { - return cli.RunResultHelp - } - args = cmdFlags.Args() - - if len(args) < 1 { - c.Ui.Error("At least one resource address is required.") - return 1 - } - - state, err := c.State() - if err != nil { - c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) - return 1 - } - if err := state.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - stateReal := state.State() - if stateReal == nil { - c.Ui.Error(fmt.Sprintf(errStateNotFound)) - return 1 - } - - if err := stateReal.Remove(args...); err != nil { - c.Ui.Error(fmt.Sprintf(errStateRm, err)) - return 1 - } - - c.Ui.Output(fmt.Sprintf("%d items removed.", len(args))) - - if err := state.WriteState(stateReal); err != nil { - c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) - return 1 - } - - if err := state.PersistState(); err != nil { - c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) - return 1 - } - - c.Ui.Output("Item removal successful.") - return 0 -} - -func (c *StateRmCommand) Help() string { - helpText := ` -Usage: terraform state rm [options] ADDRESS... - - Remove one or more items from the Terraform state. - - This command removes one or more items from the Terraform state based - on the address given. You can view and list the available resources - with "terraform state list". - - This command creates a timestamped backup of the state on every invocation. - This can't be disabled. Due to the destructive nature of this command, - the backup is ensured by Terraform for safety reasons. - -Options: - - -backup=PATH Path where Terraform should write the backup - state. 
This can't be disabled. If not set, Terraform - will write it to the same path as the statefile with - a backup extension. - - -state=PATH Path to the source state file. Defaults to the configured - backend, or "terraform.tfstate" - -` - return strings.TrimSpace(helpText) -} - -func (c *StateRmCommand) Synopsis() string { - return "Remove an item from the state" -} - -const errStateRm = `Error removing items from the state: %s - -The state was not saved. No items were removed from the persisted -state. No backup was created since no modification occurred. Please -resolve the issue above and try again.` - -const errStateRmPersist = `Error saving the state: %s - -The state was not saved. No items were removed from the persisted -state. No backup was created since no modification occurred. Please -resolve the issue above and try again.` diff --git a/vendor/github.com/hashicorp/terraform/command/state_show.go b/vendor/github.com/hashicorp/terraform/command/state_show.go deleted file mode 100644 index d44d0c07e16..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/state_show.go +++ /dev/null @@ -1,125 +0,0 @@ -package command - -import ( - "fmt" - "sort" - "strings" - - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" - "github.com/ryanuber/columnize" -) - -// StateShowCommand is a Command implementation that shows a single resource. 
-type StateShowCommand struct { - Meta - StateMeta -} - -func (c *StateShowCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - cmdFlags := c.Meta.flagSet("state show") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - if err := cmdFlags.Parse(args); err != nil { - return cli.RunResultHelp - } - args = cmdFlags.Args() - - // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // Get the state - env := c.Workspace() - state, err := b.State(env) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - if err := state.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - stateReal := state.State() - if stateReal == nil { - c.Ui.Error(fmt.Sprintf(errStateNotFound)) - return 1 - } - - filter := &terraform.StateFilter{State: stateReal} - results, err := filter.Filter(args...) 
- if err != nil { - c.Ui.Error(fmt.Sprintf(errStateFilter, err)) - return 1 - } - - if len(results) == 0 { - return 0 - } - - instance, err := c.filterInstance(results) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - if instance == nil { - return 0 - } - - is := instance.Value.(*terraform.InstanceState) - - // Sort the keys - var keys []string - for k, _ := range is.Attributes { - keys = append(keys, k) - } - sort.Strings(keys) - - // Build the output - var output []string - output = append(output, fmt.Sprintf("id | %s", is.ID)) - for _, k := range keys { - if k != "id" { - output = append(output, fmt.Sprintf("%s | %s", k, is.Attributes[k])) - } - } - - // Output - config := columnize.DefaultConfig() - config.Glue = " = " - c.Ui.Output(columnize.Format(output, config)) - return 0 -} - -func (c *StateShowCommand) Help() string { - helpText := ` -Usage: terraform state show [options] ADDRESS - - Shows the attributes of a resource in the Terraform state. - - This command shows the attributes of a single resource in the Terraform - state. The address argument must be used to specify a single resource. - You can view the list of available resources with "terraform state list". - -Options: - - -state=statefile Path to a Terraform state file to use to look - up Terraform-managed resources. By default it will - use the state "terraform.tfstate" if it exists. 
- -` - return strings.TrimSpace(helpText) -} - -func (c *StateShowCommand) Synopsis() string { - return "Show a resource in the state" -} diff --git a/vendor/github.com/hashicorp/terraform/command/taint.go b/vendor/github.com/hashicorp/terraform/command/taint.go deleted file mode 100644 index 626f88a46f3..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/taint.go +++ /dev/null @@ -1,227 +0,0 @@ -package command - -import ( - "context" - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -// TaintCommand is a cli.Command implementation that manually taints -// a resource, marking it for recreation. -type TaintCommand struct { - Meta -} - -func (c *TaintCommand) Run(args []string) int { - args, err := c.Meta.process(args, false) - if err != nil { - return 1 - } - - var allowMissing bool - var module string - cmdFlags := c.Meta.flagSet("taint") - cmdFlags.BoolVar(&allowMissing, "allow-missing", false, "module") - cmdFlags.StringVar(&module, "module", "", "module") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") - cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") - cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") - cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - // Require the one argument for the resource to taint - args = cmdFlags.Args() - if len(args) != 1 { - c.Ui.Error("The taint command expects exactly one argument.") - cmdFlags.Usage() - return 1 - } - - name := args[0] - if module == "" { - module = "root" - } else { - module = "root." 
+ module - } - - rsk, err := terraform.ParseResourceStateKey(name) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to parse resource name: %s", err)) - return 1 - } - - if !rsk.Mode.Taintable() { - c.Ui.Error(fmt.Sprintf("Resource '%s' cannot be tainted", name)) - return 1 - } - - // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // Get the state - env := c.Workspace() - st, err := b.State(env) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - if err := st.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - if c.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), c.stateLockTimeout) - defer cancel() - - lockInfo := state.NewLockInfo() - lockInfo.Operation = "taint" - lockID, err := clistate.Lock(lockCtx, st, lockInfo, c.Ui, c.Colorize()) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) - return 1 - } - - defer clistate.Unlock(st, lockID, c.Ui, c.Colorize()) - } - - // Get the actual state structure - s := st.State() - if s.Empty() { - if allowMissing { - return c.allowMissingExit(name, module) - } - - c.Ui.Error(fmt.Sprintf( - "The state is empty. The most common reason for this is that\n" + - "an invalid state file path was given or Terraform has never\n " + - "been run for this infrastructure. Infrastructure must exist\n" + - "for it to be tainted.")) - return 1 - } - - // Get the proper module we want to taint - modPath := strings.Split(module, ".") - mod := s.ModuleByPath(modPath) - if mod == nil { - if allowMissing { - return c.allowMissingExit(name, module) - } - - c.Ui.Error(fmt.Sprintf( - "The module %s could not be found. 
There is nothing to taint.", - module)) - return 1 - } - - // If there are no resources in this module, it is an error - if len(mod.Resources) == 0 { - if allowMissing { - return c.allowMissingExit(name, module) - } - - c.Ui.Error(fmt.Sprintf( - "The module %s has no resources. There is nothing to taint.", - module)) - return 1 - } - - // Get the resource we're looking for - rs, ok := mod.Resources[name] - if !ok { - if allowMissing { - return c.allowMissingExit(name, module) - } - - c.Ui.Error(fmt.Sprintf( - "The resource %s couldn't be found in the module %s.", - name, - module)) - return 1 - } - - // Taint the resource - rs.Taint() - - log.Printf("[INFO] Writing state output to: %s", c.Meta.StateOutPath()) - if err := st.WriteState(s); err != nil { - c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) - return 1 - } - if err := st.PersistState(); err != nil { - c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) - return 1 - } - - c.Ui.Output(fmt.Sprintf( - "The resource %s in the module %s has been marked as tainted!", - name, module)) - return 0 -} - -func (c *TaintCommand) Help() string { - helpText := ` -Usage: terraform taint [options] name - - Manually mark a resource as tainted, forcing a destroy and recreate - on the next plan/apply. - - This will not modify your infrastructure. This command changes your - state to mark a resource as tainted so that during the next plan or - apply, that resource will be destroyed and recreated. This command on - its own will not modify infrastructure. This command can be undone by - reverting the state backup file that is created. - -Options: - - -allow-missing If specified, the command will succeed (exit code 0) - even if the resource is missing. - - -backup=path Path to backup the existing state file before - modifying. Defaults to the "-state-out" path with - ".backup" extension. Set to "-" to disable backup. - - -lock=true Lock the state file when locking is supported. 
- - -lock-timeout=0s Duration to retry a state lock. - - -module=path The module path where the resource lives. By - default this will be root. Child modules can be specified - by names. Ex. "consul" or "consul.vpc" (nested modules). - - -no-color If specified, output won't contain any color. - - -state=path Path to read and save state (unless state-out - is specified). Defaults to "terraform.tfstate". - - -state-out=path Path to write updated state file. By default, the - "-state" path will be used. - -` - return strings.TrimSpace(helpText) -} - -func (c *TaintCommand) Synopsis() string { - return "Manually mark a resource for recreation" -} - -func (c *TaintCommand) allowMissingExit(name, module string) int { - c.Ui.Output(fmt.Sprintf( - "The resource %s in the module %s was not found, but\n"+ - "-allow-missing is set, so we're exiting successfully.", - name, module)) - return 0 -} diff --git a/vendor/github.com/hashicorp/terraform/command/ui_input.go b/vendor/github.com/hashicorp/terraform/command/ui_input.go deleted file mode 100644 index 9c8873d4675..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/ui_input.go +++ /dev/null @@ -1,159 +0,0 @@ -package command - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "log" - "os" - "os/signal" - "strings" - "sync" - "unicode" - - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/colorstring" -) - -var defaultInputReader io.Reader -var defaultInputWriter io.Writer -var testInputResponse []string -var testInputResponseMap map[string]string - -// UIInput is an implementation of terraform.UIInput that asks the CLI -// for input stdin. -type UIInput struct { - // Colorize will color the output. - Colorize *colorstring.Colorize - - // Reader and Writer for IO. If these aren't set, they will default to - // Stdout and Stderr respectively. 
- Reader io.Reader - Writer io.Writer - - interrupted bool - l sync.Mutex - once sync.Once -} - -func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) { - i.once.Do(i.init) - - r := i.Reader - w := i.Writer - if r == nil { - r = defaultInputReader - } - if w == nil { - w = defaultInputWriter - } - if r == nil { - r = os.Stdin - } - if w == nil { - w = os.Stdout - } - - // Make sure we only ask for input once at a time. Terraform - // should enforce this, but it doesn't hurt to verify. - i.l.Lock() - defer i.l.Unlock() - - // If we're interrupted, then don't ask for input - if i.interrupted { - return "", errors.New("interrupted") - } - - // If we have test results, return those. testInputResponse is the - // "old" way of doing it and we should remove that. - if testInputResponse != nil { - v := testInputResponse[0] - testInputResponse = testInputResponse[1:] - return v, nil - } - - // testInputResponseMap is the new way for test responses, based on - // the query ID. - if testInputResponseMap != nil { - v, ok := testInputResponseMap[opts.Id] - if !ok { - return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) - } - - return v, nil - } - - log.Printf("[DEBUG] command: asking for input: %q", opts.Query) - - // Listen for interrupts so we can cancel the input ask - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) - defer signal.Stop(sigCh) - - // Build the output format for asking - var buf bytes.Buffer - buf.WriteString("[reset]") - buf.WriteString(fmt.Sprintf("[bold]%s[reset]\n", opts.Query)) - if opts.Description != "" { - s := bufio.NewScanner(strings.NewReader(opts.Description)) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - buf.WriteString("\n") - } - if opts.Default != "" { - buf.WriteString(" [bold]Default:[reset] ") - buf.WriteString(opts.Default) - buf.WriteString("\n") - } - buf.WriteString(" [bold]Enter a value:[reset] ") - - // Ask the user for their input - if _, err := 
fmt.Fprint(w, i.Colorize.Color(buf.String())); err != nil { - return "", err - } - - // Listen for the input in a goroutine. This will allow us to - // interrupt this if we are interrupted (SIGINT) - result := make(chan string, 1) - go func() { - buf := bufio.NewReader(r) - line, err := buf.ReadString('\n') - if err != nil { - log.Printf("[ERR] UIInput scan err: %s", err) - } - - result <- strings.TrimRightFunc(line, unicode.IsSpace) - }() - - select { - case line := <-result: - fmt.Fprint(w, "\n") - - if line == "" { - line = opts.Default - } - - return line, nil - case <-sigCh: - // Print a newline so that any further output starts properly - // on a new line. - fmt.Fprintln(w) - - // Mark that we were interrupted so future Ask calls fail. - i.interrupted = true - - return "", errors.New("interrupted") - } -} - -func (i *UIInput) init() { - if i.Colorize == nil { - i.Colorize = &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: true, - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/command/unlock.go b/vendor/github.com/hashicorp/terraform/command/unlock.go deleted file mode 100644 index 4ac50497291..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/unlock.go +++ /dev/null @@ -1,144 +0,0 @@ -package command - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" -) - -// UnlockCommand is a cli.Command implementation that manually unlocks -// the state. 
-type UnlockCommand struct { - Meta -} - -func (c *UnlockCommand) Run(args []string) int { - args, err := c.Meta.process(args, false) - if err != nil { - return 1 - } - - force := false - cmdFlags := c.Meta.flagSet("force-unlock") - cmdFlags.BoolVar(&force, "force", false, "force") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - args = cmdFlags.Args() - if len(args) == 0 { - c.Ui.Error("unlock requires a lock id argument") - return cli.RunResultHelp - } - - lockID := args[0] - args = args[1:] - - // assume everything is initialized. The user can manually init if this is - // required. - configPath, err := ModulePath(args) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - conf, err := c.Config(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) - return 1 - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: conf, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - env := c.Workspace() - st, err := b.State(env) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - isLocal := false - switch s := st.(type) { - case *state.BackupState: - if _, ok := s.Real.(*state.LocalState); ok { - isLocal = true - } - case *state.LocalState: - isLocal = true - } - - if !force { - // Forcing this doesn't do anything, but doesn't break anything either, - // and allows us to run the basic command test too. - if isLocal { - c.Ui.Error("Local state cannot be unlocked by another process") - return 1 - } - - desc := "Terraform will remove the lock on the remote state.\n" + - "This will allow local Terraform commands to modify this state, even though it\n" + - "may be still be in use. Only 'yes' will be accepted to confirm." 
- - v, err := c.UIInput().Input(&terraform.InputOpts{ - Id: "force-unlock", - Query: "Do you really want to force-unlock?", - Description: desc, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error asking for confirmation: %s", err)) - return 1 - } - if v != "yes" { - c.Ui.Output("force-unlock cancelled.") - return 1 - } - } - - if err := st.Unlock(lockID); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to unlock state: %s", err)) - return 1 - } - - c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputUnlockSuccess))) - return 0 -} - -func (c *UnlockCommand) Help() string { - helpText := ` -Usage: terraform force-unlock LOCK_ID [DIR] - - Manually unlock the state for the defined configuration. - - This will not modify your infrastructure. This command removes the lock on the - state for the current configuration. The behavior of this lock is dependent - on the backend being used. Local state files cannot be unlocked by another - process. - -Options: - - -force Don't ask for input for unlock confirmation. -` - return strings.TrimSpace(helpText) -} - -func (c *UnlockCommand) Synopsis() string { - return "Manually unlock the terraform state" -} - -const outputUnlockSuccess = ` -[reset][bold][green]Terraform state has been successfully unlocked![reset][green] - -The state has been unlocked, and Terraform commands should now be able to -obtain a new lock on the remote state. -` diff --git a/vendor/github.com/hashicorp/terraform/command/untaint.go b/vendor/github.com/hashicorp/terraform/command/untaint.go deleted file mode 100644 index 1eca202779b..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/untaint.go +++ /dev/null @@ -1,215 +0,0 @@ -package command - -import ( - "context" - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/state" -) - -// UntaintCommand is a cli.Command implementation that manually untaints -// a resource, marking it as primary and ready for service. 
-type UntaintCommand struct { - Meta -} - -func (c *UntaintCommand) Run(args []string) int { - args, err := c.Meta.process(args, false) - if err != nil { - return 1 - } - - var allowMissing bool - var module string - cmdFlags := c.Meta.flagSet("untaint") - cmdFlags.BoolVar(&allowMissing, "allow-missing", false, "module") - cmdFlags.StringVar(&module, "module", "", "module") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") - cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") - cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") - cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - // Require the one argument for the resource to untaint - args = cmdFlags.Args() - if len(args) != 1 { - c.Ui.Error("The untaint command expects exactly one argument.") - cmdFlags.Usage() - return 1 - } - - name := args[0] - if module == "" { - module = "root" - } else { - module = "root." 
+ module - } - - // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // Get the state - env := c.Workspace() - st, err := b.State(env) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - if err := st.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - if c.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), c.stateLockTimeout) - defer cancel() - - lockInfo := state.NewLockInfo() - lockInfo.Operation = "untaint" - lockID, err := clistate.Lock(lockCtx, st, lockInfo, c.Ui, c.Colorize()) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) - return 1 - } - - defer clistate.Unlock(st, lockID, c.Ui, c.Colorize()) - } - - // Get the actual state structure - s := st.State() - if s.Empty() { - if allowMissing { - return c.allowMissingExit(name, module) - } - - c.Ui.Error(fmt.Sprintf( - "The state is empty. The most common reason for this is that\n" + - "an invalid state file path was given or Terraform has never\n " + - "been run for this infrastructure. Infrastructure must exist\n" + - "for it to be untainted.")) - return 1 - } - - // Get the proper module holding the resource we want to untaint - modPath := strings.Split(module, ".") - mod := s.ModuleByPath(modPath) - if mod == nil { - if allowMissing { - return c.allowMissingExit(name, module) - } - - c.Ui.Error(fmt.Sprintf( - "The module %s could not be found. There is nothing to untaint.", - module)) - return 1 - } - - // If there are no resources in this module, it is an error - if len(mod.Resources) == 0 { - if allowMissing { - return c.allowMissingExit(name, module) - } - - c.Ui.Error(fmt.Sprintf( - "The module %s has no resources. 
There is nothing to untaint.", - module)) - return 1 - } - - // Get the resource we're looking for - rs, ok := mod.Resources[name] - if !ok { - if allowMissing { - return c.allowMissingExit(name, module) - } - - c.Ui.Error(fmt.Sprintf( - "The resource %s couldn't be found in the module %s.", - name, - module)) - return 1 - } - - // Untaint the resource - rs.Untaint() - - log.Printf("[INFO] Writing state output to: %s", c.Meta.StateOutPath()) - if err := st.WriteState(s); err != nil { - c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) - return 1 - } - if err := st.PersistState(); err != nil { - c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) - return 1 - } - - c.Ui.Output(fmt.Sprintf( - "The resource %s in the module %s has been successfully untainted!", - name, module)) - return 0 -} - -func (c *UntaintCommand) Help() string { - helpText := ` -Usage: terraform untaint [options] name - - Manually unmark a resource as tainted, restoring it as the primary - instance in the state. This reverses either a manual 'terraform taint' - or the result of provisioners failing on a resource. - - This will not modify your infrastructure. This command changes your - state to unmark a resource as tainted. This command can be undone by - reverting the state backup file that is created, or by running - 'terraform taint' on the resource. - -Options: - - -allow-missing If specified, the command will succeed (exit code 0) - even if the resource is missing. - - -backup=path Path to backup the existing state file before - modifying. Defaults to the "-state-out" path with - ".backup" extension. Set to "-" to disable backup. - - -lock=true Lock the state file when locking is supported. - - -lock-timeout=0s Duration to retry a state lock. - - -module=path The module path where the resource lives. By - default this will be root. Child modules can be specified - by names. Ex. "consul" or "consul.vpc" (nested modules). 
- - -no-color If specified, output won't contain any color. - - -state=path Path to read and save state (unless state-out - is specified). Defaults to "terraform.tfstate". - - -state-out=path Path to write updated state file. By default, the - "-state" path will be used. - -` - return strings.TrimSpace(helpText) -} - -func (c *UntaintCommand) Synopsis() string { - return "Manually unmark a resource as tainted" -} - -func (c *UntaintCommand) allowMissingExit(name, module string) int { - c.Ui.Output(fmt.Sprintf( - "The resource %s in the module %s was not found, but\n"+ - "-allow-missing is set, so we're exiting successfully.", - name, module)) - return 0 -} diff --git a/vendor/github.com/hashicorp/terraform/command/validate.go b/vendor/github.com/hashicorp/terraform/command/validate.go deleted file mode 100644 index f48d38e4a50..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/validate.go +++ /dev/null @@ -1,143 +0,0 @@ -package command - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/terraform" -) - -// ValidateCommand is a Command implementation that validates the terraform files -type ValidateCommand struct { - Meta -} - -const defaultPath = "." - -func (c *ValidateCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - var checkVars bool - - cmdFlags := c.Meta.flagSet("validate") - cmdFlags.BoolVar(&checkVars, "check-variables", true, "check-variables") - cmdFlags.Usage = func() { - c.Ui.Error(c.Help()) - } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - args = cmdFlags.Args() - - var dirPath string - if len(args) == 1 { - dirPath = args[0] - } else { - dirPath = "." 
- } - dir, err := filepath.Abs(dirPath) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "Unable to locate directory %v\n", err.Error())) - } - - // Check for user-supplied plugin path - if c.pluginPath, err = c.loadPluginPath(); err != nil { - c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) - return 1 - } - - rtnCode := c.validate(dir, checkVars) - - return rtnCode -} - -func (c *ValidateCommand) Synopsis() string { - return "Validates the Terraform files" -} - -func (c *ValidateCommand) Help() string { - helpText := ` -Usage: terraform validate [options] [dir] - - Validate the terraform files in a directory. Validation includes a - basic check of syntax as well as checking that all variables declared - in the configuration are specified in one of the possible ways: - - -var foo=... - -var-file=foo.vars - TF_VAR_foo environment variable - terraform.tfvars - default value - - If dir is not specified, then the current directory will be used. - -Options: - - -check-variables=true If set to true (default), the command will check - whether all required variables have been specified. - - -no-color If specified, output won't contain any color. - - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. - - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" is present, it will be - automatically loaded if this flag is not specified. 
-` - return strings.TrimSpace(helpText) -} - -func (c *ValidateCommand) validate(dir string, checkVars bool) int { - var diags tfdiags.Diagnostics - - cfg, err := config.LoadDir(dir) - if err != nil { - diags = diags.Append(err) - c.showDiagnostics(err) - return 1 - } - - diags = diags.Append(cfg.Validate()) - - if diags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - - if checkVars { - mod, modDiags := c.Module(dir) - diags = diags.Append(modDiags) - if modDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - - opts := c.contextOpts() - opts.Module = mod - - tfCtx, err := terraform.NewContext(opts) - if err != nil { - diags = diags.Append(err) - c.showDiagnostics(diags) - return 1 - } - - diags = diags.Append(tfCtx.Validate()) - } - - c.showDiagnostics(diags) - if diags.HasErrors() { - return 1 - } - - return 0 -} diff --git a/vendor/github.com/hashicorp/terraform/command/version.go b/vendor/github.com/hashicorp/terraform/command/version.go deleted file mode 100644 index b62da93bfca..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/version.go +++ /dev/null @@ -1,123 +0,0 @@ -package command - -import ( - "bytes" - "fmt" - "sort" -) - -// VersionCommand is a Command implementation prints the version. -type VersionCommand struct { - Meta - - Revision string - Version string - VersionPrerelease string - CheckFunc VersionCheckFunc -} - -// VersionCheckFunc is the callback called by the Version command to -// check if there is a new version of Terraform. -type VersionCheckFunc func() (VersionCheckInfo, error) - -// VersionCheckInfo is the return value for the VersionCheckFunc callback -// and tells the Version command information about the latest version -// of Terraform. 
-type VersionCheckInfo struct { - Outdated bool - Latest string - Alerts []string -} - -func (c *VersionCommand) Help() string { - return "" -} - -func (c *VersionCommand) Run(args []string) int { - var versionString bytes.Buffer - args, err := c.Meta.process(args, false) - if err != nil { - return 1 - } - - fmt.Fprintf(&versionString, "Terraform v%s", c.Version) - if c.VersionPrerelease != "" { - fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) - - if c.Revision != "" { - fmt.Fprintf(&versionString, " (%s)", c.Revision) - } - } - - c.Ui.Output(versionString.String()) - - // We'll also attempt to print out the selected plugin versions. We can - // do this only if "terraform init" was already run and thus we've committed - // to a specific set of plugins. If not, the plugins lock will be empty - // and so we'll show _no_ providers. - // - // Generally-speaking this is a best-effort thing that will give us a good - // result in the usual case where the user successfully ran "terraform init" - // and then hit a problem running _another_ command. - providerPlugins := c.providerPluginSet() - pluginsLockFile := c.providerPluginsLock() - pluginsLock := pluginsLockFile.Read() - var pluginVersions []string - for meta := range providerPlugins { - name := meta.Name - wantHash, wanted := pluginsLock[name] - if !wanted { - // Ignore providers that aren't used by the current config at all - continue - } - gotHash, err := meta.SHA256() - if err != nil { - // if we can't read the file to hash it, ignore it. - continue - } - if !bytes.Equal(gotHash, wantHash) { - // Not the plugin we've locked, so ignore it. - continue - } - - // If we get here then we've found a selected plugin, so we'll print - // out its details. 
- if meta.Version == "0.0.0" { - pluginVersions = append(pluginVersions, fmt.Sprintf("+ provider.%s (unversioned)", name)) - } else { - pluginVersions = append(pluginVersions, fmt.Sprintf("+ provider.%s v%s", name, meta.Version)) - } - } - if len(pluginVersions) != 0 { - sort.Strings(pluginVersions) - for _, str := range pluginVersions { - c.Ui.Output(str) - } - } - - // If we have a version check function, then let's check for - // the latest version as well. - if c.CheckFunc != nil { - // Separate the prior output with a newline - c.Ui.Output("") - - // Check the latest version - info, err := c.CheckFunc() - if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error checking latest version: %s", err)) - } - if info.Outdated { - c.Ui.Output(fmt.Sprintf( - "Your version of Terraform is out of date! The latest version\n"+ - "is %s. You can update by downloading from www.terraform.io/downloads.html", - info.Latest)) - } - } - - return 0 -} - -func (c *VersionCommand) Synopsis() string { - return "Prints the Terraform version" -} diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_command.go b/vendor/github.com/hashicorp/terraform/command/workspace_command.go deleted file mode 100644 index e74d3797f88..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/workspace_command.go +++ /dev/null @@ -1,148 +0,0 @@ -package command - -import ( - "net/url" - "strings" - - "github.com/mitchellh/cli" -) - -// WorkspaceCommand is a Command Implementation that manipulates workspaces, -// which allow multiple distinct states and variables from a single config. 
-type WorkspaceCommand struct { - Meta - LegacyName bool -} - -func (c *WorkspaceCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - envCommandShowWarning(c.Ui, c.LegacyName) - - cmdFlags := c.Meta.flagSet("workspace") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - - c.Ui.Output(c.Help()) - return 0 -} - -func (c *WorkspaceCommand) Help() string { - helpText := ` -Usage: terraform workspace - - Create, change and delete Terraform workspaces. - - -Subcommands: - - show Show the current workspace name. - list List workspaces. - select Select a workspace. - new Create a new workspace. - delete Delete an existing workspace. -` - return strings.TrimSpace(helpText) -} - -func (c *WorkspaceCommand) Synopsis() string { - return "Workspace management" -} - -// validWorkspaceName returns true is this name is valid to use as a workspace name. -// Since most named states are accessed via a filesystem path or URL, check if -// escaping the name would be required. -func validWorkspaceName(name string) bool { - return name == url.PathEscape(name) -} - -func envCommandShowWarning(ui cli.Ui, show bool) { - if !show { - return - } - - ui.Warn(`Warning: the "terraform env" family of commands is deprecated. - -"Workspace" is now the preferred term for what earlier Terraform versions -called "environment", to reduce ambiguity caused by the latter term colliding -with other concepts. - -The "terraform workspace" commands should be used instead. "terraform env" -will be removed in a future Terraform version. -`) -} - -const ( - envNotSupported = `Backend does not support multiple workspaces` - - envExists = `Workspace %q already exists` - - envDoesNotExist = ` -Workspace %q doesn't exist. 
- -You can create this workspace with the "new" subcommand.` - - envChanged = `[reset][green]Switched to workspace %q.` - - envCreated = ` -[reset][green][bold]Created and switched to workspace %q![reset][green] - -You're now on a new, empty workspace. Workspaces isolate their state, -so if you run "terraform plan" Terraform will not see any existing state -for this configuration. -` - - envDeleted = `[reset][green]Deleted workspace %q!` - - envNotEmpty = ` -Workspace %[1]q is not empty. - -Deleting %[1]q can result in dangling resources: resources that -exist but are no longer manageable by Terraform. Please destroy -these resources first. If you want to delete this workspace -anyway and risk dangling resources, use the '-force' flag. -` - - envWarnNotEmpty = `[reset][yellow]WARNING: %q was non-empty. -The resources managed by the deleted workspace may still exist, -but are no longer manageable by Terraform since the state has -been deleted. -` - - envDelCurrent = ` -Workspace %[1]q is your active workspace. - -You cannot delete the currently active workspace. Please switch -to another workspace and try again. -` - - envInvalidName = ` -The workspace name %q is not allowed. The name must contain only URL safe -characters, and no path separators. -` - - envIsOverriddenNote = ` - -The active workspace is being overridden using the TF_WORKSPACE environment -variable. -` - - envIsOverriddenSelectError = ` -The selected workspace is currently overridden using the TF_WORKSPACE -environment variable. - -To select a new workspace, either update this environment variable or unset -it and then run this command again. -` - - envIsOverriddenNewError = ` -The workspace is currently overridden using the TF_WORKSPACE environment -variable. You cannot create a new workspace when using this setting. - -To create a new workspace, either unset this environment variable or update it -to match the workspace name you are trying to create, and then run this command -again. 
-` -) diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_delete.go b/vendor/github.com/hashicorp/terraform/command/workspace_delete.go deleted file mode 100644 index 99dac86d3b2..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/workspace_delete.go +++ /dev/null @@ -1,191 +0,0 @@ -package command - -import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/state" - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -type WorkspaceDeleteCommand struct { - Meta - LegacyName bool -} - -func (c *WorkspaceDeleteCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - envCommandShowWarning(c.Ui, c.LegacyName) - - force := false - cmdFlags := c.Meta.flagSet("workspace") - cmdFlags.BoolVar(&force, "force", false, "force removal of a non-empty workspace") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - args = cmdFlags.Args() - if len(args) == 0 { - c.Ui.Error("expected NAME.\n") - return cli.RunResultHelp - } - - delEnv := args[0] - - if !validWorkspaceName(delEnv) { - c.Ui.Error(fmt.Sprintf(envInvalidName, delEnv)) - return 1 - } - - configPath, err := ModulePath(args[1:]) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - cfg, err := c.Config(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) - return 1 - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: cfg, - }) - - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - states, err := b.States() - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - exists := false - for _, s := range states { - if delEnv == s { - exists = true - break - } - } - - if !exists { - c.Ui.Error(fmt.Sprintf(strings.TrimSpace(envDoesNotExist), delEnv)) - return 1 - } - - if 
delEnv == c.Workspace() { - c.Ui.Error(fmt.Sprintf(strings.TrimSpace(envDelCurrent), delEnv)) - return 1 - } - - // we need the actual state to see if it's empty - sMgr, err := b.State(delEnv) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - if err := sMgr.RefreshState(); err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - hasResources := sMgr.State().HasResources() - - if hasResources && !force { - c.Ui.Error(fmt.Sprintf(strings.TrimSpace(envNotEmpty), delEnv)) - return 1 - } - - // Honor the lock request, for consistency and one final safety check. - if c.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), c.stateLockTimeout) - defer cancel() - - // Lock the state if we can - lockInfo := state.NewLockInfo() - lockInfo.Operation = "workspace delete" - lockID, err := clistate.Lock(lockCtx, sMgr, lockInfo, c.Ui, c.Colorize()) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) - return 1 - } - - // We need to release the lock just before deleting the state, in case - // the backend can't remove the resource while holding the lock. This - // is currently true for Windows local files. - // - // TODO: While there is little safety in locking while deleting the - // state, it might be nice to be able to coordinate processes around - // state deletion, i.e. in a CI environment. Adding Delete() as a - // required method of States would allow the removal of the resource to - // be delegated from the Backend to the State itself. 
- clistate.Unlock(sMgr, lockID, c.Ui, c.Colorize()) - } - - err = b.DeleteState(delEnv) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - c.Ui.Output( - c.Colorize().Color( - fmt.Sprintf(envDeleted, delEnv), - ), - ) - - if hasResources { - c.Ui.Output( - c.Colorize().Color( - fmt.Sprintf(envWarnNotEmpty, delEnv), - ), - ) - } - - return 0 -} - -func (c *WorkspaceDeleteCommand) AutocompleteArgs() complete.Predictor { - return completePredictSequence{ - complete.PredictNothing, // the "select" subcommand itself (already matched) - c.completePredictWorkspaceName(), - complete.PredictDirs(""), - } -} - -func (c *WorkspaceDeleteCommand) AutocompleteFlags() complete.Flags { - return complete.Flags{ - "-force": complete.PredictNothing, - } -} - -func (c *WorkspaceDeleteCommand) Help() string { - helpText := ` -Usage: terraform workspace delete [OPTIONS] NAME [DIR] - - Delete a Terraform workspace - - -Options: - - -force remove a non-empty workspace. -` - return strings.TrimSpace(helpText) -} - -func (c *WorkspaceDeleteCommand) Synopsis() string { - return "Delete a workspace" -} diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_list.go b/vendor/github.com/hashicorp/terraform/command/workspace_list.go deleted file mode 100644 index da7f9b19c6b..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/workspace_list.go +++ /dev/null @@ -1,99 +0,0 @@ -package command - -import ( - "bytes" - "fmt" - "strings" - - "github.com/posener/complete" -) - -type WorkspaceListCommand struct { - Meta - LegacyName bool -} - -func (c *WorkspaceListCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - envCommandShowWarning(c.Ui, c.LegacyName) - - cmdFlags := c.Meta.flagSet("workspace list") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - args = cmdFlags.Args() - configPath, err := ModulePath(args) - if err != nil { - 
c.Ui.Error(err.Error()) - return 1 - } - - cfg, err := c.Config(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) - return 1 - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: cfg, - }) - - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - states, err := b.States() - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - env, isOverridden := c.WorkspaceOverridden() - - var out bytes.Buffer - for _, s := range states { - if s == env { - out.WriteString("* ") - } else { - out.WriteString(" ") - } - out.WriteString(s + "\n") - } - - c.Ui.Output(out.String()) - - if isOverridden { - c.Ui.Output(envIsOverriddenNote) - } - - return 0 -} - -func (c *WorkspaceListCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictDirs("") -} - -func (c *WorkspaceListCommand) AutocompleteFlags() complete.Flags { - return nil -} - -func (c *WorkspaceListCommand) Help() string { - helpText := ` -Usage: terraform workspace list [DIR] - - List Terraform workspaces. 
-` - return strings.TrimSpace(helpText) -} - -func (c *WorkspaceListCommand) Synopsis() string { - return "List Workspaces" -} diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_new.go b/vendor/github.com/hashicorp/terraform/command/workspace_new.go deleted file mode 100644 index 71c9fdc1fcc..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/workspace_new.go +++ /dev/null @@ -1,190 +0,0 @@ -package command - -import ( - "context" - "fmt" - "os" - "strings" - - "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -type WorkspaceNewCommand struct { - Meta - LegacyName bool -} - -func (c *WorkspaceNewCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - envCommandShowWarning(c.Ui, c.LegacyName) - - statePath := "" - - cmdFlags := c.Meta.flagSet("workspace new") - cmdFlags.StringVar(&statePath, "state", "", "terraform state file") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - args = cmdFlags.Args() - if len(args) == 0 { - c.Ui.Error("Expected a single argument: NAME.\n") - return cli.RunResultHelp - } - - newEnv := args[0] - - if !validWorkspaceName(newEnv) { - c.Ui.Error(fmt.Sprintf(envInvalidName, newEnv)) - return 1 - } - - // You can't ask to create a workspace when you're overriding the - // workspace name to be something different. 
- if current, isOverridden := c.WorkspaceOverridden(); current != newEnv && isOverridden { - c.Ui.Error(envIsOverriddenNewError) - return 1 - } - - configPath, err := ModulePath(args[1:]) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - conf, err := c.Config(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: conf, - }) - - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - states, err := b.States() - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to get configured named states: %s", err)) - return 1 - } - for _, s := range states { - if newEnv == s { - c.Ui.Error(fmt.Sprintf(envExists, newEnv)) - return 1 - } - } - - _, err = b.State(newEnv) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // now set the current workspace locally - if err := c.SetWorkspace(newEnv); err != nil { - c.Ui.Error(fmt.Sprintf("Error selecting new workspace: %s", err)) - return 1 - } - - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - strings.TrimSpace(envCreated), newEnv))) - - if statePath == "" { - // if we're not loading a state, then we're done - return 0 - } - - // load the new Backend state - sMgr, err := b.State(newEnv) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - if c.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), c.stateLockTimeout) - defer cancel() - - // Lock the state if we can - lockInfo := state.NewLockInfo() - lockInfo.Operation = "workspace new" - lockID, err := clistate.Lock(lockCtx, sMgr, lockInfo, c.Ui, c.Colorize()) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) - return 1 - } - defer clistate.Unlock(sMgr, lockID, c.Ui, c.Colorize()) - } - - // read the existing state file - stateFile, err := os.Open(statePath) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - s, err := 
terraform.ReadState(stateFile) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // save the existing state in the new Backend. - err = sMgr.WriteState(s) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - err = sMgr.PersistState() - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - return 0 -} - -func (c *WorkspaceNewCommand) AutocompleteArgs() complete.Predictor { - return completePredictSequence{ - complete.PredictNothing, // the "new" subcommand itself (already matched) - complete.PredictAnything, - complete.PredictDirs(""), - } -} - -func (c *WorkspaceNewCommand) AutocompleteFlags() complete.Flags { - return complete.Flags{ - "-state": complete.PredictFiles("*.tfstate"), - } -} - -func (c *WorkspaceNewCommand) Help() string { - helpText := ` -Usage: terraform workspace new [OPTIONS] NAME [DIR] - - Create a new Terraform workspace. - - -Options: - - -state=path Copy an existing state file into the new workspace. -` - return strings.TrimSpace(helpText) -} - -func (c *WorkspaceNewCommand) Synopsis() string { - return "Create a new workspace" -} diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_select.go b/vendor/github.com/hashicorp/terraform/command/workspace_select.go deleted file mode 100644 index 7070cc6116f..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/workspace_select.go +++ /dev/null @@ -1,130 +0,0 @@ -package command - -import ( - "fmt" - "strings" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -type WorkspaceSelectCommand struct { - Meta - LegacyName bool -} - -func (c *WorkspaceSelectCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - envCommandShowWarning(c.Ui, c.LegacyName) - - cmdFlags := c.Meta.flagSet("workspace select") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - args = cmdFlags.Args() - if len(args) == 0 { - 
c.Ui.Error("Expected a single argument: NAME.\n") - return cli.RunResultHelp - } - - configPath, err := ModulePath(args[1:]) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - conf, err := c.Config(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) - } - - current, isOverridden := c.WorkspaceOverridden() - if isOverridden { - c.Ui.Error(envIsOverriddenSelectError) - return 1 - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - Config: conf, - }) - - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - name := args[0] - if !validWorkspaceName(name) { - c.Ui.Error(fmt.Sprintf(envInvalidName, name)) - return 1 - } - - states, err := b.States() - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - if name == current { - // already using this workspace - return 0 - } - - found := false - for _, s := range states { - if name == s { - found = true - break - } - } - - if !found { - c.Ui.Error(fmt.Sprintf(envDoesNotExist, name)) - return 1 - } - - err = c.SetWorkspace(name) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - c.Ui.Output( - c.Colorize().Color( - fmt.Sprintf(envChanged, name), - ), - ) - - return 0 -} - -func (c *WorkspaceSelectCommand) AutocompleteArgs() complete.Predictor { - return completePredictSequence{ - complete.PredictNothing, // the "select" subcommand itself (already matched) - c.completePredictWorkspaceName(), - complete.PredictDirs(""), - } -} - -func (c *WorkspaceSelectCommand) AutocompleteFlags() complete.Flags { - return nil -} - -func (c *WorkspaceSelectCommand) Help() string { - helpText := ` -Usage: terraform workspace select NAME [DIR] - - Select a different Terraform workspace. 
-` - return strings.TrimSpace(helpText) -} - -func (c *WorkspaceSelectCommand) Synopsis() string { - return "Select a workspace" -} diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_show.go b/vendor/github.com/hashicorp/terraform/command/workspace_show.go deleted file mode 100644 index cca688d780c..00000000000 --- a/vendor/github.com/hashicorp/terraform/command/workspace_show.go +++ /dev/null @@ -1,50 +0,0 @@ -package command - -import ( - "strings" - - "github.com/posener/complete" -) - -type WorkspaceShowCommand struct { - Meta -} - -func (c *WorkspaceShowCommand) Run(args []string) int { - args, err := c.Meta.process(args, true) - if err != nil { - return 1 - } - - cmdFlags := c.Meta.flagSet("workspace show") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - workspace := c.Workspace() - c.Ui.Output(workspace) - - return 0 -} - -func (c *WorkspaceShowCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictNothing -} - -func (c *WorkspaceShowCommand) AutocompleteFlags() complete.Flags { - return nil -} - -func (c *WorkspaceShowCommand) Help() string { - helpText := ` -Usage: terraform workspace show - - Show the name of the current workspace. -` - return strings.TrimSpace(helpText) -} - -func (c *WorkspaceShowCommand) Synopsis() string { - return "Show the name of the current workspace" -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go deleted file mode 100644 index edc1e2a9301..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go +++ /dev/null @@ -1,83 +0,0 @@ -package shadow - -import ( - "fmt" - "io" - "reflect" - - "github.com/hashicorp/go-multierror" - "github.com/mitchellh/reflectwalk" -) - -// Close will close all shadow values within the given structure. 
-// -// This uses reflection to walk the structure, find all shadow elements, -// and close them. Currently this will only find struct fields that are -// shadow values, and not slice elements, etc. -func Close(v interface{}) error { - // We require a pointer so we can address the internal fields - val := reflect.ValueOf(v) - if val.Kind() != reflect.Ptr { - return fmt.Errorf("value must be a pointer") - } - - // Walk and close - var w closeWalker - if err := reflectwalk.Walk(v, &w); err != nil { - return err - } - - return w.Err -} - -type closeWalker struct { - Err error -} - -func (w *closeWalker) Struct(reflect.Value) error { - // Do nothing. We implement this for reflectwalk.StructWalker - return nil -} - -var closerType = reflect.TypeOf((*io.Closer)(nil)).Elem() - -func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error { - // Not sure why this would be but lets avoid some panics - if !v.IsValid() { - return nil - } - - // Empty for exported, so don't check unexported fields - if f.PkgPath != "" { - return nil - } - - // Verify the io.Closer is in this package - typ := v.Type() - if typ.PkgPath() != "github.com/hashicorp/terraform/helper/shadow" { - return nil - } - - var closer io.Closer - if v.Type().Implements(closerType) { - closer = v.Interface().(io.Closer) - } else if v.CanAddr() { - // The Close method may require a pointer receiver, but we only have a value. 
- v := v.Addr() - if v.Type().Implements(closerType) { - closer = v.Interface().(io.Closer) - } - } - - if closer == nil { - return reflectwalk.SkipEntry - } - - // Close it - if err := closer.Close(); err != nil { - w.Err = multierror.Append(w.Err, err) - } - - // Don't go into the struct field - return reflectwalk.SkipEntry -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go deleted file mode 100644 index 4223e9255e2..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go +++ /dev/null @@ -1,128 +0,0 @@ -package shadow - -import ( - "sync" -) - -// ComparedValue is a struct that finds a value by comparing some key -// to the list of stored values. This is useful when there is no easy -// uniquely identifying key that works in a map (for that, use KeyedValue). -// -// ComparedValue is very expensive, relative to other Value types. Try to -// limit the number of values stored in a ComparedValue by potentially -// nesting it within a KeyedValue (a keyed value points to a compared value, -// for example). -type ComparedValue struct { - // Func is a function that is given the lookup key and a single - // stored value. If it matches, it returns true. - Func func(k, v interface{}) bool - - lock sync.Mutex - once sync.Once - closed bool - values []interface{} - waiters map[interface{}]*Value -} - -// Close closes the value. This can never fail. For a definition of -// "close" see the ErrClosed docs. -func (w *ComparedValue) Close() error { - w.lock.Lock() - defer w.lock.Unlock() - - // Set closed to true always - w.closed = true - - // For all waiters, complete with ErrClosed - for k, val := range w.waiters { - val.SetValue(ErrClosed) - delete(w.waiters, k) - } - - return nil -} - -// Value returns the value that was set for the given key, or blocks -// until one is available. 
-func (w *ComparedValue) Value(k interface{}) interface{} { - v, val := w.valueWaiter(k) - if val == nil { - return v - } - - return val.Value() -} - -// ValueOk gets the value for the given key, returning immediately if the -// value doesn't exist. The second return argument is true if the value exists. -func (w *ComparedValue) ValueOk(k interface{}) (interface{}, bool) { - v, val := w.valueWaiter(k) - return v, val == nil -} - -func (w *ComparedValue) SetValue(v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - w.once.Do(w.init) - - // Check if we already have this exact value (by simply comparing - // with == directly). If we do, then we don't insert it again. - found := false - for _, v2 := range w.values { - if v == v2 { - found = true - break - } - } - - if !found { - // Set the value, always - w.values = append(w.values, v) - } - - // Go through the waiters - for k, val := range w.waiters { - if w.Func(k, v) { - val.SetValue(v) - delete(w.waiters, k) - } - } -} - -func (w *ComparedValue) valueWaiter(k interface{}) (interface{}, *Value) { - w.lock.Lock() - w.once.Do(w.init) - - // Look for a pre-existing value - for _, v := range w.values { - if w.Func(k, v) { - w.lock.Unlock() - return v, nil - } - } - - // If we're closed, return that - if w.closed { - w.lock.Unlock() - return ErrClosed, nil - } - - // Pre-existing value doesn't exist, create a waiter - val := w.waiters[k] - if val == nil { - val = new(Value) - w.waiters[k] = val - } - w.lock.Unlock() - - // Return the waiter - return nil, val -} - -// Must be called with w.lock held. 
-func (w *ComparedValue) init() { - w.waiters = make(map[interface{}]*Value) - if w.Func == nil { - w.Func = func(k, v interface{}) bool { return k == v } - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go deleted file mode 100644 index 432b03668ea..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go +++ /dev/null @@ -1,151 +0,0 @@ -package shadow - -import ( - "sync" -) - -// KeyedValue is a struct that coordinates a value by key. If a value is -// not available for a give key, it'll block until it is available. -type KeyedValue struct { - lock sync.Mutex - once sync.Once - values map[string]interface{} - waiters map[string]*Value - closed bool -} - -// Close closes the value. This can never fail. For a definition of -// "close" see the ErrClosed docs. -func (w *KeyedValue) Close() error { - w.lock.Lock() - defer w.lock.Unlock() - - // Set closed to true always - w.closed = true - - // For all waiters, complete with ErrClosed - for k, val := range w.waiters { - val.SetValue(ErrClosed) - delete(w.waiters, k) - } - - return nil -} - -// Value returns the value that was set for the given key, or blocks -// until one is available. -func (w *KeyedValue) Value(k string) interface{} { - w.lock.Lock() - v, val := w.valueWaiter(k) - w.lock.Unlock() - - // If we have no waiter, then return the value - if val == nil { - return v - } - - // We have a waiter, so wait - return val.Value() -} - -// WaitForChange waits for the value with the given key to be set again. -// If the key isn't set, it'll wait for an initial value. Note that while -// it is called "WaitForChange", the value isn't guaranteed to _change_; -// this will return when a SetValue is called for the given k. 
-func (w *KeyedValue) WaitForChange(k string) interface{} { - w.lock.Lock() - w.once.Do(w.init) - - // If we're closed, we're closed - if w.closed { - w.lock.Unlock() - return ErrClosed - } - - // Check for an active waiter. If there isn't one, make it - val := w.waiters[k] - if val == nil { - val = new(Value) - w.waiters[k] = val - } - w.lock.Unlock() - - // And wait - return val.Value() -} - -// ValueOk gets the value for the given key, returning immediately if the -// value doesn't exist. The second return argument is true if the value exists. -func (w *KeyedValue) ValueOk(k string) (interface{}, bool) { - w.lock.Lock() - defer w.lock.Unlock() - - v, val := w.valueWaiter(k) - return v, val == nil -} - -func (w *KeyedValue) SetValue(k string, v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - w.setValue(k, v) -} - -// Init will initialize the key to a given value only if the key has -// not been set before. This is safe to call multiple times and in parallel. -func (w *KeyedValue) Init(k string, v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - - // If we have a waiter, set the value. - _, val := w.valueWaiter(k) - if val != nil { - w.setValue(k, v) - } -} - -// Must be called with w.lock held. -func (w *KeyedValue) init() { - w.values = make(map[string]interface{}) - w.waiters = make(map[string]*Value) -} - -// setValue is like SetValue but assumes the lock is held. -func (w *KeyedValue) setValue(k string, v interface{}) { - w.once.Do(w.init) - - // Set the value, always - w.values[k] = v - - // If we have a waiter, set it - if val, ok := w.waiters[k]; ok { - val.SetValue(v) - delete(w.waiters, k) - } -} - -// valueWaiter gets the value or the Value waiter for a given key. -// -// This must be called with lock held. 
-func (w *KeyedValue) valueWaiter(k string) (interface{}, *Value) { - w.once.Do(w.init) - - // If we have this value already, return it - if v, ok := w.values[k]; ok { - return v, nil - } - - // If we're closed, return that - if w.closed { - return ErrClosed, nil - } - - // No pending value, check for a waiter - val := w.waiters[k] - if val == nil { - val = new(Value) - w.waiters[k] = val - } - - // Return the waiter - return nil, val -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go deleted file mode 100644 index 0a43d4d4d43..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go +++ /dev/null @@ -1,66 +0,0 @@ -package shadow - -import ( - "container/list" - "sync" -) - -// OrderedValue is a struct that keeps track of a value in the order -// it is set. Each time Value() is called, it will return the most recent -// calls value then discard it. -// -// This is unlike Value that returns the same value once it is set. -type OrderedValue struct { - lock sync.Mutex - values *list.List - waiters *list.List -} - -// Value returns the last value that was set, or blocks until one -// is received. -func (w *OrderedValue) Value() interface{} { - w.lock.Lock() - - // If we have a pending value already, use it - if w.values != nil && w.values.Len() > 0 { - front := w.values.Front() - w.values.Remove(front) - w.lock.Unlock() - return front.Value - } - - // No pending value, create a waiter - if w.waiters == nil { - w.waiters = list.New() - } - - var val Value - w.waiters.PushBack(&val) - w.lock.Unlock() - - // Return the value once we have it - return val.Value() -} - -// SetValue sets the latest value. 
-func (w *OrderedValue) SetValue(v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - - // If we have a waiter, notify it - if w.waiters != nil && w.waiters.Len() > 0 { - front := w.waiters.Front() - w.waiters.Remove(front) - - val := front.Value.(*Value) - val.SetValue(v) - return - } - - // Add it to the list of values - if w.values == nil { - w.values = list.New() - } - - w.values.PushBack(v) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go deleted file mode 100644 index 178b7e78ade..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go +++ /dev/null @@ -1,87 +0,0 @@ -package shadow - -import ( - "errors" - "sync" -) - -// ErrClosed is returned by any closed values. -// -// A "closed value" is when the shadow has been notified that the real -// side is complete and any blocking values will _never_ be satisfied -// in the future. In this case, this error is returned. If a value is already -// available, that is still returned. -var ErrClosed = errors.New("shadow closed") - -// Value is a struct that coordinates a value between two -// parallel routines. It is similar to atomic.Value except that when -// Value is called if it isn't set it will wait for it. -// -// The Value can be closed with Close, which will cause any future -// blocking operations to return immediately with ErrClosed. -type Value struct { - lock sync.Mutex - cond *sync.Cond - value interface{} - valueSet bool -} - -func (v *Value) Lock() { - v.lock.Lock() -} - -func (v *Value) Unlock() { - v.lock.Unlock() -} - -// Close closes the value. This can never fail. For a definition of -// "close" see the struct docs. -func (w *Value) Close() error { - w.lock.Lock() - set := w.valueSet - w.lock.Unlock() - - // If we haven't set the value, set it - if !set { - w.SetValue(ErrClosed) - } - - // Done - return nil -} - -// Value returns the value that was set. 
-func (w *Value) Value() interface{} { - w.lock.Lock() - defer w.lock.Unlock() - - // If we already have a value just return - for !w.valueSet { - // No value, setup the condition variable if we have to - if w.cond == nil { - w.cond = sync.NewCond(&w.lock) - } - - // Wait on it - w.cond.Wait() - } - - // Return the value - return w.value -} - -// SetValue sets the value. -func (w *Value) SetValue(v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - - // Set the value - w.valueSet = true - w.value = v - - // If we have a condition, clear it - if w.cond != nil { - w.cond.Broadcast() - w.cond = nil - } -} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/artifactory.go b/vendor/github.com/hashicorp/terraform/state/remote/artifactory.go deleted file mode 100644 index 727e9faf037..00000000000 --- a/vendor/github.com/hashicorp/terraform/state/remote/artifactory.go +++ /dev/null @@ -1,117 +0,0 @@ -package remote - -import ( - "crypto/md5" - "fmt" - "os" - "strings" - - artifactory "github.com/lusis/go-artifactory/src/artifactory.v401" -) - -const ARTIF_TFSTATE_NAME = "terraform.tfstate" - -func artifactoryFactory(conf map[string]string) (Client, error) { - userName, ok := conf["username"] - if !ok { - userName = os.Getenv("ARTIFACTORY_USERNAME") - if userName == "" { - return nil, fmt.Errorf( - "missing 'username' configuration or ARTIFACTORY_USERNAME environment variable") - } - } - password, ok := conf["password"] - if !ok { - password = os.Getenv("ARTIFACTORY_PASSWORD") - if password == "" { - return nil, fmt.Errorf( - "missing 'password' configuration or ARTIFACTORY_PASSWORD environment variable") - } - } - url, ok := conf["url"] - if !ok { - url = os.Getenv("ARTIFACTORY_URL") - if url == "" { - return nil, fmt.Errorf( - "missing 'url' configuration or ARTIFACTORY_URL environment variable") - } - } - repo, ok := conf["repo"] - if !ok { - return nil, fmt.Errorf( - "missing 'repo' configuration") - } - subpath, ok := conf["subpath"] - if !ok { - 
return nil, fmt.Errorf( - "missing 'subpath' configuration") - } - - clientConf := &artifactory.ClientConfig{ - BaseURL: url, - Username: userName, - Password: password, - } - nativeClient := artifactory.NewClient(clientConf) - - return &ArtifactoryClient{ - nativeClient: &nativeClient, - userName: userName, - password: password, - url: url, - repo: repo, - subpath: subpath, - }, nil - -} - -type ArtifactoryClient struct { - nativeClient *artifactory.ArtifactoryClient - userName string - password string - url string - repo string - subpath string -} - -func (c *ArtifactoryClient) Get() (*Payload, error) { - p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME) - output, err := c.nativeClient.Get(p, make(map[string]string)) - if err != nil { - if strings.Contains(err.Error(), "404") { - return nil, nil - } - return nil, err - } - - // TODO: migrate to using X-Checksum-Md5 header from artifactory - // needs to be exposed by go-artifactory first - - hash := md5.Sum(output) - payload := &Payload{ - Data: output, - MD5: hash[:md5.Size], - } - - // If there was no data, then return nil - if len(payload.Data) == 0 { - return nil, nil - } - - return payload, nil -} - -func (c *ArtifactoryClient) Put(data []byte) error { - p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME) - if _, err := c.nativeClient.Put(p, string(data), make(map[string]string)); err == nil { - return nil - } else { - return fmt.Errorf("Failed to upload state: %v", err) - } -} - -func (c *ArtifactoryClient) Delete() error { - p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME) - err := c.nativeClient.Delete(p) - return err -} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/etcd.go b/vendor/github.com/hashicorp/terraform/state/remote/etcd.go deleted file mode 100644 index 7993603ff2e..00000000000 --- a/vendor/github.com/hashicorp/terraform/state/remote/etcd.go +++ /dev/null @@ -1,78 +0,0 @@ -package remote - -import ( - "crypto/md5" - "fmt" - 
"strings" - - etcdapi "github.com/coreos/etcd/client" - "golang.org/x/net/context" -) - -func etcdFactory(conf map[string]string) (Client, error) { - path, ok := conf["path"] - if !ok { - return nil, fmt.Errorf("missing 'path' configuration") - } - - endpoints, ok := conf["endpoints"] - if !ok || endpoints == "" { - return nil, fmt.Errorf("missing 'endpoints' configuration") - } - - config := etcdapi.Config{ - Endpoints: strings.Split(endpoints, " "), - } - if username, ok := conf["username"]; ok && username != "" { - config.Username = username - } - if password, ok := conf["password"]; ok && password != "" { - config.Password = password - } - - client, err := etcdapi.New(config) - if err != nil { - return nil, err - } - - return &EtcdClient{ - Client: client, - Path: path, - }, nil -} - -// EtcdClient is a remote client that stores data in etcd. -type EtcdClient struct { - Client etcdapi.Client - Path string -} - -func (c *EtcdClient) Get() (*Payload, error) { - resp, err := etcdapi.NewKeysAPI(c.Client).Get(context.Background(), c.Path, &etcdapi.GetOptions{Quorum: true}) - if err != nil { - if err, ok := err.(etcdapi.Error); ok && err.Code == etcdapi.ErrorCodeKeyNotFound { - return nil, nil - } - return nil, err - } - if resp.Node.Dir { - return nil, fmt.Errorf("path is a directory") - } - - data := []byte(resp.Node.Value) - md5 := md5.Sum(data) - return &Payload{ - Data: data, - MD5: md5[:], - }, nil -} - -func (c *EtcdClient) Put(data []byte) error { - _, err := etcdapi.NewKeysAPI(c.Client).Set(context.Background(), c.Path, string(data), nil) - return err -} - -func (c *EtcdClient) Delete() error { - _, err := etcdapi.NewKeysAPI(c.Client).Delete(context.Background(), c.Path, nil) - return err -} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/file.go b/vendor/github.com/hashicorp/terraform/state/remote/file.go deleted file mode 100644 index f3cbdb45eee..00000000000 --- a/vendor/github.com/hashicorp/terraform/state/remote/file.go +++ /dev/null @@ 
-1,64 +0,0 @@ -package remote - -import ( - "bytes" - "crypto/md5" - "fmt" - "io" - "os" -) - -func fileFactory(conf map[string]string) (Client, error) { - path, ok := conf["path"] - if !ok { - return nil, fmt.Errorf("missing 'path' configuration") - } - - return &FileClient{ - Path: path, - }, nil -} - -// FileClient is a remote client that stores data locally on disk. -// This is only used for development reasons to test remote state... locally. -type FileClient struct { - Path string -} - -func (c *FileClient) Get() (*Payload, error) { - var buf bytes.Buffer - f, err := os.Open(c.Path) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - - return nil, err - } - defer f.Close() - - if _, err := io.Copy(&buf, f); err != nil { - return nil, err - } - - md5 := md5.Sum(buf.Bytes()) - return &Payload{ - Data: buf.Bytes(), - MD5: md5[:], - }, nil -} - -func (c *FileClient) Put(data []byte) error { - f, err := os.Create(c.Path) - if err != nil { - return err - } - defer f.Close() - - _, err = f.Write(data) - return err -} - -func (c *FileClient) Delete() error { - return os.Remove(c.Path) -} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/gcs.go b/vendor/github.com/hashicorp/terraform/state/remote/gcs.go deleted file mode 100644 index bfd5118cdcf..00000000000 --- a/vendor/github.com/hashicorp/terraform/state/remote/gcs.go +++ /dev/null @@ -1,176 +0,0 @@ -package remote - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "log" - "net/http" - "os" - "runtime" - "strings" - - "github.com/hashicorp/terraform/helper/pathorcontents" - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" - "google.golang.org/api/googleapi" - "google.golang.org/api/storage/v1" - - version "github.com/hashicorp/terraform/version" -) - -// accountFile represents the structure of the credentials JSON -type accountFile struct { - PrivateKeyId string `json:"private_key_id"` - PrivateKey string 
`json:"private_key"` - ClientEmail string `json:"client_email"` - ClientId string `json:"client_id"` -} - -func parseJSON(result interface{}, contents string) error { - r := strings.NewReader(contents) - dec := json.NewDecoder(r) - - return dec.Decode(result) -} - -type GCSClient struct { - bucket string - path string - clientStorage *storage.Service - context context.Context -} - -func gcsFactory(conf map[string]string) (Client, error) { - var account accountFile - var client *http.Client - clientScopes := []string{ - "https://www.googleapis.com/auth/devstorage.full_control", - } - - bucketName, ok := conf["bucket"] - if !ok { - return nil, fmt.Errorf("missing 'bucket' configuration") - } - - pathName, ok := conf["path"] - if !ok { - return nil, fmt.Errorf("missing 'path' configuration") - } - - credentials, ok := conf["credentials"] - if !ok { - credentials = os.Getenv("GOOGLE_CREDENTIALS") - } - - if credentials != "" { - contents, _, err := pathorcontents.Read(credentials) - if err != nil { - return nil, fmt.Errorf("Error loading credentials: %s", err) - } - - // Assume account_file is a JSON string - if err := parseJSON(&account, contents); err != nil { - return nil, fmt.Errorf("Error parsing credentials '%s': %s", contents, err) - } - - // Get the token for use in our requests - log.Printf("[INFO] Requesting Google token...") - log.Printf("[INFO] -- Email: %s", account.ClientEmail) - log.Printf("[INFO] -- Scopes: %s", clientScopes) - log.Printf("[INFO] -- Private Key Length: %d", len(account.PrivateKey)) - - conf := jwt.Config{ - Email: account.ClientEmail, - PrivateKey: []byte(account.PrivateKey), - Scopes: clientScopes, - TokenURL: "https://accounts.google.com/o/oauth2/token", - } - - client = conf.Client(oauth2.NoContext) - - } else { - log.Printf("[INFO] Authenticating using DefaultClient") - err := error(nil) - client, err = google.DefaultClient(oauth2.NoContext, clientScopes...) 
- if err != nil { - return nil, err - } - } - versionString := version.Version - userAgent := fmt.Sprintf( - "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) - - log.Printf("[INFO] Instantiating Google Storage Client...") - clientStorage, err := storage.New(client) - if err != nil { - return nil, err - } - clientStorage.UserAgent = userAgent - - return &GCSClient{ - clientStorage: clientStorage, - bucket: bucketName, - path: pathName, - }, nil - -} - -func (c *GCSClient) Get() (*Payload, error) { - // Read the object from bucket. - log.Printf("[INFO] Reading %s/%s", c.bucket, c.path) - - resp, err := c.clientStorage.Objects.Get(c.bucket, c.path).Download() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[INFO] %s/%s not found", c.bucket, c.path) - - return nil, nil - } - - return nil, fmt.Errorf("[WARN] Error retrieving object %s/%s: %s", c.bucket, c.path, err) - } - defer resp.Body.Close() - - var buf []byte - w := bytes.NewBuffer(buf) - n, err := io.Copy(w, resp.Body) - if err != nil { - log.Fatalf("[WARN] error buffering %q: %v", c.path, err) - } - log.Printf("[INFO] Downloaded %d bytes", n) - - payload := &Payload{ - Data: w.Bytes(), - } - - // If there was no data, then return nil - if len(payload.Data) == 0 { - return nil, nil - } - - return payload, nil -} - -func (c *GCSClient) Put(data []byte) error { - log.Printf("[INFO] Writing %s/%s", c.bucket, c.path) - - r := bytes.NewReader(data) - _, err := c.clientStorage.Objects.Insert(c.bucket, &storage.Object{Name: c.path}).Media(r).Do() - if err != nil { - return err - } - - return nil -} - -func (c *GCSClient) Delete() error { - log.Printf("[INFO] Deleting %s/%s", c.bucket, c.path) - - err := c.clientStorage.Objects.Delete(c.bucket, c.path).Do() - return err - -} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/http.go b/vendor/github.com/hashicorp/terraform/state/remote/http.go deleted file mode 100644 index 
b30c15590cb..00000000000 --- a/vendor/github.com/hashicorp/terraform/state/remote/http.go +++ /dev/null @@ -1,338 +0,0 @@ -package remote - -import ( - "bytes" - "crypto/md5" - "crypto/tls" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - - "github.com/hashicorp/terraform/state" -) - -func httpFactory(conf map[string]string) (Client, error) { - address, ok := conf["address"] - if !ok { - return nil, fmt.Errorf("missing 'address' configuration") - } - - updateURL, err := url.Parse(address) - if err != nil { - return nil, fmt.Errorf("failed to parse address URL: %s", err) - } - if updateURL.Scheme != "http" && updateURL.Scheme != "https" { - return nil, fmt.Errorf("address must be HTTP or HTTPS") - } - updateMethod, ok := conf["update_method"] - if !ok { - updateMethod = "POST" - } - - var lockURL *url.URL - if lockAddress, ok := conf["lock_address"]; ok { - var err error - lockURL, err = url.Parse(lockAddress) - if err != nil { - return nil, fmt.Errorf("failed to parse lockAddress URL: %s", err) - } - if lockURL.Scheme != "http" && lockURL.Scheme != "https" { - return nil, fmt.Errorf("lockAddress must be HTTP or HTTPS") - } - } else { - lockURL = nil - } - lockMethod, ok := conf["lock_method"] - if !ok { - lockMethod = "LOCK" - } - - var unlockURL *url.URL - if unlockAddress, ok := conf["unlock_address"]; ok { - var err error - unlockURL, err = url.Parse(unlockAddress) - if err != nil { - return nil, fmt.Errorf("failed to parse unlockAddress URL: %s", err) - } - if unlockURL.Scheme != "http" && unlockURL.Scheme != "https" { - return nil, fmt.Errorf("unlockAddress must be HTTP or HTTPS") - } - } else { - unlockURL = nil - } - unlockMethod, ok := conf["unlock_method"] - if !ok { - unlockMethod = "UNLOCK" - } - - client := &http.Client{} - if skipRaw, ok := conf["skip_cert_verification"]; ok { - skip, err := strconv.ParseBool(skipRaw) - if err != nil { - return nil, fmt.Errorf("skip_cert_verification must be 
boolean") - } - if skip { - // Replace the client with one that ignores TLS verification - client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - }, - } - } - } - - ret := &HTTPClient{ - URL: updateURL, - UpdateMethod: updateMethod, - - LockURL: lockURL, - LockMethod: lockMethod, - UnlockURL: unlockURL, - UnlockMethod: unlockMethod, - - Username: conf["username"], - Password: conf["password"], - - // accessible only for testing use - Client: client, - } - - return ret, nil -} - -// HTTPClient is a remote client that stores data in Consul or HTTP REST. -type HTTPClient struct { - // Update & Retrieve - URL *url.URL - UpdateMethod string - - // Locking - LockURL *url.URL - LockMethod string - UnlockURL *url.URL - UnlockMethod string - - // HTTP - Client *http.Client - Username string - Password string - - lockID string - jsonLockInfo []byte -} - -func (c *HTTPClient) httpRequest(method string, url *url.URL, data *[]byte, what string) (*http.Response, error) { - // If we have data we need a reader - var reader io.Reader = nil - if data != nil { - reader = bytes.NewReader(*data) - } - - // Create the request - req, err := http.NewRequest(method, url.String(), reader) - if err != nil { - return nil, fmt.Errorf("Failed to make %s HTTP request: %s", what, err) - } - // Setup basic auth - if c.Username != "" { - req.SetBasicAuth(c.Username, c.Password) - } - - // Work with data/body - if data != nil { - req.Header.Set("Content-Type", "application/json") - req.ContentLength = int64(len(*data)) - - // Generate the MD5 - hash := md5.Sum(*data) - b64 := base64.StdEncoding.EncodeToString(hash[:]) - req.Header.Set("Content-MD5", b64) - } - - // Make the request - resp, err := c.Client.Do(req) - if err != nil { - return nil, fmt.Errorf("Failed to %s: %v", what, err) - } - - return resp, nil -} - -func (c *HTTPClient) Lock(info *state.LockInfo) (string, error) { - if c.LockURL == nil { - return "", nil - } - c.lockID 
= "" - - jsonLockInfo := info.Marshal() - resp, err := c.httpRequest(c.LockMethod, c.LockURL, &jsonLockInfo, "lock") - if err != nil { - return "", err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - c.lockID = info.ID - c.jsonLockInfo = jsonLockInfo - return info.ID, nil - case http.StatusUnauthorized: - return "", fmt.Errorf("HTTP remote state endpoint requires auth") - case http.StatusForbidden: - return "", fmt.Errorf("HTTP remote state endpoint invalid auth") - case http.StatusConflict, http.StatusLocked: - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("HTTP remote state already locked, failed to read body") - } - existing := state.LockInfo{} - err = json.Unmarshal(body, &existing) - if err != nil { - return "", fmt.Errorf("HTTP remote state already locked, failed to unmarshal body") - } - return "", fmt.Errorf("HTTP remote state already locked: ID=%s", existing.ID) - default: - return "", fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) - } -} - -func (c *HTTPClient) Unlock(id string) error { - if c.UnlockURL == nil { - return nil - } - - resp, err := c.httpRequest(c.UnlockMethod, c.UnlockURL, &c.jsonLockInfo, "unlock") - if err != nil { - return err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - return nil - default: - return fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) - } -} - -func (c *HTTPClient) Get() (*Payload, error) { - resp, err := c.httpRequest("GET", c.URL, nil, "get state") - if err != nil { - return nil, err - } - defer resp.Body.Close() - - // Handle the common status codes - switch resp.StatusCode { - case http.StatusOK: - // Handled after - case http.StatusNoContent: - return nil, nil - case http.StatusNotFound: - return nil, nil - case http.StatusUnauthorized: - return nil, fmt.Errorf("HTTP remote state endpoint requires auth") - case http.StatusForbidden: - return nil, 
fmt.Errorf("HTTP remote state endpoint invalid auth") - case http.StatusInternalServerError: - return nil, fmt.Errorf("HTTP remote state internal server error") - default: - return nil, fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) - } - - // Read in the body - buf := bytes.NewBuffer(nil) - if _, err := io.Copy(buf, resp.Body); err != nil { - return nil, fmt.Errorf("Failed to read remote state: %s", err) - } - - // Create the payload - payload := &Payload{ - Data: buf.Bytes(), - } - - // If there was no data, then return nil - if len(payload.Data) == 0 { - return nil, nil - } - - // Check for the MD5 - if raw := resp.Header.Get("Content-MD5"); raw != "" { - md5, err := base64.StdEncoding.DecodeString(raw) - if err != nil { - return nil, fmt.Errorf( - "Failed to decode Content-MD5 '%s': %s", raw, err) - } - - payload.MD5 = md5 - } else { - // Generate the MD5 - hash := md5.Sum(payload.Data) - payload.MD5 = hash[:] - } - - return payload, nil -} - -func (c *HTTPClient) Put(data []byte) error { - // Copy the target URL - base := *c.URL - - if c.lockID != "" { - query := base.Query() - query.Set("ID", c.lockID) - base.RawQuery = query.Encode() - } - - /* - // Set the force query parameter if needed - if force { - values := base.Query() - values.Set("force", "true") - base.RawQuery = values.Encode() - } - */ - - var method string = "POST" - if c.UpdateMethod != "" { - method = c.UpdateMethod - } - resp, err := c.httpRequest(method, &base, &data, "upload state") - if err != nil { - return err - } - defer resp.Body.Close() - - // Handle the error codes - switch resp.StatusCode { - case http.StatusOK: - return nil - default: - return fmt.Errorf("HTTP error: %d", resp.StatusCode) - } -} - -func (c *HTTPClient) Delete() error { - // Make the request - resp, err := c.httpRequest("DELETE", c.URL, nil, "delete state") - if err != nil { - return err - } - defer resp.Body.Close() - - // Handle the error codes - switch resp.StatusCode { - case http.StatusOK: - 
return nil - default: - return fmt.Errorf("HTTP error: %d", resp.StatusCode) - } -} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/remote.go b/vendor/github.com/hashicorp/terraform/state/remote/remote.go deleted file mode 100644 index 33bdea559b0..00000000000 --- a/vendor/github.com/hashicorp/terraform/state/remote/remote.go +++ /dev/null @@ -1,52 +0,0 @@ -package remote - -import ( - "fmt" - - "github.com/hashicorp/terraform/state" -) - -// Client is the interface that must be implemented for a remote state -// driver. It supports dumb put/get/delete, and the higher level structs -// handle persisting the state properly here. -type Client interface { - Get() (*Payload, error) - Put([]byte) error - Delete() error -} - -// ClientLocker is an optional interface that allows a remote state -// backend to enable state lock/unlock. -type ClientLocker interface { - Client - state.Locker -} - -// Payload is the return value from the remote state storage. -type Payload struct { - MD5 []byte - Data []byte -} - -// Factory is the factory function to create a remote client. -type Factory func(map[string]string) (Client, error) - -// NewClient returns a new Client with the given type and configuration. -// The client is looked up in the BuiltinClients variable. -func NewClient(t string, conf map[string]string) (Client, error) { - f, ok := BuiltinClients[t] - if !ok { - return nil, fmt.Errorf("unknown remote client type: %s", t) - } - - return f(conf) -} - -// BuiltinClients is the list of built-in clients that can be used with -// NewClient. 
-var BuiltinClients = map[string]Factory{ - "artifactory": artifactoryFactory, - "etcd": etcdFactory, - "http": httpFactory, - "local": fileFactory, -} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/state.go b/vendor/github.com/hashicorp/terraform/state/remote/state.go deleted file mode 100644 index 575e4d18757..00000000000 --- a/vendor/github.com/hashicorp/terraform/state/remote/state.go +++ /dev/null @@ -1,139 +0,0 @@ -package remote - -import ( - "bytes" - "log" - "sync" - - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -// State implements the State interfaces in the state package to handle -// reading and writing the remote state. This State on its own does no -// local caching so every persist will go to the remote storage and local -// writes will go to memory. -type State struct { - mu sync.Mutex - - Client Client - - state, readState *terraform.State -} - -// StateReader impl. -func (s *State) State() *terraform.State { - s.mu.Lock() - defer s.mu.Unlock() - - return s.state.DeepCopy() -} - -// StateWriter impl. -func (s *State) WriteState(state *terraform.State) error { - s.mu.Lock() - defer s.mu.Unlock() - - if s.readState != nil && !state.SameLineage(s.readState) { - // This can't error here, because we need to be able to overwrite the - // state in some cases, like `state push -force` or `workspace new - // -state=` - log.Printf("[WARN] incompatible state lineage; given %s but want %s", state.Lineage, s.readState.Lineage) - } - - // We create a deep copy of the state here, because the caller also has - // a reference to the given object and can potentially go on to mutate - // it after we return, but we want the snapshot at this point in time. - s.state = state.DeepCopy() - - // Force our new state to have the same serial as our read state. We'll - // update this if PersistState is called later. 
(We don't require nor trust - // the caller to properly maintain serial for transient state objects since - // the rest of Terraform treats state as an openly mutable object.) - // - // If we have no read state then we assume we're either writing a new - // state for the first time or we're migrating a state from elsewhere, - // and in both cases we wish to retain the lineage and serial from - // the given state. - if s.readState != nil { - s.state.Serial = s.readState.Serial - } - - return nil -} - -// StateRefresher impl. -func (s *State) RefreshState() error { - s.mu.Lock() - defer s.mu.Unlock() - - payload, err := s.Client.Get() - if err != nil { - return err - } - - // no remote state is OK - if payload == nil { - return nil - } - - state, err := terraform.ReadState(bytes.NewReader(payload.Data)) - if err != nil { - return err - } - - s.state = state - s.readState = s.state.DeepCopy() // our states must be separate instances so we can track changes - return nil -} - -// StatePersister impl. -func (s *State) PersistState() error { - s.mu.Lock() - defer s.mu.Unlock() - - if !s.state.MarshalEqual(s.readState) { - // Our new state does not marshal as byte-for-byte identical to - // the old, so we need to increment the serial. - // Note that in WriteState we force the serial to match that of - // s.readState, if we have a readState. - s.state.Serial++ - } - - var buf bytes.Buffer - if err := terraform.WriteState(s.state, &buf); err != nil { - return err - } - - err := s.Client.Put(buf.Bytes()) - if err != nil { - return err - } - - // After we've successfully persisted, what we just wrote is our new - // reference state until someone calls RefreshState again. - s.readState = s.state.DeepCopy() - return nil -} - -// Lock calls the Client's Lock method if it's implemented. 
-func (s *State) Lock(info *state.LockInfo) (string, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if c, ok := s.Client.(ClientLocker); ok { - return c.Lock(info) - } - return "", nil -} - -// Unlock calls the Client's Unlock method if it's implemented. -func (s *State) Unlock(id string) error { - s.mu.Lock() - defer s.mu.Unlock() - - if c, ok := s.Client.(ClientLocker); ok { - return c.Unlock(id) - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/testing.go b/vendor/github.com/hashicorp/terraform/state/remote/testing.go deleted file mode 100644 index b379b509dfe..00000000000 --- a/vendor/github.com/hashicorp/terraform/state/remote/testing.go +++ /dev/null @@ -1,97 +0,0 @@ -package remote - -import ( - "bytes" - "testing" - - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" -) - -// TestClient is a generic function to test any client. -func TestClient(t *testing.T, c Client) { - var buf bytes.Buffer - s := state.TestStateInitial() - if err := terraform.WriteState(s, &buf); err != nil { - t.Fatalf("err: %s", err) - } - data := buf.Bytes() - - if err := c.Put(data); err != nil { - t.Fatalf("put: %s", err) - } - - p, err := c.Get() - if err != nil { - t.Fatalf("get: %s", err) - } - if !bytes.Equal(p.Data, data) { - t.Fatalf("bad: %#v", p) - } - - if err := c.Delete(); err != nil { - t.Fatalf("delete: %s", err) - } - - p, err = c.Get() - if err != nil { - t.Fatalf("get: %s", err) - } - if p != nil { - t.Fatalf("bad: %#v", p) - } -} - -// Test the lock implementation for a remote.Client. -// This test requires 2 client instances, in oder to have multiple remote -// clients since some implementations may tie the client to the lock, or may -// have reentrant locks. 
-func TestRemoteLocks(t *testing.T, a, b Client) { - lockerA, ok := a.(state.Locker) - if !ok { - t.Fatal("client A not a state.Locker") - } - - lockerB, ok := b.(state.Locker) - if !ok { - t.Fatal("client B not a state.Locker") - } - - infoA := state.NewLockInfo() - infoA.Operation = "test" - infoA.Who = "clientA" - - infoB := state.NewLockInfo() - infoB.Operation = "test" - infoB.Who = "clientB" - - lockIDA, err := lockerA.Lock(infoA) - if err != nil { - t.Fatal("unable to get initial lock:", err) - } - - _, err = lockerB.Lock(infoB) - if err == nil { - lockerA.Unlock(lockIDA) - t.Fatal("client B obtained lock while held by client A") - } - - if err := lockerA.Unlock(lockIDA); err != nil { - t.Fatal("error unlocking client A", err) - } - - lockIDB, err := lockerB.Lock(infoB) - if err != nil { - t.Fatal("unable to obtain lock from client B") - } - - if lockIDB == lockIDA { - t.Fatalf("duplicate lock IDs: %q", lockIDB) - } - - if err = lockerB.Unlock(lockIDB); err != nil { - t.Fatal("error unlocking client B:", err) - } - - // TODO: Should we enforce that Unlock requires the correct ID? -} diff --git a/vendor/github.com/joyent/gocommon/LICENSE b/vendor/github.com/joyent/gocommon/LICENSE deleted file mode 100644 index 14e2f777f6c..00000000000 --- a/vendor/github.com/joyent/gocommon/LICENSE +++ /dev/null @@ -1,373 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. 
"Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. 
"Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. 
-Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. 
Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. 
However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. 
Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. 
* -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. * -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. 
Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/joyent/gocommon/README.md b/vendor/github.com/joyent/gocommon/README.md deleted file mode 100644 index d396b09bbd7..00000000000 --- a/vendor/github.com/joyent/gocommon/README.md +++ /dev/null @@ -1,98 +0,0 @@ -gocommon -======== - -Common Go library for Joyent's Triton and Manta. - -[![wercker status](https://app.wercker.com/status/2f63bf7f68bfdd46b979abad19c0bee0/s/master "wercker status")](https://app.wercker.com/project/byKey/2f63bf7f68bfdd46b979abad19c0bee0) - -## Installation - -Use `go-get` to install gocommon. -``` -go get github.com/joyent/gocommon -``` - -## Documentation - -Auto-generated documentation can be found on godoc. - -- [github.com/joyent/gocommon](http://godoc.org/github.com/joyent/gocommon) -- [github.com/joyent/gocommon/client](http://godoc.org/github.com/joyent/client) -- [github.com/joyent/gocommon/errors](http://godoc.org/github.com/joyent/gocommon/errors) -- [github.com/joyent/gocommon/http](http://godoc.org/github.com/joyent/gocommon/http) -- [github.com/joyent/gocommon/jpc](http://godoc.org/github.com/joyent/gocommon/jpc) -- [github.com/joyent/gocommon/testing](http://godoc.org/github.com/joyent/gocommon/testing) - - -## Contributing - -Report bugs and request features using [GitHub Issues](https://github.com/joyent/gocommon/issues), or contribute code via a [GitHub Pull Request](https://github.com/joyent/gocommon/pulls). Changes will be code reviewed before merging. In the near future, automated tests will be run, but in the meantime please `go fmt`, `go lint`, and test all contributions. 
- - -## Developing - -This library assumes a Go development environment setup based on [How to Write Go Code](https://golang.org/doc/code.html). Your GOPATH environment variable should be pointed at your workspace directory. - -You can now use `go get github.com/joyent/gocommon` to install the repository to the correct location, but if you are intending on contributing back a change you may want to consider cloning the repository via git yourself. This way you can have a single source tree for all Joyent Go projects with each repo having two remotes -- your own fork on GitHub and the upstream origin. - -For example if your GOPATH is `~/src/joyent/go` and you're working on multiple repos then that directory tree might look like: - -``` -~/src/joyent/go/ -|_ pkg/ -|_ src/ - |_ github.com - |_ joyent - |_ gocommon - |_ gomanta - |_ gosdc - |_ gosign -``` - -### Recommended Setup - -``` -$ mkdir -p ${GOPATH}/src/github.com/joyent -$ cd ${GOPATH}/src/github.com/joyent -$ git clone git@github.com:/gocommon.git - -# fetch dependencies -$ git clone git@github.com:/gosign.git -$ go get -v -t ./... - -# add upstream remote -$ cd gocommon -$ git remote add upstream git@github.com:joyent/gocommon.git -$ git remote -v -origin git@github.com:/gocommon.git (fetch) -origin git@github.com:/gocommon.git (push) -upstream git@github.com:joyent/gocommon.git (fetch) -upstream git@github.com:joyent/gocommon.git (push) -``` - -### Run Tests - -The library needs values for the `SDC_URL`, `MANTA_URL`, `MANTA_KEY_ID` and `SDC_KEY_ID` environment variables even though the tests are run locally. You can generate a temporary key and use its fingerprint for tests without adding the key to your Triton Cloud account. 
- -``` -# create a temporary key -ssh-keygen -b 2048 -C "Testing Key" -f /tmp/id_rsa -t rsa -P "" - -# set up environment -# note: leave the -E md5 argument off on older ssh-keygen -export KEY_ID=$(ssh-keygen -E md5 -lf /tmp/id_rsa | awk -F' ' '{print $2}' | cut -d':' -f2-) -export SDC_KEY_ID=${KEY_ID} -export MANTA_KEY_ID=${KEY_ID} -export SDC_URL=https://us-east-1.api.joyent.com -export MANTA_URL=https://us-east.manta.joyent.com - -cd ${GOPATH}/src/github.com/joyent/gocommon -go test ./... -``` - -### Build the Library - -``` -cd ${GOPATH}/src/github.com/joyent/gocommon -go build ./... -``` diff --git a/vendor/github.com/joyent/gocommon/client/client.go b/vendor/github.com/joyent/gocommon/client/client.go deleted file mode 100644 index b4d40fd1fed..00000000000 --- a/vendor/github.com/joyent/gocommon/client/client.go +++ /dev/null @@ -1,110 +0,0 @@ -// -// gocommon - Go library to interact with the JoyentCloud -// -// -// Copyright (c) 2013 Joyent Inc. -// -// Written by Daniele Stroppa -// - -package client - -import ( - "fmt" - "log" - "net/url" - "strings" - "sync" - "time" - - joyenthttp "github.com/joyent/gocommon/http" - "github.com/joyent/gosign/auth" -) - -const ( - // The HTTP request methods. - GET = "GET" - POST = "POST" - PUT = "PUT" - DELETE = "DELETE" - HEAD = "HEAD" - COPY = "COPY" -) - -// Client implementations sends service requests to the JoyentCloud. -type Client interface { - SendRequest(method, apiCall, rfc1123Date string, request *joyenthttp.RequestData, response *joyenthttp.ResponseData) (err error) - // MakeServiceURL prepares a full URL to a service endpoint, with optional - // URL parts. It uses the first endpoint it can find for the given service type. - MakeServiceURL(parts []string) string - SignURL(path string, expires time.Time) (string, error) -} - -// This client sends requests without authenticating. 
-type client struct { - mu sync.Mutex - logger *log.Logger - baseURL string - creds *auth.Credentials - httpClient *joyenthttp.Client -} - -var _ Client = (*client)(nil) - -func newClient(baseURL string, credentials *auth.Credentials, httpClient *joyenthttp.Client, logger *log.Logger) Client { - client := client{baseURL: baseURL, logger: logger, creds: credentials, httpClient: httpClient} - return &client -} - -func NewClient(baseURL, apiVersion string, credentials *auth.Credentials, logger *log.Logger) Client { - sharedHttpClient := joyenthttp.New(credentials, apiVersion, logger) - return newClient(baseURL, credentials, sharedHttpClient, logger) -} - -func (c *client) sendRequest(method, url, rfc1123Date string, request *joyenthttp.RequestData, response *joyenthttp.ResponseData) (err error) { - if request.ReqValue != nil || response.RespValue != nil { - err = c.httpClient.JsonRequest(method, url, rfc1123Date, request, response) - } else { - err = c.httpClient.BinaryRequest(method, url, rfc1123Date, request, response) - } - return -} - -func (c *client) SendRequest(method, apiCall, rfc1123Date string, request *joyenthttp.RequestData, response *joyenthttp.ResponseData) (err error) { - url := c.MakeServiceURL([]string{c.creds.UserAuthentication.User, apiCall}) - err = c.sendRequest(method, url, rfc1123Date, request, response) - return -} - -func makeURL(base string, parts []string) string { - if !strings.HasSuffix(base, "/") && len(parts) > 0 { - base += "/" - } - if parts[1] == "" { - return base + parts[0] - } - return base + strings.Join(parts, "/") -} - -func (c *client) MakeServiceURL(parts []string) string { - return makeURL(c.baseURL, parts) -} - -func (c *client) SignURL(path string, expires time.Time) (string, error) { - parsedURL, err := url.Parse(c.baseURL) - if err != nil { - return "", fmt.Errorf("bad Manta endpoint URL %q: %v", c.baseURL, err) - } - userAuthentication := c.creds.UserAuthentication - userAuthentication.Algorithm = "RSA-SHA1" - keyId := 
url.QueryEscape(fmt.Sprintf("/%s/keys/%s", userAuthentication.User, c.creds.MantaKeyId)) - params := fmt.Sprintf("algorithm=%s&expires=%d&keyId=%s", userAuthentication.Algorithm, expires.Unix(), keyId) - signingLine := fmt.Sprintf("GET\n%s\n%s\n%s", parsedURL.Host, path, params) - - signature, err := auth.GetSignature(userAuthentication, signingLine) - if err != nil { - return "", fmt.Errorf("cannot generate URL signature: %v", err) - } - signedURL := fmt.Sprintf("%s%s?%s&signature=%s", c.baseURL, path, params, url.QueryEscape(signature)) - return signedURL, nil -} diff --git a/vendor/github.com/joyent/gocommon/errors/errors.go b/vendor/github.com/joyent/gocommon/errors/errors.go deleted file mode 100644 index 4c641cac467..00000000000 --- a/vendor/github.com/joyent/gocommon/errors/errors.go +++ /dev/null @@ -1,292 +0,0 @@ -// -// gocommon - Go library to interact with the JoyentCloud -// This package provides an Error implementation which knows about types of error, and which has support -// for error causes. -// -// Copyright (c) 2013 Joyent Inc. -// -// Written by Daniele Stroppa -// - -package errors - -import "fmt" - -type Code string - -const ( - // Public available error types. - // These errors are provided because they are specifically required by business logic in the callers. - BadRequestError = Code("BadRequest") - InternalErrorError = Code("InternalError") - InvalidArgumentError = Code("InvalidArgument") - InvalidCredentialsError = Code("InvalidCredentials") - InvalidHeaderError = Code("InvalidHeader") - InvalidVersionError = Code("InvalidVersion") - MissingParameterError = Code("MissinParameter") - NotAuthorizedError = Code("NotAuthorized") - RequestThrottledError = Code("RequestThrottled") - RequestTooLargeError = Code("RequestTooLarge") - RequestMovedError = Code("RequestMoved") - ResourceNotFoundError = Code("ResourceNotFound") - UnknownErrorError = Code("UnknownError") -) - -// Error instances store an optional error cause. 
-type Error interface { - error - Cause() error -} - -type gojoyentError struct { - error - errcode Code - cause error -} - -// Type checks. -var _ Error = (*gojoyentError)(nil) - -// Code returns the error code. -func (err *gojoyentError) code() Code { - if err.errcode != UnknownErrorError { - return err.errcode - } - if e, ok := err.cause.(*gojoyentError); ok { - return e.code() - } - return UnknownErrorError -} - -// Cause returns the error cause. -func (err *gojoyentError) Cause() error { - return err.cause -} - -// CausedBy returns true if this error or its cause are of the specified error code. -func (err *gojoyentError) causedBy(code Code) bool { - if err.code() == code { - return true - } - if cause, ok := err.cause.(*gojoyentError); ok { - return cause.code() == code - } - return false -} - -// Error fulfills the error interface, taking account of any caused by error. -func (err *gojoyentError) Error() string { - if err.cause != nil { - return fmt.Sprintf("%v\ncaused by: %v", err.error, err.cause) - } - return err.error.Error() -} - -func IsBadRequest(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(BadRequestError) - } - return false -} - -func IsInternalError(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(InternalErrorError) - } - return false -} - -func IsInvalidArgument(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(InvalidArgumentError) - } - return false -} - -func IsInvalidCredentials(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(InvalidCredentialsError) - } - return false -} - -func IsInvalidHeader(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(InvalidHeaderError) - } - return false -} - -func IsInvalidVersion(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(InvalidVersionError) - } - return false -} - -func IsMissingParameter(err error) bool { - if e, ok := 
err.(*gojoyentError); ok { - return e.causedBy(MissingParameterError) - } - return false -} - -func IsNotAuthorized(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(NotAuthorizedError) - } - return false -} - -func IsRequestThrottled(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(RequestThrottledError) - } - return false -} - -func IsRequestTooLarge(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(RequestTooLargeError) - } - return false -} - -func IsRequestMoved(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(RequestMovedError) - } - return false -} - -func IsResourceNotFound(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(ResourceNotFoundError) - } - return false -} - -func IsUnknownError(err error) bool { - if e, ok := err.(*gojoyentError); ok { - return e.causedBy(UnknownErrorError) - } - return false -} - -// New creates a new Error instance with the specified cause. -func makeErrorf(code Code, cause error, format string, args ...interface{}) Error { - return &gojoyentError{ - errcode: code, - error: fmt.Errorf(format, args...), - cause: cause, - } -} - -// New creates a new UnknownError Error instance with the specified cause. -func Newf(cause error, format string, args ...interface{}) Error { - return makeErrorf(UnknownErrorError, cause, format, args...) -} - -// New creates a new BadRequest Error instance with the specified cause. -func NewBadRequestf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Bad Request: %s", context) - } - return makeErrorf(BadRequestError, cause, format, args...) -} - -// New creates a new InternalError Error instance with the specified cause. 
-func NewInternalErrorf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Internal Error: %s", context) - } - return makeErrorf(InternalErrorError, cause, format, args...) -} - -// New creates a new InvalidArgument Error instance with the specified cause. -func NewInvalidArgumentf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Invalid Argument: %s", context) - } - return makeErrorf(InvalidArgumentError, cause, format, args...) -} - -// New creates a new InvalidCredentials Error instance with the specified cause. -func NewInvalidCredentialsf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Invalid Credentials: %s", context) - } - return makeErrorf(InvalidCredentialsError, cause, format, args...) -} - -// New creates a new InvalidHeader Error instance with the specified cause. -func NewInvalidHeaderf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Invalid Header: %s", context) - } - return makeErrorf(InvalidHeaderError, cause, format, args...) -} - -// New creates a new InvalidVersion Error instance with the specified cause. -func NewInvalidVersionf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Invalid Version: %s", context) - } - return makeErrorf(InvalidVersionError, cause, format, args...) -} - -// New creates a new MissingParameter Error instance with the specified cause. -func NewMissingParameterf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Missing Parameter: %s", context) - } - return makeErrorf(MissingParameterError, cause, format, args...) -} - -// New creates a new NotAuthorized Error instance with the specified cause. 
-func NewNotAuthorizedf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Not Authorized: %s", context) - } - return makeErrorf(NotAuthorizedError, cause, format, args...) -} - -// New creates a new RequestThrottled Error instance with the specified cause. -func NewRequestThrottledf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Request Throttled: %s", context) - } - return makeErrorf(RequestThrottledError, cause, format, args...) -} - -// New creates a new RequestTooLarge Error instance with the specified cause. -func NewRequestTooLargef(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Request Too Large: %s", context) - } - return makeErrorf(RequestTooLargeError, cause, format, args...) -} - -// New creates a new RequestMoved Error instance with the specified cause. -func NewRequestMovedf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Request Moved: %s", context) - } - return makeErrorf(RequestMovedError, cause, format, args...) -} - -// New creates a new ResourceNotFound Error instance with the specified cause. -func NewResourceNotFoundf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Resource Not Found: %s", context) - } - return makeErrorf(ResourceNotFoundError, cause, format, args...) -} - -// New creates a new UnknownError Error instance with the specified cause. -func NewUnknownErrorf(cause error, context interface{}, format string, args ...interface{}) Error { - if format == "" { - format = fmt.Sprintf("Unknown Error: %s", context) - } - return makeErrorf(UnknownErrorError, cause, format, args...) 
-} diff --git a/vendor/github.com/joyent/gocommon/gocommon.go b/vendor/github.com/joyent/gocommon/gocommon.go deleted file mode 100644 index f5f6d82941c..00000000000 --- a/vendor/github.com/joyent/gocommon/gocommon.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * The gocommon package collects common packages to interact with the Joyent Public Cloud and Joyent Manta services. - * - * The gocommon package is structured as follow: - * - * - gocommon/client. Client for sending requests. - * - gocommon/errors. Joyent specific errors. - * - gocommon/http. HTTP client for sending requests. - * - gocommon/jpc. This package provides common structures and functions across packages. - * - gocommon/testing. Testing Suite for local testing. - * - * Copyright (c) 2016 Joyent Inc. - * Written by Daniele Stroppa - * - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. - */ - -package gocommon diff --git a/vendor/github.com/joyent/gocommon/http/client.go b/vendor/github.com/joyent/gocommon/http/client.go deleted file mode 100644 index 2fde6df3960..00000000000 --- a/vendor/github.com/joyent/gocommon/http/client.go +++ /dev/null @@ -1,427 +0,0 @@ -// -// gocommon - Go library to interact with the JoyentCloud -// An HTTP Client which sends json and binary requests, handling data marshalling and response processing. -// -// Copyright (c) 2013 Joyent Inc. 
-// -// Written by Daniele Stroppa -// - -package http - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - "time" - - "github.com/joyent/gocommon" - "github.com/joyent/gocommon/errors" - "github.com/joyent/gocommon/jpc" - "github.com/joyent/gosign/auth" -) - -const ( - contentTypeJSON = "application/json" - contentTypeOctetStream = "application/octet-stream" -) - -type Client struct { - http.Client - maxSendAttempts int - credentials *auth.Credentials - apiVersion string - logger *log.Logger - trace bool -} - -type ErrorResponse struct { - Message string `json:"message"` - Code int `json:"code"` -} - -func (e *ErrorResponse) Error() string { - return fmt.Sprintf("Failed: %d: %s", e.Code, e.Message) -} - -type ErrorWrapper struct { - Error ErrorResponse `json:"error"` -} - -type RequestData struct { - ReqHeaders http.Header - Params *url.Values - ReqValue interface{} - ReqReader io.Reader - ReqLength int -} - -type ResponseData struct { - ExpectedStatus []int - RespHeaders *http.Header - RespValue interface{} - RespReader io.ReadCloser -} - -const ( - // The maximum number of times to try sending a request before we give up - // (assuming any unsuccessful attempts can be sensibly tried again). - MaxSendAttempts = 3 -) - -// New returns a new http *Client using the default net/http client. -func New(credentials *auth.Credentials, apiVersion string, logger *log.Logger) *Client { - return &Client{*http.DefaultClient, MaxSendAttempts, credentials, apiVersion, logger, false} -} - -// SetTrace allows control over whether requests will write their -// contents to the logger supplied during construction. Note that this -// is not safe to call from multiple go-routines. 
-func (client *Client) SetTrace(traceEnabled bool) { - client.trace = traceEnabled -} - -func gojoyentAgent() string { - return fmt.Sprintf("gocommon (%s)", gocommon.Version) -} - -func createHeaders(extraHeaders http.Header, credentials *auth.Credentials, contentType, rfc1123Date, - apiVersion string, isMantaRequest bool) (http.Header, error) { - - headers := make(http.Header) - if extraHeaders != nil { - for header, values := range extraHeaders { - for _, value := range values { - headers.Add(header, value) - } - } - } - if extraHeaders.Get("Content-Type") == "" { - headers.Add("Content-Type", contentType) - } - if extraHeaders.Get("Accept") == "" { - headers.Add("Accept", contentType) - } - if rfc1123Date != "" { - headers.Set("Date", rfc1123Date) - } else { - headers.Set("Date", getDateForRegion(credentials, isMantaRequest)) - } - authHeaders, err := auth.CreateAuthorizationHeader(headers, credentials, isMantaRequest) - if err != nil { - return http.Header{}, err - } - headers.Set("Authorization", authHeaders) - if apiVersion != "" { - headers.Set("X-Api-Version", apiVersion) - } - headers.Add("User-Agent", gojoyentAgent()) - return headers, nil -} - -func getDateForRegion(credentials *auth.Credentials, isManta bool) string { - if isManta { - location, _ := time.LoadLocation(jpc.Locations["us-east-1"]) - return time.Now().In(location).Format(time.RFC1123) - } else { - location, _ := time.LoadLocation(jpc.Locations[credentials.Region()]) - return time.Now().In(location).Format(time.RFC1123) - } -} - -// JsonRequest JSON encodes and sends the object in reqData.ReqValue (if any) to the specified URL. -// Optional method arguments are passed using the RequestData object. -// Relevant RequestData fields: -// ReqHeaders: additional HTTP header values to add to the request. -// ExpectedStatus: the allowed HTTP response status values, else an error is returned. -// ReqValue: the data object to send. -// RespValue: the data object to decode the result into. 
-func (c *Client) JsonRequest(method, url, rfc1123Date string, request *RequestData, response *ResponseData) (err error) { - err = nil - var body []byte - if request.Params != nil { - url += "?" + request.Params.Encode() - } - if request.ReqValue != nil { - body, err = json.Marshal(request.ReqValue) - if err != nil { - err = errors.Newf(err, "failed marshalling the request body") - return - } - } - headers, err := createHeaders(request.ReqHeaders, c.credentials, contentTypeJSON, rfc1123Date, c.apiVersion, - isMantaRequest(url, c.credentials.UserAuthentication.User)) - if err != nil { - return err - } - respBody, respHeader, err := c.sendRequest( - method, url, bytes.NewReader(body), len(body), headers, response.ExpectedStatus, c.logger) - if err != nil { - return - } - defer respBody.Close() - respData, err := ioutil.ReadAll(respBody) - if err != nil { - err = errors.Newf(err, "failed reading the response body") - return - } - - if len(respData) > 0 { - if response.RespValue != nil { - if dest, ok := response.RespValue.(*[]byte); ok { - *dest = respData - //err = decodeJSON(bytes.NewReader(respData), false, response.RespValue) - //if err != nil { - // err = errors.Newf(err, "failed unmarshaling/decoding the response body: %s", respData) - //} - } else { - err = json.Unmarshal(respData, response.RespValue) - if err != nil { - err = decodeJSON(bytes.NewReader(respData), true, response.RespValue) - if err != nil { - err = errors.Newf(err, "failed unmarshaling/decoding the response body: %s", respData) - } - } - } - } - } - - if respHeader != nil { - response.RespHeaders = respHeader - } - - return -} - -func decodeJSON(r io.Reader, multiple bool, into interface{}) error { - d := json.NewDecoder(r) - if multiple { - return decodeStream(d, into) - } - return d.Decode(into) -} - -func decodeStream(d *json.Decoder, into interface{}) error { - t := reflect.TypeOf(into) - if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Slice { - return fmt.Errorf("unexpected type 
%s", t) - } - elemType := t.Elem().Elem() - slice := reflect.ValueOf(into).Elem() - for { - val := reflect.New(elemType) - if err := d.Decode(val.Interface()); err != nil { - if err == io.EOF { - break - } - return err - } - slice.Set(reflect.Append(slice, val.Elem())) - } - return nil -} - -// Sends the byte array in reqData.ReqValue (if any) to the specified URL. -// Optional method arguments are passed using the RequestData object. -// Relevant RequestData fields: -// ReqHeaders: additional HTTP header values to add to the request. -// ExpectedStatus: the allowed HTTP response status values, else an error is returned. -// ReqReader: an io.Reader providing the bytes to send. -// RespReader: assigned an io.ReadCloser instance used to read the returned data.. -func (c *Client) BinaryRequest(method, url, rfc1123Date string, request *RequestData, response *ResponseData) (err error) { - err = nil - - if request.Params != nil { - url += "?" + request.Params.Encode() - } - headers, err := createHeaders(request.ReqHeaders, c.credentials, contentTypeOctetStream, rfc1123Date, - c.apiVersion, isMantaRequest(url, c.credentials.UserAuthentication.User)) - if err != nil { - return err - } - respBody, respHeader, err := c.sendRequest( - method, url, request.ReqReader, request.ReqLength, headers, response.ExpectedStatus, c.logger) - if err != nil { - return - } - if response.RespReader != nil { - response.RespReader = respBody - } - if respHeader != nil { - response.RespHeaders = respHeader - } - return -} - -// Sends the specified request to URL and checks that the HTTP response status is as expected. -// reqReader: a reader returning the data to send. -// length: the number of bytes to send. -// headers: HTTP headers to include with the request. -// expectedStatus: a slice of allowed response status codes. 
-func (c *Client) sendRequest(method, URL string, reqReader io.Reader, length int, headers http.Header, - expectedStatus []int, logger *log.Logger) (rc io.ReadCloser, respHeader *http.Header, err error) { - reqData := make([]byte, length) - if reqReader != nil { - nrRead, err := io.ReadFull(reqReader, reqData) - if err != nil { - err = errors.Newf(err, "failed reading the request data, read %v of %v bytes", nrRead, length) - return rc, respHeader, err - } - } - rawResp, err := c.sendRateLimitedRequest(method, URL, headers, reqData, logger) - if err != nil { - return - } - - if logger != nil && c.trace { - logger.Printf("Request: %s %s\n", method, URL) - logger.Printf("Request header: %s\n", headers) - logger.Printf("Request body: %s\n", reqData) - logger.Printf("Response: %s\n", rawResp.Status) - logger.Printf("Response header: %s\n", rawResp.Header) - logger.Printf("Response body: %s\n", rawResp.Body) - logger.Printf("Response error: %s\n", err) - } - - foundStatus := false - if len(expectedStatus) == 0 { - expectedStatus = []int{http.StatusOK} - } - for _, status := range expectedStatus { - if rawResp.StatusCode == status { - foundStatus = true - break - } - } - if !foundStatus && len(expectedStatus) > 0 { - err = handleError(URL, rawResp) - rawResp.Body.Close() - return - } - return rawResp.Body, &rawResp.Header, err -} - -func (c *Client) sendRateLimitedRequest(method, URL string, headers http.Header, reqData []byte, - logger *log.Logger) (resp *http.Response, err error) { - for i := 0; i < c.maxSendAttempts; i++ { - var reqReader io.Reader - if reqData != nil { - reqReader = bytes.NewReader(reqData) - } - req, err := http.NewRequest(method, URL, reqReader) - if err != nil { - err = errors.Newf(err, "failed creating the request %s", URL) - return nil, err - } - // Setting req.Close to true to avoid malformed HTTP version "nullHTTP/1.1" error - // See 
http://stackoverflow.com/questions/17714494/golang-http-request-results-in-eof-errors-when-making-multiple-requests-successi - req.Close = true - for header, values := range headers { - for _, value := range values { - req.Header.Add(header, value) - } - } - req.ContentLength = int64(len(reqData)) - resp, err = c.Do(req) - if err != nil { - return nil, errors.Newf(err, "failed executing the request %s", URL) - } - if resp.StatusCode != http.StatusRequestEntityTooLarge || resp.Header.Get("Retry-After") == "" { - return resp, nil - } - resp.Body.Close() - retryAfter, err := strconv.ParseFloat(resp.Header.Get("Retry-After"), 64) - if err != nil { - return nil, errors.Newf(err, "Invalid Retry-After header %s", URL) - } - if retryAfter == 0 { - return nil, errors.Newf(err, "Resource limit exeeded at URL %s", URL) - } - if logger != nil { - logger.Println("Too many requests, retrying in %dms.", int(retryAfter*1000)) - } - time.Sleep(time.Duration(retryAfter) * time.Second) - } - return nil, errors.Newf(err, "Maximum number of attempts (%d) reached sending request to %s", c.maxSendAttempts, URL) -} - -type HttpError struct { - StatusCode int - Data map[string][]string - Url string - ResponseMessage string -} - -func (e *HttpError) Error() string { - return fmt.Sprintf("request %q returned unexpected status %d with body %q", - e.Url, - e.StatusCode, - e.ResponseMessage, - ) -} - -// The HTTP response status code was not one of those expected, so we construct an error. -// NotFound (404) codes have their own NotFound error type. -// We also make a guess at duplicate value errors. -func handleError(URL string, resp *http.Response) error { - errBytes, _ := ioutil.ReadAll(resp.Body) - errInfo := string(errBytes) - // Check if we have a JSON representation of the failure, if so decode it. 
- if resp.Header.Get("Content-Type") == contentTypeJSON { - var errResponse ErrorResponse - if err := json.Unmarshal(errBytes, &errResponse); err == nil { - errInfo = errResponse.Message - } - } - httpError := &HttpError{ - resp.StatusCode, map[string][]string(resp.Header), URL, errInfo, - } - switch resp.StatusCode { - case http.StatusBadRequest: - return errors.NewBadRequestf(httpError, "", "Bad request %s", URL) - case http.StatusUnauthorized: - return errors.NewNotAuthorizedf(httpError, "", "Unauthorised URL %s", URL) - //return errors.NewInvalidCredentialsf(httpError, "", "Unauthorised URL %s", URL) - case http.StatusForbidden: - //return errors. - case http.StatusNotFound: - return errors.NewResourceNotFoundf(httpError, "", "Resource not found %s", URL) - case http.StatusMethodNotAllowed: - //return errors. - case http.StatusNotAcceptable: - return errors.NewInvalidHeaderf(httpError, "", "Invalid Header %s", URL) - case http.StatusConflict: - return errors.NewMissingParameterf(httpError, "", "Missing parameters %s", URL) - //return errors.NewInvalidArgumentf(httpError, "", "Invalid parameter %s", URL) - case http.StatusRequestEntityTooLarge: - return errors.NewRequestTooLargef(httpError, "", "Request too large %s", URL) - case http.StatusUnsupportedMediaType: - //return errors. - case http.StatusServiceUnavailable: - return errors.NewInternalErrorf(httpError, "", "Internal error %s", URL) - case 420: - // SlowDown - return errors.NewRequestThrottledf(httpError, "", "Request throttled %s", URL) - case 422: - // Unprocessable Entity - return errors.NewInvalidArgumentf(httpError, "", "Invalid parameters %s", URL) - case 449: - // RetryWith - return errors.NewInvalidVersionf(httpError, "", "Invalid version %s", URL) - //RequestMovedError -> ? 
- } - - return errors.NewUnknownErrorf(httpError, "", "Unknown error %s", URL) -} - -func isMantaRequest(url, user string) bool { - return strings.Contains(url, "/"+user+"/stor") || strings.Contains(url, "/"+user+"/jobs") || strings.Contains(url, "/"+user+"/public") -} diff --git a/vendor/github.com/joyent/gocommon/jpc/jpc.go b/vendor/github.com/joyent/gocommon/jpc/jpc.go deleted file mode 100644 index 0943f6a2800..00000000000 --- a/vendor/github.com/joyent/gocommon/jpc/jpc.go +++ /dev/null @@ -1,113 +0,0 @@ -/* - * - * gocommon - Go library to interact with the JoyentCloud - * - * - * Copyright (c) 2016 Joyent Inc. - * - * Written by Daniele Stroppa - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. - */ - -package jpc - -import ( - "fmt" - "io/ioutil" - "os" - "reflect" - "runtime" - - "github.com/joyent/gosign/auth" -) - -const ( - // Environment variables - TritonAccount = "TRITON_ACCOUNT" - TritonKeyId = "TRITON_KEY_ID" - TritonUrl = "TRITON_URL" - SdcAccount = "SDC_ACCOUNT" - SdcKeyId = "SDC_KEY_ID" - SdcUrl = "SDC_URL" - MantaUser = "MANTA_USER" - MantaKeyId = "MANTA_KEY_ID" - MantaUrl = "MANTA_URL" -) - -var Locations = map[string]string{ - "us-east-1": "America/New_York", - "us-west-1": "America/Los_Angeles", - "us-sw-1": "America/Los_Angeles", - "eu-ams-1": "Europe/Amsterdam", -} - -// getConfig returns the value of the first available environment -// variable, among the given ones. -func getConfig(envVars ...string) (value string) { - value = "" - for _, v := range envVars { - value = os.Getenv(v) - if value != "" { - break - } - } - return -} - -// getUserHome returns the value of HOME environment -// variable for the user environment. 
-func getUserHome() string { - if runtime.GOOS == "windows" { - return os.Getenv("APPDATA") - } else { - return os.Getenv("HOME") - } -} - -// credentialsFromEnv creates and initializes the credentials from the -// environment variables. -func credentialsFromEnv(key string) (*auth.Credentials, error) { - var keyName string - if key == "" { - keyName = getUserHome() + "/.ssh/id_rsa" - } else { - keyName = key - } - privateKey, err := ioutil.ReadFile(keyName) - if err != nil { - return nil, err - } - authentication, err := auth.NewAuth(getConfig(TritonAccount, SdcAccount, MantaUser), string(privateKey), "rsa-sha256") - if err != nil { - return nil, err - } - - return &auth.Credentials{ - UserAuthentication: authentication, - SdcKeyId: getConfig(TritonKeyId, SdcKeyId), - SdcEndpoint: auth.Endpoint{URL: getConfig(TritonUrl, SdcUrl)}, - MantaKeyId: getConfig(MantaKeyId), - MantaEndpoint: auth.Endpoint{URL: getConfig(MantaUrl)}, - }, nil -} - -// CompleteCredentialsFromEnv gets and verifies all the required -// authentication parameters have values in the environment. -func CompleteCredentialsFromEnv(keyName string) (cred *auth.Credentials, err error) { - cred, err = credentialsFromEnv(keyName) - if err != nil { - return nil, err - } - v := reflect.ValueOf(cred).Elem() - t := v.Type() - for i := 0; i < v.NumField(); i++ { - f := v.Field(i) - if f.String() == "" { - return nil, fmt.Errorf("Required environment variable not set for credentials attribute: %s", t.Field(i).Name) - } - } - return cred, nil -} diff --git a/vendor/github.com/joyent/gocommon/version.go b/vendor/github.com/joyent/gocommon/version.go deleted file mode 100644 index 82fe84494bd..00000000000 --- a/vendor/github.com/joyent/gocommon/version.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - * - * gocommon - Go library to interact with the JoyentCloud - * - * - * Copyright (c) 2016 Joyent Inc. 
- * - * Written by Daniele Stroppa - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. - */ - -package gocommon - -import ( - "fmt" -) - -type VersionNum struct { - Major int - Minor int - Micro int -} - -func (v *VersionNum) String() string { - return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Micro) -} - -var VersionNumber = VersionNum{ - Major: 0, - Minor: 1, - Micro: 0, -} - -var Version = VersionNumber.String() diff --git a/vendor/github.com/joyent/gocommon/wercker.yml b/vendor/github.com/joyent/gocommon/wercker.yml deleted file mode 100644 index 563acf6fe49..00000000000 --- a/vendor/github.com/joyent/gocommon/wercker.yml +++ /dev/null @@ -1,40 +0,0 @@ -box: golang - -build: - steps: - # Sets the go workspace and places you package - # at the right place in the workspace tree - - setup-go-workspace: - package-dir: github.com/joyent/gocommon - - # Gets the dependencies - - script: - name: go get - code: | - go get -v -t ./... - - # Build the project - - script: - name: go build - code: | - go build ./... - - - script: - name: make a new key for testing - code: | - ssh-keygen -b 2048 \ - -C "Testing Key" \ - -f /root/.ssh/id_rsa \ - -t rsa \ - -P "" - - # Test the project - - script: - name: go test - code: | - export KEY_ID=$(ssh-keygen -lf /root/.ssh/id_rsa | awk -F' ' '{print $2}' | cut -d':' -f2-) - export SDC_KEY_ID=${KEY_ID} - export MANTA_KEY_ID=${KEY_ID} - export SDC_URL=https://us-east-1.api.joyent.com - export MANTA_URL=https://us-east.manta.joyent.com - go test ./... diff --git a/vendor/github.com/joyent/gomanta/LICENSE b/vendor/github.com/joyent/gomanta/LICENSE deleted file mode 100644 index 14e2f777f6c..00000000000 --- a/vendor/github.com/joyent/gomanta/LICENSE +++ /dev/null @@ -1,373 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. 
Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. 
"Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. 
Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. 
Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. 
Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. 
However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. 
* -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. * -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. 
-Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. 
If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/joyent/gomanta/manta/manta.go b/vendor/github.com/joyent/gomanta/manta/manta.go deleted file mode 100644 index f7307454f4f..00000000000 --- a/vendor/github.com/joyent/gomanta/manta/manta.go +++ /dev/null @@ -1,459 +0,0 @@ -/* -The gomanta/manta package interacts with the Manta API (http://apidocs.joyent.com/manta/api.html). - -This Source Code Form is subject to the terms of the Mozilla Public -License, v. 2.0. If a copy of the MPL was not distributed with this -file, You can obtain one at http://mozilla.org/MPL/2.0/. - -Copyright (c) 2016 Joyent Inc. -Written by Daniele Stroppa - -*/ -package manta - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "path" - "time" - - "github.com/joyent/gocommon/client" - "github.com/joyent/gocommon/errors" - - jh "github.com/joyent/gocommon/http" -) - -const ( - // The default version of the Manta API to use - DefaultAPIVersion = "7.1" - - // Manta API URL parts - apiStorage = "stor" - apiJobs = "jobs" - apiJobsLive = "live" - apiJobsIn = "in" - apiJobsOut = "out" - apiJobsFail = "fail" - apiJobsErr = "err" - apiJobsEnd = "end" - apiJobsCancel = "cancel" - apiJobsStatus = "status" -) - -// Client provides a means to access Joyent Manta -type Client struct { - client client.Client -} - -// New creates a new Client. 
-func New(client client.Client) *Client { - return &Client{client} -} - -// request represents an API request -type request struct { - method string - url string - reqValue interface{} - reqHeader http.Header - reqReader io.Reader - reqLength int - resp interface{} - respHeader *http.Header - expectedStatus int -} - -// Helper method to send an API request -func (c *Client) sendRequest(req request) (*jh.ResponseData, error) { - request := jh.RequestData{ - ReqValue: req.reqValue, - ReqHeaders: req.reqHeader, - ReqReader: req.reqReader, - ReqLength: req.reqLength, - } - if req.expectedStatus == 0 { - req.expectedStatus = http.StatusOK - } - respData := jh.ResponseData{ - RespValue: req.resp, - RespHeaders: req.respHeader, - ExpectedStatus: []int{req.expectedStatus}, - } - err := c.client.SendRequest(req.method, req.url, "", &request, &respData) - return &respData, err -} - -// Helper method to create the API URL -func makeURL(parts ...string) string { - return path.Join(parts...) -} - -// ListDirectoryOpts represent the option that can be specified -// when listing a directory. -type ListDirectoryOpts struct { - Limit int `json:"limit"` // Limit to the number of records returned (default and max is 1000) - Marker string `json:"marker"` // Key name at which to start the next listing -} - -// Entry represents an object stored in Manta, either a file or a directory -type Entry struct { - Name string `json:"name"` // Entry name - Etag string `json:"etag,omitempty"` // If type is 'object', object UUID - Size int `json:"size,omitempty"` // If type is 'object', object size (content-length) - Type string `json:"type"` // Entry type, one of 'directory' or 'object' - Mtime string `json:"mtime"` // ISO8601 timestamp of the last update -} - -// Creates a directory at the specified path. Any parent directory must exist. 
-// See API docs: http://apidocs.joyent.com/manta/api.html#PutDirectory -func (c *Client) PutDirectory(path string) error { - requestHeaders := make(http.Header) - requestHeaders.Set("Content-Type", "application/json; type=directory") - requestHeaders.Set("Accept", "*/*") - req := request{ - method: client.PUT, - url: makeURL(apiStorage, path), - reqHeader: requestHeaders, - expectedStatus: http.StatusNoContent, - } - if _, err := c.sendRequest(req); err != nil { - return errors.Newf(err, "failed to create directory: %s", path) - } - return nil -} - -// Returns the content of the specified directory, using the specified options. -// See API docs: http://apidocs.joyent.com/manta/api.html#ListDirectory -func (c *Client) ListDirectory(directory string, opts ListDirectoryOpts) ([]Entry, error) { - var resp []Entry - requestHeaders := make(http.Header) - requestHeaders.Set("Accept", "*/*") - req := request{ - method: client.GET, - url: makeURL(apiStorage, directory), - reqHeader: requestHeaders, - resp: &resp, - reqValue: opts, - } - if _, err := c.sendRequest(req); err != nil { - return nil, errors.Newf(err, "failed to list directory %s", directory) - } - return resp, nil -} - -// Deletes the specified directory. Directory must be empty. -// See API docs: http://apidocs.joyent.com/manta/api.html#DeleteDirectory -func (c *Client) DeleteDirectory(path string) error { - req := request{ - method: client.DELETE, - url: makeURL(apiStorage, path), - expectedStatus: http.StatusNoContent, - } - if _, err := c.sendRequest(req); err != nil { - return errors.Newf(err, "failed to delete directory %s", path) - } - return nil -} - -// Creates an object at the specified path. Any parent directory must exist. 
-// See API docs: http://apidocs.joyent.com/manta/api.html#PutObject -func (c *Client) PutObject(path, objectName string, object []byte) error { - r := bytes.NewReader(object) - req := request{ - method: client.PUT, - url: makeURL(apiStorage, path, objectName), - reqReader: r, - reqLength: len(object), - expectedStatus: http.StatusNoContent, - } - if _, err := c.sendRequest(req); err != nil { - return errors.Newf(err, "failed to create object: %s/%s", path, objectName) - } - return nil -} - -// Retrieves the specified object from the specified location. -// See API docs: http://apidocs.joyent.com/manta/api.html#GetObject -func (c *Client) GetObject(path, objectName string) ([]byte, error) { - var resp []byte - requestHeaders := make(http.Header) - requestHeaders.Set("Accept", "*/*") - req := request{ - method: client.GET, - url: makeURL(apiStorage, path, objectName), - reqHeader: requestHeaders, - resp: &resp, - } - respData, err := c.sendRequest(req) - if err != nil { - return nil, errors.Newf(err, "failed to get object %s/%s", path, objectName) - } - res, ok := respData.RespValue.(*[]byte) - if !ok { - return nil, errors.Newf(err, "failed to assert downloaded data as type *[]byte for object %s/%s", path, objectName) - } - return *res, nil -} - -// Deletes the specified object from the specified location. -// See API docs: http://apidocs.joyent.com/manta/api.html#DeleteObject -func (c *Client) DeleteObject(path, objectName string) error { - req := request{ - method: client.DELETE, - url: makeURL(apiStorage, path, objectName), - expectedStatus: http.StatusNoContent, - } - if _, err := c.sendRequest(req); err != nil { - return errors.Newf(err, "failed to delete object %s/%s", path, objectName) - } - return nil -} - -// Creates a link (similar to a Unix hard link) from location to path/linkName. 
-// See API docs: http://apidocs.joyent.com/manta/api.html#PutSnapLink -func (c *Client) PutSnapLink(path, linkName, location string) error { - requestHeaders := make(http.Header) - requestHeaders.Set("Accept", "application/json; type=link") - requestHeaders.Set("Location", location) - req := request{ - method: client.PUT, - url: makeURL(apiStorage, path, linkName), - reqHeader: requestHeaders, - expectedStatus: http.StatusNoContent, - } - if _, err := c.sendRequest(req); err != nil { - return errors.Newf(err, "failed to create snap link: %s/%s", path, linkName) - } - return nil -} - -// CreateJobOpts represent the option that can be specified -// when creating a job. -type CreateJobOpts struct { - Name string `json:"name,omitempty"` // Job Name (optional) - Phases []Phase `json:"phases"` // Tasks to execute as part of this job -} - -// Job represents the status of a job. -type Job struct { - Id string // Job unique identifier - Name string `json:"name,omitempty"` // Job Name - State string // Job state - Cancelled bool // Whether the job has been cancelled or not - InputDone bool // Whether the inputs for the job is still open or not - Stats JobStats `json:"stats,omitempty"` // Job statistics - TimeCreated string // Time the job was created at - TimeDone string `json:"timeDone,omitempty"` // Time the job was completed - TimeArchiveStarted string `json:"timeArchiveStarted,omitempty"` // Time the job archiving started - TimeArchiveDone string `json:"timeArchiveDone,omitempty"` // Time the job archiving completed - Phases []Phase `json:"phases"` // Job tasks - Options interface{} // Job options -} - -// JobStats represents statistics about a job -type JobStats struct { - Errors int // Number or errors - Outputs int // Number of output produced - Retries int // Number of retries - Tasks int // Total number of task in the job - TasksDone int // number of tasks done -} - -// Phase represents a task to be executed as part of a Job -type Phase struct { - Type string 
`json:"type,omitempty"` // Task type, one of 'map' or 'reduce' (optional) - Assets []string `json:"assets,omitempty"` // An array of objects to be placed in the compute zones (optional) - Exec string `json:"exec"` // The actual shell statement to execute - Init string `json:"init"` // Shell statement to execute in each compute zone before any tasks are executed - Count int `json:"count,omitempty"` // If type is 'reduce', an optional number of reducers for this phase (default is 1) - Memory int `json:"memory,omitempty"` // Amount of DRAM to give to your compute zone (in Mb, optional) - Disk int `json:"disk,omitempty"` // Amount of disk space to give to your compute zone (in Gb, optional) -} - -// JobError represents an error occurred during a job execution -type JobError struct { - Id string // Job Id - Phase string // Phase number of the failure - What string // A human readable summary of what failed - Code string // Error code - Message string // Human readable error message - Stderr string // A key that saved the stderr for the given command (optional) - Key string // The input key being processed when the task failed (optional) -} - -// Creates a job with the given options. -// See API docs: http://apidocs.joyent.com/manta/api.html#CreateJob -func (c *Client) CreateJob(opts CreateJobOpts) (string, error) { - var resp string - var respHeader http.Header - req := request{ - method: client.POST, - url: apiJobs, - reqValue: opts, - respHeader: &respHeader, - resp: &resp, - expectedStatus: http.StatusCreated, - } - respData, err := c.sendRequest(req) - if err != nil { - return "", errors.Newf(err, "failed to create job with name: %s", opts.Name) - } - return respData.RespHeaders.Get("Location"), nil -} - -// Submits inputs to an already created job. 
-// See API docs: http://apidocs.joyent.com/manta/api.html#AddJobInputs -func (c *Client) AddJobInputs(jobId string, jobInputs io.Reader) error { - inputData, errI := ioutil.ReadAll(jobInputs) - if errI != nil { - return errors.Newf(errI, "failed to read inputs for job %s", jobId) - } - requestHeaders := make(http.Header) - requestHeaders.Set("Accept", "*/*") - requestHeaders.Set("Content-Type", "text/plain") - req := request{ - method: client.POST, - url: makeURL(apiJobs, jobId, apiJobsLive, apiJobsIn), - reqValue: string(inputData), - reqHeader: requestHeaders, - expectedStatus: http.StatusNoContent, - } - if _, err := c.sendRequest(req); err != nil { - return errors.Newf(err, "failed to add inputs to job %s", jobId) - } - return nil -} - -// This closes input for a job, and finalize the job. -// See API docs: http://apidocs.joyent.com/manta/api.html#EndJobInput -func (c *Client) EndJobInputs(jobId string) error { - req := request{ - method: client.POST, - url: makeURL(apiJobs, jobId, apiJobsLive, apiJobsIn, apiJobsEnd), - expectedStatus: http.StatusAccepted, - } - if _, err := c.sendRequest(req); err != nil { - return errors.Newf(err, "failed to end inputs for job %s", jobId) - } - return nil -} - -// This cancels a job from doing any further work. -// Cancellation is asynchronous and "best effort"; there is no guarantee the job will actually stop -// See API docs: http://apidocs.joyent.com/manta/api.html#CancelJob -func (c *Client) CancelJob(jobId string) error { - req := request{ - method: client.POST, - url: makeURL(apiJobs, jobId, apiJobsLive, apiJobsCancel), - expectedStatus: http.StatusAccepted, - } - if _, err := c.sendRequest(req); err != nil { - return errors.Newf(err, "failed to cancel job %s", jobId) - } - return nil -} - -// Returns the list of jobs. -// Note you can filter the set of jobs down to only live jobs by setting the liveOnly flag. 
-// See API docs: http://apidocs.joyent.com/manta/api.html#ListJobs -func (c *Client) ListJobs(liveOnly bool) ([]Entry, error) { - var resp []Entry - var url string - if liveOnly { - url = fmt.Sprintf("%s?state=running", apiJobs) - } else { - url = apiJobs - } - req := request{ - method: client.GET, - url: url, - resp: &resp, - } - if _, err := c.sendRequest(req); err != nil { - return nil, errors.Newf(err, "failed to list jobs") - } - return resp, nil -} - -// Gets the high-level job container object for a given job. -// See API docs: http://apidocs.joyent.com/manta/api.html#GetJob -func (c *Client) GetJob(jobId string) (Job, error) { - var resp Job - req := request{ - method: client.GET, - url: makeURL(apiJobs, jobId, apiJobsLive, apiJobsStatus), - resp: &resp, - } - if _, err := c.sendRequest(req); err != nil { - return Job{}, errors.Newf(err, "failed to get job with id: %s", jobId) - } - return resp, nil -} - -// Returns the current "live" set of outputs from a given job. -// See API docs: http://apidocs.joyent.com/manta/api.html#GetJobOutput -func (c *Client) GetJobOutput(jobId string) (string, error) { - var resp string - req := request{ - method: client.GET, - url: makeURL(apiJobs, jobId, apiJobsLive, apiJobsOut), - resp: &resp, - } - if _, err := c.sendRequest(req); err != nil { - return "", errors.Newf(err, "failed to get output for job with id: %s", jobId) - } - return resp, nil -} - -// Returns the submitted input objects for a given job, available while the job is running. -// See API docs: http://apidocs.joyent.com/manta/api.html#GetJobInput -func (c *Client) GetJobInput(jobId string) (string, error) { - var resp string - req := request{ - method: client.GET, - url: makeURL(apiJobs, jobId, apiJobsLive, apiJobsIn), - resp: &resp, - } - if _, err := c.sendRequest(req); err != nil { - return "", errors.Newf(err, "failed to get input for job with id: %s", jobId) - } - return resp, nil -} - -// Returns the current "live" set of failures from a given job. 
-// See API docs: http://apidocs.joyent.com/manta/api.html#GetJobFailures -func (c *Client) GetJobFailures(jobId string) (interface{}, error) { - var resp interface{} - req := request{ - method: client.GET, - url: makeURL(apiJobs, jobId, apiJobsLive, apiJobsFail), - resp: &resp, - } - if _, err := c.sendRequest(req); err != nil { - return nil, errors.Newf(err, "failed to get failures for job with id: %s", jobId) - } - return resp, nil -} - -// Returns the current "live" set of errors from a given job. -// See API docs: http://apidocs.joyent.com/manta/api.html#GetJobErrors -func (c *Client) GetJobErrors(jobId string) ([]JobError, error) { - var resp []JobError - req := request{ - method: client.GET, - url: makeURL(apiJobs, jobId, apiJobsLive, apiJobsErr), - resp: &resp, - } - if _, err := c.sendRequest(req); err != nil { - return nil, errors.Newf(err, "failed to get errors for job with id: %s", jobId) - } - return resp, nil -} - -// Returns a signed URL to retrieve the object at path. -func (c *Client) SignURL(path string, expires time.Time) (string, error) { - return c.client.SignURL(path, expires) -} diff --git a/vendor/github.com/joyent/gosign/LICENSE b/vendor/github.com/joyent/gosign/LICENSE deleted file mode 100755 index 14e2f777f6c..00000000000 --- a/vendor/github.com/joyent/gosign/LICENSE +++ /dev/null @@ -1,373 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. 
"Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. 
"Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. 
-Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. 
Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. 
However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. 
Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. 
* -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. * -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. 
Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/joyent/gosign/auth/auth.go b/vendor/github.com/joyent/gosign/auth/auth.go deleted file mode 100644 index 018e24c66bb..00000000000 --- a/vendor/github.com/joyent/gosign/auth/auth.go +++ /dev/null @@ -1,132 +0,0 @@ -// -// gosign - Go HTTP signing library for the Joyent Public Cloud and Joyent Manta -// -// -// Copyright (c) 2013 Joyent Inc. -// -// Written by Daniele Stroppa -// - -package auth - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "fmt" - "net/http" - "net/url" - "strings" -) - -const ( - // Authorization Headers - SdcSignature = "Signature keyId=\"/%s/keys/%s\",algorithm=\"%s\" %s" - MantaSignature = "Signature keyId=\"/%s/keys/%s\",algorithm=\"%s\",signature=\"%s\"" -) - -type Endpoint struct { - URL string -} - -type Auth struct { - User string - PrivateKey PrivateKey - Algorithm string -} - -type Credentials struct { - UserAuthentication *Auth - SdcKeyId string - SdcEndpoint Endpoint - MantaKeyId string - MantaEndpoint Endpoint -} - -type PrivateKey struct { - key *rsa.PrivateKey -} - -// NewAuth creates a new Auth. -func NewAuth(user, privateKey, algorithm string) (*Auth, error) { - block, _ := pem.Decode([]byte(privateKey)) - if block == nil { - return nil, fmt.Errorf("invalid private key data: %s", privateKey) - } - rsakey, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("An error occurred while parsing the key: %s", err) - } - return &Auth{user, PrivateKey{rsakey}, algorithm}, nil -} - -// The CreateAuthorizationHeader returns the Authorization header for the give request. 
-func CreateAuthorizationHeader(headers http.Header, credentials *Credentials, isMantaRequest bool) (string, error) { - if isMantaRequest { - signature, err := GetSignature(credentials.UserAuthentication, "date: "+headers.Get("Date")) - if err != nil { - return "", err - } - return fmt.Sprintf(MantaSignature, credentials.UserAuthentication.User, credentials.MantaKeyId, - credentials.UserAuthentication.Algorithm, signature), nil - } - signature, err := GetSignature(credentials.UserAuthentication, headers.Get("Date")) - if err != nil { - return "", err - } - return fmt.Sprintf(SdcSignature, credentials.UserAuthentication.User, credentials.SdcKeyId, - credentials.UserAuthentication.Algorithm, signature), nil -} - -// The GetSignature method signs the specified key according to http://apidocs.joyent.com/cloudapi/#issuing-requests -// and http://apidocs.joyent.com/manta/api.html#authentication. -func GetSignature(auth *Auth, signing string) (string, error) { - hashFunc := getHashFunction(auth.Algorithm) - hash := hashFunc.New() - hash.Write([]byte(signing)) - - digest := hash.Sum(nil) - - signed, err := rsa.SignPKCS1v15(rand.Reader, auth.PrivateKey.key, hashFunc, digest) - if err != nil { - return "", fmt.Errorf("An error occurred while signing the key: %s", err) - } - - return base64.StdEncoding.EncodeToString(signed), nil -} - -// Helper method to get the Hash function based on the algorithm -func getHashFunction(algorithm string) (hashFunc crypto.Hash) { - switch strings.ToLower(algorithm) { - case "rsa-sha1": - hashFunc = crypto.SHA1 - case "rsa-sha224", "rsa-sha256": - hashFunc = crypto.SHA256 - case "rsa-sha384", "rsa-sha512": - hashFunc = crypto.SHA512 - default: - hashFunc = crypto.SHA256 - } - return -} - -func (cred *Credentials) Region() string { - parsedUrl, err := url.Parse(cred.SdcEndpoint.URL) - if err != nil { - // Bogus URL - no region. 
- return "" - } - if strings.HasPrefix(parsedUrl.Host, "localhost") || strings.HasPrefix(parsedUrl.Host, "127.0.0.1") { - return "some-region" - } - - host := parsedUrl.Host - firstDotIdx := strings.Index(host, ".") - if firstDotIdx >= 0 { - return host[:firstDotIdx] - } - return host -} diff --git a/vendor/github.com/joyent/triton-go/CHANGELOG.md b/vendor/github.com/joyent/triton-go/CHANGELOG.md deleted file mode 100644 index df4a0fb4e09..00000000000 --- a/vendor/github.com/joyent/triton-go/CHANGELOG.md +++ /dev/null @@ -1,9 +0,0 @@ -## 0.2.0 (November 2) - -- Introduce CloudAPI's Ping under compute -- Introduce CloudAPI's RebootMachine under compute instances -- tools: Introduce unit testing and scripts for linting, etc. - -## 0.1.0 (November 2) - -- Initial release of a versioned SDK diff --git a/vendor/github.com/joyent/triton-go/GNUmakefile b/vendor/github.com/joyent/triton-go/GNUmakefile deleted file mode 100644 index cca7f675907..00000000000 --- a/vendor/github.com/joyent/triton-go/GNUmakefile +++ /dev/null @@ -1,47 +0,0 @@ -TEST?=$$(go list ./... |grep -Ev 'vendor|examples|testutils') -GOFMT_FILES?=$$(find . -name '*.go' |grep -v vendor) - -default: vet errcheck test - -tools:: ## Download and install all dev/code tools - @echo "==> Installing dev tools" - go get -u github.com/golang/dep/cmd/dep - go get -u github.com/golang/lint/golint - go get -u github.com/kisielk/errcheck - @echo "==> Installing test package dependencies" - go test -i $(TEST) || exit 1 - -test:: ## Run unit tests - @echo "==> Running unit tests" - @echo $(TEST) | \ - xargs -t go test -v $(TESTARGS) -timeout=30s -parallel=1 | grep -Ev 'TRITON_TEST|TestAcc' - -testacc:: ## Run acceptance tests - @echo "==> Running acceptance tests" - TRITON_TEST=1 go test $(TEST) -v $(TESTARGS) -run -timeout 120m - -vet:: ## Check for unwanted code constructs - @echo "go vet ." - @go vet $$(go list ./... | grep -v vendor/) ; if [ $$? -eq 1 ]; then \ - echo ""; \ - echo "Vet found suspicious constructs. 
Please check the reported constructs"; \ - echo "and fix them if necessary before submitting the code for review."; \ - exit 1; \ - fi - -lint:: ## Lint and vet code by common Go standards - @bash $(CURDIR)/scripts/lint.sh - -fmt:: ## Format as canonical Go code - gofmt -w $(GOFMT_FILES) - -fmtcheck:: ## Check if code format is canonical Go - @bash $(CURDIR)/scripts/gofmtcheck.sh - -errcheck:: ## Check for unhandled errors - @bash $(CURDIR)/scripts/errcheck.sh - -.PHONY: help -help:: ## Display this help message - @echo "GNU make(1) targets:" - @grep -E '^[a-zA-Z_.-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}' diff --git a/vendor/github.com/joyent/triton-go/Gopkg.lock b/vendor/github.com/joyent/triton-go/Gopkg.lock deleted file mode 100644 index b61936ad3f5..00000000000 --- a/vendor/github.com/joyent/triton-go/Gopkg.lock +++ /dev/null @@ -1,39 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - name = "github.com/abdullin/seq" - packages = ["."] - revision = "d5467c17e7afe8d8f08f556c6c811a50c3feb28d" - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/errwrap" - packages = ["."] - revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55" - -[[projects]] - branch = "master" - name = "github.com/sean-/seed" - packages = ["."] - revision = "e2103e2c35297fb7e17febb81e49b312087a2372" - -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = ["curve25519","ed25519","ed25519/internal/edwards25519","ssh","ssh/agent"] - revision = "bd6f299fb381e4c3393d1c4b1f0b94f5e77650c8" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "28853a8970ee33112a9e7998b18e658bed04d177537ec69db678189f0b8a9a7d" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/joyent/triton-go/Gopkg.toml b/vendor/github.com/joyent/triton-go/Gopkg.toml deleted file mode 100644 index 3b85ddf9bb2..00000000000 --- a/vendor/github.com/joyent/triton-go/Gopkg.toml +++ /dev/null @@ -1,42 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "github.com/abdullin/seq" - -[[constraint]] - name = "github.com/davecgh/go-spew" - version = "1.1.0" - -[[constraint]] - branch = "master" - name = "github.com/hashicorp/errwrap" - -[[constraint]] - branch = "master" - name = "github.com/sean-/seed" - -[[constraint]] - branch = "master" - name = "golang.org/x/crypto" diff --git a/vendor/github.com/joyent/triton-go/LICENSE b/vendor/github.com/joyent/triton-go/LICENSE deleted file mode 100644 index 14e2f777f6c..00000000000 --- a/vendor/github.com/joyent/triton-go/LICENSE +++ /dev/null @@ -1,373 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. 
"Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. 
For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. 
-Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. 
Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. 
However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. 
Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. 
* -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. * -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. 
Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/joyent/triton-go/README.md b/vendor/github.com/joyent/triton-go/README.md deleted file mode 100644 index 1089c72da1b..00000000000 --- a/vendor/github.com/joyent/triton-go/README.md +++ /dev/null @@ -1,215 +0,0 @@ -# triton-go - -`triton-go` is an idiomatic library exposing a client SDK for Go applications -using Joyent's Triton Compute and Storage (Manta) APIs. - -## Usage - -Triton uses [HTTP Signature][4] to sign the Date header in each HTTP request -made to the Triton API. Currently, requests can be signed using either a private -key file loaded from disk (using an [`authentication.PrivateKeySigner`][5]), or -using a key stored with the local SSH Agent (using an [`SSHAgentSigner`][6]. - -To construct a Signer, use the `New*` range of methods in the `authentication` -package. In the case of `authentication.NewSSHAgentSigner`, the parameters are -the fingerprint of the key with which to sign, and the account name (normally -stored in the `SDC_ACCOUNT` environment variable). For example: - -``` -const fingerprint := "a4:c6:f3:75:80:27:e0:03:a9:98:79:ef:c5:0a:06:11" -sshKeySigner, err := authentication.NewSSHAgentSigner(fingerprint, "AccountName") -if err != nil { - log.Fatalf("NewSSHAgentSigner: %s", err) -} -``` - -An appropriate key fingerprint can be generated using `ssh-keygen`. - -``` -ssh-keygen -Emd5 -lf ~/.ssh/id_rsa.pub | cut -d " " -f 2 | sed 's/MD5://' -``` - -Each top level package, `account`, `compute`, `identity`, `network`, all have -their own seperate client. In order to initialize a package client, simply pass -the global `triton.ClientConfig` struct into the client's constructor function. 
- -```go - config := &triton.ClientConfig{ - TritonURL: os.Getenv("SDC_URL"), - MantaURL: os.Getenv("MANTA_URL"), - AccountName: accountName, - Signers: []authentication.Signer{sshKeySigner}, - } - - c, err := compute.NewClient(config) - if err != nil { - log.Fatalf("compute.NewClient: %s", err) - } -``` - -Constructing `compute.Client` returns an interface which exposes `compute` API -resources. The same goes for all other packages. Reference their unique -documentation for more information. - -The same `triton.ClientConfig` will initialize the Manta `storage` client as -well... - -```go - c, err := storage.NewClient(config) - if err != nil { - log.Fatalf("storage.NewClient: %s", err) - } -``` - -## Error Handling - -If an error is returned by the HTTP API, the `error` returned from the function -will contain an instance of `compute.TritonError` in the chain. Error wrapping -is performed using the [errwrap][7] library from HashiCorp. - -## Acceptance Tests - -Acceptance Tests run directly against the Triton API, so you will need either a -local installation of Triton or an account with Joyent's Public Cloud offering -in order to run them. The tests create real resources (and thus cost real -money)! - -In order to run acceptance tests, the following environment variables must be -set: - -- `TRITON_TEST` - must be set to any value in order to indicate desire to create - resources -- `SDC_URL` - the base endpoint for the Triton API -- `SDC_ACCOUNT` - the account name for the Triton API -- `SDC_KEY_ID` - the fingerprint of the SSH key identifying the key - -Additionally, you may set `SDC_KEY_MATERIAL` to the contents of an unencrypted -private key. If this is set, the PrivateKeySigner (see above) will be used - if -not the SSHAgentSigner will be used. - -### Example Run - -The verbose output has been removed for brevity here. 
- -``` -$ HTTP_PROXY=http://localhost:8888 \ - TRITON_TEST=1 \ - SDC_URL=https://us-sw-1.api.joyent.com \ - SDC_ACCOUNT=AccountName \ - SDC_KEY_ID=a4:c6:f3:75:80:27:e0:03:a9:98:79:ef:c5:0a:06:11 \ - go test -v -run "TestAccKey" -=== RUN TestAccKey_Create ---- PASS: TestAccKey_Create (12.46s) -=== RUN TestAccKey_Get ---- PASS: TestAccKey_Get (4.30s) -=== RUN TestAccKey_Delete ---- PASS: TestAccKey_Delete (15.08s) -PASS -ok github.com/joyent/triton-go 31.861s -``` - -## Example API - -There's an `examples/` directory available with sample code setup for many of -the APIs within this library. Most of these can be run using `go run` and -referencing your SSH key file use by your active `triton` CLI profile. - -```sh -$ eval "$(triton env us-sw-1)" -$ SDC_KEY_FILE=~/.ssh/triton-id_rsa go run examples/compute/instances.go -``` - -The following is a complete example of how to initialize the `compute` package -client and list all instances under an account. More detailed usage of this -library follows. 
- -```go - - -package main - -import ( - "context" - "fmt" - "io/ioutil" - "log" - "os" - "time" - - triton "github.com/joyent/triton-go" - "github.com/joyent/triton-go/authentication" - "github.com/joyent/triton-go/compute" -) - -func main() { - keyID := os.Getenv("SDC_KEY_ID") - accountName := os.Getenv("SDC_ACCOUNT") - keyMaterial := os.Getenv("SDC_KEY_MATERIAL") - - var signer authentication.Signer - var err error - - if keyMaterial == "" { - signer, err = authentication.NewSSHAgentSigner(keyID, accountName) - if err != nil { - log.Fatalf("Error Creating SSH Agent Signer: {{err}}", err) - } - } else { - var keyBytes []byte - if _, err = os.Stat(keyMaterial); err == nil { - keyBytes, err = ioutil.ReadFile(keyMaterial) - if err != nil { - log.Fatalf("Error reading key material from %s: %s", - keyMaterial, err) - } - block, _ := pem.Decode(keyBytes) - if block == nil { - log.Fatalf( - "Failed to read key material '%s': no key found", keyMaterial) - } - - if block.Headers["Proc-Type"] == "4,ENCRYPTED" { - log.Fatalf( - "Failed to read key '%s': password protected keys are\n"+ - "not currently supported. 
Please decrypt the key prior to use.", keyMaterial) - } - - } else { - keyBytes = []byte(keyMaterial) - } - - signer, err = authentication.NewPrivateKeySigner(keyID, []byte(keyMaterial), accountName) - if err != nil { - log.Fatalf("Error Creating SSH Private Key Signer: {{err}}", err) - } - } - - config := &triton.ClientConfig{ - TritonURL: os.Getenv("SDC_URL"), - AccountName: accountName, - Signers: []authentication.Signer{signer}, - } - - c, err := compute.NewClient(config) - if err != nil { - log.Fatalf("compute.NewClient: %s", err) - } - - listInput := &compute.ListInstancesInput{} - instances, err := c.Instances().List(context.Background(), listInput) - if err != nil { - log.Fatalf("compute.Instances.List: %v", err) - } - numInstances := 0 - for _, instance := range instances { - numInstances++ - fmt.Println(fmt.Sprintf("-- Instance: %v", instance.Name)) - } -} - -``` - -[4]: https://github.com/joyent/node-http-signature/blob/master/http_signing.md -[5]: https://godoc.org/github.com/joyent/triton-go/authentication -[6]: https://godoc.org/github.com/joyent/triton-go/authentication -[7]: https://github.com/hashicorp/go-errwrap diff --git a/vendor/github.com/joyent/triton-go/authentication/dummy.go b/vendor/github.com/joyent/triton-go/authentication/dummy.go deleted file mode 100644 index cd16273b664..00000000000 --- a/vendor/github.com/joyent/triton-go/authentication/dummy.go +++ /dev/null @@ -1,72 +0,0 @@ -package authentication - -// DON'T USE THIS OUTSIDE TESTING ~ This key was only created to use for -// internal unit testing. It should never be used for acceptance testing either. -// -// This is just a randomly generated key pair. 
-var Dummy = struct { - Fingerprint string - PrivateKey []byte - PublicKey []byte - Signer Signer -}{ - "9f:d6:65:fc:d6:60:dc:d0:4e:db:2d:75:f7:92:8c:31", - []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEAui9lNjCJahHeFSFC6HXi/CNX588C/L2gJUx65bnNphVC98hW -1wzoRvPXHx5aWnb7lEbpNhP6B0UoCBDTaPgt9hHfD/oNQ+6HT1QpDIGfZmXI91/t -cjGVSBbxN7WaYt/HsPrGjbalwvQPChN53sMVmFkMTEDR5G3zOBOAGrOimlCT80wI -2S5Xg0spd8jjKM5I1swDR0xtuDWnHTR1Ohin+pEQIE6glLTfYq7oQx6nmMXXBNmk -+SaPD1FAyjkF/81im2EHXBygNEwraVrDcAxK2mKlU2XMJiogQKNYWlm3UkbNB6WP -Le12+Ka02rmIVsSqIpc/ZCBraAlCaSWlYCkU+vJ2hH/+ypy5bXNlbaTiWZK+vuI7 -PC87T50yLNeXVuNZAynzDpBCvsjiiHrB/ZFRfVfF6PviV8CV+m7GTzfAwJhVeSbl -rR6nts16K0HTD48v57DU0b0t5VOvC7cWPShs+afdSL3Z8ReL5EWMgU1wfvtycRKe -hiDVGj3Ms2cf83RIANr387G+1LcTQYP7JJuB7Svy5j+R6+HjI0cgu4EMUPdWfCNG -GyrlxwJNtPmUSfasH1xUKpqr7dC+0sN4/gfJw75WTAYrATkPzexoYNaMsGDfhuoh -kYa3Tn2q1g3kqhsX/R0Fd5d8d5qc137qcRCxiZYz9f3bVkXQbhYmO9da3KsCAwEA -AQKCAgAeEAURqOinPddUJhi9nDtYZwSMo3piAORY4W5+pW+1P32esLSE6MqgmkLD -/YytSsT4fjKtzq/yeJIsKztXmasiLmSMGd4Gd/9VKcuu/0cTq5+1gcG/TI5EI6Az -VJlnGacOxo9E1pcRUYMUJ2zoMSvNe6NmtJivf6lkBpIKvbKlpBkfkclj9/2db4d0 -lfVH43cTZ8Gnw4l70v320z+Sb+S/qqil7swy9rmTH5bVL5/0JQ3A9LuUl0tGN+J0 -RJzZXvprCFG958leaGYiDsu7zeBQPtlfC/LYvriSd02O2SmmmVQFxg/GZK9vGsvc -/VQsXnjyOOW9bxaop8YXYELBsiB21ipTHzOwoqHT8wFnjgU9Y/7iZIv7YbZKQsCS -DrwdlZ/Yw90wiif+ldYryIVinWfytt6ERv4Dgezc98+1XPi1Z/WB74/lIaDXFl3M -3ypjtvLYbKew2IkIjeAwjvZJg/QpC/50RrrPtVDgeAI1Ni01ikixUhMYsHJ1kRih -0tqLvLqSPoHmr6luFlaoKdc2eBqb+8U6K/TrXhKtT7BeUFiSbvnVfdbrH9r+AY/2 -zYtG6llzkE5DH8ZR3Qp+dx7QEDtvYhGftWhx9uasd79AN7CuGYnL54YFLKGRrWKN -ylysqfUyOQYiitdWdNCw9PP2vGRx5JAsMMSy+ft18jjTJvNQ0QKCAQEA28M11EE6 -MpnHxfyP00Dl1+3wl2lRyNXZnZ4hgkk1f83EJGpoB2amiMTF8P1qJb7US1fXtf7l -gkJMMk6t6iccexV1/NBh/7tDZHH/v4HPirFTXQFizflaghD8dEADy9DY4BpQYFRe -8zGsv4/4U0txCXkUIfKcENt/FtXv2T9blJT6cDV0yTx9IAyd4Kor7Ly2FIYroSME -uqnOQt5PwB+2qkE+9hdg4xBhFs9sW5dvyBvQvlBfX/xOmMw2ygH6vsaJlNfZ5VPa -EP/wFP/qHyhDlCfbHdL6qF2//wUoM2QM9RgBdZNhcKU7zWuf7Ev199tmlLC5O14J 
-PkQxUGftMfmWxQKCAQEA2OLKD8dwOzpwGJiPQdBmGpwCamfcCY4nDwqEaCu4vY1R -OJR+rpYdC2hgl5PTXWH7qzJVdT/ZAz2xUQOgB1hD3Ltk7DQ+EZIA8+vJdaicQOme -vfpMPNDxCEX9ee0AXAmAC3aET82B4cMFnjXjl1WXLLTowF/Jp/hMorm6tl2m15A2 -oTyWlB/i/W/cxHl2HFWK7o8uCNoKpKJjheNYn+emEcH1bkwrk8sxQ78cBNmqe/gk -MLgu8qfXQ0LLKIL7wqmIUHeUpkepOod8uXcTmmN2X9saCIwFKx4Jal5hh5v5cy0G -MkyZcUIhhnmzr7lXbepauE5V2Sj5Qp040AfRVjZcrwKCAQANe8OwuzPL6P2F20Ij -zwaLIhEx6QdYkC5i6lHaAY3jwoc3SMQLODQdjh0q9RFvMW8rFD+q7fG89T5hk8w9 -4ppvvthXY52vqBixcAEmCdvnAYxA15XtV1BDTLGAnHDfL3gu/85QqryMpU6ZDkdJ -LQbJcwFWN+F1c1Iv335w0N9YlW9sNQtuUWTH8544K5i4VLfDOJwyrchbf5GlLqir -/AYkGg634KVUKSwbzywxzm/QUkyTcLD5Xayg2V6/NDHjRKEqXbgDxwpJIrrjPvRp -ZvoGfA+Im+o/LElcZz+ZL5lP7GIiiaFf3PN3XhQY1mxIAdEgbFthFhrxFBQGf+ng -uBSVAoIBAHl12K8pg8LHoUtE9MVoziWMxRWOAH4ha+JSg4BLK/SLlbbYAnIHg1CG -LcH1eWNMokJnt9An54KXJBw4qYAzgB23nHdjcncoivwPSg1oVclMjCfcaqGMac+2 -UpPblF32vAyvXL3MWzZxn03Q5Bo2Rqk0zzwc6LP2rARdeyDyJaOHEfEOG03s5ZQE -91/YnbqUdW/QI3m1kkxM3Ot4PIOgmTJMqwQQCD+GhZppBmn49C7k8m+OVkxyjm0O -lPOlFxUXGE3oCgltDGrIwaKj+wh1Ny/LZjLvJ13UPnWhUYE+al6EEnpMx4nT/S5w -LZ71bu8RVajtxcoN1jnmDpECL8vWOeUCggEBAIEuKoY7pVHfs5gr5dXfQeVZEtqy -LnSdsd37/aqQZRlUpVmBrPNl1JBLiEVhk2SL3XJIDU4Er7f0idhtYLY3eE7wqZ4d -38Iaj5tv3zBc/wb1bImPgOgXCH7QrrbW7uTiYMLScuUbMR4uSpfubLaV8Zc9WHT8 -kTJ2pKKtA1GPJ4V7HCIxuTjD2iyOK1CRkaqSC+5VUuq5gHf92CEstv9AIvvy5cWg -gnfBQoS89m3aO035henSfRFKVJkHaEoasj8hB3pwl9FGZUJp1c2JxiKzONqZhyGa -6tcIAM3od0QtAfDJ89tWJ5D31W8KNNysobFSQxZ62WgLUUtXrkN1LGodxGQ= ------END RSA PRIVATE KEY-----`), - []byte(`ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC6L2U2MIlqEd4VIULodeL8I1fnzwL8vaAlTHrluc2mFUL3yFbXDOhG89cfHlpadvuURuk2E/oHRSgIENNo+C32Ed8P+g1D7odPVCkMgZ9mZcj3X+1yMZVIFvE3tZpi38ew+saNtqXC9A8KE3newxWYWQxMQNHkbfM4E4Aas6KaUJPzTAjZLleDSyl3yOMozkjWzANHTG24NacdNHU6GKf6kRAgTqCUtN9iruhDHqeYxdcE2aT5Jo8PUUDKOQX/zWKbYQdcHKA0TCtpWsNwDEraYqVTZcwmKiBAo1haWbdSRs0HpY8t7Xb4prTauYhWxKoilz9kIGtoCUJpJaVgKRT68naEf/7KnLltc2VtpOJZkr6+4js8LztPnTIs15dW41kDKfMOkEK+yOKIesH9kVF9V8Xo++JXwJX6bsZPN8DAmFV5JuWtHqe2zXorQdMPjy/nsNTRvS3lU68LtxY9KGz5p91IvdnxF4vkRYyBTXB++3JxEp6GINUaPcyzZx/zdEgA2vfzsb7UtxNBg/skm4HtK/LmP5Hr4eMjRyC7gQxQ91Z8I0YbKuXHAk20+ZRJ9qwfXFQqmqvt0L7Sw3j+B8nDvlZMBisBOQ/N7Ghg1oywYN+G6iGRhrdOfarWDeSqGxf9HQV3l3x3mpzXfupxELGJljP1/dtWRdBuFiY711rcqw== test-dummy-20171002140848`), - nil, -} - -func init() { - testSigner, _ := NewTestSigner() - Dummy.Signer = testSigner -} diff --git a/vendor/github.com/joyent/triton-go/authentication/ecdsa_signature.go b/vendor/github.com/joyent/triton-go/authentication/ecdsa_signature.go deleted file mode 100644 index 8aaba97a548..00000000000 --- a/vendor/github.com/joyent/triton-go/authentication/ecdsa_signature.go +++ /dev/null @@ -1,66 +0,0 @@ -package authentication - -import ( - "encoding/asn1" - "encoding/base64" - "fmt" - "math/big" - - "github.com/hashicorp/errwrap" - "golang.org/x/crypto/ssh" -) - -type ecdsaSignature struct { - hashAlgorithm string - R *big.Int - S *big.Int -} - -func (s *ecdsaSignature) SignatureType() string { - return fmt.Sprintf("ecdsa-%s", s.hashAlgorithm) -} - -func (s *ecdsaSignature) String() string { - toEncode := struct { - R *big.Int - S *big.Int - }{ - R: s.R, - S: s.S, - } - - signatureBytes, err := asn1.Marshal(toEncode) - if err != nil { - panic(fmt.Sprintf("Error marshaling signature: %s", err)) - } - - return base64.StdEncoding.EncodeToString(signatureBytes) -} - -func newECDSASignature(signatureBlob []byte) (*ecdsaSignature, error) { - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := ssh.Unmarshal(signatureBlob, 
&ecSig); err != nil { - return nil, errwrap.Wrapf("Error unmarshaling signature: {{err}}", err) - } - - rValue := ecSig.R.Bytes() - var hashAlgorithm string - switch len(rValue) { - case 31, 32: - hashAlgorithm = "sha256" - case 65, 66: - hashAlgorithm = "sha512" - default: - return nil, fmt.Errorf("Unsupported key length: %d", len(rValue)) - } - - return &ecdsaSignature{ - hashAlgorithm: hashAlgorithm, - R: ecSig.R, - S: ecSig.S, - }, nil -} diff --git a/vendor/github.com/joyent/triton-go/authentication/private_key_signer.go b/vendor/github.com/joyent/triton-go/authentication/private_key_signer.go deleted file mode 100644 index 43bc286f03f..00000000000 --- a/vendor/github.com/joyent/triton-go/authentication/private_key_signer.go +++ /dev/null @@ -1,106 +0,0 @@ -package authentication - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/errwrap" - "golang.org/x/crypto/ssh" -) - -type PrivateKeySigner struct { - formattedKeyFingerprint string - keyFingerprint string - algorithm string - accountName string - hashFunc crypto.Hash - - privateKey *rsa.PrivateKey -} - -func NewPrivateKeySigner(keyFingerprint string, privateKeyMaterial []byte, accountName string) (*PrivateKeySigner, error) { - keyFingerprintMD5 := strings.Replace(keyFingerprint, ":", "", -1) - - block, _ := pem.Decode(privateKeyMaterial) - if block == nil { - return nil, errors.New("Error PEM-decoding private key material: nil block received") - } - - rsakey, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, errwrap.Wrapf("Error parsing private key: {{err}}", err) - } - - sshPublicKey, err := ssh.NewPublicKey(rsakey.Public()) - if err != nil { - return nil, errwrap.Wrapf("Error parsing SSH key from private key: {{err}}", err) - } - - matchKeyFingerprint := formatPublicKeyFingerprint(sshPublicKey, false) - displayKeyFingerprint := 
formatPublicKeyFingerprint(sshPublicKey, true) - if matchKeyFingerprint != keyFingerprintMD5 { - return nil, errors.New("Private key file does not match public key fingerprint") - } - - signer := &PrivateKeySigner{ - formattedKeyFingerprint: displayKeyFingerprint, - keyFingerprint: keyFingerprint, - accountName: accountName, - - hashFunc: crypto.SHA1, - privateKey: rsakey, - } - - _, algorithm, err := signer.SignRaw("HelloWorld") - if err != nil { - return nil, fmt.Errorf("Cannot sign using ssh agent: %s", err) - } - signer.algorithm = algorithm - - return signer, nil -} - -func (s *PrivateKeySigner) Sign(dateHeader string) (string, error) { - const headerName = "date" - - hash := s.hashFunc.New() - hash.Write([]byte(fmt.Sprintf("%s: %s", headerName, dateHeader))) - digest := hash.Sum(nil) - - signed, err := rsa.SignPKCS1v15(rand.Reader, s.privateKey, s.hashFunc, digest) - if err != nil { - return "", errwrap.Wrapf("Error signing date header: {{err}}", err) - } - signedBase64 := base64.StdEncoding.EncodeToString(signed) - - keyID := fmt.Sprintf("/%s/keys/%s", s.accountName, s.formattedKeyFingerprint) - return fmt.Sprintf(authorizationHeaderFormat, keyID, "rsa-sha1", headerName, signedBase64), nil -} - -func (s *PrivateKeySigner) SignRaw(toSign string) (string, string, error) { - hash := s.hashFunc.New() - hash.Write([]byte(toSign)) - digest := hash.Sum(nil) - - signed, err := rsa.SignPKCS1v15(rand.Reader, s.privateKey, s.hashFunc, digest) - if err != nil { - return "", "", errwrap.Wrapf("Error signing date header: {{err}}", err) - } - signedBase64 := base64.StdEncoding.EncodeToString(signed) - return signedBase64, "rsa-sha1", nil -} - -func (s *PrivateKeySigner) KeyFingerprint() string { - return s.formattedKeyFingerprint -} - -func (s *PrivateKeySigner) DefaultAlgorithm() string { - return s.algorithm -} diff --git a/vendor/github.com/joyent/triton-go/authentication/rsa_signature.go b/vendor/github.com/joyent/triton-go/authentication/rsa_signature.go deleted file 
mode 100644 index 8d513f6c457..00000000000 --- a/vendor/github.com/joyent/triton-go/authentication/rsa_signature.go +++ /dev/null @@ -1,25 +0,0 @@ -package authentication - -import ( - "encoding/base64" -) - -type rsaSignature struct { - hashAlgorithm string - signature []byte -} - -func (s *rsaSignature) SignatureType() string { - return s.hashAlgorithm -} - -func (s *rsaSignature) String() string { - return base64.StdEncoding.EncodeToString(s.signature) -} - -func newRSASignature(signatureBlob []byte) (*rsaSignature, error) { - return &rsaSignature{ - hashAlgorithm: "rsa-sha1", - signature: signatureBlob, - }, nil -} diff --git a/vendor/github.com/joyent/triton-go/authentication/signature.go b/vendor/github.com/joyent/triton-go/authentication/signature.go deleted file mode 100644 index e6a52df301b..00000000000 --- a/vendor/github.com/joyent/triton-go/authentication/signature.go +++ /dev/null @@ -1,27 +0,0 @@ -package authentication - -import ( - "regexp" - "fmt" -) - -type httpAuthSignature interface { - SignatureType() string - String() string -} - -func keyFormatToKeyType(keyFormat string) (string, error) { - if keyFormat == "ssh-rsa" { - return "rsa", nil - } - - if keyFormat == "ssh-ed25519" { - return "ed25519", nil - } - - if regexp.MustCompile("^ecdsa-sha2-*").Match([]byte(keyFormat)) { - return "ecdsa", nil - } - - return "", fmt.Errorf("Unknown key format: %s", keyFormat) -} diff --git a/vendor/github.com/joyent/triton-go/authentication/signer.go b/vendor/github.com/joyent/triton-go/authentication/signer.go deleted file mode 100644 index 6e3d31dd705..00000000000 --- a/vendor/github.com/joyent/triton-go/authentication/signer.go +++ /dev/null @@ -1,10 +0,0 @@ -package authentication - -const authorizationHeaderFormat = `Signature keyId="%s",algorithm="%s",headers="%s",signature="%s"` - -type Signer interface { - DefaultAlgorithm() string - KeyFingerprint() string - Sign(dateHeader string) (string, error) - SignRaw(toSign string) (string, string, error) -} 
diff --git a/vendor/github.com/joyent/triton-go/authentication/ssh_agent_signer.go b/vendor/github.com/joyent/triton-go/authentication/ssh_agent_signer.go deleted file mode 100644 index 3da4a68e978..00000000000 --- a/vendor/github.com/joyent/triton-go/authentication/ssh_agent_signer.go +++ /dev/null @@ -1,170 +0,0 @@ -package authentication - -import ( - "crypto/md5" - "crypto/sha256" - "encoding/base64" - "errors" - "fmt" - "net" - "os" - "strings" - - "github.com/hashicorp/errwrap" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" -) - -var ( - ErrUnsetEnvVar = errors.New("SSH_AUTH_SOCK is not set") -) - -type SSHAgentSigner struct { - formattedKeyFingerprint string - keyFingerprint string - algorithm string - accountName string - keyIdentifier string - - agent agent.Agent - key ssh.PublicKey -} - -func NewSSHAgentSigner(keyFingerprint, accountName string) (*SSHAgentSigner, error) { - sshAgentAddress, agentOk := os.LookupEnv("SSH_AUTH_SOCK") - if !agentOk { - return nil, ErrUnsetEnvVar - } - - conn, err := net.Dial("unix", sshAgentAddress) - if err != nil { - return nil, errwrap.Wrapf("Error dialing SSH agent: {{err}}", err) - } - - ag := agent.NewClient(conn) - - signer := &SSHAgentSigner{ - keyFingerprint: keyFingerprint, - accountName: accountName, - agent: ag, - } - - matchingKey, err := signer.MatchKey() - if err != nil { - return nil, err - } - signer.key = matchingKey - signer.formattedKeyFingerprint = formatPublicKeyFingerprint(signer.key, true) - signer.keyIdentifier = fmt.Sprintf("/%s/keys/%s", signer.accountName, signer.formattedKeyFingerprint) - - _, algorithm, err := signer.SignRaw("HelloWorld") - if err != nil { - return nil, fmt.Errorf("Cannot sign using ssh agent: %s", err) - } - signer.algorithm = algorithm - - return signer, nil -} - -func (s *SSHAgentSigner) MatchKey() (ssh.PublicKey, error) { - keys, err := s.agent.List() - if err != nil { - return nil, errwrap.Wrapf("Error listing keys in SSH Agent: %s", err) - } - - 
keyFingerprintStripped := strings.TrimPrefix(s.keyFingerprint, "MD5:") - keyFingerprintStripped = strings.TrimPrefix(keyFingerprintStripped, "SHA256:") - keyFingerprintStripped = strings.Replace(keyFingerprintStripped, ":", "", -1) - - var matchingKey ssh.PublicKey - for _, key := range keys { - keyMD5 := md5.New() - keyMD5.Write(key.Marshal()) - finalizedMD5 := fmt.Sprintf("%x", keyMD5.Sum(nil)) - - keySHA256 := sha256.New() - keySHA256.Write(key.Marshal()) - finalizedSHA256 := base64.RawStdEncoding.EncodeToString(keySHA256.Sum(nil)) - - if keyFingerprintStripped == finalizedMD5 || keyFingerprintStripped == finalizedSHA256 { - matchingKey = key - } - } - - if matchingKey == nil { - return nil, fmt.Errorf("No key in the SSH Agent matches fingerprint: %s", s.keyFingerprint) - } - - return matchingKey, nil -} - -func (s *SSHAgentSigner) Sign(dateHeader string) (string, error) { - const headerName = "date" - - signature, err := s.agent.Sign(s.key, []byte(fmt.Sprintf("%s: %s", headerName, dateHeader))) - if err != nil { - return "", errwrap.Wrapf("Error signing date header: {{err}}", err) - } - - keyFormat, err := keyFormatToKeyType(signature.Format) - if err != nil { - return "", errwrap.Wrapf("Error reading signature: {{err}}", err) - } - - var authSignature httpAuthSignature - switch keyFormat { - case "rsa": - authSignature, err = newRSASignature(signature.Blob) - if err != nil { - return "", errwrap.Wrapf("Error reading signature: {{err}}", err) - } - case "ecdsa": - authSignature, err = newECDSASignature(signature.Blob) - if err != nil { - return "", errwrap.Wrapf("Error reading signature: {{err}}", err) - } - default: - return "", fmt.Errorf("Unsupported algorithm from SSH agent: %s", signature.Format) - } - - return fmt.Sprintf(authorizationHeaderFormat, s.keyIdentifier, - authSignature.SignatureType(), headerName, authSignature.String()), nil -} - -func (s *SSHAgentSigner) SignRaw(toSign string) (string, string, error) { - signature, err := s.agent.Sign(s.key, 
[]byte(toSign)) - if err != nil { - return "", "", errwrap.Wrapf("Error signing string: {{err}}", err) - } - - keyFormat, err := keyFormatToKeyType(signature.Format) - if err != nil { - return "", "", errwrap.Wrapf("Error reading signature: {{err}}", err) - } - - var authSignature httpAuthSignature - switch keyFormat { - case "rsa": - authSignature, err = newRSASignature(signature.Blob) - if err != nil { - return "", "", errwrap.Wrapf("Error reading signature: {{err}}", err) - } - case "ecdsa": - authSignature, err = newECDSASignature(signature.Blob) - if err != nil { - return "", "", errwrap.Wrapf("Error reading signature: {{err}}", err) - } - default: - return "", "", fmt.Errorf("Unsupported algorithm from SSH agent: %s", signature.Format) - } - - return authSignature.String(), authSignature.SignatureType(), nil -} - -func (s *SSHAgentSigner) KeyFingerprint() string { - return s.formattedKeyFingerprint -} - -func (s *SSHAgentSigner) DefaultAlgorithm() string { - return s.algorithm -} diff --git a/vendor/github.com/joyent/triton-go/authentication/test_signer.go b/vendor/github.com/joyent/triton-go/authentication/test_signer.go deleted file mode 100644 index a9c2c82d569..00000000000 --- a/vendor/github.com/joyent/triton-go/authentication/test_signer.go +++ /dev/null @@ -1,27 +0,0 @@ -package authentication - -// TestSigner represents an authentication key signer which we can use for -// testing purposes only. This will largely be a stub to send through client -// unit tests. 
-type TestSigner struct{} - -// NewTestSigner constructs a new instance of test signer -func NewTestSigner() (Signer, error) { - return &TestSigner{}, nil -} - -func (s *TestSigner) DefaultAlgorithm() string { - return "" -} - -func (s *TestSigner) KeyFingerprint() string { - return "" -} - -func (s *TestSigner) Sign(dateHeader string) (string, error) { - return "", nil -} - -func (s *TestSigner) SignRaw(toSign string) (string, string, error) { - return "", "", nil -} diff --git a/vendor/github.com/joyent/triton-go/authentication/util.go b/vendor/github.com/joyent/triton-go/authentication/util.go deleted file mode 100644 index 7c298b68c15..00000000000 --- a/vendor/github.com/joyent/triton-go/authentication/util.go +++ /dev/null @@ -1,29 +0,0 @@ -package authentication - -import ( - "crypto/md5" - "fmt" - "strings" - - "golang.org/x/crypto/ssh" -) - -// formatPublicKeyFingerprint produces the MD5 fingerprint of the given SSH -// public key. If display is true, the fingerprint is formatted with colons -// between each byte, as per the output of OpenSSL. 
-func formatPublicKeyFingerprint(key ssh.PublicKey, display bool) string { - publicKeyFingerprint := md5.New() - publicKeyFingerprint.Write(key.Marshal()) - publicKeyFingerprintString := fmt.Sprintf("%x", publicKeyFingerprint.Sum(nil)) - - if !display { - return publicKeyFingerprintString - } - - formatted := "" - for i := 0; i < len(publicKeyFingerprintString); i = i + 2 { - formatted = fmt.Sprintf("%s%s:", formatted, publicKeyFingerprintString[i:i+2]) - } - - return strings.TrimSuffix(formatted, ":") -} diff --git a/vendor/github.com/joyent/triton-go/client/client.go b/vendor/github.com/joyent/triton-go/client/client.go deleted file mode 100644 index df697805a4e..00000000000 --- a/vendor/github.com/joyent/triton-go/client/client.go +++ /dev/null @@ -1,413 +0,0 @@ -package client - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" - "errors" - "io" - "net" - "net/http" - "net/url" - "os" - "time" - - "github.com/hashicorp/errwrap" - "github.com/joyent/triton-go/authentication" -) - -const nilContext = "nil context" - -var ( - ErrDefaultAuth = errors.New("default SSH agent authentication requires SDC_KEY_ID and SSH_AUTH_SOCK") - ErrAccountName = errors.New("missing account name for Triton/Manta") - ErrMissingURL = errors.New("missing Triton and/or Manta URL") - - BadTritonURL = "invalid format of triton URL" - BadMantaURL = "invalid format of manta URL" -) - -// Client represents a connection to the Triton Compute or Object Storage APIs. -type Client struct { - HTTPClient *http.Client - Authorizers []authentication.Signer - TritonURL url.URL - MantaURL url.URL - AccountName string -} - -// New is used to construct a Client in order to make API -// requests to the Triton API. -// -// At least one signer must be provided - example signers include -// authentication.PrivateKeySigner and authentication.SSHAgentSigner. 
-func New(tritonURL string, mantaURL string, accountName string, signers ...authentication.Signer) (*Client, error) { - if accountName == "" { - return nil, ErrAccountName - } - - if tritonURL == "" && mantaURL == "" { - return nil, ErrMissingURL - } - - cloudURL, err := url.Parse(tritonURL) - if err != nil { - return nil, errwrap.Wrapf(BadTritonURL+": {{err}}", err) - } - - storageURL, err := url.Parse(mantaURL) - if err != nil { - return nil, errwrap.Wrapf(BadMantaURL+": {{err}}", err) - } - - authorizers := make([]authentication.Signer, 0) - for _, key := range signers { - if key != nil { - authorizers = append(authorizers, key) - } - } - - newClient := &Client{ - HTTPClient: &http.Client{ - Transport: httpTransport(false), - CheckRedirect: doNotFollowRedirects, - }, - Authorizers: authorizers, - TritonURL: *cloudURL, - MantaURL: *storageURL, - AccountName: accountName, - } - - // Default to constructing an SSHAgentSigner if there are no other signers - // passed into NewClient and there's an SDC_KEY_ID and SSH_AUTH_SOCK - // available in the user's environ(7). - if len(newClient.Authorizers) == 0 { - if err := newClient.DefaultAuth(); err != nil { - return nil, err - } - } - - return newClient, nil -} - -// initDefaultAuth provides a default key signer for a client. This should only -// be used internally if the client has no other key signer for authenticating -// with Triton. We first look for both `SDC_KEY_ID` and `SSH_AUTH_SOCK` in the -// user's environ(7). If so we default to the SSH agent key signer. 
-func (c *Client) DefaultAuth() error { - if keyID, keyOk := os.LookupEnv("SDC_KEY_ID"); keyOk { - defaultSigner, err := authentication.NewSSHAgentSigner(keyID, c.AccountName) - if err != nil { - return errwrap.Wrapf("problem initializing NewSSHAgentSigner: {{err}}", err) - } - c.Authorizers = append(c.Authorizers, defaultSigner) - } else { - return ErrDefaultAuth - } - return nil -} - -// InsecureSkipTLSVerify turns off TLS verification for the client connection. This -// allows connection to an endpoint with a certificate which was signed by a non- -// trusted CA, such as self-signed certificates. This can be useful when connecting -// to temporary Triton installations such as Triton Cloud-On-A-Laptop. -func (c *Client) InsecureSkipTLSVerify() { - if c.HTTPClient == nil { - return - } - - c.HTTPClient.Transport = httpTransport(true) -} - -func httpTransport(insecureSkipTLSVerify bool) *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - DisableKeepAlives: true, - MaxIdleConnsPerHost: -1, - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: insecureSkipTLSVerify, - }, - } -} - -func doNotFollowRedirects(*http.Request, []*http.Request) error { - return http.ErrUseLastResponse -} - -// TODO(justinwr): Deprecated? 
-// func (c *Client) FormatURL(path string) string { -// return fmt.Sprintf("%s%s", c.Endpoint, path) -// } - -func (c *Client) DecodeError(statusCode int, body io.Reader) error { - err := &TritonError{ - StatusCode: statusCode, - } - - errorDecoder := json.NewDecoder(body) - if err := errorDecoder.Decode(err); err != nil { - return errwrap.Wrapf("Error decoding error response: {{err}}", err) - } - - return err -} - -// ----------------------------------------------------------------------------- - -type RequestInput struct { - Method string - Path string - Query *url.Values - Headers *http.Header - Body interface{} -} - -func (c *Client) ExecuteRequestURIParams(ctx context.Context, inputs RequestInput) (io.ReadCloser, error) { - method := inputs.Method - path := inputs.Path - body := inputs.Body - query := inputs.Query - - var requestBody io.ReadSeeker - if body != nil { - marshaled, err := json.MarshalIndent(body, "", " ") - if err != nil { - return nil, err - } - requestBody = bytes.NewReader(marshaled) - } - - endpoint := c.TritonURL - endpoint.Path = path - if query != nil { - endpoint.RawQuery = query.Encode() - } - - req, err := http.NewRequest(method, endpoint.String(), requestBody) - if err != nil { - return nil, errwrap.Wrapf("Error constructing HTTP request: {{err}}", err) - } - - dateHeader := time.Now().UTC().Format(time.RFC1123) - req.Header.Set("date", dateHeader) - - // NewClient ensures there's always an authorizer (unless this is called - // outside that constructor). 
- authHeader, err := c.Authorizers[0].Sign(dateHeader) - if err != nil { - return nil, errwrap.Wrapf("Error signing HTTP request: {{err}}", err) - } - req.Header.Set("Authorization", authHeader) - req.Header.Set("Accept", "application/json") - req.Header.Set("Accept-Version", "8") - req.Header.Set("User-Agent", "triton-go Client API") - - if body != nil { - req.Header.Set("Content-Type", "application/json") - } - - resp, err := c.HTTPClient.Do(req.WithContext(ctx)) - if err != nil { - return nil, errwrap.Wrapf("Error executing HTTP request: {{err}}", err) - } - - if resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices { - return resp.Body, nil - } - - return nil, c.DecodeError(resp.StatusCode, resp.Body) -} - -func (c *Client) ExecuteRequest(ctx context.Context, inputs RequestInput) (io.ReadCloser, error) { - return c.ExecuteRequestURIParams(ctx, inputs) -} - -func (c *Client) ExecuteRequestRaw(ctx context.Context, inputs RequestInput) (*http.Response, error) { - method := inputs.Method - path := inputs.Path - body := inputs.Body - - var requestBody io.ReadSeeker - if body != nil { - marshaled, err := json.MarshalIndent(body, "", " ") - if err != nil { - return nil, err - } - requestBody = bytes.NewReader(marshaled) - } - - endpoint := c.TritonURL - endpoint.Path = path - - req, err := http.NewRequest(method, endpoint.String(), requestBody) - if err != nil { - return nil, errwrap.Wrapf("Error constructing HTTP request: {{err}}", err) - } - - dateHeader := time.Now().UTC().Format(time.RFC1123) - req.Header.Set("date", dateHeader) - - // NewClient ensures there's always an authorizer (unless this is called - // outside that constructor). 
- authHeader, err := c.Authorizers[0].Sign(dateHeader) - if err != nil { - return nil, errwrap.Wrapf("Error signing HTTP request: {{err}}", err) - } - req.Header.Set("Authorization", authHeader) - req.Header.Set("Accept", "application/json") - req.Header.Set("Accept-Version", "8") - req.Header.Set("User-Agent", "triton-go c API") - - if body != nil { - req.Header.Set("Content-Type", "application/json") - } - - resp, err := c.HTTPClient.Do(req.WithContext(ctx)) - if err != nil { - return nil, errwrap.Wrapf("Error executing HTTP request: {{err}}", err) - } - - return resp, nil -} - -func (c *Client) ExecuteRequestStorage(ctx context.Context, inputs RequestInput) (io.ReadCloser, http.Header, error) { - method := inputs.Method - path := inputs.Path - query := inputs.Query - headers := inputs.Headers - body := inputs.Body - - endpoint := c.MantaURL - endpoint.Path = path - - var requestBody io.ReadSeeker - if body != nil { - marshaled, err := json.MarshalIndent(body, "", " ") - if err != nil { - return nil, nil, err - } - requestBody = bytes.NewReader(marshaled) - } - - req, err := http.NewRequest(method, endpoint.String(), requestBody) - if err != nil { - return nil, nil, errwrap.Wrapf("Error constructing HTTP request: {{err}}", err) - } - - if body != nil && (headers == nil || headers.Get("Content-Type") == "") { - req.Header.Set("Content-Type", "application/json") - } - if headers != nil { - for key, values := range *headers { - for _, value := range values { - req.Header.Set(key, value) - } - } - } - - dateHeader := time.Now().UTC().Format(time.RFC1123) - req.Header.Set("date", dateHeader) - - authHeader, err := c.Authorizers[0].Sign(dateHeader) - if err != nil { - return nil, nil, errwrap.Wrapf("Error signing HTTP request: {{err}}", err) - } - req.Header.Set("Authorization", authHeader) - req.Header.Set("Accept", "*/*") - req.Header.Set("User-Agent", "manta-go client API") - - if query != nil { - req.URL.RawQuery = query.Encode() - } - - resp, err := 
c.HTTPClient.Do(req.WithContext(ctx)) - if err != nil { - return nil, nil, errwrap.Wrapf("Error executing HTTP request: {{err}}", err) - } - - if resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices { - return resp.Body, resp.Header, nil - } - - mantaError := &MantaError{ - StatusCode: resp.StatusCode, - } - - errorDecoder := json.NewDecoder(resp.Body) - if err := errorDecoder.Decode(mantaError); err != nil { - return nil, nil, errwrap.Wrapf("Error decoding error response: {{err}}", err) - } - return nil, nil, mantaError -} - -type RequestNoEncodeInput struct { - Method string - Path string - Query *url.Values - Headers *http.Header - Body io.ReadSeeker -} - -func (c *Client) ExecuteRequestNoEncode(ctx context.Context, inputs RequestNoEncodeInput) (io.ReadCloser, http.Header, error) { - method := inputs.Method - path := inputs.Path - query := inputs.Query - headers := inputs.Headers - body := inputs.Body - - endpoint := c.MantaURL - endpoint.Path = path - - req, err := http.NewRequest(method, endpoint.String(), body) - if err != nil { - return nil, nil, errwrap.Wrapf("Error constructing HTTP request: {{err}}", err) - } - - if headers != nil { - for key, values := range *headers { - for _, value := range values { - req.Header.Set(key, value) - } - } - } - - dateHeader := time.Now().UTC().Format(time.RFC1123) - req.Header.Set("date", dateHeader) - - authHeader, err := c.Authorizers[0].Sign(dateHeader) - if err != nil { - return nil, nil, errwrap.Wrapf("Error signing HTTP request: {{err}}", err) - } - req.Header.Set("Authorization", authHeader) - req.Header.Set("Accept", "*/*") - req.Header.Set("User-Agent", "manta-go client API") - - if query != nil { - req.URL.RawQuery = query.Encode() - } - - resp, err := c.HTTPClient.Do(req.WithContext(ctx)) - if err != nil { - return nil, nil, errwrap.Wrapf("Error executing HTTP request: {{err}}", err) - } - - if resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices { - 
return resp.Body, resp.Header, nil - } - - mantaError := &MantaError{ - StatusCode: resp.StatusCode, - } - - errorDecoder := json.NewDecoder(resp.Body) - if err := errorDecoder.Decode(mantaError); err != nil { - return nil, nil, errwrap.Wrapf("Error decoding error response: {{err}}", err) - } - return nil, nil, mantaError -} diff --git a/vendor/github.com/joyent/triton-go/client/errors.go b/vendor/github.com/joyent/triton-go/client/errors.go deleted file mode 100644 index 1fc64a0957f..00000000000 --- a/vendor/github.com/joyent/triton-go/client/errors.go +++ /dev/null @@ -1,190 +0,0 @@ -package client - -import ( - "fmt" - - "github.com/hashicorp/errwrap" -) - -// ClientError represents an error code and message along with the status code -// of the HTTP request which resulted in the error message. -type ClientError struct { - StatusCode int - Code string - Message string -} - -// Error implements interface Error on the TritonError type. -func (e ClientError) Error() string { - return fmt.Sprintf("%s: %s", e.Code, e.Message) -} - -// MantaError represents an error code and message along with -// the status code of the HTTP request which resulted in the error -// message. Error codes used by the Manta API are listed at -// https://apidocs.joyent.com/manta/api.html#errors -type MantaError struct { - StatusCode int - Code string `json:"code"` - Message string `json:"message"` -} - -// Error implements interface Error on the MantaError type. -func (e MantaError) Error() string { - return fmt.Sprintf("%s: %s", e.Code, e.Message) -} - -// TritonError represents an error code and message along with -// the status code of the HTTP request which resulted in the error -// message. Error codes used by the Triton API are listed at -// https://apidocs.joyent.com/cloudapi/#cloudapi-http-responses -type TritonError struct { - StatusCode int - Code string `json:"code"` - Message string `json:"message"` -} - -// Error implements interface Error on the TritonError type. 
-func (e TritonError) Error() string { - return fmt.Sprintf("%s: %s", e.Code, e.Message) -} - -func IsAuthSchemeError(err error) bool { - return isSpecificError(err, "AuthScheme") -} -func IsAuthorizationError(err error) bool { - return isSpecificError(err, "Authorization") -} -func IsBadRequestError(err error) bool { - return isSpecificError(err, "BadRequest") -} -func IsChecksumError(err error) bool { - return isSpecificError(err, "Checksum") -} -func IsConcurrentRequestError(err error) bool { - return isSpecificError(err, "ConcurrentRequest") -} -func IsContentLengthError(err error) bool { - return isSpecificError(err, "ContentLength") -} -func IsContentMD5MismatchError(err error) bool { - return isSpecificError(err, "ContentMD5Mismatch") -} -func IsEntityExistsError(err error) bool { - return isSpecificError(err, "EntityExists") -} -func IsInvalidArgumentError(err error) bool { - return isSpecificError(err, "InvalidArgument") -} -func IsInvalidAuthTokenError(err error) bool { - return isSpecificError(err, "InvalidAuthToken") -} -func IsInvalidCredentialsError(err error) bool { - return isSpecificError(err, "InvalidCredentials") -} -func IsInvalidDurabilityLevelError(err error) bool { - return isSpecificError(err, "InvalidDurabilityLevel") -} -func IsInvalidKeyIdError(err error) bool { - return isSpecificError(err, "InvalidKeyId") -} -func IsInvalidJobError(err error) bool { - return isSpecificError(err, "InvalidJob") -} -func IsInvalidLinkError(err error) bool { - return isSpecificError(err, "InvalidLink") -} -func IsInvalidLimitError(err error) bool { - return isSpecificError(err, "InvalidLimit") -} -func IsInvalidSignatureError(err error) bool { - return isSpecificError(err, "InvalidSignature") -} -func IsInvalidUpdateError(err error) bool { - return isSpecificError(err, "InvalidUpdate") -} -func IsDirectoryDoesNotExistError(err error) bool { - return isSpecificError(err, "DirectoryDoesNotExist") -} -func IsDirectoryExistsError(err error) bool { - return 
isSpecificError(err, "DirectoryExists") -} -func IsDirectoryNotEmptyError(err error) bool { - return isSpecificError(err, "DirectoryNotEmpty") -} -func IsDirectoryOperationError(err error) bool { - return isSpecificError(err, "DirectoryOperation") -} -func IsInternalError(err error) bool { - return isSpecificError(err, "Internal") -} -func IsJobNotFoundError(err error) bool { - return isSpecificError(err, "JobNotFound") -} -func IsJobStateError(err error) bool { - return isSpecificError(err, "JobState") -} -func IsKeyDoesNotExistError(err error) bool { - return isSpecificError(err, "KeyDoesNotExist") -} -func IsNotAcceptableError(err error) bool { - return isSpecificError(err, "NotAcceptable") -} -func IsNotEnoughSpaceError(err error) bool { - return isSpecificError(err, "NotEnoughSpace") -} -func IsLinkNotFoundError(err error) bool { - return isSpecificError(err, "LinkNotFound") -} -func IsLinkNotObjectError(err error) bool { - return isSpecificError(err, "LinkNotObject") -} -func IsLinkRequiredError(err error) bool { - return isSpecificError(err, "LinkRequired") -} -func IsParentNotDirectoryError(err error) bool { - return isSpecificError(err, "ParentNotDirectory") -} -func IsPreconditionFailedError(err error) bool { - return isSpecificError(err, "PreconditionFailed") -} -func IsPreSignedRequestError(err error) bool { - return isSpecificError(err, "PreSignedRequest") -} -func IsRequestEntityTooLargeError(err error) bool { - return isSpecificError(err, "RequestEntityTooLarge") -} -func IsResourceNotFoundError(err error) bool { - return isSpecificError(err, "ResourceNotFound") -} -func IsRootDirectoryError(err error) bool { - return isSpecificError(err, "RootDirectory") -} -func IsServiceUnavailableError(err error) bool { - return isSpecificError(err, "ServiceUnavailable") -} -func IsSSLRequiredError(err error) bool { - return isSpecificError(err, "SSLRequired") -} -func IsUploadTimeoutError(err error) bool { - return isSpecificError(err, "UploadTimeout") -} -func 
IsUserDoesNotExistError(err error) bool { - return isSpecificError(err, "UserDoesNotExist") -} - -// isSpecificError checks whether the error represented by err wraps -// an underlying MantaError with code errorCode. -func isSpecificError(err error, errorCode string) bool { - tritonErrorInterface := errwrap.GetType(err.(error), &MantaError{}) - if tritonErrorInterface == nil { - return false - } - - tritonErr := tritonErrorInterface.(*MantaError) - if tritonErr.Code == errorCode { - return true - } - - return false -} diff --git a/vendor/github.com/joyent/triton-go/storage/client.go b/vendor/github.com/joyent/triton-go/storage/client.go deleted file mode 100644 index 36e87e65370..00000000000 --- a/vendor/github.com/joyent/triton-go/storage/client.go +++ /dev/null @@ -1,51 +0,0 @@ -package storage - -import ( - triton "github.com/joyent/triton-go" - "github.com/joyent/triton-go/client" -) - -type StorageClient struct { - Client *client.Client -} - -func newStorageClient(client *client.Client) *StorageClient { - return &StorageClient{ - Client: client, - } -} - -// NewClient returns a new client for working with Storage endpoints and -// resources within CloudAPI -func NewClient(config *triton.ClientConfig) (*StorageClient, error) { - // TODO: Utilize config interface within the function itself - client, err := client.New(config.TritonURL, config.MantaURL, config.AccountName, config.Signers...) - if err != nil { - return nil, err - } - return newStorageClient(client), nil -} - -// Dir returns a DirectoryClient used for accessing functions pertaining to -// Directories functionality of the Manta API. -func (c *StorageClient) Dir() *DirectoryClient { - return &DirectoryClient{c.Client} -} - -// Jobs returns a JobClient used for accessing functions pertaining to Jobs -// functionality of the Triton Object Storage API. 
-func (c *StorageClient) Jobs() *JobClient { - return &JobClient{c.Client} -} - -// Objects returns an ObjectsClient used for accessing functions pertaining to -// Objects functionality of the Triton Object Storage API. -func (c *StorageClient) Objects() *ObjectsClient { - return &ObjectsClient{c.Client} -} - -// SnapLinks returns an SnapLinksClient used for accessing functions pertaining to -// SnapLinks functionality of the Triton Object Storage API. -func (c *StorageClient) SnapLinks() *SnapLinksClient { - return &SnapLinksClient{c.Client} -} diff --git a/vendor/github.com/joyent/triton-go/storage/directory.go b/vendor/github.com/joyent/triton-go/storage/directory.go deleted file mode 100644 index dd93d5f8e10..00000000000 --- a/vendor/github.com/joyent/triton-go/storage/directory.go +++ /dev/null @@ -1,144 +0,0 @@ -package storage - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "time" - - "github.com/hashicorp/errwrap" - "github.com/joyent/triton-go/client" -) - -type DirectoryClient struct { - client *client.Client -} - -// DirectoryEntry represents an object or directory in Manta. -type DirectoryEntry struct { - ETag string `json:"etag"` - ModifiedTime time.Time `json:"mtime"` - Name string `json:"name"` - Size uint64 `json:"size"` - Type string `json:"type"` -} - -// ListDirectoryInput represents parameters to a ListDirectory operation. -type ListDirectoryInput struct { - DirectoryName string - Limit uint64 - Marker string -} - -// ListDirectoryOutput contains the outputs of a ListDirectory operation. -type ListDirectoryOutput struct { - Entries []*DirectoryEntry - ResultSetSize uint64 -} - -// List lists the contents of a directory on the Triton Object Store service. 
-func (s *DirectoryClient) List(ctx context.Context, input *ListDirectoryInput) (*ListDirectoryOutput, error) { - path := fmt.Sprintf("/%s%s", s.client.AccountName, input.DirectoryName) - query := &url.Values{} - if input.Limit != 0 { - query.Set("limit", strconv.FormatUint(input.Limit, 10)) - } - if input.Marker != "" { - query.Set("manta_path", input.Marker) - } - - reqInput := client.RequestInput{ - Method: http.MethodGet, - Path: path, - Query: query, - } - respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return nil, errwrap.Wrapf("Error executing ListDirectory request: {{err}}", err) - } - - var results []*DirectoryEntry - for { - current := &DirectoryEntry{} - decoder := json.NewDecoder(respBody) - if err = decoder.Decode(¤t); err != nil { - if err == io.EOF { - break - } - return nil, errwrap.Wrapf("Error decoding ListDirectory response: {{err}}", err) - } - results = append(results, current) - } - - output := &ListDirectoryOutput{ - Entries: results, - } - - resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64) - if err == nil { - output.ResultSetSize = resultSetSize - } - - return output, nil -} - -// PutDirectoryInput represents parameters to a PutDirectory operation. -type PutDirectoryInput struct { - DirectoryName string -} - -// Put puts a directoy into the Triton Object Storage service is an idempotent -// create-or-update operation. Your private namespace starts at /:login, and you -// can create any nested set of directories or objects within it. 
-func (s *DirectoryClient) Put(ctx context.Context, input *PutDirectoryInput) error { - path := fmt.Sprintf("/%s%s", s.client.AccountName, input.DirectoryName) - headers := &http.Header{} - headers.Set("Content-Type", "application/json; type=directory") - - reqInput := client.RequestInput{ - Method: http.MethodPut, - Path: path, - Headers: headers, - } - respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return errwrap.Wrapf("Error executing PutDirectory request: {{err}}", err) - } - - return nil -} - -// DeleteDirectoryInput represents parameters to a DeleteDirectory operation. -type DeleteDirectoryInput struct { - DirectoryName string -} - -// Delete deletes a directory on the Triton Object Storage. The directory must -// be empty. -func (s *DirectoryClient) Delete(ctx context.Context, input *DeleteDirectoryInput) error { - path := fmt.Sprintf("/%s%s", s.client.AccountName, input.DirectoryName) - - reqInput := client.RequestInput{ - Method: http.MethodDelete, - Path: path, - } - respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return errwrap.Wrapf("Error executing DeleteDirectory request: {{err}}", err) - } - - return nil -} diff --git a/vendor/github.com/joyent/triton-go/storage/job.go b/vendor/github.com/joyent/triton-go/storage/job.go deleted file mode 100644 index c7be80ca0c7..00000000000 --- a/vendor/github.com/joyent/triton-go/storage/job.go +++ /dev/null @@ -1,440 +0,0 @@ -package storage - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/hashicorp/errwrap" - "github.com/joyent/triton-go/client" -) - -type JobClient struct { - client *client.Client -} - -const ( - JobStateDone = "done" - JobStateRunning = "running" -) - -// JobPhase represents the specification for a map or reduce phase of a Manta -// 
job. -type JobPhase struct { - // Type is the type of phase. Must be `map` or `reduce`. - Type string `json:"type,omitempty"` - - // Assets is an array of objects to be placed in your compute zones. - Assets []string `json:"assets,omitempty"` - - // Exec is the shell statement to execute. It may be any valid shell - // command, including pipelines and other shell syntax. You can also - // execute programs stored in the service by including them in "assets" - // and referencing them as /assets/$manta_path. - Exec string `json:"exec"` - - // Init is a shell statement to execute in each compute zone before - // any tasks are executed. The same constraints apply as to Exec. - Init string `json:"init"` - - // ReducerCount is an optional number of reducers for this phase. The - // default value if not specified is 1. The maximum value is 1024. - ReducerCount uint `json:"count,omitempty"` - - // Memory is the amount of DRAM in MB to be allocated to the compute - // zone. Valid values are 256, 512, 1024, 2048, 4096 or 8192. - Memory uint64 `json:"memory,omitempty"` - - // Disk is the amount of disk space in GB to be allocated to the compute - // zone. Valid values are 2, 4, 8, 16, 32, 64, 128, 256, 512 or 1024. - Disk uint64 `json:"disk,omitempty"` -} - -// JobSummary represents the summary of a compute job in Manta. -type JobSummary struct { - ModifiedTime time.Time `json:"mtime"` - ID string `json:"name"` -} - -// Job represents a compute job in Manta. -type Job struct { - ID string `json:"id"` - Name string `json:"name"` - Phases []*JobPhase `json:"phases"` - State string `json:"state"` - Cancelled bool `json:"cancelled"` - InputDone bool `json:"inputDone"` - CreatedTime time.Time `json:"timeCreated"` - DoneTime time.Time `json:"timeDone"` - Transient bool `json:"transient"` - Stats *JobStats `json:"stats"` -} - -// JobStats represents statistics for a compute job in Manta. 
-type JobStats struct { - Errors uint64 `json:"errors"` - Outputs uint64 `json:"outputs"` - Retries uint64 `json:"retries"` - Tasks uint64 `json:"tasks"` - TasksDone uint64 `json:"tasksDone"` -} - -// CreateJobInput represents parameters to a CreateJob operation. -type CreateJobInput struct { - Name string `json:"name"` - Phases []*JobPhase `json:"phases"` -} - -// CreateJobOutput contains the outputs of a CreateJob operation. -type CreateJobOutput struct { - JobID string -} - -// CreateJob submits a new job to be executed. This call is not -// idempotent, so calling it twice will create two jobs. -func (s *JobClient) Create(ctx context.Context, input *CreateJobInput) (*CreateJobOutput, error) { - path := fmt.Sprintf("/%s/jobs", s.client.AccountName) - - reqInput := client.RequestInput{ - Method: http.MethodPost, - Path: path, - Body: input, - } - respBody, respHeaders, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return nil, errwrap.Wrapf("Error executing CreateJob request: {{err}}", err) - } - - jobURI := respHeaders.Get("Location") - parts := strings.Split(jobURI, "/") - jobID := parts[len(parts)-1] - - response := &CreateJobOutput{ - JobID: jobID, - } - - return response, nil -} - -// AddJobInputs represents parameters to a AddJobInputs operation. -type AddJobInputsInput struct { - JobID string - ObjectPaths []string -} - -// AddJobInputs submits inputs to an already created job. 
-func (s *JobClient) AddInputs(ctx context.Context, input *AddJobInputsInput) error { - path := fmt.Sprintf("/%s/jobs/%s/live/in", s.client.AccountName, input.JobID) - headers := &http.Header{} - headers.Set("Content-Type", "text/plain") - - reader := strings.NewReader(strings.Join(input.ObjectPaths, "\n")) - - reqInput := client.RequestNoEncodeInput{ - Method: http.MethodPost, - Path: path, - Headers: headers, - Body: reader, - } - respBody, _, err := s.client.ExecuteRequestNoEncode(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return errwrap.Wrapf("Error executing AddJobInputs request: {{err}}", err) - } - - return nil -} - -// EndJobInputInput represents parameters to a EndJobInput operation. -type EndJobInputInput struct { - JobID string -} - -// EndJobInput submits inputs to an already created job. -func (s *JobClient) EndInput(ctx context.Context, input *EndJobInputInput) error { - path := fmt.Sprintf("/%s/jobs/%s/live/in/end", s.client.AccountName, input.JobID) - - reqInput := client.RequestNoEncodeInput{ - Method: http.MethodPost, - Path: path, - } - respBody, _, err := s.client.ExecuteRequestNoEncode(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return errwrap.Wrapf("Error executing EndJobInput request: {{err}}", err) - } - - return nil -} - -// CancelJobInput represents parameters to a CancelJob operation. -type CancelJobInput struct { - JobID string -} - -// CancelJob cancels a job from doing any further work. Cancellation -// is asynchronous and "best effort"; there is no guarantee the job -// will actually stop. For example, short jobs where input is already -// closed will likely still run to completion. 
-// -// This is however useful when: -// - input is still open -// - you have a long-running job -func (s *JobClient) Cancel(ctx context.Context, input *CancelJobInput) error { - path := fmt.Sprintf("/%s/jobs/%s/live/cancel", s.client.AccountName, input.JobID) - - reqInput := client.RequestNoEncodeInput{ - Method: http.MethodPost, - Path: path, - } - respBody, _, err := s.client.ExecuteRequestNoEncode(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return errwrap.Wrapf("Error executing CancelJob request: {{err}}", err) - } - - return nil -} - -// ListJobsInput represents parameters to a ListJobs operation. -type ListJobsInput struct { - RunningOnly bool - Limit uint64 - Marker string -} - -// ListJobsOutput contains the outputs of a ListJobs operation. -type ListJobsOutput struct { - Jobs []*JobSummary - ResultSetSize uint64 -} - -// ListJobs returns the list of jobs you currently have. -func (s *JobClient) List(ctx context.Context, input *ListJobsInput) (*ListJobsOutput, error) { - path := fmt.Sprintf("/%s/jobs", s.client.AccountName) - query := &url.Values{} - if input.RunningOnly { - query.Set("state", "running") - } - if input.Limit != 0 { - query.Set("limit", strconv.FormatUint(input.Limit, 10)) - } - if input.Marker != "" { - query.Set("manta_path", input.Marker) - } - - reqInput := client.RequestInput{ - Method: http.MethodGet, - Path: path, - Query: query, - } - respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return nil, errwrap.Wrapf("Error executing ListJobs request: {{err}}", err) - } - - var results []*JobSummary - for { - current := &JobSummary{} - decoder := json.NewDecoder(respBody) - if err = decoder.Decode(¤t); err != nil { - if err == io.EOF { - break - } - return nil, errwrap.Wrapf("Error decoding ListJobs response: {{err}}", err) - } - results = append(results, current) - } - - output := &ListJobsOutput{ - 
Jobs: results, - } - - resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64) - if err == nil { - output.ResultSetSize = resultSetSize - } - - return output, nil -} - -// GetJobInput represents parameters to a GetJob operation. -type GetJobInput struct { - JobID string -} - -// GetJobOutput contains the outputs of a GetJob operation. -type GetJobOutput struct { - Job *Job -} - -// GetJob returns the list of jobs you currently have. -func (s *JobClient) Get(ctx context.Context, input *GetJobInput) (*GetJobOutput, error) { - path := fmt.Sprintf("/%s/jobs/%s/live/status", s.client.AccountName, input.JobID) - - reqInput := client.RequestInput{ - Method: http.MethodGet, - Path: path, - } - respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return nil, errwrap.Wrapf("Error executing GetJob request: {{err}}", err) - } - - job := &Job{} - decoder := json.NewDecoder(respBody) - if err = decoder.Decode(&job); err != nil { - return nil, errwrap.Wrapf("Error decoding GetJob response: {{err}}", err) - } - - return &GetJobOutput{ - Job: job, - }, nil -} - -// GetJobOutputInput represents parameters to a GetJobOutput operation. -type GetJobOutputInput struct { - JobID string -} - -// GetJobOutputOutput contains the outputs for a GetJobOutput operation. It is your -// responsibility to ensure that the io.ReadCloser Items is closed. -type GetJobOutputOutput struct { - ResultSetSize uint64 - Items io.ReadCloser -} - -// GetJobOutput returns the current "live" set of outputs from a job. Think of -// this like `tail -f`. If error is nil (i.e. the operation is successful), it is -// your responsibility to close the io.ReadCloser named Items in the output. 
-func (s *JobClient) GetOutput(ctx context.Context, input *GetJobOutputInput) (*GetJobOutputOutput, error) { - path := fmt.Sprintf("/%s/jobs/%s/live/out", s.client.AccountName, input.JobID) - - reqInput := client.RequestInput{ - Method: http.MethodGet, - Path: path, - } - respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return nil, errwrap.Wrapf("Error executing GetJobOutput request: {{err}}", err) - } - - output := &GetJobOutputOutput{ - Items: respBody, - } - - resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64) - if err == nil { - output.ResultSetSize = resultSetSize - } - - return output, nil -} - -// GetJobInputInput represents parameters to a GetJobOutput operation. -type GetJobInputInput struct { - JobID string -} - -// GetJobInputOutput contains the outputs for a GetJobOutput operation. It is your -// responsibility to ensure that the io.ReadCloser Items is closed. -type GetJobInputOutput struct { - ResultSetSize uint64 - Items io.ReadCloser -} - -// GetJobInput returns the current "live" set of inputs from a job. Think of -// this like `tail -f`. If error is nil (i.e. the operation is successful), it is -// your responsibility to close the io.ReadCloser named Items in the output. 
-func (s *JobClient) GetInput(ctx context.Context, input *GetJobInputInput) (*GetJobInputOutput, error) { - path := fmt.Sprintf("/%s/jobs/%s/live/in", s.client.AccountName, input.JobID) - - reqInput := client.RequestInput{ - Method: http.MethodGet, - Path: path, - } - respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return nil, errwrap.Wrapf("Error executing GetJobInput request: {{err}}", err) - } - - output := &GetJobInputOutput{ - Items: respBody, - } - - resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64) - if err == nil { - output.ResultSetSize = resultSetSize - } - - return output, nil -} - -// GetJobFailuresInput represents parameters to a GetJobFailures operation. -type GetJobFailuresInput struct { - JobID string -} - -// GetJobFailuresOutput contains the outputs for a GetJobFailures operation. It is your -// responsibility to ensure that the io.ReadCloser Items is closed. -type GetJobFailuresOutput struct { - ResultSetSize uint64 - Items io.ReadCloser -} - -// GetJobFailures returns the current "live" set of outputs from a job. Think of -// this like `tail -f`. If error is nil (i.e. the operation is successful), it is -// your responsibility to close the io.ReadCloser named Items in the output. 
-func (s *JobClient) GetFailures(ctx context.Context, input *GetJobFailuresInput) (*GetJobFailuresOutput, error) { - path := fmt.Sprintf("/%s/jobs/%s/live/fail", s.client.AccountName, input.JobID) - - reqInput := client.RequestInput{ - Method: http.MethodGet, - Path: path, - } - respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return nil, errwrap.Wrapf("Error executing GetJobFailures request: {{err}}", err) - } - - output := &GetJobFailuresOutput{ - Items: respBody, - } - - resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64) - if err == nil { - output.ResultSetSize = resultSetSize - } - - return output, nil -} diff --git a/vendor/github.com/joyent/triton-go/storage/objects.go b/vendor/github.com/joyent/triton-go/storage/objects.go deleted file mode 100644 index afdd5e97fd9..00000000000 --- a/vendor/github.com/joyent/triton-go/storage/objects.go +++ /dev/null @@ -1,208 +0,0 @@ -package storage - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/hashicorp/errwrap" - "github.com/joyent/triton-go/client" -) - -type ObjectsClient struct { - client *client.Client -} - -// GetObjectInput represents parameters to a GetObject operation. -type GetObjectInput struct { - ObjectPath string -} - -// GetObjectOutput contains the outputs for a GetObject operation. It is your -// responsibility to ensure that the io.ReadCloser ObjectReader is closed. -type GetObjectOutput struct { - ContentLength uint64 - ContentType string - LastModified time.Time - ContentMD5 string - ETag string - Metadata map[string]string - ObjectReader io.ReadCloser -} - -// GetObject retrieves an object from the Manta service. If error is nil (i.e. -// the call returns successfully), it is your responsibility to close the io.ReadCloser -// named ObjectReader in the operation output. 
-func (s *ObjectsClient) Get(ctx context.Context, input *GetObjectInput) (*GetObjectOutput, error) { - path := fmt.Sprintf("/%s%s", s.client.AccountName, input.ObjectPath) - - reqInput := client.RequestInput{ - Method: http.MethodGet, - Path: path, - } - respBody, respHeaders, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if err != nil { - return nil, errwrap.Wrapf("Error executing GetDirectory request: {{err}}", err) - } - - response := &GetObjectOutput{ - ContentType: respHeaders.Get("Content-Type"), - ContentMD5: respHeaders.Get("Content-MD5"), - ETag: respHeaders.Get("Etag"), - ObjectReader: respBody, - } - - lastModified, err := time.Parse(time.RFC1123, respHeaders.Get("Last-Modified")) - if err == nil { - response.LastModified = lastModified - } - - contentLength, err := strconv.ParseUint(respHeaders.Get("Content-Length"), 10, 64) - if err == nil { - response.ContentLength = contentLength - } - - metadata := map[string]string{} - for key, values := range respHeaders { - if strings.HasPrefix(key, "m-") { - metadata[key] = strings.Join(values, ", ") - } - } - response.Metadata = metadata - - return response, nil -} - -// DeleteObjectInput represents parameters to a DeleteObject operation. -type DeleteObjectInput struct { - ObjectPath string -} - -// DeleteObject deletes an object. -func (s *ObjectsClient) Delete(ctx context.Context, input *DeleteObjectInput) error { - path := fmt.Sprintf("/%s%s", s.client.AccountName, input.ObjectPath) - - reqInput := client.RequestInput{ - Method: http.MethodDelete, - Path: path, - } - respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return errwrap.Wrapf("Error executing DeleteObject request: {{err}}", err) - } - - return nil -} - -// PutObjectMetadataInput represents parameters to a PutObjectMetadata operation. 
-type PutObjectMetadataInput struct { - ObjectPath string - ContentType string - Metadata map[string]string -} - -// PutObjectMetadata allows you to overwrite the HTTP headers for an already -// existing object, without changing the data. Note this is an idempotent "replace" -// operation, so you must specify the complete set of HTTP headers you want -// stored on each request. -// -// You cannot change "critical" headers: -// - Content-Length -// - Content-MD5 -// - Durability-Level -func (s *ObjectsClient) PutMetadata(ctx context.Context, input *PutObjectMetadataInput) error { - path := fmt.Sprintf("/%s%s", s.client.AccountName, input.ObjectPath) - query := &url.Values{} - query.Set("metadata", "true") - - headers := &http.Header{} - headers.Set("Content-Type", input.ContentType) - for key, value := range input.Metadata { - headers.Set(key, value) - } - - reqInput := client.RequestInput{ - Method: http.MethodPut, - Path: path, - Query: query, - Headers: headers, - } - respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return errwrap.Wrapf("Error executing PutObjectMetadata request: {{err}}", err) - } - - return nil -} - -// PutObjectInput represents parameters to a PutObject operation. 
-type PutObjectInput struct { - ObjectPath string - DurabilityLevel uint64 - ContentType string - ContentMD5 string - IfMatch string - IfModifiedSince *time.Time - ContentLength uint64 - MaxContentLength uint64 - ObjectReader io.ReadSeeker -} - -func (s *ObjectsClient) Put(ctx context.Context, input *PutObjectInput) error { - path := fmt.Sprintf("/%s%s", s.client.AccountName, input.ObjectPath) - - if input.MaxContentLength != 0 && input.ContentLength != 0 { - return errors.New("ContentLength and MaxContentLength may not both be set to non-zero values.") - } - - headers := &http.Header{} - if input.DurabilityLevel != 0 { - headers.Set("Durability-Level", strconv.FormatUint(input.DurabilityLevel, 10)) - } - if input.ContentType != "" { - headers.Set("Content-Type", input.ContentType) - } - if input.ContentMD5 != "" { - headers.Set("Content-MD$", input.ContentMD5) - } - if input.IfMatch != "" { - headers.Set("If-Match", input.IfMatch) - } - if input.IfModifiedSince != nil { - headers.Set("If-Modified-Since", input.IfModifiedSince.Format(time.RFC1123)) - } - if input.ContentLength != 0 { - headers.Set("Content-Length", strconv.FormatUint(input.ContentLength, 10)) - } - if input.MaxContentLength != 0 { - headers.Set("Max-Content-Length", strconv.FormatUint(input.MaxContentLength, 10)) - } - - reqInput := client.RequestNoEncodeInput{ - Method: http.MethodPut, - Path: path, - Headers: headers, - Body: input.ObjectReader, - } - respBody, _, err := s.client.ExecuteRequestNoEncode(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return errwrap.Wrapf("Error executing PutObjectMetadata request: {{err}}", err) - } - - return nil -} diff --git a/vendor/github.com/joyent/triton-go/storage/signing.go b/vendor/github.com/joyent/triton-go/storage/signing.go deleted file mode 100644 index b88005ce885..00000000000 --- a/vendor/github.com/joyent/triton-go/storage/signing.go +++ /dev/null @@ -1,81 +0,0 @@ -package storage - -import ( - "bytes" - 
"fmt" - "net/url" - "strconv" - "strings" - "time" - - "github.com/hashicorp/errwrap" -) - -// SignURLInput represents parameters to a SignURL operation. -type SignURLInput struct { - ValidityPeriod time.Duration - Method string - ObjectPath string -} - -// SignURLOutput contains the outputs of a SignURL operation. To simply -// access the signed URL, use the SignedURL method. -type SignURLOutput struct { - host string - objectPath string - Method string - Algorithm string - Signature string - Expires string - KeyID string -} - -// SignedURL returns a signed URL for the given scheme. Valid schemes are -// `http` and `https`. -func (output *SignURLOutput) SignedURL(scheme string) string { - query := &url.Values{} - query.Set("algorithm", output.Algorithm) - query.Set("expires", output.Expires) - query.Set("keyId", output.KeyID) - query.Set("signature", output.Signature) - - sUrl := url.URL{} - sUrl.Scheme = scheme - sUrl.Host = output.host - sUrl.Path = output.objectPath - sUrl.RawQuery = query.Encode() - - return sUrl.String() -} - -// SignURL creates a time-expiring URL that can be shared with others. -// This is useful to generate HTML links, for example. 
-func (s *StorageClient) SignURL(input *SignURLInput) (*SignURLOutput, error) { - output := &SignURLOutput{ - host: s.Client.MantaURL.Host, - objectPath: fmt.Sprintf("/%s%s", s.Client.AccountName, input.ObjectPath), - Method: input.Method, - Algorithm: strings.ToUpper(s.Client.Authorizers[0].DefaultAlgorithm()), - Expires: strconv.FormatInt(time.Now().Add(input.ValidityPeriod).Unix(), 10), - KeyID: fmt.Sprintf("/%s/keys/%s", s.Client.AccountName, s.Client.Authorizers[0].KeyFingerprint()), - } - - toSign := bytes.Buffer{} - toSign.WriteString(input.Method + "\n") - toSign.WriteString(s.Client.MantaURL.Host + "\n") - toSign.WriteString(fmt.Sprintf("/%s%s\n", s.Client.AccountName, input.ObjectPath)) - - query := &url.Values{} - query.Set("algorithm", output.Algorithm) - query.Set("expires", output.Expires) - query.Set("keyId", output.KeyID) - toSign.WriteString(query.Encode()) - - signature, _, err := s.Client.Authorizers[0].SignRaw(toSign.String()) - if err != nil { - return nil, errwrap.Wrapf("Error signing string: {{err}}", err) - } - - output.Signature = signature - return output, nil -} diff --git a/vendor/github.com/joyent/triton-go/storage/snaplink.go b/vendor/github.com/joyent/triton-go/storage/snaplink.go deleted file mode 100644 index 1330f0a9774..00000000000 --- a/vendor/github.com/joyent/triton-go/storage/snaplink.go +++ /dev/null @@ -1,46 +0,0 @@ -package storage - -import ( - "context" - "fmt" - "net/http" - - "github.com/hashicorp/errwrap" - "github.com/joyent/triton-go/client" -) - -type SnapLinksClient struct { - client *client.Client -} - -// PutSnapLinkInput represents parameters to a PutSnapLink operation. -type PutSnapLinkInput struct { - LinkPath string - SourcePath string -} - -// PutSnapLink creates a SnapLink to an object. 
-func (s *SnapLinksClient) Put(ctx context.Context, input *PutSnapLinkInput) error { - linkPath := fmt.Sprintf("/%s%s", s.client.AccountName, input.LinkPath) - sourcePath := fmt.Sprintf("/%s%s", s.client.AccountName, input.SourcePath) - headers := &http.Header{} - headers.Set("Content-Type", "application/json; type=link") - headers.Set("location", sourcePath) - headers.Set("Accept", "~1.0") - headers.Set("Accept-Version", "application/json, */*") - - reqInput := client.RequestInput{ - Method: http.MethodPut, - Path: linkPath, - Headers: headers, - } - respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput) - if respBody != nil { - defer respBody.Close() - } - if err != nil { - return errwrap.Wrapf("Error executing PutSnapLink request: {{err}}", err) - } - - return nil -} diff --git a/vendor/github.com/joyent/triton-go/triton.go b/vendor/github.com/joyent/triton-go/triton.go deleted file mode 100644 index b5bacd2556a..00000000000 --- a/vendor/github.com/joyent/triton-go/triton.go +++ /dev/null @@ -1,18 +0,0 @@ -package triton - -import ( - "github.com/joyent/triton-go/authentication" -) - -// Universal package used for defining configuration used across all client -// constructors. - -// ClientConfig is a placeholder/input struct around the behavior of configuring -// a client constructor through the implementation's runtime environment -// (SDC/MANTA env vars). -type ClientConfig struct { - TritonURL string - MantaURL string - AccountName string - Signers []authentication.Signer -} diff --git a/vendor/github.com/lusis/go-artifactory/LICENSE b/vendor/github.com/lusis/go-artifactory/LICENSE deleted file mode 100644 index 7c588406b01..00000000000 --- a/vendor/github.com/lusis/go-artifactory/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -Apache License, Version 2.0 - -Copyright (c) 2016 John E. Vincent - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api.go deleted file mode 100644 index a60755a8b0a..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api.go +++ /dev/null @@ -1 +0,0 @@ -package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive.go deleted file mode 100644 index a60755a8b0a..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive.go +++ /dev/null @@ -1 +0,0 @@ -package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact.go deleted file mode 100644 index 67ae1fda53f..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact.go +++ /dev/null @@ -1,67 +0,0 @@ -package artifactory - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "strings" -) - -type FileInfo struct { - Uri string `json:"uri"` - DownloadUri string `json:"downloadUri"` - Repo string `json:"repo"` - Path string `json:"path"` - RemoteUrl string `json:"remoteUrl,omitempty"` - Created string `json:"created"` - CreatedBy string `json:"createdBy"` - LastModified string `json:"lastModified"` - ModifiedBy string `json:"modifiedBy"` - MimeType string `json:"mimeType"` - Size string `json:"size"` - Checksums struct { - SHA1 string `json:"sha1"` - MD5 string `json:"md5"` - } 
`json:"checksums"` - OriginalChecksums struct { - SHA1 string `json:"sha1"` - MD5 string `json:"md5"` - } `json:"originalChecksums,omitempty"` -} - -func (c *ArtifactoryClient) DeployArtifact(repoKey string, filename string, path string, properties map[string]string) (CreatedStorageItem, error) { - var res CreatedStorageItem - var fileProps []string - var finalUrl string - finalUrl = "/" + repoKey + "/" - if &path != nil { - finalUrl = finalUrl + path - } - baseFile := filepath.Base(filename) - finalUrl = finalUrl + "/" + baseFile - if len(properties) > 0 { - finalUrl = finalUrl + ";" - for k, v := range properties { - fileProps = append(fileProps, k+"="+v) - } - finalUrl = finalUrl + strings.Join(fileProps, ";") - } - data, err := os.Open(filename) - if err != nil { - return res, err - } - defer data.Close() - b, _ := ioutil.ReadAll(data) - d, err := c.Put(finalUrl, string(b), make(map[string]string)) - if err != nil { - return res, err - } else { - e := json.Unmarshal(d, &res) - if e != nil { - return res, e - } else { - return res, nil - } - } -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray.go deleted file mode 100644 index a60755a8b0a..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray.go +++ /dev/null @@ -1 +0,0 @@ -package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go deleted file mode 100644 index a60755a8b0a..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go +++ /dev/null @@ -1 +0,0 @@ -package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client.go deleted file mode 100644 index e744c01f785..00000000000 --- 
a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client.go +++ /dev/null @@ -1,80 +0,0 @@ -package artifactory - -import ( - "crypto/tls" - "fmt" - "net/http" - "os" -) - -type ClientConfig struct { - BaseURL string - Username string - Password string - Token string - AuthMethod string - VerifySSL bool - Client *http.Client - Transport *http.Transport -} - -type ArtifactoryClient struct { - Client *http.Client - Config *ClientConfig - Transport *http.Transport -} - -func NewClient(config *ClientConfig) (c ArtifactoryClient) { - verifySSL := func() bool { - if config.VerifySSL != true { - return false - } else { - return true - } - } - if config.Transport == nil { - config.Transport = new(http.Transport) - } - config.Transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: verifySSL()} - if config.Client == nil { - config.Client = new(http.Client) - } - config.Client.Transport = config.Transport - return ArtifactoryClient{Client: config.Client, Config: config} -} - -func clientConfigFrom(from string) (c *ClientConfig) { - conf := ClientConfig{} - switch from { - case "environment": - if os.Getenv("ARTIFACTORY_URL") == "" { - fmt.Printf("You must set the environment variable ARTIFACTORY_URL") - os.Exit(1) - } else { - conf.BaseURL = os.Getenv("ARTIFACTORY_URL") - } - if os.Getenv("ARTIFACTORY_TOKEN") == "" { - if os.Getenv("ARTIFACTORY_USERNAME") == "" || os.Getenv("ARTIFACTORY_PASSWORD") == "" { - fmt.Printf("You must set the environment variables ARTIFACTORY_USERNAME/ARTIFACTORY_PASSWORD\n") - os.Exit(1) - } else { - conf.AuthMethod = "basic" - } - } else { - conf.AuthMethod = "token" - } - } - if conf.AuthMethod == "token" { - conf.Token = os.Getenv("ARTIFACTORY_TOKEN") - } else { - conf.Username = os.Getenv("ARTIFACTORY_USERNAME") - conf.Password = os.Getenv("ARTIFACTORY_PASSWORD") - } - return &conf -} - -func NewClientFromEnv() (c ArtifactoryClient) { - config := clientConfigFrom("environment") - client := NewClient(config) - return client -} 
diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance.go deleted file mode 100644 index a60755a8b0a..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/compliance.go +++ /dev/null @@ -1 +0,0 @@ -package artifactory diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/errors.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/errors.go deleted file mode 100644 index 70551aad9aa..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package artifactory - -type ErrorsJson struct { - Errors []ErrorJson `json:"errors"` -} - -type ErrorJson struct { - Status string `json:"status"` - Message string `json:"message"` -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups.go deleted file mode 100644 index a60c11229b9..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups.go +++ /dev/null @@ -1,61 +0,0 @@ -package artifactory - -import ( - "encoding/json" -) - -type Group struct { - Name string `json:"name"` - Uri string `json:"uri"` -} - -type GroupDetails struct { - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - AutoJoin bool `json:"autoJoin,omitempty"` - Realm string `json:"realm,omitempty"` - RealmAttributes string `json:"realmAttributes,omitempty"` -} - -func (c *ArtifactoryClient) GetGroups() ([]Group, error) { - var res []Group - d, e := c.Get("/api/security/groups", make(map[string]string)) - if e != nil { - return res, e - } else { - err := json.Unmarshal(d, &res) - if err != nil { - return res, err - } else { - return res, e - } - } -} - -func (c *ArtifactoryClient) GetGroupDetails(u string) (GroupDetails, error) { - var res GroupDetails - d, e := 
c.Get("/api/security/groups/"+u, make(map[string]string)) - if e != nil { - return res, e - } else { - err := json.Unmarshal(d, &res) - if err != nil { - return res, err - } else { - return res, e - } - } -} - -func (c *ArtifactoryClient) CreateGroup(gname string, g GroupDetails) error { - j, jerr := json.Marshal(g) - if jerr != nil { - return jerr - } - o := make(map[string]string) - _, err := c.Put("/api/security/groups/"+gname, string(j), o) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http.go deleted file mode 100644 index 95a38c0b7b7..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http.go +++ /dev/null @@ -1,103 +0,0 @@ -package artifactory - -import ( - "bytes" - "crypto/sha1" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" -) - -func (c *ArtifactoryClient) Get(path string, options map[string]string) ([]byte, error) { - return c.makeRequest("GET", path, options, nil) -} - -func (c *ArtifactoryClient) Post(path string, data string, options map[string]string) ([]byte, error) { - body := strings.NewReader(data) - return c.makeRequest("POST", path, options, body) -} - -func (c *ArtifactoryClient) Put(path string, data string, options map[string]string) ([]byte, error) { - body := strings.NewReader(data) - return c.makeRequest("PUT", path, options, body) -} - -func (c *ArtifactoryClient) Delete(path string) error { - _, err := c.makeRequest("DELETE", path, make(map[string]string), nil) - if err != nil { - return err - } else { - return nil - } -} - -func (c *ArtifactoryClient) makeRequest(method string, path string, options map[string]string, body io.Reader) ([]byte, error) { - qs := url.Values{} - for q, p := range options { - qs.Add(q, p) - } - var base_req_path string - // swapped out legacy code below for simply trimming the trailing 
slash - //if c.Config.BaseURL[:len(c.Config.BaseURL)-1] == "/" { - // base_req_path = c.Config.BaseURL + path - //} else { - // base_req_path = c.Config.BaseURL + "/" + path - //} - base_req_path = strings.TrimSuffix(c.Config.BaseURL, "/") + path - u, err := url.Parse(base_req_path) - if err != nil { - var data bytes.Buffer - return data.Bytes(), err - } - if len(options) != 0 { - u.RawQuery = qs.Encode() - } - buf := new(bytes.Buffer) - if body != nil { - buf.ReadFrom(body) - } - req, _ := http.NewRequest(method, u.String(), bytes.NewReader(buf.Bytes())) - if body != nil { - h := sha1.New() - h.Write(buf.Bytes()) - chkSum := h.Sum(nil) - req.Header.Add("X-Checksum-Sha1", fmt.Sprintf("%x", chkSum)) - } - req.Header.Add("user-agent", "artifactory-go."+VERSION) - req.Header.Add("X-Result-Detail", "info, properties") - req.Header.Add("Accept", "application/json") - if c.Config.AuthMethod == "basic" { - req.SetBasicAuth(c.Config.Username, c.Config.Password) - } else { - req.Header.Add("X-JFrog-Art-Api", c.Config.Token) - } - r, err := c.Client.Do(req) - if err != nil { - var data bytes.Buffer - return data.Bytes(), err - } else { - defer r.Body.Close() - data, err := ioutil.ReadAll(r.Body) - if r.StatusCode < 200 || r.StatusCode > 299 { - var ej ErrorsJson - uerr := json.Unmarshal(data, &ej) - if uerr != nil { - emsg := fmt.Sprintf("Non-2xx code returned: %d. 
Message follows:\n%s", r.StatusCode, string(data)) - return data, errors.New(emsg) - } else { - var emsgs []string - for _, i := range ej.Errors { - emsgs = append(emsgs, i.Message) - } - return data, errors.New(strings.Join(emsgs, "\n")) - } - } else { - return data, err - } - } -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/license.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/license.go deleted file mode 100644 index 7b9cdb588d5..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/license.go +++ /dev/null @@ -1,23 +0,0 @@ -package artifactory - -import ( - "encoding/json" -) - -type LicenseInformation struct { - LicenseType string `json:"type"` - ValidThrough string `json:"validThrough"` - LicensedTo string `json:"licensedTo"` -} - -func (c *ArtifactoryClient) GetLicenseInformation() (LicenseInformation, error) { - o := make(map[string]string, 0) - var l LicenseInformation - d, e := c.Get("/api/system/license", o) - if e != nil { - return l, e - } else { - err := json.Unmarshal(d, &l) - return l, err - } -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes.go deleted file mode 100644 index 2ad1f504237..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes.go +++ /dev/null @@ -1,11 +0,0 @@ -package artifactory - -const LOCAL_REPO_MIMETYPE string = "application/vnd.org.jfrog.artifactory.repositories.LocalRepositoryConfiguration+json" -const REMOTE_REPO_MIMETYPE string = "application/vnd.org.jfrog.artifactory.repositories.RemoteRepositoryConfiguration+json" -const VIRTUAL_REPO_MIMETYPE string = "application/vnd.org.jfrog.artifactory.repositories.VirtualRepositoryConfiguration+json" -const USER_MIMETYPE string = "application/vnd.org.jfrog.artifactory.security.User+json" -const GROUP_MIMETYPE string = 
"application/vnd.org.jfrog.artifactory.security.Group+json" -const PERMISSION_TARGET_MIMETYPE string = "application/vnd.org.jfrog.artifactory.security.PermissionTarget+json" -const IMPORT_SETTINGS_MIMETYPE string = "application/vnd.org.jfrog.artifactory.system.ImportSettings+json" -const EXPORT_SETTIGNS_MIMETYPE string = "application/vnd.org.jfrog.artifactory.system.ExportSettings+json" -const SYSTEM_VERSION_MIMETYPE string = "application/vnd.org.jfrog.artifactory.system.Version+json" diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets.go deleted file mode 100644 index 71ff71edc75..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets.go +++ /dev/null @@ -1,53 +0,0 @@ -package artifactory - -import ( - "encoding/json" -) - -type PermissionTarget struct { - Name string `json:"name"` - Uri string `json:"uri"` -} - -type PermissionTargetDetails struct { - Name string `json:"name,omitempty"` - IncludesPattern string `json:"includesPattern,omitempty"` - ExcludesPattern string `json:"excludesPattern,omitempty"` - Repositories []string `json:"repositories,omitempty"` - Principals Principals `json:"principals,omitempty"` -} - -type Principals struct { - Users map[string][]string `json:"users"` - Groups map[string][]string `json:"groups"` -} - -func (c *ArtifactoryClient) GetPermissionTargets() ([]PermissionTarget, error) { - var res []PermissionTarget - d, e := c.Get("/api/security/permissions", make(map[string]string)) - if e != nil { - return res, e - } else { - err := json.Unmarshal(d, &res) - if err != nil { - return res, err - } else { - return res, e - } - } -} - -func (c *ArtifactoryClient) GetPermissionTargetDetails(u string) (PermissionTargetDetails, error) { - var res PermissionTargetDetails - d, e := c.Get("/api/security/permissions/"+u, make(map[string]string)) - if e != nil { - return res, e - 
} else { - err := json.Unmarshal(d, &res) - if err != nil { - return res, err - } else { - return res, e - } - } -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos.go deleted file mode 100644 index 5a3ed8cd612..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos.go +++ /dev/null @@ -1,151 +0,0 @@ -package artifactory - -import ( - "encoding/json" - "fmt" -) - -type Repo struct { - Key string `json:"key"` - Rtype string `json:"type"` - Description string `json:"description,omitempty"` - Url string `json:"url,omitempty"` -} - -type RepoConfig interface { - MimeType() string -} - -type GenericRepoConfig struct { - Key string `json:"key,omitempty"` - RClass string `json:"rclass"` - PackageType string `json:"packageType,omitempty"` - Description string `json:"description,omitempty"` - Notes string `json:"notes,omitempty"` - IncludesPattern string `json:"includesPattern,omitempty"` - ExcludesPattern string `json:"excludesPattern,omitempty"` - HandleReleases bool `json:"handleReleases,omitempty"` - HandleSnapshots bool `json:"handleSnapshots,omitempty"` - MaxUniqueSnapshots int `json:"maxUniqueSnapshots,omitempty"` - SuppressPomConsistencyChecks bool `json:"supressPomConsistencyChecks,omitempty"` - BlackedOut bool `json:"blackedOut,omitempty"` - PropertySets []string `json:"propertySets,omitempty"` -} - -func (r GenericRepoConfig) MimeType() string { - return "" -} - -type LocalRepoConfig struct { - GenericRepoConfig - - LayoutRef string `json:"repoLayoutRef,omitempty"` - DebianTrivialLayout bool `json:"debianTrivialLayout,omitempty"` - ChecksumPolicyType string `json:"checksumPolicyType,omitempty"` - SnapshotVersionBehavior string `json:"snapshotVersionBehavior,omitempty"` - ArchiveBrowsingEnabled bool `json:"archiveBrowsingEnabled,omitempty"` - CalculateYumMetadata bool `json:"calculateYumMetadata,omitempty"` - YumRootDepth int 
`json:"yumRootDepth,omitempty"` -} - -func (r LocalRepoConfig) MimeType() string { - return LOCAL_REPO_MIMETYPE -} - -type RemoteRepoConfig struct { - GenericRepoConfig - - Url string `json:"url"` - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Proxy string `json:"proxy,omitempty"` - RemoteRepoChecksumPolicyType string `json:"remoteRepoChecksumPolicyType,omitempty"` - HardFail bool `json:"hardFail,omitempty"` - Offline bool `json:"offline,omitempty"` - StoreArtifactsLocally bool `json:"storeArtifactsLocally,omitempty"` - SocketTimeoutMillis int `json:"socketTimeoutMillis,omitempty"` - LocalAddress string `json:"localAddress,omitempty"` - RetrivialCachePeriodSecs int `json:"retrievalCachePeriodSecs,omitempty"` - FailedRetrievalCachePeriodSecs int `json:"failedRetrievalCachePeriodSecs,omitempty"` - MissedRetrievalCachePeriodSecs int `json:"missedRetrievalCachePeriodSecs,omitempty"` - UnusedArtifactsCleanupEnabled bool `json:"unusedArtifactCleanupEnabled,omitempty"` - UnusedArtifactsCleanupPeriodHours int `json:"unusedArtifactCleanupPeriodHours,omitempty"` - FetchJarsEagerly bool `json:"fetchJarsEagerly,omitempty"` - ShareConfiguration bool `json:"shareConfiguration,omitempty"` - SynchronizeProperties bool `json:"synchronizeProperties,omitempty"` - AllowAnyHostAuth bool `json:"allowAnyHostAuth,omitempty"` - EnableCookieManagement bool `json:"enableCookieManagement,omitempty"` - BowerRegistryUrl string `json:"bowerRegistryUrl,omitempty"` - VcsType string `json:"vcsType,omitempty"` - VcsGitProvider string `json:"vcsGitProvider,omitempty"` - VcsGitDownloader string `json:"vcsGitDownloader,omitempty"` -} - -func (r RemoteRepoConfig) MimeType() string { - return REMOTE_REPO_MIMETYPE -} - -type VirtualRepoConfig struct { - GenericRepoConfig - - Repositories []string `json:"repositories"` - DebianTrivialLayout bool `json:"debianTrivialLayout,omitempty"` - ArtifactoryRequestsCanRetrieveRemoteArtifacts bool 
`json:artifactoryRequestsCanRetrieveRemoteArtifacts,omitempty"` - KeyPair string `json:"keyPair,omitempty"` - PomRepositoryReferenceCleanupPolicy string `json:"pomRepositoryReferenceCleanupPolicy,omitempty"` -} - -func (r VirtualRepoConfig) MimeType() string { - return VIRTUAL_REPO_MIMETYPE -} - -func (client *ArtifactoryClient) GetRepos(rtype string) ([]Repo, error) { - o := make(map[string]string, 0) - if rtype != "all" { - o["type"] = rtype - } - var dat []Repo - d, e := client.Get("/api/repositories", o) - if e != nil { - return dat, e - } else { - err := json.Unmarshal(d, &dat) - if err != nil { - return dat, err - } else { - return dat, e - } - } -} - -func (client *ArtifactoryClient) GetRepo(key string) (RepoConfig, error) { - o := make(map[string]string, 0) - dat := new(GenericRepoConfig) - d, e := client.Get("/api/repositories/"+key, o) - if e != nil { - return *dat, e - } else { - err := json.Unmarshal(d, &dat) - if err != nil { - return *dat, err - } else { - switch dat.RClass { - case "local": - var cdat LocalRepoConfig - _ = json.Unmarshal(d, &cdat) - return cdat, nil - case "remote": - var cdat RemoteRepoConfig - _ = json.Unmarshal(d, &cdat) - return cdat, nil - case "virtual": - var cdat VirtualRepoConfig - _ = json.Unmarshal(d, &cdat) - return cdat, nil - default: - fmt.Printf("fallthrough to default\n") - return dat, nil - } - } - } -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses.go deleted file mode 100644 index 20a41110716..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses.go +++ /dev/null @@ -1,9 +0,0 @@ -package artifactory - -type GavcSearchResults struct { - Results []FileInfo `json:"results"` -} - -type Uri struct { - Uri string `json:"uri,omitempty"` -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search.go 
b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search.go deleted file mode 100644 index 8c986d78f0e..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search.go +++ /dev/null @@ -1,47 +0,0 @@ -package artifactory - -import ( - "encoding/json" - "strings" -) - -type Gavc struct { - GroupID string - ArtifactID string - Version string - Classifier string - Repos []string -} - -func (c *ArtifactoryClient) GAVCSearch(coords *Gavc) (files []FileInfo, e error) { - url := "/api/search/gavc" - params := make(map[string]string) - if &coords.GroupID != nil { - params["g"] = coords.GroupID - } - if &coords.ArtifactID != nil { - params["a"] = coords.ArtifactID - } - if &coords.Version != nil { - params["v"] = coords.Version - } - if &coords.Classifier != nil { - params["c"] = coords.Classifier - } - if &coords.Repos != nil { - params["repos"] = strings.Join(coords.Repos, ",") - } - d, err := c.Get(url, params) - if err != nil { - return files, err - } else { - var dat GavcSearchResults - err := json.Unmarshal(d, &dat) - if err != nil { - return files, err - } else { - files = dat.Results - return files, nil - } - } -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go deleted file mode 100644 index f741309612d..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go +++ /dev/null @@ -1,6 +0,0 @@ -package artifactory - -func (c *ArtifactoryClient) GetSystemSecurityConfiguration() (s string, e error) { - d, e := c.Get("/api/system/security", make(map[string]string)) - return string(d), e -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage.go deleted file mode 100644 index b7967e11f25..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage.go +++ /dev/null @@ -1,18 
+0,0 @@ -package artifactory - -type CreatedStorageItem struct { - URI string `json:"uri"` - DownloadURI string `json:"downloadUri"` - Repo string `json:"repo"` - Created string `json:"created"` - CreatedBy string `json:"createdBy"` - Size string `json:"size"` - MimeType string `json:"mimeType"` - Checksums ArtifactChecksums `json:"checksums"` - OriginalChecksums ArtifactChecksums `json:"originalChecksums"` -} - -type ArtifactChecksums struct { - MD5 string `json:"md5"` - SHA1 string `json:"sha1"` -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system.go deleted file mode 100644 index d33c29c65f4..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system.go +++ /dev/null @@ -1,6 +0,0 @@ -package artifactory - -func (c *ArtifactoryClient) GetGeneralConfiguration() (s string, e error) { - d, e := c.Get("/api/system/configuration", make(map[string]string)) - return string(d), e -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users.go deleted file mode 100644 index 9caf6adf743..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users.go +++ /dev/null @@ -1,83 +0,0 @@ -package artifactory - -import ( - "encoding/json" - "errors" -) - -type User struct { - Name string `json:"name"` - Uri string `json:"uri"` -} - -type UserDetails struct { - Name string `json:"name,omitempty"` - Email string `json:"email"` - Password string `json:"password"` - Admin bool `json:"admin,omitempty"` - ProfileUpdatable bool `json:"profileUpdatable,omitempty"` - InternalPasswordDisabled bool `json:"internalPasswordDisabled,omitempty"` - LastLoggedIn string `json:"lastLoggedIn,omitempty"` - Realm string `json:"realm,omitempty"` - Groups []string `json:"groups,omitempty"` -} - -func (c *ArtifactoryClient) GetUsers() ([]User, error) { - var res 
[]User - d, e := c.Get("/api/security/users", make(map[string]string)) - if e != nil { - return res, e - } else { - err := json.Unmarshal(d, &res) - if err != nil { - return res, err - } else { - return res, e - } - } -} - -func (c *ArtifactoryClient) GetUserDetails(u string) (UserDetails, error) { - var res UserDetails - d, e := c.Get("/api/security/users/"+u, make(map[string]string)) - if e != nil { - return res, e - } else { - err := json.Unmarshal(d, &res) - if err != nil { - return res, err - } else { - return res, e - } - } -} - -func (c *ArtifactoryClient) CreateUser(uname string, u UserDetails) error { - if &u.Email == nil || &u.Password == nil { - return errors.New("Email and password are required to create users") - } - j, jerr := json.Marshal(u) - if jerr != nil { - return jerr - } - o := make(map[string]string) - _, err := c.Put("/api/security/users/"+uname, string(j), o) - if err != nil { - return err - } - return nil -} - -func (c *ArtifactoryClient) DeleteUser(uname string) error { - err := c.Delete("/api/security/users/" + uname) - if err != nil { - return err - } else { - return nil - } -} - -func (c *ArtifactoryClient) GetUserEncryptedPassword() (s string, err error) { - d, err := c.Get("/api/security/encryptedPassword", make(map[string]string)) - return string(d), err -} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version.go deleted file mode 100644 index bca14077e1e..00000000000 --- a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package artifactory - -const VERSION = "4.0.1" diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/LICENSE b/vendor/github.com/terraform-providers/terraform-provider-openstack/LICENSE deleted file mode 100644 index a612ad9813b..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/LICENSE +++ /dev/null @@ 
-1,373 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. 
"Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. 
Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. 
Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. 
Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. 
However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. 
* -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. * -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. 
-Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. 
If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/compute_instance_v2_networking.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/compute_instance_v2_networking.go deleted file mode 100644 index f7ae3ea54d1..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/compute_instance_v2_networking.go +++ /dev/null @@ -1,488 +0,0 @@ -// This set of code handles all functions required to configure networking -// on an openstack_compute_instance_v2 resource. -// -// This is a complicated task because it's not possible to obtain all -// information in a single API call. In fact, it even traverses multiple -// OpenStack services. -// -// The end result, from the user's point of view, is a structured set of -// understandable network information within the instance resource. 
-package openstack - -import ( - "fmt" - "log" - "os" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" - "github.com/hashicorp/terraform/helper/schema" -) - -// InstanceNIC is a structured representation of a Gophercloud servers.Server -// virtual NIC. -type InstanceNIC struct { - FixedIPv4 string - FixedIPv6 string - MAC string -} - -// InstanceAddresses is a collection of InstanceNICs, grouped by the -// network name. An instance/server could have multiple NICs on the same -// network. -type InstanceAddresses struct { - NetworkName string - InstanceNICs []InstanceNIC -} - -// InstanceNetwork represents a collection of network information that a -// Terraform instance needs to satisfy all network information requirements. -type InstanceNetwork struct { - UUID string - Name string - Port string - FixedIP string - AccessNetwork bool -} - -// getAllInstanceNetworks loops through the networks defined in the Terraform -// configuration and structures that information into something standard that -// can be consumed by both OpenStack and Terraform. -// -// This would be simple, except we have ensure both the network name and -// network ID have been determined. This isn't just for the convenience of a -// user specifying a human-readable network name, but the network information -// returned by an OpenStack instance only has the network name set! So if a -// user specified a network ID, there's no way to correlate it to the instance -// unless we know both the name and ID. -// -// Not only that, but we have to account for two OpenStack network services -// running: nova-network (legacy) and Neutron (current). 
-// -// In addition, if a port was specified, not all of the port information -// will be displayed, such as multiple fixed and floating IPs. This resource -// isn't currently configured for that type of flexibility. It's better to -// reference the actual port resource itself. -// -// So, let's begin the journey. -func getAllInstanceNetworks(d *schema.ResourceData, meta interface{}) ([]InstanceNetwork, error) { - var instanceNetworks []InstanceNetwork - - networks := d.Get("network").([]interface{}) - for _, v := range networks { - network := v.(map[string]interface{}) - networkID := network["uuid"].(string) - networkName := network["name"].(string) - portID := network["port"].(string) - - if networkID == "" && networkName == "" && portID == "" { - return nil, fmt.Errorf( - "At least one of network.uuid, network.name, or network.port must be set.") - } - - // If a user specified both an ID and name, that makes things easy - // since both name and ID are already satisfied. No need to query - // further. - if networkID != "" && networkName != "" { - v := InstanceNetwork{ - UUID: networkID, - Name: networkName, - Port: portID, - FixedIP: network["fixed_ip_v4"].(string), - AccessNetwork: network["access_network"].(bool), - } - instanceNetworks = append(instanceNetworks, v) - continue - } - - // But if at least one of name or ID was missing, we have to query - // for that other piece. - // - // Priority is given to a port since a network ID or name usually isn't - // specified when using a port. - // - // Next priority is given to the network ID since it's guaranteed to be - // an exact match. 
- queryType := "name" - queryTerm := networkName - if networkID != "" { - queryType = "id" - queryTerm = networkID - } - if portID != "" { - queryType = "port" - queryTerm = portID - } - - networkInfo, err := getInstanceNetworkInfo(d, meta, queryType, queryTerm) - if err != nil { - return nil, err - } - - v := InstanceNetwork{ - UUID: networkInfo["uuid"].(string), - Name: networkInfo["name"].(string), - Port: portID, - FixedIP: network["fixed_ip_v4"].(string), - AccessNetwork: network["access_network"].(bool), - } - - instanceNetworks = append(instanceNetworks, v) - } - - log.Printf("[DEBUG] getAllInstanceNetworks: %#v", instanceNetworks) - return instanceNetworks, nil -} - -// getInstanceNetworkInfo will query for network information in order to make -// an accurate determination of a network's name and a network's ID. -// -// We will try to first query the Neutron network service and fall back to the -// legacy nova-network service if that fails. -// -// If OS_NOVA_NETWORK is set, query nova-network even if Neutron is available. -// This is to be able to explicitly test the nova-network API. 
-func getInstanceNetworkInfo( - d *schema.ResourceData, meta interface{}, queryType, queryTerm string) (map[string]interface{}, error) { - - config := meta.(*Config) - - if _, ok := os.LookupEnv("OS_NOVA_NETWORK"); !ok { - networkClient, err := config.networkingV2Client(GetRegion(d, config)) - if err == nil { - networkInfo, err := getInstanceNetworkInfoNeutron(networkClient, queryType, queryTerm) - if err != nil { - return nil, fmt.Errorf("Error trying to get network information from the Network API: %s", err) - } - - return networkInfo, nil - } - } - - log.Printf("[DEBUG] Unable to obtain a network client") - - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return nil, fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - networkInfo, err := getInstanceNetworkInfoNovaNet(computeClient, queryType, queryTerm) - if err != nil { - return nil, fmt.Errorf("Error trying to get network information from the Nova API: %s", err) - } - - return networkInfo, nil -} - -// getInstanceNetworkInfoNovaNet will query the os-tenant-networks API for -// the network information. -func getInstanceNetworkInfoNovaNet( - client *gophercloud.ServiceClient, queryType, queryTerm string) (map[string]interface{}, error) { - - // If somehow a port ended up here, we should just error out. 
- if queryType == "port" { - return nil, fmt.Errorf( - "Unable to query a port (%s) using the Nova API", queryTerm) - } - - allPages, err := tenantnetworks.List(client).AllPages() - if err != nil { - return nil, fmt.Errorf( - "An error occured while querying the Nova API for network information: %s", err) - } - - networkList, err := tenantnetworks.ExtractNetworks(allPages) - if err != nil { - return nil, fmt.Errorf( - "An error occured while querying the Nova API for network information: %s", err) - } - - var networkFound bool - var network tenantnetworks.Network - - for _, v := range networkList { - if queryType == "id" && v.ID == queryTerm { - networkFound = true - network = v - break - } - - if queryType == "name" && v.Name == queryTerm { - networkFound = true - network = v - break - } - } - - if networkFound { - v := map[string]interface{}{ - "uuid": network.ID, - "name": network.Name, - } - - log.Printf("[DEBUG] getInstanceNetworkInfoNovaNet: %#v", v) - return v, nil - } - - return nil, fmt.Errorf("Could not find any matching network for %s %s", queryType, queryTerm) -} - -// getInstanceNetworkInfoNeutron will query the neutron API for the network -// information. -func getInstanceNetworkInfoNeutron( - client *gophercloud.ServiceClient, queryType, queryTerm string) (map[string]interface{}, error) { - - // If a port was specified, use it to look up the network ID - // and then query the network as if a network ID was originally used. 
- if queryType == "port" { - listOpts := ports.ListOpts{ - ID: queryTerm, - } - allPages, err := ports.List(client, listOpts).AllPages() - if err != nil { - return nil, fmt.Errorf("Unable to retrieve networks from the Network API: %s", err) - } - - allPorts, err := ports.ExtractPorts(allPages) - if err != nil { - return nil, fmt.Errorf("Unable to retrieve networks from the Network API: %s", err) - } - - var port ports.Port - switch len(allPorts) { - case 0: - return nil, fmt.Errorf("Could not find any matching port for %s %s", queryType, queryTerm) - case 1: - port = allPorts[0] - default: - return nil, fmt.Errorf("More than one port found for %s %s", queryType, queryTerm) - } - - queryType = "id" - queryTerm = port.NetworkID - } - - listOpts := networks.ListOpts{ - Status: "ACTIVE", - } - - switch queryType { - case "name": - listOpts.Name = queryTerm - default: - listOpts.ID = queryTerm - } - - allPages, err := networks.List(client, listOpts).AllPages() - if err != nil { - return nil, fmt.Errorf("Unable to retrieve networks from the Network API: %s", err) - } - - allNetworks, err := networks.ExtractNetworks(allPages) - if err != nil { - return nil, fmt.Errorf("Unable to retrieve networks from the Network API: %s", err) - } - - var network networks.Network - switch len(allNetworks) { - case 0: - return nil, fmt.Errorf("Could not find any matching network for %s %s", queryType, queryTerm) - case 1: - network = allNetworks[0] - default: - return nil, fmt.Errorf("More than one network found for %s %s", queryType, queryTerm) - } - - v := map[string]interface{}{ - "uuid": network.ID, - "name": network.Name, - } - - log.Printf("[DEBUG] getInstanceNetworkInfoNeutron: %#v", v) - return v, nil -} - -// getInstanceAddresses parses a Gophercloud server.Server's Address field into -// a structured InstanceAddresses struct. 
-func getInstanceAddresses(addresses map[string]interface{}) []InstanceAddresses { - var allInstanceAddresses []InstanceAddresses - - for networkName, v := range addresses { - instanceAddresses := InstanceAddresses{ - NetworkName: networkName, - } - - for _, v := range v.([]interface{}) { - instanceNIC := InstanceNIC{} - var exists bool - - v := v.(map[string]interface{}) - if v, ok := v["OS-EXT-IPS-MAC:mac_addr"].(string); ok { - instanceNIC.MAC = v - } - - if v["OS-EXT-IPS:type"] == "fixed" { - switch v["version"].(float64) { - case 6: - instanceNIC.FixedIPv6 = fmt.Sprintf("[%s]", v["addr"].(string)) - default: - instanceNIC.FixedIPv4 = v["addr"].(string) - } - } - - // To associate IPv4 and IPv6 on the right NIC, - // key on the mac address and fill in the blanks. - for i, v := range instanceAddresses.InstanceNICs { - if v.MAC == instanceNIC.MAC { - exists = true - if instanceNIC.FixedIPv6 != "" { - instanceAddresses.InstanceNICs[i].FixedIPv6 = instanceNIC.FixedIPv6 - } - if instanceNIC.FixedIPv4 != "" { - instanceAddresses.InstanceNICs[i].FixedIPv4 = instanceNIC.FixedIPv4 - } - } - } - - if !exists { - instanceAddresses.InstanceNICs = append(instanceAddresses.InstanceNICs, instanceNIC) - } - } - - allInstanceAddresses = append(allInstanceAddresses, instanceAddresses) - } - - log.Printf("[DEBUG] Addresses: %#v", addresses) - log.Printf("[DEBUG] allInstanceAddresses: %#v", allInstanceAddresses) - - return allInstanceAddresses -} - -// expandInstanceNetworks takes network information found in []InstanceNetwork -// and builds a Gophercloud []servers.Network for use in creating an Instance. 
-func expandInstanceNetworks(allInstanceNetworks []InstanceNetwork) []servers.Network { - var networks []servers.Network - for _, v := range allInstanceNetworks { - n := servers.Network{ - UUID: v.UUID, - Port: v.Port, - FixedIP: v.FixedIP, - } - networks = append(networks, n) - } - - return networks -} - -// flattenInstanceNetworks collects instance network information from different -// sources and aggregates it all together into a map array. -func flattenInstanceNetworks( - d *schema.ResourceData, meta interface{}) ([]map[string]interface{}, error) { - - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return nil, fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - server, err := servers.Get(computeClient, d.Id()).Extract() - if err != nil { - return nil, CheckDeleted(d, err, "server") - } - - allInstanceAddresses := getInstanceAddresses(server.Addresses) - allInstanceNetworks, err := getAllInstanceNetworks(d, meta) - if err != nil { - return nil, err - } - - networks := []map[string]interface{}{} - - // If there were no instance networks returned, this means that there - // was not a network specified in the Terraform configuration. When this - // happens, the instance will be launched on a "default" network, if one - // is available. If there isn't, the instance will fail to launch, so - // this is a safe assumption at this point. - if len(allInstanceNetworks) == 0 { - for _, instanceAddresses := range allInstanceAddresses { - for _, instanceNIC := range instanceAddresses.InstanceNICs { - v := map[string]interface{}{ - "name": instanceAddresses.NetworkName, - "fixed_ip_v4": instanceNIC.FixedIPv4, - "fixed_ip_v6": instanceNIC.FixedIPv6, - "mac": instanceNIC.MAC, - } - networks = append(networks, v) - } - } - - log.Printf("[DEBUG] flattenInstanceNetworks: %#v", networks) - return networks, nil - } - - // Loop through all networks and addresses, merge relevant address details. 
- for _, instanceNetwork := range allInstanceNetworks { - for _, instanceAddresses := range allInstanceAddresses { - if instanceNetwork.Name == instanceAddresses.NetworkName { - // Only use one NIC since it's possible the user defined another NIC - // on this same network in another Terraform network block. - instanceNIC := instanceAddresses.InstanceNICs[0] - copy(instanceAddresses.InstanceNICs, instanceAddresses.InstanceNICs[1:]) - v := map[string]interface{}{ - "name": instanceAddresses.NetworkName, - "fixed_ip_v4": instanceNIC.FixedIPv4, - "fixed_ip_v6": instanceNIC.FixedIPv6, - "mac": instanceNIC.MAC, - "uuid": instanceNetwork.UUID, - "port": instanceNetwork.Port, - "access_network": instanceNetwork.AccessNetwork, - } - networks = append(networks, v) - } - } - } - - log.Printf("[DEBUG] flattenInstanceNetworks: %#v", networks) - return networks, nil -} - -// getInstanceAccessAddresses determines the best IP address to communicate -// with the instance. It does this by looping through all networks and looking -// for a valid IP address. Priority is given to a network that was flagged as -// an access_network. -func getInstanceAccessAddresses( - d *schema.ResourceData, networks []map[string]interface{}) (string, string) { - - var hostv4, hostv6 string - - // Loop through all networks - // If the network has a valid fixed v4 or fixed v6 address - // and hostv4 or hostv6 is not set, set hostv4/hostv6. - // If the network is an "access_network" overwrite hostv4/hostv6. 
- for _, n := range networks { - var accessNetwork bool - - if an, ok := n["access_network"].(bool); ok && an { - accessNetwork = true - } - - if fixedIPv4, ok := n["fixed_ip_v4"].(string); ok && fixedIPv4 != "" { - if hostv4 == "" || accessNetwork { - hostv4 = fixedIPv4 - } - } - - if fixedIPv6, ok := n["fixed_ip_v6"].(string); ok && fixedIPv6 != "" { - if hostv6 == "" || accessNetwork { - hostv6 = fixedIPv6 - } - } - } - - log.Printf("[DEBUG] OpenStack Instance Network Access Addresses: %s, %s", hostv4, hostv6) - - return hostv4, hostv6 -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/config.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/config.go deleted file mode 100644 index ffbe72c06b1..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/config.go +++ /dev/null @@ -1,223 +0,0 @@ -package openstack - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "log" - "net/http" - "os" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth" - "github.com/hashicorp/terraform/helper/pathorcontents" - "github.com/hashicorp/terraform/terraform" -) - -type Config struct { - CACertFile string - ClientCertFile string - ClientKeyFile string - DomainID string - DomainName string - EndpointType string - IdentityEndpoint string - Insecure bool - Password string - Region string - Swauth bool - TenantID string - TenantName string - Token string - Username string - UserID string - - OsClient *gophercloud.ProviderClient -} - -func (c *Config) LoadAndValidate() error { - validEndpoint := false - validEndpoints := []string{ - "internal", "internalURL", - "admin", "adminURL", - "public", "publicURL", - "", - } - - for _, endpoint := range validEndpoints { - if c.EndpointType == endpoint { - validEndpoint = true - } - } - - if !validEndpoint { - return 
fmt.Errorf("Invalid endpoint type provided") - } - - ao := gophercloud.AuthOptions{ - DomainID: c.DomainID, - DomainName: c.DomainName, - IdentityEndpoint: c.IdentityEndpoint, - Password: c.Password, - TenantID: c.TenantID, - TenantName: c.TenantName, - TokenID: c.Token, - Username: c.Username, - UserID: c.UserID, - } - - client, err := openstack.NewClient(ao.IdentityEndpoint) - if err != nil { - return err - } - - // Set UserAgent - client.UserAgent.Prepend(terraform.UserAgentString()) - - config := &tls.Config{} - if c.CACertFile != "" { - caCert, _, err := pathorcontents.Read(c.CACertFile) - if err != nil { - return fmt.Errorf("Error reading CA Cert: %s", err) - } - - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM([]byte(caCert)) - config.RootCAs = caCertPool - } - - if c.Insecure { - config.InsecureSkipVerify = true - } - - if c.ClientCertFile != "" && c.ClientKeyFile != "" { - clientCert, _, err := pathorcontents.Read(c.ClientCertFile) - if err != nil { - return fmt.Errorf("Error reading Client Cert: %s", err) - } - clientKey, _, err := pathorcontents.Read(c.ClientKeyFile) - if err != nil { - return fmt.Errorf("Error reading Client Key: %s", err) - } - - cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey)) - if err != nil { - return err - } - - config.Certificates = []tls.Certificate{cert} - config.BuildNameToCertificate() - } - - // if OS_DEBUG is set, log the requests and responses - var osDebug bool - if os.Getenv("OS_DEBUG") != "" { - osDebug = true - } - - transport := &http.Transport{Proxy: http.ProxyFromEnvironment, TLSClientConfig: config} - client.HTTPClient = http.Client{ - Transport: &LogRoundTripper{ - Rt: transport, - OsDebug: osDebug, - }, - } - - // If using Swift Authentication, there's no need to validate authentication normally. 
- if !c.Swauth { - err = openstack.Authenticate(client, ao) - if err != nil { - return err - } - } - - c.OsClient = client - - return nil -} - -func (c *Config) determineRegion(region string) string { - // If a resource-level region was not specified, and a provider-level region was set, - // use the provider-level region. - if region == "" && c.Region != "" { - region = c.Region - } - - log.Printf("[DEBUG] OpenStack Region is: %s", region) - return region -} - -func (c *Config) blockStorageV1Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewBlockStorageV1(c.OsClient, gophercloud.EndpointOpts{ - Region: c.determineRegion(region), - Availability: c.getEndpointType(), - }) -} - -func (c *Config) blockStorageV2Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewBlockStorageV2(c.OsClient, gophercloud.EndpointOpts{ - Region: c.determineRegion(region), - Availability: c.getEndpointType(), - }) -} - -func (c *Config) computeV2Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewComputeV2(c.OsClient, gophercloud.EndpointOpts{ - Region: c.determineRegion(region), - Availability: c.getEndpointType(), - }) -} - -func (c *Config) dnsV2Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewDNSV2(c.OsClient, gophercloud.EndpointOpts{ - Region: c.determineRegion(region), - Availability: c.getEndpointType(), - }) -} - -func (c *Config) identityV3Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewIdentityV3(c.OsClient, gophercloud.EndpointOpts{ - Region: c.determineRegion(region), - Availability: c.getEndpointType(), - }) -} - -func (c *Config) imageV2Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewImageServiceV2(c.OsClient, gophercloud.EndpointOpts{ - Region: c.determineRegion(region), - Availability: c.getEndpointType(), - }) -} - -func (c *Config) networkingV2Client(region string) 
(*gophercloud.ServiceClient, error) { - return openstack.NewNetworkV2(c.OsClient, gophercloud.EndpointOpts{ - Region: c.determineRegion(region), - Availability: c.getEndpointType(), - }) -} - -func (c *Config) objectStorageV1Client(region string) (*gophercloud.ServiceClient, error) { - // If Swift Authentication is being used, return a swauth client. - if c.Swauth { - return swauth.NewObjectStorageV1(c.OsClient, swauth.AuthOpts{ - User: c.Username, - Key: c.Password, - }) - } - - return openstack.NewObjectStorageV1(c.OsClient, gophercloud.EndpointOpts{ - Region: c.determineRegion(region), - Availability: c.getEndpointType(), - }) -} - -func (c *Config) getEndpointType() gophercloud.Availability { - if c.EndpointType == "internal" || c.EndpointType == "internalURL" { - return gophercloud.AvailabilityInternal - } - if c.EndpointType == "admin" || c.EndpointType == "adminURL" { - return gophercloud.AvailabilityAdmin - } - return gophercloud.AvailabilityPublic -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_images_image_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_images_image_v2.go deleted file mode 100644 index ffc62eed542..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_images_image_v2.go +++ /dev/null @@ -1,283 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "sort" - - "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceImagesImageV2() *schema.Resource { - return &schema.Resource{ - Read: dataSourceImagesImageV2Read, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "visibility": { - 
Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "owner": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "size_min": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "size_max": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "sort_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "name", - }, - - "sort_direction": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "asc", - ValidateFunc: dataSourceImagesImageV2SortDirection, - }, - - "tag": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "most_recent": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - "properties": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - - // Computed values - "container_format": { - Type: schema.TypeString, - Computed: true, - }, - - "disk_format": { - Type: schema.TypeString, - Computed: true, - }, - - "min_disk_gb": { - Type: schema.TypeInt, - Computed: true, - }, - - "min_ram_mb": { - Type: schema.TypeInt, - Computed: true, - }, - - "protected": { - Type: schema.TypeBool, - Computed: true, - }, - - "checksum": { - Type: schema.TypeString, - Computed: true, - }, - - "size_bytes": { - Type: schema.TypeInt, - Computed: true, - }, - - "metadata": { - Type: schema.TypeMap, - Computed: true, - }, - - "updated_at": { - Type: schema.TypeString, - Computed: true, - }, - - "file": { - Type: schema.TypeString, - Computed: true, - }, - - "schema": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// dataSourceImagesImageV2Read performs the image lookup. 
-func dataSourceImagesImageV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - imageClient, err := config.imageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack image client: %s", err) - } - - visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string)) - - listOpts := images.ListOpts{ - Name: d.Get("name").(string), - Visibility: visibility, - Owner: d.Get("owner").(string), - Status: images.ImageStatusActive, - SizeMin: int64(d.Get("size_min").(int)), - SizeMax: int64(d.Get("size_max").(int)), - SortKey: d.Get("sort_key").(string), - SortDir: d.Get("sort_direction").(string), - Tag: d.Get("tag").(string), - } - - log.Printf("[DEBUG] List Options: %#v", listOpts) - - var image images.Image - allPages, err := images.List(imageClient, listOpts).AllPages() - if err != nil { - return fmt.Errorf("Unable to query images: %s", err) - } - - allImages, err := images.ExtractImages(allPages) - if err != nil { - return fmt.Errorf("Unable to retrieve images: %s", err) - } - - properties := d.Get("properties").(map[string]interface{}) - imageProperties := resourceImagesImageV2ExpandProperties(properties) - if len(allImages) > 1 && len(imageProperties) > 0 { - var filteredImages []images.Image - for _, image := range allImages { - if len(image.Properties) > 0 { - match := true - for searchKey, searchValue := range imageProperties { - imageValue, ok := image.Properties[searchKey] - if !ok { - match = false - break - } - - if searchValue != imageValue { - match = false - break - } - } - - if match { - filteredImages = append(filteredImages, image) - } - } - } - allImages = filteredImages - } - - if len(allImages) < 1 { - return fmt.Errorf("Your query returned no results. 
" + - "Please change your search criteria and try again.") - } - - if len(allImages) > 1 { - recent := d.Get("most_recent").(bool) - log.Printf("[DEBUG] Multiple results found and `most_recent` is set to: %t", recent) - if recent { - image = mostRecentImage(allImages) - } else { - log.Printf("[DEBUG] Multiple results found: %#v", allImages) - return fmt.Errorf("Your query returned more than one result. Please try a more " + - "specific search criteria, or set `most_recent` attribute to true.") - } - } else { - image = allImages[0] - } - - log.Printf("[DEBUG] Single Image found: %s", image.ID) - return dataSourceImagesImageV2Attributes(d, &image) -} - -// dataSourceImagesImageV2Attributes populates the fields of an Image resource. -func dataSourceImagesImageV2Attributes(d *schema.ResourceData, image *images.Image) error { - log.Printf("[DEBUG] openstack_images_image details: %#v", image) - - d.SetId(image.ID) - d.Set("name", image.Name) - d.Set("tags", image.Tags) - d.Set("container_format", image.ContainerFormat) - d.Set("disk_format", image.DiskFormat) - d.Set("min_disk_gb", image.MinDiskGigabytes) - d.Set("min_ram_mb", image.MinRAMMegabytes) - d.Set("owner", image.Owner) - d.Set("protected", image.Protected) - d.Set("visibility", image.Visibility) - d.Set("checksum", image.Checksum) - d.Set("size_bytes", image.SizeBytes) - d.Set("metadata", image.Metadata) - d.Set("created_at", image.CreatedAt) - d.Set("updated_at", image.UpdatedAt) - d.Set("file", image.File) - d.Set("schema", image.Schema) - - return nil -} - -type imageSort []images.Image - -func (a imageSort) Len() int { return len(a) } -func (a imageSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a imageSort) Less(i, j int) bool { - itime := a[i].CreatedAt - jtime := a[j].CreatedAt - return itime.Unix() < jtime.Unix() -} - -// Returns the most recent Image out of a slice of images. 
-func mostRecentImage(images []images.Image) images.Image { - sortedImages := images - sort.Sort(imageSort(sortedImages)) - return sortedImages[len(sortedImages)-1] -} - -func dataSourceImagesImageV2SortDirection(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "asc" && value != "desc" { - err := fmt.Errorf("%s must be either asc or desc", k) - errors = append(errors, err) - } - return -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_network_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_network_v2.go deleted file mode 100644 index 6377057b755..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_network_v2.go +++ /dev/null @@ -1,125 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -func dataSourceNetworkingNetworkV2() *schema.Resource { - return &schema.Resource{ - Read: dataSourceNetworkingNetworkV2Read, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "network_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "status": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "matching_subnet_cidr": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_TENANT_ID", - "OS_PROJECT_ID", - }, ""), 
- Description: descriptions["tenant_id"], - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "shared": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceNetworkingNetworkV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - - listOpts := networks.ListOpts{ - ID: d.Get("network_id").(string), - Name: d.Get("name").(string), - TenantID: d.Get("tenant_id").(string), - } - - if v, ok := d.GetOk("status"); ok { - listOpts.Status = v.(string) - } - - pages, err := networks.List(networkingClient, listOpts).AllPages() - allNetworks, err := networks.ExtractNetworks(pages) - if err != nil { - return fmt.Errorf("Unable to retrieve networks: %s", err) - } - - var refinedNetworks []networks.Network - if cidr := d.Get("matching_subnet_cidr").(string); cidr != "" { - for _, n := range allNetworks { - for _, s := range n.Subnets { - subnet, err := subnets.Get(networkingClient, s).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - continue - } - return fmt.Errorf("Unable to retrieve network subnet: %s", err) - } - if cidr == subnet.CIDR { - refinedNetworks = append(refinedNetworks, n) - } - } - } - } else { - refinedNetworks = allNetworks - } - - if len(refinedNetworks) < 1 { - return fmt.Errorf("Your query returned no results. " + - "Please change your search criteria and try again.") - } - - if len(refinedNetworks) > 1 { - return fmt.Errorf("Your query returned more than one result." 
+ - " Please try a more specific search criteria") - } - - network := refinedNetworks[0] - - log.Printf("[DEBUG] Retrieved Network %s: %+v", network.ID, network) - d.SetId(network.ID) - - d.Set("name", network.Name) - d.Set("admin_state_up", strconv.FormatBool(network.AdminStateUp)) - d.Set("shared", strconv.FormatBool(network.Shared)) - d.Set("tenant_id", network.TenantID) - d.Set("region", GetRegion(d, config)) - - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_secgroup_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_secgroup_v2.go deleted file mode 100644 index 6f16782765d..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_secgroup_v2.go +++ /dev/null @@ -1,76 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceNetworkingSecGroupV2() *schema.Resource { - return &schema.Resource{ - Read: dataSourceNetworkingSecGroupV2Read, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "secgroup_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - }, - } -} - -func dataSourceNetworkingSecGroupV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - - listOpts := groups.ListOpts{ - ID: d.Get("secgroup_id").(string), - Name: d.Get("name").(string), - TenantID: 
d.Get("tenant_id").(string), - } - - pages, err := groups.List(networkingClient, listOpts).AllPages() - allSecGroups, err := groups.ExtractGroups(pages) - if err != nil { - return fmt.Errorf("Unable to retrieve security groups: %s", err) - } - - if len(allSecGroups) < 1 { - return fmt.Errorf("No Security Group found with name: %s", d.Get("name")) - } - - if len(allSecGroups) > 1 { - return fmt.Errorf("More than one Security Group found with name: %s", d.Get("name")) - } - - secGroup := allSecGroups[0] - - log.Printf("[DEBUG] Retrieved Security Group %s: %+v", secGroup.ID, secGroup) - d.SetId(secGroup.ID) - - d.Set("name", secGroup.Name) - d.Set("description", secGroup.Description) - d.Set("tenant_id", secGroup.TenantID) - d.Set("region", GetRegion(d, config)) - - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/lb_v2_shared.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/lb_v2_shared.go deleted file mode 100644 index 4769e768a4a..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/lb_v2_shared.go +++ /dev/null @@ -1,243 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" -) - -func waitForLBV2Listener(networkingClient *gophercloud.ServiceClient, id string, target string, pending []string, timeout time.Duration) error { - log.Printf("[DEBUG] Waiting for listener %s to become %s.", id, target) - - stateConf := &resource.StateChangeConf{ - Target: []string{target}, - 
Pending: pending, - Refresh: resourceLBV2ListenerRefreshFunc(networkingClient, id), - Timeout: timeout, - Delay: 5 * time.Second, - MinTimeout: 1 * time.Second, - } - - _, err := stateConf.WaitForState() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - switch target { - case "DELETED": - return nil - default: - return fmt.Errorf("Error: listener %s not found: %s", id, err) - } - } - return fmt.Errorf("Error waiting for listener %s to become %s: %s", id, target, err) - } - - return nil -} - -func resourceLBV2ListenerRefreshFunc(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - listener, err := listeners.Get(networkingClient, id).Extract() - if err != nil { - return nil, "", err - } - - // The listener resource has no Status attribute, so a successful Get is the best we can do - return listener, "ACTIVE", nil - } -} - -func waitForLBV2LoadBalancer(networkingClient *gophercloud.ServiceClient, id string, target string, pending []string, timeout time.Duration) error { - log.Printf("[DEBUG] Waiting for loadbalancer %s to become %s.", id, target) - - stateConf := &resource.StateChangeConf{ - Target: []string{target}, - Pending: pending, - Refresh: resourceLBV2LoadBalancerRefreshFunc(networkingClient, id), - Timeout: timeout, - Delay: 5 * time.Second, - MinTimeout: 1 * time.Second, - } - - _, err := stateConf.WaitForState() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - switch target { - case "DELETED": - return nil - default: - return fmt.Errorf("Error: loadbalancer %s not found: %s", id, err) - } - } - return fmt.Errorf("Error waiting for loadbalancer %s to become %s: %s", id, target, err) - } - - return nil -} - -func resourceLBV2LoadBalancerRefreshFunc(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - lb, err := loadbalancers.Get(networkingClient, 
id).Extract() - if err != nil { - return nil, "", err - } - - return lb, lb.ProvisioningStatus, nil - } -} - -func waitForLBV2Member(networkingClient *gophercloud.ServiceClient, poolID, memberID string, target string, pending []string, timeout time.Duration) error { - log.Printf("[DEBUG] Waiting for member %s to become %s.", memberID, target) - - stateConf := &resource.StateChangeConf{ - Target: []string{target}, - Pending: pending, - Refresh: resourceLBV2MemberRefreshFunc(networkingClient, poolID, memberID), - Timeout: timeout, - Delay: 5 * time.Second, - MinTimeout: 1 * time.Second, - } - - _, err := stateConf.WaitForState() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - switch target { - case "DELETED": - return nil - default: - return fmt.Errorf("Error: member %s not found: %s", memberID, err) - } - } - return fmt.Errorf("Error waiting for member %s to become %s: %s", memberID, target, err) - } - - return nil -} - -func resourceLBV2MemberRefreshFunc(networkingClient *gophercloud.ServiceClient, poolID, memberID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - member, err := pools.GetMember(networkingClient, poolID, memberID).Extract() - if err != nil { - return nil, "", err - } - - // The member resource has no Status attribute, so a successful Get is the best we can do - return member, "ACTIVE", nil - } -} - -func waitForLBV2Monitor(networkingClient *gophercloud.ServiceClient, id string, target string, pending []string, timeout time.Duration) error { - log.Printf("[DEBUG] Waiting for monitor %s to become %s.", id, target) - - stateConf := &resource.StateChangeConf{ - Target: []string{target}, - Pending: pending, - Refresh: resourceLBV2MonitorRefreshFunc(networkingClient, id), - Timeout: timeout, - Delay: 5 * time.Second, - MinTimeout: 1 * time.Second, - } - - _, err := stateConf.WaitForState() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - switch target { - case "DELETED": - 
return nil - default: - return fmt.Errorf("Error: monitor %s not found: %s", id, err) - } - } - return fmt.Errorf("Error waiting for monitor %s to become %s: %s", id, target, err) - } - - return nil -} - -func resourceLBV2MonitorRefreshFunc(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - monitor, err := monitors.Get(networkingClient, id).Extract() - if err != nil { - return nil, "", err - } - - // The monitor resource has no Status attribute, so a successful Get is the best we can do - return monitor, "ACTIVE", nil - } -} - -func waitForLBV2Pool(networkingClient *gophercloud.ServiceClient, id string, target string, pending []string, timeout time.Duration) error { - log.Printf("[DEBUG] Waiting for pool %s to become %s.", id, target) - - stateConf := &resource.StateChangeConf{ - Target: []string{target}, - Pending: pending, - Refresh: resourceLBV2PoolRefreshFunc(networkingClient, id), - Timeout: timeout, - Delay: 5 * time.Second, - MinTimeout: 1 * time.Second, - } - - _, err := stateConf.WaitForState() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - switch target { - case "DELETED": - return nil - default: - return fmt.Errorf("Error: pool %s not found: %s", id, err) - } - } - return fmt.Errorf("Error waiting for pool %s to become %s: %s", id, target, err) - } - - return nil -} - -func resourceLBV2PoolRefreshFunc(networkingClient *gophercloud.ServiceClient, poolID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - pool, err := pools.Get(networkingClient, poolID).Extract() - if err != nil { - return nil, "", err - } - - // The pool resource has no Status attribute, so a successful Get is the best we can do - return pool, "ACTIVE", nil - } -} - -func waitForLBV2viaPool(networkingClient *gophercloud.ServiceClient, id string, target string, timeout time.Duration) error { - pool, err := pools.Get(networkingClient, id).Extract() - if 
err != nil { - return err - } - - if pool.Loadbalancers != nil { - // each pool has an LB in Octavia lbaasv2 API - lbID := pool.Loadbalancers[0].ID - return waitForLBV2LoadBalancer(networkingClient, lbID, target, nil, timeout) - } - - if pool.Listeners != nil { - // each pool has a listener in Neutron lbaasv2 API - listenerID := pool.Listeners[0].ID - listener, err := listeners.Get(networkingClient, listenerID).Extract() - if err != nil { - return err - } - if listener.Loadbalancers != nil { - lbID := listener.Loadbalancers[0].ID - return waitForLBV2LoadBalancer(networkingClient, lbID, target, nil, timeout) - } - } - - // got a pool but no LB - this is wrong - return fmt.Errorf("No Load Balancer on pool %s", id) -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/provider.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/provider.go deleted file mode 100644 index 1b50d24a96c..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/provider.go +++ /dev/null @@ -1,261 +0,0 @@ -package openstack - -import ( - "github.com/hashicorp/terraform/helper/mutexkv" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// This is a global MutexKV for use within this plugin. -var osMutexKV = mutexkv.NewMutexKV() - -// Provider returns a schema.Provider for OpenStack. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "auth_url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_URL", nil), - Description: descriptions["auth_url"], - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: descriptions["region"], - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - - "user_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_USERNAME", ""), - Description: descriptions["user_name"], - }, - - "user_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_USER_ID", ""), - Description: descriptions["user_name"], - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_TENANT_ID", - "OS_PROJECT_ID", - }, ""), - Description: descriptions["tenant_id"], - }, - - "tenant_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_TENANT_NAME", - "OS_PROJECT_NAME", - }, ""), - Description: descriptions["tenant_name"], - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Sensitive: true, - DefaultFunc: schema.EnvDefaultFunc("OS_PASSWORD", ""), - Description: descriptions["password"], - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_TOKEN", ""), - Description: descriptions["token"], - }, - - "domain_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_USER_DOMAIN_ID", - "OS_PROJECT_DOMAIN_ID", - "OS_DOMAIN_ID", - }, ""), - Description: descriptions["domain_id"], - }, - - "domain_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - 
DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_USER_DOMAIN_NAME", - "OS_PROJECT_DOMAIN_NAME", - "OS_DOMAIN_NAME", - "OS_DEFAULT_DOMAIN", - }, ""), - Description: descriptions["domain_name"], - }, - - "insecure": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_INSECURE", ""), - Description: descriptions["insecure"], - }, - - "endpoint_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_ENDPOINT_TYPE", ""), - }, - - "cacert_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_CACERT", ""), - Description: descriptions["cacert_file"], - }, - - "cert": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_CERT", ""), - Description: descriptions["cert"], - }, - - "key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_KEY", ""), - Description: descriptions["key"], - }, - - "swauth": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_SWAUTH", ""), - Description: descriptions["swauth"], - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "openstack_images_image_v2": dataSourceImagesImageV2(), - "openstack_networking_network_v2": dataSourceNetworkingNetworkV2(), - "openstack_networking_secgroup_v2": dataSourceNetworkingSecGroupV2(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "openstack_blockstorage_volume_v1": resourceBlockStorageVolumeV1(), - "openstack_blockstorage_volume_v2": resourceBlockStorageVolumeV2(), - "openstack_blockstorage_volume_attach_v2": resourceBlockStorageVolumeAttachV2(), - "openstack_compute_flavor_v2": resourceComputeFlavorV2(), - "openstack_compute_instance_v2": resourceComputeInstanceV2(), - "openstack_compute_keypair_v2": resourceComputeKeypairV2(), - "openstack_compute_secgroup_v2": resourceComputeSecGroupV2(), 
- "openstack_compute_servergroup_v2": resourceComputeServerGroupV2(), - "openstack_compute_floatingip_v2": resourceComputeFloatingIPV2(), - "openstack_compute_floatingip_associate_v2": resourceComputeFloatingIPAssociateV2(), - "openstack_compute_volume_attach_v2": resourceComputeVolumeAttachV2(), - "openstack_dns_recordset_v2": resourceDNSRecordSetV2(), - "openstack_dns_zone_v2": resourceDNSZoneV2(), - "openstack_fw_firewall_v1": resourceFWFirewallV1(), - "openstack_fw_policy_v1": resourceFWPolicyV1(), - "openstack_fw_rule_v1": resourceFWRuleV1(), - "openstack_identity_project_v3": resourceIdentityProjectV3(), - "openstack_identity_user_v3": resourceIdentityUserV3(), - "openstack_images_image_v2": resourceImagesImageV2(), - "openstack_lb_member_v1": resourceLBMemberV1(), - "openstack_lb_monitor_v1": resourceLBMonitorV1(), - "openstack_lb_pool_v1": resourceLBPoolV1(), - "openstack_lb_vip_v1": resourceLBVipV1(), - "openstack_lb_loadbalancer_v2": resourceLoadBalancerV2(), - "openstack_lb_listener_v2": resourceListenerV2(), - "openstack_lb_pool_v2": resourcePoolV2(), - "openstack_lb_member_v2": resourceMemberV2(), - "openstack_lb_monitor_v2": resourceMonitorV2(), - "openstack_networking_network_v2": resourceNetworkingNetworkV2(), - "openstack_networking_subnet_v2": resourceNetworkingSubnetV2(), - "openstack_networking_floatingip_v2": resourceNetworkingFloatingIPV2(), - "openstack_networking_port_v2": resourceNetworkingPortV2(), - "openstack_networking_router_v2": resourceNetworkingRouterV2(), - "openstack_networking_router_interface_v2": resourceNetworkingRouterInterfaceV2(), - "openstack_networking_router_route_v2": resourceNetworkingRouterRouteV2(), - "openstack_networking_secgroup_v2": resourceNetworkingSecGroupV2(), - "openstack_networking_secgroup_rule_v2": resourceNetworkingSecGroupRuleV2(), - "openstack_objectstorage_container_v1": resourceObjectStorageContainerV1(), - }, - - ConfigureFunc: configureProvider, - } -} - -var descriptions map[string]string - -func 
init() { - descriptions = map[string]string{ - "auth_url": "The Identity authentication URL.", - - "region": "The OpenStack region to connect to.", - - "user_name": "Username to login with.", - - "user_id": "User ID to login with.", - - "tenant_id": "The ID of the Tenant (Identity v2) or Project (Identity v3)\n" + - "to login with.", - - "tenant_name": "The name of the Tenant (Identity v2) or Project (Identity v3)\n" + - "to login with.", - - "password": "Password to login with.", - - "token": "Authentication token to use as an alternative to username/password.", - - "domain_id": "The ID of the Domain to scope to (Identity v3).", - - "domain_name": "The name of the Domain to scope to (Identity v3).", - - "insecure": "Trust self-signed certificates.", - - "cacert_file": "A Custom CA certificate.", - - "endpoint_type": "The catalog endpoint type to use.", - - "cert": "A client certificate to authenticate with.", - - "key": "A client private key to authenticate with.", - - "swauth": "Use Swift's authentication system instead of Keystone. 
Only used for\n" + - "interaction with Swift.", - } -} - -func configureProvider(d *schema.ResourceData) (interface{}, error) { - config := Config{ - CACertFile: d.Get("cacert_file").(string), - ClientCertFile: d.Get("cert").(string), - ClientKeyFile: d.Get("key").(string), - DomainID: d.Get("domain_id").(string), - DomainName: d.Get("domain_name").(string), - EndpointType: d.Get("endpoint_type").(string), - IdentityEndpoint: d.Get("auth_url").(string), - Insecure: d.Get("insecure").(bool), - Password: d.Get("password").(string), - Region: d.Get("region").(string), - Swauth: d.Get("swauth").(bool), - Token: d.Get("token").(string), - TenantID: d.Get("tenant_id").(string), - TenantName: d.Get("tenant_name").(string), - Username: d.Get("user_name").(string), - UserID: d.Get("user_id").(string), - } - - if err := config.LoadAndValidate(); err != nil { - return nil, err - } - - return &config, nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_attach_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_attach_v2.go deleted file mode 100644 index b00491a16f5..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_attach_v2.go +++ /dev/null @@ -1,414 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" - "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceBlockStorageVolumeAttachV2() *schema.Resource { - return &schema.Resource{ - Create: resourceBlockStorageVolumeAttachV2Create, - Read: resourceBlockStorageVolumeAttachV2Read, - Delete: resourceBlockStorageVolumeAttachV2Delete, - - Timeouts: 
&schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "volume_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Removed: "instance_id is no longer used in this resource", - }, - - "host_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "device": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "attach_mode": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "ro" && value != "rw" { - errors = append(errors, fmt.Errorf( - "Only 'ro' and 'rw' are supported values for 'attach_mode'")) - } - return - }, - }, - - "initiator": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "multipath": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "os_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "platform": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "wwpn": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "wwnn": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - // Volume attachment information - "data": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - Sensitive: true, - }, - - 
"driver_volume_type": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "mount_point_base": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceBlockStorageVolumeAttachV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - client, err := config.blockStorageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - // initialize the connection - volumeId := d.Get("volume_id").(string) - connOpts := &volumeactions.InitializeConnectionOpts{} - if v, ok := d.GetOk("host_name"); ok { - connOpts.Host = v.(string) - } - - if v, ok := d.GetOk("multipath"); ok { - multipath := v.(bool) - connOpts.Multipath = &multipath - } - - if v, ok := d.GetOk("ip_address"); ok { - connOpts.IP = v.(string) - } - - if v, ok := d.GetOk("initiator"); ok { - connOpts.Initiator = v.(string) - } - - if v, ok := d.GetOk("os_type"); ok { - connOpts.OSType = v.(string) - } - - if v, ok := d.GetOk("platform"); ok { - connOpts.Platform = v.(string) - } - - if v, ok := d.GetOk("wwnns"); ok { - connOpts.Wwnns = v.(string) - } - - if v, ok := d.GetOk("wwpns"); ok { - var wwpns []string - for _, i := range v.([]string) { - wwpns = append(wwpns, i) - } - - connOpts.Wwpns = wwpns - } - - connInfo, err := volumeactions.InitializeConnection(client, volumeId, connOpts).Extract() - if err != nil { - return fmt.Errorf("Unable to create connection: %s", err) - } - - // Only uncomment this when debugging since connInfo contains sensitive information. - // log.Printf("[DEBUG] Volume Connection for %s: %#v", volumeId, connInfo) - - // Because this information is only returned upon creation, - // it must be set in Create. 
- if v, ok := connInfo["data"]; ok { - data := make(map[string]string) - for key, value := range v.(map[string]interface{}) { - if v, ok := value.(string); ok { - data[key] = v - } - } - - d.Set("data", data) - } - - if v, ok := connInfo["driver_volume_type"]; ok { - d.Set("driver_volume_type", v) - } - - if v, ok := connInfo["mount_point_base"]; ok { - d.Set("mount_point_base", v) - } - - // Once the connection has been made, tell Cinder to mark the volume as attached. - attachMode, err := blockStorageVolumeAttachV2AttachMode(d.Get("attach_mode").(string)) - if err != nil { - return nil - } - - attachOpts := &volumeactions.AttachOpts{ - HostName: d.Get("host_name").(string), - MountPoint: d.Get("device").(string), - Mode: attachMode, - } - - log.Printf("[DEBUG] Attachment Options: %#v", attachOpts) - - if err := volumeactions.Attach(client, volumeId, attachOpts).ExtractErr(); err != nil { - return err - } - - // Wait for the volume to become available. - log.Printf("[DEBUG] Waiting for volume (%s) to become available", volumeId) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"available", "attaching"}, - Target: []string{"in-use"}, - Refresh: VolumeV2StateRefreshFunc(client, volumeId), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for volume (%s) to become ready: %s", volumeId, err) - } - - // Once the volume has been marked as attached, - // retrieve a fresh copy of it with all information now available. 
- volume, err := volumes.Get(client, volumeId).Extract() - if err != nil { - return err - } - - // Search for the attachmentId - var attachmentId string - hostName := d.Get("host_name").(string) - for _, attachment := range volume.Attachments { - if hostName != "" && hostName == attachment.HostName { - attachmentId = attachment.AttachmentID - } - } - - if attachmentId == "" { - return fmt.Errorf("Unable to determine attachment ID.") - } - - // The ID must be a combination of the volume and attachment ID - // since a volume ID is required to retrieve an attachment ID. - id := fmt.Sprintf("%s/%s", volumeId, attachmentId) - d.SetId(id) - - return resourceBlockStorageVolumeAttachV2Read(d, meta) -} - -func resourceBlockStorageVolumeAttachV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - client, err := config.blockStorageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(d.Id()) - if err != nil { - return err - } - - volume, err := volumes.Get(client, volumeId).Extract() - if err != nil { - return err - } - - log.Printf("[DEBUG] Retrieved volume %s: %#v", d.Id(), volume) - - var attachment volumes.Attachment - for _, v := range volume.Attachments { - if attachmentId == v.AttachmentID { - attachment = v - } - } - - log.Printf("[DEBUG] Retrieved volume attachment: %#v", attachment) - - return nil -} - -func resourceBlockStorageVolumeAttachV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - client, err := config.blockStorageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(d.Id()) - - // Terminate the connection - termOpts := &volumeactions.TerminateConnectionOpts{} - if v, ok := d.GetOk("host_name"); ok { - 
termOpts.Host = v.(string) - } - - if v, ok := d.GetOk("multipath"); ok { - multipath := v.(bool) - termOpts.Multipath = &multipath - } - - if v, ok := d.GetOk("ip_address"); ok { - termOpts.IP = v.(string) - } - - if v, ok := d.GetOk("initiator"); ok { - termOpts.Initiator = v.(string) - } - - if v, ok := d.GetOk("os_type"); ok { - termOpts.OSType = v.(string) - } - - if v, ok := d.GetOk("platform"); ok { - termOpts.Platform = v.(string) - } - - if v, ok := d.GetOk("wwnns"); ok { - termOpts.Wwnns = v.(string) - } - - if v, ok := d.GetOk("wwpns"); ok { - var wwpns []string - for _, i := range v.([]string) { - wwpns = append(wwpns, i) - } - - termOpts.Wwpns = wwpns - } - - err = volumeactions.TerminateConnection(client, volumeId, termOpts).ExtractErr() - if err != nil { - return fmt.Errorf("Error terminating volume connection %s: %s", volumeId, err) - } - - // Detach the volume - detachOpts := volumeactions.DetachOpts{ - AttachmentID: attachmentId, - } - - log.Printf("[DEBUG] Detachment Options: %#v", detachOpts) - - if err := volumeactions.Detach(client, volumeId, detachOpts).ExtractErr(); err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"in-use", "attaching", "detaching"}, - Target: []string{"available"}, - Refresh: VolumeV2StateRefreshFunc(client, volumeId), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for volume (%s) to become available: %s", volumeId, err) - } - - return nil -} - -func blockStorageVolumeAttachV2AttachMode(v string) (volumeactions.AttachMode, error) { - var attachMode volumeactions.AttachMode - var attachError error - switch v { - case "": - attachMode = "" - case "ro": - attachMode = volumeactions.ReadOnly - case "rw": - attachMode = volumeactions.ReadWrite - default: - attachError = fmt.Errorf("Invalid attach_mode specified") - } - - return 
attachMode, attachError -} - -func blockStorageVolumeAttachV2ParseId(id string) (string, string, error) { - parts := strings.Split(id, "/") - if len(parts) < 2 { - return "", "", fmt.Errorf("Unable to determine attachment ID") - } - - return parts[0], parts[1], nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v1.go deleted file mode 100644 index 7529f6341c7..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v1.go +++ /dev/null @@ -1,340 +0,0 @@ -package openstack - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceBlockStorageVolumeV1() *schema.Resource { - return &schema.Resource{ - Create: resourceBlockStorageVolumeV1Create, - Read: resourceBlockStorageVolumeV1Read, - Update: resourceBlockStorageVolumeV1Update, - Delete: resourceBlockStorageVolumeV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "size": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - 
"description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "availability_zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: false, - Computed: true, - }, - "snapshot_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "source_vol_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "image_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "volume_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "attachment": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "device": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Set: resourceVolumeAttachmentHash, - }, - }, - } -} - -func resourceBlockStorageVolumeV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV1Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - createOpts := &volumes.CreateOpts{ - Description: d.Get("description").(string), - AvailabilityZone: d.Get("availability_zone").(string), - Name: d.Get("name").(string), - Size: d.Get("size").(int), - SnapshotID: d.Get("snapshot_id").(string), - SourceVolID: d.Get("source_vol_id").(string), - ImageID: d.Get("image_id").(string), - VolumeType: d.Get("volume_type").(string), - Metadata: resourceContainerMetadataV2(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - v, err := 
volumes.Create(blockStorageClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack volume: %s", err) - } - log.Printf("[INFO] Volume ID: %s", v.ID) - - // Wait for the volume to become available. - log.Printf( - "[DEBUG] Waiting for volume (%s) to become available", - v.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"downloading", "creating"}, - Target: []string{"available"}, - Refresh: VolumeV1StateRefreshFunc(blockStorageClient, v.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to become ready: %s", - v.ID, err) - } - - // Store the ID now - d.SetId(v.ID) - - return resourceBlockStorageVolumeV1Read(d, meta) -} - -func resourceBlockStorageVolumeV1Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - blockStorageClient, err := config.blockStorageV1Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - v, err := volumes.Get(blockStorageClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "volume") - } - - log.Printf("[DEBUG] Retrieved volume %s: %+v", d.Id(), v) - - d.Set("size", v.Size) - d.Set("description", v.Description) - d.Set("availability_zone", v.AvailabilityZone) - d.Set("name", v.Name) - d.Set("snapshot_id", v.SnapshotID) - d.Set("source_vol_id", v.SourceVolID) - d.Set("volume_type", v.VolumeType) - d.Set("metadata", v.Metadata) - d.Set("region", GetRegion(d, config)) - - attachments := make([]map[string]interface{}, len(v.Attachments)) - for i, attachment := range v.Attachments { - attachments[i] = make(map[string]interface{}) - attachments[i]["id"] = attachment["id"] - attachments[i]["instance_id"] = attachment["server_id"] - attachments[i]["device"] = attachment["device"] - log.Printf("[DEBUG] 
attachment: %v", attachment) - } - d.Set("attachment", attachments) - - return nil -} - -func resourceBlockStorageVolumeV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV1Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - updateOpts := volumes.UpdateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - } - - if d.HasChange("metadata") { - updateOpts.Metadata = resourceVolumeMetadataV1(d) - } - - _, err = volumes.Update(blockStorageClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack volume: %s", err) - } - - return resourceBlockStorageVolumeV1Read(d, meta) -} - -func resourceBlockStorageVolumeV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV1Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - v, err := volumes.Get(blockStorageClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "volume") - } - - // make sure this volume is detached from all instances before deleting - if len(v.Attachments) > 0 { - log.Printf("[DEBUG] detaching volumes") - if computeClient, err := config.computeV2Client(GetRegion(d, config)); err != nil { - return err - } else { - for _, volumeAttachment := range v.Attachments { - log.Printf("[DEBUG] Attachment: %v", volumeAttachment) - if err := volumeattach.Delete(computeClient, volumeAttachment["server_id"].(string), volumeAttachment["id"].(string)).ExtractErr(); err != nil { - return err - } - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"in-use", "attaching", "detaching"}, - Target: []string{"available"}, - Refresh: VolumeV1StateRefreshFunc(blockStorageClient, d.Id()), - Timeout: 10 * 
time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to become available: %s", - d.Id(), err) - } - } - } - - // It's possible that this volume was used as a boot device and is currently - // in a "deleting" state from when the instance was terminated. - // If this is true, just move on. It'll eventually delete. - if v.Status != "deleting" { - if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil { - return CheckDeleted(d, err, "volume") - } - } - - // Wait for the volume to delete before moving on. - log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting", "downloading", "available"}, - Target: []string{"deleted"}, - Refresh: VolumeV1StateRefreshFunc(blockStorageClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to delete: %s", - d.Id(), err) - } - - d.SetId("") - return nil -} - -func resourceVolumeMetadataV1(d *schema.ResourceData) map[string]string { - m := make(map[string]string) - for key, val := range d.Get("metadata").(map[string]interface{}) { - m[key] = val.(string) - } - return m -} - -// VolumeV1StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an OpenStack volume. -func VolumeV1StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - v, err := volumes.Get(client, volumeID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return v, "deleted", nil - } - return nil, "", err - } - - if v.Status == "error" { - return v, v.Status, fmt.Errorf("There was an error creating the volume. 
" + - "Please check with your cloud admin or check the Block Storage " + - "API logs to see why this error occurred.") - } - - return v, v.Status, nil - } -} - -func resourceVolumeAttachmentHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if m["instance_id"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["instance_id"].(string))) - } - return hashcode.String(buf.String()) -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v2.go deleted file mode 100644 index ea035cf6f5f..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v2.go +++ /dev/null @@ -1,351 +0,0 @@ -package openstack - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceBlockStorageVolumeV2() *schema.Resource { - return &schema.Resource{ - Create: resourceBlockStorageVolumeV2Create, - Read: resourceBlockStorageVolumeV2Read, - Update: resourceBlockStorageVolumeV2Update, - Delete: resourceBlockStorageVolumeV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "size": &schema.Schema{ - Type: 
schema.TypeInt, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "availability_zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: false, - Computed: true, - }, - "snapshot_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "source_vol_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "image_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "volume_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "consistency_group_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "source_replica": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "attachment": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "device": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Set: resourceVolumeV2AttachmentHash, - }, - }, - } -} - -func resourceBlockStorageVolumeV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - createOpts := &volumes.CreateOpts{ - AvailabilityZone: d.Get("availability_zone").(string), - ConsistencyGroupID: 
d.Get("consistency_group_id").(string), - Description: d.Get("description").(string), - ImageID: d.Get("image_id").(string), - Metadata: resourceContainerMetadataV2(d), - Name: d.Get("name").(string), - Size: d.Get("size").(int), - SnapshotID: d.Get("snapshot_id").(string), - SourceReplica: d.Get("source_replica").(string), - SourceVolID: d.Get("source_vol_id").(string), - VolumeType: d.Get("volume_type").(string), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - v, err := volumes.Create(blockStorageClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack volume: %s", err) - } - log.Printf("[INFO] Volume ID: %s", v.ID) - - // Wait for the volume to become available. - log.Printf( - "[DEBUG] Waiting for volume (%s) to become available", - v.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"downloading", "creating"}, - Target: []string{"available"}, - Refresh: VolumeV2StateRefreshFunc(blockStorageClient, v.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to become ready: %s", - v.ID, err) - } - - // Store the ID now - d.SetId(v.ID) - - return resourceBlockStorageVolumeV2Read(d, meta) -} - -func resourceBlockStorageVolumeV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - v, err := volumes.Get(blockStorageClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "volume") - } - - log.Printf("[DEBUG] Retrieved volume %s: %+v", d.Id(), v) - - d.Set("size", v.Size) - d.Set("description", v.Description) - d.Set("availability_zone", v.AvailabilityZone) - d.Set("name", v.Name) - d.Set("snapshot_id", 
v.SnapshotID) - d.Set("source_vol_id", v.SourceVolID) - d.Set("volume_type", v.VolumeType) - d.Set("metadata", v.Metadata) - d.Set("region", GetRegion(d, config)) - - attachments := make([]map[string]interface{}, len(v.Attachments)) - for i, attachment := range v.Attachments { - attachments[i] = make(map[string]interface{}) - attachments[i]["id"] = attachment.ID - attachments[i]["instance_id"] = attachment.ServerID - attachments[i]["device"] = attachment.Device - log.Printf("[DEBUG] attachment: %v", attachment) - } - d.Set("attachment", attachments) - - return nil -} - -func resourceBlockStorageVolumeV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - updateOpts := volumes.UpdateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - } - - if d.HasChange("metadata") { - updateOpts.Metadata = resourceVolumeMetadataV2(d) - } - - _, err = volumes.Update(blockStorageClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack volume: %s", err) - } - - return resourceBlockStorageVolumeV2Read(d, meta) -} - -func resourceBlockStorageVolumeV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - v, err := volumes.Get(blockStorageClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "volume") - } - - // make sure this volume is detached from all instances before deleting - if len(v.Attachments) > 0 { - log.Printf("[DEBUG] detaching volumes") - if computeClient, err := config.computeV2Client(GetRegion(d, config)); err != nil { - return err - } else { - for _, 
volumeAttachment := range v.Attachments { - log.Printf("[DEBUG] Attachment: %v", volumeAttachment) - if err := volumeattach.Delete(computeClient, volumeAttachment.ServerID, volumeAttachment.ID).ExtractErr(); err != nil { - return err - } - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"in-use", "attaching", "detaching"}, - Target: []string{"available"}, - Refresh: VolumeV2StateRefreshFunc(blockStorageClient, d.Id()), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to become available: %s", - d.Id(), err) - } - } - } - - // It's possible that this volume was used as a boot device and is currently - // in a "deleting" state from when the instance was terminated. - // If this is true, just move on. It'll eventually delete. - if v.Status != "deleting" { - if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil { - return CheckDeleted(d, err, "volume") - } - } - - // Wait for the volume to delete before moving on. 
- log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting", "downloading", "available"}, - Target: []string{"deleted"}, - Refresh: VolumeV2StateRefreshFunc(blockStorageClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to delete: %s", - d.Id(), err) - } - - d.SetId("") - return nil -} - -func resourceVolumeMetadataV2(d *schema.ResourceData) map[string]string { - m := make(map[string]string) - for key, val := range d.Get("metadata").(map[string]interface{}) { - m[key] = val.(string) - } - return m -} - -// VolumeV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an OpenStack volume. -func VolumeV2StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - v, err := volumes.Get(client, volumeID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return v, "deleted", nil - } - return nil, "", err - } - - if v.Status == "error" { - return v, v.Status, fmt.Errorf("There was an error creating the volume. 
" + - "Please check with your cloud admin or check the Block Storage " + - "API logs to see why this error occurred.") - } - - return v, v.Status, nil - } -} - -func resourceVolumeV2AttachmentHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if m["instance_id"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["instance_id"].(string))) - } - return hashcode.String(buf.String()) -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_flavor_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_flavor_v2.go deleted file mode 100644 index ce48b8f6e45..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_flavor_v2.go +++ /dev/null @@ -1,143 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeFlavorV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeFlavorV2Create, - Read: resourceComputeFlavorV2Read, - Delete: resourceComputeFlavorV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "ram": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "vcpus": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "disk": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "swap": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "rx_tx_factor": &schema.Schema{ - Type: schema.TypeFloat, 
- Optional: true, - ForceNew: true, - Default: 1, - }, - "is_public": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "ephemeral": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceComputeFlavorV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - disk := d.Get("disk").(int) - swap := d.Get("swap").(int) - isPublic := d.Get("is_public").(bool) - ephemeral := d.Get("ephemeral").(int) - createOpts := flavors.CreateOpts{ - Name: d.Get("name").(string), - RAM: d.Get("ram").(int), - VCPUs: d.Get("vcpus").(int), - Disk: &disk, - Swap: &swap, - RxTxFactor: d.Get("rx_tx_factor").(float64), - IsPublic: &isPublic, - Ephemeral: &ephemeral, - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - fl, err := flavors.Create(computeClient, &createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack flavor: %s", err) - } - - d.SetId(fl.ID) - - return resourceComputeFlavorV2Read(d, meta) -} - -func resourceComputeFlavorV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - fl, err := flavors.Get(computeClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "flavor") - } - - d.Set("name", fl.Name) - d.Set("ram", fl.RAM) - d.Set("vcpus", fl.VCPUs) - d.Set("disk", fl.Disk) - d.Set("swap", fl.Swap) - d.Set("rx_tx_factor", fl.RxTxFactor) - d.Set("is_public", fl.IsPublic) - // d.Set("ephemeral", fl.Ephemeral) TODO: Implement this in gophercloud - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceComputeFlavorV2Delete(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - err = flavors.Delete(computeClient, d.Id()).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting OpenStack flavor: %s", err) - } - d.SetId("") - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_associate_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_associate_v2.go deleted file mode 100644 index 4f0d70345c9..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_associate_v2.go +++ /dev/null @@ -1,235 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strings" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - nfloatingips "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeFloatingIPAssociateV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeFloatingIPAssociateV2Create, - Read: resourceComputeFloatingIPAssociateV2Read, - Delete: resourceComputeFloatingIPAssociateV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "floating_ip": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - 
ForceNew: true, - }, - "fixed_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceComputeFloatingIPAssociateV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - floatingIP := d.Get("floating_ip").(string) - fixedIP := d.Get("fixed_ip").(string) - instanceId := d.Get("instance_id").(string) - - associateOpts := floatingips.AssociateOpts{ - FloatingIP: floatingIP, - FixedIP: fixedIP, - } - log.Printf("[DEBUG] Associate Options: %#v", associateOpts) - - err = floatingips.AssociateInstance(computeClient, instanceId, associateOpts).ExtractErr() - if err != nil { - return fmt.Errorf("Error associating Floating IP: %s", err) - } - - // There's an API call to get this information, but it has been - // deprecated. The Neutron API could be used, but I'm trying not - // to mix service APIs. Therefore, a faux ID will be used. - id := fmt.Sprintf("%s/%s/%s", floatingIP, instanceId, fixedIP) - d.SetId(id) - - // This API call is synchronous, so Create won't return until the IP - // is attached. No need to wait for a state. - - return resourceComputeFloatingIPAssociateV2Read(d, meta) -} - -func resourceComputeFloatingIPAssociateV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - // Obtain relevant info from parsing the ID - floatingIP, instanceId, fixedIP, err := parseComputeFloatingIPAssociateId(d.Id()) - if err != nil { - return err - } - - // Now check and see whether the floating IP still exists. - // First try to do this by querying the Network API. 
- networkEnabled := true - networkClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - networkEnabled = false - } - - var exists bool - if networkEnabled { - log.Printf("[DEBUG] Checking for Floating IP existence via Network API") - exists, err = resourceComputeFloatingIPAssociateV2NetworkExists(networkClient, floatingIP) - } else { - log.Printf("[DEBUG] Checking for Floating IP existence via Compute API") - exists, err = resourceComputeFloatingIPAssociateV2ComputeExists(computeClient, floatingIP) - } - - if err != nil { - return err - } - - if !exists { - d.SetId("") - } - - // Next, see if the instance still exists - instance, err := servers.Get(computeClient, instanceId).Extract() - if err != nil { - if CheckDeleted(d, err, "instance") == nil { - return nil - } - } - - // Finally, check and see if the floating ip is still associated with the instance. - var associated bool - for _, networkAddresses := range instance.Addresses { - for _, element := range networkAddresses.([]interface{}) { - address := element.(map[string]interface{}) - if address["OS-EXT-IPS:type"] == "floating" && address["addr"] == floatingIP { - associated = true - } - } - } - - if !associated { - d.SetId("") - } - - // Set the attributes pulled from the composed resource ID - d.Set("floating_ip", floatingIP) - d.Set("instance_id", instanceId) - d.Set("fixed_ip", fixedIP) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceComputeFloatingIPAssociateV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - floatingIP := d.Get("floating_ip").(string) - instanceId := d.Get("instance_id").(string) - - disassociateOpts := floatingips.DisassociateOpts{ - FloatingIP: floatingIP, - } - log.Printf("[DEBUG] Disssociate Options: %#v", disassociateOpts) - - err = 
floatingips.DisassociateInstance(computeClient, instanceId, disassociateOpts).ExtractErr() - if err != nil { - return CheckDeleted(d, err, "floating ip association") - } - - return nil -} - -func parseComputeFloatingIPAssociateId(id string) (string, string, string, error) { - idParts := strings.Split(id, "/") - if len(idParts) < 3 { - return "", "", "", fmt.Errorf("Unable to determine floating ip association ID") - } - - floatingIP := idParts[0] - instanceId := idParts[1] - fixedIP := idParts[2] - - return floatingIP, instanceId, fixedIP, nil -} - -func resourceComputeFloatingIPAssociateV2NetworkExists(networkClient *gophercloud.ServiceClient, floatingIP string) (bool, error) { - listOpts := nfloatingips.ListOpts{ - FloatingIP: floatingIP, - } - allPages, err := nfloatingips.List(networkClient, listOpts).AllPages() - if err != nil { - return false, err - } - - allFips, err := nfloatingips.ExtractFloatingIPs(allPages) - if err != nil { - return false, err - } - - if len(allFips) > 1 { - return false, fmt.Errorf("There was a problem retrieving the floating IP") - } - - if len(allFips) == 0 { - return false, nil - } - - return true, nil -} - -func resourceComputeFloatingIPAssociateV2ComputeExists(computeClient *gophercloud.ServiceClient, floatingIP string) (bool, error) { - // If the Network API isn't available, fall back to the deprecated Compute API. 
- allPages, err := floatingips.List(computeClient).AllPages() - if err != nil { - return false, err - } - - allFips, err := floatingips.ExtractFloatingIPs(allPages) - if err != nil { - return false, err - } - - for _, f := range allFips { - if f.IP == floatingIP { - return true, nil - } - } - - return false, nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_v2.go deleted file mode 100644 index 9f85850bc84..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_v2.go +++ /dev/null @@ -1,111 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeFloatingIPV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeFloatingIPV2Create, - Read: resourceComputeFloatingIPV2Read, - Update: nil, - Delete: resourceComputeFloatingIPV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "pool": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_POOL_NAME", nil), - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "fixed_ip": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeFloatingIPV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := 
config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - createOpts := &floatingips.CreateOpts{ - Pool: d.Get("pool").(string), - } - log.Printf("[DEBUG] Create Options: %#v", createOpts) - newFip, err := floatingips.Create(computeClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating Floating IP: %s", err) - } - - d.SetId(newFip.ID) - - return resourceComputeFloatingIPV2Read(d, meta) -} - -func resourceComputeFloatingIPV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - fip, err := floatingips.Get(computeClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "floating ip") - } - - log.Printf("[DEBUG] Retrieved Floating IP %s: %+v", d.Id(), fip) - - d.Set("pool", fip.Pool) - d.Set("instance_id", fip.InstanceID) - d.Set("address", fip.IP) - d.Set("fixed_ip", fip.FixedIP) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceComputeFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - log.Printf("[DEBUG] Deleting Floating IP %s", d.Id()) - if err := floatingips.Delete(computeClient, d.Id()).ExtractErr(); err != nil { - return fmt.Errorf("Error deleting Floating IP: %s", err) - } - - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_instance_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_instance_v2.go deleted file mode 100644 index adcf0b96a43..00000000000 --- 
a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_instance_v2.go +++ /dev/null @@ -1,1082 +0,0 @@ -package openstack - -import ( - "bytes" - "crypto/sha1" - "encoding/hex" - "fmt" - "log" - "os" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop" - "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" - "github.com/gophercloud/gophercloud/openstack/compute/v2/images" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeInstanceV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeInstanceV2Create, - Read: resourceComputeInstanceV2Read, - Update: resourceComputeInstanceV2Update, - Delete: resourceComputeInstanceV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "image_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "image_name": 
&schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "flavor_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - DefaultFunc: schema.EnvDefaultFunc("OS_FLAVOR_ID", nil), - }, - "flavor_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - DefaultFunc: schema.EnvDefaultFunc("OS_FLAVOR_NAME", nil), - }, - "floating_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Removed: "Use the openstack_compute_floatingip_associate_v2 resource instead", - }, - "user_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - // just stash the hash for state & diff comparisons - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - hash := sha1.Sum([]byte(v.(string))) - return hex.EncodeToString(hash[:]) - default: - return "" - } - }, - }, - "security_groups": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "availability_zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "network": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uuid": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "port": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "fixed_ip_v4": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "fixed_ip_v6": &schema.Schema{ - Type: schema.TypeString, - Optional: 
true, - ForceNew: true, - Computed: true, - }, - "floating_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Use the openstack_compute_floatingip_associate_v2 resource instead", - }, - "mac": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "access_network": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - }, - }, - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: false, - }, - "config_drive": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "admin_pass": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "access_ip_v4": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: false, - }, - "access_ip_v6": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: false, - }, - "key_pair": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "block_device": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "uuid": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "volume_size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "destination_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "boot_index": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "delete_on_termination": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - "guest_format": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - "volume": &schema.Schema{ - Type: schema.TypeSet, - 
Optional: true, - Removed: "Use block_device or openstack_compute_volume_attach_v2 instead", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "volume_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "device": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - }, - "scheduler_hints": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "different_host": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "same_host": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "query": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "target_cell": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "build_near_host_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - Set: resourceComputeSchedulerHintsHash, - }, - "personality": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "file": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "content": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - Set: resourceComputeInstancePersonalityHash, - }, - "stop_before_destroy": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "force_delete": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "all_metadata": &schema.Schema{ - 
Type: schema.TypeMap, - Computed: true, - }, - }, - } -} - -func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - var createOpts servers.CreateOptsBuilder - - // Determines the Image ID using the following rules: - // If a bootable block_device was specified, ignore the image altogether. - // If an image_id was specified, use it. - // If an image_name was specified, look up the image ID, report if error. - imageId, err := getImageIDFromConfig(computeClient, d) - if err != nil { - return err - } - - flavorId, err := getFlavorID(computeClient, d) - if err != nil { - return err - } - - // determine if block_device configuration is correct - // this includes valid combinations and required attributes - if err := checkBlockDeviceConfig(d); err != nil { - return err - } - - // Build a list of networks with the information given upon creation. - // Error out if an invalid network configuration was used. - allInstanceNetworks, err := getAllInstanceNetworks(d, meta) - if err != nil { - return err - } - - // Build a []servers.Network to pass into the create options. 
- networks := expandInstanceNetworks(allInstanceNetworks) - - configDrive := d.Get("config_drive").(bool) - - createOpts = &servers.CreateOpts{ - Name: d.Get("name").(string), - ImageRef: imageId, - FlavorRef: flavorId, - SecurityGroups: resourceInstanceSecGroupsV2(d), - AvailabilityZone: d.Get("availability_zone").(string), - Networks: networks, - Metadata: resourceInstanceMetadataV2(d), - ConfigDrive: &configDrive, - AdminPass: d.Get("admin_pass").(string), - UserData: []byte(d.Get("user_data").(string)), - Personality: resourceInstancePersonalityV2(d), - } - - if keyName, ok := d.Get("key_pair").(string); ok && keyName != "" { - createOpts = &keypairs.CreateOptsExt{ - CreateOptsBuilder: createOpts, - KeyName: keyName, - } - } - - if vL, ok := d.GetOk("block_device"); ok { - blockDevices, err := resourceInstanceBlockDevicesV2(d, vL.([]interface{})) - if err != nil { - return err - } - - createOpts = &bootfromvolume.CreateOptsExt{ - CreateOptsBuilder: createOpts, - BlockDevice: blockDevices, - } - } - - schedulerHintsRaw := d.Get("scheduler_hints").(*schema.Set).List() - if len(schedulerHintsRaw) > 0 { - log.Printf("[DEBUG] schedulerhints: %+v", schedulerHintsRaw) - schedulerHints := resourceInstanceSchedulerHintsV2(d, schedulerHintsRaw[0].(map[string]interface{})) - createOpts = &schedulerhints.CreateOptsExt{ - CreateOptsBuilder: createOpts, - SchedulerHints: schedulerHints, - } - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - - // If a block_device is used, use the bootfromvolume.Create function as it allows an empty ImageRef. - // Otherwise, use the normal servers.Create function. 
- var server *servers.Server - if _, ok := d.GetOk("block_device"); ok { - server, err = bootfromvolume.Create(computeClient, createOpts).Extract() - } else { - server, err = servers.Create(computeClient, createOpts).Extract() - } - - if err != nil { - return fmt.Errorf("Error creating OpenStack server: %s", err) - } - log.Printf("[INFO] Instance ID: %s", server.ID) - - // Store the ID now - d.SetId(server.ID) - - // Wait for the instance to become running so we can get some attributes - // that aren't available until later. - log.Printf( - "[DEBUG] Waiting for instance (%s) to become running", - server.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"BUILD"}, - Target: []string{"ACTIVE"}, - Refresh: ServerV2StateRefreshFunc(computeClient, server.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for instance (%s) to become ready: %s", - server.ID, err) - } - - return resourceComputeInstanceV2Read(d, meta) -} - -func resourceComputeInstanceV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - server, err := servers.Get(computeClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "server") - } - - log.Printf("[DEBUG] Retrieved Server %s: %+v", d.Id(), server) - - d.Set("name", server.Name) - - // Get the instance network and address information - networks, err := flattenInstanceNetworks(d, meta) - if err != nil { - return err - } - - // Determine the best IPv4 and IPv6 addresses to access the instance with - hostv4, hostv6 := getInstanceAccessAddresses(d, networks) - - // AccessIPv4/v6 isn't standard in OpenStack, but there have been reports - // of them being used in some 
environments. - if server.AccessIPv4 != "" && hostv4 == "" { - hostv4 = server.AccessIPv4 - } - - if server.AccessIPv6 != "" && hostv6 == "" { - hostv6 = server.AccessIPv6 - } - - d.Set("network", networks) - d.Set("access_ip_v4", hostv4) - d.Set("access_ip_v6", hostv6) - - // Determine the best IP address to use for SSH connectivity. - // Prefer IPv4 over IPv6. - var preferredSSHAddress string - if hostv4 != "" { - preferredSSHAddress = hostv4 - } else if hostv6 != "" { - preferredSSHAddress = hostv6 - } - - if preferredSSHAddress != "" { - // Initialize the connection info - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": preferredSSHAddress, - }) - } - - d.Set("all_metadata", server.Metadata) - - secGrpNames := []string{} - for _, sg := range server.SecurityGroups { - secGrpNames = append(secGrpNames, sg["name"].(string)) - } - d.Set("security_groups", secGrpNames) - - flavorId, ok := server.Flavor["id"].(string) - if !ok { - return fmt.Errorf("Error setting OpenStack server's flavor: %v", server.Flavor) - } - d.Set("flavor_id", flavorId) - - flavor, err := flavors.Get(computeClient, flavorId).Extract() - if err != nil { - return err - } - d.Set("flavor_name", flavor.Name) - - // Set the instance's image information appropriately - if err := setImageInformation(computeClient, server, d); err != nil { - return err - } - - // Build a custom struct for the availability zone extension - var serverWithAZ struct { - servers.Server - availabilityzones.ServerExt - } - - // Do another Get so the above work is not disturbed. 
- err = servers.Get(computeClient, d.Id()).ExtractInto(&serverWithAZ) - if err != nil { - return CheckDeleted(d, err, "server") - } - - // Set the availability zone - d.Set("availability_zone", serverWithAZ.AvailabilityZone) - - // Set the region - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - var updateOpts servers.UpdateOpts - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - - if updateOpts != (servers.UpdateOpts{}) { - _, err := servers.Update(computeClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack server: %s", err) - } - } - - if d.HasChange("metadata") { - oldMetadata, newMetadata := d.GetChange("metadata") - var metadataToDelete []string - - // Determine if any metadata keys were removed from the configuration. - // Then request those keys to be deleted. - for oldKey, _ := range oldMetadata.(map[string]interface{}) { - var found bool - for newKey, _ := range newMetadata.(map[string]interface{}) { - if oldKey == newKey { - found = true - } - } - - if !found { - metadataToDelete = append(metadataToDelete, oldKey) - } - } - - for _, key := range metadataToDelete { - err := servers.DeleteMetadatum(computeClient, d.Id(), key).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting metadata (%s) from server (%s): %s", key, d.Id(), err) - } - } - - // Update existing metadata and add any new metadata. 
- metadataOpts := make(servers.MetadataOpts) - for k, v := range newMetadata.(map[string]interface{}) { - metadataOpts[k] = v.(string) - } - - _, err := servers.UpdateMetadata(computeClient, d.Id(), metadataOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack server (%s) metadata: %s", d.Id(), err) - } - } - - if d.HasChange("security_groups") { - oldSGRaw, newSGRaw := d.GetChange("security_groups") - oldSGSet := oldSGRaw.(*schema.Set) - newSGSet := newSGRaw.(*schema.Set) - secgroupsToAdd := newSGSet.Difference(oldSGSet) - secgroupsToRemove := oldSGSet.Difference(newSGSet) - - log.Printf("[DEBUG] Security groups to add: %v", secgroupsToAdd) - - log.Printf("[DEBUG] Security groups to remove: %v", secgroupsToRemove) - - for _, g := range secgroupsToRemove.List() { - err := secgroups.RemoveServer(computeClient, d.Id(), g.(string)).ExtractErr() - if err != nil && err.Error() != "EOF" { - if _, ok := err.(gophercloud.ErrDefault404); ok { - continue - } - - return fmt.Errorf("Error removing security group (%s) from OpenStack server (%s): %s", g, d.Id(), err) - } else { - log.Printf("[DEBUG] Removed security group (%s) from instance (%s)", g, d.Id()) - } - } - - for _, g := range secgroupsToAdd.List() { - err := secgroups.AddServer(computeClient, d.Id(), g.(string)).ExtractErr() - if err != nil && err.Error() != "EOF" { - return fmt.Errorf("Error adding security group (%s) to OpenStack server (%s): %s", g, d.Id(), err) - } - log.Printf("[DEBUG] Added security group (%s) to instance (%s)", g, d.Id()) - } - } - - if d.HasChange("admin_pass") { - if newPwd, ok := d.Get("admin_pass").(string); ok { - err := servers.ChangeAdminPassword(computeClient, d.Id(), newPwd).ExtractErr() - if err != nil { - return fmt.Errorf("Error changing admin password of OpenStack server (%s): %s", d.Id(), err) - } - } - } - - if d.HasChange("flavor_id") || d.HasChange("flavor_name") { - var newFlavorId string - var err error - if d.HasChange("flavor_id") { - newFlavorId 
= d.Get("flavor_id").(string) - } else { - newFlavorName := d.Get("flavor_name").(string) - newFlavorId, err = flavors.IDFromName(computeClient, newFlavorName) - if err != nil { - return err - } - } - - resizeOpts := &servers.ResizeOpts{ - FlavorRef: newFlavorId, - } - log.Printf("[DEBUG] Resize configuration: %#v", resizeOpts) - err = servers.Resize(computeClient, d.Id(), resizeOpts).ExtractErr() - if err != nil { - return fmt.Errorf("Error resizing OpenStack server: %s", err) - } - - // Wait for the instance to finish resizing. - log.Printf("[DEBUG] Waiting for instance (%s) to finish resizing", d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"RESIZE"}, - Target: []string{"VERIFY_RESIZE"}, - Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance (%s) to resize: %s", d.Id(), err) - } - - // Confirm resize. 
- log.Printf("[DEBUG] Confirming resize") - err = servers.ConfirmResize(computeClient, d.Id()).ExtractErr() - if err != nil { - return fmt.Errorf("Error confirming resize of OpenStack server: %s", err) - } - - stateConf = &resource.StateChangeConf{ - Pending: []string{"VERIFY_RESIZE"}, - Target: []string{"ACTIVE"}, - Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance (%s) to confirm resize: %s", d.Id(), err) - } - } - - return resourceComputeInstanceV2Read(d, meta) -} - -func resourceComputeInstanceV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - if d.Get("stop_before_destroy").(bool) { - err = startstop.Stop(computeClient, d.Id()).ExtractErr() - if err != nil { - log.Printf("[WARN] Error stopping OpenStack instance: %s", err) - } else { - stopStateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"SHUTOFF"}, - Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), - Timeout: 3 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - log.Printf("[DEBUG] Waiting for instance (%s) to stop", d.Id()) - _, err = stopStateConf.WaitForState() - if err != nil { - log.Printf("[WARN] Error waiting for instance (%s) to stop: %s, proceeding to delete", d.Id(), err) - } - } - } - - if d.Get("force_delete").(bool) { - log.Printf("[DEBUG] Force deleting OpenStack Instance %s", d.Id()) - err = servers.ForceDelete(computeClient, d.Id()).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting OpenStack server: %s", err) - } - } else { - log.Printf("[DEBUG] Deleting OpenStack Instance %s", d.Id()) - 
err = servers.Delete(computeClient, d.Id()).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting OpenStack server: %s", err) - } - } - - // Wait for the instance to delete before moving on. - log.Printf("[DEBUG] Waiting for instance (%s) to delete", d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "SHUTOFF"}, - Target: []string{"DELETED", "SOFT_DELETED"}, - Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for instance (%s) to delete: %s", - d.Id(), err) - } - - d.SetId("") - return nil -} - -// ServerV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an OpenStack instance. -func ServerV2StateRefreshFunc(client *gophercloud.ServiceClient, instanceID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - s, err := servers.Get(client, instanceID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return s, "DELETED", nil - } - return nil, "", err - } - - return s, s.Status, nil - } -} - -func resourceInstanceSecGroupsV2(d *schema.ResourceData) []string { - rawSecGroups := d.Get("security_groups").(*schema.Set).List() - secgroups := make([]string, len(rawSecGroups)) - for i, raw := range rawSecGroups { - secgroups[i] = raw.(string) - } - return secgroups -} - -func resourceInstanceMetadataV2(d *schema.ResourceData) map[string]string { - m := make(map[string]string) - for key, val := range d.Get("metadata").(map[string]interface{}) { - m[key] = val.(string) - } - return m -} - -func resourceInstanceBlockDevicesV2(d *schema.ResourceData, bds []interface{}) ([]bootfromvolume.BlockDevice, error) { - blockDeviceOpts := make([]bootfromvolume.BlockDevice, len(bds)) - for i, bd := range bds { - bdM := bd.(map[string]interface{}) - 
blockDeviceOpts[i] = bootfromvolume.BlockDevice{ - UUID: bdM["uuid"].(string), - VolumeSize: bdM["volume_size"].(int), - BootIndex: bdM["boot_index"].(int), - DeleteOnTermination: bdM["delete_on_termination"].(bool), - GuestFormat: bdM["guest_format"].(string), - } - - sourceType := bdM["source_type"].(string) - switch sourceType { - case "blank": - blockDeviceOpts[i].SourceType = bootfromvolume.SourceBlank - case "image": - blockDeviceOpts[i].SourceType = bootfromvolume.SourceImage - case "snapshot": - blockDeviceOpts[i].SourceType = bootfromvolume.SourceSnapshot - case "volume": - blockDeviceOpts[i].SourceType = bootfromvolume.SourceVolume - default: - return blockDeviceOpts, fmt.Errorf("unknown block device source type %s", sourceType) - } - - destinationType := bdM["destination_type"].(string) - switch destinationType { - case "local": - blockDeviceOpts[i].DestinationType = bootfromvolume.DestinationLocal - case "volume": - blockDeviceOpts[i].DestinationType = bootfromvolume.DestinationVolume - default: - return blockDeviceOpts, fmt.Errorf("unknown block device destination type %s", destinationType) - } - } - - log.Printf("[DEBUG] Block Device Options: %+v", blockDeviceOpts) - return blockDeviceOpts, nil -} - -func resourceInstanceSchedulerHintsV2(d *schema.ResourceData, schedulerHintsRaw map[string]interface{}) schedulerhints.SchedulerHints { - differentHost := []string{} - if len(schedulerHintsRaw["different_host"].([]interface{})) > 0 { - for _, dh := range schedulerHintsRaw["different_host"].([]interface{}) { - differentHost = append(differentHost, dh.(string)) - } - } - - sameHost := []string{} - if len(schedulerHintsRaw["same_host"].([]interface{})) > 0 { - for _, sh := range schedulerHintsRaw["same_host"].([]interface{}) { - sameHost = append(sameHost, sh.(string)) - } - } - - query := make([]interface{}, len(schedulerHintsRaw["query"].([]interface{}))) - if len(schedulerHintsRaw["query"].([]interface{})) > 0 { - for _, q := range 
schedulerHintsRaw["query"].([]interface{}) { - query = append(query, q.(string)) - } - } - - schedulerHints := schedulerhints.SchedulerHints{ - Group: schedulerHintsRaw["group"].(string), - DifferentHost: differentHost, - SameHost: sameHost, - Query: query, - TargetCell: schedulerHintsRaw["target_cell"].(string), - BuildNearHostIP: schedulerHintsRaw["build_near_host_ip"].(string), - } - - return schedulerHints -} - -func getImageIDFromConfig(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) (string, error) { - // If block_device was used, an Image does not need to be specified, unless an image/local - // combination was used. This emulates normal boot behavior. Otherwise, ignore the image altogether. - if vL, ok := d.GetOk("block_device"); ok { - needImage := false - for _, v := range vL.([]interface{}) { - vM := v.(map[string]interface{}) - if vM["source_type"] == "image" && vM["destination_type"] == "local" { - needImage = true - } - } - if !needImage { - return "", nil - } - } - - if imageId := d.Get("image_id").(string); imageId != "" { - return imageId, nil - } else { - // try the OS_IMAGE_ID environment variable - if v := os.Getenv("OS_IMAGE_ID"); v != "" { - return v, nil - } - } - - imageName := d.Get("image_name").(string) - if imageName == "" { - // try the OS_IMAGE_NAME environment variable - if v := os.Getenv("OS_IMAGE_NAME"); v != "" { - imageName = v - } - } - - if imageName != "" { - imageId, err := images.IDFromName(computeClient, imageName) - if err != nil { - return "", err - } - return imageId, nil - } - - return "", fmt.Errorf("Neither a boot device, image ID, or image name were able to be determined.") -} - -func setImageInformation(computeClient *gophercloud.ServiceClient, server *servers.Server, d *schema.ResourceData) error { - // If block_device was used, an Image does not need to be specified, unless an image/local - // combination was used. This emulates normal boot behavior. Otherwise, ignore the image altogether. 
- if vL, ok := d.GetOk("block_device"); ok { - needImage := false - for _, v := range vL.([]interface{}) { - vM := v.(map[string]interface{}) - if vM["source_type"] == "image" && vM["destination_type"] == "local" { - needImage = true - } - } - if !needImage { - d.Set("image_id", "Attempt to boot from volume - no image supplied") - return nil - } - } - - imageId := server.Image["id"].(string) - if imageId != "" { - d.Set("image_id", imageId) - if image, err := images.Get(computeClient, imageId).Extract(); err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - // If the image name can't be found, set the value to "Image not found". - // The most likely scenario is that the image no longer exists in the Image Service - // but the instance still has a record from when it existed. - d.Set("image_name", "Image not found") - return nil - } - return err - } else { - d.Set("image_name", image.Name) - } - } - - return nil -} - -func getFlavorID(client *gophercloud.ServiceClient, d *schema.ResourceData) (string, error) { - flavorId := d.Get("flavor_id").(string) - - if flavorId != "" { - return flavorId, nil - } - - flavorName := d.Get("flavor_name").(string) - return flavors.IDFromName(client, flavorName) -} - -func resourceComputeSchedulerHintsHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if m["group"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) - } - - if m["target_cell"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["target_cell"].(string))) - } - - if m["build_host_near_ip"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["build_host_near_ip"].(string))) - } - - buf.WriteString(fmt.Sprintf("%s-", m["different_host"].([]interface{}))) - buf.WriteString(fmt.Sprintf("%s-", m["same_host"].([]interface{}))) - buf.WriteString(fmt.Sprintf("%s-", m["query"].([]interface{}))) - - return hashcode.String(buf.String()) -} - -func checkBlockDeviceConfig(d *schema.ResourceData) error { - if vL, ok := 
d.GetOk("block_device"); ok { - for _, v := range vL.([]interface{}) { - vM := v.(map[string]interface{}) - - if vM["source_type"] != "blank" && vM["uuid"] == "" { - return fmt.Errorf("You must specify a uuid for %s block device types", vM["source_type"]) - } - - if vM["source_type"] == "image" && vM["destination_type"] == "volume" { - if vM["volume_size"] == 0 { - return fmt.Errorf("You must specify a volume_size when creating a volume from an image") - } - } - - if vM["source_type"] == "blank" && vM["destination_type"] == "local" { - if vM["volume_size"] == 0 { - return fmt.Errorf("You must specify a volume_size when creating a blank block device") - } - } - } - } - - return nil -} - -func resourceComputeInstancePersonalityHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["file"].(string))) - - return hashcode.String(buf.String()) -} - -func resourceInstancePersonalityV2(d *schema.ResourceData) servers.Personality { - var personalities servers.Personality - - if v := d.Get("personality"); v != nil { - personalityList := v.(*schema.Set).List() - if len(personalityList) > 0 { - for _, p := range personalityList { - rawPersonality := p.(map[string]interface{}) - file := servers.File{ - Path: rawPersonality["file"].(string), - Contents: []byte(rawPersonality["content"].(string)), - } - - log.Printf("[DEBUG] OpenStack Compute Instance Personality: %+v", file) - - personalities = append(personalities, &file) - } - } - } - - return personalities -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_keypair_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_keypair_v2.go deleted file mode 100644 index 5d2da47b355..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_keypair_v2.go +++ /dev/null @@ -1,105 +0,0 
@@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeKeypairV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeKeypairV2Create, - Read: resourceComputeKeypairV2Read, - Delete: resourceComputeKeypairV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "public_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceComputeKeypairV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - createOpts := KeyPairCreateOpts{ - keypairs.CreateOpts{ - Name: d.Get("name").(string), - PublicKey: d.Get("public_key").(string), - }, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - kp, err := keypairs.Create(computeClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack keypair: %s", err) - } - - d.SetId(kp.Name) - - return resourceComputeKeypairV2Read(d, meta) -} - -func resourceComputeKeypairV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - kp, err := keypairs.Get(computeClient, d.Id()).Extract() - if 
err != nil { - return CheckDeleted(d, err, "keypair") - } - - d.Set("name", kp.Name) - d.Set("public_key", kp.PublicKey) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceComputeKeypairV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - err = keypairs.Delete(computeClient, d.Id()).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting OpenStack keypair: %s", err) - } - d.SetId("") - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_secgroup_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_secgroup_v2.go deleted file mode 100644 index e3cb3f6dcbe..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_secgroup_v2.go +++ /dev/null @@ -1,398 +0,0 @@ -package openstack - -import ( - "bytes" - "fmt" - "log" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeSecGroupV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeSecGroupV2Create, - Read: resourceComputeSecGroupV2Read, - Update: resourceComputeSecGroupV2Update, - Delete: resourceComputeSecGroupV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - 
Computed: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "rule": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "from_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: false, - }, - "to_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: false, - }, - "ip_protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "cidr": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - StateFunc: func(v interface{}) string { - return strings.ToLower(v.(string)) - }, - }, - "from_group_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "self": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: false, - }, - }, - }, - Set: secgroupRuleV2Hash, - }, - }, - } -} - -func resourceComputeSecGroupV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - // Before creating the security group, make sure all rules are valid. - if err := checkSecGroupV2RulesForErrors(d); err != nil { - return err - } - - // If all rules are valid, proceed with creating the security gruop. 
- createOpts := secgroups.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - sg, err := secgroups.Create(computeClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack security group: %s", err) - } - - d.SetId(sg.ID) - - // Now that the security group has been created, iterate through each rule and create it - createRuleOptsList := resourceSecGroupRulesV2(d) - for _, createRuleOpts := range createRuleOptsList { - _, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack security group rule: %s", err) - } - } - - return resourceComputeSecGroupV2Read(d, meta) -} - -func resourceComputeSecGroupV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - sg, err := secgroups.Get(computeClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "security group") - } - - d.Set("name", sg.Name) - d.Set("description", sg.Description) - - rtm, err := rulesToMap(computeClient, d, sg.Rules) - if err != nil { - return err - } - log.Printf("[DEBUG] rulesToMap(sg.Rules): %+v", rtm) - d.Set("rule", rtm) - - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceComputeSecGroupV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - updateOpts := secgroups.UpdateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - } - - log.Printf("[DEBUG] Updating Security Group (%s) with options: %+v", d.Id(), updateOpts) - - _, err = 
secgroups.Update(computeClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack security group (%s): %s", d.Id(), err) - } - - if d.HasChange("rule") { - oldSGRaw, newSGRaw := d.GetChange("rule") - oldSGRSet, newSGRSet := oldSGRaw.(*schema.Set), newSGRaw.(*schema.Set) - secgrouprulesToAdd := newSGRSet.Difference(oldSGRSet) - secgrouprulesToRemove := oldSGRSet.Difference(newSGRSet) - - log.Printf("[DEBUG] Security group rules to add: %v", secgrouprulesToAdd) - log.Printf("[DEBUG] Security groups rules to remove: %v", secgrouprulesToRemove) - - for _, rawRule := range secgrouprulesToAdd.List() { - createRuleOpts := resourceSecGroupRuleCreateOptsV2(d, rawRule) - rule, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract() - if err != nil { - return fmt.Errorf("Error adding rule to OpenStack security group (%s): %s", d.Id(), err) - } - log.Printf("[DEBUG] Added rule (%s) to OpenStack security group (%s) ", rule.ID, d.Id()) - } - - for _, r := range secgrouprulesToRemove.List() { - rule := resourceSecGroupRuleV2(d, r) - err := secgroups.DeleteRule(computeClient, rule.ID).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - continue - } - - return fmt.Errorf("Error removing rule (%s) from OpenStack security group (%s)", rule.ID, d.Id()) - } else { - log.Printf("[DEBUG] Removed rule (%s) from OpenStack security group (%s): %s", rule.ID, d.Id(), err) - } - } - } - - return resourceComputeSecGroupV2Read(d, meta) -} - -func resourceComputeSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: SecGroupV2StateRefreshFunc(computeClient, d), - Timeout: 
d.Timeout(schema.TimeoutDelete), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack security group: %s", err) - } - - d.SetId("") - return nil -} - -func resourceSecGroupRulesV2(d *schema.ResourceData) []secgroups.CreateRuleOpts { - rawRules := d.Get("rule").(*schema.Set).List() - createRuleOptsList := make([]secgroups.CreateRuleOpts, len(rawRules)) - for i, rawRule := range rawRules { - createRuleOptsList[i] = resourceSecGroupRuleCreateOptsV2(d, rawRule) - } - return createRuleOptsList -} - -func resourceSecGroupRuleCreateOptsV2(d *schema.ResourceData, rawRule interface{}) secgroups.CreateRuleOpts { - rawRuleMap := rawRule.(map[string]interface{}) - groupId := rawRuleMap["from_group_id"].(string) - if rawRuleMap["self"].(bool) { - groupId = d.Id() - } - return secgroups.CreateRuleOpts{ - ParentGroupID: d.Id(), - FromPort: rawRuleMap["from_port"].(int), - ToPort: rawRuleMap["to_port"].(int), - IPProtocol: rawRuleMap["ip_protocol"].(string), - CIDR: rawRuleMap["cidr"].(string), - FromGroupID: groupId, - } -} - -func checkSecGroupV2RulesForErrors(d *schema.ResourceData) error { - rawRules := d.Get("rule").(*schema.Set).List() - for _, rawRule := range rawRules { - rawRuleMap := rawRule.(map[string]interface{}) - - // only one of cidr, from_group_id, or self can be set - cidr := rawRuleMap["cidr"].(string) - groupId := rawRuleMap["from_group_id"].(string) - self := rawRuleMap["self"].(bool) - errorMessage := fmt.Errorf("Only one of cidr, from_group_id, or self can be set.") - - // if cidr is set, from_group_id and self cannot be set - if cidr != "" { - if groupId != "" || self { - return errorMessage - } - } - - // if from_group_id is set, cidr and self cannot be set - if groupId != "" { - if cidr != "" || self { - return errorMessage - } - } - - // if self is set, cidr and from_group_id cannot be set - if self { - if cidr != "" || groupId != "" { - return 
errorMessage - } - } - } - - return nil -} - -func resourceSecGroupRuleV2(d *schema.ResourceData, rawRule interface{}) secgroups.Rule { - rawRuleMap := rawRule.(map[string]interface{}) - return secgroups.Rule{ - ID: rawRuleMap["id"].(string), - ParentGroupID: d.Id(), - FromPort: rawRuleMap["from_port"].(int), - ToPort: rawRuleMap["to_port"].(int), - IPProtocol: rawRuleMap["ip_protocol"].(string), - IPRange: secgroups.IPRange{CIDR: rawRuleMap["cidr"].(string)}, - } -} - -func rulesToMap(computeClient *gophercloud.ServiceClient, d *schema.ResourceData, sgrs []secgroups.Rule) ([]map[string]interface{}, error) { - sgrMap := make([]map[string]interface{}, len(sgrs)) - for i, sgr := range sgrs { - groupId := "" - self := false - if sgr.Group.Name != "" { - if sgr.Group.Name == d.Get("name").(string) { - self = true - } else { - // Since Nova only returns the secgroup Name (and not the ID) for the group attribute, - // we need to look up all security groups and match the name. - // Nevermind that Nova wants the ID when setting the Group *and* that multiple groups - // with the same name can exist... 
- allPages, err := secgroups.List(computeClient).AllPages() - if err != nil { - return nil, err - } - securityGroups, err := secgroups.ExtractSecurityGroups(allPages) - if err != nil { - return nil, err - } - - for _, sg := range securityGroups { - if sg.Name == sgr.Group.Name { - groupId = sg.ID - } - } - } - } - - sgrMap[i] = map[string]interface{}{ - "id": sgr.ID, - "from_port": sgr.FromPort, - "to_port": sgr.ToPort, - "ip_protocol": sgr.IPProtocol, - "cidr": sgr.IPRange.CIDR, - "self": self, - "from_group_id": groupId, - } - } - return sgrMap, nil -} - -func secgroupRuleV2Hash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["ip_protocol"].(string))) - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["cidr"].(string)))) - buf.WriteString(fmt.Sprintf("%s-", m["from_group_id"].(string))) - buf.WriteString(fmt.Sprintf("%t-", m["self"].(bool))) - - return hashcode.String(buf.String()) -} - -func SecGroupV2StateRefreshFunc(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete Security Group %s.\n", d.Id()) - - err := secgroups.Delete(computeClient, d.Id()).ExtractErr() - if err != nil { - return nil, "", err - } - - s, err := secgroups.Get(computeClient, d.Id()).Extract() - if err != nil { - err = CheckDeleted(d, err, "Security Group") - if err != nil { - return s, "", err - } else { - log.Printf("[DEBUG] Successfully deleted Security Group %s", d.Id()) - return s, "DELETED", nil - } - } - - log.Printf("[DEBUG] Security Group %s still active.\n", d.Id()) - return s, "ACTIVE", nil - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_servergroup_v2.go 
b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_servergroup_v2.go deleted file mode 100644 index 45e8993ac32..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_servergroup_v2.go +++ /dev/null @@ -1,138 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeServerGroupV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeServerGroupV2Create, - Read: resourceComputeServerGroupV2Read, - Update: nil, - Delete: resourceComputeServerGroupV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - "policies": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "members": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceComputeServerGroupV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - createOpts := ServerGroupCreateOpts{ - servergroups.CreateOpts{ - Name: d.Get("name").(string), - Policies: resourceServerGroupPoliciesV2(d), - }, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - newSG, 
err := servergroups.Create(computeClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating ServerGroup: %s", err) - } - - d.SetId(newSG.ID) - - return resourceComputeServerGroupV2Read(d, meta) -} - -func resourceComputeServerGroupV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - sg, err := servergroups.Get(computeClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "server group") - } - - log.Printf("[DEBUG] Retrieved ServerGroup %s: %+v", d.Id(), sg) - - // Set the name - d.Set("name", sg.Name) - - // Set the policies - policies := []string{} - for _, p := range sg.Policies { - policies = append(policies, p) - } - d.Set("policies", policies) - - // Set the members - members := []string{} - for _, m := range sg.Members { - members = append(members, m) - } - d.Set("members", members) - - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceComputeServerGroupV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - log.Printf("[DEBUG] Deleting ServerGroup %s", d.Id()) - if err := servergroups.Delete(computeClient, d.Id()).ExtractErr(); err != nil { - return fmt.Errorf("Error deleting ServerGroup: %s", err) - } - - return nil -} - -func resourceServerGroupPoliciesV2(d *schema.ResourceData) []string { - rawPolicies := d.Get("policies").([]interface{}) - policies := make([]string, len(rawPolicies)) - for i, raw := range rawPolicies { - policies[i] = raw.(string) - } - return policies -} diff --git 
a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_volume_attach_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_volume_attach_v2.go deleted file mode 100644 index fa517414b4c..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_volume_attach_v2.go +++ /dev/null @@ -1,222 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeVolumeAttachV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeVolumeAttachV2Create, - Read: resourceComputeVolumeAttachV2Read, - Delete: resourceComputeVolumeAttachV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "volume_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "device": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - }, - } -} - -func resourceComputeVolumeAttachV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - instanceId := 
d.Get("instance_id").(string) - volumeId := d.Get("volume_id").(string) - - var device string - if v, ok := d.GetOk("device"); ok { - device = v.(string) - } - - attachOpts := volumeattach.CreateOpts{ - Device: device, - VolumeID: volumeId, - } - - log.Printf("[DEBUG] Creating volume attachment: %#v", attachOpts) - - attachment, err := volumeattach.Create(computeClient, instanceId, attachOpts).Extract() - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ATTACHING"}, - Target: []string{"ATTACHED"}, - Refresh: resourceComputeVolumeAttachV2AttachFunc(computeClient, instanceId, attachment.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 30 * time.Second, - MinTimeout: 15 * time.Second, - } - - if _, err = stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error attaching OpenStack volume: %s", err) - } - - log.Printf("[DEBUG] Created volume attachment: %#v", attachment) - - // Use the instance ID and attachment ID as the resource ID. - // This is because an attachment cannot be retrieved just by its ID alone. 
- id := fmt.Sprintf("%s/%s", instanceId, attachment.ID) - - d.SetId(id) - - return resourceComputeVolumeAttachV2Read(d, meta) -} - -func resourceComputeVolumeAttachV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - instanceId, attachmentId, err := parseComputeVolumeAttachmentId(d.Id()) - if err != nil { - return err - } - - attachment, err := volumeattach.Get(computeClient, instanceId, attachmentId).Extract() - if err != nil { - return CheckDeleted(d, err, "compute_volume_attach") - } - - log.Printf("[DEBUG] Retrieved volume attachment: %#v", attachment) - - d.Set("instance_id", attachment.ServerID) - d.Set("volume_id", attachment.VolumeID) - d.Set("device", attachment.Device) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceComputeVolumeAttachV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - instanceId, attachmentId, err := parseComputeVolumeAttachmentId(d.Id()) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{""}, - Target: []string{"DETACHED"}, - Refresh: resourceComputeVolumeAttachV2DetachFunc(computeClient, instanceId, attachmentId), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 15 * time.Second, - MinTimeout: 15 * time.Second, - } - - if _, err = stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error detaching OpenStack volume: %s", err) - } - - return nil -} - -func resourceComputeVolumeAttachV2AttachFunc( - computeClient *gophercloud.ServiceClient, instanceId, attachmentId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - va, err := 
volumeattach.Get(computeClient, instanceId, attachmentId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return va, "ATTACHING", nil - } - return va, "", err - } - - return va, "ATTACHED", nil - } -} - -func resourceComputeVolumeAttachV2DetachFunc( - computeClient *gophercloud.ServiceClient, instanceId, attachmentId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to detach OpenStack volume %s from instance %s", - attachmentId, instanceId) - - va, err := volumeattach.Get(computeClient, instanceId, attachmentId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return va, "DETACHED", nil - } - return va, "", err - } - - err = volumeattach.Delete(computeClient, instanceId, attachmentId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return va, "DETACHED", nil - } - - if _, ok := err.(gophercloud.ErrDefault400); ok { - return nil, "", nil - } - - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Volume Attachment (%s) is still active.", attachmentId) - return nil, "", nil - } -} - -func parseComputeVolumeAttachmentId(id string) (string, string, error) { - idParts := strings.Split(id, "/") - if len(idParts) < 2 { - return "", "", fmt.Errorf("Unable to determine volume attachment ID") - } - - instanceId := idParts[0] - attachmentId := idParts[1] - - return instanceId, attachmentId, nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_recordset_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_recordset_v2.go deleted file mode 100644 index 1a7173b1d31..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_recordset_v2.go +++ /dev/null @@ -1,276 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - 
"strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDNSRecordSetV2() *schema.Resource { - return &schema.Resource{ - Create: resourceDNSRecordSetV2Create, - Read: resourceDNSRecordSetV2Read, - Update: resourceDNSRecordSetV2Update, - Delete: resourceDNSRecordSetV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "zone_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "records": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: false, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceDNSRecordSetV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - recordsraw := d.Get("records").([]interface{}) - records := 
make([]string, len(recordsraw)) - for i, recordraw := range recordsraw { - records[i] = recordraw.(string) - } - - createOpts := RecordSetCreateOpts{ - recordsets.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Records: records, - TTL: d.Get("ttl").(int), - Type: d.Get("type").(string), - }, - MapValueSpecs(d), - } - - zoneID := d.Get("zone_id").(string) - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - n, err := recordsets.Create(dnsClient, zoneID, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS record set: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS record set (%s) to become available", n.ID) - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Pending: []string{"PENDING"}, - Refresh: waitForDNSRecordSet(dnsClient, zoneID, n.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - id := fmt.Sprintf("%s/%s", zoneID, n.ID) - d.SetId(id) - - log.Printf("[DEBUG] Created OpenStack DNS record set %s: %#v", n.ID, n) - return resourceDNSRecordSetV2Read(d, meta) -} - -func resourceDNSRecordSetV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - // Obtain relevant info from parsing the ID - zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) - if err != nil { - return err - } - - n, err := recordsets.Get(dnsClient, zoneID, recordsetID).Extract() - if err != nil { - return CheckDeleted(d, err, "record_set") - } - - log.Printf("[DEBUG] Retrieved record set %s: %#v", recordsetID, n) - - d.Set("name", n.Name) - d.Set("description", n.Description) - d.Set("ttl", n.TTL) - d.Set("type", n.Type) - d.Set("records", n.Records) - d.Set("region", GetRegion(d, config)) - 
d.Set("zone_id", zoneID) - - return nil -} - -func resourceDNSRecordSetV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - var updateOpts recordsets.UpdateOpts - if d.HasChange("ttl") { - updateOpts.TTL = d.Get("ttl").(int) - } - - if d.HasChange("records") { - recordsraw := d.Get("records").([]interface{}) - records := make([]string, len(recordsraw)) - for i, recordraw := range recordsraw { - records[i] = recordraw.(string) - } - updateOpts.Records = records - } - - if d.HasChange("description") { - updateOpts.Description = d.Get("description").(string) - } - - // Obtain relevant info from parsing the ID - zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating record set %s with options: %#v", recordsetID, updateOpts) - - _, err = recordsets.Update(dnsClient, zoneID, recordsetID, updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack DNS record set: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS record set (%s) to update", recordsetID) - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Pending: []string{"PENDING"}, - Refresh: waitForDNSRecordSet(dnsClient, zoneID, recordsetID), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - return resourceDNSRecordSetV2Read(d, meta) -} - -func resourceDNSRecordSetV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - // Obtain relevant info from parsing the ID - zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) - if err != nil 
{ - return err - } - - err = recordsets.Delete(dnsClient, zoneID, recordsetID).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting OpenStack DNS record set: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS record set (%s) to be deleted", recordsetID) - stateConf := &resource.StateChangeConf{ - Target: []string{"DELETED"}, - Pending: []string{"ACTIVE", "PENDING"}, - Refresh: waitForDNSRecordSet(dnsClient, zoneID, recordsetID), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId("") - return nil -} - -func waitForDNSRecordSet(dnsClient *gophercloud.ServiceClient, zoneID, recordsetId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - recordset, err := recordsets.Get(dnsClient, zoneID, recordsetId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return recordset, "DELETED", nil - } - - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack DNS record set (%s) current status: %s", recordset.ID, recordset.Status) - return recordset, recordset.Status, nil - } -} - -func parseDNSV2RecordSetID(id string) (string, string, error) { - idParts := strings.Split(id, "/") - if len(idParts) != 2 { - return "", "", fmt.Errorf("Unable to determine DNS record set ID from raw ID: %s", id) - } - - zoneID := idParts[0] - recordsetID := idParts[1] - - return zoneID, recordsetID, nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_zone_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_zone_v2.go deleted file mode 100644 index a3028e194f6..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_zone_v2.go +++ /dev/null @@ -1,276 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - 
"github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDNSZoneV2() *schema.Resource { - return &schema.Resource{ - Create: resourceDNSZoneV2Create, - Read: resourceDNSZoneV2Read, - Update: resourceDNSZoneV2Update, - Delete: resourceDNSZoneV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "email": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: resourceDNSZoneV2ValidType, - }, - "attributes": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: false, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "masters": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceDNSZoneV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS 
client: %s", err) - } - - mastersraw := d.Get("masters").(*schema.Set).List() - masters := make([]string, len(mastersraw)) - for i, masterraw := range mastersraw { - masters[i] = masterraw.(string) - } - - attrsraw := d.Get("attributes").(map[string]interface{}) - attrs := make(map[string]string, len(attrsraw)) - for k, v := range attrsraw { - attrs[k] = v.(string) - } - - createOpts := ZoneCreateOpts{ - zones.CreateOpts{ - Name: d.Get("name").(string), - Type: d.Get("type").(string), - Attributes: attrs, - TTL: d.Get("ttl").(int), - Email: d.Get("email").(string), - Description: d.Get("description").(string), - Masters: masters, - }, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - n, err := zones.Create(dnsClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS zone: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS Zone (%s) to become available", n.ID) - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Pending: []string{"PENDING"}, - Refresh: waitForDNSZone(dnsClient, n.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(n.ID) - - log.Printf("[DEBUG] Created OpenStack DNS Zone %s: %#v", n.ID, n) - return resourceDNSZoneV2Read(d, meta) -} - -func resourceDNSZoneV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - n, err := zones.Get(dnsClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "zone") - } - - log.Printf("[DEBUG] Retrieved Zone %s: %#v", d.Id(), n) - - d.Set("name", n.Name) - d.Set("email", n.Email) - d.Set("description", n.Description) - d.Set("ttl", n.TTL) - d.Set("type", n.Type) - d.Set("attributes", n.Attributes) - d.Set("masters", 
n.Masters) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceDNSZoneV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - var updateOpts zones.UpdateOpts - if d.HasChange("email") { - updateOpts.Email = d.Get("email").(string) - } - if d.HasChange("ttl") { - updateOpts.TTL = d.Get("ttl").(int) - } - if d.HasChange("masters") { - mastersraw := d.Get("masters").(*schema.Set).List() - masters := make([]string, len(mastersraw)) - for i, masterraw := range mastersraw { - masters[i] = masterraw.(string) - } - updateOpts.Masters = masters - } - if d.HasChange("description") { - updateOpts.Description = d.Get("description").(string) - } - - log.Printf("[DEBUG] Updating Zone %s with options: %#v", d.Id(), updateOpts) - - _, err = zones.Update(dnsClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack DNS Zone: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS Zone (%s) to update", d.Id()) - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Pending: []string{"PENDING"}, - Refresh: waitForDNSZone(dnsClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - return resourceDNSZoneV2Read(d, meta) -} - -func resourceDNSZoneV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - _, err = zones.Delete(dnsClient, d.Id()).Extract() - if err != nil { - return fmt.Errorf("Error deleting OpenStack DNS Zone: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS Zone (%s) to become available", d.Id()) - stateConf := 
&resource.StateChangeConf{ - Target: []string{"DELETED"}, - Pending: []string{"ACTIVE", "PENDING"}, - Refresh: waitForDNSZone(dnsClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId("") - return nil -} - -func resourceDNSZoneV2ValidType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - validTypes := []string{ - "PRIMARY", - "SECONDARY", - } - - for _, v := range validTypes { - if value == v { - return - } - } - - err := fmt.Errorf("%s must be one of %s", k, validTypes) - errors = append(errors, err) - return -} - -func waitForDNSZone(dnsClient *gophercloud.ServiceClient, zoneId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - zone, err := zones.Get(dnsClient, zoneId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return zone, "DELETED", nil - } - - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack DNS Zone (%s) current status: %s", zone.ID, zone.Status) - return zone, zone.Status, nil - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_firewall_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_firewall_v1.go deleted file mode 100644 index dfb5a65c5e1..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_firewall_v1.go +++ /dev/null @@ -1,323 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func 
resourceFWFirewallV1() *schema.Resource { - return &schema.Resource{ - Create: resourceFWFirewallV1Create, - Read: resourceFWFirewallV1Read, - Update: resourceFWFirewallV1Update, - Delete: resourceFWFirewallV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "policy_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "associated_routers": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - ConflictsWith: []string{"no_routers"}, - Computed: true, - }, - "no_routers": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ConflictsWith: []string{"associated_routers"}, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceFWFirewallV1Create(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var createOpts firewalls.CreateOptsBuilder - - adminStateUp := d.Get("admin_state_up").(bool) - createOpts = FirewallCreateOpts{ - 
firewalls.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - PolicyID: d.Get("policy_id").(string), - AdminStateUp: &adminStateUp, - TenantID: d.Get("tenant_id").(string), - }, - MapValueSpecs(d), - } - - associatedRoutersRaw := d.Get("associated_routers").(*schema.Set).List() - if len(associatedRoutersRaw) > 0 { - log.Printf("[DEBUG] Will attempt to associate Firewall with router(s): %+v", associatedRoutersRaw) - - var routerIds []string - for _, v := range associatedRoutersRaw { - routerIds = append(routerIds, v.(string)) - } - - createOpts = &routerinsertion.CreateOptsExt{ - CreateOptsBuilder: createOpts, - RouterIDs: routerIds, - } - } - - if d.Get("no_routers").(bool) { - routerIds := make([]string, 0) - log.Println("[DEBUG] No routers specified. Setting to empty slice") - createOpts = &routerinsertion.CreateOptsExt{ - CreateOptsBuilder: createOpts, - RouterIDs: routerIds, - } - } - - log.Printf("[DEBUG] Create firewall: %#v", createOpts) - - firewall, err := firewalls.Create(networkingClient, createOpts).Extract() - if err != nil { - return err - } - - log.Printf("[DEBUG] Firewall created: %#v", firewall) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForFirewallActive(networkingClient, firewall.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 0, - MinTimeout: 2 * time.Second, - } - - _, err = stateConf.WaitForState() - log.Printf("[DEBUG] Firewall (%s) is active.", firewall.ID) - - d.SetId(firewall.ID) - - return resourceFWFirewallV1Read(d, meta) -} - -func resourceFWFirewallV1Read(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Retrieve information about firewall: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var firewall Firewall - err 
= firewalls.Get(networkingClient, d.Id()).ExtractInto(&firewall) - if err != nil { - return CheckDeleted(d, err, "firewall") - } - - log.Printf("[DEBUG] Read OpenStack Firewall %s: %#v", d.Id(), firewall) - - d.Set("name", firewall.Name) - d.Set("description", firewall.Description) - d.Set("policy_id", firewall.PolicyID) - d.Set("admin_state_up", firewall.AdminStateUp) - d.Set("tenant_id", firewall.TenantID) - d.Set("associated_routers", firewall.RouterIDs) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceFWFirewallV1Update(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // PolicyID is required - opts := firewalls.UpdateOpts{ - PolicyID: d.Get("policy_id").(string), - } - - if d.HasChange("name") { - opts.Name = d.Get("name").(string) - } - - if d.HasChange("description") { - opts.Description = d.Get("description").(string) - } - - if d.HasChange("admin_state_up") { - adminStateUp := d.Get("admin_state_up").(bool) - opts.AdminStateUp = &adminStateUp - } - - var updateOpts firewalls.UpdateOptsBuilder - var routerIds []string - if d.HasChange("associated_routers") || d.HasChange("no_routers") { - // 'no_routers' = true means 'associated_routers' will be empty... 
- if d.Get("no_routers").(bool) { - log.Printf("[DEBUG] 'no_routers' is true.") - routerIds = make([]string, 0) - } else { - associatedRoutersRaw := d.Get("associated_routers").(*schema.Set).List() - for _, v := range associatedRoutersRaw { - routerIds = append(routerIds, v.(string)) - } - } - - updateOpts = routerinsertion.UpdateOptsExt{ - UpdateOptsBuilder: opts, - RouterIDs: routerIds, - } - } else { - updateOpts = opts - } - - log.Printf("[DEBUG] Updating firewall with id %s: %#v", d.Id(), updateOpts) - - err = firewalls.Update(networkingClient, d.Id(), updateOpts).Err - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE", "PENDING_UPDATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForFirewallActive(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 0, - MinTimeout: 2 * time.Second, - } - - _, err = stateConf.WaitForState() - - return resourceFWFirewallV1Read(d, meta) -} - -func resourceFWFirewallV1Delete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Destroy firewall: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // Ensure the firewall was fully created/updated before being deleted. 
- stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE", "PENDING_UPDATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForFirewallActive(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 0, - MinTimeout: 2 * time.Second, - } - - _, err = stateConf.WaitForState() - - err = firewalls.Delete(networkingClient, d.Id()).Err - - if err != nil { - return err - } - - stateConf = &resource.StateChangeConf{ - Pending: []string{"DELETING"}, - Target: []string{"DELETED"}, - Refresh: waitForFirewallDeletion(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 0, - MinTimeout: 2 * time.Second, - } - - _, err = stateConf.WaitForState() - - return err -} - -func waitForFirewallActive(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { - - return func() (interface{}, string, error) { - var fw Firewall - - err := firewalls.Get(networkingClient, id).ExtractInto(&fw) - if err != nil { - return nil, "", err - } - return fw, fw.Status, nil - } -} - -func waitForFirewallDeletion(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { - - return func() (interface{}, string, error) { - fw, err := firewalls.Get(networkingClient, id).Extract() - log.Printf("[DEBUG] Got firewall %s => %#v", id, fw) - - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Firewall %s is actually deleted", id) - return "", "DELETED", nil - } - return nil, "", fmt.Errorf("Unexpected error: %s", err) - } - - log.Printf("[DEBUG] Firewall %s deletion is pending", id) - return fw, "DELETING", nil - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_policy_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_policy_v1.go deleted file mode 100644 index 9012854aa13..00000000000 --- 
a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_policy_v1.go +++ /dev/null @@ -1,231 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceFWPolicyV1() *schema.Resource { - return &schema.Resource{ - Create: resourceFWPolicyV1Create, - Read: resourceFWPolicyV1Read, - Update: resourceFWPolicyV1Update, - Delete: resourceFWPolicyV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "audited": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "shared": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "rules": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceFWPolicyV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - v := d.Get("rules").([]interface{}) - - 
log.Printf("[DEBUG] Rules found : %#v", v) - log.Printf("[DEBUG] Rules count : %d", len(v)) - - rules := make([]string, len(v)) - for i, v := range v { - rules[i] = v.(string) - } - - audited := d.Get("audited").(bool) - - opts := PolicyCreateOpts{ - policies.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Audited: &audited, - TenantID: d.Get("tenant_id").(string), - Rules: rules, - }, - MapValueSpecs(d), - } - - if r, ok := d.GetOk("shared"); ok { - shared := r.(bool) - opts.Shared = &shared - } - - log.Printf("[DEBUG] Create firewall policy: %#v", opts) - - policy, err := policies.Create(networkingClient, opts).Extract() - if err != nil { - return err - } - - log.Printf("[DEBUG] Firewall policy created: %#v", policy) - - d.SetId(policy.ID) - - return resourceFWPolicyV1Read(d, meta) -} - -func resourceFWPolicyV1Read(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Retrieve information about firewall policy: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - policy, err := policies.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "FW policy") - } - - log.Printf("[DEBUG] Read OpenStack Firewall Policy %s: %#v", d.Id(), policy) - - d.Set("name", policy.Name) - d.Set("description", policy.Description) - d.Set("shared", policy.Shared) - d.Set("audited", policy.Audited) - d.Set("tenant_id", policy.TenantID) - d.Set("rules", policy.Rules) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceFWPolicyV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - opts := 
policies.UpdateOpts{} - - if d.HasChange("name") { - opts.Name = d.Get("name").(string) - } - - if d.HasChange("description") { - opts.Description = d.Get("description").(string) - } - - if d.HasChange("rules") { - v := d.Get("rules").([]interface{}) - - log.Printf("[DEBUG] Rules found : %#v", v) - log.Printf("[DEBUG] Rules count : %d", len(v)) - - rules := make([]string, len(v)) - for i, v := range v { - rules[i] = v.(string) - } - opts.Rules = rules - } - - log.Printf("[DEBUG] Updating firewall policy with id %s: %#v", d.Id(), opts) - - err = policies.Update(networkingClient, d.Id(), opts).Err - if err != nil { - return err - } - - return resourceFWPolicyV1Read(d, meta) -} - -func resourceFWPolicyV1Delete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Destroy firewall policy: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForFirewallPolicyDeletion(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 0, - MinTimeout: 2 * time.Second, - } - - if _, err = stateConf.WaitForState(); err != nil { - return err - } - - return nil -} - -func waitForFirewallPolicyDeletion(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - err := policies.Delete(networkingClient, id).Err - if err == nil { - return "", "DELETED", nil - } - - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - // This error usually means that the policy is attached - // to a firewall. At this point, the firewall is probably - // being delete. So, we retry a few times. 
- return nil, "ACTIVE", nil - } - } - - return nil, "ACTIVE", err - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_rule_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_rule_v1.go deleted file mode 100644 index 92793c46d99..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_rule_v1.go +++ /dev/null @@ -1,258 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceFWRuleV1() *schema.Resource { - return &schema.Resource{ - Create: resourceFWRuleV1Create, - Read: resourceFWRuleV1Read, - Update: resourceFWRuleV1Update, - Delete: resourceFWRuleV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "action": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "ip_version": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 4, - }, - "source_ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "destination_ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "source_port": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "destination_port": &schema.Schema{ - Type: 
schema.TypeString, - Optional: true, - }, - "enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceFWRuleV1Create(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - enabled := d.Get("enabled").(bool) - ipVersion := resourceFWRuleV1DetermineIPVersion(d.Get("ip_version").(int)) - protocol := resourceFWRuleV1DetermineProtocol(d.Get("protocol").(string)) - - ruleConfiguration := RuleCreateOpts{ - rules.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Protocol: protocol, - Action: d.Get("action").(string), - IPVersion: ipVersion, - SourceIPAddress: d.Get("source_ip_address").(string), - DestinationIPAddress: d.Get("destination_ip_address").(string), - SourcePort: d.Get("source_port").(string), - DestinationPort: d.Get("destination_port").(string), - Enabled: &enabled, - TenantID: d.Get("tenant_id").(string), - }, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create firewall rule: %#v", ruleConfiguration) - - rule, err := rules.Create(networkingClient, ruleConfiguration).Extract() - - if err != nil { - return err - } - - log.Printf("[DEBUG] Firewall rule with id %s : %#v", rule.ID, rule) - - d.SetId(rule.ID) - - return resourceFWRuleV1Read(d, meta) -} - -func resourceFWRuleV1Read(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Retrieve information about firewall rule: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack 
networking client: %s", err) - } - - rule, err := rules.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "FW rule") - } - - log.Printf("[DEBUG] Read OpenStack Firewall Rule %s: %#v", d.Id(), rule) - - d.Set("action", rule.Action) - d.Set("name", rule.Name) - d.Set("description", rule.Description) - d.Set("ip_version", rule.IPVersion) - d.Set("source_ip_address", rule.SourceIPAddress) - d.Set("destination_ip_address", rule.DestinationIPAddress) - d.Set("source_port", rule.SourcePort) - d.Set("destination_port", rule.DestinationPort) - d.Set("enabled", rule.Enabled) - - if rule.Protocol == "" { - d.Set("protocol", "any") - } else { - d.Set("protocol", rule.Protocol) - } - - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceFWRuleV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - name := d.Get("name").(string) - description := d.Get("description").(string) - protocol := d.Get("protocol").(string) - action := d.Get("action").(string) - ipVersion := resourceFWRuleV1DetermineIPVersion(d.Get("ip_version").(int)) - sourceIPAddress := d.Get("source_ip_address").(string) - sourcePort := d.Get("source_port").(string) - destinationIPAddress := d.Get("destination_ip_address").(string) - destinationPort := d.Get("destination_port").(string) - enabled := d.Get("enabled").(bool) - - opts := rules.UpdateOpts{ - Name: &name, - Description: &description, - Protocol: &protocol, - Action: &action, - IPVersion: &ipVersion, - SourceIPAddress: &sourceIPAddress, - DestinationIPAddress: &destinationIPAddress, - SourcePort: &sourcePort, - DestinationPort: &destinationPort, - Enabled: &enabled, - } - - log.Printf("[DEBUG] Updating firewall rules: %#v", opts) - err = rules.Update(networkingClient, d.Id(), opts).Err - if err != 
nil { - return err - } - - return resourceFWRuleV1Read(d, meta) -} - -func resourceFWRuleV1Delete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Destroy firewall rule: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - rule, err := rules.Get(networkingClient, d.Id()).Extract() - if err != nil { - return err - } - - if rule.PolicyID != "" { - _, err := policies.RemoveRule(networkingClient, rule.PolicyID, rule.ID).Extract() - if err != nil { - return err - } - } - - return rules.Delete(networkingClient, d.Id()).Err -} - -func resourceFWRuleV1DetermineIPVersion(ipv int) gophercloud.IPVersion { - // Determine the IP Version - var ipVersion gophercloud.IPVersion - switch ipv { - case 4: - ipVersion = gophercloud.IPv4 - case 6: - ipVersion = gophercloud.IPv6 - } - - return ipVersion -} - -func resourceFWRuleV1DetermineProtocol(p string) rules.Protocol { - var protocol rules.Protocol - switch p { - case "any": - protocol = rules.ProtocolAny - case "icmp": - protocol = rules.ProtocolICMP - case "tcp": - protocol = rules.ProtocolTCP - case "udp": - protocol = rules.ProtocolUDP - } - - return protocol -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_project_v3.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_project_v3.go deleted file mode 100644 index 83dddb50406..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_project_v3.go +++ /dev/null @@ -1,184 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceIdentityProjectV3() *schema.Resource { - 
return &schema.Resource{ - Create: resourceIdentityProjectV3Create, - Read: resourceIdentityProjectV3Read, - Update: resourceIdentityProjectV3Update, - Delete: resourceIdentityProjectV3Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "domain_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "is_domain": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "parent_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func resourceIdentityProjectV3Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - identityClient, err := config.identityV3Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack identity client: %s", err) - } - - enabled := d.Get("enabled").(bool) - isDomain := d.Get("is_domain").(bool) - createOpts := projects.CreateOpts{ - Description: d.Get("description").(string), - DomainID: d.Get("domain_id").(string), - Enabled: &enabled, - IsDomain: &isDomain, - Name: d.Get("name").(string), - ParentID: d.Get("parent_id").(string), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - project, err := projects.Create(identityClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack project: %s", err) - } - - d.SetId(project.ID) - - return resourceIdentityProjectV3Read(d, meta) -} - -func resourceIdentityProjectV3Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) 
- identityClient, err := config.identityV3Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack identity client: %s", err) - } - - project, err := projects.Get(identityClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "project") - } - - log.Printf("[DEBUG] Retrieved OpenStack project: %#v", project) - - d.Set("description", project.Description) - d.Set("domain_id", project.DomainID) - d.Set("enabled", project.Enabled) - d.Set("is_domain", project.IsDomain) - d.Set("name", project.Name) - d.Set("parent_id", project.ParentID) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceIdentityProjectV3Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - identityClient, err := config.identityV3Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack identity client: %s", err) - } - - var hasChange bool - var updateOpts projects.UpdateOpts - - if d.HasChange("domain_id") { - hasChange = true - updateOpts.DomainID = d.Get("domain_id").(string) - } - - if d.HasChange("enabled") { - hasChange = true - enabled := d.Get("enabled").(bool) - updateOpts.Enabled = &enabled - } - - if d.HasChange("is_domain") { - hasChange = true - isDomain := d.Get("is_domain").(bool) - updateOpts.IsDomain = &isDomain - } - - if d.HasChange("name") { - hasChange = true - updateOpts.Name = d.Get("name").(string) - } - - if d.HasChange("parent_id") { - hasChange = true - updateOpts.ParentID = d.Get("parent_id").(string) - } - - if d.HasChange("description") { - hasChange = true - updateOpts.Description = d.Get("description").(string) - } - - if hasChange { - _, err := projects.Update(identityClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack project: %s", err) - } - } - - return resourceIdentityProjectV3Read(d, meta) -} - -func resourceIdentityProjectV3Delete(d *schema.ResourceData, meta interface{}) 
error { - config := meta.(*Config) - identityClient, err := config.identityV3Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack identity client: %s", err) - } - - err = projects.Delete(identityClient, d.Id()).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting OpenStack project: %s", err) - } - - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_user_v3.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_user_v3.go deleted file mode 100644 index b6c8008dde3..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_user_v3.go +++ /dev/null @@ -1,311 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/identity/v3/users" - "github.com/hashicorp/terraform/helper/schema" -) - -var userOptions = map[users.Option]string{ - users.IgnoreChangePasswordUponFirstUse: "ignore_change_password_upon_first_use", - users.IgnorePasswordExpiry: "ignore_password_expiry", - users.IgnoreLockoutFailureAttempts: "ignore_lockout_failure_attempts", - users.MultiFactorAuthEnabled: "multi_factor_auth_enabled", -} - -func resourceIdentityUserV3() *schema.Resource { - return &schema.Resource{ - Create: resourceIdentityUserV3Create, - Read: resourceIdentityUserV3Read, - Update: resourceIdentityUserV3Update, - Delete: resourceIdentityUserV3Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "default_project_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "domain_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "enabled": &schema.Schema{ - 
Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "extra": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - // The following are all specific options that must - // be bundled into user.Options - "ignore_change_password_upon_first_use": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "ignore_password_expiry": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "ignore_lockout_failure_attempts": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "multi_factor_auth_enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "multi_factor_auth_rule": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "rule": &schema.Schema{ - Type: schema.TypeList, - MinItems: 1, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - }, - } -} - -func resourceIdentityUserV3Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - identityClient, err := config.identityV3Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack identity client: %s", err) - } - - enabled := d.Get("enabled").(bool) - createOpts := users.CreateOpts{ - DefaultProjectID: d.Get("default_project_id").(string), - Description: d.Get("description").(string), - DomainID: d.Get("domain_id").(string), - Enabled: &enabled, - Extra: d.Get("extra").(map[string]interface{}), - Name: d.Get("name").(string), - } - - // Build the user options - options := map[users.Option]interface{}{} - for optionType, option := range userOptions { - if v, ok := d.GetOk(option); 
ok { - options[optionType] = v.(bool) - } - } - - // Build the MFA rules - mfaRules := resourceIdentityUserV3BuildMFARules(d.Get("multi_factor_auth_rule").([]interface{})) - if len(mfaRules) > 0 { - options[users.MultiFactorAuthRules] = mfaRules - } - - createOpts.Options = options - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - - // Add password here so it wouldn't go in the above log entry - createOpts.Password = d.Get("password").(string) - - user, err := users.Create(identityClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack user: %s", err) - } - - d.SetId(user.ID) - - return resourceIdentityUserV3Read(d, meta) -} - -func resourceIdentityUserV3Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - identityClient, err := config.identityV3Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack identity client: %s", err) - } - - user, err := users.Get(identityClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "user") - } - - log.Printf("[DEBUG] Retrieved OpenStack user: %#v", user) - - d.Set("default_project_id", user.DefaultProjectID) - d.Set("description", user.Description) - d.Set("domain_id", user.DomainID) - d.Set("enabled", user.Enabled) - d.Set("extra", user.Extra) - d.Set("name", user.Name) - d.Set("region", GetRegion(d, config)) - - options := user.Options - for _, option := range userOptions { - if v, ok := options[option]; ok { - d.Set(option, v.(bool)) - } - } - - mfaRules := []map[string]interface{}{} - if v, ok := options["multi_factor_auth_rules"].([]interface{}); ok { - for _, v := range v { - mfaRule := map[string]interface{}{ - "rule": v, - } - mfaRules = append(mfaRules, mfaRule) - } - - d.Set("multi_factor_auth_rule", mfaRules) - } - - return nil -} - -func resourceIdentityUserV3Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - identityClient, err := 
config.identityV3Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack identity client: %s", err) - } - - var hasChange bool - var updateOpts users.UpdateOpts - - if d.HasChange("default_project_id") { - hasChange = true - updateOpts.DefaultProjectID = d.Get("default_project_id").(string) - } - - if d.HasChange("description") { - hasChange = true - updateOpts.Description = d.Get("description").(string) - } - - if d.HasChange("domain_id") { - hasChange = true - updateOpts.DomainID = d.Get("domain_id").(string) - } - - if d.HasChange("enabled") { - hasChange = true - enabled := d.Get("enabled").(bool) - updateOpts.Enabled = &enabled - } - - if d.HasChange("extra") { - hasChange = true - updateOpts.Extra = d.Get("extra").(map[string]interface{}) - } - - if d.HasChange("name") { - hasChange = true - updateOpts.Name = d.Get("name").(string) - } - - // Determine if the options have changed - options := map[users.Option]interface{}{} - for optionType, option := range userOptions { - if d.HasChange(option) { - hasChange = true - options[optionType] = d.Get(option).(bool) - } - } - - // Build the MFA rules - if d.HasChange("multi_factor_auth_rule") { - mfaRules := resourceIdentityUserV3BuildMFARules(d.Get("multi_factor_auth_rule").([]interface{})) - if len(mfaRules) > 0 { - options[users.MultiFactorAuthRules] = mfaRules - } - } - - updateOpts.Options = options - - if hasChange { - log.Printf("[DEBUG] Update Options: %#v", updateOpts) - } - - if d.HasChange("password") { - hasChange = true - updateOpts.Password = d.Get("password").(string) - } - - if hasChange { - _, err := users.Update(identityClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack user: %s", err) - } - } - - return resourceIdentityUserV3Read(d, meta) -} - -func resourceIdentityUserV3Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - identityClient, err := 
config.identityV3Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack identity client: %s", err) - } - - err = users.Delete(identityClient, d.Id()).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting OpenStack user: %s", err) - } - - return nil -} - -func resourceIdentityUserV3BuildMFARules(rules []interface{}) []interface{} { - var mfaRules []interface{} - - for _, rule := range rules { - ruleMap := rule.(map[string]interface{}) - ruleList := ruleMap["rule"].([]interface{}) - mfaRules = append(mfaRules, ruleList) - } - - return mfaRules -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_images_image_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_images_image_v2.go deleted file mode 100644 index 3d9dcf8e1c5..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_images_image_v2.go +++ /dev/null @@ -1,526 +0,0 @@ -package openstack - -import ( - "crypto/md5" - "encoding/hex" - "fmt" - "io" - "log" - "net/http" - "os" - "path/filepath" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata" - "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceImagesImageV2() *schema.Resource { - return &schema.Resource{ - Create: resourceImagesImageV2Create, - Read: resourceImagesImageV2Read, - Update: resourceImagesImageV2Update, - Delete: resourceImagesImageV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: 
true, - Computed: true, - ForceNew: true, - }, - - "checksum": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "container_format": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resourceImagesImageV2ValidateContainerFormat, - }, - - "created_at": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "disk_format": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resourceImagesImageV2ValidateDiskFormat, - }, - - "file": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "image_cache_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: fmt.Sprintf("%s/.terraform/image_cache", os.Getenv("HOME")), - }, - - "image_source_url": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"local_file_path"}, - }, - - "local_file_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"image_source_url"}, - }, - - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - - "min_disk_gb": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validatePositiveInt, - Default: 0, - }, - - "min_ram_mb": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validatePositiveInt, - Default: 0, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - - "owner": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "protected": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - - "schema": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "size_bytes": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - - "status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, 
- }, - - "tags": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "update_at": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "visibility": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - ValidateFunc: resourceImagesImageV2ValidateVisibility, - Default: "private", - }, - - "properties": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceImagesImageV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - imageClient, err := config.imageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack image client: %s", err) - } - - protected := d.Get("protected").(bool) - visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string)) - - properties := d.Get("properties").(map[string]interface{}) - imageProperties := resourceImagesImageV2ExpandProperties(properties) - - createOpts := &images.CreateOpts{ - Name: d.Get("name").(string), - ContainerFormat: d.Get("container_format").(string), - DiskFormat: d.Get("disk_format").(string), - MinDisk: d.Get("min_disk_gb").(int), - MinRAM: d.Get("min_ram_mb").(int), - Protected: &protected, - Visibility: &visibility, - Properties: imageProperties, - } - - if v, ok := d.GetOk("tags"); ok { - tags := v.(*schema.Set).List() - createOpts.Tags = resourceImagesImageV2BuildTags(tags) - } - - d.Partial(true) - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - newImg, err := images.Create(imageClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating Image: %s", err) - } - - d.SetId(newImg.ID) - - // downloading/getting image file props - imgFilePath, err := resourceImagesImageV2File(d) - if err != nil { - return fmt.Errorf("Error opening file for Image: %s", err) - - } - fileSize, fileChecksum, err := 
resourceImagesImageV2FileProps(imgFilePath) - if err != nil { - return fmt.Errorf("Error getting file props: %s", err) - } - - // upload - imgFile, err := os.Open(imgFilePath) - if err != nil { - return fmt.Errorf("Error opening file %q: %s", imgFilePath, err) - } - defer imgFile.Close() - log.Printf("[WARN] Uploading image %s (%d bytes). This can be pretty long.", d.Id(), fileSize) - - res := imagedata.Upload(imageClient, d.Id(), imgFile) - if res.Err != nil { - return fmt.Errorf("Error while uploading file %q: %s", imgFilePath, res.Err) - } - - //wait for active - stateConf := &resource.StateChangeConf{ - Pending: []string{string(images.ImageStatusQueued), string(images.ImageStatusSaving)}, - Target: []string{string(images.ImageStatusActive)}, - Refresh: resourceImagesImageV2RefreshFunc(imageClient, d.Id(), fileSize, fileChecksum), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - if _, err = stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Image: %s", err) - } - - d.Partial(false) - - return resourceImagesImageV2Read(d, meta) -} - -func resourceImagesImageV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - imageClient, err := config.imageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack image client: %s", err) - } - - img, err := images.Get(imageClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "image") - } - - log.Printf("[DEBUG] Retrieved Image %s: %#v", d.Id(), img) - - d.Set("owner", img.Owner) - d.Set("status", img.Status) - d.Set("file", img.File) - d.Set("schema", img.Schema) - d.Set("checksum", img.Checksum) - d.Set("size_bytes", img.SizeBytes) - d.Set("metadata", img.Metadata) - d.Set("created_at", img.CreatedAt) - d.Set("update_at", img.UpdatedAt) - d.Set("container_format", img.ContainerFormat) - d.Set("disk_format", img.DiskFormat) - d.Set("min_disk_gb", 
img.MinDiskGigabytes) - d.Set("min_ram_mb", img.MinRAMMegabytes) - d.Set("file", img.File) - d.Set("name", img.Name) - d.Set("protected", img.Protected) - d.Set("size_bytes", img.SizeBytes) - d.Set("tags", img.Tags) - d.Set("visibility", img.Visibility) - d.Set("properties", img.Properties) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceImagesImageV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - imageClient, err := config.imageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack image client: %s", err) - } - - updateOpts := make(images.UpdateOpts, 0) - - if d.HasChange("visibility") { - visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string)) - v := images.UpdateVisibility{Visibility: visibility} - updateOpts = append(updateOpts, v) - } - - if d.HasChange("name") { - v := images.ReplaceImageName{NewName: d.Get("name").(string)} - updateOpts = append(updateOpts, v) - } - - if d.HasChange("tags") { - tags := d.Get("tags").(*schema.Set).List() - v := images.ReplaceImageTags{ - NewTags: resourceImagesImageV2BuildTags(tags), - } - updateOpts = append(updateOpts, v) - } - - log.Printf("[DEBUG] Update Options: %#v", updateOpts) - - _, err = images.Update(imageClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating image: %s", err) - } - - return resourceImagesImageV2Read(d, meta) -} - -func resourceImagesImageV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - imageClient, err := config.imageV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack image client: %s", err) - } - - log.Printf("[DEBUG] Deleting Image %s", d.Id()) - if err := images.Delete(imageClient, d.Id()).Err; err != nil { - return fmt.Errorf("Error deleting Image: %s", err) - } - - d.SetId("") - return nil -} - -func resourceImagesImageV2ValidateVisibility(v 
interface{}, k string) (ws []string, errors []error) { - value := v.(string) - validVisibilities := []string{ - "public", - "private", - "shared", - "community", - } - - for _, v := range validVisibilities { - if value == v { - return - } - } - - err := fmt.Errorf("%s must be one of %s", k, validVisibilities) - errors = append(errors, err) - return -} - -func validatePositiveInt(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value > 0 { - return - } - errors = append(errors, fmt.Errorf("%q must be a positive integer", k)) - return -} - -var DiskFormats = [9]string{"ami", "ari", "aki", "vhd", "vmdk", "raw", "qcow2", "vdi", "iso"} - -func resourceImagesImageV2ValidateDiskFormat(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - for i := range DiskFormats { - if value == DiskFormats[i] { - return - } - } - errors = append(errors, fmt.Errorf("%q must be one of %v", k, DiskFormats)) - return -} - -var ContainerFormats = [9]string{"ami", "ari", "aki", "bare", "ovf"} - -func resourceImagesImageV2ValidateContainerFormat(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - for i := range ContainerFormats { - if value == ContainerFormats[i] { - return - } - } - errors = append(errors, fmt.Errorf("%q must be one of %v", k, ContainerFormats)) - return -} - -func resourceImagesImageV2VisibilityFromString(v string) images.ImageVisibility { - switch v { - case "public": - return images.ImageVisibilityPublic - case "private": - return images.ImageVisibilityPrivate - case "shared": - return images.ImageVisibilityShared - case "community": - return images.ImageVisibilityCommunity - } - - return "" -} - -func fileMD5Checksum(f *os.File) (string, error) { - hash := md5.New() - if _, err := io.Copy(hash, f); err != nil { - return "", err - } - return hex.EncodeToString(hash.Sum(nil)), nil -} - -func resourceImagesImageV2FileProps(filename string) (int64, string, error) { - var filesize int64 - 
var filechecksum string - - file, err := os.Open(filename) - if err != nil { - return -1, "", fmt.Errorf("Error opening file for Image: %s", err) - - } - defer file.Close() - - fstat, err := file.Stat() - if err != nil { - return -1, "", fmt.Errorf("Error reading image file %q: %s", file.Name(), err) - } - - filesize = fstat.Size() - filechecksum, err = fileMD5Checksum(file) - - if err != nil { - return -1, "", fmt.Errorf("Error computing image file %q checksum: %s", file.Name(), err) - } - - return filesize, filechecksum, nil -} - -func resourceImagesImageV2File(d *schema.ResourceData) (string, error) { - if filename := d.Get("local_file_path").(string); filename != "" { - return filename, nil - } else if furl := d.Get("image_source_url").(string); furl != "" { - dir := d.Get("image_cache_path").(string) - os.MkdirAll(dir, 0700) - filename := filepath.Join(dir, fmt.Sprintf("%x.img", md5.Sum([]byte(furl)))) - - if _, err := os.Stat(filename); err != nil { - if !os.IsNotExist(err) { - return "", fmt.Errorf("Error while trying to access file %q: %s", filename, err) - } - log.Printf("[DEBUG] File doens't exists %s. will download from %s", filename, furl) - file, err := os.Create(filename) - if err != nil { - return "", fmt.Errorf("Error creating file %q: %s", filename, err) - } - defer file.Close() - resp, err := http.Get(furl) - if err != nil { - return "", fmt.Errorf("Error downloading image from %q", furl) - } - defer resp.Body.Close() - - if _, err = io.Copy(file, resp.Body); err != nil { - return "", fmt.Errorf("Error downloading image %q to file %q: %s", furl, filename, err) - } - return filename, nil - } else { - log.Printf("[DEBUG] File exists %s", filename) - return filename, nil - } - } else { - return "", fmt.Errorf("Error in config. 
no file specified") - } -} - -func resourceImagesImageV2RefreshFunc(client *gophercloud.ServiceClient, id string, fileSize int64, checksum string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - img, err := images.Get(client, id).Extract() - if err != nil { - return nil, "", err - } - log.Printf("[DEBUG] OpenStack image status is: %s", img.Status) - - if img.Checksum != checksum || int64(img.SizeBytes) != fileSize { - return img, fmt.Sprintf("%s", img.Status), fmt.Errorf("Error wrong size %v or checksum %q", img.SizeBytes, img.Checksum) - } - - return img, fmt.Sprintf("%s", img.Status), nil - } -} - -func resourceImagesImageV2BuildTags(v []interface{}) []string { - var tags []string - for _, tag := range v { - tags = append(tags, tag.(string)) - } - - return tags -} - -func resourceImagesImageV2ExpandProperties(v map[string]interface{}) map[string]string { - properties := map[string]string{} - for key, value := range v { - if v, ok := value.(string); ok { - properties[key] = v - } - } - - return properties -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_listener_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_listener_v2.go deleted file mode 100644 index 754d0b621ff..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_listener_v2.go +++ /dev/null @@ -1,314 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" -) - -func resourceListenerV2() *schema.Resource { - return &schema.Resource{ - Create: resourceListenerV2Create, - Read: resourceListenerV2Read, - Update: resourceListenerV2Update, - Delete: resourceListenerV2Delete, - - Timeouts: 
&schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "TCP" && value != "HTTP" && value != "HTTPS" && value != "TERMINATED_HTTPS" { - errors = append(errors, fmt.Errorf( - "Only 'TCP', 'HTTP', 'HTTPS' and 'TERMINATED_HTTPS' are supported values for 'protocol'")) - } - return - }, - }, - - "protocol_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "loadbalancer_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "default_pool_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "connection_limit": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "default_tls_container_ref": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "sni_container_refs": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - }, - } -} - -func resourceListenerV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, 
err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - adminStateUp := d.Get("admin_state_up").(bool) - var sniContainerRefs []string - if raw, ok := d.GetOk("sni_container_refs"); ok { - for _, v := range raw.([]interface{}) { - sniContainerRefs = append(sniContainerRefs, v.(string)) - } - } - createOpts := listeners.CreateOpts{ - Protocol: listeners.Protocol(d.Get("protocol").(string)), - ProtocolPort: d.Get("protocol_port").(int), - TenantID: d.Get("tenant_id").(string), - LoadbalancerID: d.Get("loadbalancer_id").(string), - Name: d.Get("name").(string), - DefaultPoolID: d.Get("default_pool_id").(string), - Description: d.Get("description").(string), - DefaultTlsContainerRef: d.Get("default_tls_container_ref").(string), - SniContainerRefs: sniContainerRefs, - AdminStateUp: &adminStateUp, - } - - if v, ok := d.GetOk("connection_limit"); ok { - connectionLimit := v.(int) - createOpts.ConnLimit = &connectionLimit - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - - // Wait for LoadBalancer to become active before continuing - lbID := createOpts.LoadbalancerID - timeout := d.Timeout(schema.TimeoutCreate) - err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - if err != nil { - return err - } - - log.Printf("[DEBUG] Attempting to create listener") - var listener *listeners.Listener - err = resource.Retry(timeout, func() *resource.RetryError { - listener, err = listeners.Create(networkingClient, createOpts).Extract() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - if err != nil { - return fmt.Errorf("Error creating listener: %s", err) - } - - // Wait for LoadBalancer to become active again before continuing - err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - if err != nil { - return err - } - - d.SetId(listener.ID) - - return resourceListenerV2Read(d, meta) -} - -func 
resourceListenerV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - listener, err := listeners.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "listener") - } - - log.Printf("[DEBUG] Retrieved listener %s: %#v", d.Id(), listener) - - d.Set("name", listener.Name) - d.Set("protocol", listener.Protocol) - d.Set("tenant_id", listener.TenantID) - d.Set("description", listener.Description) - d.Set("protocol_port", listener.ProtocolPort) - d.Set("admin_state_up", listener.AdminStateUp) - d.Set("default_pool_id", listener.DefaultPoolID) - d.Set("connection_limit", listener.ConnLimit) - d.Set("sni_container_refs", listener.SniContainerRefs) - d.Set("default_tls_container_ref", listener.DefaultTlsContainerRef) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceListenerV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts listeners.UpdateOpts - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("description") { - updateOpts.Description = d.Get("description").(string) - } - if d.HasChange("connection_limit") { - connLimit := d.Get("connection_limit").(int) - updateOpts.ConnLimit = &connLimit - } - if d.HasChange("default_tls_container_ref") { - updateOpts.DefaultTlsContainerRef = d.Get("default_tls_container_ref").(string) - } - if d.HasChange("sni_container_refs") { - var sniContainerRefs []string - if raw, ok := d.GetOk("sni_container_refs"); ok { - for _, v := range raw.([]interface{}) { - sniContainerRefs = append(sniContainerRefs, v.(string)) - } - } 
- updateOpts.SniContainerRefs = sniContainerRefs - } - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - // Wait for LoadBalancer to become active before continuing - lbID := d.Get("loadbalancer_id").(string) - timeout := d.Timeout(schema.TimeoutUpdate) - err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating listener %s with options: %#v", d.Id(), updateOpts) - err = resource.Retry(timeout, func() *resource.RetryError { - _, err = listeners.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Error updating listener %s: %s", d.Id(), err) - } - - // Wait for LoadBalancer to become active again before continuing - err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - if err != nil { - return err - } - - return resourceListenerV2Read(d, meta) - -} - -func resourceListenerV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // Wait for LoadBalancer to become active before continuing - lbID := d.Get("loadbalancer_id").(string) - timeout := d.Timeout(schema.TimeoutDelete) - err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - if err != nil { - return err - } - - log.Printf("[DEBUG] Deleting listener %s", d.Id()) - err = resource.Retry(timeout, func() *resource.RetryError { - err = listeners.Delete(networkingClient, d.Id()).ExtractErr() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Error deleting listener %s: %s", d.Id(), err) - } - - // Wait for LoadBalancer to become 
active again before continuing - err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - if err != nil { - return err - } - - // Wait for Listener to delete - err = waitForLBV2Listener(networkingClient, d.Id(), "DELETED", nil, timeout) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_loadbalancer_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_loadbalancer_v2.go deleted file mode 100644 index af715680dfe..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_loadbalancer_v2.go +++ /dev/null @@ -1,287 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" -) - -func resourceLoadBalancerV2() *schema.Resource { - return &schema.Resource{ - Create: resourceLoadBalancerV2Create, - Read: resourceLoadBalancerV2Read, - Update: resourceLoadBalancerV2Update, - Delete: resourceLoadBalancerV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(5 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "vip_subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - 
"tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "vip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "vip_port_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - - "flavor": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "loadbalancer_provider": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "security_group_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceLoadBalancerV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var lbProvider string - if v, ok := d.GetOk("loadbalancer_provider"); ok { - lbProvider = v.(string) - } - - adminStateUp := d.Get("admin_state_up").(bool) - createOpts := loadbalancers.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - VipSubnetID: d.Get("vip_subnet_id").(string), - TenantID: d.Get("tenant_id").(string), - VipAddress: d.Get("vip_address").(string), - AdminStateUp: &adminStateUp, - Flavor: d.Get("flavor").(string), - Provider: lbProvider, - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - lb, err := loadbalancers.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating LoadBalancer: %s", err) - } - - // Wait for LoadBalancer to become active before continuing - timeout := d.Timeout(schema.TimeoutCreate) - err = 
waitForLBV2LoadBalancer(networkingClient, lb.ID, "ACTIVE", nil, timeout) - if err != nil { - return err - } - - // Once the loadbalancer has been created, apply any requested security groups - // to the port that was created behind the scenes. - if err := resourceLoadBalancerV2SecurityGroups(networkingClient, lb.VipPortID, d); err != nil { - return err - } - - // If all has been successful, set the ID on the resource - d.SetId(lb.ID) - - return resourceLoadBalancerV2Read(d, meta) -} - -func resourceLoadBalancerV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - lb, err := loadbalancers.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "loadbalancer") - } - - log.Printf("[DEBUG] Retrieved loadbalancer %s: %#v", d.Id(), lb) - - d.Set("name", lb.Name) - d.Set("description", lb.Description) - d.Set("vip_subnet_id", lb.VipSubnetID) - d.Set("tenant_id", lb.TenantID) - d.Set("vip_address", lb.VipAddress) - d.Set("vip_port_id", lb.VipPortID) - d.Set("admin_state_up", lb.AdminStateUp) - d.Set("flavor", lb.Flavor) - d.Set("loadbalancer_provider", lb.Provider) - d.Set("region", GetRegion(d, config)) - - // Get any security groups on the VIP Port - if lb.VipPortID != "" { - port, err := ports.Get(networkingClient, lb.VipPortID).Extract() - if err != nil { - return err - } - - d.Set("security_group_ids", port.SecurityGroups) - } - - return nil -} - -func resourceLoadBalancerV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts loadbalancers.UpdateOpts - if d.HasChange("name") { - updateOpts.Name = 
d.Get("name").(string) - } - if d.HasChange("description") { - updateOpts.Description = d.Get("description").(string) - } - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - // Wait for LoadBalancer to become active before continuing - timeout := d.Timeout(schema.TimeoutUpdate) - err = waitForLBV2LoadBalancer(networkingClient, d.Id(), "ACTIVE", nil, timeout) - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating loadbalancer %s with options: %#v", d.Id(), updateOpts) - err = resource.Retry(timeout, func() *resource.RetryError { - _, err = loadbalancers.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - // Wait for LoadBalancer to become active before continuing - err = waitForLBV2LoadBalancer(networkingClient, d.Id(), "ACTIVE", nil, timeout) - if err != nil { - return err - } - - // Security Groups get updated separately - if d.HasChange("security_group_ids") { - vipPortID := d.Get("vip_port_id").(string) - if err := resourceLoadBalancerV2SecurityGroups(networkingClient, vipPortID, d); err != nil { - return err - } - } - - return resourceLoadBalancerV2Read(d, meta) -} - -func resourceLoadBalancerV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - log.Printf("[DEBUG] Deleting loadbalancer %s", d.Id()) - timeout := d.Timeout(schema.TimeoutDelete) - err = resource.Retry(timeout, func() *resource.RetryError { - err = loadbalancers.Delete(networkingClient, d.Id()).ExtractErr() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - // Wait for LoadBalancer to become delete - pending := []string{"PENDING_UPDATE", "PENDING_DELETE", "ACTIVE"} - err = 
waitForLBV2LoadBalancer(networkingClient, d.Id(), "DELETED", pending, timeout) - if err != nil { - return err - } - - return nil -} - -func resourceLoadBalancerV2SecurityGroups(networkingClient *gophercloud.ServiceClient, vipPortID string, d *schema.ResourceData) error { - if vipPortID != "" { - if v, ok := d.GetOk("security_group_ids"); ok { - securityGroups := resourcePortSecurityGroupsV2(v.(*schema.Set)) - updateOpts := ports.UpdateOpts{ - SecurityGroups: &securityGroups, - } - - log.Printf("[DEBUG] Adding security groups to loadbalancer "+ - "VIP Port %s: %#v", vipPortID, updateOpts) - - _, err := ports.Update(networkingClient, vipPortID, updateOpts).Extract() - if err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v1.go deleted file mode 100644 index 5b203371fef..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v1.go +++ /dev/null @@ -1,236 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members" -) - -func resourceLBMemberV1() *schema.Resource { - return &schema.Resource{ - Create: resourceLBMemberV1Create, - Read: resourceLBMemberV1Read, - Update: resourceLBMemberV1Update, - Delete: resourceLBMemberV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: 
schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "pool_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "weight": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: false, - Computed: true, - }, - }, - } -} - -func resourceLBMemberV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := members.CreateOpts{ - TenantID: d.Get("tenant_id").(string), - PoolID: d.Get("pool_id").(string), - Address: d.Get("address").(string), - ProtocolPort: d.Get("port").(int), - } - - log.Printf("[DEBUG] OpenStack LB Member Create Options: %#v", createOpts) - m, err := members.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack LB member: %s", err) - } - log.Printf("[INFO] LB member ID: %s", m.ID) - - log.Printf("[DEBUG] Waiting for OpenStack LB member (%s) to become available.", m.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE", "INACTIVE", "CREATED", "DOWN"}, - Refresh: waitForLBMemberActive(networkingClient, m.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetId(m.ID) - - // Due to the way Gophercloud is currently set up, 
AdminStateUp must be set post-create - asu := d.Get("admin_state_up").(bool) - updateOpts := members.UpdateOpts{ - AdminStateUp: &asu, - } - - log.Printf("[DEBUG] OpenStack LB Member Update Options: %#v", createOpts) - m, err = members.Update(networkingClient, m.ID, updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LB member: %s", err) - } - - return resourceLBMemberV1Read(d, meta) -} - -func resourceLBMemberV1Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - m, err := members.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LB member") - } - - log.Printf("[DEBUG] Retrieved OpenStack LB member %s: %+v", d.Id(), m) - - d.Set("address", m.Address) - d.Set("pool_id", m.PoolID) - d.Set("port", m.ProtocolPort) - d.Set("weight", m.Weight) - d.Set("admin_state_up", m.AdminStateUp) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceLBMemberV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts members.UpdateOpts - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - log.Printf("[DEBUG] Updating LB member %s with options: %+v", d.Id(), updateOpts) - - _, err = members.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LB member: %s", err) - } - - return resourceLBMemberV1Read(d, meta) -} - -func resourceLBMemberV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := 
config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - err = members.Delete(networkingClient, d.Id()).ExtractErr() - if err != nil { - CheckDeleted(d, err, "LB member") - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForLBMemberDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LB member: %s", err) - } - - d.SetId("") - return nil -} - -func waitForLBMemberActive(networkingClient *gophercloud.ServiceClient, memberId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - m, err := members.Get(networkingClient, memberId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack LB member: %+v", m) - if m.Status == "ACTIVE" { - return m, "ACTIVE", nil - } - - return m, m.Status, nil - } -} - -func waitForLBMemberDelete(networkingClient *gophercloud.ServiceClient, memberId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LB member %s", memberId) - - m, err := members.Get(networkingClient, memberId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB member %s", memberId) - return m, "DELETED", nil - } - return m, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB member %s still active.", memberId) - return m, "ACTIVE", nil - } - -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v2.go 
deleted file mode 100644 index 13ea2b27af2..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v2.go +++ /dev/null @@ -1,256 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" -) - -func resourceMemberV2() *schema.Resource { - return &schema.Resource{ - Create: resourceMemberV2Create, - Read: resourceMemberV2Read, - Update: resourceMemberV2Update, - Delete: resourceMemberV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "protocol_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "weight": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 1 { - errors = append(errors, fmt.Errorf( - "Only numbers greater than 0 are supported values for 'weight'")) - } - return - }, - }, - - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - - "pool_id": &schema.Schema{ - Type: 
schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceMemberV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - adminStateUp := d.Get("admin_state_up").(bool) - createOpts := pools.CreateMemberOpts{ - Name: d.Get("name").(string), - TenantID: d.Get("tenant_id").(string), - Address: d.Get("address").(string), - ProtocolPort: d.Get("protocol_port").(int), - Weight: d.Get("weight").(int), - AdminStateUp: &adminStateUp, - } - - // Must omit if not set - if v, ok := d.GetOk("subnet_id"); ok { - createOpts.SubnetID = v.(string) - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - - // Wait for LB to become active before continuing - poolID := d.Get("pool_id").(string) - timeout := d.Timeout(schema.TimeoutCreate) - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - log.Printf("[DEBUG] Attempting to create member") - var member *pools.Member - err = resource.Retry(timeout, func() *resource.RetryError { - member, err = pools.CreateMember(networkingClient, poolID, createOpts).Extract() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Error creating member: %s", err) - } - - // Wait for LB to become ACTIVE again - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - d.SetId(member.ID) - - return resourceMemberV2Read(d, meta) -} - -func resourceMemberV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - member, err := 
pools.GetMember(networkingClient, d.Get("pool_id").(string), d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "member") - } - - log.Printf("[DEBUG] Retrieved member %s: %#v", d.Id(), member) - - d.Set("name", member.Name) - d.Set("weight", member.Weight) - d.Set("admin_state_up", member.AdminStateUp) - d.Set("tenant_id", member.TenantID) - d.Set("subnet_id", member.SubnetID) - d.Set("address", member.Address) - d.Set("protocol_port", member.ProtocolPort) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceMemberV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts pools.UpdateMemberOpts - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("weight") { - updateOpts.Weight = d.Get("weight").(int) - } - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - // Wait for LB to become active before continuing - poolID := d.Get("pool_id").(string) - timeout := d.Timeout(schema.TimeoutUpdate) - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating member %s with options: %#v", d.Id(), updateOpts) - err = resource.Retry(timeout, func() *resource.RetryError { - _, err = pools.UpdateMember(networkingClient, poolID, d.Id(), updateOpts).Extract() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Unable to update member %s: %s", d.Id(), err) - } - - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - return resourceMemberV2Read(d, meta) -} - -func resourceMemberV2Delete(d *schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // Wait for Pool to become active before continuing - poolID := d.Get("pool_id").(string) - timeout := d.Timeout(schema.TimeoutDelete) - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - log.Printf("[DEBUG] Attempting to delete member %s", d.Id()) - err = resource.Retry(timeout, func() *resource.RetryError { - err = pools.DeleteMember(networkingClient, poolID, d.Id()).ExtractErr() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - // Wait for LB to become ACTIVE - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v1.go deleted file mode 100644 index db6c5b2617f..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v1.go +++ /dev/null @@ -1,310 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strconv" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" -) - -func resourceLBMonitorV1() *schema.Resource { - return &schema.Resource{ - Create: resourceLBMonitorV1Create, - Read: resourceLBMonitorV1Read, - Update: resourceLBMonitorV1Update, - Delete: resourceLBMonitorV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: 
schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "delay": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: false, - }, - "timeout": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: false, - }, - "max_retries": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: false, - }, - "url_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "http_method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "expected_codes": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - }, - }, - } -} - -func resourceLBMonitorV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := monitors.CreateOpts{ - TenantID: d.Get("tenant_id").(string), - Delay: d.Get("delay").(int), - Timeout: d.Get("timeout").(int), - MaxRetries: d.Get("max_retries").(int), - URLPath: d.Get("url_path").(string), - ExpectedCodes: d.Get("expected_codes").(string), - HTTPMethod: d.Get("http_method").(string), - } - - if v, ok := d.GetOk("type"); ok { - monitorType := resourceLBMonitorV1DetermineType(v.(string)) - createOpts.Type = monitorType - } - - asuRaw := d.Get("admin_state_up").(string) - if asuRaw != "" 
{ - asu, err := strconv.ParseBool(asuRaw) - if err != nil { - return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") - } - createOpts.AdminStateUp = &asu - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - m, err := monitors.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack LB Monitor: %s", err) - } - log.Printf("[INFO] LB Monitor ID: %s", m.ID) - - log.Printf("[DEBUG] Waiting for OpenStack LB Monitor (%s) to become available.", m.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForLBMonitorActive(networkingClient, m.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetId(m.ID) - - return resourceLBMonitorV1Read(d, meta) -} - -func resourceLBMonitorV1Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - m, err := monitors.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LB monitor") - } - - log.Printf("[DEBUG] Retrieved OpenStack LB Monitor %s: %+v", d.Id(), m) - - d.Set("type", m.Type) - d.Set("delay", m.Delay) - d.Set("timeout", m.Timeout) - d.Set("max_retries", m.MaxRetries) - d.Set("tenant_id", m.TenantID) - d.Set("url_path", m.URLPath) - d.Set("http_method", m.HTTPMethod) - d.Set("expected_codes", m.ExpectedCodes) - d.Set("admin_state_up", strconv.FormatBool(m.AdminStateUp)) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceLBMonitorV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := 
config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - updateOpts := monitors.UpdateOpts{ - Delay: d.Get("delay").(int), - Timeout: d.Get("timeout").(int), - MaxRetries: d.Get("max_retries").(int), - URLPath: d.Get("url_path").(string), - HTTPMethod: d.Get("http_method").(string), - ExpectedCodes: d.Get("expected_codes").(string), - } - - if d.HasChange("admin_state_up") { - asuRaw := d.Get("admin_state_up").(string) - if asuRaw != "" { - asu, err := strconv.ParseBool(asuRaw) - if err != nil { - return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") - } - updateOpts.AdminStateUp = &asu - } - } - - log.Printf("[DEBUG] Updating OpenStack LB Monitor %s with options: %+v", d.Id(), updateOpts) - - _, err = monitors.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LB Monitor: %s", err) - } - - return resourceLBMonitorV1Read(d, meta) -} - -func resourceLBMonitorV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForLBMonitorDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LB Monitor: %s", err) - } - - d.SetId("") - return nil -} - -func resourceLBMonitorV1DetermineType(t string) monitors.MonitorType { - var monitorType monitors.MonitorType - switch t { - case "PING": - monitorType = monitors.TypePING - case "TCP": - monitorType = monitors.TypeTCP - case 
"HTTP": - monitorType = monitors.TypeHTTP - case "HTTPS": - monitorType = monitors.TypeHTTPS - } - - return monitorType -} - -func waitForLBMonitorActive(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - m, err := monitors.Get(networkingClient, monitorId).Extract() - if err != nil { - return nil, "", err - } - - // The monitor resource has no Status attribute, so a successful Get is the best we can do - log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m) - return m, "ACTIVE", nil - } -} - -func waitForLBMonitorDelete(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LB Monitor %s", monitorId) - - m, err := monitors.Get(networkingClient, monitorId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId) - return m, "DELETED", nil - } - - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId) - return m, "PENDING", nil - } - } - - return m, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m) - err = monitors.Delete(networkingClient, monitorId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId) - return m, "DELETED", nil - } - - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId) - return m, "PENDING", nil - } - } - - return m, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB Monitor %s still active.", monitorId) - return m, "ACTIVE", nil 
- } - -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v2.go deleted file mode 100644 index 96287aa3729..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v2.go +++ /dev/null @@ -1,280 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" -) - -func resourceMonitorV2() *schema.Resource { - return &schema.Resource{ - Create: resourceMonitorV2Create, - Read: resourceMonitorV2Read, - Update: resourceMonitorV2Update, - Delete: resourceMonitorV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "pool_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "delay": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "timeout": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "max_retries": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "url_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, 
- - "http_method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "expected_codes": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - }, - } -} - -func resourceMonitorV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - adminStateUp := d.Get("admin_state_up").(bool) - createOpts := monitors.CreateOpts{ - PoolID: d.Get("pool_id").(string), - TenantID: d.Get("tenant_id").(string), - Type: d.Get("type").(string), - Delay: d.Get("delay").(int), - Timeout: d.Get("timeout").(int), - MaxRetries: d.Get("max_retries").(int), - URLPath: d.Get("url_path").(string), - HTTPMethod: d.Get("http_method").(string), - ExpectedCodes: d.Get("expected_codes").(string), - Name: d.Get("name").(string), - AdminStateUp: &adminStateUp, - } - - timeout := d.Timeout(schema.TimeoutCreate) - poolID := createOpts.PoolID - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - log.Printf("[DEBUG] Attempting to create monitor") - var monitor *monitors.Monitor - err = resource.Retry(timeout, func() *resource.RetryError { - monitor, err = monitors.Create(networkingClient, createOpts).Extract() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Unable to create monitor: %s", err) - } - - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - d.SetId(monitor.ID) - - return resourceMonitorV2Read(d, meta) -} - -func resourceMonitorV2Read(d *schema.ResourceData, meta interface{}) error { - config 
:= meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - monitor, err := monitors.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "monitor") - } - - log.Printf("[DEBUG] Retrieved monitor %s: %#v", d.Id(), monitor) - - d.Set("tenant_id", monitor.TenantID) - d.Set("type", monitor.Type) - d.Set("delay", monitor.Delay) - d.Set("timeout", monitor.Timeout) - d.Set("max_retries", monitor.MaxRetries) - d.Set("url_path", monitor.URLPath) - d.Set("http_method", monitor.HTTPMethod) - d.Set("expected_codes", monitor.ExpectedCodes) - d.Set("admin_state_up", monitor.AdminStateUp) - d.Set("name", monitor.Name) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceMonitorV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts monitors.UpdateOpts - if d.HasChange("url_path") { - updateOpts.URLPath = d.Get("url_path").(string) - } - if d.HasChange("expected_codes") { - updateOpts.ExpectedCodes = d.Get("expected_codes").(string) - } - if d.HasChange("delay") { - updateOpts.Delay = d.Get("delay").(int) - } - if d.HasChange("timeout") { - updateOpts.Timeout = d.Get("timeout").(int) - } - if d.HasChange("max_retries") { - updateOpts.MaxRetries = d.Get("max_retries").(int) - } - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("http_method") { - updateOpts.HTTPMethod = d.Get("http_method").(string) - } - - log.Printf("[DEBUG] Updating monitor %s with options: %#v", d.Id(), updateOpts) - timeout := 
d.Timeout(schema.TimeoutUpdate) - poolID := d.Get("pool_id").(string) - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - err = resource.Retry(timeout, func() *resource.RetryError { - _, err = monitors.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Unable to update monitor %s: %s", d.Id(), err) - } - - // Wait for LB to become active before continuing - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - return resourceMonitorV2Read(d, meta) -} - -func resourceMonitorV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - log.Printf("[DEBUG] Deleting monitor %s", d.Id()) - timeout := d.Timeout(schema.TimeoutUpdate) - poolID := d.Get("pool_id").(string) - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - err = resource.Retry(timeout, func() *resource.RetryError { - err = monitors.Delete(networkingClient, d.Id()).ExtractErr() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Unable to delete monitor %s: %s", d.Id(), err) - } - - err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v1.go deleted file mode 100644 index 66ed273c39d..00000000000 --- 
a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v1.go +++ /dev/null @@ -1,344 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools" -) - -func resourceLBPoolV1() *schema.Resource { - return &schema.Resource{ - Create: resourceLBPoolV1Create, - Read: resourceLBPoolV1Read, - Update: resourceLBPoolV1Update, - Delete: resourceLBPoolV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "lb_method": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "lb_provider": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "member": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Removed: "Use openstack_lb_member_v1 instead.", - }, - "monitor_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - 
}, - } -} - -func resourceLBPoolV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := pools.CreateOpts{ - Name: d.Get("name").(string), - SubnetID: d.Get("subnet_id").(string), - TenantID: d.Get("tenant_id").(string), - Provider: d.Get("lb_provider").(string), - } - - if v, ok := d.GetOk("protocol"); ok { - protocol := resourceLBPoolV1DetermineProtocol(v.(string)) - createOpts.Protocol = protocol - } - - if v, ok := d.GetOk("lb_method"); ok { - lbMethod := resourceLBPoolV1DetermineLBMethod(v.(string)) - createOpts.LBMethod = lbMethod - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - p, err := pools.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack LB pool: %s", err) - } - log.Printf("[INFO] LB Pool ID: %s", p.ID) - - log.Printf("[DEBUG] Waiting for OpenStack LB pool (%s) to become available.", p.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForLBPoolActive(networkingClient, p.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetId(p.ID) - - if mIDs := resourcePoolMonitorIDsV1(d); mIDs != nil { - for _, mID := range mIDs { - _, err := pools.AssociateMonitor(networkingClient, p.ID, mID).Extract() - if err != nil { - return fmt.Errorf("Error associating monitor (%s) with OpenStack LB pool (%s): %s", mID, p.ID, err) - } - } - } - - return resourceLBPoolV1Read(d, meta) -} - -func resourceLBPoolV1Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err 
!= nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - p, err := pools.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LB pool") - } - - log.Printf("[DEBUG] Retrieved OpenStack LB Pool %s: %+v", d.Id(), p) - - d.Set("name", p.Name) - d.Set("protocol", p.Protocol) - d.Set("subnet_id", p.SubnetID) - d.Set("lb_method", p.LBMethod) - d.Set("lb_provider", p.Provider) - d.Set("tenant_id", p.TenantID) - d.Set("monitor_ids", p.MonitorIDs) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceLBPoolV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts pools.UpdateOpts - // If either option changed, update both. - // Gophercloud complains if one is empty. - if d.HasChange("name") || d.HasChange("lb_method") { - updateOpts.Name = d.Get("name").(string) - - lbMethod := resourceLBPoolV1DetermineLBMethod(d.Get("lb_method").(string)) - updateOpts.LBMethod = lbMethod - } - - log.Printf("[DEBUG] Updating OpenStack LB Pool %s with options: %+v", d.Id(), updateOpts) - - _, err = pools.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LB Pool: %s", err) - } - - if d.HasChange("monitor_ids") { - oldMIDsRaw, newMIDsRaw := d.GetChange("monitor_ids") - oldMIDsSet, newMIDsSet := oldMIDsRaw.(*schema.Set), newMIDsRaw.(*schema.Set) - monitorsToAdd := newMIDsSet.Difference(oldMIDsSet) - monitorsToRemove := oldMIDsSet.Difference(newMIDsSet) - - log.Printf("[DEBUG] Monitors to add: %v", monitorsToAdd) - - log.Printf("[DEBUG] Monitors to remove: %v", monitorsToRemove) - - for _, m := range monitorsToAdd.List() { - _, err := pools.AssociateMonitor(networkingClient, d.Id(), m.(string)).Extract() - if err != nil { - 
return fmt.Errorf("Error associating monitor (%s) with OpenStack server (%s): %s", m.(string), d.Id(), err) - } - log.Printf("[DEBUG] Associated monitor (%s) with pool (%s)", m.(string), d.Id()) - } - - for _, m := range monitorsToRemove.List() { - _, err := pools.DisassociateMonitor(networkingClient, d.Id(), m.(string)).Extract() - if err != nil { - return fmt.Errorf("Error disassociating monitor (%s) from OpenStack server (%s): %s", m.(string), d.Id(), err) - } - log.Printf("[DEBUG] Disassociated monitor (%s) from pool (%s)", m.(string), d.Id()) - } - } - - return resourceLBPoolV1Read(d, meta) -} - -func resourceLBPoolV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // Make sure all monitors are disassociated first - if v, ok := d.GetOk("monitor_ids"); ok { - if monitorIDList, ok := v.([]interface{}); ok { - for _, monitorID := range monitorIDList { - mID := monitorID.(string) - log.Printf("[DEBUG] Attempting to disassociate monitor %s from pool %s", mID, d.Id()) - if res := pools.DisassociateMonitor(networkingClient, d.Id(), mID); res.Err != nil { - return fmt.Errorf("Error disassociating monitor %s from pool %s: %s", mID, d.Id(), err) - } - } - } - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForLBPoolDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LB Pool: %s", err) - } - - d.SetId("") - return nil -} - -func resourcePoolMonitorIDsV1(d *schema.ResourceData) []string { - mIDsRaw := d.Get("monitor_ids").(*schema.Set) - mIDs := make([]string, mIDsRaw.Len()) - for i, raw 
:= range mIDsRaw.List() { - mIDs[i] = raw.(string) - } - return mIDs -} - -func resourceLBPoolV1DetermineProtocol(v string) pools.LBProtocol { - var protocol pools.LBProtocol - switch v { - case "TCP": - protocol = pools.ProtocolTCP - case "HTTP": - protocol = pools.ProtocolHTTP - case "HTTPS": - protocol = pools.ProtocolHTTPS - } - - return protocol -} - -func resourceLBPoolV1DetermineLBMethod(v string) pools.LBMethod { - var lbMethod pools.LBMethod - switch v { - case "ROUND_ROBIN": - lbMethod = pools.LBMethodRoundRobin - case "LEAST_CONNECTIONS": - lbMethod = pools.LBMethodLeastConnections - } - - return lbMethod -} - -func waitForLBPoolActive(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - p, err := pools.Get(networkingClient, poolId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack LB Pool: %+v", p) - if p.Status == "ACTIVE" { - return p, "ACTIVE", nil - } - - return p, p.Status, nil - } -} - -func waitForLBPoolDelete(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LB Pool %s", poolId) - - p, err := pools.Get(networkingClient, poolId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB Pool: %+v", p) - err = pools.Delete(networkingClient, poolId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB Pool %s still active.", poolId) - return p, "ACTIVE", nil - } - -} diff --git 
a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v2.go deleted file mode 100644 index f37542cc22a..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v2.go +++ /dev/null @@ -1,350 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" -) - -func resourcePoolV2() *schema.Resource { - return &schema.Resource{ - Create: resourcePoolV2Create, - Read: resourcePoolV2Read, - Update: resourcePoolV2Update, - Delete: resourcePoolV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "TCP" && value != "HTTP" && value != "HTTPS" { - errors = append(errors, fmt.Errorf( - "Only 'TCP', 'HTTP', and 'HTTPS' are supported values for 'protocol'")) - } - return - }, - }, - - // One of loadbalancer_id or listener_id must be provided - "loadbalancer_id": &schema.Schema{ - 
Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - // One of loadbalancer_id or listener_id must be provided - "listener_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "lb_method": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "ROUND_ROBIN" && value != "LEAST_CONNECTIONS" && value != "SOURCE_IP" { - errors = append(errors, fmt.Errorf( - "Only 'ROUND_ROBIN', 'LEAST_CONNECTIONS', and 'SOURCE_IP' are supported values for 'lb_method'")) - } - return - }, - }, - - "persistence": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "SOURCE_IP" && value != "HTTP_COOKIE" && value != "APP_COOKIE" { - errors = append(errors, fmt.Errorf( - "Only 'SOURCE_IP', 'HTTP_COOKIE', and 'APP_COOKIE' are supported values for 'persistence'")) - } - return - }, - }, - - "cookie_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - }, - } -} - -func resourcePoolV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - adminStateUp := d.Get("admin_state_up").(bool) - var persistence pools.SessionPersistence - if p, ok := d.GetOk("persistence"); ok { - pV := (p.([]interface{}))[0].(map[string]interface{}) - - persistence = pools.SessionPersistence{ - Type: 
pV["type"].(string), - } - - if persistence.Type == "APP_COOKIE" { - if pV["cookie_name"].(string) == "" { - return fmt.Errorf( - "Persistence cookie_name needs to be set if using 'APP_COOKIE' persistence type.") - } else { - persistence.CookieName = pV["cookie_name"].(string) - } - } else { - if pV["cookie_name"].(string) != "" { - return fmt.Errorf( - "Persistence cookie_name can only be set if using 'APP_COOKIE' persistence type.") - } - } - } - - createOpts := pools.CreateOpts{ - TenantID: d.Get("tenant_id").(string), - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Protocol: pools.Protocol(d.Get("protocol").(string)), - LoadbalancerID: d.Get("loadbalancer_id").(string), - ListenerID: d.Get("listener_id").(string), - LBMethod: pools.LBMethod(d.Get("lb_method").(string)), - AdminStateUp: &adminStateUp, - } - - // Must omit if not set - if persistence != (pools.SessionPersistence{}) { - createOpts.Persistence = &persistence - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - - // Wait for LoadBalancer to become active before continuing - timeout := d.Timeout(schema.TimeoutCreate) - lbID := createOpts.LoadbalancerID - listenerID := createOpts.ListenerID - if lbID != "" { - err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - if err != nil { - return err - } - } else if listenerID != "" { - // Wait for Listener to become active before continuing - err = waitForLBV2Listener(networkingClient, listenerID, "ACTIVE", nil, timeout) - if err != nil { - return err - } - } - - log.Printf("[DEBUG] Attempting to create pool") - var pool *pools.Pool - err = resource.Retry(timeout, func() *resource.RetryError { - pool, err = pools.Create(networkingClient, createOpts).Extract() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Error creating pool: %s", err) - } - - // Wait for LoadBalancer to become active before continuing - if lbID != "" { - 
err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - } else { - // Pool exists by now so we can ask for lbID - err = waitForLBV2viaPool(networkingClient, pool.ID, "ACTIVE", timeout) - } - if err != nil { - return err - } - - d.SetId(pool.ID) - - return resourcePoolV2Read(d, meta) -} - -func resourcePoolV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - pool, err := pools.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "pool") - } - - log.Printf("[DEBUG] Retrieved pool %s: %#v", d.Id(), pool) - - d.Set("lb_method", pool.LBMethod) - d.Set("protocol", pool.Protocol) - d.Set("description", pool.Description) - d.Set("tenant_id", pool.TenantID) - d.Set("admin_state_up", pool.AdminStateUp) - d.Set("name", pool.Name) - d.Set("persistence", pool.Persistence) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourcePoolV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts pools.UpdateOpts - if d.HasChange("lb_method") { - updateOpts.LBMethod = pools.LBMethod(d.Get("lb_method").(string)) - } - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("description") { - updateOpts.Description = d.Get("description").(string) - } - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - // Wait for LoadBalancer to become active before continuing - timeout := d.Timeout(schema.TimeoutUpdate) - lbID := d.Get("loadbalancer_id").(string) - if lbID != "" { - err = 
waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - } else { - err = waitForLBV2viaPool(networkingClient, d.Id(), "ACTIVE", timeout) - } - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating pool %s with options: %#v", d.Id(), updateOpts) - err = resource.Retry(timeout, func() *resource.RetryError { - _, err = pools.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Unable to update pool %s: %s", d.Id(), err) - } - - // Wait for LoadBalancer to become active before continuing - if lbID != "" { - err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - } else { - err = waitForLBV2viaPool(networkingClient, d.Id(), "ACTIVE", timeout) - } - if err != nil { - return err - } - - return resourcePoolV2Read(d, meta) -} - -func resourcePoolV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // Wait for LoadBalancer to become active before continuing - timeout := d.Timeout(schema.TimeoutDelete) - lbID := d.Get("loadbalancer_id").(string) - if lbID != "" { - err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - if err != nil { - return err - } - } - - log.Printf("[DEBUG] Attempting to delete pool %s", d.Id()) - err = resource.Retry(timeout, func() *resource.RetryError { - err = pools.Delete(networkingClient, d.Id()).ExtractErr() - if err != nil { - return checkForRetryableError(err) - } - return nil - }) - - if lbID != "" { - err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) - } else { - // Wait for Pool to delete - err = waitForLBV2Pool(networkingClient, d.Id(), "DELETED", nil, timeout) - } - if err != nil { - return err - } - - return nil 
-} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_vip_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_vip_v1.go deleted file mode 100644 index 39acb5f4816..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_vip_v1.go +++ /dev/null @@ -1,401 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceLBVipV1() *schema.Resource { - return &schema.Resource{ - Create: resourceLBVipV1Create, - Read: resourceLBVipV1Read, - Update: resourceLBVipV1Update, - Delete: resourceLBVipV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "pool_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - 
"address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: false, - }, - "persistence": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: false, - }, - "conn_limit": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: false, - }, - "port_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - ForceNew: false, - }, - "floating_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - ForceNew: false, - }, - }, - } -} - -func resourceLBVipV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := vips.CreateOpts{ - Name: d.Get("name").(string), - SubnetID: d.Get("subnet_id").(string), - Protocol: d.Get("protocol").(string), - ProtocolPort: d.Get("port").(int), - PoolID: d.Get("pool_id").(string), - TenantID: d.Get("tenant_id").(string), - Address: d.Get("address").(string), - Description: d.Get("description").(string), - Persistence: resourceVipPersistenceV1(d), - ConnLimit: gophercloud.MaybeInt(d.Get("conn_limit").(int)), - } - - asu := d.Get("admin_state_up").(bool) - createOpts.AdminStateUp = &asu - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - p, err := vips.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack LB VIP: %s", err) - } - log.Printf("[INFO] LB VIP ID: %s", p.ID) - - log.Printf("[DEBUG] Waiting for OpenStack LB VIP (%s) to become available.", p.ID) - - stateConf := &resource.StateChangeConf{ - Pending: 
[]string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForLBVIPActive(networkingClient, p.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - floatingIP := d.Get("floating_ip").(string) - if floatingIP != "" { - lbVipV1AssignFloatingIP(floatingIP, p.PortID, networkingClient) - } - - d.SetId(p.ID) - - return resourceLBVipV1Read(d, meta) -} - -func resourceLBVipV1Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - p, err := vips.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LB VIP") - } - - log.Printf("[DEBUG] Retrieved OpenStack LB VIP %s: %+v", d.Id(), p) - - d.Set("name", p.Name) - d.Set("subnet_id", p.SubnetID) - d.Set("protocol", p.Protocol) - d.Set("port", p.ProtocolPort) - d.Set("pool_id", p.PoolID) - d.Set("port_id", p.PortID) - d.Set("tenant_id", p.TenantID) - d.Set("address", p.Address) - d.Set("description", p.Description) - d.Set("conn_limit", p.ConnLimit) - d.Set("admin_state_up", p.AdminStateUp) - - // Set the persistence method being used - persistence := make(map[string]interface{}) - if p.Persistence.Type != "" { - persistence["type"] = p.Persistence.Type - } - if p.Persistence.CookieName != "" { - persistence["cookie_name"] = p.Persistence.CookieName - } - d.Set("persistence", persistence) - - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceLBVipV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts 
vips.UpdateOpts - if d.HasChange("name") { - v := d.Get("name").(string) - updateOpts.Name = &v - } - - if d.HasChange("pool_id") { - v := d.Get("pool_id").(string) - updateOpts.PoolID = &v - } - - if d.HasChange("description") { - v := d.Get("description").(string) - updateOpts.Description = &v - } - - if d.HasChange("conn_limit") { - updateOpts.ConnLimit = gophercloud.MaybeInt(d.Get("conn_limit").(int)) - } - - if d.HasChange("floating_ip") { - portID := d.Get("port_id").(string) - - // Searching for a floating IP assigned to the VIP - listOpts := floatingips.ListOpts{ - PortID: portID, - } - page, err := floatingips.List(networkingClient, listOpts).AllPages() - if err != nil { - return err - } - - fips, err := floatingips.ExtractFloatingIPs(page) - if err != nil { - return err - } - - // If a floating IP is found we unassign it - if len(fips) == 1 { - portID := "" - updateOpts := floatingips.UpdateOpts{ - PortID: &portID, - } - if err = floatingips.Update(networkingClient, fips[0].ID, updateOpts).Err; err != nil { - return err - } - } - - // Assign the updated floating IP - floatingIP := d.Get("floating_ip").(string) - if floatingIP != "" { - lbVipV1AssignFloatingIP(floatingIP, portID, networkingClient) - } - } - - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - // Persistence has to be included, even if it hasn't changed. 
- updateOpts.Persistence = resourceVipPersistenceV1(d) - - log.Printf("[DEBUG] Updating OpenStack LB VIP %s with options: %+v", d.Id(), updateOpts) - - _, err = vips.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LB VIP: %s", err) - } - - return resourceLBVipV1Read(d, meta) -} - -func resourceLBVipV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForLBVIPDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LB VIP: %s", err) - } - - d.SetId("") - return nil -} - -func resourceVipPersistenceV1(d *schema.ResourceData) *vips.SessionPersistence { - rawP := d.Get("persistence").(interface{}) - rawMap := rawP.(map[string]interface{}) - if len(rawMap) != 0 { - p := vips.SessionPersistence{} - if t, ok := rawMap["type"]; ok { - p.Type = t.(string) - } - if c, ok := rawMap["cookie_name"]; ok { - p.CookieName = c.(string) - } - return &p - } - return nil -} - -func lbVipV1AssignFloatingIP(floatingIP, portID string, networkingClient *gophercloud.ServiceClient) error { - log.Printf("[DEBUG] Assigning floating IP %s to VIP %s", floatingIP, portID) - - listOpts := floatingips.ListOpts{ - FloatingIP: floatingIP, - } - page, err := floatingips.List(networkingClient, listOpts).AllPages() - if err != nil { - return err - } - - fips, err := floatingips.ExtractFloatingIPs(page) - if err != nil { - return err - } - if len(fips) != 1 { - return fmt.Errorf("Unable to retrieve floating IP 
'%s'", floatingIP) - } - - updateOpts := floatingips.UpdateOpts{ - PortID: &portID, - } - if err = floatingips.Update(networkingClient, fips[0].ID, updateOpts).Err; err != nil { - return err - } - - return nil -} - -func waitForLBVIPActive(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - p, err := vips.Get(networkingClient, vipId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack LB VIP: %+v", p) - if p.Status == "ACTIVE" { - return p, "ACTIVE", nil - } - - return p, p.Status, nil - } -} - -func waitForLBVIPDelete(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LB VIP %s", vipId) - - p, err := vips.Get(networkingClient, vipId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB VIP: %+v", p) - err = vips.Delete(networkingClient, vipId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB VIP %s still active.", vipId) - return p, "ACTIVE", nil - } - -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_floatingip_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_floatingip_v2.go deleted file mode 100644 index 8f2291021ae..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_floatingip_v2.go +++ /dev/null @@ -1,298 
+0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/pagination" -) - -func resourceNetworkingFloatingIPV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkFloatingIPV2Create, - Read: resourceNetworkFloatingIPV2Read, - Update: resourceNetworkFloatingIPV2Update, - Delete: resourceNetworkFloatingIPV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "pool": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_POOL_NAME", nil), - }, - "port_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "fixed_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkFloatingIPV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack network client: 
%s", err) - } - - poolID, err := getNetworkID(d, meta, d.Get("pool").(string)) - if err != nil { - return fmt.Errorf("Error retrieving floating IP pool name: %s", err) - } - if len(poolID) == 0 { - return fmt.Errorf("No network found with name: %s", d.Get("pool").(string)) - } - createOpts := FloatingIPCreateOpts{ - floatingips.CreateOpts{ - FloatingNetworkID: poolID, - PortID: d.Get("port_id").(string), - TenantID: d.Get("tenant_id").(string), - FixedIP: d.Get("fixed_ip").(string), - }, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - floatingIP, err := floatingips.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error allocating floating IP: %s", err) - } - - log.Printf("[DEBUG] Waiting for OpenStack Neutron Floating IP (%s) to become available.", floatingIP.ID) - - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Refresh: waitForFloatingIPActive(networkingClient, floatingIP.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(floatingIP.ID) - - return resourceNetworkFloatingIPV2Read(d, meta) -} - -func resourceNetworkFloatingIPV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack network client: %s", err) - } - - floatingIP, err := floatingips.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "floating IP") - } - - d.Set("address", floatingIP.FloatingIP) - d.Set("port_id", floatingIP.PortID) - d.Set("fixed_ip", floatingIP.FixedIP) - poolName, err := getNetworkName(d, meta, floatingIP.FloatingNetworkID) - if err != nil { - return fmt.Errorf("Error retrieving floating IP pool name: %s", err) - } - d.Set("pool", poolName) - d.Set("tenant_id", floatingIP.TenantID) 
- - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceNetworkFloatingIPV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack network client: %s", err) - } - - var updateOpts floatingips.UpdateOpts - - if d.HasChange("port_id") { - portID := d.Get("port_id").(string) - updateOpts.PortID = &portID - } - - log.Printf("[DEBUG] Update Options: %#v", updateOpts) - - _, err = floatingips.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating floating IP: %s", err) - } - - return resourceNetworkFloatingIPV2Read(d, meta) -} - -func resourceNetworkFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack network client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForFloatingIPDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Floating IP: %s", err) - } - - d.SetId("") - return nil -} - -func getNetworkID(d *schema.ResourceData, meta interface{}, networkName string) (string, error) { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return "", fmt.Errorf("Error creating OpenStack network client: %s", err) - } - - opts := networks.ListOpts{Name: networkName} - pager := networks.List(networkingClient, opts) - networkID := "" - - err = pager.EachPage(func(page pagination.Page) (bool, error) { - networkList, 
err := networks.ExtractNetworks(page) - if err != nil { - return false, err - } - - for _, n := range networkList { - if n.Name == networkName { - networkID = n.ID - return false, nil - } - } - - return true, nil - }) - - return networkID, err -} - -func getNetworkName(d *schema.ResourceData, meta interface{}, networkID string) (string, error) { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return "", fmt.Errorf("Error creating OpenStack network client: %s", err) - } - - opts := networks.ListOpts{ID: networkID} - pager := networks.List(networkingClient, opts) - networkName := "" - - err = pager.EachPage(func(page pagination.Page) (bool, error) { - networkList, err := networks.ExtractNetworks(page) - if err != nil { - return false, err - } - - for _, n := range networkList { - if n.ID == networkID { - networkName = n.Name - return false, nil - } - } - - return true, nil - }) - - return networkName, err -} - -func waitForFloatingIPActive(networkingClient *gophercloud.ServiceClient, fId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - f, err := floatingips.Get(networkingClient, fId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Neutron Floating IP: %+v", f) - if f.Status == "DOWN" || f.Status == "ACTIVE" { - return f, "ACTIVE", nil - } - - return f, "", nil - } -} - -func waitForFloatingIPDelete(networkingClient *gophercloud.ServiceClient, fId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Floating IP %s.\n", fId) - - f, err := floatingips.Get(networkingClient, fId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Floating IP %s", fId) - return f, "DELETED", nil - } - return f, "ACTIVE", err - } - - err = floatingips.Delete(networkingClient, 
fId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Floating IP %s", fId) - return f, "DELETED", nil - } - return f, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Floating IP %s still active.\n", fId) - return f, "ACTIVE", nil - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_network_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_network_v2.go deleted file mode 100644 index 1d24f1cc53a..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_network_v2.go +++ /dev/null @@ -1,326 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strconv" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" -) - -func resourceNetworkingNetworkV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingNetworkV2Create, - Read: resourceNetworkingNetworkV2Read, - Update: resourceNetworkingNetworkV2Update, - Delete: resourceNetworkingNetworkV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: 
false, - Computed: true, - }, - "shared": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "segments": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "physical_network": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "network_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "segmentation_id": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkingNetworkV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := NetworkCreateOpts{ - networks.CreateOpts{ - Name: d.Get("name").(string), - TenantID: d.Get("tenant_id").(string), - }, - MapValueSpecs(d), - } - - asuRaw := d.Get("admin_state_up").(string) - if asuRaw != "" { - asu, err := strconv.ParseBool(asuRaw) - if err != nil { - return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") - } - createOpts.AdminStateUp = &asu - } - - sharedRaw := d.Get("shared").(string) - if sharedRaw != "" { - shared, err := strconv.ParseBool(sharedRaw) - if err != nil { - return fmt.Errorf("shared, if provided, must be either 'true' or 'false': %v", err) - } - createOpts.Shared = &shared - } - - segments := resourceNetworkingNetworkV2Segments(d) - - n := &networks.Network{} - if len(segments) > 0 { - providerCreateOpts := provider.CreateOptsExt{ - CreateOptsBuilder: createOpts, - 
Segments: segments, - } - log.Printf("[DEBUG] Create Options: %#v", providerCreateOpts) - n, err = networks.Create(networkingClient, providerCreateOpts).Extract() - } else { - log.Printf("[DEBUG] Create Options: %#v", createOpts) - n, err = networks.Create(networkingClient, createOpts).Extract() - } - - if err != nil { - return fmt.Errorf("Error creating OpenStack Neutron network: %s", err) - } - - log.Printf("[INFO] Network ID: %s", n.ID) - - log.Printf("[DEBUG] Waiting for Network (%s) to become available", n.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"BUILD"}, - Target: []string{"ACTIVE"}, - Refresh: waitForNetworkActive(networkingClient, n.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(n.ID) - - return resourceNetworkingNetworkV2Read(d, meta) -} - -func resourceNetworkingNetworkV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - n, err := networks.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "network") - } - - log.Printf("[DEBUG] Retrieved Network %s: %+v", d.Id(), n) - - d.Set("name", n.Name) - d.Set("admin_state_up", strconv.FormatBool(n.AdminStateUp)) - d.Set("shared", strconv.FormatBool(n.Shared)) - d.Set("tenant_id", n.TenantID) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceNetworkingNetworkV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts networks.UpdateOpts - if d.HasChange("name") { - updateOpts.Name = 
d.Get("name").(string) - } - if d.HasChange("admin_state_up") { - asuRaw := d.Get("admin_state_up").(string) - if asuRaw != "" { - asu, err := strconv.ParseBool(asuRaw) - if err != nil { - return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") - } - updateOpts.AdminStateUp = &asu - } - } - if d.HasChange("shared") { - sharedRaw := d.Get("shared").(string) - if sharedRaw != "" { - shared, err := strconv.ParseBool(sharedRaw) - if err != nil { - return fmt.Errorf("shared, if provided, must be either 'true' or 'false': %v", err) - } - updateOpts.Shared = &shared - } - } - - log.Printf("[DEBUG] Updating Network %s with options: %+v", d.Id(), updateOpts) - - _, err = networks.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Network: %s", err) - } - - return resourceNetworkingNetworkV2Read(d, meta) -} - -func resourceNetworkingNetworkV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForNetworkDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Network: %s", err) - } - - d.SetId("") - return nil -} - -func resourceNetworkingNetworkV2Segments(d *schema.ResourceData) (providerSegments []provider.Segment) { - segments := d.Get("segments").([]interface{}) - for _, v := range segments { - var segment provider.Segment - segmentMap := v.(map[string]interface{}) - - if v, ok := segmentMap["physical_network"].(string); ok { - segment.PhysicalNetwork = v - } - - 
if v, ok := segmentMap["network_type"].(string); ok { - segment.NetworkType = v - } - - if v, ok := segmentMap["segmentation_id"].(int); ok { - segment.SegmentationID = v - } - - providerSegments = append(providerSegments, segment) - } - return -} - -func waitForNetworkActive(networkingClient *gophercloud.ServiceClient, networkId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - n, err := networks.Get(networkingClient, networkId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Neutron Network: %+v", n) - if n.Status == "DOWN" || n.Status == "ACTIVE" { - return n, "ACTIVE", nil - } - - return n, n.Status, nil - } -} - -func waitForNetworkDelete(networkingClient *gophercloud.ServiceClient, networkId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Network %s.\n", networkId) - - n, err := networks.Get(networkingClient, networkId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Network %s", networkId) - return n, "DELETED", nil - } - return n, "ACTIVE", err - } - - err = networks.Delete(networkingClient, networkId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Network %s", networkId) - return n, "DELETED", nil - } - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - return n, "ACTIVE", nil - } - } - return n, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Network %s still active.\n", networkId) - return n, "ACTIVE", nil - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_port_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_port_v2.go deleted file 
mode 100644 index 4420a563f60..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_port_v2.go +++ /dev/null @@ -1,467 +0,0 @@ -package openstack - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" -) - -func resourceNetworkingPortV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingPortV2Create, - Read: resourceNetworkingPortV2Read, - Update: resourceNetworkingPortV2Update, - Delete: resourceNetworkingPortV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "network_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: false, - Computed: true, - }, - "mac_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "device_owner": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "security_group_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: 
schema.HashString, - }, - "no_security_groups": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: false, - }, - "device_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "fixed_ip": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: false, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "allowed_address_pairs": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Set: allowedAddressPairsHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "mac_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - "all_fixed_ips": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "all_security_group_ids": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceNetworkingPortV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var securityGroups []string - v := d.Get("security_group_ids") - securityGroups = resourcePortSecurityGroupsV2(v.(*schema.Set)) - noSecurityGroups := d.Get("no_security_groups").(bool) - - // Check and make sure an invalid security group configuration wasn't given. 
- if noSecurityGroups && len(securityGroups) > 0 { - return fmt.Errorf("Cannot have both no_security_groups and security_group_ids set") - } - - createOpts := PortCreateOpts{ - ports.CreateOpts{ - Name: d.Get("name").(string), - AdminStateUp: resourcePortAdminStateUpV2(d), - NetworkID: d.Get("network_id").(string), - MACAddress: d.Get("mac_address").(string), - TenantID: d.Get("tenant_id").(string), - DeviceOwner: d.Get("device_owner").(string), - DeviceID: d.Get("device_id").(string), - FixedIPs: resourcePortFixedIpsV2(d), - AllowedAddressPairs: resourceAllowedAddressPairsV2(d), - }, - MapValueSpecs(d), - } - - if noSecurityGroups { - securityGroups = []string{} - createOpts.SecurityGroups = &securityGroups - } - - // Only set SecurityGroups if one was specified. - // Otherwise this would mimic the no_security_groups action. - if len(securityGroups) > 0 { - createOpts.SecurityGroups = &securityGroups - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - p, err := ports.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack Neutron network: %s", err) - } - log.Printf("[INFO] Network ID: %s", p.ID) - - log.Printf("[DEBUG] Waiting for OpenStack Neutron Port (%s) to become available.", p.ID) - - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Refresh: waitForNetworkPortActive(networkingClient, p.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(p.ID) - - return resourceNetworkingPortV2Read(d, meta) -} - -func resourceNetworkingPortV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - p, err := ports.Get(networkingClient, d.Id()).Extract() - if err != nil { - return 
CheckDeleted(d, err, "port") - } - - log.Printf("[DEBUG] Retrieved Port %s: %+v", d.Id(), p) - - d.Set("name", p.Name) - d.Set("admin_state_up", p.AdminStateUp) - d.Set("network_id", p.NetworkID) - d.Set("mac_address", p.MACAddress) - d.Set("tenant_id", p.TenantID) - d.Set("device_owner", p.DeviceOwner) - d.Set("device_id", p.DeviceID) - - // Create a slice of all returned Fixed IPs. - // This will be in the order returned by the API, - // which is usually alpha-numeric. - var ips []string - for _, ipObject := range p.FixedIPs { - ips = append(ips, ipObject.IPAddress) - } - d.Set("all_fixed_ips", ips) - - // Set all security groups. - // This can be different from what the user specified since - // the port can have the "default" group automatically applied. - d.Set("all_security_group_ids", p.SecurityGroups) - - // Convert AllowedAddressPairs to list of map - var pairs []map[string]interface{} - for _, pairObject := range p.AllowedAddressPairs { - pair := make(map[string]interface{}) - pair["ip_address"] = pairObject.IPAddress - pair["mac_address"] = pairObject.MACAddress - pairs = append(pairs, pair) - } - d.Set("allowed_address_pairs", pairs) - - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceNetworkingPortV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - v := d.Get("security_group_ids").(*schema.Set) - securityGroups := resourcePortSecurityGroupsV2(v) - noSecurityGroups := d.Get("no_security_groups").(bool) - - // Check and make sure an invalid security group configuration wasn't given. 
- if noSecurityGroups && len(securityGroups) > 0 { - return fmt.Errorf("Cannot have both no_security_groups and security_group_ids set") - } - - var hasChange bool - var updateOpts ports.UpdateOpts - - if d.HasChange("allowed_address_pairs") { - hasChange = true - aap := resourceAllowedAddressPairsV2(d) - updateOpts.AllowedAddressPairs = &aap - } - - if d.HasChange("no_security_groups") { - if noSecurityGroups { - hasChange = true - v := []string{} - updateOpts.SecurityGroups = &v - } - } - - if d.HasChange("security_group_ids") { - hasChange = true - sgs := d.Get("security_group_ids").(*schema.Set) - securityGroups := resourcePortSecurityGroupsV2(sgs) - updateOpts.SecurityGroups = &securityGroups - } - - if d.HasChange("name") { - hasChange = true - updateOpts.Name = d.Get("name").(string) - } - - if d.HasChange("admin_state_up") { - hasChange = true - updateOpts.AdminStateUp = resourcePortAdminStateUpV2(d) - } - - if d.HasChange("device_owner") { - hasChange = true - updateOpts.DeviceOwner = d.Get("device_owner").(string) - } - - if d.HasChange("device_id") { - hasChange = true - updateOpts.DeviceID = d.Get("device_id").(string) - } - - if d.HasChange("fixed_ip") { - hasChange = true - updateOpts.FixedIPs = resourcePortFixedIpsV2(d) - } - - if hasChange { - log.Printf("[DEBUG] Updating Port %s with options: %+v", d.Id(), updateOpts) - - _, err = ports.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Network: %s", err) - } - } - - return resourceNetworkingPortV2Read(d, meta) -} - -func resourceNetworkingPortV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: 
waitForNetworkPortDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Network: %s", err) - } - - d.SetId("") - return nil -} - -func resourcePortSecurityGroupsV2(v *schema.Set) []string { - var securityGroups []string - for _, v := range v.List() { - securityGroups = append(securityGroups, v.(string)) - } - return securityGroups -} - -func resourcePortFixedIpsV2(d *schema.ResourceData) interface{} { - rawIP := d.Get("fixed_ip").([]interface{}) - - if len(rawIP) == 0 { - return nil - } - - ip := make([]ports.IP, len(rawIP)) - for i, raw := range rawIP { - rawMap := raw.(map[string]interface{}) - ip[i] = ports.IP{ - SubnetID: rawMap["subnet_id"].(string), - IPAddress: rawMap["ip_address"].(string), - } - } - return ip -} - -func resourceAllowedAddressPairsV2(d *schema.ResourceData) []ports.AddressPair { - // ports.AddressPair - rawPairs := d.Get("allowed_address_pairs").(*schema.Set).List() - - pairs := make([]ports.AddressPair, len(rawPairs)) - for i, raw := range rawPairs { - rawMap := raw.(map[string]interface{}) - pairs[i] = ports.AddressPair{ - IPAddress: rawMap["ip_address"].(string), - MACAddress: rawMap["mac_address"].(string), - } - } - return pairs -} - -func resourcePortAdminStateUpV2(d *schema.ResourceData) *bool { - value := false - - if raw, ok := d.GetOk("admin_state_up"); ok && raw == true { - value = true - } - - return &value -} - -func allowedAddressPairsHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s", m["ip_address"].(string))) - - return hashcode.String(buf.String()) -} - -func waitForNetworkPortActive(networkingClient *gophercloud.ServiceClient, portId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - p, err := ports.Get(networkingClient, 
portId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Neutron Port: %+v", p) - if p.Status == "DOWN" || p.Status == "ACTIVE" { - return p, "ACTIVE", nil - } - - return p, p.Status, nil - } -} - -func waitForNetworkPortDelete(networkingClient *gophercloud.ServiceClient, portId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Neutron Port %s", portId) - - p, err := ports.Get(networkingClient, portId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Port %s", portId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - err = ports.Delete(networkingClient, portId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Port %s", portId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Port %s still active.\n", portId) - return p, "ACTIVE", nil - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_interface_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_interface_v2.go deleted file mode 100644 index 4201b9617a4..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_interface_v2.go +++ /dev/null @@ -1,208 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" -) - -func 
resourceNetworkingRouterInterfaceV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingRouterInterfaceV2Create, - Read: resourceNetworkingRouterInterfaceV2Read, - Delete: resourceNetworkingRouterInterfaceV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "router_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "port_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkingRouterInterfaceV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := routers.AddInterfaceOpts{ - SubnetID: d.Get("subnet_id").(string), - PortID: d.Get("port_id").(string), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - n, err := routers.AddInterface(networkingClient, d.Get("router_id").(string), createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack Neutron router interface: %s", err) - } - log.Printf("[INFO] Router interface Port ID: %s", n.PortID) - - log.Printf("[DEBUG] Waiting for Router Interface (%s) to become available", n.PortID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"BUILD", "PENDING_CREATE", "PENDING_UPDATE"}, - Target: []string{"ACTIVE"}, - Refresh: 
waitForRouterInterfaceActive(networkingClient, n.PortID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(n.PortID) - - return resourceNetworkingRouterInterfaceV2Read(d, meta) -} - -func resourceNetworkingRouterInterfaceV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - n, err := ports.Get(networkingClient, d.Id()).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving OpenStack Neutron Router Interface: %s", err) - } - - log.Printf("[DEBUG] Retrieved Router Interface %s: %+v", d.Id(), n) - - d.Set("router_id", n.DeviceID) - d.Set("port_id", n.ID) - - // Set the subnet ID by looking at thet port's FixedIPs. - // If there's more than one FixedIP, do not set the subnet - // as it's not possible to confidently determine which subnet - // belongs to this interface. However, that situation should - // not happen. 
- if len(n.FixedIPs) == 1 { - d.Set("subnet_id", n.FixedIPs[0].SubnetID) - } - - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceNetworkingRouterInterfaceV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForRouterInterfaceDelete(networkingClient, d), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Router Interface: %s", err) - } - - d.SetId("") - return nil -} - -func waitForRouterInterfaceActive(networkingClient *gophercloud.ServiceClient, rId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - r, err := ports.Get(networkingClient, rId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Neutron Router Interface: %+v", r) - return r, r.Status, nil - } -} - -func waitForRouterInterfaceDelete(networkingClient *gophercloud.ServiceClient, d *schema.ResourceData) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - routerId := d.Get("router_id").(string) - routerInterfaceId := d.Id() - - log.Printf("[DEBUG] Attempting to delete OpenStack Router Interface %s.", routerInterfaceId) - - removeOpts := routers.RemoveInterfaceOpts{ - SubnetID: d.Get("subnet_id").(string), - PortID: d.Get("port_id").(string), - } - - r, err := ports.Get(networkingClient, routerInterfaceId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Router Interface %s", routerInterfaceId) - return r, "DELETED", nil - } - 
return r, "ACTIVE", err - } - - _, err = routers.RemoveInterface(networkingClient, routerId, removeOpts).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Router Interface %s.", routerInterfaceId) - return r, "DELETED", nil - } - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - log.Printf("[DEBUG] Router Interface %s is still in use.", routerInterfaceId) - return r, "ACTIVE", nil - } - } - - return r, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Router Interface %s is still active.", routerInterfaceId) - return r, "ACTIVE", nil - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_route_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_route_v2.go deleted file mode 100644 index c05e971f6e8..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_route_v2.go +++ /dev/null @@ -1,225 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" -) - -func resourceNetworkingRouterRouteV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingRouterRouteV2Create, - Read: resourceNetworkingRouterRouteV2Read, - Delete: resourceNetworkingRouterRouteV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "router_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "destination_cidr": 
&schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "next_hop": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkingRouterRouteV2Create(d *schema.ResourceData, meta interface{}) error { - - routerId := d.Get("router_id").(string) - osMutexKV.Lock(routerId) - defer osMutexKV.Unlock(routerId) - - var destCidr string = d.Get("destination_cidr").(string) - var nextHop string = d.Get("next_hop").(string) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - n, err := routers.Get(networkingClient, routerId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) - } - - var updateOpts routers.UpdateOpts - var routeExists bool = false - - var rts []routers.Route = n.Routes - for _, r := range rts { - - if r.DestinationCIDR == destCidr && r.NextHop == nextHop { - routeExists = true - break - } - } - - if !routeExists { - - if destCidr != "" && nextHop != "" { - r := routers.Route{DestinationCIDR: destCidr, NextHop: nextHop} - log.Printf( - "[INFO] Adding route %s", r) - rts = append(rts, r) - } - - updateOpts.Routes = rts - - log.Printf("[DEBUG] Updating Router %s with options: %+v", routerId, updateOpts) - - _, err = routers.Update(networkingClient, routerId, updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err) - } - d.SetId(fmt.Sprintf("%s-route-%s-%s", routerId, destCidr, nextHop)) - - } else { - log.Printf("[DEBUG] Router %s has route already", routerId) - } - - return resourceNetworkingRouterRouteV2Read(d, meta) -} - -func resourceNetworkingRouterRouteV2Read(d *schema.ResourceData, meta interface{}) error { - - routerId := 
d.Get("router_id").(string) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - destCidr := d.Get("destination_cidr").(string) - nextHop := d.Get("next_hop").(string) - - routeIDParts := []string{} - if d.Id() != "" && strings.Contains(d.Id(), "-route-") { - routeIDParts = strings.Split(d.Id(), "-route-") - routeLastIDParts := strings.Split(routeIDParts[1], "-") - - if routerId == "" { - routerId = routeIDParts[0] - d.Set("router_id", routerId) - } - if destCidr == "" { - destCidr = routeLastIDParts[0] - } - if nextHop == "" { - nextHop = routeLastIDParts[1] - } - } - - n, err := routers.Get(networkingClient, routerId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) - } - - log.Printf("[DEBUG] Retrieved Router %s: %+v", routerId, n) - - d.Set("next_hop", "") - d.Set("destination_cidr", "") - - for _, r := range n.Routes { - - if r.DestinationCIDR == destCidr && r.NextHop == nextHop { - d.Set("destination_cidr", destCidr) - d.Set("next_hop", nextHop) - break - } - } - - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceNetworkingRouterRouteV2Delete(d *schema.ResourceData, meta interface{}) error { - - routerId := d.Get("router_id").(string) - osMutexKV.Lock(routerId) - defer osMutexKV.Unlock(routerId) - - config := meta.(*Config) - - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - n, err := routers.Get(networkingClient, routerId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return nil - } - - return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) - } - - var updateOpts 
routers.UpdateOpts - - var destCidr string = d.Get("destination_cidr").(string) - var nextHop string = d.Get("next_hop").(string) - - var oldRts []routers.Route = n.Routes - var newRts []routers.Route - - for _, r := range oldRts { - - if r.DestinationCIDR != destCidr || r.NextHop != nextHop { - newRts = append(newRts, r) - } - } - - if len(oldRts) != len(newRts) { - r := routers.Route{DestinationCIDR: destCidr, NextHop: nextHop} - log.Printf( - "[INFO] Deleting route %s", r) - updateOpts.Routes = newRts - - log.Printf("[DEBUG] Updating Router %s with options: %+v", routerId, updateOpts) - - _, err = routers.Update(networkingClient, routerId, updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err) - } - } else { - return fmt.Errorf("Route did not exist already") - } - - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_v2.go deleted file mode 100644 index 1d759622fef..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_v2.go +++ /dev/null @@ -1,262 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" -) - -func resourceNetworkingRouterV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingRouterV2Create, - Read: resourceNetworkingRouterV2Read, - Update: resourceNetworkingRouterV2Update, - Delete: resourceNetworkingRouterV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: 
schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: false, - Computed: true, - }, - "distributed": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Computed: true, - }, - "external_gateway": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkingRouterV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := RouterCreateOpts{ - routers.CreateOpts{ - Name: d.Get("name").(string), - TenantID: d.Get("tenant_id").(string), - }, - MapValueSpecs(d), - } - - if asuRaw, ok := d.GetOk("admin_state_up"); ok { - asu := asuRaw.(bool) - createOpts.AdminStateUp = &asu - } - - if dRaw, ok := d.GetOk("distributed"); ok { - d := dRaw.(bool) - createOpts.Distributed = &d - } - - externalGateway := d.Get("external_gateway").(string) - if externalGateway != "" { - gatewayInfo := routers.GatewayInfo{ - NetworkID: externalGateway, - } - createOpts.GatewayInfo = &gatewayInfo - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - n, err := routers.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack Neutron 
router: %s", err) - } - log.Printf("[INFO] Router ID: %s", n.ID) - - log.Printf("[DEBUG] Waiting for OpenStack Neutron Router (%s) to become available", n.ID) - stateConf := &resource.StateChangeConf{ - Pending: []string{"BUILD", "PENDING_CREATE", "PENDING_UPDATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForRouterActive(networkingClient, n.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(n.ID) - - return resourceNetworkingRouterV2Read(d, meta) -} - -func resourceNetworkingRouterV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - n, err := routers.Get(networkingClient, d.Id()).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) - } - - log.Printf("[DEBUG] Retrieved Router %s: %+v", d.Id(), n) - - d.Set("name", n.Name) - d.Set("admin_state_up", n.AdminStateUp) - d.Set("distributed", n.Distributed) - d.Set("tenant_id", n.TenantID) - d.Set("external_gateway", n.GatewayInfo.NetworkID) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceNetworkingRouterV2Update(d *schema.ResourceData, meta interface{}) error { - routerId := d.Id() - osMutexKV.Lock(routerId) - defer osMutexKV.Unlock(routerId) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts routers.UpdateOpts - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - 
updateOpts.AdminStateUp = &asu - } - if d.HasChange("external_gateway") { - externalGateway := d.Get("external_gateway").(string) - if externalGateway != "" { - gatewayInfo := routers.GatewayInfo{ - NetworkID: externalGateway, - } - updateOpts.GatewayInfo = &gatewayInfo - } - } - - log.Printf("[DEBUG] Updating Router %s with options: %+v", d.Id(), updateOpts) - - _, err = routers.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err) - } - - return resourceNetworkingRouterV2Read(d, meta) -} - -func resourceNetworkingRouterV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForRouterDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Router: %s", err) - } - - d.SetId("") - return nil -} - -func waitForRouterActive(networkingClient *gophercloud.ServiceClient, routerId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - r, err := routers.Get(networkingClient, routerId).Extract() - if err != nil { - return nil, r.Status, err - } - - log.Printf("[DEBUG] OpenStack Neutron Router: %+v", r) - return r, r.Status, nil - } -} - -func waitForRouterDelete(networkingClient *gophercloud.ServiceClient, routerId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Router %s.\n", routerId) - - r, err := routers.Get(networkingClient, routerId).Extract() - if err != 
nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Router %s", routerId) - return r, "DELETED", nil - } - return r, "ACTIVE", err - } - - err = routers.Delete(networkingClient, routerId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Router %s", routerId) - return r, "DELETED", nil - } - return r, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Router %s still active.\n", routerId) - return r, "ACTIVE", nil - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_rule_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_rule_v2.go deleted file mode 100644 index 33e68eb2efa..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_rule_v2.go +++ /dev/null @@ -1,316 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strconv" - "strings" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" -) - -func resourceNetworkingSecGroupRuleV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingSecGroupRuleV2Create, - Read: resourceNetworkingSecGroupRuleV2Read, - Delete: resourceNetworkingSecGroupRuleV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "direction": &schema.Schema{ - Type: schema.TypeString, 
- Required: true, - ForceNew: true, - }, - "ethertype": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port_range_min": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Computed: true, - }, - "port_range_max": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Computed: true, - }, - "protocol": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "remote_group_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "remote_ip_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - StateFunc: func(v interface{}) string { - return strings.ToLower(v.(string)) - }, - }, - "security_group_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - }, - } -} - -func resourceNetworkingSecGroupRuleV2Create(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - portRangeMin := d.Get("port_range_min").(int) - portRangeMax := d.Get("port_range_max").(int) - protocol := d.Get("protocol").(string) - - if protocol == "" { - if portRangeMin != 0 || portRangeMax != 0 { - return fmt.Errorf("A protocol must be specified when using port_range_min and port_range_max") - } - } - - opts := rules.CreateOpts{ - SecGroupID: d.Get("security_group_id").(string), - PortRangeMin: d.Get("port_range_min").(int), - PortRangeMax: d.Get("port_range_max").(int), - RemoteGroupID: d.Get("remote_group_id").(string), - RemoteIPPrefix: d.Get("remote_ip_prefix").(string), - TenantID: d.Get("tenant_id").(string), - } 
- - if v, ok := d.GetOk("direction"); ok { - direction := resourceNetworkingSecGroupRuleV2DetermineDirection(v.(string)) - opts.Direction = direction - } - - if v, ok := d.GetOk("ethertype"); ok { - ethertype := resourceNetworkingSecGroupRuleV2DetermineEtherType(v.(string)) - opts.EtherType = ethertype - } - - if v, ok := d.GetOk("protocol"); ok { - protocol := resourceNetworkingSecGroupRuleV2DetermineProtocol(v.(string)) - opts.Protocol = protocol - } - - log.Printf("[DEBUG] Create OpenStack Neutron security group: %#v", opts) - - security_group_rule, err := rules.Create(networkingClient, opts).Extract() - if err != nil { - return err - } - - log.Printf("[DEBUG] OpenStack Neutron Security Group Rule created: %#v", security_group_rule) - - d.SetId(security_group_rule.ID) - - return resourceNetworkingSecGroupRuleV2Read(d, meta) -} - -func resourceNetworkingSecGroupRuleV2Read(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Retrieve information about security group rule: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - security_group_rule, err := rules.Get(networkingClient, d.Id()).Extract() - - if err != nil { - return CheckDeleted(d, err, "OpenStack Security Group Rule") - } - - d.Set("direction", security_group_rule.Direction) - d.Set("ethertype", security_group_rule.EtherType) - d.Set("protocol", security_group_rule.Protocol) - d.Set("port_range_min", security_group_rule.PortRangeMin) - d.Set("port_range_max", security_group_rule.PortRangeMax) - d.Set("remote_group_id", security_group_rule.RemoteGroupID) - d.Set("remote_ip_prefix", security_group_rule.RemoteIPPrefix) - d.Set("security_group_id", security_group_rule.SecGroupID) - d.Set("tenant_id", security_group_rule.TenantID) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func 
resourceNetworkingSecGroupRuleV2Delete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Destroy security group rule: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForSecGroupRuleDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Security Group Rule: %s", err) - } - - d.SetId("") - return err -} - -func resourceNetworkingSecGroupRuleV2DetermineDirection(v string) rules.RuleDirection { - var direction rules.RuleDirection - switch v { - case "ingress": - direction = rules.DirIngress - case "egress": - direction = rules.DirEgress - } - - return direction -} - -func resourceNetworkingSecGroupRuleV2DetermineEtherType(v string) rules.RuleEtherType { - var etherType rules.RuleEtherType - switch v { - case "IPv4": - etherType = rules.EtherType4 - case "IPv6": - etherType = rules.EtherType6 - } - - return etherType -} - -func resourceNetworkingSecGroupRuleV2DetermineProtocol(v string) rules.RuleProtocol { - var protocol rules.RuleProtocol - - // Check and see if the requested protocol matched a list of known protocol names. 
- switch v { - case "tcp": - protocol = rules.ProtocolTCP - case "udp": - protocol = rules.ProtocolUDP - case "icmp": - protocol = rules.ProtocolICMP - case "ah": - protocol = rules.ProtocolAH - case "dccp": - protocol = rules.ProtocolDCCP - case "egp": - protocol = rules.ProtocolEGP - case "esp": - protocol = rules.ProtocolESP - case "gre": - protocol = rules.ProtocolGRE - case "igmp": - protocol = rules.ProtocolIGMP - case "ipv6-encap": - protocol = rules.ProtocolIPv6Encap - case "ipv6-frag": - protocol = rules.ProtocolIPv6Frag - case "ipv6-icmp": - protocol = rules.ProtocolIPv6ICMP - case "ipv6-nonxt": - protocol = rules.ProtocolIPv6NoNxt - case "ipv6-opts": - protocol = rules.ProtocolIPv6Opts - case "ipv6-route": - protocol = rules.ProtocolIPv6Route - case "ospf": - protocol = rules.ProtocolOSPF - case "pgm": - protocol = rules.ProtocolPGM - case "rsvp": - protocol = rules.ProtocolRSVP - case "sctp": - protocol = rules.ProtocolSCTP - case "udplite": - protocol = rules.ProtocolUDPLite - case "vrrp": - protocol = rules.ProtocolVRRP - } - - // If the protocol wasn't matched above, see if it's an integer. 
- if protocol == "" { - _, err := strconv.Atoi(v) - if err == nil { - protocol = rules.RuleProtocol(v) - } - } - - return protocol -} - -func waitForSecGroupRuleDelete(networkingClient *gophercloud.ServiceClient, secGroupRuleId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Security Group Rule %s.\n", secGroupRuleId) - - r, err := rules.Get(networkingClient, secGroupRuleId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group Rule %s", secGroupRuleId) - return r, "DELETED", nil - } - return r, "ACTIVE", err - } - - err = rules.Delete(networkingClient, secGroupRuleId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group Rule %s", secGroupRuleId) - return r, "DELETED", nil - } - return r, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Neutron Security Group Rule %s still active.\n", secGroupRuleId) - return r, "ACTIVE", nil - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_v2.go deleted file mode 100644 index 1911c4b7055..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_v2.go +++ /dev/null @@ -1,215 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" -) 
- -func resourceNetworkingSecGroupV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingSecGroupV2Create, - Read: resourceNetworkingSecGroupV2Read, - Update: resourceNetworkingSecGroupV2Update, - Delete: resourceNetworkingSecGroupV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "delete_default_rules": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkingSecGroupV2Create(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - opts := groups.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - TenantID: d.Get("tenant_id").(string), - } - - log.Printf("[DEBUG] Create OpenStack Neutron Security Group: %#v", opts) - - security_group, err := groups.Create(networkingClient, opts).Extract() - if err != nil { - return err - } - - // Delete the default security group rules if it has been requested. 
- deleteDefaultRules := d.Get("delete_default_rules").(bool) - if deleteDefaultRules { - security_group, err := groups.Get(networkingClient, security_group.ID).Extract() - if err != nil { - return err - } - for _, rule := range security_group.Rules { - if err := rules.Delete(networkingClient, rule.ID).ExtractErr(); err != nil { - return fmt.Errorf( - "There was a problem deleting a default security group rule: %s", err) - } - } - } - - log.Printf("[DEBUG] OpenStack Neutron Security Group created: %#v", security_group) - - d.SetId(security_group.ID) - - return resourceNetworkingSecGroupV2Read(d, meta) -} - -func resourceNetworkingSecGroupV2Read(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Retrieve information about security group: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - security_group, err := groups.Get(networkingClient, d.Id()).Extract() - - if err != nil { - return CheckDeleted(d, err, "OpenStack Neutron Security group") - } - - d.Set("description", security_group.Description) - d.Set("tenant_id", security_group.TenantID) - d.Set("name", security_group.Name) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceNetworkingSecGroupV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var update bool - var updateOpts groups.UpdateOpts - - if d.HasChange("name") { - update = true - updateOpts.Name = d.Get("name").(string) - } - - if d.HasChange("description") { - update = true - updateOpts.Description = d.Get("description").(string) - } - - if update { - log.Printf("[DEBUG] Updating SecGroup %s with options: %#v", d.Id(), updateOpts) - _, err 
= groups.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack SecGroup: %s", err) - } - } - - return resourceNetworkingSecGroupV2Read(d, meta) -} - -func resourceNetworkingSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Destroy security group: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForSecGroupDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Security Group: %s", err) - } - - d.SetId("") - return err -} - -func waitForSecGroupDelete(networkingClient *gophercloud.ServiceClient, secGroupId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Security Group %s.\n", secGroupId) - - r, err := groups.Get(networkingClient, secGroupId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group %s", secGroupId) - return r, "DELETED", nil - } - return r, "ACTIVE", err - } - - err = groups.Delete(networkingClient, secGroupId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group %s", secGroupId) - return r, "DELETED", nil - } - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - return r, "ACTIVE", nil - } - } - return r, "ACTIVE", err - } - - 
log.Printf("[DEBUG] OpenStack Neutron Security Group %s still active.\n", secGroupId) - return r, "ACTIVE", nil - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_subnet_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_subnet_v2.go deleted file mode 100644 index 266d2de2eb5..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_subnet_v2.go +++ /dev/null @@ -1,423 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -func resourceNetworkingSubnetV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingSubnetV2Create, - Read: resourceNetworkingSubnetV2Read, - Update: resourceNetworkingSubnetV2Update, - Delete: resourceNetworkingSubnetV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "network_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "cidr": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "allocation_pools": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: 
true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "start": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "end": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "gateway_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - }, - "no_gateway": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: false, - }, - "ip_version": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 4, - ForceNew: true, - }, - "enable_dhcp": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: false, - Default: true, - }, - "dns_nameservers": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "host_routes": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: false, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "destination_cidr": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "next_hop": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkingSubnetV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := SubnetCreateOpts{ - subnets.CreateOpts{ - NetworkID: d.Get("network_id").(string), - CIDR: d.Get("cidr").(string), - Name: d.Get("name").(string), - TenantID: d.Get("tenant_id").(string), - AllocationPools: resourceSubnetAllocationPoolsV2(d), - DNSNameservers: resourceSubnetDNSNameserversV2(d), - HostRoutes: resourceSubnetHostRoutesV2(d), - EnableDHCP: nil, - }, - 
MapValueSpecs(d), - } - - noGateway := d.Get("no_gateway").(bool) - gatewayIP := d.Get("gateway_ip").(string) - - if gatewayIP != "" && noGateway { - return fmt.Errorf("Both gateway_ip and no_gateway cannot be set") - } - - if gatewayIP != "" { - createOpts.GatewayIP = &gatewayIP - } - - if noGateway { - disableGateway := "" - createOpts.GatewayIP = &disableGateway - } - - enableDHCP := d.Get("enable_dhcp").(bool) - createOpts.EnableDHCP = &enableDHCP - - if v, ok := d.GetOk("ip_version"); ok { - ipVersion := resourceNetworkingSubnetV2DetermineIPVersion(v.(int)) - createOpts.IPVersion = ipVersion - } - - s, err := subnets.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack Neutron subnet: %s", err) - } - - log.Printf("[DEBUG] Waiting for Subnet (%s) to become available", s.ID) - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Refresh: waitForSubnetActive(networkingClient, s.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(s.ID) - - log.Printf("[DEBUG] Created Subnet %s: %#v", s.ID, s) - return resourceNetworkingSubnetV2Read(d, meta) -} - -func resourceNetworkingSubnetV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - s, err := subnets.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "subnet") - } - - log.Printf("[DEBUG] Retrieved Subnet %s: %#v", d.Id(), s) - - d.Set("network_id", s.NetworkID) - d.Set("cidr", s.CIDR) - d.Set("ip_version", s.IPVersion) - d.Set("name", s.Name) - d.Set("tenant_id", s.TenantID) - d.Set("gateway_ip", s.GatewayIP) - d.Set("dns_nameservers", s.DNSNameservers) - d.Set("host_routes", s.HostRoutes) - 
d.Set("enable_dhcp", s.EnableDHCP) - d.Set("network_id", s.NetworkID) - - // Set the allocation_pools - var allocationPools []map[string]interface{} - for _, v := range s.AllocationPools { - pool := make(map[string]interface{}) - pool["start"] = v.Start - pool["end"] = v.End - - allocationPools = append(allocationPools, pool) - } - d.Set("allocation_pools", allocationPools) - - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceNetworkingSubnetV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // Check if both gateway_ip and no_gateway are set - if _, ok := d.GetOk("gateway_ip"); ok { - noGateway := d.Get("no_gateway").(bool) - if noGateway { - return fmt.Errorf("Both gateway_ip and no_gateway cannot be set.") - } - } - - var updateOpts subnets.UpdateOpts - - noGateway := d.Get("no_gateway").(bool) - gatewayIP := d.Get("gateway_ip").(string) - - if gatewayIP != "" && noGateway { - return fmt.Errorf("Both gateway_ip and no_gateway cannot be set") - } - - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - - if d.HasChange("gateway_ip") { - updateOpts.GatewayIP = nil - if v, ok := d.GetOk("gateway_ip"); ok { - gatewayIP := v.(string) - updateOpts.GatewayIP = &gatewayIP - } - } - - if d.HasChange("no_gateway") { - if d.Get("no_gateway").(bool) { - gatewayIP := "" - updateOpts.GatewayIP = &gatewayIP - } - } - - if d.HasChange("dns_nameservers") { - updateOpts.DNSNameservers = resourceSubnetDNSNameserversV2(d) - } - - if d.HasChange("host_routes") { - updateOpts.HostRoutes = resourceSubnetHostRoutesV2(d) - } - - if d.HasChange("enable_dhcp") { - v := d.Get("enable_dhcp").(bool) - updateOpts.EnableDHCP = &v - } - - if d.HasChange("allocation_pools") { - updateOpts.AllocationPools = resourceSubnetAllocationPoolsV2(d) - } - 
- log.Printf("[DEBUG] Updating Subnet %s with options: %+v", d.Id(), updateOpts) - - _, err = subnets.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Subnet: %s", err) - } - - return resourceNetworkingSubnetV2Read(d, meta) -} - -func resourceNetworkingSubnetV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForSubnetDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Subnet: %s", err) - } - - d.SetId("") - return nil -} - -func resourceSubnetAllocationPoolsV2(d *schema.ResourceData) []subnets.AllocationPool { - rawAPs := d.Get("allocation_pools").([]interface{}) - aps := make([]subnets.AllocationPool, len(rawAPs)) - for i, raw := range rawAPs { - rawMap := raw.(map[string]interface{}) - aps[i] = subnets.AllocationPool{ - Start: rawMap["start"].(string), - End: rawMap["end"].(string), - } - } - return aps -} - -func resourceSubnetDNSNameserversV2(d *schema.ResourceData) []string { - rawDNSN := d.Get("dns_nameservers").(*schema.Set) - dnsn := make([]string, rawDNSN.Len()) - for i, raw := range rawDNSN.List() { - dnsn[i] = raw.(string) - } - return dnsn -} - -func resourceSubnetHostRoutesV2(d *schema.ResourceData) []subnets.HostRoute { - rawHR := d.Get("host_routes").([]interface{}) - hr := make([]subnets.HostRoute, len(rawHR)) - for i, raw := range rawHR { - rawMap := raw.(map[string]interface{}) - hr[i] = subnets.HostRoute{ - DestinationCIDR: 
rawMap["destination_cidr"].(string), - NextHop: rawMap["next_hop"].(string), - } - } - return hr -} - -func resourceNetworkingSubnetV2DetermineIPVersion(v int) gophercloud.IPVersion { - var ipVersion gophercloud.IPVersion - switch v { - case 4: - ipVersion = gophercloud.IPv4 - case 6: - ipVersion = gophercloud.IPv6 - } - - return ipVersion -} - -func waitForSubnetActive(networkingClient *gophercloud.ServiceClient, subnetId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - s, err := subnets.Get(networkingClient, subnetId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Neutron Subnet: %+v", s) - return s, "ACTIVE", nil - } -} - -func waitForSubnetDelete(networkingClient *gophercloud.ServiceClient, subnetId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Subnet %s.\n", subnetId) - - s, err := subnets.Get(networkingClient, subnetId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId) - return s, "DELETED", nil - } - return s, "ACTIVE", err - } - - err = subnets.Delete(networkingClient, subnetId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId) - return s, "DELETED", nil - } - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - return s, "ACTIVE", nil - } - } - return s, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Subnet %s still active.\n", subnetId) - return s, "ACTIVE", nil - } -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_objectstorage_container_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_objectstorage_container_v1.go 
deleted file mode 100644 index e4cbe5be859..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_objectstorage_container_v1.go +++ /dev/null @@ -1,151 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceObjectStorageContainerV1() *schema.Resource { - return &schema.Resource{ - Create: resourceObjectStorageContainerV1Create, - Read: resourceObjectStorageContainerV1Read, - Update: resourceObjectStorageContainerV1Update, - Delete: resourceObjectStorageContainerV1Delete, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "container_read": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "container_sync_to": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "container_sync_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "container_write": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "content_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: false, - }, - }, - } -} - -func resourceObjectStorageContainerV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - objectStorageClient, err := config.objectStorageV1Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack object storage client: %s", err) - } - - cn := d.Get("name").(string) - - createOpts := &containers.CreateOpts{ - ContainerRead: 
d.Get("container_read").(string), - ContainerSyncTo: d.Get("container_sync_to").(string), - ContainerSyncKey: d.Get("container_sync_key").(string), - ContainerWrite: d.Get("container_write").(string), - ContentType: d.Get("content_type").(string), - Metadata: resourceContainerMetadataV2(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - _, err = containers.Create(objectStorageClient, cn, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack container: %s", err) - } - log.Printf("[INFO] Container ID: %s", cn) - - // Store the ID now - d.SetId(cn) - - return resourceObjectStorageContainerV1Read(d, meta) -} - -func resourceObjectStorageContainerV1Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - d.Set("region", GetRegion(d, config)) - - return nil -} - -func resourceObjectStorageContainerV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - objectStorageClient, err := config.objectStorageV1Client(GetRegion(d, config)) - if err != nil { - return fmt.Errorf("Error creating OpenStack object storage client: %s", err) - } - - updateOpts := containers.UpdateOpts{ - ContainerRead: d.Get("container_read").(string), - ContainerSyncTo: d.Get("container_sync_to").(string), - ContainerSyncKey: d.Get("container_sync_key").(string), - ContainerWrite: d.Get("container_write").(string), - ContentType: d.Get("content_type").(string), - } - - if d.HasChange("metadata") { - updateOpts.Metadata = resourceContainerMetadataV2(d) - } - - _, err = containers.Update(objectStorageClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack container: %s", err) - } - - return resourceObjectStorageContainerV1Read(d, meta) -} - -func resourceObjectStorageContainerV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - objectStorageClient, err := config.objectStorageV1Client(GetRegion(d, config)) - if err != 
nil { - return fmt.Errorf("Error creating OpenStack object storage client: %s", err) - } - - _, err = containers.Delete(objectStorageClient, d.Id()).Extract() - if err != nil { - return fmt.Errorf("Error deleting OpenStack container: %s", err) - } - - d.SetId("") - return nil -} - -func resourceContainerMetadataV2(d *schema.ResourceData) map[string]string { - m := make(map[string]string) - for key, val := range d.Get("metadata").(map[string]interface{}) { - m[key] = val.(string) - } - return m -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/types.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/types.go deleted file mode 100644 index f69da63570c..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/types.go +++ /dev/null @@ -1,353 +0,0 @@ -package openstack - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "strings" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" - "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" - "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" - 
"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -// LogRoundTripper satisfies the http.RoundTripper interface and is used to -// customize the default http client RoundTripper to allow for logging. -type LogRoundTripper struct { - Rt http.RoundTripper - OsDebug bool -} - -// RoundTrip performs a round-trip HTTP request and logs relevant information about it. -func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { - defer func() { - if request.Body != nil { - request.Body.Close() - } - }() - - // for future reference, this is how to access the Transport struct: - //tlsconfig := lrt.Rt.(*http.Transport).TLSClientConfig - - var err error - - if lrt.OsDebug { - log.Printf("[DEBUG] OpenStack Request URL: %s %s", request.Method, request.URL) - log.Printf("[DEBUG] Openstack Request Headers:\n%s", FormatHeaders(request.Header, "\n")) - - if request.Body != nil { - request.Body, err = lrt.logRequest(request.Body, request.Header.Get("Content-Type")) - if err != nil { - return nil, err - } - } - } - - response, err := lrt.Rt.RoundTrip(request) - if response == nil { - return nil, err - } - - if lrt.OsDebug { - log.Printf("[DEBUG] Openstack Response Code: %d", response.StatusCode) - log.Printf("[DEBUG] Openstack Response Headers:\n%s", FormatHeaders(response.Header, "\n")) - - response.Body, err = lrt.logResponse(response.Body, response.Header.Get("Content-Type")) - } - - return response, err -} - -// logRequest will log the HTTP Request details. -// If the body is JSON, it will attempt to be pretty-formatted. 
-func (lrt *LogRoundTripper) logRequest(original io.ReadCloser, contentType string) (io.ReadCloser, error) { - defer original.Close() - - var bs bytes.Buffer - _, err := io.Copy(&bs, original) - if err != nil { - return nil, err - } - - // Handle request contentType - if strings.HasPrefix(contentType, "application/json") { - debugInfo := lrt.formatJSON(bs.Bytes()) - log.Printf("[DEBUG] OpenStack Request Body: %s", debugInfo) - } else { - log.Printf("[DEBUG] OpenStack Request Body: %s", bs.String()) - } - - return ioutil.NopCloser(strings.NewReader(bs.String())), nil -} - -// logResponse will log the HTTP Response details. -// If the body is JSON, it will attempt to be pretty-formatted. -func (lrt *LogRoundTripper) logResponse(original io.ReadCloser, contentType string) (io.ReadCloser, error) { - if strings.HasPrefix(contentType, "application/json") { - var bs bytes.Buffer - defer original.Close() - _, err := io.Copy(&bs, original) - if err != nil { - return nil, err - } - debugInfo := lrt.formatJSON(bs.Bytes()) - if debugInfo != "" { - log.Printf("[DEBUG] OpenStack Response Body: %s", debugInfo) - } - return ioutil.NopCloser(strings.NewReader(bs.String())), nil - } - - log.Printf("[DEBUG] Not logging because OpenStack response body isn't JSON") - return original, nil -} - -// formatJSON will try to pretty-format a JSON body. -// It will also mask known fields which contain sensitive information. 
-func (lrt *LogRoundTripper) formatJSON(raw []byte) string { - var data map[string]interface{} - - err := json.Unmarshal(raw, &data) - if err != nil { - log.Printf("[DEBUG] Unable to parse OpenStack JSON: %s", err) - return string(raw) - } - - // Mask known password fields - if v, ok := data["auth"].(map[string]interface{}); ok { - if v, ok := v["identity"].(map[string]interface{}); ok { - if v, ok := v["password"].(map[string]interface{}); ok { - if v, ok := v["user"].(map[string]interface{}); ok { - v["password"] = "***" - } - } - } - } - - // Ignore the catalog - if v, ok := data["token"].(map[string]interface{}); ok { - if _, ok := v["catalog"]; ok { - return "" - } - } - - pretty, err := json.MarshalIndent(data, "", " ") - if err != nil { - log.Printf("[DEBUG] Unable to re-marshal OpenStack JSON: %s", err) - return string(raw) - } - - return string(pretty) -} - -// Firewall is an OpenStack firewall. -type Firewall struct { - firewalls.Firewall - routerinsertion.FirewallExt -} - -// FirewallCreateOpts represents the attributes used when creating a new firewall. -type FirewallCreateOpts struct { - firewalls.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToFirewallCreateMap casts a CreateOptsExt struct to a map. -// It overrides firewalls.ToFirewallCreateMap to add the ValueSpecs field. -func (opts FirewallCreateOpts) ToFirewallCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "firewall") -} - -//FirewallUpdateOpts -type FirewallUpdateOpts struct { - firewalls.UpdateOptsBuilder -} - -func (opts FirewallUpdateOpts) ToFirewallUpdateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "firewall") -} - -// FloatingIPCreateOpts represents the attributes used when creating a new floating ip. -type FloatingIPCreateOpts struct { - floatingips.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToFloatingIPCreateMap casts a CreateOpts struct to a map. 
-// It overrides floatingips.ToFloatingIPCreateMap to add the ValueSpecs field. -func (opts FloatingIPCreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "floatingip") -} - -// KeyPairCreateOpts represents the attributes used when creating a new keypair. -type KeyPairCreateOpts struct { - keypairs.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToKeyPairCreateMap casts a CreateOpts struct to a map. -// It overrides keypairs.ToKeyPairCreateMap to add the ValueSpecs field. -func (opts KeyPairCreateOpts) ToKeyPairCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "keypair") -} - -// NetworkCreateOpts represents the attributes used when creating a new network. -type NetworkCreateOpts struct { - networks.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToNetworkCreateMap casts a CreateOpts struct to a map. -// It overrides networks.ToNetworkCreateMap to add the ValueSpecs field. -func (opts NetworkCreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "network") -} - -// PolicyCreateOpts represents the attributes used when creating a new firewall policy. -type PolicyCreateOpts struct { - policies.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToPolicyCreateMap casts a CreateOpts struct to a map. -// It overrides policies.ToFirewallPolicyCreateMap to add the ValueSpecs field. -func (opts PolicyCreateOpts) ToFirewallPolicyCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "firewall_policy") -} - -// PortCreateOpts represents the attributes used when creating a new port. -type PortCreateOpts struct { - ports.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToPortCreateMap casts a CreateOpts struct to a map. -// It overrides ports.ToPortCreateMap to add the ValueSpecs field. 
-func (opts PortCreateOpts) ToPortCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "port") -} - -// RecordSetCreateOpts represents the attributes used when creating a new DNS record set. -type RecordSetCreateOpts struct { - recordsets.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToRecordSetCreateMap casts a CreateOpts struct to a map. -// It overrides recordsets.ToRecordSetCreateMap to add the ValueSpecs field. -func (opts RecordSetCreateOpts) ToRecordSetCreateMap() (map[string]interface{}, error) { - b, err := BuildRequest(opts, "") - if err != nil { - return nil, err - } - - if m, ok := b[""].(map[string]interface{}); ok { - return m, nil - } - - return nil, fmt.Errorf("Expected map but got %T", b[""]) -} - -// RouterCreateOpts represents the attributes used when creating a new router. -type RouterCreateOpts struct { - routers.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToRouterCreateMap casts a CreateOpts struct to a map. -// It overrides routers.ToRouterCreateMap to add the ValueSpecs field. -func (opts RouterCreateOpts) ToRouterCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "router") -} - -// RuleCreateOpts represents the attributes used when creating a new firewall rule. -type RuleCreateOpts struct { - rules.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToRuleCreateMap casts a CreateOpts struct to a map. -// It overrides rules.ToRuleCreateMap to add the ValueSpecs field. -func (opts RuleCreateOpts) ToRuleCreateMap() (map[string]interface{}, error) { - b, err := BuildRequest(opts, "firewall_rule") - if err != nil { - return nil, err - } - - if m := b["firewall_rule"].(map[string]interface{}); m["protocol"] == "any" { - m["protocol"] = nil - } - - return b, nil -} - -// ServerGroupCreateOpts represents the attributes used when creating a new router. 
-type ServerGroupCreateOpts struct { - servergroups.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToServerGroupCreateMap casts a CreateOpts struct to a map. -// It overrides routers.ToServerGroupCreateMap to add the ValueSpecs field. -func (opts ServerGroupCreateOpts) ToServerGroupCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "server_group") -} - -// SubnetCreateOpts represents the attributes used when creating a new subnet. -type SubnetCreateOpts struct { - subnets.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToSubnetCreateMap casts a CreateOpts struct to a map. -// It overrides subnets.ToSubnetCreateMap to add the ValueSpecs field. -func (opts SubnetCreateOpts) ToSubnetCreateMap() (map[string]interface{}, error) { - b, err := BuildRequest(opts, "subnet") - if err != nil { - return nil, err - } - - if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" { - m["gateway_ip"] = nil - } - - return b, nil -} - -// ZoneCreateOpts represents the attributes used when creating a new DNS zone. -type ZoneCreateOpts struct { - zones.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToZoneCreateMap casts a CreateOpts struct to a map. -// It overrides zones.ToZoneCreateMap to add the ValueSpecs field. 
-func (opts ZoneCreateOpts) ToZoneCreateMap() (map[string]interface{}, error) { - b, err := BuildRequest(opts, "") - if err != nil { - return nil, err - } - - if m, ok := b[""].(map[string]interface{}); ok { - if opts.TTL > 0 { - m["ttl"] = opts.TTL - } - - return m, nil - } - - return nil, fmt.Errorf("Expected map but got %T", b[""]) -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/util.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/util.go deleted file mode 100644 index 8b88cacb6a7..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/util.go +++ /dev/null @@ -1,114 +0,0 @@ -package openstack - -import ( - "fmt" - "net/http" - "sort" - "strings" - - "github.com/Unknwon/com" - "github.com/gophercloud/gophercloud" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -// BuildRequest takes an opts struct and builds a request body for -// Gophercloud to execute -func BuildRequest(opts interface{}, parent string) (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "") - if err != nil { - return nil, err - } - - b = AddValueSpecs(b) - - return map[string]interface{}{parent: b}, nil -} - -// CheckDeleted checks the error to see if it's a 404 (Not Found) and, if so, -// sets the resource ID to the empty string instead of throwing an error. -func CheckDeleted(d *schema.ResourceData, err error, msg string) error { - if _, ok := err.(gophercloud.ErrDefault404); ok { - d.SetId("") - return nil - } - - return fmt.Errorf("%s: %s", msg, err) -} - -// GetRegion returns the region that was specified in the resource. If a -// region was not set, the provider-level region is checked. The provider-level -// region can either be set by the region argument or by OS_REGION_NAME. 
-func GetRegion(d *schema.ResourceData, config *Config) string { - if v, ok := d.GetOk("region"); ok { - return v.(string) - } - - return config.Region -} - -// AddValueSpecs expands the 'value_specs' object and removes 'value_specs' -// from the reqeust body. -func AddValueSpecs(body map[string]interface{}) map[string]interface{} { - if body["value_specs"] != nil { - for k, v := range body["value_specs"].(map[string]interface{}) { - body[k] = v - } - delete(body, "value_specs") - } - - return body -} - -// MapValueSpecs converts ResourceData into a map -func MapValueSpecs(d *schema.ResourceData) map[string]string { - m := make(map[string]string) - for key, val := range d.Get("value_specs").(map[string]interface{}) { - m[key] = val.(string) - } - return m -} - -// List of headers that need to be redacted -var REDACT_HEADERS = []string{"x-auth-token", "x-auth-key", "x-service-token", - "x-storage-token", "x-account-meta-temp-url-key", "x-account-meta-temp-url-key-2", - "x-container-meta-temp-url-key", "x-container-meta-temp-url-key-2", "set-cookie", - "x-subject-token"} - -// RedactHeaders processes a headers object, returning a redacted list -func RedactHeaders(headers http.Header) (processedHeaders []string) { - for name, header := range headers { - for _, v := range header { - if com.IsSliceContainsStr(REDACT_HEADERS, name) { - processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, "***")) - } else { - processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, v)) - } - } - } - return -} - -// FormatHeaders processes a headers object plus a deliminator, returning a string -func FormatHeaders(headers http.Header, seperator string) string { - redactedHeaders := RedactHeaders(headers) - sort.Strings(redactedHeaders) - - return strings.Join(redactedHeaders, seperator) -} - -func checkForRetryableError(err error) *resource.RetryError { - switch errCode := err.(type) { - case gophercloud.ErrDefault500: - return resource.RetryableError(err) 
- case gophercloud.ErrUnexpectedResponseCode: - switch errCode.Actual { - case 409, 503: - return resource.RetryableError(err) - default: - return resource.NonRetryableError(err) - } - default: - return resource.NonRetryableError(err) - } -} diff --git a/vendor/github.com/ugorji/go/LICENSE b/vendor/github.com/ugorji/go/LICENSE deleted file mode 100644 index 95a0f0541cd..00000000000 --- a/vendor/github.com/ugorji/go/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2012-2015 Ugorji Nwoke. -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go deleted file mode 100644 index 78b32055f30..00000000000 --- a/vendor/github.com/ugorji/go/codec/0doc.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -/* -High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library for -binc, msgpack, cbor, json - -Supported Serialization formats are: - - - msgpack: https://github.com/msgpack/msgpack - - binc: http://github.com/ugorji/binc - - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - - json: http://json.org http://tools.ietf.org/html/rfc7159 - - simple: - -To install: - - go get github.com/ugorji/go/codec - -This package will carefully use 'unsafe' for performance reasons in specific places. -You can build without unsafe use by passing the safe or appengine tag -i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3 -go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from -go 1.7+ . This is because supporting unsafe requires knowledge of implementation details. - -For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Support for go1.4 and above, while selectively using newer APIs for later releases - - Good code coverage ( > 70% ) - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. - - Careful selected use of 'unsafe' for targeted performance gains. - 100% mode exists where 'unsafe' is not used at all. - - Lock-free (sans mutex) concurrency for scaling to 100's of cores - - Multiple conversions: - Package coerces types where appropriate - e.g. decode an int in the stream into a float, etc. 
- - Corner Cases: - Overflows, nil maps/slices, nil values in streams are handled correctly - - Standard field renaming via tags - - Support for omitting empty fields during an encoding - - Encoding from any value and decoding into pointer to any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Extensions to support efficient encoding/decoding of any named types - - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - - Decoding without a schema (into a interface{}). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Encode a struct as an array, and decode struct from an array in the data stream - - Comprehensive support for anonymous fields - - Fast (no-reflection) encoding/decoding of common maps and slices - - Code-generation for faster performance. - - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats - - Support indefinite-length formats to enable true streaming - (for formats which support it e.g. json, cbor) - - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. - This mostly applies to maps, where iteration order is non-deterministic. - - NIL in data stream decoded as zero value - - Never silently skip data when decoding. - User decides whether to return an error or silently skip data when keys or indexes - in the data stream do not map to fields in the struct. - - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) - - Encode/Decode from/to chan types (for iterative streaming support) - - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosyncrasies of codecs e.g. 
- - For messagepack, configure how ambiguities in handling raw bytes are resolved - - For messagepack, provide rpc server/client codec to support - msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - -Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -Custom Encoding and Decoding - -This package maintains symmetry in the encoding and decoding halfs. -We determine how to encode or decode by walking this decision tree - - - is type a codec.Selfer? - - is there an extension registered for the type? - - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler? - - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler? - - is format text-based, and type an encoding.TextMarshaler? - - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc - -This symmetry is important to reduce chances of issues happening because the -encoding and decoding sides are out of sync e.g. decoded via very specific -encoding.TextUnmarshaler but encoded via kind-specific generalized mode. - -Consequently, if a type only defines one-half of the symetry -(e.g. it implements UnmarshalJSON() but not MarshalJSON() ), -then that type doesn't satisfy the check and we will continue walking down the -decision tree. 
- -RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -Usage - -The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification. - -The Encoder and Decoder are NOT safe for concurrent use. - -Consequently, the usage model is basically: - - - Create and initialize the Handle before any use. - Once created, DO NOT modify it. - - Multiple Encoders or Decoders can now use the Handle concurrently. - They only read information off the Handle (never write). - - However, each Encoder or Decoder MUST not be used concurrently - - To re-use an Encoder/Decoder, call Reset(...) on it first. - This allows you use state maintained on the Encoder/Decoder. - -Sample usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ch codec.CborHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -Running Tests - -To run tests, use the following: - - go test - -To run the full suite of tests, use the following: - - go test -tags alltests 
-run Suite - -You can run the tag 'safe' to run tests or build in safe mode. e.g. - - go test -tags safe -run Json - go test -tags "alltests safe" -run Suite - -Running Benchmarks - -Please see http://github.com/ugorji/go-codec-bench . - -*/ -package codec - diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md deleted file mode 100644 index 95c7d617693..00000000000 --- a/vendor/github.com/ugorji/go/codec/README.md +++ /dev/null @@ -1,187 +0,0 @@ -# Codec - -High Performance, Feature-Rich Idiomatic Go codec/encoding library for -binc, msgpack, cbor, json. - -Supported Serialization formats are: - - - msgpack: https://github.com/msgpack/msgpack - - binc: http://github.com/ugorji/binc - - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - - json: http://json.org http://tools.ietf.org/html/rfc7159 - - simple: - -To install: - - go get github.com/ugorji/go/codec - -This package will carefully use 'unsafe' for performance reasons in specific places. -You can build without unsafe use by passing the safe or appengine tag -i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3 -go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from -go 1.7+ . This is because supporting unsafe requires knowledge of implementation details. - -Online documentation: http://godoc.org/github.com/ugorji/go/codec -Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Support for go1.4 and above, while selectively using newer APIs for later releases - - Good code coverage ( > 70% ) - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. - - Careful selected use of 'unsafe' for targeted performance gains. 
- 100% mode exists where 'unsafe' is not used at all. - - Lock-free (sans mutex) concurrency for scaling to 100's of cores - - Multiple conversions: - Package coerces types where appropriate - e.g. decode an int in the stream into a float, etc. - - Corner Cases: - Overflows, nil maps/slices, nil values in streams are handled correctly - - Standard field renaming via tags - - Support for omitting empty fields during an encoding - - Encoding from any value and decoding into pointer to any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Extensions to support efficient encoding/decoding of any named types - - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - - Decoding without a schema (into a interface{}). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Encode a struct as an array, and decode struct from an array in the data stream - - Comprehensive support for anonymous fields - - Fast (no-reflection) encoding/decoding of common maps and slices - - Code-generation for faster performance. - - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats - - Support indefinite-length formats to enable true streaming - (for formats which support it e.g. json, cbor) - - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. - This mostly applies to maps, where iteration order is non-deterministic. - - NIL in data stream decoded as zero value - - Never silently skip data when decoding. - User decides whether to return an error or silently skip data when keys or indexes - in the data stream do not map to fields in the struct. - - Encode/Decode from/to chan types (for iterative streaming support) - - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosyncrasies of codecs e.g. 
- - For messagepack, configure how ambiguities in handling raw bytes are resolved - - For messagepack, provide rpc server/client codec to support - msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - -## Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -## Custom Encoding and Decoding - -This package maintains symmetry in the encoding and decoding halfs. -We determine how to encode or decode by walking this decision tree - - - is type a codec.Selfer? - - is there an extension registered for the type? - - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler? - - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler? - - is format text-based, and type an encoding.TextMarshaler? - - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc - -This symmetry is important to reduce chances of issues happening because the -encoding and decoding sides are out of sync e.g. decoded via very specific -encoding.TextUnmarshaler but encoded via kind-specific generalized mode. - -Consequently, if a type only defines one-half of the symetry -(e.g. it implements UnmarshalJSON() but not MarshalJSON() ), -then that type doesn't satisfy the check and we will continue walking down the -decision tree. 
- -## RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -## Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ch codec.CborHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -## Running Tests - -To run tests, use the following: - - go test - -To run the full suite of tests, use the following: - - go test -tags alltests -run Suite - -You can run the tag 'safe' to run tests or build in safe mode. e.g. - - go test -tags safe -run Json - go test -tags "alltests safe" -run Suite - -## Running Benchmarks - -Please see http://github.com/ugorji/go-codec-bench . - diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go deleted file mode 100644 index be5b7d33804..00000000000 --- a/vendor/github.com/ugorji/go/codec/binc.go +++ /dev/null @@ -1,946 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
-// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math" - "reflect" - "time" -) - -const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. - -// vd as low 4 bits (there are 16 slots) -const ( - bincVdSpecial byte = iota - bincVdPosInt - bincVdNegInt - bincVdFloat - - bincVdString - bincVdByteArray - bincVdArray - bincVdMap - - bincVdTimestamp - bincVdSmallInt - bincVdUnicodeOther - bincVdSymbol - - bincVdDecimal - _ // open slot - _ // open slot - bincVdCustomExt = 0x0f -) - -const ( - bincSpNil byte = iota - bincSpFalse - bincSpTrue - bincSpNan - bincSpPosInf - bincSpNegInf - bincSpZeroFloat - bincSpZero - bincSpNegOne -) - -const ( - bincFlBin16 byte = iota - bincFlBin32 - _ // bincFlBin32e - bincFlBin64 - _ // bincFlBin64e - // others not currently supported -) - -type bincEncDriver struct { - e *Encoder - w encWriter - m map[string]uint16 // symbols - b [scratchByteArrayLen]byte - s uint16 // symbols sequencer - // encNoSeparator - encDriverNoopContainerWriter -} - -func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) { - if rt == timeTypId { - var bs []byte - switch x := v.(type) { - case time.Time: - bs = encodeTime(x) - case *time.Time: - bs = encodeTime(*x) - default: - e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v) - } - e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) - e.w.writeb(bs) - } -} - -func (e *bincEncDriver) EncodeNil() { - e.w.writen1(bincVdSpecial<<4 | bincSpNil) -} - -func (e *bincEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(bincVdSpecial<<4 | bincSpTrue) - } else { - e.w.writen1(bincVdSpecial<<4 | bincSpFalse) - } -} - -func (e *bincEncDriver) EncodeFloat32(f float32) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - e.w.writen1(bincVdFloat<<4 | bincFlBin32) - 
bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *bincEncDriver) EncodeFloat64(f float64) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - bigen.PutUint64(e.b[:8], math.Float64bits(f)) - if bincDoPrune { - i := 7 - for ; i >= 0 && (e.b[i] == 0); i-- { - } - i++ - if i <= 6 { - e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) - e.w.writen1(byte(i)) - e.w.writeb(e.b[:i]) - return - } - } - e.w.writen1(bincVdFloat<<4 | bincFlBin64) - e.w.writeb(e.b[:8]) -} - -func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { - if lim == 4 { - bigen.PutUint32(e.b[:lim], uint32(v)) - } else { - bigen.PutUint64(e.b[:lim], v) - } - if bincDoPrune { - i := pruneSignExt(e.b[:lim], pos) - e.w.writen1(bd | lim - 1 - byte(i)) - e.w.writeb(e.b[i:lim]) - } else { - e.w.writen1(bd | lim - 1) - e.w.writeb(e.b[:lim]) - } -} - -func (e *bincEncDriver) EncodeInt(v int64) { - const nbd byte = bincVdNegInt << 4 - if v >= 0 { - e.encUint(bincVdPosInt<<4, true, uint64(v)) - } else if v == -1 { - e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) - } else { - e.encUint(bincVdNegInt<<4, false, uint64(-v)) - } -} - -func (e *bincEncDriver) EncodeUint(v uint64) { - e.encUint(bincVdPosInt<<4, true, v) -} - -func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { - if v == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZero) - } else if pos && v >= 1 && v <= 16 { - e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) - } else if v <= math.MaxUint8 { - e.w.writen2(bd|0x0, byte(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd | 0x01) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.encIntegerPrune(bd, pos, v, 4) - } else { - e.encIntegerPrune(bd, pos, v, 8) - } -} - -func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(rv) - if bs == nil { - e.EncodeNil() - return - } - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) -} - -func (e 
*bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - -func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(bincVdCustomExt<<4, uint64(length)) - e.w.writen1(xtag) -} - -func (e *bincEncDriver) WriteArrayStart(length int) { - e.encLen(bincVdArray<<4, uint64(length)) -} - -func (e *bincEncDriver) WriteMapStart(length int) { - e.encLen(bincVdMap<<4, uint64(length)) -} - -func (e *bincEncDriver) EncodeString(c charEncoding, v string) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writestr(v) - } -} - -func (e *bincEncDriver) EncodeSymbol(v string) { - // if WriteSymbolsNoRefs { - // e.encodeString(c_UTF8, v) - // return - // } - - //symbols only offer benefit when string length > 1. - //This is because strings with length 1 take only 2 bytes to store - //(bd with embedded length, and single byte for string val). - - l := len(v) - if l == 0 { - e.encBytesLen(c_UTF8, 0) - return - } else if l == 1 { - e.encBytesLen(c_UTF8, 1) - e.w.writen1(v[0]) - return - } - if e.m == nil { - e.m = make(map[string]uint16, 16) - } - ui, ok := e.m[v] - if ok { - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8) - bigenHelper{e.b[:2], e.w}.writeUint16(ui) - } - } else { - e.s++ - ui = e.s - //ui = uint16(atomic.AddUint32(&e.s, 1)) - e.m[v] = ui - var lenprec uint8 - if l <= math.MaxUint8 { - // lenprec = 0 - } else if l <= math.MaxUint16 { - lenprec = 1 - } else if int64(l) <= math.MaxUint32 { - lenprec = 2 - } else { - lenprec = 3 - } - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) - bigenHelper{e.b[:2], e.w}.writeUint16(ui) - } - if lenprec == 0 { - e.w.writen1(byte(l)) - } else if lenprec == 1 { - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) - } else if lenprec == 2 { - bigenHelper{e.b[:4], 
e.w}.writeUint32(uint32(l)) - } else { - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) - } - e.w.writestr(v) - } -} - -func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writeb(v) - } -} - -func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { - //TODO: support bincUnicodeOther (for now, just use string or bytearray) - if c == c_RAW { - e.encLen(bincVdByteArray<<4, length) - } else { - e.encLen(bincVdString<<4, length) - } -} - -func (e *bincEncDriver) encLen(bd byte, l uint64) { - if l < 12 { - e.w.writen1(bd | uint8(l+4)) - } else { - e.encLenNumber(bd, l) - } -} - -func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { - if v <= math.MaxUint8 { - e.w.writen2(bd, byte(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd | 0x01) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.w.writen1(bd | 0x02) - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) - } else { - e.w.writen1(bd | 0x03) - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) - } -} - -//------------------------------------ - -type bincDecSymbol struct { - s string - b []byte - i uint16 -} - -type bincDecDriver struct { - d *Decoder - h *BincHandle - r decReader - br bool // bytes reader - bdRead bool - bd byte - vd byte - vs byte - // noStreamingCodec - // decNoSeparator - b [scratchByteArrayLen]byte - - // linear searching on this slice is ok, - // because we typically expect < 32 symbols in each stream. 
- s []bincDecSymbol - decDriverNoopContainerReader -} - -func (d *bincDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.vd = d.bd >> 4 - d.vs = d.bd & 0x0f - d.bdRead = true -} - -func (d *bincDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *bincDecDriver) ContainerType() (vt valueType) { - if !d.bdRead { - d.readNextBd() - } - if d.vd == bincVdSpecial && d.vs == bincSpNil { - return valueTypeNil - } else if d.vd == bincVdByteArray { - return valueTypeBytes - } else if d.vd == bincVdString { - return valueTypeString - } else if d.vd == bincVdArray { - return valueTypeArray - } else if d.vd == bincVdMap { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *bincDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return true - } - return false -} - -func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) { - if !d.bdRead { - d.readNextBd() - } - if rt == timeTypId { - if d.vd != bincVdTimestamp { - d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) - return - } - tt, err := decodeTime(d.r.readx(int(d.vs))) - if err != nil { - panic(err) - } - var vt *time.Time = v.(*time.Time) - *vt = tt - d.bdRead = false - } -} - -func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { - if vs&0x8 == 0 { - d.r.readb(d.b[0:defaultLen]) - } else { - l := d.r.readn1() - if l > 8 { - d.d.errorf("At most 8 bytes used to represent float. 
Received: %v bytes", l) - return - } - for i := l; i < 8; i++ { - d.b[i] = 0 - } - d.r.readb(d.b[0:l]) - } -} - -func (d *bincDecDriver) decFloat() (f float64) { - //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } - if x := d.vs & 0x7; x == bincFlBin32 { - d.decFloatPre(d.vs, 4) - f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) - } else if x == bincFlBin64 { - d.decFloatPre(d.vs, 8) - f = math.Float64frombits(bigen.Uint64(d.b[0:8])) - } else { - d.d.errorf("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) - return - } - return -} - -func (d *bincDecDriver) decUint() (v uint64) { - // need to inline the code (interface conversion and type assertion expensive) - switch d.vs { - case 0: - v = uint64(d.r.readn1()) - case 1: - d.r.readb(d.b[6:8]) - v = uint64(bigen.Uint16(d.b[6:8])) - case 2: - d.b[4] = 0 - d.r.readb(d.b[5:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - case 3: - d.r.readb(d.b[4:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - case 4, 5, 6: - lim := int(7 - d.vs) - d.r.readb(d.b[lim:8]) - for i := 0; i < lim; i++ { - d.b[i] = 0 - } - v = uint64(bigen.Uint64(d.b[:8])) - case 7: - d.r.readb(d.b[:8]) - v = uint64(bigen.Uint64(d.b[:8])) - default: - d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") - return - } - return -} - -func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { - if !d.bdRead { - d.readNextBd() - } - vd, vs := d.vd, d.vs - if vd == bincVdPosInt { - ui = d.decUint() - } else if vd == bincVdNegInt { - ui = d.decUint() - neg = true - } else if vd == bincVdSmallInt { - ui = uint64(d.vs) + 1 - } else if vd == bincVdSpecial { - if vs == bincSpZero { - //i = 0 - } else if vs == bincSpNegOne { - neg = true - ui = 1 - } else { - d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs) - return - } - } else { - d.d.errorf("number can only be decoded from uint or int values. 
d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) - return - } - return -} - -func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) { - ui, neg := d.decCheckInteger() - i, overflow := chkOvf.SignedInt(ui) - if overflow { - d.d.errorf("simple: overflow converting %v to signed integer", ui) - return - } - if neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("binc: overflow integer: %v for num bits: %v", i, bitsize) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - ui, neg := d.decCheckInteger() - if neg { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("binc: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - vd, vs := d.vd, d.vs - if vd == bincVdSpecial { - d.bdRead = false - if vs == bincSpNan { - return math.NaN() - } else if vs == bincSpPosInf { - return math.Inf(1) - } else if vs == bincSpZeroFloat || vs == bincSpZero { - return - } else if vs == bincSpNegInf { - return math.Inf(-1) - } else { - d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) - return - } - } else if vd == bincVdFloat { - f = d.decFloat() - } else { - f = float64(d.DecodeInt(64)) - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("binc: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). 
-func (d *bincDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { - // b = false - } else if bd == (bincVdSpecial | bincSpTrue) { - b = true - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) ReadMapStart() (length int) { - if !d.bdRead { - d.readNextBd() - } - if d.vd != bincVdMap { - d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) - return - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) ReadArrayStart() (length int) { - if !d.bdRead { - d.readNextBd() - } - if d.vd != bincVdArray { - d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) - return - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) decLen() int { - if d.vs > 3 { - return int(d.vs - 4) - } - return int(d.decLenNumber()) -} - -func (d *bincDecDriver) decLenNumber() (v uint64) { - if x := d.vs; x == 0 { - v = uint64(d.r.readn1()) - } else if x == 1 { - d.r.readb(d.b[6:8]) - v = uint64(bigen.Uint16(d.b[6:8])) - } else if x == 2 { - d.r.readb(d.b[4:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - } else { - d.r.readb(d.b[:8]) - v = bigen.Uint64(d.b[:8]) - } - return -} - -func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return - } - var slen int = -1 - // var ok bool - switch d.vd { - case bincVdString, bincVdByteArray: - slen = d.decLen() - if zerocopy { - if d.br { - bs2 = d.r.readx(slen) - } else if len(bs) == 0 { - bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:]) - } else { - bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) - } - } else { - bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) - } - if withString { - s = string(bs2) - } 
- case bincVdSymbol: - // zerocopy doesn't apply for symbols, - // as the values must be stored in a table for later use. - // - //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, - //extract symbol - //if containsStringVal, read it and put in map - //else look in map for string value - var symbol uint16 - vs := d.vs - if vs&0x8 == 0 { - symbol = uint16(d.r.readn1()) - } else { - symbol = uint16(bigen.Uint16(d.r.readx(2))) - } - if d.s == nil { - d.s = make([]bincDecSymbol, 0, 16) - } - - if vs&0x4 == 0 { - for i := range d.s { - j := &d.s[i] - if j.i == symbol { - bs2 = j.b - if withString { - if j.s == "" && bs2 != nil { - j.s = string(bs2) - } - s = j.s - } - break - } - } - } else { - switch vs & 0x3 { - case 0: - slen = int(d.r.readn1()) - case 1: - slen = int(bigen.Uint16(d.r.readx(2))) - case 2: - slen = int(bigen.Uint32(d.r.readx(4))) - case 3: - slen = int(bigen.Uint64(d.r.readx(8))) - } - // since using symbols, do not store any part of - // the parameter bs in the map, as it might be a shared buffer. - // bs2 = decByteSlice(d.r, slen, bs) - bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil) - if withString { - s = string(bs2) - } - d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) - } - default: - d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, bincVdSymbol, d.vd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeString() (s string) { - // DecodeBytes does not accommodate symbols, whose impl stores string version in map. - // Use decStringAndBytes directly. 
- // return string(d.DecodeBytes(d.b[:], true, true)) - _, s = d.decStringAndBytes(d.b[:], true, true) - return -} - -func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) { - s, _ = d.decStringAndBytes(d.b[:], false, true) - return -} - -func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return nil - } - var clen int - if d.vd == bincVdString || d.vd == bincVdByteArray { - clen = d.decLen() - } else { - d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, d.vd) - return - } - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) -} - -func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.vd == bincVdCustomExt { - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(l) - } else if d.vd == bincVdByteArray { - xbs = d.DecodeBytes(nil, true) - } else { - d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.vd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := d.d.n - var decodeFurther bool - - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - n.v = valueTypeNil - case bincSpFalse: - n.v = valueTypeBool - n.b = false - case bincSpTrue: - n.v = valueTypeBool - n.b = true - case bincSpNan: - n.v = valueTypeFloat - n.f = math.NaN() - case bincSpPosInf: - n.v = valueTypeFloat - n.f = math.Inf(1) - case bincSpNegInf: - n.v = valueTypeFloat - n.f = math.Inf(-1) - case bincSpZeroFloat: - n.v = valueTypeFloat - n.f = float64(0) - case bincSpZero: - n.v = valueTypeUint - n.u = uint64(0) // int8(0) - case bincSpNegOne: - n.v = valueTypeInt - n.i = int64(-1) // int8(-1) - default: - d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - n.v = valueTypeUint - n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 - case bincVdPosInt: - n.v = valueTypeUint - n.u = d.decUint() - case bincVdNegInt: - n.v = valueTypeInt - n.i = -(int64(d.decUint())) - case bincVdFloat: - n.v = valueTypeFloat - n.f = d.decFloat() - case bincVdSymbol: - n.v = valueTypeSymbol - n.s = d.DecodeString() - case bincVdString: - n.v = valueTypeString - n.s = d.DecodeString() - case bincVdByteArray: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false) - case bincVdTimestamp: - n.v = valueTypeTimestamp - tt, err := decodeTime(d.r.readx(int(d.vs))) - if err != nil { - panic(err) - } - n.t = tt - case bincVdCustomExt: - n.v = valueTypeExt - l := d.decLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(l) - case bincVdArray: - n.v = valueTypeArray - decodeFurther = true - case bincVdMap: - n.v = valueTypeMap - decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) - } - - if !decodeFurther { - d.bdRead = false - } - if n.v == valueTypeUint && d.h.SignedInteger { - n.v = valueTypeInt - n.i = int64(n.u) - } - return -} - 
-//------------------------------------ - -//BincHandle is a Handle for the Binc Schema-Free Encoding Format -//defined at https://github.com/ugorji/binc . -// -//BincHandle currently supports all Binc features with the following EXCEPTIONS: -// - only integers up to 64 bits of precision are supported. -// big integers are unsupported. -// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). -// extended precision and decimal IEEE 754 floats are unsupported. -// - Only UTF-8 strings supported. -// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. -// -//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. -type BincHandle struct { - BasicHandle - binaryEncodingType - noElemSeparators -} - -func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *BincHandle) newEncDriver(e *Encoder) encDriver { - return &bincEncDriver{e: e, w: e.w} -} - -func (h *BincHandle) newDecDriver(d *Decoder) decDriver { - return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes} -} - -func (_ *BincHandle) IsBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (e *bincEncDriver) reset() { - e.w = e.e.w - e.s = 0 - e.m = nil -} - -func (d *bincDecDriver) reset() { - d.r, d.br = d.d.r, d.d.bytes - d.s = nil - d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 -} - -var _ decDriver = (*bincDecDriver)(nil) -var _ encDriver = (*bincEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go deleted file mode 100644 index 3bc328f3062..00000000000 --- a/vendor/github.com/ugorji/go/codec/cbor.go +++ /dev/null @@ -1,662 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -package codec - -import ( - "math" - "reflect" -) - -const ( - cborMajorUint byte = iota - cborMajorNegInt - cborMajorBytes - cborMajorText - cborMajorArray - cborMajorMap - cborMajorTag - cborMajorOther -) - -const ( - cborBdFalse byte = 0xf4 + iota - cborBdTrue - cborBdNil - cborBdUndefined - cborBdExt - cborBdFloat16 - cborBdFloat32 - cborBdFloat64 -) - -const ( - cborBdIndefiniteBytes byte = 0x5f - cborBdIndefiniteString = 0x7f - cborBdIndefiniteArray = 0x9f - cborBdIndefiniteMap = 0xbf - cborBdBreak = 0xff -) - -const ( - CborStreamBytes byte = 0x5f - CborStreamString = 0x7f - CborStreamArray = 0x9f - CborStreamMap = 0xbf - CborStreamBreak = 0xff -) - -const ( - cborBaseUint byte = 0x00 - cborBaseNegInt = 0x20 - cborBaseBytes = 0x40 - cborBaseString = 0x60 - cborBaseArray = 0x80 - cborBaseMap = 0xa0 - cborBaseTag = 0xc0 - cborBaseSimple = 0xe0 -) - -// ------------------- - -type cborEncDriver struct { - noBuiltInTypes - encDriverNoopContainerWriter - // encNoSeparator - e *Encoder - w encWriter - h *CborHandle - x [8]byte -} - -func (e *cborEncDriver) EncodeNil() { - e.w.writen1(cborBdNil) -} - -func (e *cborEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(cborBdTrue) - } else { - e.w.writen1(cborBdFalse) - } -} - -func (e *cborEncDriver) EncodeFloat32(f float32) { - e.w.writen1(cborBdFloat32) - bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *cborEncDriver) EncodeFloat64(f float64) { - e.w.writen1(cborBdFloat64) - bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *cborEncDriver) encUint(v uint64, bd byte) { - if v <= 0x17 { - e.w.writen1(byte(v) + bd) - } else if v <= math.MaxUint8 { - e.w.writen2(bd+0x18, uint8(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd + 0x19) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.w.writen1(bd + 0x1a) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) - } else { // if v <= math.MaxUint64 { - e.w.writen1(bd + 0x1b) - 
bigenHelper{e.x[:8], e.w}.writeUint64(v) - } -} - -func (e *cborEncDriver) EncodeInt(v int64) { - if v < 0 { - e.encUint(uint64(-1-v), cborBaseNegInt) - } else { - e.encUint(uint64(v), cborBaseUint) - } -} - -func (e *cborEncDriver) EncodeUint(v uint64) { - e.encUint(v, cborBaseUint) -} - -func (e *cborEncDriver) encLen(bd byte, length int) { - e.encUint(uint64(length), bd) -} - -func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { - e.encUint(uint64(xtag), cborBaseTag) - if v := ext.ConvertExt(rv); v == nil { - e.EncodeNil() - } else { - en.encode(v) - } -} - -func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { - e.encUint(uint64(re.Tag), cborBaseTag) - if false && re.Data != nil { - en.encode(re.Data) - } else if re.Value != nil { - en.encode(re.Value) - } else { - e.EncodeNil() - } -} - -func (e *cborEncDriver) WriteArrayStart(length int) { - if e.h.IndefiniteLength { - e.w.writen1(cborBdIndefiniteArray) - } else { - e.encLen(cborBaseArray, length) - } -} - -func (e *cborEncDriver) WriteMapStart(length int) { - if e.h.IndefiniteLength { - e.w.writen1(cborBdIndefiniteMap) - } else { - e.encLen(cborBaseMap, length) - } -} - -func (e *cborEncDriver) WriteMapEnd() { - if e.h.IndefiniteLength { - e.w.writen1(cborBdBreak) - } -} - -func (e *cborEncDriver) WriteArrayEnd() { - if e.h.IndefiniteLength { - e.w.writen1(cborBdBreak) - } -} - -func (e *cborEncDriver) EncodeSymbol(v string) { - e.encStringBytesS(cborBaseString, v) -} - -func (e *cborEncDriver) EncodeString(c charEncoding, v string) { - e.encStringBytesS(cborBaseString, v) -} - -func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - if c == c_RAW { - e.encStringBytesS(cborBaseBytes, stringView(v)) - } else { - e.encStringBytesS(cborBaseString, stringView(v)) - } -} - -func (e *cborEncDriver) encStringBytesS(bb byte, v string) { - if e.h.IndefiniteLength { - if bb == cborBaseBytes { - e.w.writen1(cborBdIndefiniteBytes) - } else { - 
e.w.writen1(cborBdIndefiniteString) - } - blen := len(v) / 4 - if blen == 0 { - blen = 64 - } else if blen > 1024 { - blen = 1024 - } - for i := 0; i < len(v); { - var v2 string - i2 := i + blen - if i2 < len(v) { - v2 = v[i:i2] - } else { - v2 = v[i:] - } - e.encLen(bb, len(v2)) - e.w.writestr(v2) - i = i2 - } - e.w.writen1(cborBdBreak) - } else { - e.encLen(bb, len(v)) - e.w.writestr(v) - } -} - -// ---------------------- - -type cborDecDriver struct { - d *Decoder - h *CborHandle - r decReader - b [scratchByteArrayLen]byte - br bool // bytes reader - bdRead bool - bd byte - noBuiltInTypes - // decNoSeparator - decDriverNoopContainerReader -} - -func (d *cborDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *cborDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *cborDecDriver) ContainerType() (vt valueType) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdNil { - return valueTypeNil - } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { - return valueTypeBytes - } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { - return valueTypeString - } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { - return valueTypeArray - } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *cborDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } - // treat Nil and Undefined as nil values - if d.bd == cborBdNil || d.bd == cborBdUndefined { - d.bdRead = false - return true - } - return false -} - -func (d *cborDecDriver) CheckBreak() bool { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdBreak { - d.bdRead = false - return true - } - return false -} - -func (d *cborDecDriver) 
decUint() (ui uint64) { - v := d.bd & 0x1f - if v <= 0x17 { - ui = uint64(v) - } else { - if v == 0x18 { - ui = uint64(d.r.readn1()) - } else if v == 0x19 { - ui = uint64(bigen.Uint16(d.r.readx(2))) - } else if v == 0x1a { - ui = uint64(bigen.Uint32(d.r.readx(4))) - } else if v == 0x1b { - ui = uint64(bigen.Uint64(d.r.readx(8))) - } else { - d.d.errorf("decUint: Invalid descriptor: %v", d.bd) - return - } - } - return -} - -func (d *cborDecDriver) decCheckInteger() (neg bool) { - if !d.bdRead { - d.readNextBd() - } - major := d.bd >> 5 - if major == cborMajorUint { - } else if major == cborMajorNegInt { - neg = true - } else { - d.d.errorf("invalid major: %v (bd: %v)", major, d.bd) - return - } - return -} - -func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) { - neg := d.decCheckInteger() - ui := d.decUint() - // check if this number can be converted to an int without overflow - var overflow bool - if neg { - if i, overflow = chkOvf.SignedInt(ui + 1); overflow { - d.d.errorf("cbor: overflow converting %v to signed integer", ui+1) - return - } - i = -i - } else { - if i, overflow = chkOvf.SignedInt(ui); overflow { - d.d.errorf("cbor: overflow converting %v to signed integer", ui) - return - } - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("cbor: overflow integer: %v", i) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - if d.decCheckInteger() { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - ui = d.decUint() - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("cbor: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == cborBdFloat16 { - f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) - } else if bd == cborBdFloat32 { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } 
else if bd == cborBdFloat64 { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else if bd >= cborBaseUint && bd < cborBaseBytes { - f = float64(d.DecodeInt(64)) - } else { - d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd) - return - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("cbor: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *cborDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == cborBdTrue { - b = true - } else if bd == cborBdFalse { - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) ReadMapStart() (length int) { - if !d.bdRead { - d.readNextBd() - } - d.bdRead = false - if d.bd == cborBdIndefiniteMap { - return -1 - } - return d.decLen() -} - -func (d *cborDecDriver) ReadArrayStart() (length int) { - if !d.bdRead { - d.readNextBd() - } - d.bdRead = false - if d.bd == cborBdIndefiniteArray { - return -1 - } - return d.decLen() -} - -func (d *cborDecDriver) decLen() int { - return int(d.decUint()) -} - -func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { - d.bdRead = false - for { - if d.CheckBreak() { - break - } - if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { - d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd) - return nil - } - n := d.decLen() - oldLen := len(bs) - newLen := oldLen + n - if newLen > cap(bs) { - bs2 := make([]byte, newLen, 2*cap(bs)+n) - copy(bs2, bs) - bs = bs2 - } else { - bs = bs[:newLen] - } - d.r.readb(bs[oldLen:newLen]) - // bs = append(bs, d.r.readn()...) 
- d.bdRead = false - } - d.bdRead = false - return bs -} - -func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdNil || d.bd == cborBdUndefined { - d.bdRead = false - return nil - } - if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { - d.bdRead = false - if bs == nil { - return d.decAppendIndefiniteBytes(zeroByteSlice) - } - return d.decAppendIndefiniteBytes(bs[:0]) - } - clen := d.decLen() - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) -} - -func (d *cborDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true)) -} - -func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) { - return d.DecodeBytes(d.b[:], true) -} - -func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if !d.bdRead { - d.readNextBd() - } - u := d.decUint() - d.bdRead = false - realxtag = u - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - d.d.decode(&re.Value) - } else if xtag != realxtag { - d.d.errorf("Wrong extension tag. Got %b. 
Expecting: %v", realxtag, xtag) - return - } else { - var v interface{} - d.d.decode(&v) - ext.UpdateExt(rv, v) - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := d.d.n - var decodeFurther bool - - switch d.bd { - case cborBdNil: - n.v = valueTypeNil - case cborBdFalse: - n.v = valueTypeBool - n.b = false - case cborBdTrue: - n.v = valueTypeBool - n.b = true - case cborBdFloat16, cborBdFloat32: - n.v = valueTypeFloat - n.f = d.DecodeFloat(true) - case cborBdFloat64: - n.v = valueTypeFloat - n.f = d.DecodeFloat(false) - case cborBdIndefiniteBytes: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false) - case cborBdIndefiniteString: - n.v = valueTypeString - n.s = d.DecodeString() - case cborBdIndefiniteArray: - n.v = valueTypeArray - decodeFurther = true - case cborBdIndefiniteMap: - n.v = valueTypeMap - decodeFurther = true - default: - switch { - case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: - if d.h.SignedInteger { - n.v = valueTypeInt - n.i = d.DecodeInt(64) - } else { - n.v = valueTypeUint - n.u = d.DecodeUint(64) - } - case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: - n.v = valueTypeInt - n.i = d.DecodeInt(64) - case d.bd >= cborBaseBytes && d.bd < cborBaseString: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false) - case d.bd >= cborBaseString && d.bd < cborBaseArray: - n.v = valueTypeString - n.s = d.DecodeString() - case d.bd >= cborBaseArray && d.bd < cborBaseMap: - n.v = valueTypeArray - decodeFurther = true - case d.bd >= cborBaseMap && d.bd < cborBaseTag: - n.v = valueTypeMap - decodeFurther = true - case d.bd >= cborBaseTag && d.bd < cborBaseSimple: - n.v = valueTypeExt - n.u = d.decUint() - n.l = nil - // d.bdRead = false - // d.d.decode(&re.Value) // handled by decode itself. 
- // decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) - return - } - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -// ------------------------- - -// CborHandle is a Handle for the CBOR encoding format, -// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . -// -// CBOR is comprehensively supported, including support for: -// - indefinite-length arrays/maps/bytes/strings -// - (extension) tags in range 0..0xffff (0 .. 65535) -// - half, single and double-precision floats -// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) -// - nil, true, false, ... -// - arrays and maps, bytes and text strings -// -// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. -// Users can implement them as needed (using SetExt), including spec-documented ones: -// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. -// -// To encode with indefinite lengths (streaming), users will use -// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants. 
-// -// For example, to encode "one-byte" as an indefinite length string: -// var buf bytes.Buffer -// e := NewEncoder(&buf, new(CborHandle)) -// buf.WriteByte(CborStreamString) -// e.MustEncode("one-") -// e.MustEncode("byte") -// buf.WriteByte(CborStreamBreak) -// encodedBytes := buf.Bytes() -// var vv interface{} -// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv) -// // Now, vv contains the same string "one-byte" -// -type CborHandle struct { - binaryEncodingType - noElemSeparators - BasicHandle - - // IndefiniteLength=true, means that we encode using indefinitelength - IndefiniteLength bool -} - -func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{i: ext}) -} - -func (h *CborHandle) newEncDriver(e *Encoder) encDriver { - return &cborEncDriver{e: e, w: e.w, h: h} -} - -func (h *CborHandle) newDecDriver(d *Decoder) decDriver { - return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes} -} - -func (e *cborEncDriver) reset() { - e.w = e.e.w -} - -func (d *cborDecDriver) reset() { - d.r, d.br = d.d.r, d.d.bytes - d.bd, d.bdRead = 0, false -} - -var _ decDriver = (*cborDecDriver)(nil) -var _ encDriver = (*cborEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go deleted file mode 100644 index f949f9e7b8d..00000000000 --- a/vendor/github.com/ugorji/go/codec/decode.go +++ /dev/null @@ -1,2469 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "encoding" - "errors" - "fmt" - "io" - "reflect" - "sync" - "time" -) - -// Some tagging information for error messages. 
-const ( - msgBadDesc = "Unrecognized descriptor byte" - msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" -) - -var ( - onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct") - cannotDecodeIntoNilErr = errors.New("cannot decode into nil") - - decUnreadByteNothingToReadErr = errors.New("cannot unread - nothing has been read") - decUnreadByteLastByteNotReadErr = errors.New("cannot unread - last byte has not been read") - decUnreadByteUnknownErr = errors.New("cannot unread - reason unknown") -) - -// decReader abstracts the reading source, allowing implementations that can -// read from an io.Reader or directly off a byte slice with zero-copying. -type decReader interface { - unreadn1() - - // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR - // just return a view of the []byte being decoded from. - // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. - readx(n int) []byte - readb([]byte) - readn1() uint8 - numread() int // number of bytes read - track() - stopTrack() []byte - - // skip will skip any byte that matches, and return the first non-matching byte - skip(accept *bitset256) (token byte) - // readTo will read any byte that matches, stopping once no-longer matching. - readTo(in []byte, accept *bitset256) (out []byte) - // readUntil will read, only stopping once it matches the 'stop' byte. - readUntil(in []byte, stop byte) (out []byte) -} - -type decDriver interface { - // this will check if the next token is a break. - CheckBreak() bool - // Note: TryDecodeAsNil should be careful not to share any temporary []byte with - // the rest of the decDriver. This is because sometimes, we optimize by holding onto - // a transient []byte, and ensuring the only other call we make to the decDriver - // during that time is maybe a TryDecodeAsNil() call. 
- TryDecodeAsNil() bool - // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. - ContainerType() (vt valueType) - // IsBuiltinType(rt uintptr) bool - DecodeBuiltin(rt uintptr, v interface{}) - - // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. - // For maps and arrays, it will not do the decoding in-band, but will signal - // the decoder, so that is done later, by setting the decNaked.valueType field. - // - // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). - // for extensions, DecodeNaked must read the tag and the []byte if it exists. - // if the []byte is not read, then kInterfaceNaked will treat it as a Handle - // that stores the subsequent value in-band, and complete reading the RawExt. - // - // extensions should also use readx to decode them, for efficiency. - // kInterface will extract the detached byte slice if it has to pass it outside its realm. - DecodeNaked() - DecodeInt(bitsize uint8) (i int64) - DecodeUint(bitsize uint8) (ui uint64) - DecodeFloat(chkOverflow32 bool) (f float64) - DecodeBool() (b bool) - // DecodeString can also decode symbols. - // It looks redundant as DecodeBytes is available. - // However, some codecs (e.g. binc) support symbols and can - // return a pre-stored string value, meaning that it can bypass - // the cost of []byte->string conversion. - DecodeString() (s string) - DecodeStringAsBytes() (v []byte) - - // DecodeBytes may be called directly, without going through reflection. - // Consequently, it must be designed to handle possible nil. - DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) - // DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) - - // decodeExt will decode into a *RawExt or into an extension. 
- DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) - // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - ReadArrayStart() int - ReadArrayElem() - ReadArrayEnd() - ReadMapStart() int - ReadMapElemKey() - ReadMapElemValue() - ReadMapEnd() - - reset() - uncacheRead() -} - -// type decNoSeparator struct {} -// func (_ decNoSeparator) ReadEnd() {} - -type decDriverNoopContainerReader struct{} - -func (_ decDriverNoopContainerReader) ReadArrayStart() (v int) { return } -func (_ decDriverNoopContainerReader) ReadArrayElem() {} -func (_ decDriverNoopContainerReader) ReadArrayEnd() {} -func (_ decDriverNoopContainerReader) ReadMapStart() (v int) { return } -func (_ decDriverNoopContainerReader) ReadMapElemKey() {} -func (_ decDriverNoopContainerReader) ReadMapElemValue() {} -func (_ decDriverNoopContainerReader) ReadMapEnd() {} -func (_ decDriverNoopContainerReader) CheckBreak() (v bool) { return } - -// func (_ decNoSeparator) uncacheRead() {} - -type DecodeOptions struct { - // MapType specifies type to use during schema-less decoding of a map in the stream. - // If nil, we use map[interface{}]interface{} - MapType reflect.Type - - // SliceType specifies type to use during schema-less decoding of an array in the stream. - // If nil, we use []interface{} - SliceType reflect.Type - - // MaxInitLen defines the maxinum initial length that we "make" a collection (string, slice, map, chan). - // If 0 or negative, we default to a sensible value based on the size of an element in the collection. - // - // For example, when decoding, a stream may say that it has 2^64 elements. - // We should not auto-matically provision a slice of that length, to prevent Out-Of-Memory crash. - // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. - MaxInitLen int - - // If ErrorIfNoField, return an error when decoding a map - // from a codec stream into a struct, and no matching struct field is found. 
- ErrorIfNoField bool - - // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. - // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, - // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). - ErrorIfNoArrayExpand bool - - // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). - SignedInteger bool - - // MapValueReset controls how we decode into a map value. - // - // By default, we MAY retrieve the mapping for a key, and then decode into that. - // However, especially with big maps, that retrieval may be expensive and unnecessary - // if the stream already contains all that is necessary to recreate the value. - // - // If true, we will never retrieve the previous mapping, - // but rather decode into a new value and set that in the map. - // - // If false, we will retrieve the previous mapping if necessary e.g. - // the previous mapping is a pointer, or is a struct or array with pre-set state, - // or is an interface. - MapValueReset bool - - // SliceElementReset: on decoding a slice, reset the element to a zero value first. - // - // concern: if the slice already contained some garbage, we will decode into that garbage. - SliceElementReset bool - - // InterfaceReset controls how we decode into an interface. - // - // By default, when we see a field that is an interface{...}, - // or a map with interface{...} value, we will attempt decoding into the - // "contained" value. - // - // However, this prevents us from reading a string into an interface{} - // that formerly contained a number. - // - // If true, we will decode into a new "blank" value, and set that in the interface. - // If false, we will decode into whatever is contained in the interface. - InterfaceReset bool - - // InternString controls interning of strings during decoding. - // - // Some handles, e.g. 
json, typically will read map keys as strings. - // If the set of keys are finite, it may help reduce allocation to - // look them up from a map (than to allocate them afresh). - // - // Note: Handles will be smart when using the intern functionality. - // Every string should not be interned. - // An excellent use-case for interning is struct field names, - // or map keys where key type is string. - InternString bool - - // PreferArrayOverSlice controls whether to decode to an array or a slice. - // - // This only impacts decoding into a nil interface{}. - // Consequently, it has no effect on codecgen. - // - // *Note*: This only applies if using go1.5 and above, - // as it requires reflect.ArrayOf support which was absent before go1.5. - PreferArrayOverSlice bool - - // DeleteOnNilMapValue controls how to decode a nil value in the stream. - // - // If true, we will delete the mapping of the key. - // Else, just set the mapping to the zero value of the type. - DeleteOnNilMapValue bool - - // ReaderBufferSize is the size of the buffer used when reading. - // - // if > 0, we use a smart buffer internally for performance purposes. - ReaderBufferSize int -} - -// ------------------------------------ - -type bufioDecReader struct { - buf []byte - r io.Reader - - c int // cursor - n int // num read - err error - - trb bool - tr []byte - - b [8]byte -} - -func (z *bufioDecReader) reset(r io.Reader) { - z.r, z.c, z.n, z.err, z.trb = r, 0, 0, nil, false - if z.tr != nil { - z.tr = z.tr[:0] - } -} - -func (z *bufioDecReader) Read(p []byte) (n int, err error) { - if z.err != nil { - return 0, z.err - } - p0 := p - n = copy(p, z.buf[z.c:]) - z.c += n - if z.c == len(z.buf) { - z.c = 0 - } - z.n += n - if len(p) == n { - if z.c == 0 { - z.buf = z.buf[:1] - z.buf[0] = p[len(p)-1] - z.c = 1 - } - if z.trb { - z.tr = append(z.tr, p0[:n]...) 
- } - return - } - p = p[n:] - var n2 int - // if we are here, then z.buf is all read - if len(p) > len(z.buf) { - n2, err = decReadFull(z.r, p) - n += n2 - z.n += n2 - z.err = err - // don't return EOF if some bytes were read. keep for next time. - if n > 0 && err == io.EOF { - err = nil - } - // always keep last byte in z.buf - z.buf = z.buf[:1] - z.buf[0] = p[len(p)-1] - z.c = 1 - if z.trb { - z.tr = append(z.tr, p0[:n]...) - } - return - } - // z.c is now 0, and len(p) <= len(z.buf) - for len(p) > 0 && z.err == nil { - // println("len(p) loop starting ... ") - z.c = 0 - z.buf = z.buf[0:cap(z.buf)] - n2, err = z.r.Read(z.buf) - if n2 > 0 { - if err == io.EOF { - err = nil - } - z.buf = z.buf[:n2] - n2 = copy(p, z.buf) - z.c = n2 - n += n2 - z.n += n2 - p = p[n2:] - } - z.err = err - // println("... len(p) loop done") - } - if z.c == 0 { - z.buf = z.buf[:1] - z.buf[0] = p[len(p)-1] - z.c = 1 - } - if z.trb { - z.tr = append(z.tr, p0[:n]...) - } - return -} - -func (z *bufioDecReader) ReadByte() (b byte, err error) { - z.b[0] = 0 - _, err = z.Read(z.b[:1]) - b = z.b[0] - return -} - -func (z *bufioDecReader) UnreadByte() (err error) { - if z.err != nil { - return z.err - } - if z.c > 0 { - z.c-- - z.n-- - if z.trb { - z.tr = z.tr[:len(z.tr)-1] - } - return - } - return decUnreadByteNothingToReadErr -} - -func (z *bufioDecReader) numread() int { - return z.n -} - -func (z *bufioDecReader) readx(n int) (bs []byte) { - if n <= 0 || z.err != nil { - return - } - if z.c+n <= len(z.buf) { - bs = z.buf[z.c : z.c+n] - z.n += n - z.c += n - if z.trb { - z.tr = append(z.tr, bs...) 
- } - return - } - bs = make([]byte, n) - _, err := z.Read(bs) - if err != nil { - panic(err) - } - return -} - -func (z *bufioDecReader) readb(bs []byte) { - _, err := z.Read(bs) - if err != nil { - panic(err) - } -} - -// func (z *bufioDecReader) readn1eof() (b uint8, eof bool) { -// b, err := z.ReadByte() -// if err != nil { -// if err == io.EOF { -// eof = true -// } else { -// panic(err) -// } -// } -// return -// } - -func (z *bufioDecReader) readn1() (b uint8) { - b, err := z.ReadByte() - if err != nil { - panic(err) - } - return -} - -func (z *bufioDecReader) search(in []byte, accept *bitset256, stop, flag uint8) (token byte, out []byte) { - // flag: 1 (skip), 2 (readTo), 4 (readUntil) - if flag == 4 { - for i := z.c; i < len(z.buf); i++ { - if z.buf[i] == stop { - token = z.buf[i] - z.n = z.n + (i - z.c) - 1 - i++ - out = z.buf[z.c:i] - if z.trb { - z.tr = append(z.tr, z.buf[z.c:i]...) - } - z.c = i - return - } - } - } else { - for i := z.c; i < len(z.buf); i++ { - if !accept.isset(z.buf[i]) { - token = z.buf[i] - z.n = z.n + (i - z.c) - 1 - if flag == 1 { - i++ - } else { - out = z.buf[z.c:i] - } - if z.trb { - z.tr = append(z.tr, z.buf[z.c:i]...) - } - z.c = i - return - } - } - } - z.n += len(z.buf) - z.c - if flag != 1 { - out = append(in, z.buf[z.c:]...) - } - if z.trb { - z.tr = append(z.tr, z.buf[z.c:]...) - } - var n2 int - if z.err != nil { - return - } - for { - z.c = 0 - z.buf = z.buf[0:cap(z.buf)] - n2, z.err = z.r.Read(z.buf) - if n2 > 0 && z.err != nil { - z.err = nil - } - z.buf = z.buf[:n2] - if flag == 4 { - for i := 0; i < n2; i++ { - if z.buf[i] == stop { - token = z.buf[i] - z.n += i - 1 - i++ - out = append(out, z.buf[z.c:i]...) - if z.trb { - z.tr = append(z.tr, z.buf[z.c:i]...) - } - z.c = i - return - } - } - } else { - for i := 0; i < n2; i++ { - if !accept.isset(z.buf[i]) { - token = z.buf[i] - z.n += i - 1 - if flag == 1 { - i++ - } - if flag != 1 { - out = append(out, z.buf[z.c:i]...) 
- } - if z.trb { - z.tr = append(z.tr, z.buf[z.c:i]...) - } - z.c = i - return - } - } - } - if flag != 1 { - out = append(out, z.buf[:n2]...) - } - z.n += n2 - if z.err != nil { - return - } - if z.trb { - z.tr = append(z.tr, z.buf[:n2]...) - } - } -} - -func (z *bufioDecReader) skip(accept *bitset256) (token byte) { - token, _ = z.search(nil, accept, 0, 1) - return -} - -func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { - _, out = z.search(in, accept, 0, 2) - return -} - -func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) { - _, out = z.search(in, nil, stop, 4) - return -} - -func (z *bufioDecReader) unreadn1() { - err := z.UnreadByte() - if err != nil { - panic(err) - } -} - -func (z *bufioDecReader) track() { - if z.tr != nil { - z.tr = z.tr[:0] - } - z.trb = true -} - -func (z *bufioDecReader) stopTrack() (bs []byte) { - z.trb = false - return z.tr -} - -// ioDecReader is a decReader that reads off an io.Reader. -// -// It also has a fallback implementation of ByteScanner if needed. -type ioDecReader struct { - r io.Reader // the reader passed in - - rr io.Reader - br io.ByteScanner - - l byte // last byte - ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread - b [4]byte // tiny buffer for reading single bytes - trb bool // tracking bytes turned on - - // temp byte array re-used internally for efficiency during read. - // shares buffer with Decoder, so we keep size of struct within 8 words. 
- x *[scratchByteArrayLen]byte - n int // num read - tr []byte // tracking bytes read -} - -func (z *ioDecReader) reset(r io.Reader) { - z.r = r - z.rr = r - z.l, z.ls, z.n, z.trb = 0, 0, 0, false - if z.tr != nil { - z.tr = z.tr[:0] - } - var ok bool - if z.br, ok = r.(io.ByteScanner); !ok { - z.br = z - z.rr = z - } -} - -func (z *ioDecReader) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return - } - var firstByte bool - if z.ls == 1 { - z.ls = 2 - p[0] = z.l - if len(p) == 1 { - n = 1 - return - } - firstByte = true - p = p[1:] - } - n, err = z.r.Read(p) - if n > 0 { - if err == io.EOF && n == len(p) { - err = nil // read was successful, so postpone EOF (till next time) - } - z.l = p[n-1] - z.ls = 2 - } - if firstByte { - n++ - } - return -} - -func (z *ioDecReader) ReadByte() (c byte, err error) { - n, err := z.Read(z.b[:1]) - if n == 1 { - c = z.b[0] - if err == io.EOF { - err = nil // read was successful, so postpone EOF (till next time) - } - } - return -} - -func (z *ioDecReader) UnreadByte() (err error) { - switch z.ls { - case 2: - z.ls = 1 - case 0: - err = decUnreadByteNothingToReadErr - case 1: - err = decUnreadByteLastByteNotReadErr - default: - err = decUnreadByteUnknownErr - } - return -} - -func (z *ioDecReader) numread() int { - return z.n -} - -func (z *ioDecReader) readx(n int) (bs []byte) { - if n <= 0 { - return - } - if n < len(z.x) { - bs = z.x[:n] - } else { - bs = make([]byte, n) - } - if _, err := decReadFull(z.rr, bs); err != nil { - panic(err) - } - z.n += len(bs) - if z.trb { - z.tr = append(z.tr, bs...) - } - return -} - -func (z *ioDecReader) readb(bs []byte) { - // if len(bs) == 0 { - // return - // } - if _, err := decReadFull(z.rr, bs); err != nil { - panic(err) - } - z.n += len(bs) - if z.trb { - z.tr = append(z.tr, bs...) 
- } -} - -func (z *ioDecReader) readn1eof() (b uint8, eof bool) { - b, err := z.br.ReadByte() - if err == nil { - z.n++ - if z.trb { - z.tr = append(z.tr, b) - } - } else if err == io.EOF { - eof = true - } else { - panic(err) - } - return -} - -func (z *ioDecReader) readn1() (b uint8) { - var err error - if b, err = z.br.ReadByte(); err == nil { - z.n++ - if z.trb { - z.tr = append(z.tr, b) - } - return - } - panic(err) -} - -func (z *ioDecReader) skip(accept *bitset256) (token byte) { - for { - var eof bool - token, eof = z.readn1eof() - if eof { - return - } - if accept.isset(token) { - continue - } - return - } -} - -func (z *ioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { - out = in - for { - token, eof := z.readn1eof() - if eof { - return - } - if accept.isset(token) { - out = append(out, token) - } else { - z.unreadn1() - return - } - } -} - -func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) { - out = in - for { - token, eof := z.readn1eof() - if eof { - panic(io.EOF) - } - out = append(out, token) - if token == stop { - return - } - } -} - -func (z *ioDecReader) unreadn1() { - err := z.br.UnreadByte() - if err != nil { - panic(err) - } - z.n-- - if z.trb { - if l := len(z.tr) - 1; l >= 0 { - z.tr = z.tr[:l] - } - } -} - -func (z *ioDecReader) track() { - if z.tr != nil { - z.tr = z.tr[:0] - } - z.trb = true -} - -func (z *ioDecReader) stopTrack() (bs []byte) { - z.trb = false - return z.tr -} - -// ------------------------------------ - -var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read") - -// bytesDecReader is a decReader that reads off a byte slice with zero copying -type bytesDecReader struct { - b []byte // data - c int // cursor - a int // available - t int // track start -} - -func (z *bytesDecReader) reset(in []byte) { - z.b = in - z.a = len(in) - z.c = 0 - z.t = 0 -} - -func (z *bytesDecReader) numread() int { - return z.c -} - -func (z *bytesDecReader) unreadn1() { - if z.c == 0 || 
len(z.b) == 0 { - panic(bytesDecReaderCannotUnreadErr) - } - z.c-- - z.a++ - return -} - -func (z *bytesDecReader) readx(n int) (bs []byte) { - // slicing from a non-constant start position is more expensive, - // as more computation is required to decipher the pointer start position. - // However, we do it only once, and it's better than reslicing both z.b and return value. - - if n <= 0 { - } else if z.a == 0 { - panic(io.EOF) - } else if n > z.a { - panic(io.ErrUnexpectedEOF) - } else { - c0 := z.c - z.c = c0 + n - z.a = z.a - n - bs = z.b[c0:z.c] - } - return -} - -func (z *bytesDecReader) readb(bs []byte) { - copy(bs, z.readx(len(bs))) -} - -func (z *bytesDecReader) readn1() (v uint8) { - if z.a == 0 { - panic(io.EOF) - } - v = z.b[z.c] - z.c++ - z.a-- - return -} - -// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { -// if z.a == 0 { -// eof = true -// return -// } -// v = z.b[z.c] -// z.c++ -// z.a-- -// return -// } - -func (z *bytesDecReader) skip(accept *bitset256) (token byte) { - if z.a == 0 { - return - } - blen := len(z.b) - for i := z.c; i < blen; i++ { - if !accept.isset(z.b[i]) { - token = z.b[i] - i++ - z.a -= (i - z.c) - z.c = i - return - } - } - z.a, z.c = 0, blen - return -} - -func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) { - if z.a == 0 { - return - } - blen := len(z.b) - for i := z.c; i < blen; i++ { - if !accept.isset(z.b[i]) { - out = z.b[z.c:i] - z.a -= (i - z.c) - z.c = i - return - } - } - out = z.b[z.c:] - z.a, z.c = 0, blen - return -} - -func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) { - if z.a == 0 { - panic(io.EOF) - } - blen := len(z.b) - for i := z.c; i < blen; i++ { - if z.b[i] == stop { - i++ - out = z.b[z.c:i] - z.a -= (i - z.c) - z.c = i - return - } - } - z.a, z.c = 0, blen - panic(io.EOF) -} - -func (z *bytesDecReader) track() { - z.t = z.c -} - -func (z *bytesDecReader) stopTrack() (bs []byte) { - return z.b[z.t:z.c] -} - -// 
---------------------------------------- - -func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) { - d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv)) -} - -func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) { - d.d.DecodeExt(rv2i(rv), 0, nil) -} - -func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) { - d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn) -} - -func (d *Decoder) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) { - if indir == -1 { - v = rv2i(rv.Addr()) - } else if indir == 0 { - v = rv2i(rv) - } else { - for j := int8(0); j < indir; j++ { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - v = rv2i(rv) - } - return -} - -func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) { - d.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(d) -} - -func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) { - bm := d.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler) - xbs := d.d.DecodeBytes(nil, true) - if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { - panic(fnerr) - } -} - -func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) { - tm := d.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler) - fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes()) - if fnerr != nil { - panic(fnerr) - } -} - -func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) { - tm := d.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler) - // bs := d.d.DecodeBytes(d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. 
- fnerr := tm.UnmarshalJSON(d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} - -func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) { - d.errorf("no decoding function defined for kind %v", rv.Kind()) -} - -// var kIntfCtr uint64 - -func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { - // nil interface: - // use some hieristics to decode it appropriately - // based on the detected next value in the stream. - n := d.naked() - d.d.DecodeNaked() - if n.v == valueTypeNil { - return - } - // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader). - // if num := f.ti.rt.NumMethod(); num > 0 { - if f.ti.numMeth > 0 { - d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) - return - } - // var useRvn bool - switch n.v { - case valueTypeMap: - if d.mtid == 0 || d.mtid == mapIntfIntfTypId { - if n.lm < arrayCacheLen { - n.ma[n.lm] = nil - rvn = n.rr[decNakedMapIntfIntfIdx*arrayCacheLen+n.lm] - n.lm++ - d.decode(&n.ma[n.lm-1]) - n.lm-- - } else { - var v2 map[interface{}]interface{} - d.decode(&v2) - rvn = reflect.ValueOf(&v2).Elem() - } - } else if d.mtid == mapStrIntfTypId { // for json performance - if n.ln < arrayCacheLen { - n.na[n.ln] = nil - rvn = n.rr[decNakedMapStrIntfIdx*arrayCacheLen+n.ln] - n.ln++ - d.decode(&n.na[n.ln-1]) - n.ln-- - } else { - var v2 map[string]interface{} - d.decode(&v2) - rvn = reflect.ValueOf(&v2).Elem() - } - } else { - rvn = reflect.New(d.h.MapType) - if useLookupRecognizedTypes && d.mtr { // isRecognizedRtid(d.mtid) { - d.decode(rv2i(rvn)) - rvn = rvn.Elem() - } else { - rvn = rvn.Elem() - d.decodeValue(rvn, nil, false, true) - } - } - case valueTypeArray: - if d.stid == 0 || d.stid == intfSliceTypId { - if n.ls < arrayCacheLen { - n.sa[n.ls] = nil - rvn = n.rr[decNakedSliceIntfIdx*arrayCacheLen+n.ls] - n.ls++ - d.decode(&n.sa[n.ls-1]) - n.ls-- - } else { - var v2 []interface{} - d.decode(&v2) - rvn = reflect.ValueOf(&v2).Elem() - } 
- if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { - rvn2 := reflect.New(reflectArrayOf(rvn.Len(), intfTyp)).Elem() - reflect.Copy(rvn2, rvn) - rvn = rvn2 - } - } else { - rvn = reflect.New(d.h.SliceType) - if useLookupRecognizedTypes && d.str { // isRecognizedRtid(d.stid) { - d.decode(rv2i(rvn)) - rvn = rvn.Elem() - } else { - rvn = rvn.Elem() - d.decodeValue(rvn, nil, false, true) - } - } - case valueTypeExt: - var v interface{} - tag, bytes := n.u, n.l // calling decode below might taint the values - if bytes == nil { - if n.li < arrayCacheLen { - n.ia[n.li] = nil - n.li++ - d.decode(&n.ia[n.li-1]) - // v = *(&n.ia[l]) - n.li-- - v = n.ia[n.li] - n.ia[n.li] = nil - } else { - d.decode(&v) - } - } - bfn := d.h.getExtForTag(tag) - if bfn == nil { - var re RawExt - re.Tag = tag - re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) - re.Value = v - rvn = reflect.ValueOf(&re).Elem() - } else { - rvnA := reflect.New(bfn.rt) - if bytes != nil { - bfn.ext.ReadExt(rv2i(rvnA), bytes) - } else { - bfn.ext.UpdateExt(rv2i(rvnA), v) - } - rvn = rvnA.Elem() - } - case valueTypeNil: - // no-op - case valueTypeInt: - rvn = n.rr[decNakedIntIdx] // d.np.get(&n.i) - case valueTypeUint: - rvn = n.rr[decNakedUintIdx] // d.np.get(&n.u) - case valueTypeFloat: - rvn = n.rr[decNakedFloatIdx] // d.np.get(&n.f) - case valueTypeBool: - rvn = n.rr[decNakedBoolIdx] // d.np.get(&n.b) - case valueTypeString, valueTypeSymbol: - rvn = n.rr[decNakedStringIdx] // d.np.get(&n.s) - case valueTypeBytes: - rvn = n.rr[decNakedBytesIdx] // d.np.get(&n.l) - case valueTypeTimestamp: - rvn = n.rr[decNakedTimeIdx] // d.np.get(&n.t) - default: - panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v)) - } - return -} - -func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { - // Note: - // A consequence of how kInterface works, is that - // if an interface already contains something, we try - // to decode into what was there before. 
- // We do not replace with a generic value (as got from decodeNaked). - - // every interface passed here MUST be settable. - var rvn reflect.Value - if rv.IsNil() { - if rvn = d.kInterfaceNaked(f); rvn.IsValid() { - rv.Set(rvn) - } - return - } - if d.h.InterfaceReset { - if rvn = d.kInterfaceNaked(f); rvn.IsValid() { - rv.Set(rvn) - } else { - // reset to zero value based on current type in there. - rv.Set(reflect.Zero(rv.Elem().Type())) - } - return - } - - // now we have a non-nil interface value, meaning it contains a type - rvn = rv.Elem() - if d.d.TryDecodeAsNil() { - rv.Set(reflect.Zero(rvn.Type())) - return - } - - // Note: interface{} is settable, but underlying type may not be. - // Consequently, we MAY have to create a decodable value out of the underlying value, - // decode into it, and reset the interface itself. - // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type()) - - rvn2, canDecode := isDecodeable(rvn) - if canDecode { - d.decodeValue(rvn2, nil, true, true) - return - } - - rvn2 = reflect.New(rvn.Type()).Elem() - rvn2.Set(rvn) - d.decodeValue(rvn2, nil, true, true) - rv.Set(rvn2) -} - -func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { - // checking if recognized within kstruct is too expensive. - // only check where you can determine if valid outside the loop - // ie on homogenous collections: slices, arrays and maps. - // - // if true, we don't create too many decFn's. - // It's a delicate balance. 
- const checkRecognized bool = false // false: TODO - - fti := f.ti - dd := d.d - elemsep := d.hh.hasElemSeparators() - sfn := structFieldNode{v: rv, update: true} - ctyp := dd.ContainerType() - if ctyp == valueTypeMap { - containerLen := dd.ReadMapStart() - if containerLen == 0 { - dd.ReadMapEnd() - return - } - tisfi := fti.sfi - hasLen := containerLen >= 0 - - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - // rvkencname := dd.DecodeString() - if elemsep { - dd.ReadMapElemKey() - } - rvkencnameB := dd.DecodeStringAsBytes() - rvkencname := stringView(rvkencnameB) - // rvksi := ti.getForEncName(rvkencname) - if elemsep { - dd.ReadMapElemValue() - } - if k := fti.indexForEncName(rvkencname); k > -1 { - si := tisfi[k] - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(sfn.field(si), nil, checkRecognized, true) - } - } else { - d.structFieldNotFound(-1, rvkencname) - } - // keepAlive4StringView(rvkencnameB) // maintain ref 4 stringView // not needed, as reference is outside loop - } - dd.ReadMapEnd() - } else if ctyp == valueTypeArray { - containerLen := dd.ReadArrayStart() - if containerLen == 0 { - dd.ReadArrayEnd() - return - } - // Not much gain from doing it two ways for array. - // Arrays are not used as much for structs. 
- hasLen := containerLen >= 0 - for j, si := range fti.sfip { - if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) { - break - } - if elemsep { - dd.ReadArrayElem() - } - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(sfn.field(si), nil, checkRecognized, true) - } - } - if containerLen > len(fti.sfip) { - // read remaining values and throw away - for j := len(fti.sfip); j < containerLen; j++ { - if elemsep { - dd.ReadArrayElem() - } - d.structFieldNotFound(j, "") - } - } - dd.ReadArrayEnd() - } else { - d.error(onlyMapOrArrayCanDecodeIntoStructErr) - return - } -} - -func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { - // A slice can be set from a map or array in stream. - // This way, the order can be kept (as order is lost with map). - ti := f.ti - dd := d.d - rtelem0 := ti.rt.Elem() - ctyp := dd.ContainerType() - if ctyp == valueTypeBytes || ctyp == valueTypeString { - // you can only decode bytes or string in the stream into a slice or array of bytes - if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { - d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt) - } - if f.seq == seqTypeChan { - bs2 := dd.DecodeBytes(nil, true) - ch := rv2i(rv).(chan<- byte) - for _, b := range bs2 { - ch <- b - } - } else { - rvbs := rv.Bytes() - bs2 := dd.DecodeBytes(rvbs, false) - if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { - if rv.CanSet() { - rv.SetBytes(bs2) - } else { - copy(rvbs, bs2) - } - } - } - return - } - - // array := f.seq == seqTypeChan - - slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) - - // an array can never return a nil slice. so no need to check f.array here. 
- if containerLenS == 0 { - if rv.CanSet() { - if f.seq == seqTypeSlice { - if rv.IsNil() { - rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) - } else { - rv.SetLen(0) - } - } else if f.seq == seqTypeChan { - if rv.IsNil() { - rv.Set(reflect.MakeChan(ti.rt, 0)) - } - } - } - slh.End() - return - } - - rtelem0Size := int(rtelem0.Size()) - rtElem0Kind := rtelem0.Kind() - rtElem0Id := rt2id(rtelem0) - rtelem0Mut := !isImmutableKind(rtElem0Kind) - rtelem := rtelem0 - rtelemkind := rtelem.Kind() - for rtelemkind == reflect.Ptr { - rtelem = rtelem.Elem() - rtelemkind = rtelem.Kind() - } - - var fn *codecFn - - var rv0, rv9 reflect.Value - rv0 = rv - rvChanged := false - - rvlen := rv.Len() - rvcap := rv.Cap() - hasLen := containerLenS > 0 - if hasLen && f.seq == seqTypeSlice { - if containerLenS > rvcap { - oldRvlenGtZero := rvlen > 0 - rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size())) - if rvlen <= rvcap { - if rv.CanSet() { - rv.SetLen(rvlen) - } else { - rv = rv.Slice(0, rvlen) - rvChanged = true - } - } else { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvcap = rvlen - rvChanged = true - } - if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { - reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) - } - } else if containerLenS != rvlen { - rvlen = containerLenS - if rv.CanSet() { - rv.SetLen(rvlen) - } else { - rv = rv.Slice(0, rvlen) - rvChanged = true - } - } - } - - var recognizedRtid, recognizedRtidPtr bool - if useLookupRecognizedTypes { - recognizedRtid = isRecognizedRtid(rtElem0Id) - recognizedRtidPtr = isRecognizedRtidPtr(rtElem0Id) - } - - // consider creating new element once, and just decoding into it. 
- var rtelem0Zero reflect.Value - var rtelem0ZeroValid bool - var decodeAsNil bool - var j int - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() { - if hasLen { - rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size) - } else { - rvlen = 8 - } - if f.seq == seqTypeSlice { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvChanged = true - } else if f.seq == seqTypeChan { - rv.Set(reflect.MakeChan(ti.rt, rvlen)) - } - } - slh.ElemContainerState(j) - decodeAsNil = dd.TryDecodeAsNil() - if f.seq == seqTypeChan { - if decodeAsNil { - rv.Send(reflect.Zero(rtelem0)) - continue - } - if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) { - rv9 = reflect.New(rtelem0).Elem() - } - if useLookupRecognizedTypes && (recognizedRtid || recognizedRtidPtr) { - d.decode(rv2i(rv9.Addr())) - } else { - if fn == nil { - fn = d.cf.get(rtelem, true, true) - } - d.decodeValue(rv9, fn, false, true) - } - rv.Send(rv9) - } else { - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= rvlen { - if f.seq == seqTypeArray { - d.arrayCannotExpand(rvlen, j+1) - decodeIntoBlank = true - } else { // if f.seq == seqTypeSlice - // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs - var rvcap2 int - rv9, rvcap2, rvChanged = decExpandSliceRV(rv, ti.rt, rtelem0Size, 1, rvlen, rvcap) - rvlen++ - if rvChanged { - rv = rv9 - rvcap = rvcap2 - } - } - } - if decodeIntoBlank { - if !decodeAsNil { - d.swallow() - } - } else { - rv9 = rv.Index(j) - if d.h.SliceElementReset || decodeAsNil { - if !rtelem0ZeroValid { - rtelem0ZeroValid = true - rtelem0Zero = reflect.Zero(rtelem0) - } - rv9.Set(rtelem0Zero) - } - if decodeAsNil { - continue - } - - if useLookupRecognizedTypes && recognizedRtid { - d.decode(rv2i(rv9.Addr())) - } else if useLookupRecognizedTypes && recognizedRtidPtr { // && !rv9.IsNil() 
{ - if rv9.IsNil() { - rv9.Set(reflect.New(rtelem)) - } - d.decode(rv2i(rv9)) - } else { - if fn == nil { - fn = d.cf.get(rtelem, true, true) - } - d.decodeValue(rv9, fn, false, true) - } - } - } - } - if f.seq == seqTypeSlice { - if j < rvlen { - if rv.CanSet() { - rv.SetLen(j) - } else { - rv = rv.Slice(0, j) - rvChanged = true - } - rvlen = j - } else if j == 0 && rv.IsNil() { - rv = reflect.MakeSlice(ti.rt, 0, 0) - rvChanged = true - } - } - slh.End() - - if rvChanged { - rv0.Set(rv) - } -} - -// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { -// // d.decodeValueFn(rv.Slice(0, rv.Len())) -// f.kSlice(rv.Slice(0, rv.Len())) -// } - -func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { - dd := d.d - containerLen := dd.ReadMapStart() - elemsep := d.hh.hasElemSeparators() - ti := f.ti - if rv.IsNil() { - rv.Set(makeMapReflect(ti.rt, containerLen)) - } - - if containerLen == 0 { - dd.ReadMapEnd() - return - } - - ktype, vtype := ti.rt.Key(), ti.rt.Elem() - ktypeId := rt2id(ktype) - vtypeId := rt2id(vtype) - vtypeKind := vtype.Kind() - var recognizedKtyp, recognizedVtyp, recognizedPtrKtyp, recognizedPtrVtyp bool - if useLookupRecognizedTypes { - recognizedKtyp = isRecognizedRtid(ktypeId) - recognizedVtyp = isRecognizedRtid(vtypeId) - recognizedPtrKtyp = isRecognizedRtidPtr(ktypeId) - recognizedPtrVtyp = isRecognizedRtidPtr(vtypeId) - } - - var keyFn, valFn *codecFn - var ktypeLo, vtypeLo reflect.Type - for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { - } - - for vtypeLo = vtype; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { - } - - var mapGet, mapSet bool - rvvImmut := isImmutableKind(vtypeKind) - if !d.h.MapValueReset { - // if pointer, mapGet = true - // if interface, mapGet = true if !DecodeNakedAlways (else false) - // if builtin, mapGet = false - // else mapGet = true - if vtypeKind == reflect.Ptr { - mapGet = true - } else if vtypeKind == reflect.Interface { - if !d.h.InterfaceReset { - mapGet = true 
- } - } else if !rvvImmut { - mapGet = true - } - } - - var rvk, rvkp, rvv, rvz reflect.Value - rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk. - ktypeIsString := ktypeId == stringTypId - ktypeIsIntf := ktypeId == intfTypId - hasLen := containerLen > 0 - var kstrbs []byte - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if rvkMut || !rvkp.IsValid() { - rvkp = reflect.New(ktype) - rvk = rvkp.Elem() - } - if elemsep { - dd.ReadMapElemKey() - } - if dd.TryDecodeAsNil() { - // Previously, if a nil key, we just ignored the mapped value and continued. - // However, that makes the result of encoding and then decoding map[intf]intf{nil:nil} - // to be an empty map. - // Instead, we treat a nil key as the zero value of the type. - rvk.Set(reflect.Zero(ktype)) - } else if ktypeIsString { - kstrbs = dd.DecodeStringAsBytes() - rvk.SetString(stringView(kstrbs)) - // NOTE: if doing an insert, you MUST use a real string (not stringview) - } else if useLookupRecognizedTypes && recognizedKtyp { - d.decode(rv2i(rvkp)) - // rvk = rvkp.Elem() //TODO: remove, unnecessary - } else if useLookupRecognizedTypes && recognizedPtrKtyp { - if rvk.IsNil() { - rvk = reflect.New(ktypeLo) - } - d.decode(rv2i(rvk)) - } else { - if keyFn == nil { - keyFn = d.cf.get(ktypeLo, true, true) - } - d.decodeValue(rvk, keyFn, false, true) - } - // special case if a byte array. - if ktypeIsIntf { - if rvk2 := rvk.Elem(); rvk2.IsValid() { - rvk = rvk2 - if rvk.Type() == uint8SliceTyp { - rvk = reflect.ValueOf(d.string(rvk.Bytes())) - } - } - } - - if elemsep { - dd.ReadMapElemValue() - } - - // Brittle, but OK per TryDecodeAsNil() contract. - // i.e. 
TryDecodeAsNil never shares slices with other decDriver procedures - if dd.TryDecodeAsNil() { - if ktypeIsString { - rvk.SetString(d.string(kstrbs)) - } - if d.h.DeleteOnNilMapValue { - rv.SetMapIndex(rvk, reflect.Value{}) - } else { - rv.SetMapIndex(rvk, reflect.Zero(vtype)) - } - continue - } - - mapSet = true // set to false if u do a get, and its a non-nil pointer - if mapGet { - // mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable. - rvv = rv.MapIndex(rvk) - if !rvv.IsValid() { - rvv = reflect.New(vtype).Elem() - } else if vtypeKind == reflect.Ptr { - if rvv.IsNil() { - rvv = reflect.New(vtype).Elem() - } else { - mapSet = false - } - } else if vtypeKind == reflect.Interface { - // not addressable, and thus not settable. - // e MUST create a settable/addressable variant - rvv2 := reflect.New(rvv.Type()).Elem() - if !rvv.IsNil() { - rvv2.Set(rvv) - } - rvv = rvv2 - } - // else it is ~mutable, and we can just decode into it directly - } else if rvvImmut { - if !rvz.IsValid() { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } else { - rvv = reflect.New(vtype).Elem() - } - - // We MUST be done with the stringview of the key, before decoding the value - // so that we don't bastardize the reused byte array. - if mapSet && ktypeIsString { - rvk.SetString(d.string(kstrbs)) - } - if useLookupRecognizedTypes && recognizedVtyp && rvv.CanAddr() { - d.decode(rv2i(rvv.Addr())) - } else if useLookupRecognizedTypes && recognizedPtrVtyp { - if rvv.IsNil() { - rvv = reflect.New(vtypeLo) - mapSet = true - } - d.decode(rv2i(rvv)) - } else { - if valFn == nil { - valFn = d.cf.get(vtypeLo, true, true) - } - d.decodeValue(rvv, valFn, false, true) - // d.decodeValueFn(rvv, valFn) - } - if mapSet { - rv.SetMapIndex(rvk, rvv) - } - // if ktypeIsString { - // // keepAlive4StringView(kstrbs) // not needed, as reference is outside loop - // } - } - - dd.ReadMapEnd() -} - -// decNaked is used to keep track of the primitives decoded. 
-// Without it, we would have to decode each primitive and wrap it -// in an interface{}, causing an allocation. -// In this model, the primitives are decoded in a "pseudo-atomic" fashion, -// so we can rest assured that no other decoding happens while these -// primitives are being decoded. -// -// maps and arrays are not handled by this mechanism. -// However, RawExt is, and we accommodate for extensions that decode -// RawExt from DecodeNaked, but need to decode the value subsequently. -// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. -// -// However, decNaked also keeps some arrays of default maps and slices -// used in DecodeNaked. This way, we can get a pointer to it -// without causing a new heap allocation. -// -// kInterfaceNaked will ensure that there is no allocation for the common -// uses. -type decNaked struct { - // r RawExt // used for RawExt, uint, []byte. - u uint64 - i int64 - f float64 - l []byte - s string - t time.Time - - b bool - - inited bool - - v valueType - - li, lm, ln, ls int8 - - // array/stacks for reducing allocation - // keep arrays at the bottom? Chance is that they are not used much. 
- ia [arrayCacheLen]interface{} - ma [arrayCacheLen]map[interface{}]interface{} - na [arrayCacheLen]map[string]interface{} - sa [arrayCacheLen][]interface{} - // ra [2]RawExt - - rr [5 * arrayCacheLen]reflect.Value -} - -const ( - decNakedUintIdx = iota - decNakedIntIdx - decNakedFloatIdx - decNakedBytesIdx - decNakedStringIdx - decNakedTimeIdx - decNakedBoolIdx -) -const ( - _ = iota // maps to the scalars above - decNakedIntfIdx - decNakedMapIntfIntfIdx - decNakedMapStrIntfIdx - decNakedSliceIntfIdx -) - -func (n *decNaked) init() { - if n.inited { - return - } - // n.ms = n.ma[:0] - // n.is = n.ia[:0] - // n.ns = n.na[:0] - // n.ss = n.sa[:0] - - n.rr[decNakedUintIdx] = reflect.ValueOf(&n.u).Elem() - n.rr[decNakedIntIdx] = reflect.ValueOf(&n.i).Elem() - n.rr[decNakedFloatIdx] = reflect.ValueOf(&n.f).Elem() - n.rr[decNakedBytesIdx] = reflect.ValueOf(&n.l).Elem() - n.rr[decNakedStringIdx] = reflect.ValueOf(&n.s).Elem() - n.rr[decNakedTimeIdx] = reflect.ValueOf(&n.t).Elem() - n.rr[decNakedBoolIdx] = reflect.ValueOf(&n.b).Elem() - - for i := range [arrayCacheLen]struct{}{} { - n.rr[decNakedIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.ia[i])).Elem() - n.rr[decNakedMapIntfIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.ma[i])).Elem() - n.rr[decNakedMapStrIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.na[i])).Elem() - n.rr[decNakedSliceIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.sa[i])).Elem() - } - n.inited = true - // n.rr[] = reflect.ValueOf(&n.) -} - -func (n *decNaked) reset() { - if n == nil { - return - } - n.li, n.lm, n.ln, n.ls = 0, 0, 0, 0 -} - -type rtid2rv struct { - rtid uintptr - rv reflect.Value -} - -// A Decoder reads and decodes an object from an input stream in the codec format. -type Decoder struct { - // hopefully, reduce derefencing cost by laying the decReader inside the Decoder. - // Try to put things that go together to fit within a cache line (8 words). 
- - d decDriver - // NOTE: Decoder shouldn't call it's read methods, - // as the handler MAY need to do some coordination. - r decReader - hh Handle - h *BasicHandle - - mtr, mtrp, str, strp bool // - - be bool // is binary encoding - bytes bool // is bytes reader - js bool // is json handle - - // ---- cpu cache line boundary? - - rb bytesDecReader - ri ioDecReader - bi bufioDecReader - - // cr containerStateRecv - - n *decNaked - nsp *sync.Pool - - // ---- cpu cache line boundary? - - is map[string]string // used for interning strings - - // cache the mapTypeId and sliceTypeId for faster comparisons - mtid uintptr - stid uintptr - - b [scratchByteArrayLen]byte - // _ uintptr // for alignment purposes, so next one starts from a cache line - - err error - // ---- cpu cache line boundary? - - cf codecFner - // _ [64]byte // force alignment??? -} - -// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. -// -// For efficiency, Users are encouraged to pass in a memory buffered reader -// (eg bufio.Reader, bytes.Buffer). -func NewDecoder(r io.Reader, h Handle) *Decoder { - d := newDecoder(h) - d.Reset(r) - return d -} - -// NewDecoderBytes returns a Decoder which efficiently decodes directly -// from a byte slice with zero copying. -func NewDecoderBytes(in []byte, h Handle) *Decoder { - d := newDecoder(h) - d.ResetBytes(in) - return d -} - -var defaultDecNaked decNaked - -func newDecoder(h Handle) *Decoder { - d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} - - // NOTE: do not initialize d.n here. It is lazily initialized in d.naked() - - _, d.js = h.(*JsonHandle) - if d.h.InternString { - d.is = make(map[string]string, 32) - } - d.d = h.newDecDriver(d) - // d.cr, _ = d.d.(containerStateRecv) - return d -} - -// naked must be called before each call to .DecodeNaked, -// as they will use it. 
-func (d *Decoder) naked() *decNaked { - if d.n == nil { - // consider one of: - // - get from sync.Pool (if GC is frequent, there's no value here) - // - new alloc (safest. only init'ed if it a naked decode will be done) - // - field in Decoder (makes the Decoder struct very big) - // To support using a decoder where a DecodeNaked is not needed, - // we prefer #1 or #2. - // d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool - // d.n.init() - var v interface{} - d.nsp, v = pool.decNaked() - d.n = v.(*decNaked) - } - return d.n -} - -func (d *Decoder) resetCommon() { - d.n.reset() - d.d.reset() - d.cf.reset(d.hh) - d.err = nil - // reset all things which were cached from the Handle, - // but could be changed. - d.mtid, d.stid = 0, 0 - d.mtr, d.mtrp, d.str, d.strp = false, false, false, false - if d.h.MapType != nil { - d.mtid = rt2id(d.h.MapType) - if useLookupRecognizedTypes { - d.mtr = isRecognizedRtid(d.mtid) - d.mtrp = isRecognizedRtidPtr(d.mtid) - } - } - if d.h.SliceType != nil { - d.stid = rt2id(d.h.SliceType) - if useLookupRecognizedTypes { - d.str = isRecognizedRtid(d.stid) - d.strp = isRecognizedRtidPtr(d.stid) - } - } -} - -func (d *Decoder) Reset(r io.Reader) { - if d.h.ReaderBufferSize > 0 { - d.bi.buf = make([]byte, 0, d.h.ReaderBufferSize) - d.bi.reset(r) - d.r = &d.bi - } else { - d.ri.x = &d.b - // d.s = d.sa[:0] - d.ri.reset(r) - d.r = &d.ri - } - d.resetCommon() -} - -func (d *Decoder) ResetBytes(in []byte) { - d.bytes = true - d.rb.reset(in) - d.r = &d.rb - d.resetCommon() -} - -// Decode decodes the stream from reader and stores the result in the -// value pointed to by v. v cannot be a nil pointer. v can also be -// a reflect.Value of a pointer. -// -// Note that a pointer to a nil interface is not a nil pointer. -// If you do not know what type of stream it is, pass in a pointer to a nil interface. -// We will decode and store a value in that nil interface. 
-// -// Sample usages: -// // Decoding into a non-nil typed value -// var f float32 -// err = codec.NewDecoder(r, handle).Decode(&f) -// -// // Decoding into nil interface -// var v interface{} -// dec := codec.NewDecoder(r, handle) -// err = dec.Decode(&v) -// -// When decoding into a nil interface{}, we will decode into an appropriate value based -// on the contents of the stream: -// - Numbers are decoded as float64, int64 or uint64. -// - Other values are decoded appropriately depending on the type: -// bool, string, []byte, time.Time, etc -// - Extensions are decoded as RawExt (if no ext function registered for the tag) -// Configurations exist on the Handle to override defaults -// (e.g. for MapType, SliceType and how to decode raw bytes). -// -// When decoding into a non-nil interface{} value, the mode of encoding is based on the -// type of the value. When a value is seen: -// - If an extension is registered for it, call that extension function -// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error -// - Else decode it based on its reflect.Kind -// -// There are some special rules when decoding into containers (slice/array/map/struct). -// Decode will typically use the stream contents to UPDATE the container. -// - A map can be decoded from a stream map, by updating matching keys. -// - A slice can be decoded from a stream array, -// by updating the first n elements, where n is length of the stream. -// - A slice can be decoded from a stream map, by decoding as if -// it contains a sequence of key-value pairs. -// - A struct can be decoded from a stream map, by updating matching fields. -// - A struct can be decoded from a stream array, -// by updating fields as they occur in the struct (by index). -// -// When decoding a stream map or array with length of 0 into a nil map or slice, -// we reset the destination map or slice to a zero-length value. 
-// -// However, when decoding a stream nil, we reset the destination container -// to its "zero" value (e.g. nil for slice/map, etc). -// -func (d *Decoder) Decode(v interface{}) (err error) { - defer panicToErrs2(&d.err, &err) - d.MustDecode(v) - return -} - -// MustDecode is like Decode, but panics if unable to Decode. -// This provides insight to the code location that triggered the error. -func (d *Decoder) MustDecode(v interface{}) { - // TODO: Top-level: ensure that v is a pointer and not nil. - if d.err != nil { - panic(d.err) - } - if d.d.TryDecodeAsNil() { - d.setZero(v) - } else { - d.decode(v) - } - if d.nsp != nil { - if d.n != nil { - d.nsp.Put(d.n) - d.n = nil - } - d.nsp = nil - } - d.n = nil - // xprintf(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn) -} - -// // this is not a smart swallow, as it allocates objects and does unnecessary work. -// func (d *Decoder) swallowViaHammer() { -// var blank interface{} -// d.decodeValueNoFn(reflect.ValueOf(&blank).Elem()) -// } - -func (d *Decoder) swallow() { - // smarter decode that just swallows the content - dd := d.d - if dd.TryDecodeAsNil() { - return - } - elemsep := d.hh.hasElemSeparators() - switch dd.ContainerType() { - case valueTypeMap: - containerLen := dd.ReadMapStart() - hasLen := containerLen >= 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - // if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break} - if elemsep { - dd.ReadMapElemKey() - } - d.swallow() - if elemsep { - dd.ReadMapElemValue() - } - d.swallow() - } - dd.ReadMapEnd() - case valueTypeArray: - containerLen := dd.ReadArrayStart() - hasLen := containerLen >= 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if elemsep { - dd.ReadArrayElem() - } - d.swallow() - } - dd.ReadArrayEnd() - case valueTypeBytes: - dd.DecodeBytes(d.b[:], true) - case valueTypeString: - dd.DecodeStringAsBytes() - default: - // these are all primitives, which 
we can get from decodeNaked - // if RawExt using Value, complete the processing. - n := d.naked() - dd.DecodeNaked() - if n.v == valueTypeExt && n.l == nil { - if n.li < arrayCacheLen { - n.ia[n.li] = nil - n.li++ - d.decode(&n.ia[n.li-1]) - n.ia[n.li-1] = nil - n.li-- - } else { - var v2 interface{} - d.decode(&v2) - } - } - } -} - -func (d *Decoder) setZero(iv interface{}) { - if iv == nil || definitelyNil(iv) { - return - } - var canDecode bool - switch v := iv.(type) { - case *string: - *v = "" - case *bool: - *v = false - case *int: - *v = 0 - case *int8: - *v = 0 - case *int16: - *v = 0 - case *int32: - *v = 0 - case *int64: - *v = 0 - case *uint: - *v = 0 - case *uint8: - *v = 0 - case *uint16: - *v = 0 - case *uint32: - *v = 0 - case *uint64: - *v = 0 - case *float32: - *v = 0 - case *float64: - *v = 0 - case *[]uint8: - *v = nil - case *Raw: - *v = nil - case reflect.Value: - if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { - v.Set(reflect.Zero(v.Type())) - } // TODO: else drain if chan, clear if map, set all to nil if slice??? - default: - if !fastpathDecodeSetZeroTypeSwitch(iv, d) { - v := reflect.ValueOf(iv) - if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { - v.Set(reflect.Zero(v.Type())) - } // TODO: else drain if chan, clear if map, set all to nil if slice??? - } - } -} - -func (d *Decoder) decode(iv interface{}) { - // check nil and interfaces explicitly, - // so that type switches just have a run of constant non-interface types. - if iv == nil { - d.error(cannotDecodeIntoNilErr) - return - } - if v, ok := iv.(Selfer); ok { - v.CodecDecodeSelf(d) - return - } - - switch v := iv.(type) { - // case nil: - // case Selfer: - - case reflect.Value: - v = d.ensureDecodeable(v) - d.decodeValue(v, nil, false, true) // TODO: maybe ask to recognize ... 
- - case *string: - *v = d.d.DecodeString() - case *bool: - *v = d.d.DecodeBool() - case *int: - *v = int(d.d.DecodeInt(intBitsize)) - case *int8: - *v = int8(d.d.DecodeInt(8)) - case *int16: - *v = int16(d.d.DecodeInt(16)) - case *int32: - *v = int32(d.d.DecodeInt(32)) - case *int64: - *v = d.d.DecodeInt(64) - case *uint: - *v = uint(d.d.DecodeUint(uintBitsize)) - case *uint8: - *v = uint8(d.d.DecodeUint(8)) - case *uint16: - *v = uint16(d.d.DecodeUint(16)) - case *uint32: - *v = uint32(d.d.DecodeUint(32)) - case *uint64: - *v = d.d.DecodeUint(64) - case *float32: - *v = float32(d.d.DecodeFloat(true)) - case *float64: - *v = d.d.DecodeFloat(false) - case *[]uint8: - *v = d.d.DecodeBytes(*v, false) - - case *Raw: - *v = d.rawBytes() - - case *interface{}: - d.decodeValue(reflect.ValueOf(iv).Elem(), nil, false, true) // TODO: consider recognize here - // d.decodeValueNotNil(reflect.ValueOf(iv).Elem()) - - default: - if !fastpathDecodeTypeSwitch(iv, d) { - v := reflect.ValueOf(iv) - v = d.ensureDecodeable(v) - d.decodeValue(v, nil, false, false) - // d.decodeValueFallback(v) - } - } -} - -func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, tryRecognized, chkAll bool) { - // If stream is not containing a nil value, then we can deref to the base - // non-pointer value, and decode into that. 
- var rvp reflect.Value - var rvpValid bool - if rv.Kind() == reflect.Ptr { - rvpValid = true - for { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rvp = rv - rv = rv.Elem() - if rv.Kind() != reflect.Ptr { - break - } - } - } - - if useLookupRecognizedTypes && tryRecognized && isRecognizedRtid(rv2rtid(rv)) { - if rvpValid { - d.decode(rv2i(rvp)) - return - } else if rv.CanAddr() { - d.decode(rv2i(rv.Addr())) - return - } - } - - if fn == nil { - // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer - fn = d.cf.get(rv.Type(), chkAll, true) // chkAll, chkAll) - } - if fn.i.addr { - if rvpValid { - fn.fd(d, &fn.i, rvp) - } else if rv.CanAddr() { - fn.fd(d, &fn.i, rv.Addr()) - } else { - fn.fd(d, &fn.i, rv) - } - } else { - fn.fd(d, &fn.i, rv) - } - // return rv -} - -func (d *Decoder) structFieldNotFound(index int, rvkencname string) { - // NOTE: rvkencname may be a stringView, so don't pass it to another function. - if d.h.ErrorIfNoField { - if index >= 0 { - d.errorf("no matching struct field found when decoding stream array at index %v", index) - return - } else if rvkencname != "" { - d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) - return - } - } - d.swallow() -} - -func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { - if d.h.ErrorIfNoArrayExpand { - d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) - } -} - -func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) { - switch rv.Kind() { - case reflect.Array: - return rv, true - case reflect.Ptr: - if !rv.IsNil() { - return rv.Elem(), true - } - case reflect.Slice, reflect.Chan, reflect.Map: - if !rv.IsNil() { - return rv, true - } - } - return -} - -func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) { - // decode can take any reflect.Value that is a inherently addressable i.e. 
- // - array - // - non-nil chan (we will SEND to it) - // - non-nil slice (we will set its elements) - // - non-nil map (we will put into it) - // - non-nil pointer (we can "update" it) - rv2, canDecode := isDecodeable(rv) - if canDecode { - return - } - if !rv.IsValid() { - d.error(cannotDecodeIntoNilErr) - return - } - if !rv.CanInterface() { - d.errorf("cannot decode into a value without an interface: %v", rv) - return - } - rvi := rv2i(rv) - d.errorf("cannot decode into value of kind: %v, type: %T, %v", rv.Kind(), rvi, rvi) - return -} - -// func (d *Decoder) chkPtrValue(rv reflect.Value) { -// // We can only decode into a non-nil pointer -// if rv.Kind() == reflect.Ptr && !rv.IsNil() { -// return -// } -// d.errNotValidPtrValue(rv) -// } - -// func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { -// if !rv.IsValid() { -// d.error(cannotDecodeIntoNilErr) -// return -// } -// if !rv.CanInterface() { -// d.errorf("cannot decode into a value without an interface: %v", rv) -// return -// } -// rvi := rv2i(rv) -// d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi) -// } - -func (d *Decoder) error(err error) { - panic(err) -} - -func (d *Decoder) errorf(format string, params ...interface{}) { - params2 := make([]interface{}, len(params)+1) - params2[0] = d.r.numread() - copy(params2[1:], params) - err := fmt.Errorf("[pos %d]: "+format, params2...) - panic(err) -} - -// Possibly get an interned version of a string -// -// This should mostly be used for map keys, where the key type is string. -// This is because keys of a map/struct are typically reused across many objects. -func (d *Decoder) string(v []byte) (s string) { - if d.is == nil { - return string(v) // don't return stringView, as we need a real string here. 
- } - s, ok := d.is[string(v)] // no allocation here, per go implementation - if !ok { - s = string(v) // new allocation here - d.is[s] = s - } - return s -} - -// nextValueBytes returns the next value in the stream as a set of bytes. -func (d *Decoder) nextValueBytes() (bs []byte) { - d.d.uncacheRead() - d.r.track() - d.swallow() - bs = d.r.stopTrack() - return -} - -func (d *Decoder) rawBytes() []byte { - // ensure that this is not a view into the bytes - // i.e. make new copy always. - bs := d.nextValueBytes() - bs2 := make([]byte, len(bs)) - copy(bs2, bs) - return bs2 -} - -// -------------------------------------------------- - -// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. -// A slice can be set from a map or array in stream. This supports the MapBySlice interface. -type decSliceHelper struct { - d *Decoder - // ct valueType - array bool -} - -func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { - dd := d.d - ctyp := dd.ContainerType() - if ctyp == valueTypeArray { - x.array = true - clen = dd.ReadArrayStart() - } else if ctyp == valueTypeMap { - clen = dd.ReadMapStart() * 2 - } else { - d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) - } - // x.ct = ctyp - x.d = d - return -} - -func (x decSliceHelper) End() { - if x.array { - x.d.d.ReadArrayEnd() - } else { - x.d.d.ReadMapEnd() - } -} - -func (x decSliceHelper) ElemContainerState(index int) { - if x.array { - x.d.d.ReadArrayElem() - } else { - if index%2 == 0 { - x.d.d.ReadMapElemKey() - } else { - x.d.d.ReadMapElemValue() - } - } -} - -func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { - if clen == 0 { - return zeroByteSlice - } - if len(bs) == clen { - bsOut = bs - r.readb(bsOut) - } else if cap(bs) >= clen { - bsOut = bs[:clen] - r.readb(bsOut) - } else { - // bsOut = make([]byte, clen) - len2 := decInferLen(clen, maxInitLen, 1) - bsOut = make([]byte, len2) - r.readb(bsOut) - for 
len2 < clen { - len3 := decInferLen(clen-len2, maxInitLen, 1) - bs3 := bsOut - bsOut = make([]byte, len2+len3) - copy(bsOut, bs3) - r.readb(bsOut[len2:]) - len2 += len3 - } - } - return -} - -func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { - if xlen := len(in); xlen > 0 { - if isBytesReader || xlen <= scratchByteArrayLen { - if cap(dest) >= xlen { - out = dest[:xlen] - } else { - out = make([]byte, xlen) - } - copy(out, in) - return - } - } - return in -} - -// decInferLen will infer a sensible length, given the following: -// - clen: length wanted. -// - maxlen: max length to be returned. -// if <= 0, it is unset, and we infer it based on the unit size -// - unit: number of bytes for each element of the collection -func decInferLen(clen, maxlen, unit int) (rvlen int) { - // handle when maxlen is not set i.e. <= 0 - if clen <= 0 { - return - } - if unit == 0 { - return clen - } - if maxlen <= 0 { - // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. 
- // maxlen = 256 * 1024 / unit - // if maxlen < (4 * 1024) { - // maxlen = 4 * 1024 - // } - if unit < (256 / 4) { - maxlen = 256 * 1024 / unit - } else { - maxlen = 4 * 1024 - } - } - if clen > maxlen { - rvlen = maxlen - } else { - rvlen = clen - } - return -} - -func decExpandSliceRV(s reflect.Value, st reflect.Type, stElemSize, num, slen, scap int) ( - s2 reflect.Value, scap2 int, changed bool) { - l1 := slen + num // new slice length - if l1 < slen { - panic("expandSlice: slice overflow") - } - if l1 <= scap { - if s.CanSet() { - s.SetLen(l1) - } else { - s2 = s.Slice(0, l1) - scap2 = scap - changed = true - } - return - } - scap2 = growCap(scap, stElemSize, num) - s2 = reflect.MakeSlice(st, l1, scap2) - changed = true - reflect.Copy(s2, s) - return -} - -func decReadFull(r io.Reader, bs []byte) (n int, err error) { - var nn int - for n < len(bs) && err == nil { - nn, err = r.Read(bs[n:]) - if nn > 0 { - if err == io.EOF { - // leave EOF for next time - err = nil - } - n += nn - } - } - - // do not do this - it serves no purpose - // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF } - return -} diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go deleted file mode 100644 index 508d04fb196..00000000000 --- a/vendor/github.com/ugorji/go/codec/encode.go +++ /dev/null @@ -1,1384 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "bufio" - "encoding" - "fmt" - "io" - "reflect" - "sort" - "sync" -) - -const defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 - -// AsSymbolFlag defines what should be encoded as symbols. -type AsSymbolFlag uint8 - -const ( - // AsSymbolDefault is default. - // Currently, this means only encode struct field names as symbols. - // The default is subject to change. 
- AsSymbolDefault AsSymbolFlag = iota - - // AsSymbolAll means encode anything which could be a symbol as a symbol. - AsSymbolAll = 0xfe - - // AsSymbolNone means do not encode anything as a symbol. - AsSymbolNone = 1 << iota - - // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. - AsSymbolMapStringKeysFlag - - // AsSymbolStructFieldName means encode struct field names as symbols. - AsSymbolStructFieldNameFlag -) - -// encWriter abstracts writing to a byte array or to an io.Writer. -type encWriter interface { - writeb([]byte) - writestr(string) - writen1(byte) - writen2(byte, byte) - atEndOfEncode() -} - -// encDriver abstracts the actual codec (binc vs msgpack, etc) -type encDriver interface { - // IsBuiltinType(rt uintptr) bool - EncodeBuiltin(rt uintptr, v interface{}) - EncodeNil() - EncodeInt(i int64) - EncodeUint(i uint64) - EncodeBool(b bool) - EncodeFloat32(f float32) - EncodeFloat64(f float64) - // encodeExtPreamble(xtag byte, length int) - EncodeRawExt(re *RawExt, e *Encoder) - EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) - WriteArrayStart(length int) - WriteArrayElem() - WriteArrayEnd() - WriteMapStart(length int) - WriteMapElemKey() - WriteMapElemValue() - WriteMapEnd() - EncodeString(c charEncoding, v string) - EncodeSymbol(v string) - EncodeStringBytes(c charEncoding, v []byte) - - //TODO - //encBignum(f *big.Int) - //encStringRunes(c charEncoding, v []rune) - - reset() - atEndOfEncode() -} - -type ioEncStringWriter interface { - WriteString(s string) (n int, err error) -} - -type ioEncFlusher interface { - Flush() error -} - -type encDriverAsis interface { - EncodeAsis(v []byte) -} - -// type encNoSeparator struct{} -// func (_ encNoSeparator) EncodeEnd() {} - -type encDriverNoopContainerWriter struct{} - -func (_ encDriverNoopContainerWriter) WriteArrayStart(length int) {} -func (_ encDriverNoopContainerWriter) WriteArrayElem() {} -func (_ encDriverNoopContainerWriter) WriteArrayEnd() {} -func (_ 
encDriverNoopContainerWriter) WriteMapStart(length int) {} -func (_ encDriverNoopContainerWriter) WriteMapElemKey() {} -func (_ encDriverNoopContainerWriter) WriteMapElemValue() {} -func (_ encDriverNoopContainerWriter) WriteMapEnd() {} -func (_ encDriverNoopContainerWriter) atEndOfEncode() {} - -// type ioEncWriterWriter interface { -// WriteByte(c byte) error -// WriteString(s string) (n int, err error) -// Write(p []byte) (n int, err error) -// } - -type EncodeOptions struct { - // Encode a struct as an array, and not as a map - StructToArray bool - - // Canonical representation means that encoding a value will always result in the same - // sequence of bytes. - // - // This only affects maps, as the iteration order for maps is random. - // - // The implementation MAY use the natural sort order for the map keys if possible: - // - // - If there is a natural sort order (ie for number, bool, string or []byte keys), - // then the map keys are first sorted in natural order and then written - // with corresponding map values to the strema. - // - If there is no natural sort order, then the map keys will first be - // encoded into []byte, and then sorted, - // before writing the sorted keys and the corresponding map values to the stream. - // - Canonical bool - - // CheckCircularRef controls whether we check for circular references - // and error fast during an encode. - // - // If enabled, an error is received if a pointer to a struct - // references itself either directly or through one of its fields (iteratively). - // - // This is opt-in, as there may be a performance hit to checking circular references. - CheckCircularRef bool - - // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers - // when checking if a value is empty. - // - // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls. - RecursiveEmptyCheck bool - - // Raw controls whether we encode Raw values. 
- // This is a "dangerous" option and must be explicitly set. - // If set, we blindly encode Raw values as-is, without checking - // if they are a correct representation of a value in that format. - // If unset, we error out. - Raw bool - - // AsSymbols defines what should be encoded as symbols. - // - // Encoding as symbols can reduce the encoded size significantly. - // - // However, during decoding, each string to be encoded as a symbol must - // be checked to see if it has been seen before. Consequently, encoding time - // will increase if using symbols, because string comparisons has a clear cost. - // - // Sample values: - // AsSymbolNone - // AsSymbolAll - // AsSymbolMapStringKeys - // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag - AsSymbols AsSymbolFlag - - // WriterBufferSize is the size of the buffer used when writing. - // - // if > 0, we use a smart buffer internally for performance purposes. - WriterBufferSize int -} - -// --------------------------------------------- - -type simpleIoEncWriter struct { - io.Writer -} - -// type bufIoEncWriter struct { -// w io.Writer -// buf []byte -// err error -// } - -// func (x *bufIoEncWriter) Write(b []byte) (n int, err error) { -// if x.err != nil { -// return 0, x.err -// } -// if cap(x.buf)-len(x.buf) >= len(b) { -// x.buf = append(x.buf, b) -// return len(b), nil -// } -// n, err = x.w.Write(x.buf) -// if err != nil { -// x.err = err -// return 0, x.err -// } -// n, err = x.w.Write(b) -// x.err = err -// return -// } - -// ioEncWriter implements encWriter and can write to an io.Writer implementation -type ioEncWriter struct { - w io.Writer - ww io.Writer - bw io.ByteWriter - sw ioEncStringWriter - fw ioEncFlusher - b [8]byte -} - -func (z *ioEncWriter) WriteByte(b byte) (err error) { - // x.bs[0] = b - // _, err = x.ww.Write(x.bs[:]) - z.b[0] = b - _, err = z.w.Write(z.b[:1]) - return -} - -func (z *ioEncWriter) WriteString(s string) (n int, err error) { - return z.w.Write(bytesView(s)) -} - -func 
(z *ioEncWriter) writeb(bs []byte) { - // if len(bs) == 0 { - // return - // } - if _, err := z.ww.Write(bs); err != nil { - panic(err) - } -} - -func (z *ioEncWriter) writestr(s string) { - // if len(s) == 0 { - // return - // } - if _, err := z.sw.WriteString(s); err != nil { - panic(err) - } -} - -func (z *ioEncWriter) writen1(b byte) { - if err := z.bw.WriteByte(b); err != nil { - panic(err) - } -} - -func (z *ioEncWriter) writen2(b1, b2 byte) { - var err error - if err = z.bw.WriteByte(b1); err == nil { - if err = z.bw.WriteByte(b2); err == nil { - return - } - } - panic(err) -} - -// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) { -// z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5 -// if _, err := z.ww.Write(z.b[:5]); err != nil { -// panic(err) -// } -// } - -func (z *ioEncWriter) atEndOfEncode() { - if z.fw != nil { - z.fw.Flush() - } -} - -// ---------------------------------------- - -// bytesEncWriter implements encWriter and can write to an byte slice. -// It is used by Marshal function. -type bytesEncWriter struct { - b []byte - c int // cursor - out *[]byte // write out on atEndOfEncode -} - -func (z *bytesEncWriter) writeb(s []byte) { - oc, a := z.growNoAlloc(len(s)) - if a { - z.growAlloc(len(s), oc) - } - copy(z.b[oc:], s) -} - -func (z *bytesEncWriter) writestr(s string) { - oc, a := z.growNoAlloc(len(s)) - if a { - z.growAlloc(len(s), oc) - } - copy(z.b[oc:], s) -} - -func (z *bytesEncWriter) writen1(b1 byte) { - oc, a := z.growNoAlloc(1) - if a { - z.growAlloc(1, oc) - } - z.b[oc] = b1 -} - -func (z *bytesEncWriter) writen2(b1, b2 byte) { - oc, a := z.growNoAlloc(2) - if a { - z.growAlloc(2, oc) - } - z.b[oc+1] = b2 - z.b[oc] = b1 -} - -func (z *bytesEncWriter) atEndOfEncode() { - *(z.out) = z.b[:z.c] -} - -// have a growNoalloc(n int), which can be inlined. 
-// if allocation is needed, then call growAlloc(n int) - -func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) { - oldcursor = z.c - z.c = z.c + n - if z.c > len(z.b) { - if z.c > cap(z.b) { - allocNeeded = true - } else { - z.b = z.b[:cap(z.b)] - } - } - return -} - -func (z *bytesEncWriter) growAlloc(n int, oldcursor int) { - // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. - // bytes.Buffer model (2*cap + n): much better - // bs := make([]byte, 2*cap(z.b)+n) - bs := make([]byte, growCap(cap(z.b), 1, n)) - copy(bs, z.b[:oldcursor]) - z.b = bs -} - -// --------------------------------------------- - -func (e *Encoder) builtin(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeBuiltin(f.ti.rtid, rv2i(rv)) -} - -func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) { - e.rawBytes(rv2i(rv).(Raw)) -} - -func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) { - // rev := rv2i(rv).(RawExt) - // e.e.EncodeRawExt(&rev, e) - var re *RawExt - if rv.CanAddr() { - re = rv2i(rv.Addr()).(*RawExt) - } else { - rev := rv2i(rv).(RawExt) - re = &rev - } - e.e.EncodeRawExt(re, e) -} - -func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) { - // if this is a struct|array and it was addressable, then pass the address directly (not the value) - if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() { - rv = rv.Addr() - } - e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e) -} - -func (e *Encoder) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) { - if indir == 0 { - v = rv2i(rv) - } else if indir == -1 { - // If a non-pointer was passed to Encode(), then that value is not addressable. - // Take addr if addressable, else copy value to an addressable value. 
- if rv.CanAddr() { - v = rv2i(rv.Addr()) - } else { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2i(rv2) - } - } else { - for j := int8(0); j < indir; j++ { - if rv.IsNil() { - e.e.EncodeNil() - return - } - rv = rv.Elem() - } - v = rv2i(rv) - } - return v, true -} - -func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) { - if v, proceed := e.getValueForMarshalInterface(rv, f.ti.csIndir); proceed { - v.(Selfer).CodecEncodeSelf(e) - } -} - -func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) { - if v, proceed := e.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed { - bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary() - e.marshal(bs, fnerr, false, c_RAW) - } -} - -func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) { - if v, proceed := e.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed { - bs, fnerr := v.(encoding.TextMarshaler).MarshalText() - e.marshal(bs, fnerr, false, c_UTF8) - } -} - -func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) { - if v, proceed := e.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed { - bs, fnerr := v.(jsonMarshaler).MarshalJSON() - e.marshal(bs, fnerr, true, c_UTF8) - } -} - -func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeBool(rv.Bool()) -} - -func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeString(c_UTF8, rv.String()) -} - -func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeFloat64(rv.Float()) -} - -func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeFloat32(float32(rv.Float())) -} - -func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeInt(rv.Int()) -} - -func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeUint(rv.Uint()) -} - -func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeNil() -} - -func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) { - e.errorf("unsupported kind %s, for %#v", 
rv.Kind(), rv) -} - -func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) { - ti := f.ti - ee := e.e - // array may be non-addressable, so we have to manage with care - // (don't call rv.Bytes, rv.Slice, etc). - // E.g. type struct S{B [2]byte}; - // Encode(S{}) will bomb on "panic: slice of unaddressable array". - if f.seq != seqTypeArray { - if rv.IsNil() { - ee.EncodeNil() - return - } - // If in this method, then there was no extension function defined. - // So it's okay to treat as []byte. - if ti.rtid == uint8SliceTypId { - ee.EncodeStringBytes(c_RAW, rv.Bytes()) - return - } - } - elemsep := e.hh.hasElemSeparators() - rtelem := ti.rt.Elem() - l := rv.Len() - if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 { - switch f.seq { - case seqTypeArray: - if rv.CanAddr() { - ee.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes()) - } else { - var bs []byte - if l <= cap(e.b) { - bs = e.b[:l] - } else { - bs = make([]byte, l) - } - reflect.Copy(reflect.ValueOf(bs), rv) - ee.EncodeStringBytes(c_RAW, bs) - } - return - case seqTypeSlice: - ee.EncodeStringBytes(c_RAW, rv.Bytes()) - return - } - } - if ti.rtid == uint8SliceTypId && f.seq == seqTypeChan { - bs := e.b[:0] - // do not use range, so that the number of elements encoded - // does not change, and encoding does not hang waiting on someone to close chan. 
- // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) } - ch := rv2i(rv).(<-chan byte) - for i := 0; i < l; i++ { - bs = append(bs, <-ch) - } - ee.EncodeStringBytes(c_RAW, bs) - return - } - - if ti.mbs { - if l%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", l) - return - } - ee.WriteMapStart(l / 2) - } else { - ee.WriteArrayStart(l) - } - - if l > 0 { - var fn *codecFn - var recognizedVtyp = useLookupRecognizedTypes && isRecognizedRtidOrPtr(rt2id(rtelem)) - if !recognizedVtyp { - for rtelem.Kind() == reflect.Ptr { - rtelem = rtelem.Elem() - } - // if kind is reflect.Interface, do not pre-determine the - // encoding type, because preEncodeValue may break it down to - // a concrete type and kInterface will bomb. - if rtelem.Kind() != reflect.Interface { - fn = e.cf.get(rtelem, true, true) - } - } - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - for j := 0; j < l; j++ { - if elemsep { - if ti.mbs { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } else { - ee.WriteArrayElem() - } - } - if f.seq == seqTypeChan { - if rv2, ok2 := rv.Recv(); ok2 { - if useLookupRecognizedTypes && recognizedVtyp { - e.encode(rv2i(rv2)) - } else { - e.encodeValue(rv2, fn, true) - } - } else { - ee.EncodeNil() // WE HAVE TO DO SOMETHING, so nil if nothing received. 
- } - } else { - if useLookupRecognizedTypes && recognizedVtyp { - e.encode(rv2i(rv.Index(j))) - } else { - e.encodeValue(rv.Index(j), fn, true) - } - } - } - } - - if ti.mbs { - ee.WriteMapEnd() - } else { - ee.WriteArrayEnd() - } -} - -func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) { - fti := f.ti - elemsep := e.hh.hasElemSeparators() - tisfi := fti.sfip - toMap := !(fti.toArray || e.h.StructToArray) - if toMap { - tisfi = fti.sfi - } - ee := e.e - - sfn := structFieldNode{v: rv, update: false} - if toMap { - ee.WriteMapStart(len(tisfi)) - // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - if !elemsep { - for _, si := range tisfi { - if asSymbols { - ee.EncodeSymbol(si.encName) - } else { - ee.EncodeString(c_UTF8, si.encName) - } - e.encodeValue(sfn.field(si), nil, true) - } - } else { - for _, si := range tisfi { - ee.WriteMapElemKey() - if asSymbols { - ee.EncodeSymbol(si.encName) - } else { - ee.EncodeString(c_UTF8, si.encName) - } - ee.WriteMapElemValue() - e.encodeValue(sfn.field(si), nil, true) - } - } - ee.WriteMapEnd() - } else { - ee.WriteArrayStart(len(tisfi)) - if !elemsep { - for _, si := range tisfi { - e.encodeValue(sfn.field(si), nil, true) - } - } else { - for _, si := range tisfi { - ee.WriteArrayElem() - e.encodeValue(sfn.field(si), nil, true) - } - } - ee.WriteArrayEnd() - } -} - -func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { - fti := f.ti - elemsep := e.hh.hasElemSeparators() - tisfi := fti.sfip - toMap := !(fti.toArray || e.h.StructToArray) - // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) - if toMap { - tisfi = fti.sfi - } - newlen := len(fti.sfi) - ee := e.e - - // Use sync.Pool to reduce allocating slices unnecessarily. - // The cost of sync.Pool is less than the cost of new allocation. 
- // - // Each element of the array pools one of encStructPool(8|16|32|64). - // It allows the re-use of slices up to 64 in length. - // A performance cost of encoding structs was collecting - // which values were empty and should be omitted. - // We needed slices of reflect.Value and string to collect them. - // This shared pool reduces the amount of unnecessary creation we do. - // The cost is that of locking sometimes, but sync.Pool is efficient - // enough to reduce thread contention. - - var spool *sync.Pool - var poolv interface{} - var fkvs []stringRv - if newlen <= 8 { - spool, poolv = pool.stringRv8() - fkvs = poolv.(*[8]stringRv)[:newlen] - } else if newlen <= 16 { - spool, poolv = pool.stringRv16() - fkvs = poolv.(*[16]stringRv)[:newlen] - } else if newlen <= 32 { - spool, poolv = pool.stringRv32() - fkvs = poolv.(*[32]stringRv)[:newlen] - } else if newlen <= 64 { - spool, poolv = pool.stringRv64() - fkvs = poolv.(*[64]stringRv)[:newlen] - } else if newlen <= 128 { - spool, poolv = pool.stringRv128() - fkvs = poolv.(*[128]stringRv)[:newlen] - } else { - fkvs = make([]stringRv, newlen) - } - - newlen = 0 - var kv stringRv - recur := e.h.RecursiveEmptyCheck - sfn := structFieldNode{v: rv, update: false} - for _, si := range tisfi { - // kv.r = si.field(rv, false) - kv.r = sfn.field(si) - if toMap { - if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { - continue - } - kv.v = si.encName - } else { - // use the zero value. 
- // if a reference or struct, set to nil (so you do not output too much) - if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { - switch kv.r.Kind() { - case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice: - kv.r = reflect.Value{} //encode as nil - } - } - } - fkvs[newlen] = kv - newlen++ - } - - if toMap { - ee.WriteMapStart(newlen) - // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - if !elemsep { - for j := 0; j < newlen; j++ { - kv = fkvs[j] - if asSymbols { - ee.EncodeSymbol(kv.v) - } else { - ee.EncodeString(c_UTF8, kv.v) - } - e.encodeValue(kv.r, nil, true) - } - } else { - for j := 0; j < newlen; j++ { - kv = fkvs[j] - ee.WriteMapElemKey() - if asSymbols { - ee.EncodeSymbol(kv.v) - } else { - ee.EncodeString(c_UTF8, kv.v) - } - ee.WriteMapElemValue() - e.encodeValue(kv.r, nil, true) - } - } - ee.WriteMapEnd() - } else { - ee.WriteArrayStart(newlen) - if !elemsep { - for j := 0; j < newlen; j++ { - e.encodeValue(fkvs[j].r, nil, true) - } - } else { - for j := 0; j < newlen; j++ { - ee.WriteArrayElem() - e.encodeValue(fkvs[j].r, nil, true) - } - } - ee.WriteArrayEnd() - } - - // do not use defer. Instead, use explicit pool return at end of function. - // defer has a cost we are trying to avoid. - // If there is a panic and these slices are not returned, it is ok. - if spool != nil { - spool.Put(poolv) - } -} - -func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { - ee := e.e - if rv.IsNil() { - ee.EncodeNil() - return - } - - l := rv.Len() - ee.WriteMapStart(l) - elemsep := e.hh.hasElemSeparators() - if l == 0 { - ee.WriteMapEnd() - return - } - var asSymbols bool - // determine the underlying key and val encFn's for the map. - // This eliminates some work which is done for each loop iteration i.e. - // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. 
- // - // However, if kind is reflect.Interface, do not pre-determine the - // encoding type, because preEncodeValue may break it down to - // a concrete type and kInterface will bomb. - var keyFn, valFn *codecFn - ti := f.ti - rtkey0 := ti.rt.Key() - rtkey := rtkey0 - rtval0 := ti.rt.Elem() - rtval := rtval0 - rtkeyid := rt2id(rtkey0) - rtvalid := rt2id(rtval0) - for rtval.Kind() == reflect.Ptr { - rtval = rtval.Elem() - } - if rtval.Kind() != reflect.Interface { - valFn = e.cf.get(rtval, true, true) - } - mks := rv.MapKeys() - - if e.h.Canonical { - e.kMapCanonical(rtkey, rv, mks, valFn, asSymbols) - ee.WriteMapEnd() - return - } - - var recognizedKtyp, recognizedVtyp bool - var keyTypeIsString = rtkeyid == stringTypId - if keyTypeIsString { - asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - } else { - if useLookupRecognizedTypes { - if recognizedKtyp = isRecognizedRtidOrPtr(rtkeyid); recognizedKtyp { - goto LABEL1 - } - } - for rtkey.Kind() == reflect.Ptr { - rtkey = rtkey.Elem() - } - if rtkey.Kind() != reflect.Interface { - rtkeyid = rt2id(rtkey) - keyFn = e.cf.get(rtkey, true, true) - } - } - - // for j, lmks := 0, len(mks); j < lmks; j++ { -LABEL1: - recognizedVtyp = useLookupRecognizedTypes && isRecognizedRtidOrPtr(rtvalid) - for j := range mks { - if elemsep { - ee.WriteMapElemKey() - } - if keyTypeIsString { - if asSymbols { - ee.EncodeSymbol(mks[j].String()) - } else { - ee.EncodeString(c_UTF8, mks[j].String()) - } - } else if useLookupRecognizedTypes && recognizedKtyp { - e.encode(rv2i(mks[j])) - } else { - e.encodeValue(mks[j], keyFn, true) - } - if elemsep { - ee.WriteMapElemValue() - } - if useLookupRecognizedTypes && recognizedVtyp { - e.encode(rv2i(rv.MapIndex(mks[j]))) - } else { - e.encodeValue(rv.MapIndex(mks[j]), valFn, true) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn, asSymbols bool) { - ee := e.e - elemsep := e.hh.hasElemSeparators() - // 
we previously did out-of-band if an extension was registered. - // This is not necessary, as the natural kind is sufficient for ordering. - - // WHAT IS THIS? rtkeyid can never be a []uint8, per spec - // if rtkeyid == uint8SliceTypId { - // mksv := make([]bytesRv, len(mks)) - // for i, k := range mks { - // v := &mksv[i] - // v.r = k - // v.v = k.Bytes() - // } - // sort.Sort(bytesRvSlice(mksv)) - // for i := range mksv { - // if elemsep { - // ee.WriteMapElemKey() - // } - // ee.EncodeStringBytes(c_RAW, mksv[i].v) - // if elemsep { - // ee.WriteMapElemValue() - // } - // e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - // } - // return - // } - - switch rtkey.Kind() { - case reflect.Bool: - mksv := make([]boolRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Bool() - } - sort.Sort(boolRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - ee.EncodeBool(mksv[i].v) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - case reflect.String: - mksv := make([]stringRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.String() - } - sort.Sort(stringRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(mksv[i].v) - } else { - ee.EncodeString(c_UTF8, mksv[i].v) - } - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: - mksv := make([]uintRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Uint() - } - sort.Sort(uintRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - ee.EncodeUint(mksv[i].v) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - mksv := 
make([]intRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Int() - } - sort.Sort(intRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - ee.EncodeInt(mksv[i].v) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - case reflect.Float32: - mksv := make([]floatRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Float() - } - sort.Sort(floatRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(mksv[i].v)) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - case reflect.Float64: - mksv := make([]floatRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Float() - } - sort.Sort(floatRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(mksv[i].v) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - default: - // out-of-band - // first encode each key to a []byte first, then sort them, then record - var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - mksbv := make([]bytesRv, len(mks)) - for i, k := range mks { - v := &mksbv[i] - l := len(mksv) - e2.MustEncode(k) - v.r = k - v.v = mksv[l:] - } - sort.Sort(bytesRvSlice(mksbv)) - for j := range mksbv { - if elemsep { - ee.WriteMapElemKey() - } - e.asis(mksbv[j].v) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true) - } - } -} - -// // -------------------------------------------------- - -// An Encoder writes an object to an output stream in the codec format. 
-type Encoder struct { - // hopefully, reduce derefencing cost by laying the encWriter inside the Encoder - e encDriver - // NOTE: Encoder shouldn't call it's write methods, - // as the handler MAY need to do some coordination. - w encWriter - - hh Handle - h *BasicHandle - - // ---- cpu cache line boundary? - - wi ioEncWriter - wb bytesEncWriter - bw bufio.Writer - - // cr containerStateRecv - as encDriverAsis - // ---- cpu cache line boundary? - - ci set - err error - - b [scratchByteArrayLen]byte - cf codecFner -} - -// NewEncoder returns an Encoder for encoding into an io.Writer. -// -// For efficiency, Users are encouraged to pass in a memory buffered writer -// (eg bufio.Writer, bytes.Buffer). -func NewEncoder(w io.Writer, h Handle) *Encoder { - e := newEncoder(h) - e.Reset(w) - return e -} - -// NewEncoderBytes returns an encoder for encoding directly and efficiently -// into a byte slice, using zero-copying to temporary slices. -// -// It will potentially replace the output byte slice pointed to. -// After encoding, the out parameter contains the encoded contents. -func NewEncoderBytes(out *[]byte, h Handle) *Encoder { - e := newEncoder(h) - e.ResetBytes(out) - return e -} - -func newEncoder(h Handle) *Encoder { - e := &Encoder{hh: h, h: h.getBasicHandle()} - e.e = h.newEncDriver(e) - e.as, _ = e.e.(encDriverAsis) - // e.cr, _ = e.e.(containerStateRecv) - return e -} - -// Reset the Encoder with a new output stream. -// -// This accommodates using the state of the Encoder, -// where it has "cached" information about sub-engines. 
-func (e *Encoder) Reset(w io.Writer) { - var ok bool - e.wi.w = w - if e.h.WriterBufferSize > 0 { - bw := bufio.NewWriterSize(w, e.h.WriterBufferSize) - e.bw = *bw - e.wi.bw = &e.bw - e.wi.sw = &e.bw - e.wi.fw = &e.bw - e.wi.ww = &e.bw - } else { - if e.wi.bw, ok = w.(io.ByteWriter); !ok { - e.wi.bw = &e.wi - } - if e.wi.sw, ok = w.(ioEncStringWriter); !ok { - e.wi.sw = &e.wi - } - e.wi.fw, _ = w.(ioEncFlusher) - e.wi.ww = w - } - e.w = &e.wi - e.e.reset() - e.cf.reset(e.hh) - e.err = nil -} - -func (e *Encoder) ResetBytes(out *[]byte) { - in := *out - if in == nil { - in = make([]byte, defEncByteBufSize) - } - e.wb.b, e.wb.out, e.wb.c = in, out, 0 - e.w = &e.wb - e.e.reset() - e.cf.reset(e.hh) - e.err = nil -} - -// Encode writes an object into a stream. -// -// Encoding can be configured via the struct tag for the fields. -// The "codec" key in struct field's tag value is the key name, -// followed by an optional comma and options. -// Note that the "json" key is used in the absence of the "codec" key. -// -// To set an option on all fields (e.g. omitempty on all fields), you -// can create a field called _struct, and set flags on it. -// -// Struct values "usually" encode as maps. Each exported struct field is encoded unless: -// - the field's tag is "-", OR -// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option. -// -// When encoding as a map, the first string in the tag (before the comma) -// is the map key string to use when encoding. -// -// However, struct values may encode as arrays. This happens when: -// - StructToArray Encode option is set, OR -// - the tag on the _struct field sets the "toarray" option -// Note that omitempty is ignored when encoding struct values as arrays, -// as an entry must be encoded for each field, to maintain its position. -// -// Values with types that implement MapBySlice are encoded as stream maps. 
-// -// The empty values (for omitempty option) are false, 0, any nil pointer -// or interface value, and any array, slice, map, or string of length zero. -// -// Anonymous fields are encoded inline except: -// - the struct tag specifies a replacement name (first value) -// - the field is of an interface type -// -// Examples: -// -// // NOTE: 'json:' can be used as struct tag key, in place 'codec:' below. -// type MyStruct struct { -// _struct bool `codec:",omitempty"` //set omitempty for every field -// Field1 string `codec:"-"` //skip this field -// Field2 int `codec:"myName"` //Use key "myName" in encode stream -// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. -// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. -// io.Reader //use key "Reader". -// MyStruct `codec:"my1" //use key "my1". -// MyStruct //inline it -// ... -// } -// -// type MyStruct struct { -// _struct bool `codec:",toarray"` //encode struct as an array -// } -// -// The mode of encoding is based on the type of the value. When a value is seen: -// - If a Selfer, call its CodecEncodeSelf method -// - If an extension is registered for it, call that extension function -// - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method -// - Else encode it based on its reflect.Kind -// -// Note that struct field names and keys in map[string]XXX will be treated as symbols. -// Some formats support symbols (e.g. binc) and will properly encode the string -// only once in the stream, and use a tag to refer to it thereafter. -func (e *Encoder) Encode(v interface{}) (err error) { - defer panicToErrs2(&e.err, &err) - e.MustEncode(v) - return -} - -// MustEncode is like Encode, but panics if unable to Encode. -// This provides insight to the code location that triggered the error. 
-func (e *Encoder) MustEncode(v interface{}) { - if e.err != nil { - panic(e.err) - } - e.encode(v) - e.e.atEndOfEncode() - e.w.atEndOfEncode() -} - -func (e *Encoder) encode(iv interface{}) { - if iv == nil || definitelyNil(iv) { - e.e.EncodeNil() - return - } - if v, ok := iv.(Selfer); ok { - v.CodecEncodeSelf(e) - return - } - - switch v := iv.(type) { - // case nil: - // e.e.EncodeNil() - // case Selfer: - // v.CodecEncodeSelf(e) - case Raw: - e.rawBytes(v) - case reflect.Value: - e.encodeValue(v, nil, true) - - case string: - e.e.EncodeString(c_UTF8, v) - case bool: - e.e.EncodeBool(v) - case int: - e.e.EncodeInt(int64(v)) - case int8: - e.e.EncodeInt(int64(v)) - case int16: - e.e.EncodeInt(int64(v)) - case int32: - e.e.EncodeInt(int64(v)) - case int64: - e.e.EncodeInt(v) - case uint: - e.e.EncodeUint(uint64(v)) - case uint8: - e.e.EncodeUint(uint64(v)) - case uint16: - e.e.EncodeUint(uint64(v)) - case uint32: - e.e.EncodeUint(uint64(v)) - case uint64: - e.e.EncodeUint(v) - case uintptr: - e.e.EncodeUint(uint64(v)) - case float32: - e.e.EncodeFloat32(v) - case float64: - e.e.EncodeFloat64(v) - - case []uint8: - e.e.EncodeStringBytes(c_RAW, v) - - case *string: - e.e.EncodeString(c_UTF8, *v) - case *bool: - e.e.EncodeBool(*v) - case *int: - e.e.EncodeInt(int64(*v)) - case *int8: - e.e.EncodeInt(int64(*v)) - case *int16: - e.e.EncodeInt(int64(*v)) - case *int32: - e.e.EncodeInt(int64(*v)) - case *int64: - e.e.EncodeInt(*v) - case *uint: - e.e.EncodeUint(uint64(*v)) - case *uint8: - e.e.EncodeUint(uint64(*v)) - case *uint16: - e.e.EncodeUint(uint64(*v)) - case *uint32: - e.e.EncodeUint(uint64(*v)) - case *uint64: - e.e.EncodeUint(*v) - case *uintptr: - e.e.EncodeUint(uint64(*v)) - case *float32: - e.e.EncodeFloat32(*v) - case *float64: - e.e.EncodeFloat64(*v) - - case *[]uint8: - e.e.EncodeStringBytes(c_RAW, *v) - - default: - if !fastpathEncodeTypeSwitch(iv, e) { - // checkfastpath=true (not false), as underlying slice/map type may be fast-path - 
e.encodeValue(reflect.ValueOf(iv), nil, true) - } - } -} - -func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) { - // if a valid fn is passed, it MUST BE for the dereferenced type of rv - var sptr uintptr -TOP: - switch rv.Kind() { - case reflect.Ptr: - if rv.IsNil() { - e.e.EncodeNil() - return - } - rv = rv.Elem() - if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { - // TODO: Movable pointers will be an issue here. Future problem. - sptr = rv.UnsafeAddr() - break TOP - } - goto TOP - case reflect.Interface: - if rv.IsNil() { - e.e.EncodeNil() - return - } - rv = rv.Elem() - goto TOP - case reflect.Slice, reflect.Map: - if rv.IsNil() { - e.e.EncodeNil() - return - } - case reflect.Invalid, reflect.Func: - e.e.EncodeNil() - return - } - - if sptr != 0 && (&e.ci).add(sptr) { - e.errorf("circular reference found: # %d", sptr) - } - - if fn == nil { - rt := rv.Type() - // TODO: calling isRecognizedRtid here is a major slowdown - if false && useLookupRecognizedTypes && isRecognizedRtidOrPtr(rt2id(rt)) { - e.encode(rv2i(rv)) - return - } - // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer - fn = e.cf.get(rt, checkFastpath, true) - } - fn.fe(e, &fn.i, rv) - if sptr != 0 { - (&e.ci).remove(sptr) - } -} - -func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - e.e.EncodeNil() - } else if asis { - e.asis(bs) - } else { - e.e.EncodeStringBytes(c, bs) - } -} - -func (e *Encoder) asis(v []byte) { - if e.as == nil { - e.w.writeb(v) - } else { - e.as.EncodeAsis(v) - } -} - -func (e *Encoder) rawBytes(vv Raw) { - v := []byte(vv) - if !e.h.Raw { - e.errorf("Raw values cannot be encoded: %v", v) - } - if e.as == nil { - e.w.writeb(v) - } else { - e.as.EncodeAsis(v) - } -} - -func (e *Encoder) errorf(format string, params ...interface{}) { - err := fmt.Errorf(format, params...) 
- panic(err) -} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go deleted file mode 100644 index 07521543550..00000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.generated.go +++ /dev/null @@ -1,32728 +0,0 @@ -// +build !notfastpath - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl -// ************************************************************ - -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register then in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. -// This file can be omitted without causing a build failure. -// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types, -// - map of all builtin types to string or interface value -// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
-// - -import ( - "reflect" - "sort" -) - -const fastpathEnabled = true - -type fastpathT struct{} - -var fastpathTV fastpathT - -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*Encoder, *codecFnInfo, reflect.Value) - decfn func(*Decoder, *codecFnInfo, reflect.Value) -} - -type fastpathA [271]fastpathE - -func (x *fastpathA) index(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - h, i, j := 0, 0, 271 // len(x) - for i < j { - h = i + (j-i)/2 - if x[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - } - if i < 271 && x[i].rtid == rtid { - return i - } - return -1 -} - -type fastpathAslice []fastpathE - -func (x fastpathAslice) Len() int { return len(x) } -func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } -func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -var fastpathAV fastpathA - -// due to possible initialization loop error, make fastpath in an init() -func init() { - i := 0 - fn := func(v interface{}, - fe func(*Encoder, *codecFnInfo, reflect.Value), - fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) { - xrt := reflect.TypeOf(v) - xptr := rt2id(xrt) - if useLookupRecognizedTypes { - recognizedRtids = append(recognizedRtids, xptr) - recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(xrt))) - } - fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} - i++ - return - } - - fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR) - fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR) - fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R) - fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R) - fn([]uint(nil), (*Encoder).fastpathEncSliceUintR, (*Decoder).fastpathDecSliceUintR) - fn([]uint16(nil), (*Encoder).fastpathEncSliceUint16R, (*Decoder).fastpathDecSliceUint16R) - 
fn([]uint32(nil), (*Encoder).fastpathEncSliceUint32R, (*Decoder).fastpathDecSliceUint32R) - fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R) - fn([]uintptr(nil), (*Encoder).fastpathEncSliceUintptrR, (*Decoder).fastpathDecSliceUintptrR) - fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR) - fn([]int8(nil), (*Encoder).fastpathEncSliceInt8R, (*Decoder).fastpathDecSliceInt8R) - fn([]int16(nil), (*Encoder).fastpathEncSliceInt16R, (*Decoder).fastpathDecSliceInt16R) - fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R) - fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R) - fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR) - - fn(map[interface{}]interface{}(nil), (*Encoder).fastpathEncMapIntfIntfR, (*Decoder).fastpathDecMapIntfIntfR) - fn(map[interface{}]string(nil), (*Encoder).fastpathEncMapIntfStringR, (*Decoder).fastpathDecMapIntfStringR) - fn(map[interface{}]uint(nil), (*Encoder).fastpathEncMapIntfUintR, (*Decoder).fastpathDecMapIntfUintR) - fn(map[interface{}]uint8(nil), (*Encoder).fastpathEncMapIntfUint8R, (*Decoder).fastpathDecMapIntfUint8R) - fn(map[interface{}]uint16(nil), (*Encoder).fastpathEncMapIntfUint16R, (*Decoder).fastpathDecMapIntfUint16R) - fn(map[interface{}]uint32(nil), (*Encoder).fastpathEncMapIntfUint32R, (*Decoder).fastpathDecMapIntfUint32R) - fn(map[interface{}]uint64(nil), (*Encoder).fastpathEncMapIntfUint64R, (*Decoder).fastpathDecMapIntfUint64R) - fn(map[interface{}]uintptr(nil), (*Encoder).fastpathEncMapIntfUintptrR, (*Decoder).fastpathDecMapIntfUintptrR) - fn(map[interface{}]int(nil), (*Encoder).fastpathEncMapIntfIntR, (*Decoder).fastpathDecMapIntfIntR) - fn(map[interface{}]int8(nil), (*Encoder).fastpathEncMapIntfInt8R, (*Decoder).fastpathDecMapIntfInt8R) - fn(map[interface{}]int16(nil), (*Encoder).fastpathEncMapIntfInt16R, (*Decoder).fastpathDecMapIntfInt16R) - 
fn(map[interface{}]int32(nil), (*Encoder).fastpathEncMapIntfInt32R, (*Decoder).fastpathDecMapIntfInt32R) - fn(map[interface{}]int64(nil), (*Encoder).fastpathEncMapIntfInt64R, (*Decoder).fastpathDecMapIntfInt64R) - fn(map[interface{}]float32(nil), (*Encoder).fastpathEncMapIntfFloat32R, (*Decoder).fastpathDecMapIntfFloat32R) - fn(map[interface{}]float64(nil), (*Encoder).fastpathEncMapIntfFloat64R, (*Decoder).fastpathDecMapIntfFloat64R) - fn(map[interface{}]bool(nil), (*Encoder).fastpathEncMapIntfBoolR, (*Decoder).fastpathDecMapIntfBoolR) - fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR) - fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR) - fn(map[string]uint(nil), (*Encoder).fastpathEncMapStringUintR, (*Decoder).fastpathDecMapStringUintR) - fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R) - fn(map[string]uint16(nil), (*Encoder).fastpathEncMapStringUint16R, (*Decoder).fastpathDecMapStringUint16R) - fn(map[string]uint32(nil), (*Encoder).fastpathEncMapStringUint32R, (*Decoder).fastpathDecMapStringUint32R) - fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R) - fn(map[string]uintptr(nil), (*Encoder).fastpathEncMapStringUintptrR, (*Decoder).fastpathDecMapStringUintptrR) - fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR) - fn(map[string]int8(nil), (*Encoder).fastpathEncMapStringInt8R, (*Decoder).fastpathDecMapStringInt8R) - fn(map[string]int16(nil), (*Encoder).fastpathEncMapStringInt16R, (*Decoder).fastpathDecMapStringInt16R) - fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R) - fn(map[string]int64(nil), (*Encoder).fastpathEncMapStringInt64R, (*Decoder).fastpathDecMapStringInt64R) - fn(map[string]float32(nil), 
(*Encoder).fastpathEncMapStringFloat32R, (*Decoder).fastpathDecMapStringFloat32R) - fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R) - fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR) - fn(map[float32]interface{}(nil), (*Encoder).fastpathEncMapFloat32IntfR, (*Decoder).fastpathDecMapFloat32IntfR) - fn(map[float32]string(nil), (*Encoder).fastpathEncMapFloat32StringR, (*Decoder).fastpathDecMapFloat32StringR) - fn(map[float32]uint(nil), (*Encoder).fastpathEncMapFloat32UintR, (*Decoder).fastpathDecMapFloat32UintR) - fn(map[float32]uint8(nil), (*Encoder).fastpathEncMapFloat32Uint8R, (*Decoder).fastpathDecMapFloat32Uint8R) - fn(map[float32]uint16(nil), (*Encoder).fastpathEncMapFloat32Uint16R, (*Decoder).fastpathDecMapFloat32Uint16R) - fn(map[float32]uint32(nil), (*Encoder).fastpathEncMapFloat32Uint32R, (*Decoder).fastpathDecMapFloat32Uint32R) - fn(map[float32]uint64(nil), (*Encoder).fastpathEncMapFloat32Uint64R, (*Decoder).fastpathDecMapFloat32Uint64R) - fn(map[float32]uintptr(nil), (*Encoder).fastpathEncMapFloat32UintptrR, (*Decoder).fastpathDecMapFloat32UintptrR) - fn(map[float32]int(nil), (*Encoder).fastpathEncMapFloat32IntR, (*Decoder).fastpathDecMapFloat32IntR) - fn(map[float32]int8(nil), (*Encoder).fastpathEncMapFloat32Int8R, (*Decoder).fastpathDecMapFloat32Int8R) - fn(map[float32]int16(nil), (*Encoder).fastpathEncMapFloat32Int16R, (*Decoder).fastpathDecMapFloat32Int16R) - fn(map[float32]int32(nil), (*Encoder).fastpathEncMapFloat32Int32R, (*Decoder).fastpathDecMapFloat32Int32R) - fn(map[float32]int64(nil), (*Encoder).fastpathEncMapFloat32Int64R, (*Decoder).fastpathDecMapFloat32Int64R) - fn(map[float32]float32(nil), (*Encoder).fastpathEncMapFloat32Float32R, (*Decoder).fastpathDecMapFloat32Float32R) - fn(map[float32]float64(nil), (*Encoder).fastpathEncMapFloat32Float64R, (*Decoder).fastpathDecMapFloat32Float64R) - fn(map[float32]bool(nil), 
(*Encoder).fastpathEncMapFloat32BoolR, (*Decoder).fastpathDecMapFloat32BoolR) - fn(map[float64]interface{}(nil), (*Encoder).fastpathEncMapFloat64IntfR, (*Decoder).fastpathDecMapFloat64IntfR) - fn(map[float64]string(nil), (*Encoder).fastpathEncMapFloat64StringR, (*Decoder).fastpathDecMapFloat64StringR) - fn(map[float64]uint(nil), (*Encoder).fastpathEncMapFloat64UintR, (*Decoder).fastpathDecMapFloat64UintR) - fn(map[float64]uint8(nil), (*Encoder).fastpathEncMapFloat64Uint8R, (*Decoder).fastpathDecMapFloat64Uint8R) - fn(map[float64]uint16(nil), (*Encoder).fastpathEncMapFloat64Uint16R, (*Decoder).fastpathDecMapFloat64Uint16R) - fn(map[float64]uint32(nil), (*Encoder).fastpathEncMapFloat64Uint32R, (*Decoder).fastpathDecMapFloat64Uint32R) - fn(map[float64]uint64(nil), (*Encoder).fastpathEncMapFloat64Uint64R, (*Decoder).fastpathDecMapFloat64Uint64R) - fn(map[float64]uintptr(nil), (*Encoder).fastpathEncMapFloat64UintptrR, (*Decoder).fastpathDecMapFloat64UintptrR) - fn(map[float64]int(nil), (*Encoder).fastpathEncMapFloat64IntR, (*Decoder).fastpathDecMapFloat64IntR) - fn(map[float64]int8(nil), (*Encoder).fastpathEncMapFloat64Int8R, (*Decoder).fastpathDecMapFloat64Int8R) - fn(map[float64]int16(nil), (*Encoder).fastpathEncMapFloat64Int16R, (*Decoder).fastpathDecMapFloat64Int16R) - fn(map[float64]int32(nil), (*Encoder).fastpathEncMapFloat64Int32R, (*Decoder).fastpathDecMapFloat64Int32R) - fn(map[float64]int64(nil), (*Encoder).fastpathEncMapFloat64Int64R, (*Decoder).fastpathDecMapFloat64Int64R) - fn(map[float64]float32(nil), (*Encoder).fastpathEncMapFloat64Float32R, (*Decoder).fastpathDecMapFloat64Float32R) - fn(map[float64]float64(nil), (*Encoder).fastpathEncMapFloat64Float64R, (*Decoder).fastpathDecMapFloat64Float64R) - fn(map[float64]bool(nil), (*Encoder).fastpathEncMapFloat64BoolR, (*Decoder).fastpathDecMapFloat64BoolR) - fn(map[uint]interface{}(nil), (*Encoder).fastpathEncMapUintIntfR, (*Decoder).fastpathDecMapUintIntfR) - fn(map[uint]string(nil), 
(*Encoder).fastpathEncMapUintStringR, (*Decoder).fastpathDecMapUintStringR) - fn(map[uint]uint(nil), (*Encoder).fastpathEncMapUintUintR, (*Decoder).fastpathDecMapUintUintR) - fn(map[uint]uint8(nil), (*Encoder).fastpathEncMapUintUint8R, (*Decoder).fastpathDecMapUintUint8R) - fn(map[uint]uint16(nil), (*Encoder).fastpathEncMapUintUint16R, (*Decoder).fastpathDecMapUintUint16R) - fn(map[uint]uint32(nil), (*Encoder).fastpathEncMapUintUint32R, (*Decoder).fastpathDecMapUintUint32R) - fn(map[uint]uint64(nil), (*Encoder).fastpathEncMapUintUint64R, (*Decoder).fastpathDecMapUintUint64R) - fn(map[uint]uintptr(nil), (*Encoder).fastpathEncMapUintUintptrR, (*Decoder).fastpathDecMapUintUintptrR) - fn(map[uint]int(nil), (*Encoder).fastpathEncMapUintIntR, (*Decoder).fastpathDecMapUintIntR) - fn(map[uint]int8(nil), (*Encoder).fastpathEncMapUintInt8R, (*Decoder).fastpathDecMapUintInt8R) - fn(map[uint]int16(nil), (*Encoder).fastpathEncMapUintInt16R, (*Decoder).fastpathDecMapUintInt16R) - fn(map[uint]int32(nil), (*Encoder).fastpathEncMapUintInt32R, (*Decoder).fastpathDecMapUintInt32R) - fn(map[uint]int64(nil), (*Encoder).fastpathEncMapUintInt64R, (*Decoder).fastpathDecMapUintInt64R) - fn(map[uint]float32(nil), (*Encoder).fastpathEncMapUintFloat32R, (*Decoder).fastpathDecMapUintFloat32R) - fn(map[uint]float64(nil), (*Encoder).fastpathEncMapUintFloat64R, (*Decoder).fastpathDecMapUintFloat64R) - fn(map[uint]bool(nil), (*Encoder).fastpathEncMapUintBoolR, (*Decoder).fastpathDecMapUintBoolR) - fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR) - fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR) - fn(map[uint8]uint(nil), (*Encoder).fastpathEncMapUint8UintR, (*Decoder).fastpathDecMapUint8UintR) - fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R) - fn(map[uint8]uint16(nil), (*Encoder).fastpathEncMapUint8Uint16R, 
(*Decoder).fastpathDecMapUint8Uint16R) - fn(map[uint8]uint32(nil), (*Encoder).fastpathEncMapUint8Uint32R, (*Decoder).fastpathDecMapUint8Uint32R) - fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R) - fn(map[uint8]uintptr(nil), (*Encoder).fastpathEncMapUint8UintptrR, (*Decoder).fastpathDecMapUint8UintptrR) - fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR) - fn(map[uint8]int8(nil), (*Encoder).fastpathEncMapUint8Int8R, (*Decoder).fastpathDecMapUint8Int8R) - fn(map[uint8]int16(nil), (*Encoder).fastpathEncMapUint8Int16R, (*Decoder).fastpathDecMapUint8Int16R) - fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R) - fn(map[uint8]int64(nil), (*Encoder).fastpathEncMapUint8Int64R, (*Decoder).fastpathDecMapUint8Int64R) - fn(map[uint8]float32(nil), (*Encoder).fastpathEncMapUint8Float32R, (*Decoder).fastpathDecMapUint8Float32R) - fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R) - fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR) - fn(map[uint16]interface{}(nil), (*Encoder).fastpathEncMapUint16IntfR, (*Decoder).fastpathDecMapUint16IntfR) - fn(map[uint16]string(nil), (*Encoder).fastpathEncMapUint16StringR, (*Decoder).fastpathDecMapUint16StringR) - fn(map[uint16]uint(nil), (*Encoder).fastpathEncMapUint16UintR, (*Decoder).fastpathDecMapUint16UintR) - fn(map[uint16]uint8(nil), (*Encoder).fastpathEncMapUint16Uint8R, (*Decoder).fastpathDecMapUint16Uint8R) - fn(map[uint16]uint16(nil), (*Encoder).fastpathEncMapUint16Uint16R, (*Decoder).fastpathDecMapUint16Uint16R) - fn(map[uint16]uint32(nil), (*Encoder).fastpathEncMapUint16Uint32R, (*Decoder).fastpathDecMapUint16Uint32R) - fn(map[uint16]uint64(nil), (*Encoder).fastpathEncMapUint16Uint64R, (*Decoder).fastpathDecMapUint16Uint64R) - fn(map[uint16]uintptr(nil), 
(*Encoder).fastpathEncMapUint16UintptrR, (*Decoder).fastpathDecMapUint16UintptrR) - fn(map[uint16]int(nil), (*Encoder).fastpathEncMapUint16IntR, (*Decoder).fastpathDecMapUint16IntR) - fn(map[uint16]int8(nil), (*Encoder).fastpathEncMapUint16Int8R, (*Decoder).fastpathDecMapUint16Int8R) - fn(map[uint16]int16(nil), (*Encoder).fastpathEncMapUint16Int16R, (*Decoder).fastpathDecMapUint16Int16R) - fn(map[uint16]int32(nil), (*Encoder).fastpathEncMapUint16Int32R, (*Decoder).fastpathDecMapUint16Int32R) - fn(map[uint16]int64(nil), (*Encoder).fastpathEncMapUint16Int64R, (*Decoder).fastpathDecMapUint16Int64R) - fn(map[uint16]float32(nil), (*Encoder).fastpathEncMapUint16Float32R, (*Decoder).fastpathDecMapUint16Float32R) - fn(map[uint16]float64(nil), (*Encoder).fastpathEncMapUint16Float64R, (*Decoder).fastpathDecMapUint16Float64R) - fn(map[uint16]bool(nil), (*Encoder).fastpathEncMapUint16BoolR, (*Decoder).fastpathDecMapUint16BoolR) - fn(map[uint32]interface{}(nil), (*Encoder).fastpathEncMapUint32IntfR, (*Decoder).fastpathDecMapUint32IntfR) - fn(map[uint32]string(nil), (*Encoder).fastpathEncMapUint32StringR, (*Decoder).fastpathDecMapUint32StringR) - fn(map[uint32]uint(nil), (*Encoder).fastpathEncMapUint32UintR, (*Decoder).fastpathDecMapUint32UintR) - fn(map[uint32]uint8(nil), (*Encoder).fastpathEncMapUint32Uint8R, (*Decoder).fastpathDecMapUint32Uint8R) - fn(map[uint32]uint16(nil), (*Encoder).fastpathEncMapUint32Uint16R, (*Decoder).fastpathDecMapUint32Uint16R) - fn(map[uint32]uint32(nil), (*Encoder).fastpathEncMapUint32Uint32R, (*Decoder).fastpathDecMapUint32Uint32R) - fn(map[uint32]uint64(nil), (*Encoder).fastpathEncMapUint32Uint64R, (*Decoder).fastpathDecMapUint32Uint64R) - fn(map[uint32]uintptr(nil), (*Encoder).fastpathEncMapUint32UintptrR, (*Decoder).fastpathDecMapUint32UintptrR) - fn(map[uint32]int(nil), (*Encoder).fastpathEncMapUint32IntR, (*Decoder).fastpathDecMapUint32IntR) - fn(map[uint32]int8(nil), (*Encoder).fastpathEncMapUint32Int8R, (*Decoder).fastpathDecMapUint32Int8R) 
- fn(map[uint32]int16(nil), (*Encoder).fastpathEncMapUint32Int16R, (*Decoder).fastpathDecMapUint32Int16R) - fn(map[uint32]int32(nil), (*Encoder).fastpathEncMapUint32Int32R, (*Decoder).fastpathDecMapUint32Int32R) - fn(map[uint32]int64(nil), (*Encoder).fastpathEncMapUint32Int64R, (*Decoder).fastpathDecMapUint32Int64R) - fn(map[uint32]float32(nil), (*Encoder).fastpathEncMapUint32Float32R, (*Decoder).fastpathDecMapUint32Float32R) - fn(map[uint32]float64(nil), (*Encoder).fastpathEncMapUint32Float64R, (*Decoder).fastpathDecMapUint32Float64R) - fn(map[uint32]bool(nil), (*Encoder).fastpathEncMapUint32BoolR, (*Decoder).fastpathDecMapUint32BoolR) - fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR) - fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR) - fn(map[uint64]uint(nil), (*Encoder).fastpathEncMapUint64UintR, (*Decoder).fastpathDecMapUint64UintR) - fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R) - fn(map[uint64]uint16(nil), (*Encoder).fastpathEncMapUint64Uint16R, (*Decoder).fastpathDecMapUint64Uint16R) - fn(map[uint64]uint32(nil), (*Encoder).fastpathEncMapUint64Uint32R, (*Decoder).fastpathDecMapUint64Uint32R) - fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R) - fn(map[uint64]uintptr(nil), (*Encoder).fastpathEncMapUint64UintptrR, (*Decoder).fastpathDecMapUint64UintptrR) - fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR) - fn(map[uint64]int8(nil), (*Encoder).fastpathEncMapUint64Int8R, (*Decoder).fastpathDecMapUint64Int8R) - fn(map[uint64]int16(nil), (*Encoder).fastpathEncMapUint64Int16R, (*Decoder).fastpathDecMapUint64Int16R) - fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, (*Decoder).fastpathDecMapUint64Int32R) - fn(map[uint64]int64(nil), (*Encoder).fastpathEncMapUint64Int64R, 
(*Decoder).fastpathDecMapUint64Int64R) - fn(map[uint64]float32(nil), (*Encoder).fastpathEncMapUint64Float32R, (*Decoder).fastpathDecMapUint64Float32R) - fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R) - fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, (*Decoder).fastpathDecMapUint64BoolR) - fn(map[uintptr]interface{}(nil), (*Encoder).fastpathEncMapUintptrIntfR, (*Decoder).fastpathDecMapUintptrIntfR) - fn(map[uintptr]string(nil), (*Encoder).fastpathEncMapUintptrStringR, (*Decoder).fastpathDecMapUintptrStringR) - fn(map[uintptr]uint(nil), (*Encoder).fastpathEncMapUintptrUintR, (*Decoder).fastpathDecMapUintptrUintR) - fn(map[uintptr]uint8(nil), (*Encoder).fastpathEncMapUintptrUint8R, (*Decoder).fastpathDecMapUintptrUint8R) - fn(map[uintptr]uint16(nil), (*Encoder).fastpathEncMapUintptrUint16R, (*Decoder).fastpathDecMapUintptrUint16R) - fn(map[uintptr]uint32(nil), (*Encoder).fastpathEncMapUintptrUint32R, (*Decoder).fastpathDecMapUintptrUint32R) - fn(map[uintptr]uint64(nil), (*Encoder).fastpathEncMapUintptrUint64R, (*Decoder).fastpathDecMapUintptrUint64R) - fn(map[uintptr]uintptr(nil), (*Encoder).fastpathEncMapUintptrUintptrR, (*Decoder).fastpathDecMapUintptrUintptrR) - fn(map[uintptr]int(nil), (*Encoder).fastpathEncMapUintptrIntR, (*Decoder).fastpathDecMapUintptrIntR) - fn(map[uintptr]int8(nil), (*Encoder).fastpathEncMapUintptrInt8R, (*Decoder).fastpathDecMapUintptrInt8R) - fn(map[uintptr]int16(nil), (*Encoder).fastpathEncMapUintptrInt16R, (*Decoder).fastpathDecMapUintptrInt16R) - fn(map[uintptr]int32(nil), (*Encoder).fastpathEncMapUintptrInt32R, (*Decoder).fastpathDecMapUintptrInt32R) - fn(map[uintptr]int64(nil), (*Encoder).fastpathEncMapUintptrInt64R, (*Decoder).fastpathDecMapUintptrInt64R) - fn(map[uintptr]float32(nil), (*Encoder).fastpathEncMapUintptrFloat32R, (*Decoder).fastpathDecMapUintptrFloat32R) - fn(map[uintptr]float64(nil), (*Encoder).fastpathEncMapUintptrFloat64R, 
(*Decoder).fastpathDecMapUintptrFloat64R) - fn(map[uintptr]bool(nil), (*Encoder).fastpathEncMapUintptrBoolR, (*Decoder).fastpathDecMapUintptrBoolR) - fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR) - fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR) - fn(map[int]uint(nil), (*Encoder).fastpathEncMapIntUintR, (*Decoder).fastpathDecMapIntUintR) - fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R) - fn(map[int]uint16(nil), (*Encoder).fastpathEncMapIntUint16R, (*Decoder).fastpathDecMapIntUint16R) - fn(map[int]uint32(nil), (*Encoder).fastpathEncMapIntUint32R, (*Decoder).fastpathDecMapIntUint32R) - fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R) - fn(map[int]uintptr(nil), (*Encoder).fastpathEncMapIntUintptrR, (*Decoder).fastpathDecMapIntUintptrR) - fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR) - fn(map[int]int8(nil), (*Encoder).fastpathEncMapIntInt8R, (*Decoder).fastpathDecMapIntInt8R) - fn(map[int]int16(nil), (*Encoder).fastpathEncMapIntInt16R, (*Decoder).fastpathDecMapIntInt16R) - fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R) - fn(map[int]int64(nil), (*Encoder).fastpathEncMapIntInt64R, (*Decoder).fastpathDecMapIntInt64R) - fn(map[int]float32(nil), (*Encoder).fastpathEncMapIntFloat32R, (*Decoder).fastpathDecMapIntFloat32R) - fn(map[int]float64(nil), (*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R) - fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR) - fn(map[int8]interface{}(nil), (*Encoder).fastpathEncMapInt8IntfR, (*Decoder).fastpathDecMapInt8IntfR) - fn(map[int8]string(nil), (*Encoder).fastpathEncMapInt8StringR, (*Decoder).fastpathDecMapInt8StringR) - fn(map[int8]uint(nil), (*Encoder).fastpathEncMapInt8UintR, 
(*Decoder).fastpathDecMapInt8UintR) - fn(map[int8]uint8(nil), (*Encoder).fastpathEncMapInt8Uint8R, (*Decoder).fastpathDecMapInt8Uint8R) - fn(map[int8]uint16(nil), (*Encoder).fastpathEncMapInt8Uint16R, (*Decoder).fastpathDecMapInt8Uint16R) - fn(map[int8]uint32(nil), (*Encoder).fastpathEncMapInt8Uint32R, (*Decoder).fastpathDecMapInt8Uint32R) - fn(map[int8]uint64(nil), (*Encoder).fastpathEncMapInt8Uint64R, (*Decoder).fastpathDecMapInt8Uint64R) - fn(map[int8]uintptr(nil), (*Encoder).fastpathEncMapInt8UintptrR, (*Decoder).fastpathDecMapInt8UintptrR) - fn(map[int8]int(nil), (*Encoder).fastpathEncMapInt8IntR, (*Decoder).fastpathDecMapInt8IntR) - fn(map[int8]int8(nil), (*Encoder).fastpathEncMapInt8Int8R, (*Decoder).fastpathDecMapInt8Int8R) - fn(map[int8]int16(nil), (*Encoder).fastpathEncMapInt8Int16R, (*Decoder).fastpathDecMapInt8Int16R) - fn(map[int8]int32(nil), (*Encoder).fastpathEncMapInt8Int32R, (*Decoder).fastpathDecMapInt8Int32R) - fn(map[int8]int64(nil), (*Encoder).fastpathEncMapInt8Int64R, (*Decoder).fastpathDecMapInt8Int64R) - fn(map[int8]float32(nil), (*Encoder).fastpathEncMapInt8Float32R, (*Decoder).fastpathDecMapInt8Float32R) - fn(map[int8]float64(nil), (*Encoder).fastpathEncMapInt8Float64R, (*Decoder).fastpathDecMapInt8Float64R) - fn(map[int8]bool(nil), (*Encoder).fastpathEncMapInt8BoolR, (*Decoder).fastpathDecMapInt8BoolR) - fn(map[int16]interface{}(nil), (*Encoder).fastpathEncMapInt16IntfR, (*Decoder).fastpathDecMapInt16IntfR) - fn(map[int16]string(nil), (*Encoder).fastpathEncMapInt16StringR, (*Decoder).fastpathDecMapInt16StringR) - fn(map[int16]uint(nil), (*Encoder).fastpathEncMapInt16UintR, (*Decoder).fastpathDecMapInt16UintR) - fn(map[int16]uint8(nil), (*Encoder).fastpathEncMapInt16Uint8R, (*Decoder).fastpathDecMapInt16Uint8R) - fn(map[int16]uint16(nil), (*Encoder).fastpathEncMapInt16Uint16R, (*Decoder).fastpathDecMapInt16Uint16R) - fn(map[int16]uint32(nil), (*Encoder).fastpathEncMapInt16Uint32R, (*Decoder).fastpathDecMapInt16Uint32R) - 
fn(map[int16]uint64(nil), (*Encoder).fastpathEncMapInt16Uint64R, (*Decoder).fastpathDecMapInt16Uint64R) - fn(map[int16]uintptr(nil), (*Encoder).fastpathEncMapInt16UintptrR, (*Decoder).fastpathDecMapInt16UintptrR) - fn(map[int16]int(nil), (*Encoder).fastpathEncMapInt16IntR, (*Decoder).fastpathDecMapInt16IntR) - fn(map[int16]int8(nil), (*Encoder).fastpathEncMapInt16Int8R, (*Decoder).fastpathDecMapInt16Int8R) - fn(map[int16]int16(nil), (*Encoder).fastpathEncMapInt16Int16R, (*Decoder).fastpathDecMapInt16Int16R) - fn(map[int16]int32(nil), (*Encoder).fastpathEncMapInt16Int32R, (*Decoder).fastpathDecMapInt16Int32R) - fn(map[int16]int64(nil), (*Encoder).fastpathEncMapInt16Int64R, (*Decoder).fastpathDecMapInt16Int64R) - fn(map[int16]float32(nil), (*Encoder).fastpathEncMapInt16Float32R, (*Decoder).fastpathDecMapInt16Float32R) - fn(map[int16]float64(nil), (*Encoder).fastpathEncMapInt16Float64R, (*Decoder).fastpathDecMapInt16Float64R) - fn(map[int16]bool(nil), (*Encoder).fastpathEncMapInt16BoolR, (*Decoder).fastpathDecMapInt16BoolR) - fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, (*Decoder).fastpathDecMapInt32IntfR) - fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR) - fn(map[int32]uint(nil), (*Encoder).fastpathEncMapInt32UintR, (*Decoder).fastpathDecMapInt32UintR) - fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R) - fn(map[int32]uint16(nil), (*Encoder).fastpathEncMapInt32Uint16R, (*Decoder).fastpathDecMapInt32Uint16R) - fn(map[int32]uint32(nil), (*Encoder).fastpathEncMapInt32Uint32R, (*Decoder).fastpathDecMapInt32Uint32R) - fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R) - fn(map[int32]uintptr(nil), (*Encoder).fastpathEncMapInt32UintptrR, (*Decoder).fastpathDecMapInt32UintptrR) - fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR) - 
fn(map[int32]int8(nil), (*Encoder).fastpathEncMapInt32Int8R, (*Decoder).fastpathDecMapInt32Int8R) - fn(map[int32]int16(nil), (*Encoder).fastpathEncMapInt32Int16R, (*Decoder).fastpathDecMapInt32Int16R) - fn(map[int32]int32(nil), (*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R) - fn(map[int32]int64(nil), (*Encoder).fastpathEncMapInt32Int64R, (*Decoder).fastpathDecMapInt32Int64R) - fn(map[int32]float32(nil), (*Encoder).fastpathEncMapInt32Float32R, (*Decoder).fastpathDecMapInt32Float32R) - fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R) - fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR) - fn(map[int64]interface{}(nil), (*Encoder).fastpathEncMapInt64IntfR, (*Decoder).fastpathDecMapInt64IntfR) - fn(map[int64]string(nil), (*Encoder).fastpathEncMapInt64StringR, (*Decoder).fastpathDecMapInt64StringR) - fn(map[int64]uint(nil), (*Encoder).fastpathEncMapInt64UintR, (*Decoder).fastpathDecMapInt64UintR) - fn(map[int64]uint8(nil), (*Encoder).fastpathEncMapInt64Uint8R, (*Decoder).fastpathDecMapInt64Uint8R) - fn(map[int64]uint16(nil), (*Encoder).fastpathEncMapInt64Uint16R, (*Decoder).fastpathDecMapInt64Uint16R) - fn(map[int64]uint32(nil), (*Encoder).fastpathEncMapInt64Uint32R, (*Decoder).fastpathDecMapInt64Uint32R) - fn(map[int64]uint64(nil), (*Encoder).fastpathEncMapInt64Uint64R, (*Decoder).fastpathDecMapInt64Uint64R) - fn(map[int64]uintptr(nil), (*Encoder).fastpathEncMapInt64UintptrR, (*Decoder).fastpathDecMapInt64UintptrR) - fn(map[int64]int(nil), (*Encoder).fastpathEncMapInt64IntR, (*Decoder).fastpathDecMapInt64IntR) - fn(map[int64]int8(nil), (*Encoder).fastpathEncMapInt64Int8R, (*Decoder).fastpathDecMapInt64Int8R) - fn(map[int64]int16(nil), (*Encoder).fastpathEncMapInt64Int16R, (*Decoder).fastpathDecMapInt64Int16R) - fn(map[int64]int32(nil), (*Encoder).fastpathEncMapInt64Int32R, (*Decoder).fastpathDecMapInt64Int32R) - fn(map[int64]int64(nil), 
(*Encoder).fastpathEncMapInt64Int64R, (*Decoder).fastpathDecMapInt64Int64R) - fn(map[int64]float32(nil), (*Encoder).fastpathEncMapInt64Float32R, (*Decoder).fastpathDecMapInt64Float32R) - fn(map[int64]float64(nil), (*Encoder).fastpathEncMapInt64Float64R, (*Decoder).fastpathDecMapInt64Float64R) - fn(map[int64]bool(nil), (*Encoder).fastpathEncMapInt64BoolR, (*Decoder).fastpathDecMapInt64BoolR) - fn(map[bool]interface{}(nil), (*Encoder).fastpathEncMapBoolIntfR, (*Decoder).fastpathDecMapBoolIntfR) - fn(map[bool]string(nil), (*Encoder).fastpathEncMapBoolStringR, (*Decoder).fastpathDecMapBoolStringR) - fn(map[bool]uint(nil), (*Encoder).fastpathEncMapBoolUintR, (*Decoder).fastpathDecMapBoolUintR) - fn(map[bool]uint8(nil), (*Encoder).fastpathEncMapBoolUint8R, (*Decoder).fastpathDecMapBoolUint8R) - fn(map[bool]uint16(nil), (*Encoder).fastpathEncMapBoolUint16R, (*Decoder).fastpathDecMapBoolUint16R) - fn(map[bool]uint32(nil), (*Encoder).fastpathEncMapBoolUint32R, (*Decoder).fastpathDecMapBoolUint32R) - fn(map[bool]uint64(nil), (*Encoder).fastpathEncMapBoolUint64R, (*Decoder).fastpathDecMapBoolUint64R) - fn(map[bool]uintptr(nil), (*Encoder).fastpathEncMapBoolUintptrR, (*Decoder).fastpathDecMapBoolUintptrR) - fn(map[bool]int(nil), (*Encoder).fastpathEncMapBoolIntR, (*Decoder).fastpathDecMapBoolIntR) - fn(map[bool]int8(nil), (*Encoder).fastpathEncMapBoolInt8R, (*Decoder).fastpathDecMapBoolInt8R) - fn(map[bool]int16(nil), (*Encoder).fastpathEncMapBoolInt16R, (*Decoder).fastpathDecMapBoolInt16R) - fn(map[bool]int32(nil), (*Encoder).fastpathEncMapBoolInt32R, (*Decoder).fastpathDecMapBoolInt32R) - fn(map[bool]int64(nil), (*Encoder).fastpathEncMapBoolInt64R, (*Decoder).fastpathDecMapBoolInt64R) - fn(map[bool]float32(nil), (*Encoder).fastpathEncMapBoolFloat32R, (*Decoder).fastpathDecMapBoolFloat32R) - fn(map[bool]float64(nil), (*Encoder).fastpathEncMapBoolFloat64R, (*Decoder).fastpathDecMapBoolFloat64R) - fn(map[bool]bool(nil), (*Encoder).fastpathEncMapBoolBoolR, 
(*Decoder).fastpathDecMapBoolBoolR) - - sort.Sort(fastpathAslice(fastpathAV[:])) -} - -// -- encode - -// -- -- fast path type switch -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { - - case []interface{}: - fastpathTV.EncSliceIntfV(v, e) - case *[]interface{}: - fastpathTV.EncSliceIntfV(*v, e) - - case map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(v, e) - case *map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(*v, e) - - case map[interface{}]string: - fastpathTV.EncMapIntfStringV(v, e) - case *map[interface{}]string: - fastpathTV.EncMapIntfStringV(*v, e) - - case map[interface{}]uint: - fastpathTV.EncMapIntfUintV(v, e) - case *map[interface{}]uint: - fastpathTV.EncMapIntfUintV(*v, e) - - case map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(v, e) - case *map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(*v, e) - - case map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(v, e) - case *map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(*v, e) - - case map[interface{}]uint32: - fastpathTV.EncMapIntfUint32V(v, e) - case *map[interface{}]uint32: - fastpathTV.EncMapIntfUint32V(*v, e) - - case map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(v, e) - case *map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(*v, e) - - case map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(v, e) - case *map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(*v, e) - - case map[interface{}]int: - fastpathTV.EncMapIntfIntV(v, e) - case *map[interface{}]int: - fastpathTV.EncMapIntfIntV(*v, e) - - case map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(v, e) - case *map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(*v, e) - - case map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(v, e) - case *map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(*v, e) - - case map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(v, e) - case *map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(*v, e) - - case 
map[interface{}]int64: - fastpathTV.EncMapIntfInt64V(v, e) - case *map[interface{}]int64: - fastpathTV.EncMapIntfInt64V(*v, e) - - case map[interface{}]float32: - fastpathTV.EncMapIntfFloat32V(v, e) - case *map[interface{}]float32: - fastpathTV.EncMapIntfFloat32V(*v, e) - - case map[interface{}]float64: - fastpathTV.EncMapIntfFloat64V(v, e) - case *map[interface{}]float64: - fastpathTV.EncMapIntfFloat64V(*v, e) - - case map[interface{}]bool: - fastpathTV.EncMapIntfBoolV(v, e) - case *map[interface{}]bool: - fastpathTV.EncMapIntfBoolV(*v, e) - - case []string: - fastpathTV.EncSliceStringV(v, e) - case *[]string: - fastpathTV.EncSliceStringV(*v, e) - - case map[string]interface{}: - fastpathTV.EncMapStringIntfV(v, e) - case *map[string]interface{}: - fastpathTV.EncMapStringIntfV(*v, e) - - case map[string]string: - fastpathTV.EncMapStringStringV(v, e) - case *map[string]string: - fastpathTV.EncMapStringStringV(*v, e) - - case map[string]uint: - fastpathTV.EncMapStringUintV(v, e) - case *map[string]uint: - fastpathTV.EncMapStringUintV(*v, e) - - case map[string]uint8: - fastpathTV.EncMapStringUint8V(v, e) - case *map[string]uint8: - fastpathTV.EncMapStringUint8V(*v, e) - - case map[string]uint16: - fastpathTV.EncMapStringUint16V(v, e) - case *map[string]uint16: - fastpathTV.EncMapStringUint16V(*v, e) - - case map[string]uint32: - fastpathTV.EncMapStringUint32V(v, e) - case *map[string]uint32: - fastpathTV.EncMapStringUint32V(*v, e) - - case map[string]uint64: - fastpathTV.EncMapStringUint64V(v, e) - case *map[string]uint64: - fastpathTV.EncMapStringUint64V(*v, e) - - case map[string]uintptr: - fastpathTV.EncMapStringUintptrV(v, e) - case *map[string]uintptr: - fastpathTV.EncMapStringUintptrV(*v, e) - - case map[string]int: - fastpathTV.EncMapStringIntV(v, e) - case *map[string]int: - fastpathTV.EncMapStringIntV(*v, e) - - case map[string]int8: - fastpathTV.EncMapStringInt8V(v, e) - case *map[string]int8: - fastpathTV.EncMapStringInt8V(*v, e) - - case map[string]int16: 
- fastpathTV.EncMapStringInt16V(v, e) - case *map[string]int16: - fastpathTV.EncMapStringInt16V(*v, e) - - case map[string]int32: - fastpathTV.EncMapStringInt32V(v, e) - case *map[string]int32: - fastpathTV.EncMapStringInt32V(*v, e) - - case map[string]int64: - fastpathTV.EncMapStringInt64V(v, e) - case *map[string]int64: - fastpathTV.EncMapStringInt64V(*v, e) - - case map[string]float32: - fastpathTV.EncMapStringFloat32V(v, e) - case *map[string]float32: - fastpathTV.EncMapStringFloat32V(*v, e) - - case map[string]float64: - fastpathTV.EncMapStringFloat64V(v, e) - case *map[string]float64: - fastpathTV.EncMapStringFloat64V(*v, e) - - case map[string]bool: - fastpathTV.EncMapStringBoolV(v, e) - case *map[string]bool: - fastpathTV.EncMapStringBoolV(*v, e) - - case []float32: - fastpathTV.EncSliceFloat32V(v, e) - case *[]float32: - fastpathTV.EncSliceFloat32V(*v, e) - - case map[float32]interface{}: - fastpathTV.EncMapFloat32IntfV(v, e) - case *map[float32]interface{}: - fastpathTV.EncMapFloat32IntfV(*v, e) - - case map[float32]string: - fastpathTV.EncMapFloat32StringV(v, e) - case *map[float32]string: - fastpathTV.EncMapFloat32StringV(*v, e) - - case map[float32]uint: - fastpathTV.EncMapFloat32UintV(v, e) - case *map[float32]uint: - fastpathTV.EncMapFloat32UintV(*v, e) - - case map[float32]uint8: - fastpathTV.EncMapFloat32Uint8V(v, e) - case *map[float32]uint8: - fastpathTV.EncMapFloat32Uint8V(*v, e) - - case map[float32]uint16: - fastpathTV.EncMapFloat32Uint16V(v, e) - case *map[float32]uint16: - fastpathTV.EncMapFloat32Uint16V(*v, e) - - case map[float32]uint32: - fastpathTV.EncMapFloat32Uint32V(v, e) - case *map[float32]uint32: - fastpathTV.EncMapFloat32Uint32V(*v, e) - - case map[float32]uint64: - fastpathTV.EncMapFloat32Uint64V(v, e) - case *map[float32]uint64: - fastpathTV.EncMapFloat32Uint64V(*v, e) - - case map[float32]uintptr: - fastpathTV.EncMapFloat32UintptrV(v, e) - case *map[float32]uintptr: - fastpathTV.EncMapFloat32UintptrV(*v, e) - - case 
map[float32]int: - fastpathTV.EncMapFloat32IntV(v, e) - case *map[float32]int: - fastpathTV.EncMapFloat32IntV(*v, e) - - case map[float32]int8: - fastpathTV.EncMapFloat32Int8V(v, e) - case *map[float32]int8: - fastpathTV.EncMapFloat32Int8V(*v, e) - - case map[float32]int16: - fastpathTV.EncMapFloat32Int16V(v, e) - case *map[float32]int16: - fastpathTV.EncMapFloat32Int16V(*v, e) - - case map[float32]int32: - fastpathTV.EncMapFloat32Int32V(v, e) - case *map[float32]int32: - fastpathTV.EncMapFloat32Int32V(*v, e) - - case map[float32]int64: - fastpathTV.EncMapFloat32Int64V(v, e) - case *map[float32]int64: - fastpathTV.EncMapFloat32Int64V(*v, e) - - case map[float32]float32: - fastpathTV.EncMapFloat32Float32V(v, e) - case *map[float32]float32: - fastpathTV.EncMapFloat32Float32V(*v, e) - - case map[float32]float64: - fastpathTV.EncMapFloat32Float64V(v, e) - case *map[float32]float64: - fastpathTV.EncMapFloat32Float64V(*v, e) - - case map[float32]bool: - fastpathTV.EncMapFloat32BoolV(v, e) - case *map[float32]bool: - fastpathTV.EncMapFloat32BoolV(*v, e) - - case []float64: - fastpathTV.EncSliceFloat64V(v, e) - case *[]float64: - fastpathTV.EncSliceFloat64V(*v, e) - - case map[float64]interface{}: - fastpathTV.EncMapFloat64IntfV(v, e) - case *map[float64]interface{}: - fastpathTV.EncMapFloat64IntfV(*v, e) - - case map[float64]string: - fastpathTV.EncMapFloat64StringV(v, e) - case *map[float64]string: - fastpathTV.EncMapFloat64StringV(*v, e) - - case map[float64]uint: - fastpathTV.EncMapFloat64UintV(v, e) - case *map[float64]uint: - fastpathTV.EncMapFloat64UintV(*v, e) - - case map[float64]uint8: - fastpathTV.EncMapFloat64Uint8V(v, e) - case *map[float64]uint8: - fastpathTV.EncMapFloat64Uint8V(*v, e) - - case map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(v, e) - case *map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(*v, e) - - case map[float64]uint32: - fastpathTV.EncMapFloat64Uint32V(v, e) - case *map[float64]uint32: - fastpathTV.EncMapFloat64Uint32V(*v, e) - 
- case map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(v, e) - case *map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(*v, e) - - case map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(v, e) - case *map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(*v, e) - - case map[float64]int: - fastpathTV.EncMapFloat64IntV(v, e) - case *map[float64]int: - fastpathTV.EncMapFloat64IntV(*v, e) - - case map[float64]int8: - fastpathTV.EncMapFloat64Int8V(v, e) - case *map[float64]int8: - fastpathTV.EncMapFloat64Int8V(*v, e) - - case map[float64]int16: - fastpathTV.EncMapFloat64Int16V(v, e) - case *map[float64]int16: - fastpathTV.EncMapFloat64Int16V(*v, e) - - case map[float64]int32: - fastpathTV.EncMapFloat64Int32V(v, e) - case *map[float64]int32: - fastpathTV.EncMapFloat64Int32V(*v, e) - - case map[float64]int64: - fastpathTV.EncMapFloat64Int64V(v, e) - case *map[float64]int64: - fastpathTV.EncMapFloat64Int64V(*v, e) - - case map[float64]float32: - fastpathTV.EncMapFloat64Float32V(v, e) - case *map[float64]float32: - fastpathTV.EncMapFloat64Float32V(*v, e) - - case map[float64]float64: - fastpathTV.EncMapFloat64Float64V(v, e) - case *map[float64]float64: - fastpathTV.EncMapFloat64Float64V(*v, e) - - case map[float64]bool: - fastpathTV.EncMapFloat64BoolV(v, e) - case *map[float64]bool: - fastpathTV.EncMapFloat64BoolV(*v, e) - - case []uint: - fastpathTV.EncSliceUintV(v, e) - case *[]uint: - fastpathTV.EncSliceUintV(*v, e) - - case map[uint]interface{}: - fastpathTV.EncMapUintIntfV(v, e) - case *map[uint]interface{}: - fastpathTV.EncMapUintIntfV(*v, e) - - case map[uint]string: - fastpathTV.EncMapUintStringV(v, e) - case *map[uint]string: - fastpathTV.EncMapUintStringV(*v, e) - - case map[uint]uint: - fastpathTV.EncMapUintUintV(v, e) - case *map[uint]uint: - fastpathTV.EncMapUintUintV(*v, e) - - case map[uint]uint8: - fastpathTV.EncMapUintUint8V(v, e) - case *map[uint]uint8: - fastpathTV.EncMapUintUint8V(*v, e) - - case map[uint]uint16: - 
fastpathTV.EncMapUintUint16V(v, e) - case *map[uint]uint16: - fastpathTV.EncMapUintUint16V(*v, e) - - case map[uint]uint32: - fastpathTV.EncMapUintUint32V(v, e) - case *map[uint]uint32: - fastpathTV.EncMapUintUint32V(*v, e) - - case map[uint]uint64: - fastpathTV.EncMapUintUint64V(v, e) - case *map[uint]uint64: - fastpathTV.EncMapUintUint64V(*v, e) - - case map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(v, e) - case *map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(*v, e) - - case map[uint]int: - fastpathTV.EncMapUintIntV(v, e) - case *map[uint]int: - fastpathTV.EncMapUintIntV(*v, e) - - case map[uint]int8: - fastpathTV.EncMapUintInt8V(v, e) - case *map[uint]int8: - fastpathTV.EncMapUintInt8V(*v, e) - - case map[uint]int16: - fastpathTV.EncMapUintInt16V(v, e) - case *map[uint]int16: - fastpathTV.EncMapUintInt16V(*v, e) - - case map[uint]int32: - fastpathTV.EncMapUintInt32V(v, e) - case *map[uint]int32: - fastpathTV.EncMapUintInt32V(*v, e) - - case map[uint]int64: - fastpathTV.EncMapUintInt64V(v, e) - case *map[uint]int64: - fastpathTV.EncMapUintInt64V(*v, e) - - case map[uint]float32: - fastpathTV.EncMapUintFloat32V(v, e) - case *map[uint]float32: - fastpathTV.EncMapUintFloat32V(*v, e) - - case map[uint]float64: - fastpathTV.EncMapUintFloat64V(v, e) - case *map[uint]float64: - fastpathTV.EncMapUintFloat64V(*v, e) - - case map[uint]bool: - fastpathTV.EncMapUintBoolV(v, e) - case *map[uint]bool: - fastpathTV.EncMapUintBoolV(*v, e) - - case map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(v, e) - case *map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(*v, e) - - case map[uint8]string: - fastpathTV.EncMapUint8StringV(v, e) - case *map[uint8]string: - fastpathTV.EncMapUint8StringV(*v, e) - - case map[uint8]uint: - fastpathTV.EncMapUint8UintV(v, e) - case *map[uint8]uint: - fastpathTV.EncMapUint8UintV(*v, e) - - case map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(v, e) - case *map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(*v, e) - - case map[uint8]uint16: - 
fastpathTV.EncMapUint8Uint16V(v, e) - case *map[uint8]uint16: - fastpathTV.EncMapUint8Uint16V(*v, e) - - case map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(v, e) - case *map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(*v, e) - - case map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(v, e) - case *map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(*v, e) - - case map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(v, e) - case *map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(*v, e) - - case map[uint8]int: - fastpathTV.EncMapUint8IntV(v, e) - case *map[uint8]int: - fastpathTV.EncMapUint8IntV(*v, e) - - case map[uint8]int8: - fastpathTV.EncMapUint8Int8V(v, e) - case *map[uint8]int8: - fastpathTV.EncMapUint8Int8V(*v, e) - - case map[uint8]int16: - fastpathTV.EncMapUint8Int16V(v, e) - case *map[uint8]int16: - fastpathTV.EncMapUint8Int16V(*v, e) - - case map[uint8]int32: - fastpathTV.EncMapUint8Int32V(v, e) - case *map[uint8]int32: - fastpathTV.EncMapUint8Int32V(*v, e) - - case map[uint8]int64: - fastpathTV.EncMapUint8Int64V(v, e) - case *map[uint8]int64: - fastpathTV.EncMapUint8Int64V(*v, e) - - case map[uint8]float32: - fastpathTV.EncMapUint8Float32V(v, e) - case *map[uint8]float32: - fastpathTV.EncMapUint8Float32V(*v, e) - - case map[uint8]float64: - fastpathTV.EncMapUint8Float64V(v, e) - case *map[uint8]float64: - fastpathTV.EncMapUint8Float64V(*v, e) - - case map[uint8]bool: - fastpathTV.EncMapUint8BoolV(v, e) - case *map[uint8]bool: - fastpathTV.EncMapUint8BoolV(*v, e) - - case []uint16: - fastpathTV.EncSliceUint16V(v, e) - case *[]uint16: - fastpathTV.EncSliceUint16V(*v, e) - - case map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(v, e) - case *map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(*v, e) - - case map[uint16]string: - fastpathTV.EncMapUint16StringV(v, e) - case *map[uint16]string: - fastpathTV.EncMapUint16StringV(*v, e) - - case map[uint16]uint: - fastpathTV.EncMapUint16UintV(v, e) - case *map[uint16]uint: - 
fastpathTV.EncMapUint16UintV(*v, e) - - case map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(v, e) - case *map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(*v, e) - - case map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(v, e) - case *map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(*v, e) - - case map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(v, e) - case *map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(*v, e) - - case map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(v, e) - case *map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(*v, e) - - case map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(v, e) - case *map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(*v, e) - - case map[uint16]int: - fastpathTV.EncMapUint16IntV(v, e) - case *map[uint16]int: - fastpathTV.EncMapUint16IntV(*v, e) - - case map[uint16]int8: - fastpathTV.EncMapUint16Int8V(v, e) - case *map[uint16]int8: - fastpathTV.EncMapUint16Int8V(*v, e) - - case map[uint16]int16: - fastpathTV.EncMapUint16Int16V(v, e) - case *map[uint16]int16: - fastpathTV.EncMapUint16Int16V(*v, e) - - case map[uint16]int32: - fastpathTV.EncMapUint16Int32V(v, e) - case *map[uint16]int32: - fastpathTV.EncMapUint16Int32V(*v, e) - - case map[uint16]int64: - fastpathTV.EncMapUint16Int64V(v, e) - case *map[uint16]int64: - fastpathTV.EncMapUint16Int64V(*v, e) - - case map[uint16]float32: - fastpathTV.EncMapUint16Float32V(v, e) - case *map[uint16]float32: - fastpathTV.EncMapUint16Float32V(*v, e) - - case map[uint16]float64: - fastpathTV.EncMapUint16Float64V(v, e) - case *map[uint16]float64: - fastpathTV.EncMapUint16Float64V(*v, e) - - case map[uint16]bool: - fastpathTV.EncMapUint16BoolV(v, e) - case *map[uint16]bool: - fastpathTV.EncMapUint16BoolV(*v, e) - - case []uint32: - fastpathTV.EncSliceUint32V(v, e) - case *[]uint32: - fastpathTV.EncSliceUint32V(*v, e) - - case map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(v, e) - case *map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(*v, e) - - 
case map[uint32]string: - fastpathTV.EncMapUint32StringV(v, e) - case *map[uint32]string: - fastpathTV.EncMapUint32StringV(*v, e) - - case map[uint32]uint: - fastpathTV.EncMapUint32UintV(v, e) - case *map[uint32]uint: - fastpathTV.EncMapUint32UintV(*v, e) - - case map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(v, e) - case *map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(*v, e) - - case map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(v, e) - case *map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(*v, e) - - case map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(v, e) - case *map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(*v, e) - - case map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(v, e) - case *map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(*v, e) - - case map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(v, e) - case *map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(*v, e) - - case map[uint32]int: - fastpathTV.EncMapUint32IntV(v, e) - case *map[uint32]int: - fastpathTV.EncMapUint32IntV(*v, e) - - case map[uint32]int8: - fastpathTV.EncMapUint32Int8V(v, e) - case *map[uint32]int8: - fastpathTV.EncMapUint32Int8V(*v, e) - - case map[uint32]int16: - fastpathTV.EncMapUint32Int16V(v, e) - case *map[uint32]int16: - fastpathTV.EncMapUint32Int16V(*v, e) - - case map[uint32]int32: - fastpathTV.EncMapUint32Int32V(v, e) - case *map[uint32]int32: - fastpathTV.EncMapUint32Int32V(*v, e) - - case map[uint32]int64: - fastpathTV.EncMapUint32Int64V(v, e) - case *map[uint32]int64: - fastpathTV.EncMapUint32Int64V(*v, e) - - case map[uint32]float32: - fastpathTV.EncMapUint32Float32V(v, e) - case *map[uint32]float32: - fastpathTV.EncMapUint32Float32V(*v, e) - - case map[uint32]float64: - fastpathTV.EncMapUint32Float64V(v, e) - case *map[uint32]float64: - fastpathTV.EncMapUint32Float64V(*v, e) - - case map[uint32]bool: - fastpathTV.EncMapUint32BoolV(v, e) - case *map[uint32]bool: - fastpathTV.EncMapUint32BoolV(*v, e) - - case []uint64: - 
fastpathTV.EncSliceUint64V(v, e) - case *[]uint64: - fastpathTV.EncSliceUint64V(*v, e) - - case map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(v, e) - case *map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(*v, e) - - case map[uint64]string: - fastpathTV.EncMapUint64StringV(v, e) - case *map[uint64]string: - fastpathTV.EncMapUint64StringV(*v, e) - - case map[uint64]uint: - fastpathTV.EncMapUint64UintV(v, e) - case *map[uint64]uint: - fastpathTV.EncMapUint64UintV(*v, e) - - case map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(v, e) - case *map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(*v, e) - - case map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(v, e) - case *map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(*v, e) - - case map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(v, e) - case *map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(*v, e) - - case map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(v, e) - case *map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(*v, e) - - case map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(v, e) - case *map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(*v, e) - - case map[uint64]int: - fastpathTV.EncMapUint64IntV(v, e) - case *map[uint64]int: - fastpathTV.EncMapUint64IntV(*v, e) - - case map[uint64]int8: - fastpathTV.EncMapUint64Int8V(v, e) - case *map[uint64]int8: - fastpathTV.EncMapUint64Int8V(*v, e) - - case map[uint64]int16: - fastpathTV.EncMapUint64Int16V(v, e) - case *map[uint64]int16: - fastpathTV.EncMapUint64Int16V(*v, e) - - case map[uint64]int32: - fastpathTV.EncMapUint64Int32V(v, e) - case *map[uint64]int32: - fastpathTV.EncMapUint64Int32V(*v, e) - - case map[uint64]int64: - fastpathTV.EncMapUint64Int64V(v, e) - case *map[uint64]int64: - fastpathTV.EncMapUint64Int64V(*v, e) - - case map[uint64]float32: - fastpathTV.EncMapUint64Float32V(v, e) - case *map[uint64]float32: - fastpathTV.EncMapUint64Float32V(*v, e) - - case map[uint64]float64: - fastpathTV.EncMapUint64Float64V(v, 
e) - case *map[uint64]float64: - fastpathTV.EncMapUint64Float64V(*v, e) - - case map[uint64]bool: - fastpathTV.EncMapUint64BoolV(v, e) - case *map[uint64]bool: - fastpathTV.EncMapUint64BoolV(*v, e) - - case []uintptr: - fastpathTV.EncSliceUintptrV(v, e) - case *[]uintptr: - fastpathTV.EncSliceUintptrV(*v, e) - - case map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(v, e) - case *map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(*v, e) - - case map[uintptr]string: - fastpathTV.EncMapUintptrStringV(v, e) - case *map[uintptr]string: - fastpathTV.EncMapUintptrStringV(*v, e) - - case map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(v, e) - case *map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(*v, e) - - case map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(v, e) - case *map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(*v, e) - - case map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(v, e) - case *map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(*v, e) - - case map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(v, e) - case *map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(*v, e) - - case map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(v, e) - case *map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(*v, e) - - case map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(v, e) - case *map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(*v, e) - - case map[uintptr]int: - fastpathTV.EncMapUintptrIntV(v, e) - case *map[uintptr]int: - fastpathTV.EncMapUintptrIntV(*v, e) - - case map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(v, e) - case *map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(*v, e) - - case map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(v, e) - case *map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(*v, e) - - case map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(v, e) - case *map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(*v, e) - - case map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(v, 
e) - case *map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(*v, e) - - case map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(v, e) - case *map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(*v, e) - - case map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(v, e) - case *map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(*v, e) - - case map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(v, e) - case *map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(*v, e) - - case []int: - fastpathTV.EncSliceIntV(v, e) - case *[]int: - fastpathTV.EncSliceIntV(*v, e) - - case map[int]interface{}: - fastpathTV.EncMapIntIntfV(v, e) - case *map[int]interface{}: - fastpathTV.EncMapIntIntfV(*v, e) - - case map[int]string: - fastpathTV.EncMapIntStringV(v, e) - case *map[int]string: - fastpathTV.EncMapIntStringV(*v, e) - - case map[int]uint: - fastpathTV.EncMapIntUintV(v, e) - case *map[int]uint: - fastpathTV.EncMapIntUintV(*v, e) - - case map[int]uint8: - fastpathTV.EncMapIntUint8V(v, e) - case *map[int]uint8: - fastpathTV.EncMapIntUint8V(*v, e) - - case map[int]uint16: - fastpathTV.EncMapIntUint16V(v, e) - case *map[int]uint16: - fastpathTV.EncMapIntUint16V(*v, e) - - case map[int]uint32: - fastpathTV.EncMapIntUint32V(v, e) - case *map[int]uint32: - fastpathTV.EncMapIntUint32V(*v, e) - - case map[int]uint64: - fastpathTV.EncMapIntUint64V(v, e) - case *map[int]uint64: - fastpathTV.EncMapIntUint64V(*v, e) - - case map[int]uintptr: - fastpathTV.EncMapIntUintptrV(v, e) - case *map[int]uintptr: - fastpathTV.EncMapIntUintptrV(*v, e) - - case map[int]int: - fastpathTV.EncMapIntIntV(v, e) - case *map[int]int: - fastpathTV.EncMapIntIntV(*v, e) - - case map[int]int8: - fastpathTV.EncMapIntInt8V(v, e) - case *map[int]int8: - fastpathTV.EncMapIntInt8V(*v, e) - - case map[int]int16: - fastpathTV.EncMapIntInt16V(v, e) - case *map[int]int16: - fastpathTV.EncMapIntInt16V(*v, e) - - case map[int]int32: - fastpathTV.EncMapIntInt32V(v, e) - case *map[int]int32: - 
fastpathTV.EncMapIntInt32V(*v, e) - - case map[int]int64: - fastpathTV.EncMapIntInt64V(v, e) - case *map[int]int64: - fastpathTV.EncMapIntInt64V(*v, e) - - case map[int]float32: - fastpathTV.EncMapIntFloat32V(v, e) - case *map[int]float32: - fastpathTV.EncMapIntFloat32V(*v, e) - - case map[int]float64: - fastpathTV.EncMapIntFloat64V(v, e) - case *map[int]float64: - fastpathTV.EncMapIntFloat64V(*v, e) - - case map[int]bool: - fastpathTV.EncMapIntBoolV(v, e) - case *map[int]bool: - fastpathTV.EncMapIntBoolV(*v, e) - - case []int8: - fastpathTV.EncSliceInt8V(v, e) - case *[]int8: - fastpathTV.EncSliceInt8V(*v, e) - - case map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(v, e) - case *map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(*v, e) - - case map[int8]string: - fastpathTV.EncMapInt8StringV(v, e) - case *map[int8]string: - fastpathTV.EncMapInt8StringV(*v, e) - - case map[int8]uint: - fastpathTV.EncMapInt8UintV(v, e) - case *map[int8]uint: - fastpathTV.EncMapInt8UintV(*v, e) - - case map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(v, e) - case *map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(*v, e) - - case map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(v, e) - case *map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(*v, e) - - case map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(v, e) - case *map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(*v, e) - - case map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(v, e) - case *map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(*v, e) - - case map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(v, e) - case *map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(*v, e) - - case map[int8]int: - fastpathTV.EncMapInt8IntV(v, e) - case *map[int8]int: - fastpathTV.EncMapInt8IntV(*v, e) - - case map[int8]int8: - fastpathTV.EncMapInt8Int8V(v, e) - case *map[int8]int8: - fastpathTV.EncMapInt8Int8V(*v, e) - - case map[int8]int16: - fastpathTV.EncMapInt8Int16V(v, e) - case *map[int8]int16: - fastpathTV.EncMapInt8Int16V(*v, e) - - case 
map[int8]int32: - fastpathTV.EncMapInt8Int32V(v, e) - case *map[int8]int32: - fastpathTV.EncMapInt8Int32V(*v, e) - - case map[int8]int64: - fastpathTV.EncMapInt8Int64V(v, e) - case *map[int8]int64: - fastpathTV.EncMapInt8Int64V(*v, e) - - case map[int8]float32: - fastpathTV.EncMapInt8Float32V(v, e) - case *map[int8]float32: - fastpathTV.EncMapInt8Float32V(*v, e) - - case map[int8]float64: - fastpathTV.EncMapInt8Float64V(v, e) - case *map[int8]float64: - fastpathTV.EncMapInt8Float64V(*v, e) - - case map[int8]bool: - fastpathTV.EncMapInt8BoolV(v, e) - case *map[int8]bool: - fastpathTV.EncMapInt8BoolV(*v, e) - - case []int16: - fastpathTV.EncSliceInt16V(v, e) - case *[]int16: - fastpathTV.EncSliceInt16V(*v, e) - - case map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(v, e) - case *map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(*v, e) - - case map[int16]string: - fastpathTV.EncMapInt16StringV(v, e) - case *map[int16]string: - fastpathTV.EncMapInt16StringV(*v, e) - - case map[int16]uint: - fastpathTV.EncMapInt16UintV(v, e) - case *map[int16]uint: - fastpathTV.EncMapInt16UintV(*v, e) - - case map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(v, e) - case *map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(*v, e) - - case map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(v, e) - case *map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(*v, e) - - case map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(v, e) - case *map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(*v, e) - - case map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(v, e) - case *map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(*v, e) - - case map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(v, e) - case *map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(*v, e) - - case map[int16]int: - fastpathTV.EncMapInt16IntV(v, e) - case *map[int16]int: - fastpathTV.EncMapInt16IntV(*v, e) - - case map[int16]int8: - fastpathTV.EncMapInt16Int8V(v, e) - case *map[int16]int8: - fastpathTV.EncMapInt16Int8V(*v, e) - - 
case map[int16]int16: - fastpathTV.EncMapInt16Int16V(v, e) - case *map[int16]int16: - fastpathTV.EncMapInt16Int16V(*v, e) - - case map[int16]int32: - fastpathTV.EncMapInt16Int32V(v, e) - case *map[int16]int32: - fastpathTV.EncMapInt16Int32V(*v, e) - - case map[int16]int64: - fastpathTV.EncMapInt16Int64V(v, e) - case *map[int16]int64: - fastpathTV.EncMapInt16Int64V(*v, e) - - case map[int16]float32: - fastpathTV.EncMapInt16Float32V(v, e) - case *map[int16]float32: - fastpathTV.EncMapInt16Float32V(*v, e) - - case map[int16]float64: - fastpathTV.EncMapInt16Float64V(v, e) - case *map[int16]float64: - fastpathTV.EncMapInt16Float64V(*v, e) - - case map[int16]bool: - fastpathTV.EncMapInt16BoolV(v, e) - case *map[int16]bool: - fastpathTV.EncMapInt16BoolV(*v, e) - - case []int32: - fastpathTV.EncSliceInt32V(v, e) - case *[]int32: - fastpathTV.EncSliceInt32V(*v, e) - - case map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(v, e) - case *map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(*v, e) - - case map[int32]string: - fastpathTV.EncMapInt32StringV(v, e) - case *map[int32]string: - fastpathTV.EncMapInt32StringV(*v, e) - - case map[int32]uint: - fastpathTV.EncMapInt32UintV(v, e) - case *map[int32]uint: - fastpathTV.EncMapInt32UintV(*v, e) - - case map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(v, e) - case *map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(*v, e) - - case map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(v, e) - case *map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(*v, e) - - case map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(v, e) - case *map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(*v, e) - - case map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(v, e) - case *map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(*v, e) - - case map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(v, e) - case *map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(*v, e) - - case map[int32]int: - fastpathTV.EncMapInt32IntV(v, e) - case *map[int32]int: - 
fastpathTV.EncMapInt32IntV(*v, e) - - case map[int32]int8: - fastpathTV.EncMapInt32Int8V(v, e) - case *map[int32]int8: - fastpathTV.EncMapInt32Int8V(*v, e) - - case map[int32]int16: - fastpathTV.EncMapInt32Int16V(v, e) - case *map[int32]int16: - fastpathTV.EncMapInt32Int16V(*v, e) - - case map[int32]int32: - fastpathTV.EncMapInt32Int32V(v, e) - case *map[int32]int32: - fastpathTV.EncMapInt32Int32V(*v, e) - - case map[int32]int64: - fastpathTV.EncMapInt32Int64V(v, e) - case *map[int32]int64: - fastpathTV.EncMapInt32Int64V(*v, e) - - case map[int32]float32: - fastpathTV.EncMapInt32Float32V(v, e) - case *map[int32]float32: - fastpathTV.EncMapInt32Float32V(*v, e) - - case map[int32]float64: - fastpathTV.EncMapInt32Float64V(v, e) - case *map[int32]float64: - fastpathTV.EncMapInt32Float64V(*v, e) - - case map[int32]bool: - fastpathTV.EncMapInt32BoolV(v, e) - case *map[int32]bool: - fastpathTV.EncMapInt32BoolV(*v, e) - - case []int64: - fastpathTV.EncSliceInt64V(v, e) - case *[]int64: - fastpathTV.EncSliceInt64V(*v, e) - - case map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(v, e) - case *map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(*v, e) - - case map[int64]string: - fastpathTV.EncMapInt64StringV(v, e) - case *map[int64]string: - fastpathTV.EncMapInt64StringV(*v, e) - - case map[int64]uint: - fastpathTV.EncMapInt64UintV(v, e) - case *map[int64]uint: - fastpathTV.EncMapInt64UintV(*v, e) - - case map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(v, e) - case *map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(*v, e) - - case map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(v, e) - case *map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(*v, e) - - case map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(v, e) - case *map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(*v, e) - - case map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(v, e) - case *map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(*v, e) - - case map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(v, e) 
- case *map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(*v, e) - - case map[int64]int: - fastpathTV.EncMapInt64IntV(v, e) - case *map[int64]int: - fastpathTV.EncMapInt64IntV(*v, e) - - case map[int64]int8: - fastpathTV.EncMapInt64Int8V(v, e) - case *map[int64]int8: - fastpathTV.EncMapInt64Int8V(*v, e) - - case map[int64]int16: - fastpathTV.EncMapInt64Int16V(v, e) - case *map[int64]int16: - fastpathTV.EncMapInt64Int16V(*v, e) - - case map[int64]int32: - fastpathTV.EncMapInt64Int32V(v, e) - case *map[int64]int32: - fastpathTV.EncMapInt64Int32V(*v, e) - - case map[int64]int64: - fastpathTV.EncMapInt64Int64V(v, e) - case *map[int64]int64: - fastpathTV.EncMapInt64Int64V(*v, e) - - case map[int64]float32: - fastpathTV.EncMapInt64Float32V(v, e) - case *map[int64]float32: - fastpathTV.EncMapInt64Float32V(*v, e) - - case map[int64]float64: - fastpathTV.EncMapInt64Float64V(v, e) - case *map[int64]float64: - fastpathTV.EncMapInt64Float64V(*v, e) - - case map[int64]bool: - fastpathTV.EncMapInt64BoolV(v, e) - case *map[int64]bool: - fastpathTV.EncMapInt64BoolV(*v, e) - - case []bool: - fastpathTV.EncSliceBoolV(v, e) - case *[]bool: - fastpathTV.EncSliceBoolV(*v, e) - - case map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(v, e) - case *map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(*v, e) - - case map[bool]string: - fastpathTV.EncMapBoolStringV(v, e) - case *map[bool]string: - fastpathTV.EncMapBoolStringV(*v, e) - - case map[bool]uint: - fastpathTV.EncMapBoolUintV(v, e) - case *map[bool]uint: - fastpathTV.EncMapBoolUintV(*v, e) - - case map[bool]uint8: - fastpathTV.EncMapBoolUint8V(v, e) - case *map[bool]uint8: - fastpathTV.EncMapBoolUint8V(*v, e) - - case map[bool]uint16: - fastpathTV.EncMapBoolUint16V(v, e) - case *map[bool]uint16: - fastpathTV.EncMapBoolUint16V(*v, e) - - case map[bool]uint32: - fastpathTV.EncMapBoolUint32V(v, e) - case *map[bool]uint32: - fastpathTV.EncMapBoolUint32V(*v, e) - - case map[bool]uint64: - fastpathTV.EncMapBoolUint64V(v, e) - case 
*map[bool]uint64: - fastpathTV.EncMapBoolUint64V(*v, e) - - case map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(v, e) - case *map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(*v, e) - - case map[bool]int: - fastpathTV.EncMapBoolIntV(v, e) - case *map[bool]int: - fastpathTV.EncMapBoolIntV(*v, e) - - case map[bool]int8: - fastpathTV.EncMapBoolInt8V(v, e) - case *map[bool]int8: - fastpathTV.EncMapBoolInt8V(*v, e) - - case map[bool]int16: - fastpathTV.EncMapBoolInt16V(v, e) - case *map[bool]int16: - fastpathTV.EncMapBoolInt16V(*v, e) - - case map[bool]int32: - fastpathTV.EncMapBoolInt32V(v, e) - case *map[bool]int32: - fastpathTV.EncMapBoolInt32V(*v, e) - - case map[bool]int64: - fastpathTV.EncMapBoolInt64V(v, e) - case *map[bool]int64: - fastpathTV.EncMapBoolInt64V(*v, e) - - case map[bool]float32: - fastpathTV.EncMapBoolFloat32V(v, e) - case *map[bool]float32: - fastpathTV.EncMapBoolFloat32V(*v, e) - - case map[bool]float64: - fastpathTV.EncMapBoolFloat64V(v, e) - case *map[bool]float64: - fastpathTV.EncMapBoolFloat64V(*v, e) - - case map[bool]bool: - fastpathTV.EncMapBoolBoolV(v, e) - case *map[bool]bool: - fastpathTV.EncMapBoolBoolV(*v, e) - - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions - -func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceIntfV(rv2i(rv).([]interface{}), e) - } else { - fastpathTV.EncSliceIntfV(rv2i(rv).([]interface{}), e) - } -} -func (_ fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - e.encode(v2) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 
{ - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - e.encode(v2) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceStringV(rv2i(rv).([]string), e) - } else { - fastpathTV.EncSliceStringV(rv2i(rv).([]string), e) - } -} -func (_ fastpathT) EncSliceStringV(v []string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeString(c_UTF8, v2) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeString(c_UTF8, v2) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat32V(rv2i(rv).([]float32), e) - } else { - fastpathTV.EncSliceFloat32V(rv2i(rv).([]float32), e) - } -} -func (_ fastpathT) EncSliceFloat32V(v []float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeFloat32(v2) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - 
ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeFloat32(v2) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat64V(rv2i(rv).([]float64), e) - } else { - fastpathTV.EncSliceFloat64V(rv2i(rv).([]float64), e) - } -} -func (_ fastpathT) EncSliceFloat64V(v []float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeFloat64(v2) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeFloat64(v2) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceUintR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUintV(rv2i(rv).([]uint), e) - } else { - fastpathTV.EncSliceUintV(rv2i(rv).([]uint), e) - } -} -func (_ fastpathT) EncSliceUintV(v []uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceUintV(v []uint, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - 
ee.WriteMapElemValue() - } - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceUint16R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint16V(rv2i(rv).([]uint16), e) - } else { - fastpathTV.EncSliceUint16V(rv2i(rv).([]uint16), e) - } -} -func (_ fastpathT) EncSliceUint16V(v []uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceUint32R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint32V(rv2i(rv).([]uint32), e) - } else { - fastpathTV.EncSliceUint32V(rv2i(rv).([]uint32), e) - } -} -func (_ fastpathT) EncSliceUint32V(v []uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteMapEnd() -} - -func (e 
*Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint64V(rv2i(rv).([]uint64), e) - } else { - fastpathTV.EncSliceUint64V(rv2i(rv).([]uint64), e) - } -} -func (_ fastpathT) EncSliceUint64V(v []uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceUintptrR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUintptrV(rv2i(rv).([]uintptr), e) - } else { - fastpathTV.EncSliceUintptrV(rv2i(rv).([]uintptr), e) - } -} -func (_ fastpathT) EncSliceUintptrV(v []uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - e.encode(v2) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - e.encode(v2) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - 
fastpathTV.EncAsMapSliceIntV(rv2i(rv).([]int), e) - } else { - fastpathTV.EncSliceIntV(rv2i(rv).([]int), e) - } -} -func (_ fastpathT) EncSliceIntV(v []int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeInt(int64(v2)) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeInt(int64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceInt8R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt8V(rv2i(rv).([]int8), e) - } else { - fastpathTV.EncSliceInt8V(rv2i(rv).([]int8), e) - } -} -func (_ fastpathT) EncSliceInt8V(v []int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeInt(int64(v2)) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceInt8V(v []int8, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeInt(int64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceInt16R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt16V(rv2i(rv).([]int16), e) - } else { - fastpathTV.EncSliceInt16V(rv2i(rv).([]int16), e) - } -} -func (_ fastpathT) 
EncSliceInt16V(v []int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeInt(int64(v2)) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceInt16V(v []int16, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeInt(int64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt32V(rv2i(rv).([]int32), e) - } else { - fastpathTV.EncSliceInt32V(rv2i(rv).([]int32), e) - } -} -func (_ fastpathT) EncSliceInt32V(v []int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeInt(int64(v2)) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeInt(int64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt64V(rv2i(rv).([]int64), e) - } else { - fastpathTV.EncSliceInt64V(rv2i(rv).([]int64), e) - } -} -func (_ fastpathT) EncSliceInt64V(v []int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, 
e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeInt(int64(v2)) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeInt(int64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceBoolV(rv2i(rv).([]bool), e) - } else { - fastpathTV.EncSliceBoolV(rv2i(rv).([]bool), e) - } -} -func (_ fastpathT) EncSliceBoolV(v []bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeBool(v2) - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeBool(v2) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), e) -} -func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := 
NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfStringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfStringV(rv2i(rv).(map[interface{}]string), e) -} -func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfUintV(rv2i(rv).(map[interface{}]uint), e) -} -func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 
0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), e) -} -func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), e) -} -func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - 
ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), e) -} -func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), e) -} -func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, e *Encoder) { - if v == 
nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), e) -} -func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfIntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfIntV(rv2i(rv).(map[interface{}]int), e) -} -func (_ fastpathT) 
EncMapIntfIntV(v map[interface{}]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfInt8V(rv2i(rv).(map[interface{}]int8), e) -} -func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapIntfInt16V(rv2i(rv).(map[interface{}]int16), e) -} -func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfInt32V(rv2i(rv).(map[interface{}]int32), e) -} -func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e 
*Encoder) fastpathEncMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfInt64V(rv2i(rv).(map[interface{}]int64), e) -} -func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), e) -} -func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - 
ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), e) -} -func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfBoolV(rv2i(rv).(map[interface{}]bool), e) -} -func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { 
- if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) -} -func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - e.encode(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e) -} -func (_ fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[string(k2)]) - } - } else { - for k2, v2 := range v { - if 
esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringUintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringUintV(rv2i(rv).(map[string]uint), e) -} -func (_ fastpathT) EncMapStringUintV(v map[string]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) -} -func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - 
ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringUint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringUint16V(rv2i(rv).(map[string]uint16), e) -} -func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringUint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringUint32V(rv2i(rv).(map[string]uint32), e) -} -func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - 
ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) -} -func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringUintptrV(rv2i(rv).(map[string]uintptr), e) -} -func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - 
sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - e.encode(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e) -} -func (_ fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringInt8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringInt8V(rv2i(rv).(map[string]int8), e) -} -func (_ fastpathT) EncMapStringInt8V(v map[string]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := 
range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringInt16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringInt16V(rv2i(rv).(map[string]int16), e) -} -func (_ fastpathT) EncMapStringInt16V(v map[string]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) -} -func (_ fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - 
if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringInt64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringInt64V(rv2i(rv).(map[string]int64), e) -} -func (_ fastpathT) EncMapStringInt64V(v map[string]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringFloat32V(rv2i(rv).(map[string]float32), e) -} -func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - 
ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) -} -func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) -} -func (_ fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) { - if v == nil { - 
e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), e) -} -func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32StringV(rv2i(rv).(map[float32]string), e) -} -func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if 
e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32UintV(rv2i(rv).(map[float32]uint), e) -} -func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), e) -} -func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep 
{ - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), e) -} -func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), e) -} -func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - 
ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), e) -} -func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), e) -} -func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32IntV(rv2i(rv).(map[float32]int), e) -} -func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, 
e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32Int8V(rv2i(rv).(map[float32]int8), e) -} -func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32Int16V(rv2i(rv).(map[float32]int16), e) -} -func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - 
sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32Int32V(rv2i(rv).(map[float32]int32), e) -} -func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32Int64V(rv2i(rv).(map[float32]int64), e) -} -func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - 
ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32Float32V(rv2i(rv).(map[float32]float32), e) -} -func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat32Float64V(rv2i(rv).(map[float32]float64), e) -} -func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapFloat32BoolV(rv2i(rv).(map[float32]bool), e) -} -func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), e) -} -func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64StringV(rv2i(rv).(map[float64]string), e) -} -func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { 
- v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64UintV(rv2i(rv).(map[float64]uint), e) -} -func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), e) -} -func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - 
ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), e) -} -func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), e) -} -func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) 
- } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), e) -} -func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), e) -} -func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64IntV(rv2i(rv).(map[float64]int), e) -} -func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, e *Encoder) { - if v == 
nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64Int8V(rv2i(rv).(map[float64]int8), e) -} -func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64Int16V(rv2i(rv).(map[float64]int16), e) -} -func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for 
_, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64Int32V(rv2i(rv).(map[float64]int32), e) -} -func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64Int64V(rv2i(rv).(map[float64]int64), e) -} -func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - 
ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64Float32V(rv2i(rv).(map[float64]float32), e) -} -func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapFloat64Float64V(rv2i(rv).(map[float64]float64), e) -} -func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapFloat64BoolV(rv2i(rv).(map[float64]bool), e) -} -func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(float64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintIntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintIntfV(rv2i(rv).(map[uint]interface{}), e) -} -func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintStringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintStringV(rv2i(rv).(map[uint]string), e) -} -func (_ fastpathT) EncMapUintStringV(v map[uint]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, 
len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintUintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintUintV(rv2i(rv).(map[uint]uint), e) -} -func (_ fastpathT) EncMapUintUintV(v map[uint]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintUint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintUint8V(rv2i(rv).(map[uint]uint8), e) -} -func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint(k2)])) 
- } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintUint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintUint16V(rv2i(rv).(map[uint]uint16), e) -} -func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintUint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintUint32V(rv2i(rv).(map[uint]uint32), e) -} -func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintUint64R(f 
*codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintUint64V(rv2i(rv).(map[uint]uint64), e) -} -func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintUintptrV(rv2i(rv).(map[uint]uintptr), e) -} -func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintIntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintIntV(rv2i(rv).(map[uint]int), e) -} -func (_ fastpathT) EncMapUintIntV(v map[uint]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if 
e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintInt8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintInt8V(rv2i(rv).(map[uint]int8), e) -} -func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintInt16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintInt16V(rv2i(rv).(map[uint]int16), e) -} -func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - 
ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintInt32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintInt32V(rv2i(rv).(map[uint]int32), e) -} -func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintInt64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintInt64V(rv2i(rv).(map[uint]int64), e) -} -func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) 
fastpathEncMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintFloat32V(rv2i(rv).(map[uint]float32), e) -} -func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintFloat64V(rv2i(rv).(map[uint]float64), e) -} -func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintBoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintBoolV(rv2i(rv).(map[uint]bool), e) -} -func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, 
e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) -} -func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) -} -func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - 
ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8UintV(rv2i(rv).(map[uint8]uint), e) -} -func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) -} -func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - 
ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), e) -} -func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), e) -} -func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) -} -func (_ fastpathT) EncMapUint8Uint64V(v 
map[uint8]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), e) -} -func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) -} -func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - 
sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Int8V(rv2i(rv).(map[uint8]int8), e) -} -func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Int16V(rv2i(rv).(map[uint8]int16), e) -} -func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - 
ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) -} -func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Int64V(rv2i(rv).(map[uint8]int64), e) -} -func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapUint8Float32V(rv2i(rv).(map[uint8]float32), e) -} -func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) -} -func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) -} -func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical 
{ - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), e) -} -func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16StringV(rv2i(rv).(map[uint16]string), e) -} -func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - 
ee.EncodeString(c_UTF8, v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16UintV(rv2i(rv).(map[uint16]uint), e) -} -func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), e) -} -func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - 
-func (e *Encoder) fastpathEncMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), e) -} -func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), e) -} -func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), e) -} -func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) { - if v == 
nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), e) -} -func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16IntV(rv2i(rv).(map[uint16]int), e) -} -func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range 
v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Int8V(rv2i(rv).(map[uint16]int8), e) -} -func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Int16V(rv2i(rv).(map[uint16]int16), e) -} -func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - 
ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Int32V(rv2i(rv).(map[uint16]int32), e) -} -func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Int64V(rv2i(rv).(map[uint16]int64), e) -} -func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapUint16Float32V(rv2i(rv).(map[uint16]float32), e) -} -func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Float64V(rv2i(rv).(map[uint16]float64), e) -} -func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16BoolV(rv2i(rv).(map[uint16]bool), e) -} -func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) 
- if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), e) -} -func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32StringV(rv2i(rv).(map[uint32]string), e) -} -func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - 
ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32UintV(rv2i(rv).(map[uint32]uint), e) -} -func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), e) -} -func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } 
- ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), e) -} -func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), e) -} -func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), e) -} -func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e 
*Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), e) -} -func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32IntV(rv2i(rv).(map[uint32]int), e) -} -func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) 
- for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Int8V(rv2i(rv).(map[uint32]int8), e) -} -func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Int16V(rv2i(rv).(map[uint32]int16), e) -} -func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - 
ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Int32V(rv2i(rv).(map[uint32]int32), e) -} -func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Int64V(rv2i(rv).(map[uint32]int64), e) -} -func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapUint32Float32V(rv2i(rv).(map[uint32]float32), e) -} -func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Float64V(rv2i(rv).(map[uint32]float64), e) -} -func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32BoolV(rv2i(rv).(map[uint32]bool), e) -} -func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) 
- if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) -} -func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) -} -func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - 
ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64UintV(rv2i(rv).(map[uint64]uint), e) -} -func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) -} -func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } 
- ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), e) -} -func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), e) -} -func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) -} -func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e 
*Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), e) -} -func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) -} -func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) 
- for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Int8V(rv2i(rv).(map[uint64]int8), e) -} -func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Int16V(rv2i(rv).(map[uint64]int16), e) -} -func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - 
ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) -} -func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Int64V(rv2i(rv).(map[uint64]int64), e) -} -func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapUint64Float32V(rv2i(rv).(map[uint64]float32), e) -} -func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) -} -func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) -} -func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) 
- if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), e) -} -func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrStringV(rv2i(rv).(map[uintptr]string), e) -} -func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - 
ee.EncodeString(c_UTF8, v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUintV(rv2i(rv).(map[uintptr]uint), e) -} -func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), e) -} -func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrUint16R(f 
*codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), e) -} -func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), e) -} -func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), e) -} -func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - 
ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), e) -} -func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrIntV(rv2i(rv).(map[uintptr]int), e) -} -func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - 
ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), e) -} -func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), e) -} -func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrInt32R(f *codecFnInfo, rv 
reflect.Value) { - fastpathTV.EncMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), e) -} -func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), e) -} -func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), e) -} -func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if 
e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), e) -} -func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), e) -} -func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[uintptr(k2)]) 
- } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) -} -func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e) -} -func (_ fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[int(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUintR(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapIntUintV(rv2i(rv).(map[int]uint), e) -} -func (_ fastpathT) EncMapIntUintV(v map[int]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) -} -func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUint16V(rv2i(rv).(map[int]uint16), e) -} -func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i 
int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUint32V(rv2i(rv).(map[int]uint32), e) -} -func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) -} -func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v 
{ - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUintptrV(rv2i(rv).(map[int]uintptr), e) -} -func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntIntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e) -} -func (_ fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntInt8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntInt8V(rv2i(rv).(map[int]int8), e) -} -func (_ fastpathT) 
EncMapIntInt8V(v map[int]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntInt16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntInt16V(rv2i(rv).(map[int]int16), e) -} -func (_ fastpathT) EncMapIntInt16V(v map[int]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) -} -func (_ fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, 
k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntInt64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntInt64V(rv2i(rv).(map[int]int64), e) -} -func (_ fastpathT) EncMapIntInt64V(v map[int]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntFloat32V(rv2i(rv).(map[int]float32), e) -} -func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - 
} - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) -} -func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) -} -func (_ fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8IntfV(rv2i(rv).(map[int8]interface{}), e) -} -func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - 
ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8StringV(rv2i(rv).(map[int8]string), e) -} -func (_ fastpathT) EncMapInt8StringV(v map[int8]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8UintV(rv2i(rv).(map[int8]uint), e) -} -func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if 
esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Uint8V(rv2i(rv).(map[int8]uint8), e) -} -func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Uint16V(rv2i(rv).(map[int8]uint16), e) -} -func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func 
(e *Encoder) fastpathEncMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Uint32V(rv2i(rv).(map[int8]uint32), e) -} -func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Uint64V(rv2i(rv).(map[int8]uint64), e) -} -func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), e) -} -func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, 
e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8IntV(rv2i(rv).(map[int8]int), e) -} -func (_ fastpathT) EncMapInt8IntV(v map[int8]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Int8V(rv2i(rv).(map[int8]int8), e) -} -func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - 
ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Int16V(rv2i(rv).(map[int8]int16), e) -} -func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Int32V(rv2i(rv).(map[int8]int32), e) -} -func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Int64R(f *codecFnInfo, 
rv reflect.Value) { - fastpathTV.EncMapInt8Int64V(rv2i(rv).(map[int8]int64), e) -} -func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Float32V(rv2i(rv).(map[int8]float32), e) -} -func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Float64V(rv2i(rv).(map[int8]float64), e) -} -func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if 
e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8BoolV(rv2i(rv).(map[int8]bool), e) -} -func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16IntfV(rv2i(rv).(map[int16]interface{}), e) -} -func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int16(k2)]) - } - } 
else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16StringV(rv2i(rv).(map[int16]string), e) -} -func (_ fastpathT) EncMapInt16StringV(v map[int16]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16UintV(rv2i(rv).(map[int16]uint), e) -} -func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapInt16Uint8V(rv2i(rv).(map[int16]uint8), e) -} -func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Uint16V(rv2i(rv).(map[int16]uint16), e) -} -func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Uint32V(rv2i(rv).(map[int16]uint32), e) -} -func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if 
e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Uint64V(rv2i(rv).(map[int16]uint64), e) -} -func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), e) -} -func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - 
ee.WriteMapElemValue() - } - e.encode(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16IntV(rv2i(rv).(map[int16]int), e) -} -func (_ fastpathT) EncMapInt16IntV(v map[int16]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Int8V(rv2i(rv).(map[int16]int8), e) -} -func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Int16R(f 
*codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Int16V(rv2i(rv).(map[int16]int16), e) -} -func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Int32V(rv2i(rv).(map[int16]int32), e) -} -func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Int64V(rv2i(rv).(map[int16]int64), e) -} -func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - 
ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Float32V(rv2i(rv).(map[int16]float32), e) -} -func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Float64V(rv2i(rv).(map[int16]float64), e) -} -func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if 
esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16BoolV(rv2i(rv).(map[int16]bool), e) -} -func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) -} -func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) 
fastpathEncMapInt32StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) -} -func (_ fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32UintV(rv2i(rv).(map[int32]uint), e) -} -func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) -} -func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, 
e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Uint16V(rv2i(rv).(map[int32]uint16), e) -} -func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Uint32V(rv2i(rv).(map[int32]uint32), e) -} -func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - 
ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) -} -func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), e) -} -func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } 
- } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) -} -func (_ fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Int8V(rv2i(rv).(map[int32]int8), e) -} -func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Int16V(rv2i(rv).(map[int32]int16), e) -} -func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, 
esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) -} -func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Int64V(rv2i(rv).(map[int32]int64), e) -} -func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - 
ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Float32V(rv2i(rv).(map[int32]float32), e) -} -func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) -} -func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - 
ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) -} -func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64IntfV(rv2i(rv).(map[int64]interface{}), e) -} -func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64StringV(rv2i(rv).(map[int64]string), e) -} -func (_ fastpathT) EncMapInt64StringV(v map[int64]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - 
return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64UintV(rv2i(rv).(map[int64]uint), e) -} -func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64Uint8V(rv2i(rv).(map[int64]uint8), e) -} -func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - 
ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64Uint16V(rv2i(rv).(map[int64]uint16), e) -} -func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64Uint32V(rv2i(rv).(map[int64]uint32), e) -} -func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - 
ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64Uint64V(rv2i(rv).(map[int64]uint64), e) -} -func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), e) -} -func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64IntV(rv2i(rv).(map[int64]int), e) -} -func (_ fastpathT) EncMapInt64IntV(v map[int64]int, e 
*Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64Int8V(rv2i(rv).(map[int64]int8), e) -} -func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64Int16V(rv2i(rv).(map[int64]int16), e) -} -func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := 
range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64Int32V(rv2i(rv).(map[int64]int32), e) -} -func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64Int64V(rv2i(rv).(map[int64]int64), e) -} -func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - 
ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64Float32V(rv2i(rv).(map[int64]float32), e) -} -func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64Float64V(rv2i(rv).(map[int64]float64), e) -} -func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt64BoolV(rv2i(rv).(map[int64]bool), e) -} -func (_ fastpathT) EncMapInt64BoolV(v 
map[int64]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolIntfV(rv2i(rv).(map[bool]interface{}), e) -} -func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolStringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolStringV(rv2i(rv).(map[bool]string), e) -} -func (_ fastpathT) EncMapBoolStringV(v map[bool]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - 
ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeString(c_UTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolUintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolUintV(rv2i(rv).(map[bool]uint), e) -} -func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolUint8V(rv2i(rv).(map[bool]uint8), e) -} -func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - 
ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolUint16V(rv2i(rv).(map[bool]uint16), e) -} -func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolUint32V(rv2i(rv).(map[bool]uint32), e) -} -func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolUint64V(rv2i(rv).(map[bool]uint64), e) -} -func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, 
e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), e) -} -func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolIntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolIntV(rv2i(rv).(map[bool]int), e) -} -func (_ fastpathT) EncMapBoolIntV(v map[bool]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - 
ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolInt8V(rv2i(rv).(map[bool]int8), e) -} -func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolInt16V(rv2i(rv).(map[bool]int16), e) -} -func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapBoolInt32V(rv2i(rv).(map[bool]int32), e) -} -func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolInt64V(rv2i(rv).(map[bool]int64), e) -} -func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolFloat32V(rv2i(rv).(map[bool]float32), e) -} -func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for 
k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolFloat64V(rv2i(rv).(map[bool]float64), e) -} -func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapBoolBoolV(rv2i(rv).(map[bool]bool), e) -} -func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeBool(bool(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - 
ee.EncodeBool(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -// -- decode - -// -- -- fast path type switch -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - switch v := iv.(type) { - - case []interface{}: - fastpathTV.DecSliceIntfV(v, false, d) - case *[]interface{}: - if v2, changed2 := fastpathTV.DecSliceIntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]interface{}: - fastpathTV.DecMapIntfIntfV(v, false, d) - case *map[interface{}]interface{}: - if v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]string: - fastpathTV.DecMapIntfStringV(v, false, d) - case *map[interface{}]string: - if v2, changed2 := fastpathTV.DecMapIntfStringV(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]uint: - fastpathTV.DecMapIntfUintV(v, false, d) - case *map[interface{}]uint: - if v2, changed2 := fastpathTV.DecMapIntfUintV(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]uint8: - fastpathTV.DecMapIntfUint8V(v, false, d) - case *map[interface{}]uint8: - if v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]uint16: - fastpathTV.DecMapIntfUint16V(v, false, d) - case *map[interface{}]uint16: - if v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]uint32: - fastpathTV.DecMapIntfUint32V(v, false, d) - case *map[interface{}]uint32: - if v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]uint64: - fastpathTV.DecMapIntfUint64V(v, false, d) - case *map[interface{}]uint64: - if v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]uintptr: - fastpathTV.DecMapIntfUintptrV(v, false, d) - case *map[interface{}]uintptr: - if v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, true, d); changed2 { - *v = 
v2 - } - - case map[interface{}]int: - fastpathTV.DecMapIntfIntV(v, false, d) - case *map[interface{}]int: - if v2, changed2 := fastpathTV.DecMapIntfIntV(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]int8: - fastpathTV.DecMapIntfInt8V(v, false, d) - case *map[interface{}]int8: - if v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]int16: - fastpathTV.DecMapIntfInt16V(v, false, d) - case *map[interface{}]int16: - if v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]int32: - fastpathTV.DecMapIntfInt32V(v, false, d) - case *map[interface{}]int32: - if v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]int64: - fastpathTV.DecMapIntfInt64V(v, false, d) - case *map[interface{}]int64: - if v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]float32: - fastpathTV.DecMapIntfFloat32V(v, false, d) - case *map[interface{}]float32: - if v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]float64: - fastpathTV.DecMapIntfFloat64V(v, false, d) - case *map[interface{}]float64: - if v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, true, d); changed2 { - *v = v2 - } - - case map[interface{}]bool: - fastpathTV.DecMapIntfBoolV(v, false, d) - case *map[interface{}]bool: - if v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, true, d); changed2 { - *v = v2 - } - - case []string: - fastpathTV.DecSliceStringV(v, false, d) - case *[]string: - if v2, changed2 := fastpathTV.DecSliceStringV(*v, true, d); changed2 { - *v = v2 - } - - case map[string]interface{}: - fastpathTV.DecMapStringIntfV(v, false, d) - case *map[string]interface{}: - if v2, changed2 := fastpathTV.DecMapStringIntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[string]string: - fastpathTV.DecMapStringStringV(v, false, d) - 
case *map[string]string: - if v2, changed2 := fastpathTV.DecMapStringStringV(*v, true, d); changed2 { - *v = v2 - } - - case map[string]uint: - fastpathTV.DecMapStringUintV(v, false, d) - case *map[string]uint: - if v2, changed2 := fastpathTV.DecMapStringUintV(*v, true, d); changed2 { - *v = v2 - } - - case map[string]uint8: - fastpathTV.DecMapStringUint8V(v, false, d) - case *map[string]uint8: - if v2, changed2 := fastpathTV.DecMapStringUint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[string]uint16: - fastpathTV.DecMapStringUint16V(v, false, d) - case *map[string]uint16: - if v2, changed2 := fastpathTV.DecMapStringUint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[string]uint32: - fastpathTV.DecMapStringUint32V(v, false, d) - case *map[string]uint32: - if v2, changed2 := fastpathTV.DecMapStringUint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[string]uint64: - fastpathTV.DecMapStringUint64V(v, false, d) - case *map[string]uint64: - if v2, changed2 := fastpathTV.DecMapStringUint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[string]uintptr: - fastpathTV.DecMapStringUintptrV(v, false, d) - case *map[string]uintptr: - if v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[string]int: - fastpathTV.DecMapStringIntV(v, false, d) - case *map[string]int: - if v2, changed2 := fastpathTV.DecMapStringIntV(*v, true, d); changed2 { - *v = v2 - } - - case map[string]int8: - fastpathTV.DecMapStringInt8V(v, false, d) - case *map[string]int8: - if v2, changed2 := fastpathTV.DecMapStringInt8V(*v, true, d); changed2 { - *v = v2 - } - - case map[string]int16: - fastpathTV.DecMapStringInt16V(v, false, d) - case *map[string]int16: - if v2, changed2 := fastpathTV.DecMapStringInt16V(*v, true, d); changed2 { - *v = v2 - } - - case map[string]int32: - fastpathTV.DecMapStringInt32V(v, false, d) - case *map[string]int32: - if v2, changed2 := fastpathTV.DecMapStringInt32V(*v, true, d); changed2 { - *v = v2 - } 
- - case map[string]int64: - fastpathTV.DecMapStringInt64V(v, false, d) - case *map[string]int64: - if v2, changed2 := fastpathTV.DecMapStringInt64V(*v, true, d); changed2 { - *v = v2 - } - - case map[string]float32: - fastpathTV.DecMapStringFloat32V(v, false, d) - case *map[string]float32: - if v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, true, d); changed2 { - *v = v2 - } - - case map[string]float64: - fastpathTV.DecMapStringFloat64V(v, false, d) - case *map[string]float64: - if v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, true, d); changed2 { - *v = v2 - } - - case map[string]bool: - fastpathTV.DecMapStringBoolV(v, false, d) - case *map[string]bool: - if v2, changed2 := fastpathTV.DecMapStringBoolV(*v, true, d); changed2 { - *v = v2 - } - - case []float32: - fastpathTV.DecSliceFloat32V(v, false, d) - case *[]float32: - if v2, changed2 := fastpathTV.DecSliceFloat32V(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]interface{}: - fastpathTV.DecMapFloat32IntfV(v, false, d) - case *map[float32]interface{}: - if v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]string: - fastpathTV.DecMapFloat32StringV(v, false, d) - case *map[float32]string: - if v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]uint: - fastpathTV.DecMapFloat32UintV(v, false, d) - case *map[float32]uint: - if v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]uint8: - fastpathTV.DecMapFloat32Uint8V(v, false, d) - case *map[float32]uint8: - if v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]uint16: - fastpathTV.DecMapFloat32Uint16V(v, false, d) - case *map[float32]uint16: - if v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]uint32: - fastpathTV.DecMapFloat32Uint32V(v, false, d) - case 
*map[float32]uint32: - if v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]uint64: - fastpathTV.DecMapFloat32Uint64V(v, false, d) - case *map[float32]uint64: - if v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]uintptr: - fastpathTV.DecMapFloat32UintptrV(v, false, d) - case *map[float32]uintptr: - if v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]int: - fastpathTV.DecMapFloat32IntV(v, false, d) - case *map[float32]int: - if v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]int8: - fastpathTV.DecMapFloat32Int8V(v, false, d) - case *map[float32]int8: - if v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]int16: - fastpathTV.DecMapFloat32Int16V(v, false, d) - case *map[float32]int16: - if v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]int32: - fastpathTV.DecMapFloat32Int32V(v, false, d) - case *map[float32]int32: - if v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]int64: - fastpathTV.DecMapFloat32Int64V(v, false, d) - case *map[float32]int64: - if v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]float32: - fastpathTV.DecMapFloat32Float32V(v, false, d) - case *map[float32]float32: - if v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]float64: - fastpathTV.DecMapFloat32Float64V(v, false, d) - case *map[float32]float64: - if v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, true, d); changed2 { - *v = v2 - } - - case map[float32]bool: - fastpathTV.DecMapFloat32BoolV(v, false, d) - case *map[float32]bool: - if v2, changed2 := 
fastpathTV.DecMapFloat32BoolV(*v, true, d); changed2 { - *v = v2 - } - - case []float64: - fastpathTV.DecSliceFloat64V(v, false, d) - case *[]float64: - if v2, changed2 := fastpathTV.DecSliceFloat64V(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]interface{}: - fastpathTV.DecMapFloat64IntfV(v, false, d) - case *map[float64]interface{}: - if v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]string: - fastpathTV.DecMapFloat64StringV(v, false, d) - case *map[float64]string: - if v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]uint: - fastpathTV.DecMapFloat64UintV(v, false, d) - case *map[float64]uint: - if v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]uint8: - fastpathTV.DecMapFloat64Uint8V(v, false, d) - case *map[float64]uint8: - if v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]uint16: - fastpathTV.DecMapFloat64Uint16V(v, false, d) - case *map[float64]uint16: - if v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]uint32: - fastpathTV.DecMapFloat64Uint32V(v, false, d) - case *map[float64]uint32: - if v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]uint64: - fastpathTV.DecMapFloat64Uint64V(v, false, d) - case *map[float64]uint64: - if v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]uintptr: - fastpathTV.DecMapFloat64UintptrV(v, false, d) - case *map[float64]uintptr: - if v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]int: - fastpathTV.DecMapFloat64IntV(v, false, d) - case *map[float64]int: - if v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, true, d); changed2 { - *v = v2 - } - - case 
map[float64]int8: - fastpathTV.DecMapFloat64Int8V(v, false, d) - case *map[float64]int8: - if v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]int16: - fastpathTV.DecMapFloat64Int16V(v, false, d) - case *map[float64]int16: - if v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]int32: - fastpathTV.DecMapFloat64Int32V(v, false, d) - case *map[float64]int32: - if v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]int64: - fastpathTV.DecMapFloat64Int64V(v, false, d) - case *map[float64]int64: - if v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]float32: - fastpathTV.DecMapFloat64Float32V(v, false, d) - case *map[float64]float32: - if v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]float64: - fastpathTV.DecMapFloat64Float64V(v, false, d) - case *map[float64]float64: - if v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, true, d); changed2 { - *v = v2 - } - - case map[float64]bool: - fastpathTV.DecMapFloat64BoolV(v, false, d) - case *map[float64]bool: - if v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, true, d); changed2 { - *v = v2 - } - - case []uint: - fastpathTV.DecSliceUintV(v, false, d) - case *[]uint: - if v2, changed2 := fastpathTV.DecSliceUintV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]interface{}: - fastpathTV.DecMapUintIntfV(v, false, d) - case *map[uint]interface{}: - if v2, changed2 := fastpathTV.DecMapUintIntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]string: - fastpathTV.DecMapUintStringV(v, false, d) - case *map[uint]string: - if v2, changed2 := fastpathTV.DecMapUintStringV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]uint: - fastpathTV.DecMapUintUintV(v, false, d) - case *map[uint]uint: - if v2, changed2 := 
fastpathTV.DecMapUintUintV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]uint8: - fastpathTV.DecMapUintUint8V(v, false, d) - case *map[uint]uint8: - if v2, changed2 := fastpathTV.DecMapUintUint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]uint16: - fastpathTV.DecMapUintUint16V(v, false, d) - case *map[uint]uint16: - if v2, changed2 := fastpathTV.DecMapUintUint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]uint32: - fastpathTV.DecMapUintUint32V(v, false, d) - case *map[uint]uint32: - if v2, changed2 := fastpathTV.DecMapUintUint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]uint64: - fastpathTV.DecMapUintUint64V(v, false, d) - case *map[uint]uint64: - if v2, changed2 := fastpathTV.DecMapUintUint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]uintptr: - fastpathTV.DecMapUintUintptrV(v, false, d) - case *map[uint]uintptr: - if v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]int: - fastpathTV.DecMapUintIntV(v, false, d) - case *map[uint]int: - if v2, changed2 := fastpathTV.DecMapUintIntV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]int8: - fastpathTV.DecMapUintInt8V(v, false, d) - case *map[uint]int8: - if v2, changed2 := fastpathTV.DecMapUintInt8V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]int16: - fastpathTV.DecMapUintInt16V(v, false, d) - case *map[uint]int16: - if v2, changed2 := fastpathTV.DecMapUintInt16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]int32: - fastpathTV.DecMapUintInt32V(v, false, d) - case *map[uint]int32: - if v2, changed2 := fastpathTV.DecMapUintInt32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]int64: - fastpathTV.DecMapUintInt64V(v, false, d) - case *map[uint]int64: - if v2, changed2 := fastpathTV.DecMapUintInt64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]float32: - fastpathTV.DecMapUintFloat32V(v, false, d) - case *map[uint]float32: - if v2, changed2 := 
fastpathTV.DecMapUintFloat32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]float64: - fastpathTV.DecMapUintFloat64V(v, false, d) - case *map[uint]float64: - if v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint]bool: - fastpathTV.DecMapUintBoolV(v, false, d) - case *map[uint]bool: - if v2, changed2 := fastpathTV.DecMapUintBoolV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]interface{}: - fastpathTV.DecMapUint8IntfV(v, false, d) - case *map[uint8]interface{}: - if v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]string: - fastpathTV.DecMapUint8StringV(v, false, d) - case *map[uint8]string: - if v2, changed2 := fastpathTV.DecMapUint8StringV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]uint: - fastpathTV.DecMapUint8UintV(v, false, d) - case *map[uint8]uint: - if v2, changed2 := fastpathTV.DecMapUint8UintV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]uint8: - fastpathTV.DecMapUint8Uint8V(v, false, d) - case *map[uint8]uint8: - if v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]uint16: - fastpathTV.DecMapUint8Uint16V(v, false, d) - case *map[uint8]uint16: - if v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]uint32: - fastpathTV.DecMapUint8Uint32V(v, false, d) - case *map[uint8]uint32: - if v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]uint64: - fastpathTV.DecMapUint8Uint64V(v, false, d) - case *map[uint8]uint64: - if v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]uintptr: - fastpathTV.DecMapUint8UintptrV(v, false, d) - case *map[uint8]uintptr: - if v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]int: - fastpathTV.DecMapUint8IntV(v, false, 
d) - case *map[uint8]int: - if v2, changed2 := fastpathTV.DecMapUint8IntV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]int8: - fastpathTV.DecMapUint8Int8V(v, false, d) - case *map[uint8]int8: - if v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]int16: - fastpathTV.DecMapUint8Int16V(v, false, d) - case *map[uint8]int16: - if v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]int32: - fastpathTV.DecMapUint8Int32V(v, false, d) - case *map[uint8]int32: - if v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]int64: - fastpathTV.DecMapUint8Int64V(v, false, d) - case *map[uint8]int64: - if v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]float32: - fastpathTV.DecMapUint8Float32V(v, false, d) - case *map[uint8]float32: - if v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]float64: - fastpathTV.DecMapUint8Float64V(v, false, d) - case *map[uint8]float64: - if v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint8]bool: - fastpathTV.DecMapUint8BoolV(v, false, d) - case *map[uint8]bool: - if v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, true, d); changed2 { - *v = v2 - } - - case []uint16: - fastpathTV.DecSliceUint16V(v, false, d) - case *[]uint16: - if v2, changed2 := fastpathTV.DecSliceUint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]interface{}: - fastpathTV.DecMapUint16IntfV(v, false, d) - case *map[uint16]interface{}: - if v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]string: - fastpathTV.DecMapUint16StringV(v, false, d) - case *map[uint16]string: - if v2, changed2 := fastpathTV.DecMapUint16StringV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]uint: - 
fastpathTV.DecMapUint16UintV(v, false, d) - case *map[uint16]uint: - if v2, changed2 := fastpathTV.DecMapUint16UintV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]uint8: - fastpathTV.DecMapUint16Uint8V(v, false, d) - case *map[uint16]uint8: - if v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]uint16: - fastpathTV.DecMapUint16Uint16V(v, false, d) - case *map[uint16]uint16: - if v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]uint32: - fastpathTV.DecMapUint16Uint32V(v, false, d) - case *map[uint16]uint32: - if v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]uint64: - fastpathTV.DecMapUint16Uint64V(v, false, d) - case *map[uint16]uint64: - if v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]uintptr: - fastpathTV.DecMapUint16UintptrV(v, false, d) - case *map[uint16]uintptr: - if v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]int: - fastpathTV.DecMapUint16IntV(v, false, d) - case *map[uint16]int: - if v2, changed2 := fastpathTV.DecMapUint16IntV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]int8: - fastpathTV.DecMapUint16Int8V(v, false, d) - case *map[uint16]int8: - if v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]int16: - fastpathTV.DecMapUint16Int16V(v, false, d) - case *map[uint16]int16: - if v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]int32: - fastpathTV.DecMapUint16Int32V(v, false, d) - case *map[uint16]int32: - if v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]int64: - fastpathTV.DecMapUint16Int64V(v, false, d) - case *map[uint16]int64: - if v2, changed2 := 
fastpathTV.DecMapUint16Int64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]float32: - fastpathTV.DecMapUint16Float32V(v, false, d) - case *map[uint16]float32: - if v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]float64: - fastpathTV.DecMapUint16Float64V(v, false, d) - case *map[uint16]float64: - if v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint16]bool: - fastpathTV.DecMapUint16BoolV(v, false, d) - case *map[uint16]bool: - if v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, true, d); changed2 { - *v = v2 - } - - case []uint32: - fastpathTV.DecSliceUint32V(v, false, d) - case *[]uint32: - if v2, changed2 := fastpathTV.DecSliceUint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]interface{}: - fastpathTV.DecMapUint32IntfV(v, false, d) - case *map[uint32]interface{}: - if v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]string: - fastpathTV.DecMapUint32StringV(v, false, d) - case *map[uint32]string: - if v2, changed2 := fastpathTV.DecMapUint32StringV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]uint: - fastpathTV.DecMapUint32UintV(v, false, d) - case *map[uint32]uint: - if v2, changed2 := fastpathTV.DecMapUint32UintV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]uint8: - fastpathTV.DecMapUint32Uint8V(v, false, d) - case *map[uint32]uint8: - if v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]uint16: - fastpathTV.DecMapUint32Uint16V(v, false, d) - case *map[uint32]uint16: - if v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]uint32: - fastpathTV.DecMapUint32Uint32V(v, false, d) - case *map[uint32]uint32: - if v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]uint64: - 
fastpathTV.DecMapUint32Uint64V(v, false, d) - case *map[uint32]uint64: - if v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]uintptr: - fastpathTV.DecMapUint32UintptrV(v, false, d) - case *map[uint32]uintptr: - if v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]int: - fastpathTV.DecMapUint32IntV(v, false, d) - case *map[uint32]int: - if v2, changed2 := fastpathTV.DecMapUint32IntV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]int8: - fastpathTV.DecMapUint32Int8V(v, false, d) - case *map[uint32]int8: - if v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]int16: - fastpathTV.DecMapUint32Int16V(v, false, d) - case *map[uint32]int16: - if v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]int32: - fastpathTV.DecMapUint32Int32V(v, false, d) - case *map[uint32]int32: - if v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]int64: - fastpathTV.DecMapUint32Int64V(v, false, d) - case *map[uint32]int64: - if v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]float32: - fastpathTV.DecMapUint32Float32V(v, false, d) - case *map[uint32]float32: - if v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]float64: - fastpathTV.DecMapUint32Float64V(v, false, d) - case *map[uint32]float64: - if v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint32]bool: - fastpathTV.DecMapUint32BoolV(v, false, d) - case *map[uint32]bool: - if v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, true, d); changed2 { - *v = v2 - } - - case []uint64: - fastpathTV.DecSliceUint64V(v, false, d) - case *[]uint64: - if v2, changed2 := fastpathTV.DecSliceUint64V(*v, true, 
d); changed2 { - *v = v2 - } - - case map[uint64]interface{}: - fastpathTV.DecMapUint64IntfV(v, false, d) - case *map[uint64]interface{}: - if v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]string: - fastpathTV.DecMapUint64StringV(v, false, d) - case *map[uint64]string: - if v2, changed2 := fastpathTV.DecMapUint64StringV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]uint: - fastpathTV.DecMapUint64UintV(v, false, d) - case *map[uint64]uint: - if v2, changed2 := fastpathTV.DecMapUint64UintV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]uint8: - fastpathTV.DecMapUint64Uint8V(v, false, d) - case *map[uint64]uint8: - if v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]uint16: - fastpathTV.DecMapUint64Uint16V(v, false, d) - case *map[uint64]uint16: - if v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]uint32: - fastpathTV.DecMapUint64Uint32V(v, false, d) - case *map[uint64]uint32: - if v2, changed2 := fastpathTV.DecMapUint64Uint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]uint64: - fastpathTV.DecMapUint64Uint64V(v, false, d) - case *map[uint64]uint64: - if v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]uintptr: - fastpathTV.DecMapUint64UintptrV(v, false, d) - case *map[uint64]uintptr: - if v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]int: - fastpathTV.DecMapUint64IntV(v, false, d) - case *map[uint64]int: - if v2, changed2 := fastpathTV.DecMapUint64IntV(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]int8: - fastpathTV.DecMapUint64Int8V(v, false, d) - case *map[uint64]int8: - if v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]int16: - fastpathTV.DecMapUint64Int16V(v, false, d) 
- case *map[uint64]int16: - if v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]int32: - fastpathTV.DecMapUint64Int32V(v, false, d) - case *map[uint64]int32: - if v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]int64: - fastpathTV.DecMapUint64Int64V(v, false, d) - case *map[uint64]int64: - if v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]float32: - fastpathTV.DecMapUint64Float32V(v, false, d) - case *map[uint64]float32: - if v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]float64: - fastpathTV.DecMapUint64Float64V(v, false, d) - case *map[uint64]float64: - if v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uint64]bool: - fastpathTV.DecMapUint64BoolV(v, false, d) - case *map[uint64]bool: - if v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, true, d); changed2 { - *v = v2 - } - - case []uintptr: - fastpathTV.DecSliceUintptrV(v, false, d) - case *[]uintptr: - if v2, changed2 := fastpathTV.DecSliceUintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]interface{}: - fastpathTV.DecMapUintptrIntfV(v, false, d) - case *map[uintptr]interface{}: - if v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]string: - fastpathTV.DecMapUintptrStringV(v, false, d) - case *map[uintptr]string: - if v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]uint: - fastpathTV.DecMapUintptrUintV(v, false, d) - case *map[uintptr]uint: - if v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]uint8: - fastpathTV.DecMapUintptrUint8V(v, false, d) - case *map[uintptr]uint8: - if v2, changed2 := fastpathTV.DecMapUintptrUint8V(*v, true, d); changed2 
{ - *v = v2 - } - - case map[uintptr]uint16: - fastpathTV.DecMapUintptrUint16V(v, false, d) - case *map[uintptr]uint16: - if v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]uint32: - fastpathTV.DecMapUintptrUint32V(v, false, d) - case *map[uintptr]uint32: - if v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]uint64: - fastpathTV.DecMapUintptrUint64V(v, false, d) - case *map[uintptr]uint64: - if v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]uintptr: - fastpathTV.DecMapUintptrUintptrV(v, false, d) - case *map[uintptr]uintptr: - if v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]int: - fastpathTV.DecMapUintptrIntV(v, false, d) - case *map[uintptr]int: - if v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]int8: - fastpathTV.DecMapUintptrInt8V(v, false, d) - case *map[uintptr]int8: - if v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]int16: - fastpathTV.DecMapUintptrInt16V(v, false, d) - case *map[uintptr]int16: - if v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]int32: - fastpathTV.DecMapUintptrInt32V(v, false, d) - case *map[uintptr]int32: - if v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]int64: - fastpathTV.DecMapUintptrInt64V(v, false, d) - case *map[uintptr]int64: - if v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]float32: - fastpathTV.DecMapUintptrFloat32V(v, false, d) - case *map[uintptr]float32: - if v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]float64: - 
fastpathTV.DecMapUintptrFloat64V(v, false, d) - case *map[uintptr]float64: - if v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, true, d); changed2 { - *v = v2 - } - - case map[uintptr]bool: - fastpathTV.DecMapUintptrBoolV(v, false, d) - case *map[uintptr]bool: - if v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, true, d); changed2 { - *v = v2 - } - - case []int: - fastpathTV.DecSliceIntV(v, false, d) - case *[]int: - if v2, changed2 := fastpathTV.DecSliceIntV(*v, true, d); changed2 { - *v = v2 - } - - case map[int]interface{}: - fastpathTV.DecMapIntIntfV(v, false, d) - case *map[int]interface{}: - if v2, changed2 := fastpathTV.DecMapIntIntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[int]string: - fastpathTV.DecMapIntStringV(v, false, d) - case *map[int]string: - if v2, changed2 := fastpathTV.DecMapIntStringV(*v, true, d); changed2 { - *v = v2 - } - - case map[int]uint: - fastpathTV.DecMapIntUintV(v, false, d) - case *map[int]uint: - if v2, changed2 := fastpathTV.DecMapIntUintV(*v, true, d); changed2 { - *v = v2 - } - - case map[int]uint8: - fastpathTV.DecMapIntUint8V(v, false, d) - case *map[int]uint8: - if v2, changed2 := fastpathTV.DecMapIntUint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[int]uint16: - fastpathTV.DecMapIntUint16V(v, false, d) - case *map[int]uint16: - if v2, changed2 := fastpathTV.DecMapIntUint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[int]uint32: - fastpathTV.DecMapIntUint32V(v, false, d) - case *map[int]uint32: - if v2, changed2 := fastpathTV.DecMapIntUint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int]uint64: - fastpathTV.DecMapIntUint64V(v, false, d) - case *map[int]uint64: - if v2, changed2 := fastpathTV.DecMapIntUint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int]uintptr: - fastpathTV.DecMapIntUintptrV(v, false, d) - case *map[int]uintptr: - if v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[int]int: - fastpathTV.DecMapIntIntV(v, 
false, d) - case *map[int]int: - if v2, changed2 := fastpathTV.DecMapIntIntV(*v, true, d); changed2 { - *v = v2 - } - - case map[int]int8: - fastpathTV.DecMapIntInt8V(v, false, d) - case *map[int]int8: - if v2, changed2 := fastpathTV.DecMapIntInt8V(*v, true, d); changed2 { - *v = v2 - } - - case map[int]int16: - fastpathTV.DecMapIntInt16V(v, false, d) - case *map[int]int16: - if v2, changed2 := fastpathTV.DecMapIntInt16V(*v, true, d); changed2 { - *v = v2 - } - - case map[int]int32: - fastpathTV.DecMapIntInt32V(v, false, d) - case *map[int]int32: - if v2, changed2 := fastpathTV.DecMapIntInt32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int]int64: - fastpathTV.DecMapIntInt64V(v, false, d) - case *map[int]int64: - if v2, changed2 := fastpathTV.DecMapIntInt64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int]float32: - fastpathTV.DecMapIntFloat32V(v, false, d) - case *map[int]float32: - if v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int]float64: - fastpathTV.DecMapIntFloat64V(v, false, d) - case *map[int]float64: - if v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int]bool: - fastpathTV.DecMapIntBoolV(v, false, d) - case *map[int]bool: - if v2, changed2 := fastpathTV.DecMapIntBoolV(*v, true, d); changed2 { - *v = v2 - } - - case []int8: - fastpathTV.DecSliceInt8V(v, false, d) - case *[]int8: - if v2, changed2 := fastpathTV.DecSliceInt8V(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]interface{}: - fastpathTV.DecMapInt8IntfV(v, false, d) - case *map[int8]interface{}: - if v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]string: - fastpathTV.DecMapInt8StringV(v, false, d) - case *map[int8]string: - if v2, changed2 := fastpathTV.DecMapInt8StringV(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]uint: - fastpathTV.DecMapInt8UintV(v, false, d) - case *map[int8]uint: - if v2, changed2 := 
fastpathTV.DecMapInt8UintV(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]uint8: - fastpathTV.DecMapInt8Uint8V(v, false, d) - case *map[int8]uint8: - if v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]uint16: - fastpathTV.DecMapInt8Uint16V(v, false, d) - case *map[int8]uint16: - if v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]uint32: - fastpathTV.DecMapInt8Uint32V(v, false, d) - case *map[int8]uint32: - if v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]uint64: - fastpathTV.DecMapInt8Uint64V(v, false, d) - case *map[int8]uint64: - if v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]uintptr: - fastpathTV.DecMapInt8UintptrV(v, false, d) - case *map[int8]uintptr: - if v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]int: - fastpathTV.DecMapInt8IntV(v, false, d) - case *map[int8]int: - if v2, changed2 := fastpathTV.DecMapInt8IntV(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]int8: - fastpathTV.DecMapInt8Int8V(v, false, d) - case *map[int8]int8: - if v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]int16: - fastpathTV.DecMapInt8Int16V(v, false, d) - case *map[int8]int16: - if v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]int32: - fastpathTV.DecMapInt8Int32V(v, false, d) - case *map[int8]int32: - if v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]int64: - fastpathTV.DecMapInt8Int64V(v, false, d) - case *map[int8]int64: - if v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]float32: - fastpathTV.DecMapInt8Float32V(v, false, d) - case *map[int8]float32: - if v2, changed2 := 
fastpathTV.DecMapInt8Float32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]float64: - fastpathTV.DecMapInt8Float64V(v, false, d) - case *map[int8]float64: - if v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int8]bool: - fastpathTV.DecMapInt8BoolV(v, false, d) - case *map[int8]bool: - if v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, true, d); changed2 { - *v = v2 - } - - case []int16: - fastpathTV.DecSliceInt16V(v, false, d) - case *[]int16: - if v2, changed2 := fastpathTV.DecSliceInt16V(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]interface{}: - fastpathTV.DecMapInt16IntfV(v, false, d) - case *map[int16]interface{}: - if v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]string: - fastpathTV.DecMapInt16StringV(v, false, d) - case *map[int16]string: - if v2, changed2 := fastpathTV.DecMapInt16StringV(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]uint: - fastpathTV.DecMapInt16UintV(v, false, d) - case *map[int16]uint: - if v2, changed2 := fastpathTV.DecMapInt16UintV(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]uint8: - fastpathTV.DecMapInt16Uint8V(v, false, d) - case *map[int16]uint8: - if v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]uint16: - fastpathTV.DecMapInt16Uint16V(v, false, d) - case *map[int16]uint16: - if v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]uint32: - fastpathTV.DecMapInt16Uint32V(v, false, d) - case *map[int16]uint32: - if v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]uint64: - fastpathTV.DecMapInt16Uint64V(v, false, d) - case *map[int16]uint64: - if v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]uintptr: - fastpathTV.DecMapInt16UintptrV(v, false, d) - case 
*map[int16]uintptr: - if v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]int: - fastpathTV.DecMapInt16IntV(v, false, d) - case *map[int16]int: - if v2, changed2 := fastpathTV.DecMapInt16IntV(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]int8: - fastpathTV.DecMapInt16Int8V(v, false, d) - case *map[int16]int8: - if v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]int16: - fastpathTV.DecMapInt16Int16V(v, false, d) - case *map[int16]int16: - if v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]int32: - fastpathTV.DecMapInt16Int32V(v, false, d) - case *map[int16]int32: - if v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]int64: - fastpathTV.DecMapInt16Int64V(v, false, d) - case *map[int16]int64: - if v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]float32: - fastpathTV.DecMapInt16Float32V(v, false, d) - case *map[int16]float32: - if v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]float64: - fastpathTV.DecMapInt16Float64V(v, false, d) - case *map[int16]float64: - if v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int16]bool: - fastpathTV.DecMapInt16BoolV(v, false, d) - case *map[int16]bool: - if v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, true, d); changed2 { - *v = v2 - } - - case []int32: - fastpathTV.DecSliceInt32V(v, false, d) - case *[]int32: - if v2, changed2 := fastpathTV.DecSliceInt32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]interface{}: - fastpathTV.DecMapInt32IntfV(v, false, d) - case *map[int32]interface{}: - if v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]string: - 
fastpathTV.DecMapInt32StringV(v, false, d) - case *map[int32]string: - if v2, changed2 := fastpathTV.DecMapInt32StringV(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]uint: - fastpathTV.DecMapInt32UintV(v, false, d) - case *map[int32]uint: - if v2, changed2 := fastpathTV.DecMapInt32UintV(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]uint8: - fastpathTV.DecMapInt32Uint8V(v, false, d) - case *map[int32]uint8: - if v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]uint16: - fastpathTV.DecMapInt32Uint16V(v, false, d) - case *map[int32]uint16: - if v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]uint32: - fastpathTV.DecMapInt32Uint32V(v, false, d) - case *map[int32]uint32: - if v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]uint64: - fastpathTV.DecMapInt32Uint64V(v, false, d) - case *map[int32]uint64: - if v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]uintptr: - fastpathTV.DecMapInt32UintptrV(v, false, d) - case *map[int32]uintptr: - if v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]int: - fastpathTV.DecMapInt32IntV(v, false, d) - case *map[int32]int: - if v2, changed2 := fastpathTV.DecMapInt32IntV(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]int8: - fastpathTV.DecMapInt32Int8V(v, false, d) - case *map[int32]int8: - if v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]int16: - fastpathTV.DecMapInt32Int16V(v, false, d) - case *map[int32]int16: - if v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]int32: - fastpathTV.DecMapInt32Int32V(v, false, d) - case *map[int32]int32: - if v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, true, d); changed2 { - *v = v2 
- } - - case map[int32]int64: - fastpathTV.DecMapInt32Int64V(v, false, d) - case *map[int32]int64: - if v2, changed2 := fastpathTV.DecMapInt32Int64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]float32: - fastpathTV.DecMapInt32Float32V(v, false, d) - case *map[int32]float32: - if v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]float64: - fastpathTV.DecMapInt32Float64V(v, false, d) - case *map[int32]float64: - if v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int32]bool: - fastpathTV.DecMapInt32BoolV(v, false, d) - case *map[int32]bool: - if v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, true, d); changed2 { - *v = v2 - } - - case []int64: - fastpathTV.DecSliceInt64V(v, false, d) - case *[]int64: - if v2, changed2 := fastpathTV.DecSliceInt64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]interface{}: - fastpathTV.DecMapInt64IntfV(v, false, d) - case *map[int64]interface{}: - if v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]string: - fastpathTV.DecMapInt64StringV(v, false, d) - case *map[int64]string: - if v2, changed2 := fastpathTV.DecMapInt64StringV(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]uint: - fastpathTV.DecMapInt64UintV(v, false, d) - case *map[int64]uint: - if v2, changed2 := fastpathTV.DecMapInt64UintV(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]uint8: - fastpathTV.DecMapInt64Uint8V(v, false, d) - case *map[int64]uint8: - if v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]uint16: - fastpathTV.DecMapInt64Uint16V(v, false, d) - case *map[int64]uint16: - if v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]uint32: - fastpathTV.DecMapInt64Uint32V(v, false, d) - case *map[int64]uint32: - if v2, changed2 := 
fastpathTV.DecMapInt64Uint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]uint64: - fastpathTV.DecMapInt64Uint64V(v, false, d) - case *map[int64]uint64: - if v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]uintptr: - fastpathTV.DecMapInt64UintptrV(v, false, d) - case *map[int64]uintptr: - if v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]int: - fastpathTV.DecMapInt64IntV(v, false, d) - case *map[int64]int: - if v2, changed2 := fastpathTV.DecMapInt64IntV(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]int8: - fastpathTV.DecMapInt64Int8V(v, false, d) - case *map[int64]int8: - if v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]int16: - fastpathTV.DecMapInt64Int16V(v, false, d) - case *map[int64]int16: - if v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]int32: - fastpathTV.DecMapInt64Int32V(v, false, d) - case *map[int64]int32: - if v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]int64: - fastpathTV.DecMapInt64Int64V(v, false, d) - case *map[int64]int64: - if v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]float32: - fastpathTV.DecMapInt64Float32V(v, false, d) - case *map[int64]float32: - if v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]float64: - fastpathTV.DecMapInt64Float64V(v, false, d) - case *map[int64]float64: - if v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, true, d); changed2 { - *v = v2 - } - - case map[int64]bool: - fastpathTV.DecMapInt64BoolV(v, false, d) - case *map[int64]bool: - if v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, true, d); changed2 { - *v = v2 - } - - case []bool: - fastpathTV.DecSliceBoolV(v, false, d) - case *[]bool: - if 
v2, changed2 := fastpathTV.DecSliceBoolV(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]interface{}: - fastpathTV.DecMapBoolIntfV(v, false, d) - case *map[bool]interface{}: - if v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]string: - fastpathTV.DecMapBoolStringV(v, false, d) - case *map[bool]string: - if v2, changed2 := fastpathTV.DecMapBoolStringV(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]uint: - fastpathTV.DecMapBoolUintV(v, false, d) - case *map[bool]uint: - if v2, changed2 := fastpathTV.DecMapBoolUintV(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]uint8: - fastpathTV.DecMapBoolUint8V(v, false, d) - case *map[bool]uint8: - if v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]uint16: - fastpathTV.DecMapBoolUint16V(v, false, d) - case *map[bool]uint16: - if v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]uint32: - fastpathTV.DecMapBoolUint32V(v, false, d) - case *map[bool]uint32: - if v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]uint64: - fastpathTV.DecMapBoolUint64V(v, false, d) - case *map[bool]uint64: - if v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]uintptr: - fastpathTV.DecMapBoolUintptrV(v, false, d) - case *map[bool]uintptr: - if v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]int: - fastpathTV.DecMapBoolIntV(v, false, d) - case *map[bool]int: - if v2, changed2 := fastpathTV.DecMapBoolIntV(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]int8: - fastpathTV.DecMapBoolInt8V(v, false, d) - case *map[bool]int8: - if v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]int16: - fastpathTV.DecMapBoolInt16V(v, false, d) - case *map[bool]int16: - if v2, 
changed2 := fastpathTV.DecMapBoolInt16V(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]int32: - fastpathTV.DecMapBoolInt32V(v, false, d) - case *map[bool]int32: - if v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]int64: - fastpathTV.DecMapBoolInt64V(v, false, d) - case *map[bool]int64: - if v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]float32: - fastpathTV.DecMapBoolFloat32V(v, false, d) - case *map[bool]float32: - if v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]float64: - fastpathTV.DecMapBoolFloat64V(v, false, d) - case *map[bool]float64: - if v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, true, d); changed2 { - *v = v2 - } - - case map[bool]bool: - fastpathTV.DecMapBoolBoolV(v, false, d) - case *map[bool]bool: - if v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, true, d); changed2 { - *v = v2 - } - - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { - switch v := iv.(type) { - - case *[]interface{}: - *v = nil - - case *map[interface{}]interface{}: - *v = nil - - case *map[interface{}]string: - *v = nil - - case *map[interface{}]uint: - *v = nil - - case *map[interface{}]uint8: - *v = nil - - case *map[interface{}]uint16: - *v = nil - - case *map[interface{}]uint32: - *v = nil - - case *map[interface{}]uint64: - *v = nil - - case *map[interface{}]uintptr: - *v = nil - - case *map[interface{}]int: - *v = nil - - case *map[interface{}]int8: - *v = nil - - case *map[interface{}]int16: - *v = nil - - case *map[interface{}]int32: - *v = nil - - case *map[interface{}]int64: - *v = nil - - case *map[interface{}]float32: - *v = nil - - case *map[interface{}]float64: - *v = nil - - case *map[interface{}]bool: - *v = nil - - 
case *[]string: - *v = nil - - case *map[string]interface{}: - *v = nil - - case *map[string]string: - *v = nil - - case *map[string]uint: - *v = nil - - case *map[string]uint8: - *v = nil - - case *map[string]uint16: - *v = nil - - case *map[string]uint32: - *v = nil - - case *map[string]uint64: - *v = nil - - case *map[string]uintptr: - *v = nil - - case *map[string]int: - *v = nil - - case *map[string]int8: - *v = nil - - case *map[string]int16: - *v = nil - - case *map[string]int32: - *v = nil - - case *map[string]int64: - *v = nil - - case *map[string]float32: - *v = nil - - case *map[string]float64: - *v = nil - - case *map[string]bool: - *v = nil - - case *[]float32: - *v = nil - - case *map[float32]interface{}: - *v = nil - - case *map[float32]string: - *v = nil - - case *map[float32]uint: - *v = nil - - case *map[float32]uint8: - *v = nil - - case *map[float32]uint16: - *v = nil - - case *map[float32]uint32: - *v = nil - - case *map[float32]uint64: - *v = nil - - case *map[float32]uintptr: - *v = nil - - case *map[float32]int: - *v = nil - - case *map[float32]int8: - *v = nil - - case *map[float32]int16: - *v = nil - - case *map[float32]int32: - *v = nil - - case *map[float32]int64: - *v = nil - - case *map[float32]float32: - *v = nil - - case *map[float32]float64: - *v = nil - - case *map[float32]bool: - *v = nil - - case *[]float64: - *v = nil - - case *map[float64]interface{}: - *v = nil - - case *map[float64]string: - *v = nil - - case *map[float64]uint: - *v = nil - - case *map[float64]uint8: - *v = nil - - case *map[float64]uint16: - *v = nil - - case *map[float64]uint32: - *v = nil - - case *map[float64]uint64: - *v = nil - - case *map[float64]uintptr: - *v = nil - - case *map[float64]int: - *v = nil - - case *map[float64]int8: - *v = nil - - case *map[float64]int16: - *v = nil - - case *map[float64]int32: - *v = nil - - case *map[float64]int64: - *v = nil - - case *map[float64]float32: - *v = nil - - case *map[float64]float64: - *v = nil - - case 
*map[float64]bool: - *v = nil - - case *[]uint: - *v = nil - - case *map[uint]interface{}: - *v = nil - - case *map[uint]string: - *v = nil - - case *map[uint]uint: - *v = nil - - case *map[uint]uint8: - *v = nil - - case *map[uint]uint16: - *v = nil - - case *map[uint]uint32: - *v = nil - - case *map[uint]uint64: - *v = nil - - case *map[uint]uintptr: - *v = nil - - case *map[uint]int: - *v = nil - - case *map[uint]int8: - *v = nil - - case *map[uint]int16: - *v = nil - - case *map[uint]int32: - *v = nil - - case *map[uint]int64: - *v = nil - - case *map[uint]float32: - *v = nil - - case *map[uint]float64: - *v = nil - - case *map[uint]bool: - *v = nil - - case *map[uint8]interface{}: - *v = nil - - case *map[uint8]string: - *v = nil - - case *map[uint8]uint: - *v = nil - - case *map[uint8]uint8: - *v = nil - - case *map[uint8]uint16: - *v = nil - - case *map[uint8]uint32: - *v = nil - - case *map[uint8]uint64: - *v = nil - - case *map[uint8]uintptr: - *v = nil - - case *map[uint8]int: - *v = nil - - case *map[uint8]int8: - *v = nil - - case *map[uint8]int16: - *v = nil - - case *map[uint8]int32: - *v = nil - - case *map[uint8]int64: - *v = nil - - case *map[uint8]float32: - *v = nil - - case *map[uint8]float64: - *v = nil - - case *map[uint8]bool: - *v = nil - - case *[]uint16: - *v = nil - - case *map[uint16]interface{}: - *v = nil - - case *map[uint16]string: - *v = nil - - case *map[uint16]uint: - *v = nil - - case *map[uint16]uint8: - *v = nil - - case *map[uint16]uint16: - *v = nil - - case *map[uint16]uint32: - *v = nil - - case *map[uint16]uint64: - *v = nil - - case *map[uint16]uintptr: - *v = nil - - case *map[uint16]int: - *v = nil - - case *map[uint16]int8: - *v = nil - - case *map[uint16]int16: - *v = nil - - case *map[uint16]int32: - *v = nil - - case *map[uint16]int64: - *v = nil - - case *map[uint16]float32: - *v = nil - - case *map[uint16]float64: - *v = nil - - case *map[uint16]bool: - *v = nil - - case *[]uint32: - *v = nil - - case 
*map[uint32]interface{}: - *v = nil - - case *map[uint32]string: - *v = nil - - case *map[uint32]uint: - *v = nil - - case *map[uint32]uint8: - *v = nil - - case *map[uint32]uint16: - *v = nil - - case *map[uint32]uint32: - *v = nil - - case *map[uint32]uint64: - *v = nil - - case *map[uint32]uintptr: - *v = nil - - case *map[uint32]int: - *v = nil - - case *map[uint32]int8: - *v = nil - - case *map[uint32]int16: - *v = nil - - case *map[uint32]int32: - *v = nil - - case *map[uint32]int64: - *v = nil - - case *map[uint32]float32: - *v = nil - - case *map[uint32]float64: - *v = nil - - case *map[uint32]bool: - *v = nil - - case *[]uint64: - *v = nil - - case *map[uint64]interface{}: - *v = nil - - case *map[uint64]string: - *v = nil - - case *map[uint64]uint: - *v = nil - - case *map[uint64]uint8: - *v = nil - - case *map[uint64]uint16: - *v = nil - - case *map[uint64]uint32: - *v = nil - - case *map[uint64]uint64: - *v = nil - - case *map[uint64]uintptr: - *v = nil - - case *map[uint64]int: - *v = nil - - case *map[uint64]int8: - *v = nil - - case *map[uint64]int16: - *v = nil - - case *map[uint64]int32: - *v = nil - - case *map[uint64]int64: - *v = nil - - case *map[uint64]float32: - *v = nil - - case *map[uint64]float64: - *v = nil - - case *map[uint64]bool: - *v = nil - - case *[]uintptr: - *v = nil - - case *map[uintptr]interface{}: - *v = nil - - case *map[uintptr]string: - *v = nil - - case *map[uintptr]uint: - *v = nil - - case *map[uintptr]uint8: - *v = nil - - case *map[uintptr]uint16: - *v = nil - - case *map[uintptr]uint32: - *v = nil - - case *map[uintptr]uint64: - *v = nil - - case *map[uintptr]uintptr: - *v = nil - - case *map[uintptr]int: - *v = nil - - case *map[uintptr]int8: - *v = nil - - case *map[uintptr]int16: - *v = nil - - case *map[uintptr]int32: - *v = nil - - case *map[uintptr]int64: - *v = nil - - case *map[uintptr]float32: - *v = nil - - case *map[uintptr]float64: - *v = nil - - case *map[uintptr]bool: - *v = nil - - case *[]int: - *v = 
nil - - case *map[int]interface{}: - *v = nil - - case *map[int]string: - *v = nil - - case *map[int]uint: - *v = nil - - case *map[int]uint8: - *v = nil - - case *map[int]uint16: - *v = nil - - case *map[int]uint32: - *v = nil - - case *map[int]uint64: - *v = nil - - case *map[int]uintptr: - *v = nil - - case *map[int]int: - *v = nil - - case *map[int]int8: - *v = nil - - case *map[int]int16: - *v = nil - - case *map[int]int32: - *v = nil - - case *map[int]int64: - *v = nil - - case *map[int]float32: - *v = nil - - case *map[int]float64: - *v = nil - - case *map[int]bool: - *v = nil - - case *[]int8: - *v = nil - - case *map[int8]interface{}: - *v = nil - - case *map[int8]string: - *v = nil - - case *map[int8]uint: - *v = nil - - case *map[int8]uint8: - *v = nil - - case *map[int8]uint16: - *v = nil - - case *map[int8]uint32: - *v = nil - - case *map[int8]uint64: - *v = nil - - case *map[int8]uintptr: - *v = nil - - case *map[int8]int: - *v = nil - - case *map[int8]int8: - *v = nil - - case *map[int8]int16: - *v = nil - - case *map[int8]int32: - *v = nil - - case *map[int8]int64: - *v = nil - - case *map[int8]float32: - *v = nil - - case *map[int8]float64: - *v = nil - - case *map[int8]bool: - *v = nil - - case *[]int16: - *v = nil - - case *map[int16]interface{}: - *v = nil - - case *map[int16]string: - *v = nil - - case *map[int16]uint: - *v = nil - - case *map[int16]uint8: - *v = nil - - case *map[int16]uint16: - *v = nil - - case *map[int16]uint32: - *v = nil - - case *map[int16]uint64: - *v = nil - - case *map[int16]uintptr: - *v = nil - - case *map[int16]int: - *v = nil - - case *map[int16]int8: - *v = nil - - case *map[int16]int16: - *v = nil - - case *map[int16]int32: - *v = nil - - case *map[int16]int64: - *v = nil - - case *map[int16]float32: - *v = nil - - case *map[int16]float64: - *v = nil - - case *map[int16]bool: - *v = nil - - case *[]int32: - *v = nil - - case *map[int32]interface{}: - *v = nil - - case *map[int32]string: - *v = nil - - case 
*map[int32]uint: - *v = nil - - case *map[int32]uint8: - *v = nil - - case *map[int32]uint16: - *v = nil - - case *map[int32]uint32: - *v = nil - - case *map[int32]uint64: - *v = nil - - case *map[int32]uintptr: - *v = nil - - case *map[int32]int: - *v = nil - - case *map[int32]int8: - *v = nil - - case *map[int32]int16: - *v = nil - - case *map[int32]int32: - *v = nil - - case *map[int32]int64: - *v = nil - - case *map[int32]float32: - *v = nil - - case *map[int32]float64: - *v = nil - - case *map[int32]bool: - *v = nil - - case *[]int64: - *v = nil - - case *map[int64]interface{}: - *v = nil - - case *map[int64]string: - *v = nil - - case *map[int64]uint: - *v = nil - - case *map[int64]uint8: - *v = nil - - case *map[int64]uint16: - *v = nil - - case *map[int64]uint32: - *v = nil - - case *map[int64]uint64: - *v = nil - - case *map[int64]uintptr: - *v = nil - - case *map[int64]int: - *v = nil - - case *map[int64]int8: - *v = nil - - case *map[int64]int16: - *v = nil - - case *map[int64]int32: - *v = nil - - case *map[int64]int64: - *v = nil - - case *map[int64]float32: - *v = nil - - case *map[int64]float64: - *v = nil - - case *map[int64]bool: - *v = nil - - case *[]bool: - *v = nil - - case *map[bool]interface{}: - *v = nil - - case *map[bool]string: - *v = nil - - case *map[bool]uint: - *v = nil - - case *map[bool]uint8: - *v = nil - - case *map[bool]uint16: - *v = nil - - case *map[bool]uint32: - *v = nil - - case *map[bool]uint64: - *v = nil - - case *map[bool]uintptr: - *v = nil - - case *map[bool]int: - *v = nil - - case *map[bool]int8: - *v = nil - - case *map[bool]int16: - *v = nil - - case *map[bool]int32: - *v = nil - - case *map[bool]int64: - *v = nil - - case *map[bool]float32: - *v = nil - - case *map[bool]float64: - *v = nil - - case *map[bool]bool: - *v = nil - - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions - -func (d 
*Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]interface{}) - if v, changed := fastpathTV.DecSliceIntfV(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceIntfV(rv2i(rv).([]interface{}), !array, d) - } -} -func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) { - if v, changed := f.DecSliceIntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ []interface{}, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []interface{}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]interface{}, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) - } else { - xlen = 8 - } - v = make([]interface{}, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, nil) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - d.decode(&v[j]) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]interface{}, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d 
*Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]string) - if v, changed := fastpathTV.DecSliceStringV(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceStringV(rv2i(rv).([]string), !array, d) - } -} -func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) { - if v, changed := f.DecSliceStringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ []string, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []string{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]string, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) - } else { - xlen = 8 - } - v = make([]string, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, "") - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = dd.DecodeString() - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]string, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) 
fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]float32) - if v, changed := fastpathTV.DecSliceFloat32V(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceFloat32V(rv2i(rv).([]float32), !array, d) - } -} -func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) { - if v, changed := f.DecSliceFloat32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ []float32, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []float32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]float32, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - } else { - xlen = 8 - } - v = make([]float32, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = float32(dd.DecodeFloat(true)) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]float32, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) 
fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]float64) - if v, changed := fastpathTV.DecSliceFloat64V(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceFloat64V(rv2i(rv).([]float64), !array, d) - } -} -func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) { - if v, changed := f.DecSliceFloat64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ []float64, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []float64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]float64, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]float64, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = dd.DecodeFloat(false) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]float64, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) 
fastpathDecSliceUintR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]uint) - if v, changed := fastpathTV.DecSliceUintV(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceUintV(rv2i(rv).([]uint), !array, d) - } -} -func (f fastpathT) DecSliceUintX(vp *[]uint, d *Decoder) { - if v, changed := f.DecSliceUintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]uint, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = uint(dd.DecodeUint(uintBitsize)) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]uint, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) fastpathDecSliceUint16R(f *codecFnInfo, rv 
reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]uint16) - if v, changed := fastpathTV.DecSliceUint16V(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceUint16V(rv2i(rv).([]uint16), !array, d) - } -} -func (f fastpathT) DecSliceUint16X(vp *[]uint16, d *Decoder) { - if v, changed := f.DecSliceUint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ []uint16, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint16, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) - } else { - xlen = 8 - } - v = make([]uint16, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = uint16(dd.DecodeUint(16)) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]uint16, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) fastpathDecSliceUint32R(f *codecFnInfo, rv reflect.Value) { - if 
array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]uint32) - if v, changed := fastpathTV.DecSliceUint32V(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceUint32V(rv2i(rv).([]uint32), !array, d) - } -} -func (f fastpathT) DecSliceUint32X(vp *[]uint32, d *Decoder) { - if v, changed := f.DecSliceUint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ []uint32, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint32, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - } else { - xlen = 8 - } - v = make([]uint32, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = uint32(dd.DecodeUint(32)) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]uint32, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == 
seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]uint64) - if v, changed := fastpathTV.DecSliceUint64V(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceUint64V(rv2i(rv).([]uint64), !array, d) - } -} -func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) { - if v, changed := f.DecSliceUint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ []uint64, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint64, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]uint64, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = dd.DecodeUint(64) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]uint64, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) fastpathDecSliceUintptrR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && 
rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]uintptr) - if v, changed := fastpathTV.DecSliceUintptrV(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceUintptrV(rv2i(rv).([]uintptr), !array, d) - } -} -func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, d *Decoder) { - if v, changed := f.DecSliceUintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ []uintptr, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uintptr{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uintptr, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]uintptr, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = uintptr(dd.DecodeUint(uintBitsize)) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]uintptr, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) fastpathDecSliceIntR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && 
rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]int) - if v, changed := fastpathTV.DecSliceIntV(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceIntV(rv2i(rv).([]int), !array, d) - } -} -func (f fastpathT) DecSliceIntX(vp *[]int, d *Decoder) { - if v, changed := f.DecSliceIntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]int, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = int(dd.DecodeInt(intBitsize)) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]int, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) fastpathDecSliceInt8R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]int8) - if 
v, changed := fastpathTV.DecSliceInt8V(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceInt8V(rv2i(rv).([]int8), !array, d) - } -} -func (f fastpathT) DecSliceInt8X(vp *[]int8, d *Decoder) { - if v, changed := f.DecSliceInt8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int8{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int8, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) - } else { - xlen = 8 - } - v = make([]int8, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = int8(dd.DecodeInt(8)) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]int8, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) fastpathDecSliceInt16R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]int16) - if v, changed := fastpathTV.DecSliceInt16V(*vp, !array, 
d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceInt16V(rv2i(rv).([]int16), !array, d) - } -} -func (f fastpathT) DecSliceInt16X(vp *[]int16, d *Decoder) { - if v, changed := f.DecSliceInt16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []int16, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int16, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) - } else { - xlen = 8 - } - v = make([]int16, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = int16(dd.DecodeInt(16)) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]int16, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) fastpathDecSliceInt32R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]int32) - if v, changed := fastpathTV.DecSliceInt32V(*vp, !array, d); changed { - *vp = v - } - } else { - 
fastpathTV.DecSliceInt32V(rv2i(rv).([]int32), !array, d) - } -} -func (f fastpathT) DecSliceInt32X(vp *[]int32, d *Decoder) { - if v, changed := f.DecSliceInt32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []int32, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int32, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - } else { - xlen = 8 - } - v = make([]int32, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = int32(dd.DecodeInt(32)) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]int32, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) fastpathDecSliceInt64R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]int64) - if v, changed := fastpathTV.DecSliceInt64V(*vp, !array, d); changed { - *vp = v - } - } else { - 
fastpathTV.DecSliceInt64V(rv2i(rv).([]int64), !array, d) - } -} -func (f fastpathT) DecSliceInt64X(vp *[]int64, d *Decoder) { - if v, changed := f.DecSliceInt64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []int64, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int64, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]int64, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = dd.DecodeInt(64) - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]int64, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) fastpathDecSliceBoolR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]bool) - if v, changed := fastpathTV.DecSliceBoolV(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.DecSliceBoolV(rv2i(rv).([]bool), 
!array, d) - } -} -func (f fastpathT) DecSliceBoolX(vp *[]bool, d *Decoder) { - if v, changed := f.DecSliceBoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool, changed bool) { - dd := d.d - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []bool{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]bool, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) - } else { - xlen = 8 - } - v = make([]bool, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, false) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - v[j] = dd.DecodeBool() - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]bool, 0) - changed = true - } - } - slh.End() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]interface{}) - if v, changed := fastpathTV.DecMapIntfIntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), false, d) -} -func (f fastpathT) DecMapIntfIntfX(vp 
*map[interface{}]interface{}, d *Decoder) { - if v, changed := f.DecMapIntfIntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool, - d *Decoder) (_ map[interface{}]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[interface{}]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk interface{} - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfStringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]string) - if v, changed := fastpathTV.DecMapIntfStringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfStringV(rv2i(rv).(map[interface{}]string), false, d) -} -func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, d *Decoder) { - if v, changed := f.DecMapIntfStringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool, - d *Decoder) (_ map[interface{}]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 32) - v = make(map[interface{}]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfUintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uint) - if v, changed := fastpathTV.DecMapIntfUintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfUintV(rv2i(rv).(map[interface{}]uint), false, d) -} -func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, d *Decoder) { - if v, changed := f.DecMapIntfUintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool, - d *Decoder) (_ map[interface{}]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 
- } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uint8) - if v, changed := fastpathTV.DecMapIntfUint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), false, d) -} -func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, d *Decoder) { - if v, changed := f.DecMapIntfUint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool, - d *Decoder) (_ map[interface{}]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uint16) - if v, changed := fastpathTV.DecMapIntfUint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), false, d) -} -func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, d 
*Decoder) { - if v, changed := f.DecMapIntfUint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, canChange bool, - d *Decoder) (_ map[interface{}]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[interface{}]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uint32) - if v, changed := fastpathTV.DecMapIntfUint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), false, d) -} -func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, d *Decoder) { - if v, changed := f.DecMapIntfUint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool, - d *Decoder) (_ map[interface{}]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - 
- var mk interface{} - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uint64) - if v, changed := fastpathTV.DecMapIntfUint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), false, d) -} -func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, d *Decoder) { - if v, changed := f.DecMapIntfUint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool, - d *Decoder) (_ map[interface{}]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed 
-} - -func (d *Decoder) fastpathDecMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uintptr) - if v, changed := fastpathTV.DecMapIntfUintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), false, d) -} -func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, d *Decoder) { - if v, changed := f.DecMapIntfUintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, canChange bool, - d *Decoder) (_ map[interface{}]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfIntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]int) - if v, changed := fastpathTV.DecMapIntfIntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfIntV(rv2i(rv).(map[interface{}]int), false, d) -} -func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, d *Decoder) { - if v, changed := f.DecMapIntfIntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) 
DecMapIntfIntV(v map[interface{}]int, canChange bool, - d *Decoder) (_ map[interface{}]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]int8) - if v, changed := fastpathTV.DecMapIntfInt8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfInt8V(rv2i(rv).(map[interface{}]int8), false, d) -} -func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, d *Decoder) { - if v, changed := f.DecMapIntfInt8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool, - d *Decoder) (_ map[interface{}]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - 
if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]int16) - if v, changed := fastpathTV.DecMapIntfInt16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfInt16V(rv2i(rv).(map[interface{}]int16), false, d) -} -func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, d *Decoder) { - if v, changed := f.DecMapIntfInt16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool, - d *Decoder) (_ map[interface{}]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[interface{}]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]int32) - 
if v, changed := fastpathTV.DecMapIntfInt32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfInt32V(rv2i(rv).(map[interface{}]int32), false, d) -} -func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, d *Decoder) { - if v, changed := f.DecMapIntfInt32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool, - d *Decoder) (_ map[interface{}]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]int64) - if v, changed := fastpathTV.DecMapIntfInt64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfInt64V(rv2i(rv).(map[interface{}]int64), false, d) -} -func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, d *Decoder) { - if v, changed := f.DecMapIntfInt64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool, - d *Decoder) (_ map[interface{}]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := 
dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]float32) - if v, changed := fastpathTV.DecMapIntfFloat32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), false, d) -} -func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, d *Decoder) { - if v, changed := f.DecMapIntfFloat32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool, - d *Decoder) (_ map[interface{}]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - 
dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]float64) - if v, changed := fastpathTV.DecMapIntfFloat64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), false, d) -} -func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, d *Decoder) { - if v, changed := f.DecMapIntfFloat64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool, - d *Decoder) (_ map[interface{}]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]bool) - if v, changed := fastpathTV.DecMapIntfBoolV(*vp, true, d); changed { - *vp = v - } - return - } - 
fastpathTV.DecMapIntfBoolV(rv2i(rv).(map[interface{}]bool), false, d) -} -func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, d *Decoder) { - if v, changed := f.DecMapIntfBoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool, - d *Decoder) (_ map[interface{}]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk interface{} - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringIntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]interface{}) - if v, changed := fastpathTV.DecMapStringIntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringIntfV(rv2i(rv).(map[string]interface{}), false, d) -} -func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, d *Decoder) { - if v, changed := f.DecMapStringIntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool, - d *Decoder) (_ map[string]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = 
make(map[string]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk string - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringStringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]string) - if v, changed := fastpathTV.DecMapStringStringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringStringV(rv2i(rv).(map[string]string), false, d) -} -func (f fastpathT) DecMapStringStringX(vp *map[string]string, d *Decoder) { - if v, changed := f.DecMapStringStringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool, - d *Decoder) (_ map[string]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[string]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - 
if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uint) - if v, changed := fastpathTV.DecMapStringUintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringUintV(rv2i(rv).(map[string]uint), false, d) -} -func (f fastpathT) DecMapStringUintX(vp *map[string]uint, d *Decoder) { - if v, changed := f.DecMapStringUintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool, - d *Decoder) (_ map[string]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uint8) - if v, changed := fastpathTV.DecMapStringUint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringUint8V(rv2i(rv).(map[string]uint8), false, d) -} -func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, d *Decoder) { - if v, changed := f.DecMapStringUint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, canChange 
bool, - d *Decoder) (_ map[string]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uint16) - if v, changed := fastpathTV.DecMapStringUint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringUint16V(rv2i(rv).(map[string]uint16), false, d) -} -func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, d *Decoder) { - if v, changed := f.DecMapStringUint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool, - d *Decoder) (_ map[string]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[string]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if 
dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uint32) - if v, changed := fastpathTV.DecMapStringUint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringUint32V(rv2i(rv).(map[string]uint32), false, d) -} -func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, d *Decoder) { - if v, changed := f.DecMapStringUint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool, - d *Decoder) (_ map[string]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uint64) - if v, changed := fastpathTV.DecMapStringUint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringUint64V(rv2i(rv).(map[string]uint64), false, d) -} -func (f fastpathT) DecMapStringUint64X(vp 
*map[string]uint64, d *Decoder) { - if v, changed := f.DecMapStringUint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool, - d *Decoder) (_ map[string]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uintptr) - if v, changed := fastpathTV.DecMapStringUintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringUintptrV(rv2i(rv).(map[string]uintptr), false, d) -} -func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, d *Decoder) { - if v, changed := f.DecMapStringUintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool, - d *Decoder) (_ map[string]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv uintptr - hasLen := containerLen > 0 - for 
j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringIntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]int) - if v, changed := fastpathTV.DecMapStringIntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringIntV(rv2i(rv).(map[string]int), false, d) -} -func (f fastpathT) DecMapStringIntX(vp *map[string]int, d *Decoder) { - if v, changed := f.DecMapStringIntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool, - d *Decoder) (_ map[string]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringInt8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]int8) - if v, changed := fastpathTV.DecMapStringInt8V(*vp, true, d); changed { - *vp 
= v - } - return - } - fastpathTV.DecMapStringInt8V(rv2i(rv).(map[string]int8), false, d) -} -func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, d *Decoder) { - if v, changed := f.DecMapStringInt8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt8V(v map[string]int8, canChange bool, - d *Decoder) (_ map[string]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringInt16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]int16) - if v, changed := fastpathTV.DecMapStringInt16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringInt16V(rv2i(rv).(map[string]int16), false, d) -} -func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, d *Decoder) { - if v, changed := f.DecMapStringInt16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool, - d *Decoder) (_ map[string]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[string]int16, xlen) - changed = true - } - if containerLen == 0 { - 
dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringInt32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]int32) - if v, changed := fastpathTV.DecMapStringInt32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringInt32V(rv2i(rv).(map[string]int32), false, d) -} -func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, d *Decoder) { - if v, changed := f.DecMapStringInt32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool, - d *Decoder) (_ map[string]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringInt64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr 
{ - vp := rv2i(rv).(*map[string]int64) - if v, changed := fastpathTV.DecMapStringInt64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringInt64V(rv2i(rv).(map[string]int64), false, d) -} -func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, d *Decoder) { - if v, changed := f.DecMapStringInt64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool, - d *Decoder) (_ map[string]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]float32) - if v, changed := fastpathTV.DecMapStringFloat32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringFloat32V(rv2i(rv).(map[string]float32), false, d) -} -func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, d *Decoder) { - if v, changed := f.DecMapStringFloat32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange bool, - d *Decoder) (_ map[string]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - 
xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]float64) - if v, changed := fastpathTV.DecMapStringFloat64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringFloat64V(rv2i(rv).(map[string]float64), false, d) -} -func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, d *Decoder) { - if v, changed := f.DecMapStringFloat64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool, - d *Decoder) (_ map[string]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - 
v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringBoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]bool) - if v, changed := fastpathTV.DecMapStringBoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapStringBoolV(rv2i(rv).(map[string]bool), false, d) -} -func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, d *Decoder) { - if v, changed := f.DecMapStringBoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool, - d *Decoder) (_ map[string]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk string - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]interface{}) - if v, changed := fastpathTV.DecMapFloat32IntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), false, d) -} -func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, d *Decoder) { - if v, changed := f.DecMapFloat32IntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, canChange 
bool, - d *Decoder) (_ map[float32]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[float32]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk float32 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]string) - if v, changed := fastpathTV.DecMapFloat32StringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32StringV(rv2i(rv).(map[float32]string), false, d) -} -func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, d *Decoder) { - if v, changed := f.DecMapFloat32StringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool, - d *Decoder) (_ map[float32]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[float32]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || 
dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uint) - if v, changed := fastpathTV.DecMapFloat32UintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32UintV(rv2i(rv).(map[float32]uint), false, d) -} -func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, d *Decoder) { - if v, changed := f.DecMapFloat32UintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool, - d *Decoder) (_ map[float32]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uint8) - if v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, true, d); changed { - *vp = v - } - 
return - } - fastpathTV.DecMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), false, d) -} -func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, d *Decoder) { - if v, changed := f.DecMapFloat32Uint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, canChange bool, - d *Decoder) (_ map[float32]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uint16) - if v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), false, d) -} -func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, d *Decoder) { - if v, changed := f.DecMapFloat32Uint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool, - d *Decoder) (_ map[float32]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[float32]uint16, xlen) - changed = 
true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uint32) - if v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), false, d) -} -func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, d *Decoder) { - if v, changed := f.DecMapFloat32Uint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool, - d *Decoder) (_ map[float32]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) 
fastpathDecMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uint64) - if v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), false, d) -} -func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, d *Decoder) { - if v, changed := f.DecMapFloat32Uint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool, - d *Decoder) (_ map[float32]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uintptr) - if v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), false, d) -} -func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, d *Decoder) { - if v, changed := f.DecMapFloat32UintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool, - d *Decoder) (_ 
map[float32]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]int) - if v, changed := fastpathTV.DecMapFloat32IntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32IntV(rv2i(rv).(map[float32]int), false, d) -} -func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, d *Decoder) { - if v, changed := f.DecMapFloat32IntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool, - d *Decoder) (_ map[float32]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if 
dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]int8) - if v, changed := fastpathTV.DecMapFloat32Int8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32Int8V(rv2i(rv).(map[float32]int8), false, d) -} -func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, d *Decoder) { - if v, changed := f.DecMapFloat32Int8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool, - d *Decoder) (_ map[float32]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]int16) - if v, changed := fastpathTV.DecMapFloat32Int16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32Int16V(rv2i(rv).(map[float32]int16), false, d) -} -func (f fastpathT) DecMapFloat32Int16X(vp 
*map[float32]int16, d *Decoder) { - if v, changed := f.DecMapFloat32Int16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool, - d *Decoder) (_ map[float32]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[float32]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]int32) - if v, changed := fastpathTV.DecMapFloat32Int32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32Int32V(rv2i(rv).(map[float32]int32), false, d) -} -func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, d *Decoder) { - if v, changed := f.DecMapFloat32Int32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool, - d *Decoder) (_ map[float32]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv int32 - hasLen := containerLen > 0 - 
for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]int64) - if v, changed := fastpathTV.DecMapFloat32Int64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32Int64V(rv2i(rv).(map[float32]int64), false, d) -} -func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, d *Decoder) { - if v, changed := f.DecMapFloat32Int64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool, - d *Decoder) (_ map[float32]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]float32) - if v, changed := 
fastpathTV.DecMapFloat32Float32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32Float32V(rv2i(rv).(map[float32]float32), false, d) -} -func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, d *Decoder) { - if v, changed := f.DecMapFloat32Float32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool, - d *Decoder) (_ map[float32]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]float64) - if v, changed := fastpathTV.DecMapFloat32Float64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32Float64V(rv2i(rv).(map[float32]float64), false, d) -} -func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, d *Decoder) { - if v, changed := f.DecMapFloat32Float64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool, - d *Decoder) (_ map[float32]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == 
nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]bool) - if v, changed := fastpathTV.DecMapFloat32BoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat32BoolV(rv2i(rv).(map[float32]bool), false, d) -} -func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, d *Decoder) { - if v, changed := f.DecMapFloat32BoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool, - d *Decoder) (_ map[float32]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float32 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(dd.DecodeFloat(true)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] 
= mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]interface{}) - if v, changed := fastpathTV.DecMapFloat64IntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), false, d) -} -func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, d *Decoder) { - if v, changed := f.DecMapFloat64IntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool, - d *Decoder) (_ map[float64]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[float64]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk float64 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]string) - if v, changed := fastpathTV.DecMapFloat64StringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64StringV(rv2i(rv).(map[float64]string), false, d) -} -func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, d *Decoder) { - if v, 
changed := f.DecMapFloat64StringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool, - d *Decoder) (_ map[float64]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[float64]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]uint) - if v, changed := fastpathTV.DecMapFloat64UintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64UintV(rv2i(rv).(map[float64]uint), false, d) -} -func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, d *Decoder) { - if v, changed := f.DecMapFloat64UintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool, - d *Decoder) (_ map[float64]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || 
dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]uint8) - if v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), false, d) -} -func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, d *Decoder) { - if v, changed := f.DecMapFloat64Uint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool, - d *Decoder) (_ map[float64]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]uint16) - if v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, true, d); changed { - *vp = v - 
} - return - } - fastpathTV.DecMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), false, d) -} -func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, d *Decoder) { - if v, changed := f.DecMapFloat64Uint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool, - d *Decoder) (_ map[float64]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[float64]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]uint32) - if v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), false, d) -} -func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, d *Decoder) { - if v, changed := f.DecMapFloat64Uint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool, - d *Decoder) (_ map[float64]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]uint32, xlen) - 
changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]uint64) - if v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), false, d) -} -func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, d *Decoder) { - if v, changed := f.DecMapFloat64Uint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool, - d *Decoder) (_ map[float64]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) 
fastpathDecMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]uintptr) - if v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), false, d) -} -func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, d *Decoder) { - if v, changed := f.DecMapFloat64UintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool, - d *Decoder) (_ map[float64]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]int) - if v, changed := fastpathTV.DecMapFloat64IntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64IntV(rv2i(rv).(map[float64]int), false, d) -} -func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, d *Decoder) { - if v, changed := f.DecMapFloat64IntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool, - d *Decoder) (_ map[float64]int, changed 
bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]int8) - if v, changed := fastpathTV.DecMapFloat64Int8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64Int8V(rv2i(rv).(map[float64]int8), false, d) -} -func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, d *Decoder) { - if v, changed := f.DecMapFloat64Int8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool, - d *Decoder) (_ map[float64]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, 
mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]int16) - if v, changed := fastpathTV.DecMapFloat64Int16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64Int16V(rv2i(rv).(map[float64]int16), false, d) -} -func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, d *Decoder) { - if v, changed := f.DecMapFloat64Int16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool, - d *Decoder) (_ map[float64]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[float64]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]int32) - if v, changed := fastpathTV.DecMapFloat64Int32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64Int32V(rv2i(rv).(map[float64]int32), false, d) -} -func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, d *Decoder) { - if v, changed := f.DecMapFloat64Int32V(*vp, 
true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool, - d *Decoder) (_ map[float64]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]int64) - if v, changed := fastpathTV.DecMapFloat64Int64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64Int64V(rv2i(rv).(map[float64]int64), false, d) -} -func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, d *Decoder) { - if v, changed := f.DecMapFloat64Int64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool, - d *Decoder) (_ map[float64]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if 
esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]float32) - if v, changed := fastpathTV.DecMapFloat64Float32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64Float32V(rv2i(rv).(map[float64]float32), false, d) -} -func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, d *Decoder) { - if v, changed := f.DecMapFloat64Float32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool, - d *Decoder) (_ map[float64]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]float64) - if v, changed := fastpathTV.DecMapFloat64Float64V(*vp, true, d); changed { - *vp = v - } - 
return - } - fastpathTV.DecMapFloat64Float64V(rv2i(rv).(map[float64]float64), false, d) -} -func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, d *Decoder) { - if v, changed := f.DecMapFloat64Float64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool, - d *Decoder) (_ map[float64]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]bool) - if v, changed := fastpathTV.DecMapFloat64BoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapFloat64BoolV(rv2i(rv).(map[float64]bool), false, d) -} -func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, d *Decoder) { - if v, changed := f.DecMapFloat64BoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool, - d *Decoder) (_ map[float64]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]bool, xlen) - changed = true - } - if 
containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk float64 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat(false) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintIntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]interface{}) - if v, changed := fastpathTV.DecMapUintIntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintIntfV(rv2i(rv).(map[uint]interface{}), false, d) -} -func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, d *Decoder) { - if v, changed := f.DecMapUintIntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool, - d *Decoder) (_ map[uint]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - 
dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintStringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]string) - if v, changed := fastpathTV.DecMapUintStringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintStringV(rv2i(rv).(map[uint]string), false, d) -} -func (f fastpathT) DecMapUintStringX(vp *map[uint]string, d *Decoder) { - if v, changed := f.DecMapUintStringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool, - d *Decoder) (_ map[uint]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uint) - if v, changed := fastpathTV.DecMapUintUintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintUintV(rv2i(rv).(map[uint]uint), false, d) -} -func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, d *Decoder) { - if v, changed := f.DecMapUintUintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool, - d *Decoder) (_ map[uint]uint, changed bool) { - dd, esep 
:= d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uint8) - if v, changed := fastpathTV.DecMapUintUint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintUint8V(rv2i(rv).(map[uint]uint8), false, d) -} -func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, d *Decoder) { - if v, changed := f.DecMapUintUint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool, - d *Decoder) (_ map[uint]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 
0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uint16) - if v, changed := fastpathTV.DecMapUintUint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintUint16V(rv2i(rv).(map[uint]uint16), false, d) -} -func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, d *Decoder) { - if v, changed := f.DecMapUintUint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool, - d *Decoder) (_ map[uint]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uint32) - if v, changed := fastpathTV.DecMapUintUint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintUint32V(rv2i(rv).(map[uint]uint32), false, d) -} -func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, d *Decoder) { - if v, changed := f.DecMapUintUint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ 
fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool, - d *Decoder) (_ map[uint]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uint64) - if v, changed := fastpathTV.DecMapUintUint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintUint64V(rv2i(rv).(map[uint]uint64), false, d) -} -func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, d *Decoder) { - if v, changed := f.DecMapUintUint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool, - d *Decoder) (_ map[uint]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = 
uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uintptr) - if v, changed := fastpathTV.DecMapUintUintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintUintptrV(rv2i(rv).(map[uint]uintptr), false, d) -} -func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, d *Decoder) { - if v, changed := f.DecMapUintUintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool, - d *Decoder) (_ map[uint]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintIntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]int) - if v, changed := fastpathTV.DecMapUintIntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintIntV(rv2i(rv).(map[uint]int), false, d) -} 
-func (f fastpathT) DecMapUintIntX(vp *map[uint]int, d *Decoder) { - if v, changed := f.DecMapUintIntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool, - d *Decoder) (_ map[uint]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintInt8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]int8) - if v, changed := fastpathTV.DecMapUintInt8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintInt8V(rv2i(rv).(map[uint]int8), false, d) -} -func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, d *Decoder) { - if v, changed := f.DecMapUintInt8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool, - d *Decoder) (_ map[uint]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) 
|| !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintInt16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]int16) - if v, changed := fastpathTV.DecMapUintInt16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintInt16V(rv2i(rv).(map[uint]int16), false, d) -} -func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, d *Decoder) { - if v, changed := f.DecMapUintInt16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool, - d *Decoder) (_ map[uint]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintInt32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]int32) - if v, changed := fastpathTV.DecMapUintInt32V(*vp, true, d); changed { - *vp = v - } - return - } - 
fastpathTV.DecMapUintInt32V(rv2i(rv).(map[uint]int32), false, d) -} -func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, d *Decoder) { - if v, changed := f.DecMapUintInt32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool, - d *Decoder) (_ map[uint]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintInt64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]int64) - if v, changed := fastpathTV.DecMapUintInt64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintInt64V(rv2i(rv).(map[uint]int64), false, d) -} -func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, d *Decoder) { - if v, changed := f.DecMapUintInt64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool, - d *Decoder) (_ map[uint]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk 
uint - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]float32) - if v, changed := fastpathTV.DecMapUintFloat32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintFloat32V(rv2i(rv).(map[uint]float32), false, d) -} -func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, d *Decoder) { - if v, changed := f.DecMapUintFloat32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool, - d *Decoder) (_ map[uint]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := 
rv2i(rv).(*map[uint]float64) - if v, changed := fastpathTV.DecMapUintFloat64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintFloat64V(rv2i(rv).(map[uint]float64), false, d) -} -func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, d *Decoder) { - if v, changed := f.DecMapUintFloat64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool, - d *Decoder) (_ map[uint]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintBoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]bool) - if v, changed := fastpathTV.DecMapUintBoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintBoolV(rv2i(rv).(map[uint]bool), false, d) -} -func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, d *Decoder) { - if v, changed := f.DecMapUintBoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool, - d *Decoder) (_ map[uint]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 9) - v = make(map[uint]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]interface{}) - if v, changed := fastpathTV.DecMapUint8IntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), false, d) -} -func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) { - if v, changed := f.DecMapUint8IntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool, - d *Decoder) (_ map[uint8]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[uint8]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint8 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } 
else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]string) - if v, changed := fastpathTV.DecMapUint8StringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8StringV(rv2i(rv).(map[uint8]string), false, d) -} -func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) { - if v, changed := f.DecMapUint8StringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool, - d *Decoder) (_ map[uint8]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[uint8]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uint) - if v, changed := fastpathTV.DecMapUint8UintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8UintV(rv2i(rv).(map[uint8]uint), false, d) -} -func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, d *Decoder) { - if v, changed := f.DecMapUint8UintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8UintV(v 
map[uint8]uint, canChange bool, - d *Decoder) (_ map[uint8]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uint8) - if v, changed := fastpathTV.DecMapUint8Uint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), false, d) -} -func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) { - if v, changed := f.DecMapUint8Uint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool, - d *Decoder) (_ map[uint8]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if 
dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uint16) - if v, changed := fastpathTV.DecMapUint8Uint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), false, d) -} -func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, d *Decoder) { - if v, changed := f.DecMapUint8Uint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool, - d *Decoder) (_ map[uint8]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint8]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uint32) - if v, changed := fastpathTV.DecMapUint8Uint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), false, d) -} -func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, d 
*Decoder) { - if v, changed := f.DecMapUint8Uint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool, - d *Decoder) (_ map[uint8]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uint64) - if v, changed := fastpathTV.DecMapUint8Uint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), false, d) -} -func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) { - if v, changed := f.DecMapUint8Uint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool, - d *Decoder) (_ map[uint8]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || 
!(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uintptr) - if v, changed := fastpathTV.DecMapUint8UintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), false, d) -} -func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, d *Decoder) { - if v, changed := f.DecMapUint8UintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool, - d *Decoder) (_ map[uint8]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]int) - if v, changed := fastpathTV.DecMapUint8IntV(*vp, true, d); changed { - *vp = v - } 
- return - } - fastpathTV.DecMapUint8IntV(rv2i(rv).(map[uint8]int), false, d) -} -func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) { - if v, changed := f.DecMapUint8IntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool, - d *Decoder) (_ map[uint8]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]int8) - if v, changed := fastpathTV.DecMapUint8Int8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8Int8V(rv2i(rv).(map[uint8]int8), false, d) -} -func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, d *Decoder) { - if v, changed := f.DecMapUint8Int8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool, - d *Decoder) (_ map[uint8]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk 
uint8 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]int16) - if v, changed := fastpathTV.DecMapUint8Int16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8Int16V(rv2i(rv).(map[uint8]int16), false, d) -} -func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, d *Decoder) { - if v, changed := f.DecMapUint8Int16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool, - d *Decoder) (_ map[uint8]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint8]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]int32) - if v, changed := 
fastpathTV.DecMapUint8Int32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8Int32V(rv2i(rv).(map[uint8]int32), false, d) -} -func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) { - if v, changed := f.DecMapUint8Int32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool, - d *Decoder) (_ map[uint8]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]int64) - if v, changed := fastpathTV.DecMapUint8Int64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8Int64V(rv2i(rv).(map[uint8]int64), false, d) -} -func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, d *Decoder) { - if v, changed := f.DecMapUint8Int64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool, - d *Decoder) (_ map[uint8]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]int64, xlen) - 
changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]float32) - if v, changed := fastpathTV.DecMapUint8Float32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8Float32V(rv2i(rv).(map[uint8]float32), false, d) -} -func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, d *Decoder) { - if v, changed := f.DecMapUint8Float32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool, - d *Decoder) (_ map[uint8]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) 
fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]float64) - if v, changed := fastpathTV.DecMapUint8Float64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8Float64V(rv2i(rv).(map[uint8]float64), false, d) -} -func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) { - if v, changed := f.DecMapUint8Float64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool, - d *Decoder) (_ map[uint8]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]bool) - if v, changed := fastpathTV.DecMapUint8BoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint8BoolV(rv2i(rv).(map[uint8]bool), false, d) -} -func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) { - if v, changed := f.DecMapUint8BoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool, - d *Decoder) (_ map[uint8]bool, changed bool) { - dd, esep := d.d, 
d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint8 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(dd.DecodeUint(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]interface{}) - if v, changed := fastpathTV.DecMapUint16IntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), false, d) -} -func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, d *Decoder) { - if v, changed := f.DecMapUint16IntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool, - d *Decoder) (_ map[uint16]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[uint16]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint16 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if 
dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]string) - if v, changed := fastpathTV.DecMapUint16StringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16StringV(rv2i(rv).(map[uint16]string), false, d) -} -func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, d *Decoder) { - if v, changed := f.DecMapUint16StringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool, - d *Decoder) (_ map[uint16]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[uint16]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uint) - if v, changed := fastpathTV.DecMapUint16UintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16UintV(rv2i(rv).(map[uint16]uint), false, d) -} -func (f fastpathT) 
DecMapUint16UintX(vp *map[uint16]uint, d *Decoder) { - if v, changed := f.DecMapUint16UintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool, - d *Decoder) (_ map[uint16]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uint8) - if v, changed := fastpathTV.DecMapUint16Uint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), false, d) -} -func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, d *Decoder) { - if v, changed := f.DecMapUint16Uint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool, - d *Decoder) (_ map[uint16]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv uint8 - hasLen := containerLen > 0 - 
for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uint16) - if v, changed := fastpathTV.DecMapUint16Uint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), false, d) -} -func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, d *Decoder) { - if v, changed := f.DecMapUint16Uint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool, - d *Decoder) (_ map[uint16]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[uint16]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uint32) - if v, changed := 
fastpathTV.DecMapUint16Uint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), false, d) -} -func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, d *Decoder) { - if v, changed := f.DecMapUint16Uint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool, - d *Decoder) (_ map[uint16]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uint64) - if v, changed := fastpathTV.DecMapUint16Uint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), false, d) -} -func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, d *Decoder) { - if v, changed := f.DecMapUint16Uint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool, - d *Decoder) (_ map[uint16]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 10) - v = make(map[uint16]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uintptr) - if v, changed := fastpathTV.DecMapUint16UintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), false, d) -} -func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, d *Decoder) { - if v, changed := f.DecMapUint16UintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool, - d *Decoder) (_ map[uint16]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - 
dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]int) - if v, changed := fastpathTV.DecMapUint16IntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16IntV(rv2i(rv).(map[uint16]int), false, d) -} -func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, d *Decoder) { - if v, changed := f.DecMapUint16IntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool, - d *Decoder) (_ map[uint16]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]int8) - if v, changed := fastpathTV.DecMapUint16Int8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16Int8V(rv2i(rv).(map[uint16]int8), false, d) -} -func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, d *Decoder) { - if v, changed := f.DecMapUint16Int8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool, - d *Decoder) (_ map[uint16]int8, changed bool) { 
- dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]int16) - if v, changed := fastpathTV.DecMapUint16Int16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16Int16V(rv2i(rv).(map[uint16]int16), false, d) -} -func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, d *Decoder) { - if v, changed := f.DecMapUint16Int16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool, - d *Decoder) (_ map[uint16]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[uint16]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } 
else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]int32) - if v, changed := fastpathTV.DecMapUint16Int32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16Int32V(rv2i(rv).(map[uint16]int32), false, d) -} -func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, d *Decoder) { - if v, changed := f.DecMapUint16Int32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool, - d *Decoder) (_ map[uint16]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]int64) - if v, changed := fastpathTV.DecMapUint16Int64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16Int64V(rv2i(rv).(map[uint16]int64), false, d) -} -func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, d *Decoder) { - if v, changed := f.DecMapUint16Int64V(*vp, true, d); changed { - 
*vp = v - } -} -func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool, - d *Decoder) (_ map[uint16]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]float32) - if v, changed := fastpathTV.DecMapUint16Float32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16Float32V(rv2i(rv).(map[uint16]float32), false, d) -} -func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, d *Decoder) { - if v, changed := f.DecMapUint16Float32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool, - d *Decoder) (_ map[uint16]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - 
dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]float64) - if v, changed := fastpathTV.DecMapUint16Float64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint16Float64V(rv2i(rv).(map[uint16]float64), false, d) -} -func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, d *Decoder) { - if v, changed := f.DecMapUint16Float64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool, - d *Decoder) (_ map[uint16]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]bool) - if v, changed := fastpathTV.DecMapUint16BoolV(*vp, true, d); changed { - *vp = v - } - return - } - 
fastpathTV.DecMapUint16BoolV(rv2i(rv).(map[uint16]bool), false, d) -} -func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, d *Decoder) { - if v, changed := f.DecMapUint16BoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool, - d *Decoder) (_ map[uint16]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint16 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(dd.DecodeUint(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]interface{}) - if v, changed := fastpathTV.DecMapUint32IntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), false, d) -} -func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, d *Decoder) { - if v, changed := f.DecMapUint32IntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool, - d *Decoder) (_ map[uint32]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[uint32]interface{}, xlen) - changed = true - } - if containerLen == 0 { 
- dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint32 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]string) - if v, changed := fastpathTV.DecMapUint32StringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32StringV(rv2i(rv).(map[uint32]string), false, d) -} -func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, d *Decoder) { - if v, changed := f.DecMapUint32StringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool, - d *Decoder) (_ map[uint32]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[uint32]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, 
changed -} - -func (d *Decoder) fastpathDecMapUint32UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uint) - if v, changed := fastpathTV.DecMapUint32UintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32UintV(rv2i(rv).(map[uint32]uint), false, d) -} -func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, d *Decoder) { - if v, changed := f.DecMapUint32UintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool, - d *Decoder) (_ map[uint32]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uint8) - if v, changed := fastpathTV.DecMapUint32Uint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), false, d) -} -func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, d *Decoder) { - if v, changed := f.DecMapUint32Uint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool, - d *Decoder) (_ map[uint32]uint8, changed bool) { 
- dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uint16) - if v, changed := fastpathTV.DecMapUint32Uint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), false, d) -} -func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, d *Decoder) { - if v, changed := f.DecMapUint32Uint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool, - d *Decoder) (_ map[uint32]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint32]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - 
delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uint32) - if v, changed := fastpathTV.DecMapUint32Uint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), false, d) -} -func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, d *Decoder) { - if v, changed := f.DecMapUint32Uint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool, - d *Decoder) (_ map[uint32]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uint64) - if v, changed := fastpathTV.DecMapUint32Uint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), false, d) -} -func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, d *Decoder) { - if v, changed := 
f.DecMapUint32Uint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool, - d *Decoder) (_ map[uint32]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uintptr) - if v, changed := fastpathTV.DecMapUint32UintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), false, d) -} -func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, d *Decoder) { - if v, changed := f.DecMapUint32UintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool, - d *Decoder) (_ map[uint32]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || 
!(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]int) - if v, changed := fastpathTV.DecMapUint32IntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32IntV(rv2i(rv).(map[uint32]int), false, d) -} -func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, d *Decoder) { - if v, changed := f.DecMapUint32IntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool, - d *Decoder) (_ map[uint32]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]int8) - if v, changed := fastpathTV.DecMapUint32Int8V(*vp, true, d); changed { - *vp = v - } - return - } - 
fastpathTV.DecMapUint32Int8V(rv2i(rv).(map[uint32]int8), false, d) -} -func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, d *Decoder) { - if v, changed := f.DecMapUint32Int8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool, - d *Decoder) (_ map[uint32]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]int16) - if v, changed := fastpathTV.DecMapUint32Int16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32Int16V(rv2i(rv).(map[uint32]int16), false, d) -} -func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, d *Decoder) { - if v, changed := f.DecMapUint32Int16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool, - d *Decoder) (_ map[uint32]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint32]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, 
changed - } - - var mk uint32 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]int32) - if v, changed := fastpathTV.DecMapUint32Int32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32Int32V(rv2i(rv).(map[uint32]int32), false, d) -} -func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, d *Decoder) { - if v, changed := f.DecMapUint32Int32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool, - d *Decoder) (_ map[uint32]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := 
rv2i(rv).(*map[uint32]int64) - if v, changed := fastpathTV.DecMapUint32Int64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32Int64V(rv2i(rv).(map[uint32]int64), false, d) -} -func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, d *Decoder) { - if v, changed := f.DecMapUint32Int64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool, - d *Decoder) (_ map[uint32]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]float32) - if v, changed := fastpathTV.DecMapUint32Float32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32Float32V(rv2i(rv).(map[uint32]float32), false, d) -} -func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, d *Decoder) { - if v, changed := f.DecMapUint32Float32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool, - d *Decoder) (_ map[uint32]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - 
xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]float64) - if v, changed := fastpathTV.DecMapUint32Float64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32Float64V(rv2i(rv).(map[uint32]float64), false, d) -} -func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, d *Decoder) { - if v, changed := f.DecMapUint32Float64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool, - d *Decoder) (_ map[uint32]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v 
!= nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]bool) - if v, changed := fastpathTV.DecMapUint32BoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint32BoolV(rv2i(rv).(map[uint32]bool), false, d) -} -func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, d *Decoder) { - if v, changed := f.DecMapUint32BoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool, - d *Decoder) (_ map[uint32]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint32 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(dd.DecodeUint(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]interface{}) - if v, changed := fastpathTV.DecMapUint64IntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), false, d) -} -func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) { - if v, changed := f.DecMapUint64IntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, 
canChange bool, - d *Decoder) (_ map[uint64]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint64]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint64 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]string) - if v, changed := fastpathTV.DecMapUint64StringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64StringV(rv2i(rv).(map[uint64]string), false, d) -} -func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) { - if v, changed := f.DecMapUint64StringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool, - d *Decoder) (_ map[uint64]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint64]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - 
if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uint) - if v, changed := fastpathTV.DecMapUint64UintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64UintV(rv2i(rv).(map[uint64]uint), false, d) -} -func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, d *Decoder) { - if v, changed := f.DecMapUint64UintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool, - d *Decoder) (_ map[uint64]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uint8) - if v, changed := fastpathTV.DecMapUint64Uint8V(*vp, true, d); changed { - *vp = v - } - return - } - 
fastpathTV.DecMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), false, d) -} -func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) { - if v, changed := f.DecMapUint64Uint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool, - d *Decoder) (_ map[uint64]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uint16) - if v, changed := fastpathTV.DecMapUint64Uint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), false, d) -} -func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, d *Decoder) { - if v, changed := f.DecMapUint64Uint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool, - d *Decoder) (_ map[uint64]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint64]uint16, xlen) - changed = true - } - if containerLen == 0 { - 
dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uint32) - if v, changed := fastpathTV.DecMapUint64Uint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), false, d) -} -func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, d *Decoder) { - if v, changed := f.DecMapUint64Uint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool, - d *Decoder) (_ map[uint64]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { - if 
rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uint64) - if v, changed := fastpathTV.DecMapUint64Uint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), false, d) -} -func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, d *Decoder) { - if v, changed := f.DecMapUint64Uint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool, - d *Decoder) (_ map[uint64]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uintptr) - if v, changed := fastpathTV.DecMapUint64UintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), false, d) -} -func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, d *Decoder) { - if v, changed := f.DecMapUint64UintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool, - d *Decoder) (_ map[uint64]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := 
dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]int) - if v, changed := fastpathTV.DecMapUint64IntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64IntV(rv2i(rv).(map[uint64]int), false, d) -} -func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) { - if v, changed := f.DecMapUint64IntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool, - d *Decoder) (_ map[uint64]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != 
nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]int8) - if v, changed := fastpathTV.DecMapUint64Int8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64Int8V(rv2i(rv).(map[uint64]int8), false, d) -} -func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, d *Decoder) { - if v, changed := f.DecMapUint64Int8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool, - d *Decoder) (_ map[uint64]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]int16) - if v, changed := fastpathTV.DecMapUint64Int16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64Int16V(rv2i(rv).(map[uint64]int16), false, d) -} -func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, d *Decoder) { - if v, changed := f.DecMapUint64Int16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool, - d *Decoder) 
(_ map[uint64]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint64]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]int32) - if v, changed := fastpathTV.DecMapUint64Int32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64Int32V(rv2i(rv).(map[uint64]int32), false, d) -} -func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) { - if v, changed := f.DecMapUint64Int32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool, - d *Decoder) (_ map[uint64]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if 
d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]int64) - if v, changed := fastpathTV.DecMapUint64Int64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64Int64V(rv2i(rv).(map[uint64]int64), false, d) -} -func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, d *Decoder) { - if v, changed := f.DecMapUint64Int64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool, - d *Decoder) (_ map[uint64]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]float32) - if v, changed := fastpathTV.DecMapUint64Float32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64Float32V(rv2i(rv).(map[uint64]float32), false, d) -} -func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, d *Decoder) { - if v, changed := 
f.DecMapUint64Float32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool, - d *Decoder) (_ map[uint64]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]float64) - if v, changed := fastpathTV.DecMapUint64Float64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64Float64V(rv2i(rv).(map[uint64]float64), false, d) -} -func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) { - if v, changed := f.DecMapUint64Float64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool, - d *Decoder) (_ map[uint64]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < 
containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]bool) - if v, changed := fastpathTV.DecMapUint64BoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUint64BoolV(rv2i(rv).(map[uint64]bool), false, d) -} -func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) { - if v, changed := f.DecMapUint64BoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool, - d *Decoder) (_ map[uint64]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uint64 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]interface{}) - if v, changed := fastpathTV.DecMapUintptrIntfV(*vp, true, d); changed { - *vp = v - } - return - } - 
fastpathTV.DecMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), false, d) -} -func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, d *Decoder) { - if v, changed := f.DecMapUintptrIntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool, - d *Decoder) (_ map[uintptr]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uintptr]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uintptr - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]string) - if v, changed := fastpathTV.DecMapUintptrStringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrStringV(rv2i(rv).(map[uintptr]string), false, d) -} -func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, d *Decoder) { - if v, changed := f.DecMapUintptrStringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool, - d *Decoder) (_ map[uintptr]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange 
&& v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uintptr]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]uint) - if v, changed := fastpathTV.DecMapUintptrUintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrUintV(rv2i(rv).(map[uintptr]uint), false, d) -} -func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, d *Decoder) { - if v, changed := f.DecMapUintptrUintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool, - d *Decoder) (_ map[uintptr]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = 
uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]uint8) - if v, changed := fastpathTV.DecMapUintptrUint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), false, d) -} -func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, d *Decoder) { - if v, changed := f.DecMapUintptrUint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool, - d *Decoder) (_ map[uintptr]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]uint16) - if v, changed := fastpathTV.DecMapUintptrUint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), false, d) -} -func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, d *Decoder) { - if v, changed := f.DecMapUintptrUint16V(*vp, true, d); changed { - 
*vp = v - } -} -func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool, - d *Decoder) (_ map[uintptr]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uintptr]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]uint32) - if v, changed := fastpathTV.DecMapUintptrUint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), false, d) -} -func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, d *Decoder) { - if v, changed := f.DecMapUintptrUint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool, - d *Decoder) (_ map[uintptr]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); 
j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]uint64) - if v, changed := fastpathTV.DecMapUintptrUint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), false, d) -} -func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, d *Decoder) { - if v, changed := f.DecMapUintptrUint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool, - d *Decoder) (_ map[uintptr]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]uintptr) - if v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, true, d); changed 
{ - *vp = v - } - return - } - fastpathTV.DecMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), false, d) -} -func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, d *Decoder) { - if v, changed := f.DecMapUintptrUintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool, - d *Decoder) (_ map[uintptr]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]int) - if v, changed := fastpathTV.DecMapUintptrIntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrIntV(rv2i(rv).(map[uintptr]int), false, d) -} -func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, d *Decoder) { - if v, changed := f.DecMapUintptrIntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool, - d *Decoder) (_ map[uintptr]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = 
make(map[uintptr]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]int8) - if v, changed := fastpathTV.DecMapUintptrInt8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), false, d) -} -func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, d *Decoder) { - if v, changed := f.DecMapUintptrInt8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool, - d *Decoder) (_ map[uintptr]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} 
- -func (d *Decoder) fastpathDecMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]int16) - if v, changed := fastpathTV.DecMapUintptrInt16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), false, d) -} -func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, d *Decoder) { - if v, changed := f.DecMapUintptrInt16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool, - d *Decoder) (_ map[uintptr]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uintptr]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]int32) - if v, changed := fastpathTV.DecMapUintptrInt32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), false, d) -} -func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, d *Decoder) { - if v, changed := f.DecMapUintptrInt32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool, - d *Decoder) (_ 
map[uintptr]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]int64) - if v, changed := fastpathTV.DecMapUintptrInt64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), false, d) -} -func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, d *Decoder) { - if v, changed := f.DecMapUintptrInt64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool, - d *Decoder) (_ map[uintptr]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - 
if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]float32) - if v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), false, d) -} -func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, d *Decoder) { - if v, changed := f.DecMapUintptrFloat32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool, - d *Decoder) (_ map[uintptr]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]float64) - if v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), false, d) -} 
-func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, d *Decoder) { - if v, changed := f.DecMapUintptrFloat64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool, - d *Decoder) (_ map[uintptr]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk uintptr - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]bool) - if v, changed := fastpathTV.DecMapUintptrBoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), false, d) -} -func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, d *Decoder) { - if v, changed := f.DecMapUintptrBoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool, - d *Decoder) (_ map[uintptr]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk 
uintptr - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]interface{}) - if v, changed := fastpathTV.DecMapIntIntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntIntfV(rv2i(rv).(map[int]interface{}), false, d) -} -func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) { - if v, changed := f.DecMapIntIntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool, - d *Decoder) (_ map[int]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntStringR(f 
*codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]string) - if v, changed := fastpathTV.DecMapIntStringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntStringV(rv2i(rv).(map[int]string), false, d) -} -func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) { - if v, changed := f.DecMapIntStringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool, - d *Decoder) (_ map[int]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntUintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]uint) - if v, changed := fastpathTV.DecMapIntUintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntUintV(rv2i(rv).(map[int]uint), false, d) -} -func (f fastpathT) DecMapIntUintX(vp *map[int]uint, d *Decoder) { - if v, changed := f.DecMapIntUintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool, - d *Decoder) (_ map[int]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]uint8) - if v, changed := fastpathTV.DecMapIntUint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntUint8V(rv2i(rv).(map[int]uint8), false, d) -} -func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) { - if v, changed := f.DecMapIntUint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool, - d *Decoder) (_ map[int]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} 
- -func (d *Decoder) fastpathDecMapIntUint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]uint16) - if v, changed := fastpathTV.DecMapIntUint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntUint16V(rv2i(rv).(map[int]uint16), false, d) -} -func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, d *Decoder) { - if v, changed := f.DecMapIntUint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool, - d *Decoder) (_ map[int]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntUint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]uint32) - if v, changed := fastpathTV.DecMapIntUint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntUint32V(rv2i(rv).(map[int]uint32), false, d) -} -func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, d *Decoder) { - if v, changed := f.DecMapIntUint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool, - d *Decoder) (_ map[int]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - 
containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]uint64) - if v, changed := fastpathTV.DecMapIntUint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntUint64V(rv2i(rv).(map[int]uint64), false, d) -} -func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) { - if v, changed := f.DecMapIntUint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool, - d *Decoder) (_ map[int]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) 
- if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]uintptr) - if v, changed := fastpathTV.DecMapIntUintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntUintptrV(rv2i(rv).(map[int]uintptr), false, d) -} -func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, d *Decoder) { - if v, changed := f.DecMapIntUintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, canChange bool, - d *Decoder) (_ map[int]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]int) - if v, changed := fastpathTV.DecMapIntIntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntIntV(rv2i(rv).(map[int]int), false, d) -} -func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) { - if v, changed := f.DecMapIntIntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool, - d *Decoder) (_ 
map[int]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntInt8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]int8) - if v, changed := fastpathTV.DecMapIntInt8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntInt8V(rv2i(rv).(map[int]int8), false, d) -} -func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, d *Decoder) { - if v, changed := f.DecMapIntInt8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool, - d *Decoder) (_ map[int]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] 
= 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntInt16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]int16) - if v, changed := fastpathTV.DecMapIntInt16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntInt16V(rv2i(rv).(map[int]int16), false, d) -} -func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, d *Decoder) { - if v, changed := f.DecMapIntInt16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool, - d *Decoder) (_ map[int]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]int32) - if v, changed := fastpathTV.DecMapIntInt32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntInt32V(rv2i(rv).(map[int]int32), false, d) -} -func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) { - if v, changed := f.DecMapIntInt32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange 
bool, - d *Decoder) (_ map[int]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntInt64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]int64) - if v, changed := fastpathTV.DecMapIntInt64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntInt64V(rv2i(rv).(map[int]int64), false, d) -} -func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, d *Decoder) { - if v, changed := f.DecMapIntInt64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool, - d *Decoder) (_ map[int]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue 
{ - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]float32) - if v, changed := fastpathTV.DecMapIntFloat32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntFloat32V(rv2i(rv).(map[int]float32), false, d) -} -func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, d *Decoder) { - if v, changed := f.DecMapIntFloat32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, canChange bool, - d *Decoder) (_ map[int]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]float64) - if v, changed := fastpathTV.DecMapIntFloat64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntFloat64V(rv2i(rv).(map[int]float64), false, d) -} -func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) { - if v, changed := f.DecMapIntFloat64V(*vp, true, d); changed { 
- *vp = v - } -} -func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool, - d *Decoder) (_ map[int]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int]bool) - if v, changed := fastpathTV.DecMapIntBoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapIntBoolV(rv2i(rv).(map[int]bool), false, d) -} -func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, d *Decoder) { - if v, changed := f.DecMapIntBoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool, - d *Decoder) (_ map[int]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int(dd.DecodeInt(intBitsize)) - if esep { - 
dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]interface{}) - if v, changed := fastpathTV.DecMapInt8IntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8IntfV(rv2i(rv).(map[int8]interface{}), false, d) -} -func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, d *Decoder) { - if v, changed := f.DecMapInt8IntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool, - d *Decoder) (_ map[int8]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[int8]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int8 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]string) - if v, changed := fastpathTV.DecMapInt8StringV(*vp, true, d); changed { - *vp = v - } - return - } - 
fastpathTV.DecMapInt8StringV(rv2i(rv).(map[int8]string), false, d) -} -func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, d *Decoder) { - if v, changed := f.DecMapInt8StringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool, - d *Decoder) (_ map[int8]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[int8]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]uint) - if v, changed := fastpathTV.DecMapInt8UintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8UintV(rv2i(rv).(map[int8]uint), false, d) -} -func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, d *Decoder) { - if v, changed := f.DecMapInt8UintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool, - d *Decoder) (_ map[int8]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv uint 
- hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]uint8) - if v, changed := fastpathTV.DecMapInt8Uint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8Uint8V(rv2i(rv).(map[int8]uint8), false, d) -} -func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, d *Decoder) { - if v, changed := f.DecMapInt8Uint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool, - d *Decoder) (_ map[int8]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]uint16) - if v, changed := 
fastpathTV.DecMapInt8Uint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8Uint16V(rv2i(rv).(map[int8]uint16), false, d) -} -func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, d *Decoder) { - if v, changed := f.DecMapInt8Uint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool, - d *Decoder) (_ map[int8]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int8]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]uint32) - if v, changed := fastpathTV.DecMapInt8Uint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8Uint32V(rv2i(rv).(map[int8]uint32), false, d) -} -func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, d *Decoder) { - if v, changed := f.DecMapInt8Uint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool, - d *Decoder) (_ map[int8]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]uint32, xlen) - 
changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]uint64) - if v, changed := fastpathTV.DecMapInt8Uint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8Uint64V(rv2i(rv).(map[int8]uint64), false, d) -} -func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, d *Decoder) { - if v, changed := f.DecMapInt8Uint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool, - d *Decoder) (_ map[int8]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8UintptrR(f *codecFnInfo, rv 
reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]uintptr) - if v, changed := fastpathTV.DecMapInt8UintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), false, d) -} -func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, d *Decoder) { - if v, changed := f.DecMapInt8UintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, canChange bool, - d *Decoder) (_ map[int8]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]int) - if v, changed := fastpathTV.DecMapInt8IntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8IntV(rv2i(rv).(map[int8]int), false, d) -} -func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, d *Decoder) { - if v, changed := f.DecMapInt8IntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool, - d *Decoder) (_ map[int8]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { 
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]int8) - if v, changed := fastpathTV.DecMapInt8Int8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8Int8V(rv2i(rv).(map[int8]int8), false, d) -} -func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, d *Decoder) { - if v, changed := f.DecMapInt8Int8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool, - d *Decoder) (_ map[int8]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d 
*Decoder) fastpathDecMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]int16) - if v, changed := fastpathTV.DecMapInt8Int16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8Int16V(rv2i(rv).(map[int8]int16), false, d) -} -func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, d *Decoder) { - if v, changed := f.DecMapInt8Int16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool, - d *Decoder) (_ map[int8]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int8]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]int32) - if v, changed := fastpathTV.DecMapInt8Int32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8Int32V(rv2i(rv).(map[int8]int32), false, d) -} -func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, d *Decoder) { - if v, changed := f.DecMapInt8Int32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool, - d *Decoder) (_ map[int8]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := 
dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]int64) - if v, changed := fastpathTV.DecMapInt8Int64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8Int64V(rv2i(rv).(map[int8]int64), false, d) -} -func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, d *Decoder) { - if v, changed := f.DecMapInt8Int64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool, - d *Decoder) (_ map[int8]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } 
- dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]float32) - if v, changed := fastpathTV.DecMapInt8Float32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8Float32V(rv2i(rv).(map[int8]float32), false, d) -} -func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, d *Decoder) { - if v, changed := f.DecMapInt8Float32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool, - d *Decoder) (_ map[int8]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]float64) - if v, changed := fastpathTV.DecMapInt8Float64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8Float64V(rv2i(rv).(map[int8]float64), false, d) -} -func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, d *Decoder) { - if v, changed := f.DecMapInt8Float64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool, - d *Decoder) (_ 
map[int8]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int8]bool) - if v, changed := fastpathTV.DecMapInt8BoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt8BoolV(rv2i(rv).(map[int8]bool), false, d) -} -func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, d *Decoder) { - if v, changed := f.DecMapInt8BoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool, - d *Decoder) (_ map[int8]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int8 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int8(dd.DecodeInt(8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - 
v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]interface{}) - if v, changed := fastpathTV.DecMapInt16IntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16IntfV(rv2i(rv).(map[int16]interface{}), false, d) -} -func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, d *Decoder) { - if v, changed := f.DecMapInt16IntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool, - d *Decoder) (_ map[int16]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[int16]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int16 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]string) - if v, changed := fastpathTV.DecMapInt16StringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16StringV(rv2i(rv).(map[int16]string), false, d) -} -func (f fastpathT) DecMapInt16StringX(vp 
*map[int16]string, d *Decoder) { - if v, changed := f.DecMapInt16StringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16StringV(v map[int16]string, canChange bool, - d *Decoder) (_ map[int16]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[int16]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]uint) - if v, changed := fastpathTV.DecMapInt16UintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16UintV(rv2i(rv).(map[int16]uint), false, d) -} -func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, d *Decoder) { - if v, changed := f.DecMapInt16UintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool, - d *Decoder) (_ map[int16]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || 
dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]uint8) - if v, changed := fastpathTV.DecMapInt16Uint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16Uint8V(rv2i(rv).(map[int16]uint8), false, d) -} -func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, d *Decoder) { - if v, changed := f.DecMapInt16Uint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool, - d *Decoder) (_ map[int16]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]uint16) - if v, changed := fastpathTV.DecMapInt16Uint16V(*vp, true, d); changed { - *vp = v - } - return - } - 
fastpathTV.DecMapInt16Uint16V(rv2i(rv).(map[int16]uint16), false, d) -} -func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, d *Decoder) { - if v, changed := f.DecMapInt16Uint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool, - d *Decoder) (_ map[int16]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[int16]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]uint32) - if v, changed := fastpathTV.DecMapInt16Uint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16Uint32V(rv2i(rv).(map[int16]uint32), false, d) -} -func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, d *Decoder) { - if v, changed := f.DecMapInt16Uint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool, - d *Decoder) (_ map[int16]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() 
- return v, changed - } - - var mk int16 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]uint64) - if v, changed := fastpathTV.DecMapInt16Uint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16Uint64V(rv2i(rv).(map[int16]uint64), false, d) -} -func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, d *Decoder) { - if v, changed := f.DecMapInt16Uint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool, - d *Decoder) (_ map[int16]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp 
:= rv2i(rv).(*map[int16]uintptr) - if v, changed := fastpathTV.DecMapInt16UintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), false, d) -} -func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, d *Decoder) { - if v, changed := f.DecMapInt16UintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool, - d *Decoder) (_ map[int16]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]int) - if v, changed := fastpathTV.DecMapInt16IntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16IntV(rv2i(rv).(map[int16]int), false, d) -} -func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, d *Decoder) { - if v, changed := f.DecMapInt16IntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool, - d *Decoder) (_ map[int16]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]int8) - if v, changed := fastpathTV.DecMapInt16Int8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16Int8V(rv2i(rv).(map[int16]int8), false, d) -} -func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, d *Decoder) { - if v, changed := f.DecMapInt16Int8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool, - d *Decoder) (_ map[int16]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - 
-func (d *Decoder) fastpathDecMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]int16) - if v, changed := fastpathTV.DecMapInt16Int16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16Int16V(rv2i(rv).(map[int16]int16), false, d) -} -func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, d *Decoder) { - if v, changed := f.DecMapInt16Int16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool, - d *Decoder) (_ map[int16]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[int16]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]int32) - if v, changed := fastpathTV.DecMapInt16Int32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16Int32V(rv2i(rv).(map[int16]int32), false, d) -} -func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, d *Decoder) { - if v, changed := f.DecMapInt16Int32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool, - d *Decoder) (_ map[int16]int32, changed bool) { - dd, esep := d.d, 
d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]int64) - if v, changed := fastpathTV.DecMapInt16Int64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16Int64V(rv2i(rv).(map[int16]int64), false, d) -} -func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, d *Decoder) { - if v, changed := f.DecMapInt16Int64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool, - d *Decoder) (_ map[int16]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue 
- } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]float32) - if v, changed := fastpathTV.DecMapInt16Float32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16Float32V(rv2i(rv).(map[int16]float32), false, d) -} -func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, d *Decoder) { - if v, changed := f.DecMapInt16Float32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool, - d *Decoder) (_ map[int16]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]float64) - if v, changed := fastpathTV.DecMapInt16Float64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16Float64V(rv2i(rv).(map[int16]float64), false, d) -} -func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, d *Decoder) { - if v, changed := f.DecMapInt16Float64V(*vp, true, d); changed { - *vp = v - } -} 
-func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool, - d *Decoder) (_ map[int16]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]bool) - if v, changed := fastpathTV.DecMapInt16BoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt16BoolV(rv2i(rv).(map[int16]bool), false, d) -} -func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, d *Decoder) { - if v, changed := f.DecMapInt16BoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool, - d *Decoder) (_ map[int16]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int16 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(dd.DecodeInt(16)) - if esep { - 
dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]interface{}) - if v, changed := fastpathTV.DecMapInt32IntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32IntfV(rv2i(rv).(map[int32]interface{}), false, d) -} -func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) { - if v, changed := f.DecMapInt32IntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool, - d *Decoder) (_ map[int32]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[int32]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int32 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]string) - if v, changed := fastpathTV.DecMapInt32StringV(*vp, true, d); changed { - *vp = v - } - return - } - 
fastpathTV.DecMapInt32StringV(rv2i(rv).(map[int32]string), false, d) -} -func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) { - if v, changed := f.DecMapInt32StringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool, - d *Decoder) (_ map[int32]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[int32]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uint) - if v, changed := fastpathTV.DecMapInt32UintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32UintV(rv2i(rv).(map[int32]uint), false, d) -} -func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, d *Decoder) { - if v, changed := f.DecMapInt32UintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool, - d *Decoder) (_ map[int32]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - 
var mk int32 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uint8) - if v, changed := fastpathTV.DecMapInt32Uint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32Uint8V(rv2i(rv).(map[int32]uint8), false, d) -} -func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) { - if v, changed := f.DecMapInt32Uint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool, - d *Decoder) (_ map[int32]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uint16) 
- if v, changed := fastpathTV.DecMapInt32Uint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32Uint16V(rv2i(rv).(map[int32]uint16), false, d) -} -func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, d *Decoder) { - if v, changed := f.DecMapInt32Uint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool, - d *Decoder) (_ map[int32]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int32]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uint32) - if v, changed := fastpathTV.DecMapInt32Uint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32Uint32V(rv2i(rv).(map[int32]uint32), false, d) -} -func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, d *Decoder) { - if v, changed := f.DecMapInt32Uint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool, - d *Decoder) (_ map[int32]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 8) - v = make(map[int32]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uint64) - if v, changed := fastpathTV.DecMapInt32Uint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32Uint64V(rv2i(rv).(map[int32]uint64), false, d) -} -func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) { - if v, changed := f.DecMapInt32Uint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool, - d *Decoder) (_ map[int32]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - 
-func (d *Decoder) fastpathDecMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uintptr) - if v, changed := fastpathTV.DecMapInt32UintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), false, d) -} -func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, d *Decoder) { - if v, changed := f.DecMapInt32UintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool, - d *Decoder) (_ map[int32]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]int) - if v, changed := fastpathTV.DecMapInt32IntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32IntV(rv2i(rv).(map[int32]int), false, d) -} -func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) { - if v, changed := f.DecMapInt32IntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool, - d *Decoder) (_ map[int32]int, changed bool) { - dd, esep := d.d, 
d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]int8) - if v, changed := fastpathTV.DecMapInt32Int8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32Int8V(rv2i(rv).(map[int32]int8), false, d) -} -func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, d *Decoder) { - if v, changed := f.DecMapInt32Int8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool, - d *Decoder) (_ map[int32]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = 
int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]int16) - if v, changed := fastpathTV.DecMapInt32Int16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32Int16V(rv2i(rv).(map[int32]int16), false, d) -} -func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, d *Decoder) { - if v, changed := f.DecMapInt32Int16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool, - d *Decoder) (_ map[int32]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int32]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]int32) - if v, changed := fastpathTV.DecMapInt32Int32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32Int32V(rv2i(rv).(map[int32]int32), false, d) -} -func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) { - if v, changed := f.DecMapInt32Int32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, 
canChange bool, - d *Decoder) (_ map[int32]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]int64) - if v, changed := fastpathTV.DecMapInt32Int64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32Int64V(rv2i(rv).(map[int32]int64), false, d) -} -func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, d *Decoder) { - if v, changed := f.DecMapInt32Int64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool, - d *Decoder) (_ map[int32]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { 
- if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]float32) - if v, changed := fastpathTV.DecMapInt32Float32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32Float32V(rv2i(rv).(map[int32]float32), false, d) -} -func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, d *Decoder) { - if v, changed := f.DecMapInt32Float32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool, - d *Decoder) (_ map[int32]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]float64) - if v, changed := fastpathTV.DecMapInt32Float64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32Float64V(rv2i(rv).(map[int32]float64), false, d) -} -func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) { - 
if v, changed := f.DecMapInt32Float64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool, - d *Decoder) (_ map[int32]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]bool) - if v, changed := fastpathTV.DecMapInt32BoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt32BoolV(rv2i(rv).(map[int32]bool), false, d) -} -func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) { - if v, changed := f.DecMapInt32BoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool, - d *Decoder) (_ map[int32]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int32 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - 
if esep { - dd.ReadMapElemKey() - } - mk = int32(dd.DecodeInt(32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]interface{}) - if v, changed := fastpathTV.DecMapInt64IntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64IntfV(rv2i(rv).(map[int64]interface{}), false, d) -} -func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, d *Decoder) { - if v, changed := f.DecMapInt64IntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool, - d *Decoder) (_ map[int64]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int64]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int64 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]string) - if v, changed := 
fastpathTV.DecMapInt64StringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64StringV(rv2i(rv).(map[int64]string), false, d) -} -func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, d *Decoder) { - if v, changed := f.DecMapInt64StringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool, - d *Decoder) (_ map[int64]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int64]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uint) - if v, changed := fastpathTV.DecMapInt64UintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64UintV(rv2i(rv).(map[int64]uint), false, d) -} -func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, d *Decoder) { - if v, changed := f.DecMapInt64UintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool, - d *Decoder) (_ map[int64]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uint, xlen) - changed = 
true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uint8) - if v, changed := fastpathTV.DecMapInt64Uint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64Uint8V(rv2i(rv).(map[int64]uint8), false, d) -} -func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, d *Decoder) { - if v, changed := f.DecMapInt64Uint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool, - d *Decoder) (_ map[int64]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { - 
if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uint16) - if v, changed := fastpathTV.DecMapInt64Uint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64Uint16V(rv2i(rv).(map[int64]uint16), false, d) -} -func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, d *Decoder) { - if v, changed := f.DecMapInt64Uint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool, - d *Decoder) (_ map[int64]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int64]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uint32) - if v, changed := fastpathTV.DecMapInt64Uint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64Uint32V(rv2i(rv).(map[int64]uint32), false, d) -} -func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, d *Decoder) { - if v, changed := f.DecMapInt64Uint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool, - d *Decoder) (_ map[int64]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && 
v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uint64) - if v, changed := fastpathTV.DecMapInt64Uint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64Uint64V(rv2i(rv).(map[int64]uint64), false, d) -} -func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, d *Decoder) { - if v, changed := f.DecMapInt64Uint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool, - d *Decoder) (_ map[int64]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - 
dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uintptr) - if v, changed := fastpathTV.DecMapInt64UintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), false, d) -} -func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, d *Decoder) { - if v, changed := f.DecMapInt64UintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool, - d *Decoder) (_ map[int64]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]int) - if v, changed := fastpathTV.DecMapInt64IntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64IntV(rv2i(rv).(map[int64]int), false, d) -} -func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, d *Decoder) { - if v, changed := f.DecMapInt64IntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool, - d *Decoder) (_ map[int64]int, 
changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]int8) - if v, changed := fastpathTV.DecMapInt64Int8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64Int8V(rv2i(rv).(map[int64]int8), false, d) -} -func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, d *Decoder) { - if v, changed := f.DecMapInt64Int8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool, - d *Decoder) (_ map[int64]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - 
continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]int16) - if v, changed := fastpathTV.DecMapInt64Int16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64Int16V(rv2i(rv).(map[int64]int16), false, d) -} -func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, d *Decoder) { - if v, changed := f.DecMapInt64Int16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool, - d *Decoder) (_ map[int64]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int64]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]int32) - if v, changed := fastpathTV.DecMapInt64Int32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64Int32V(rv2i(rv).(map[int64]int32), false, d) -} -func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, d *Decoder) { - if v, changed := f.DecMapInt64Int32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int32V(v 
map[int64]int32, canChange bool, - d *Decoder) (_ map[int64]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]int64) - if v, changed := fastpathTV.DecMapInt64Int64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64Int64V(rv2i(rv).(map[int64]int64), false, d) -} -func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, d *Decoder) { - if v, changed := f.DecMapInt64Int64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool, - d *Decoder) (_ map[int64]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if 
dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]float32) - if v, changed := fastpathTV.DecMapInt64Float32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64Float32V(rv2i(rv).(map[int64]float32), false, d) -} -func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, d *Decoder) { - if v, changed := f.DecMapInt64Float32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool, - d *Decoder) (_ map[int64]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]float64) - if v, changed := fastpathTV.DecMapInt64Float64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64Float64V(rv2i(rv).(map[int64]float64), false, d) -} -func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, 
d *Decoder) { - if v, changed := f.DecMapInt64Float64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool, - d *Decoder) (_ map[int64]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]bool) - if v, changed := fastpathTV.DecMapInt64BoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapInt64BoolV(rv2i(rv).(map[int64]bool), false, d) -} -func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, d *Decoder) { - if v, changed := f.DecMapInt64BoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool, - d *Decoder) (_ map[int64]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk int64 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); 
j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt(64) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]interface{}) - if v, changed := fastpathTV.DecMapBoolIntfV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolIntfV(rv2i(rv).(map[bool]interface{}), false, d) -} -func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, d *Decoder) { - if v, changed := f.DecMapBoolIntfV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool, - d *Decoder) (_ map[bool]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[bool]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk bool - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolStringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]string) - if v, changed := 
fastpathTV.DecMapBoolStringV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolStringV(rv2i(rv).(map[bool]string), false, d) -} -func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, d *Decoder) { - if v, changed := f.DecMapBoolStringV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool, - d *Decoder) (_ map[bool]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[bool]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]uint) - if v, changed := fastpathTV.DecMapBoolUintV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolUintV(rv2i(rv).(map[bool]uint), false, d) -} -func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, d *Decoder) { - if v, changed := f.DecMapBoolUintV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool, - d *Decoder) (_ map[bool]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uint, xlen) - changed = true - } - if containerLen 
== 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]uint8) - if v, changed := fastpathTV.DecMapBoolUint8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolUint8V(rv2i(rv).(map[bool]uint8), false, d) -} -func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, d *Decoder) { - if v, changed := f.DecMapBoolUint8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool, - d *Decoder) (_ map[bool]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := 
rv2i(rv).(*map[bool]uint16) - if v, changed := fastpathTV.DecMapBoolUint16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolUint16V(rv2i(rv).(map[bool]uint16), false, d) -} -func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, d *Decoder) { - if v, changed := f.DecMapBoolUint16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool, - d *Decoder) (_ map[bool]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[bool]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]uint32) - if v, changed := fastpathTV.DecMapBoolUint32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolUint32V(rv2i(rv).(map[bool]uint32), false, d) -} -func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, d *Decoder) { - if v, changed := f.DecMapBoolUint32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool, - d *Decoder) (_ map[bool]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 
5) - v = make(map[bool]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]uint64) - if v, changed := fastpathTV.DecMapBoolUint64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolUint64V(rv2i(rv).(map[bool]uint64), false, d) -} -func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, d *Decoder) { - if v, changed := f.DecMapBoolUint64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool, - d *Decoder) (_ map[bool]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUintptrR(f 
*codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]uintptr) - if v, changed := fastpathTV.DecMapBoolUintptrV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), false, d) -} -func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, d *Decoder) { - if v, changed := f.DecMapBoolUintptrV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool, - d *Decoder) (_ map[bool]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolIntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]int) - if v, changed := fastpathTV.DecMapBoolIntV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolIntV(rv2i(rv).(map[bool]int), false, d) -} -func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, d *Decoder) { - if v, changed := f.DecMapBoolIntV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool, - d *Decoder) (_ map[bool]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && 
v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]int8) - if v, changed := fastpathTV.DecMapBoolInt8V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolInt8V(rv2i(rv).(map[bool]int8), false, d) -} -func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, d *Decoder) { - if v, changed := f.DecMapBoolInt8V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool, - d *Decoder) (_ map[bool]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d 
*Decoder) fastpathDecMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]int16) - if v, changed := fastpathTV.DecMapBoolInt16V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolInt16V(rv2i(rv).(map[bool]int16), false, d) -} -func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, d *Decoder) { - if v, changed := f.DecMapBoolInt16V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool, - d *Decoder) (_ map[bool]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[bool]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]int32) - if v, changed := fastpathTV.DecMapBoolInt32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolInt32V(rv2i(rv).(map[bool]int32), false, d) -} -func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, d *Decoder) { - if v, changed := f.DecMapBoolInt32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool, - d *Decoder) (_ map[bool]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := 
dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]int64) - if v, changed := fastpathTV.DecMapBoolInt64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolInt64V(rv2i(rv).(map[bool]int64), false, d) -} -func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, d *Decoder) { - if v, changed := f.DecMapBoolInt64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool, - d *Decoder) (_ map[bool]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - 
dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]float32) - if v, changed := fastpathTV.DecMapBoolFloat32V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolFloat32V(rv2i(rv).(map[bool]float32), false, d) -} -func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, d *Decoder) { - if v, changed := f.DecMapBoolFloat32V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool, - d *Decoder) (_ map[bool]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]float64) - if v, changed := fastpathTV.DecMapBoolFloat64V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolFloat64V(rv2i(rv).(map[bool]float64), false, d) -} -func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, d *Decoder) { - if v, changed := f.DecMapBoolFloat64V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool, - d *Decoder) (_ 
map[bool]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]bool) - if v, changed := fastpathTV.DecMapBoolBoolV(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.DecMapBoolBoolV(rv2i(rv).(map[bool]bool), false, d) -} -func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, d *Decoder) { - if v, changed := f.DecMapBoolBoolV(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool, - d *Decoder) (_ map[bool]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - - var mk bool - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - 
} - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl deleted file mode 100644 index 1df9504c55c..00000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl +++ /dev/null @@ -1,465 +0,0 @@ -// +build !notfastpath - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl -// ************************************************************ - -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register then in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. -// This file can be omitted without causing a build failure. -// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types, -// - map of all builtin types to string or interface value -// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
-// - -import ( - "reflect" - "sort" -) - -const fastpathEnabled = true - -type fastpathT struct {} - -var fastpathTV fastpathT - -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*Encoder, *codecFnInfo, reflect.Value) - decfn func(*Decoder, *codecFnInfo, reflect.Value) -} - -type fastpathA [{{ .FastpathLen }}]fastpathE - -func (x *fastpathA) index(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - h, i, j := 0, 0, {{ .FastpathLen }} // len(x) - for i < j { - h = i + (j-i)/2 - if x[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - } - if i < {{ .FastpathLen }} && x[i].rtid == rtid { - return i - } - return -1 -} - -type fastpathAslice []fastpathE - -func (x fastpathAslice) Len() int { return len(x) } -func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } -func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -var fastpathAV fastpathA - -// due to possible initialization loop error, make fastpath in an init() -func init() { - i := 0 - fn := func(v interface{}, - fe func(*Encoder, *codecFnInfo, reflect.Value), - fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) { - xrt := reflect.TypeOf(v) - xptr := rt2id(xrt) - if useLookupRecognizedTypes { - recognizedRtids = append(recognizedRtids, xptr) - recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(xrt))) - } - fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} - i++ - return - } - - {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} - - {{range .Values}}{{if not .Primitive}}{{if .MapKey }} - fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} - - sort.Sort(fastpathAslice(fastpathAV[:])) -} - -// -- encode - -// -- -- fast 
path type switch -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}:{{else}} - case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e){{if not .MapKey }} - case *[]{{ .Elem }}:{{else}} - case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) -{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -{{/* **** removing this block, as they are never called directly **** -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) - case *[]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) -{{end}}{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - case map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) - case *map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) -{{end}}{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} -*/}} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - -func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e) - } else { - fastpathTV.{{ 
.MethodNamePfx "Enc" false }}V(rv2i(rv).([]{{ .Elem }}), e) - } -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) { - if v == nil { e.e.EncodeNil(); return } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { ee.WriteArrayElem() } - {{ encmd .Elem "v2"}} - } - ee.WriteArrayEnd() -} - -func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - {{ encmd .Elem "v2"}} - } - ee.WriteMapEnd() -} - -{{end}}{{end}}{{end}} - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - -func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) { - if v == nil { e.e.EncodeNil(); return } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - {{end}}if e.h.Canonical { - {{if eq .MapKey "interface{}"}}{{/* out of band - */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI {{/* put loop variables outside. 
seems currently needed for better perf */}} - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { ee.WriteMapElemKey() } - e.asis(v2[j].v) - if esep { ee.WriteMapElemValue() } - e.encode(v[v2[j].i]) - } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) - var i int - for k, _ := range v { - v2[i] = {{ $x }}(k) - i++ - } - sort.Sort({{ sorttype .MapKey false}}(v2)) - for _, k2 := range v2 { - if esep { ee.WriteMapElemKey() } - {{if eq .MapKey "string"}}if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - }{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} - if esep { ee.WriteMapElemValue() } - {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} - } {{end}} - } else { - for k2, v2 := range v { - if esep { ee.WriteMapElemKey() } - {{if eq .MapKey "string"}}if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - }{{else}}{{ encmd .MapKey "k2"}}{{end}} - if esep { ee.WriteMapElemValue() } - {{ encmd .Elem "v2"}} - } - } - ee.WriteMapEnd() -} - -{{end}}{{end}}{{end}} - -// -- decode - -// -- -- fast path type switch -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}:{{else}} - case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d){{if not .MapKey }} - case *[]{{ .Elem }}: {{else}} - case *map[{{ .MapKey }}]{{ .Elem }}: {{end}} - if v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d); changed2 { - *v = v2 - } -{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { - switch v := 
iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case *[]{{ .Elem }}: {{else}} - case *map[{{ .MapKey }}]{{ .Elem }}: {{end}} - *v = nil -{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} -{{/* -Slices can change if they -- did not come from an array -- are addressable (from a ptr) -- are settable (e.g. contained in an interface{}) -*/}} -func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - var vp = rv2i(rv).(*[]{{ .Elem }}) - if v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d); changed { - *vp = v - } - } else { - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).([]{{ .Elem }}), !array, d) - } -} -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) { - if v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { - dd := d.d - {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}} - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []{{ .Elem }}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]{{ .Elem }}, xlen) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; (hasLen && j 
< containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) - } else { - xlen = 8 - } - v = make([]{{ .Elem }}, xlen) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, {{ zerocmd .Elem }}) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { - {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } - } - if canChange { - if j < len(v) { - v = v[:j] - changed = true - } else if j == 0 && v == nil { - v = make([]{{ .Elem }}, 0) - changed = true - } - } - slh.End() - return v, changed -} - -{{end}}{{end}}{{end}} - - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} -{{/* -Maps can change if they are -- addressable (from a ptr) -- settable (e.g. 
contained in an interface{}) -*/}} -func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }}) - if v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed { - *vp = v - } - return - } - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d) -} -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) { - if v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed { - *vp = v - } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool, - d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - {{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}} - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) - v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - {{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}} - var mk {{ .MapKey }} - var mv {{ .Elem }} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { dd.ReadMapElemKey() } - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. 
*/}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if esep { dd.ReadMapElemValue() } - if dd.TryDecodeAsNil() { - if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} } - continue - } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - return v, changed -} - -{{end}}{{end}}{{end}} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go deleted file mode 100644 index 9573d64ab01..00000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.not.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build notfastpath - -package codec - -import "reflect" - -const fastpathEnabled = false - -// The generated fast-path code is very large, and adds a few seconds to the build time. -// This causes test execution, execution of small tools which use codec, etc -// to take a long time. -// -// To mitigate, we now support the notfastpath tag. -// This tag disables fastpath during build, allowing for faster build, test execution, -// short-program runs, etc. 
- -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } -func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { return false } - -type fastpathT struct{} -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*Encoder, *codecFnInfo, reflect.Value) - decfn func(*Decoder, *codecFnInfo, reflect.Value) -} -type fastpathA [0]fastpathE - -func (x fastpathA) index(rtid uintptr) int { return -1 } - -var fastpathAV fastpathA -var fastpathTV fastpathT diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl deleted file mode 100644 index d9940c0ad6f..00000000000 --- a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl +++ /dev/null @@ -1,77 +0,0 @@ -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} -var {{var "c"}} bool {{/* // changed */}} -_ = {{var "c"}}{{end}} -if {{var "l"}} == 0 { - {{if isSlice }}if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{end}} {{if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } {{end}} -} else { - {{var "hl"}} := {{var "l"}} > 0 - var {{var "rl"}} int; _ = {{var "rl"}} - {{if isSlice }} if {{var "hl"}} { - if {{var "l"}} > cap({{var "v"}}) { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = 
true - } else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } - } {{end}} - var {{var "j"}} int - // var {{var "dn"}} bool - for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { - {{if not isArray}} if {{var "j"}} == 0 && len({{var "v"}}) == 0 { - if {{var "hl"}} { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - } else { - {{var "rl"}} = 8 - } - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - {{var "c"}} = true - }{{end}} - {{var "h"}}.ElemContainerState({{var "j"}}) - // {{var "dn"}} = r.TryDecodeAsNil() - {{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }} - {{ decLineVar $x }} - {{var "v"}} <- {{ $x }} - {{else}} - // if indefinite, etc, then expand the slice if necessary - var {{var "db"}} bool - if {{var "j"}} >= len({{var "v"}}) { - {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true - {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true - {{end}} - } - if {{var "db"}} { - z.DecSwallow() - } else { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - {{end}} - } - {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = make([]{{ .Typ }}, 0) - {{var "c"}} = true - } {{end}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -}{{end}} - diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl deleted file mode 100644 index 8323b54940d..00000000000 --- a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl +++ /dev/null @@ -1,42 +0,0 @@ -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := r.ReadMapStart() -{{var "bh"}} := z.DecBasicHandle() -if {{var "v"}} == nil { - 
{{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -var {{var "mk"}} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if {{var "bh"}}.MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} != 0 { -{{var "hl"}} := {{var "l"}} > 0 - for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { - r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true{{end}} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} - {{var "mdn"}} = false - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} - if {{var "mdn"}} { - if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } - } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} // else len==0: TODO: Should we clear map 
entries? -r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go deleted file mode 100644 index c89a14d676b..00000000000 --- a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go +++ /dev/null @@ -1,245 +0,0 @@ -/* // +build ignore */ - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl -// ************************************************************ - -package codec - -import ( - "encoding" - "reflect" -) - -// GenVersion is the current version of codecgen. -const GenVersion = 8 - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continuously and without notice. -// -// To help enforce this, we create an unexported type with exported members. -// The only way to get the type is via the one exported type that we control (somewhat). -// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { - return genHelperEncoder{e: e}, e.e -} - -// GenHelperDecoder is exported so that it can be used externally by codecgen. -// -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. 
-func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { - return genHelperDecoder{d: d}, d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - e *Encoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - d *Decoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.cf.be // f.e.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // println(">>>>>>>>> EncFallback") - // f.e.encodeI(iv, false, false) - f.e.encodeValue(reflect.ValueOf(iv), nil, false) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncRaw(iv Raw) { - f.e.rawBytes(iv) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperEncoder) TimeRtidIfBinc() uintptr { - if _, ok := f.e.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.cf.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) HasExtensions() bool { - return len(f.e.h.extHandle) != 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v) - if rt.Kind() == reflect.Ptr { - rt = rt.Elem() - } - rtid := rt2id(rt) - if xfFn := f.e.h.getExt(rtid); xfFn != nil { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) - return true - } - return false -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { - f.d.swallow() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchBuffer() []byte { - return f.d.b[:] -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - // println(">>>>>>>>> DecFallback") - rv := reflect.ValueOf(iv) - if chkPtr { - rv = f.d.ensureDecodeable(rv) - } - f.d.decodeValue(rv, nil, false, false) - // f.d.decodeValueFallback(rv) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeStringAsBytes() - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecRaw() []byte { - return f.d.rawBytes() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) TimeRtidIfBinc() uintptr { - if _, ok := f.d.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) HasExtensions() bool { - return len(f.d.h.extHandle) != 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v).Elem() - rtid := rt2id(rt) - if xfFn := f.d.h.getExt(rtid); xfFn != nil { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) - return true - } - return false -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { - return decInferLen(clen, maxlen, unit) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) StringView(v []byte) string { - return stringView(v) -} diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl deleted file mode 100644 index 79b6145c99b..00000000000 --- a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl +++ /dev/null @@ -1,220 +0,0 @@ -/* // +build ignore */ - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl -// ************************************************************ - -package codec - -import ( - "encoding" - "reflect" -) - -// GenVersion is the current version of codecgen. -const GenVersion = {{ .Version }} - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continuously and without notice. -// -// To help enforce this, we create an unexported type with exported members. -// The only way to get the type is via the one exported type that we control (somewhat). 
-// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { - return genHelperEncoder{e:e}, e.e -} - -// GenHelperDecoder is exported so that it can be used externally by codecgen. -// -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { - return genHelperDecoder{d:d}, d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - e *Encoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - d *Decoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.cf.be // f.e.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // println(">>>>>>>>> EncFallback") - // f.e.encodeI(iv, false, false) - f.e.encodeValue(reflect.ValueOf(iv), nil, false) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncRaw(iv Raw) { - f.e.rawBytes(iv) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) TimeRtidIfBinc() uintptr { - if _, ok := f.e.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.cf.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) HasExtensions() bool { - return len(f.e.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v) - if rt.Kind() == reflect.Ptr { - rt = rt.Elem() - } - rtid := rt2id(rt) - if xfFn := f.e.h.getExt(rtid); xfFn != nil { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) - return true - } - return false -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { - f.d.swallow() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecScratchBuffer() []byte { - return f.d.b[:] -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - // println(">>>>>>>>> DecFallback") - rv := reflect.ValueOf(iv) - if chkPtr { - rv = f.d.ensureDecodeable(rv) - } - f.d.decodeValue(rv, nil, false, false) - // f.d.decodeValueFallback(rv) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeStringAsBytes() - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecRaw() []byte { - return f.d.rawBytes() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) TimeRtidIfBinc() uintptr { - if _, ok := f.d.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) HasExtensions() bool { - return len(f.d.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v).Elem() - rtid := rt2id(rt) - if xfFn := f.d.h.getExt(rtid); xfFn != nil { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) - return true - } - return false -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { - return decInferLen(clen, maxlen, unit) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) StringView(v []byte) string { - return stringView(v) -} - diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go deleted file mode 100644 index b50a6024dde..00000000000 --- a/vendor/github.com/ugorji/go/codec/gen.generated.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// DO NOT EDIT. 
THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl - -const genDecMapTmpl = ` -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := r.ReadMapStart() -{{var "bh"}} := z.DecBasicHandle() -if {{var "v"}} == nil { - {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -var {{var "mk"}} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if {{var "bh"}}.MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} != 0 { -{{var "hl"}} := {{var "l"}} > 0 - for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { - r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true{{end}} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} - {{var "mdn"}} = false - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} - if {{var "mdn"}} { - if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } - } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} // else len==0: TODO: Should we clear map entries? 
-r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} -` - -const genDecListTmpl = ` -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} -var {{var "c"}} bool {{/* // changed */}} -_ = {{var "c"}}{{end}} -if {{var "l"}} == 0 { - {{if isSlice }}if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{end}} {{if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } {{end}} -} else { - {{var "hl"}} := {{var "l"}} > 0 - var {{var "rl"}} int; _ = {{var "rl"}} - {{if isSlice }} if {{var "hl"}} { - if {{var "l"}} > cap({{var "v"}}) { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - } else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } - } {{end}} - var {{var "j"}} int - // var {{var "dn"}} bool - for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { - {{if not isArray}} if {{var "j"}} == 0 && len({{var "v"}}) == 0 { - if {{var "hl"}} { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - } else { - {{var "rl"}} = 8 - } - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - {{var "c"}} = true - }{{end}} - {{var "h"}}.ElemContainerState({{var "j"}}) - // {{var "dn"}} = r.TryDecodeAsNil() - {{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }} - {{ decLineVar $x }} - {{var "v"}} <- {{ $x }} - {{else}} - // if indefinite, etc, then expand the slice if necessary - var {{var "db"}} bool - if {{var "j"}} >= 
len({{var "v"}}) { - {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true - {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true - {{end}} - } - if {{var "db"}} { - z.DecSwallow() - } else { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - {{end}} - } - {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = make([]{{ .Typ }}, 0) - {{var "c"}} = true - } {{end}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -}{{end}} - -` - diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go deleted file mode 100644 index 7d430e5d41b..00000000000 --- a/vendor/github.com/ugorji/go/codec/gen.go +++ /dev/null @@ -1,2030 +0,0 @@ -// +build codecgen.exec - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "go/format" - "io" - "io/ioutil" - "math/rand" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "text/template" - "time" - "unicode" - "unicode/utf8" -) - -// --------------------------------------------------- -// codecgen supports the full cycle of reflection-based codec: -// - RawExt -// - Raw -// - Builtins -// - Extensions -// - (Binary|Text|JSON)(Unm|M)arshal -// - generic by-kind -// -// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. -// In those areas, we try to only do reflection or interface-conversion when NECESSARY: -// - Extensions, only if Extensions are configured. -// -// However, codecgen doesn't support the following: -// - Canonical option. (codecgen IGNORES it currently) -// This is just because it has not been implemented. 
-// -// During encode/decode, Selfer takes precedence. -// A type implementing Selfer will know how to encode/decode itself statically. -// -// The following field types are supported: -// array: [n]T -// slice: []T -// map: map[K]V -// primitive: [u]int[n], float(32|64), bool, string -// struct -// -// --------------------------------------------------- -// Note that a Selfer cannot call (e|d).(En|De)code on itself, -// as this will cause a circular reference, as (En|De)code will call Selfer methods. -// Any type that implements Selfer must implement completely and not fallback to (En|De)code. -// -// In addition, code in this file manages the generation of fast-path implementations of -// encode/decode of slices/maps of primitive keys/values. -// -// Users MUST re-generate their implementations whenever the code shape changes. -// The generated code will panic if it was generated with a version older than the supporting library. -// --------------------------------------------------- -// -// codec framework is very feature rich. -// When encoding or decoding into an interface, it depends on the runtime type of the interface. -// The type of the interface may be a named type, an extension, etc. -// Consequently, we fallback to runtime codec for encoding/decoding interfaces. -// In addition, we fallback for any value which cannot be guaranteed at runtime. -// This allows us support ANY value, including any named types, specifically those which -// do not implement our interfaces (e.g. Selfer). -// -// This explains some slowness compared to other code generation codecs (e.g. msgp). -// This reduction in speed is only seen when your refers to interfaces, -// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} } -// -// codecgen will panic if the file was generated with an old version of the library in use. -// -// Note: -// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. 
-// This way, there isn't a function call overhead just to see that we should not enter a block of code. -// -// Note: -// codecgen-generated code depends on the variables defined by fast-path.generated.go. -// consequently, you cannot run with tags "codecgen notfastpath". - -// GenVersion is the current version of codecgen. -// -// NOTE: Increment this value each time codecgen changes fundamentally. -// Fundamental changes are: -// - helper methods change (signature change, new ones added, some removed, etc) -// - codecgen command line changes -// -// v1: Initial Version -// v2: -// v3: Changes for Kubernetes: -// changes in signature of some unpublished helper methods and codecgen cmdline arguments. -// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) -// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. -// v6: removed unsafe from gen, and now uses codecgen.exec tag -const genVersion = 8 - -const ( - genCodecPkg = "codec1978" - genTempVarPfx = "yy" - genTopLevelVarName = "x" - - // ignore canBeNil parameter, and always set to true. - // This is because nil can appear anywhere, so we should always check. - genAnythingCanBeNil = true - - // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; - // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals - // are not executed a lot. - // - // From testing, it didn't make much difference in runtime, so keep as true (one function only) - genUseOneFunctionForDecStructMap = true -) - -type genStructMapStyle uint8 - -const ( - genStructMapStyleConsolidated genStructMapStyle = iota - genStructMapStyleLenPrefix - genStructMapStyleCheckBreak -) - -var ( - genAllTypesSamePkgErr = errors.New("All types must be in the same package") - genExpectArrayOrMapErr = errors.New("unexpected type. 
Expecting array/map/slice") - genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") - genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) -) - -// genRunner holds some state used during a Gen run. -type genRunner struct { - w io.Writer // output - c uint64 // counter used for generating varsfx - t []reflect.Type // list of types to run selfer on - - tc reflect.Type // currently running selfer on this type - te map[uintptr]bool // types for which the encoder has been created - td map[uintptr]bool // types for which the decoder has been created - cp string // codec import path - - im map[string]reflect.Type // imports to add - imn map[string]string // package names of imports to add - imc uint64 // counter for import numbers - - is map[reflect.Type]struct{} // types seen during import search - bp string // base PkgPath, for which we are generating for - - cpfx string // codec package prefix - - tm map[reflect.Type]struct{} // types for which enc/dec must be generated - ts []reflect.Type // types for which enc/dec must be generated - - xs string // top level variable/constant suffix - hn string // fn helper type name - - ti *TypeInfos - // rr *rand.Rand // random generator for file-specific types - - nx bool // no extensions -} - -// Gen will write a complete go file containing Selfer implementations for each -// type passed. All the types must be in the same package. -// -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool, - ti *TypeInfos, typ ...reflect.Type) { - // All types passed to this method do not have a codec.Selfer method implemented directly. - // codecgen already checks the AST and skips any types that define the codec.Selfer methods. 
- // Consequently, there's no need to check and trim them if they implement codec.Selfer - - if len(typ) == 0 { - return - } - x := genRunner{ - w: w, - t: typ, - te: make(map[uintptr]bool), - td: make(map[uintptr]bool), - im: make(map[string]reflect.Type), - imn: make(map[string]string), - is: make(map[reflect.Type]struct{}), - tm: make(map[reflect.Type]struct{}), - ts: []reflect.Type{}, - bp: genImportPath(typ[0]), - xs: uid, - ti: ti, - nx: noExtensions, - } - if x.ti == nil { - x.ti = defTypeInfos - } - if x.xs == "" { - rr := rand.New(rand.NewSource(time.Now().UnixNano())) - x.xs = strconv.FormatInt(rr.Int63n(9999), 10) - } - - // gather imports first: - x.cp = genImportPath(reflect.TypeOf(x)) - x.imn[x.cp] = genCodecPkg - for _, t := range typ { - // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) - if genImportPath(t) != x.bp { - panic(genAllTypesSamePkgErr) - } - x.genRefPkgs(t) - } - if buildTags != "" { - x.line("// +build " + buildTags) - x.line("") - } - x.line(` - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -`) - x.line("package " + pkgName) - x.line("") - x.line("import (") - if x.cp != x.bp { - x.cpfx = genCodecPkg + "." 
- x.linef("%s \"%s\"", genCodecPkg, x.cp) - } - // use a sorted set of im keys, so that we can get consistent output - imKeys := make([]string, 0, len(x.im)) - for k, _ := range x.im { - imKeys = append(imKeys, k) - } - sort.Strings(imKeys) - for _, k := range imKeys { // for k, _ := range x.im { - x.linef("%s \"%s\"", x.imn[k], k) - } - // add required packages - for _, k := range [...]string{"reflect", "runtime", "fmt", "errors"} { - if _, ok := x.im[k]; !ok { - x.line("\"" + k + "\"") - } - } - x.line(")") - x.line("") - - x.line("const (") - x.linef("// ----- content types ----") - x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8)) - x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW)) - x.linef("// ----- value types used ----") - x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray)) - x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap)) - x.linef("// ----- containerStateValues ----") - x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey)) - x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue)) - x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd)) - x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem)) - x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd)) - x.line(")") - x.line("var (") - x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())") - x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") - x.line(")") - x.line("") - - x.hn = "codecSelfer" + x.xs - x.line("type " + x.hn + " struct{}") - x.line("") - - x.varsfxreset() - x.line("func init() {") - x.linef("if %sGenVersion != %v {", x.cpfx, genVersion) - x.line("_, file, _, _ := runtime.Caller(0)") - x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", `) - x.linef(`%v, %sGenVersion, file)`, genVersion, x.cpfx) - x.line("panic(err)") - x.linef("}") - x.line("if false { // reference the types, but skip this branch at build/run time") - var n int - // for k, t := range x.im { - for _, k := range imKeys { - t := x.im[k] - x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) - n++ - } - if n > 0 { - x.out("_") - for i := 1; i < n; i++ { - x.out(", _") - } - x.out(" = v0") - for i := 1; i < n; i++ { - x.outf(", v%v", i) - } - } - x.line("} ") // close if false - x.line("}") // close init - x.line("") - - // generate rest of type info - for _, t := range typ { - x.tc = t - x.selfer(true) - x.selfer(false) - } - - for _, t := range x.ts { - rtid := rt2id(t) - // generate enc functions for all these slice/map types. - x.varsfxreset() - x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(true) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.encListFallback("v", t) - case reflect.Map: - x.encMapFallback("v", t) - default: - panic(genExpectArrayOrMapErr) - } - x.line("}") - x.line("") - - // generate dec functions for all these slice/map types. - x.varsfxreset() - x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(false) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.decListFallback("v", rtid, t) - case reflect.Map: - x.decMapFallback("v", rtid, t) - default: - panic(genExpectArrayOrMapErr) - } - x.line("}") - x.line("") - } - - x.line("") -} - -func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { - // return varname != genTopLevelVarName && t != x.tc - // the only time we checkForSelfer is if we are not at the TOP of the generated code. 
- return varname != genTopLevelVarName -} - -func (x *genRunner) arr2str(t reflect.Type, s string) string { - if t.Kind() == reflect.Array { - return s - } - return "" -} - -func (x *genRunner) genRequiredMethodVars(encode bool) { - x.line("var h " + x.hn) - if encode { - x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") - } else { - x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") - } - x.line("_, _, _ = h, z, r") -} - -func (x *genRunner) genRefPkgs(t reflect.Type) { - if _, ok := x.is[t]; ok { - return - } - // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) - x.is[t] = struct{}{} - tpkg, tname := genImportPath(t), t.Name() - if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { - if _, ok := x.im[tpkg]; !ok { - x.im[tpkg] = t - if idx := strings.LastIndex(tpkg, "/"); idx < 0 { - x.imn[tpkg] = tpkg - } else { - x.imc++ - x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) - } - } - } - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: - x.genRefPkgs(t.Elem()) - case reflect.Map: - x.genRefPkgs(t.Elem()) - x.genRefPkgs(t.Key()) - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { - x.genRefPkgs(t.Field(i).Type) - } - } - } -} - -func (x *genRunner) line(s string) { - x.out(s) - if len(s) == 0 || s[len(s)-1] != '\n' { - x.out("\n") - } -} - -func (x *genRunner) varsfx() string { - x.c++ - return strconv.FormatUint(x.c, 10) -} - -func (x *genRunner) varsfxreset() { - x.c = 0 -} - -func (x *genRunner) out(s string) { - if _, err := io.WriteString(x.w, s); err != nil { - panic(err) - } -} - -func (x *genRunner) linef(s string, params ...interface{}) { - x.line(fmt.Sprintf(s, params...)) -} - -func (x *genRunner) outf(s string, params ...interface{}) { - x.out(fmt.Sprintf(s, params...)) -} - -func (x *genRunner) 
genTypeName(t reflect.Type) (n string) { - // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() - - // if the type has a PkgPath, which doesn't match the current package, - // then include it. - // We cannot depend on t.String() because it includes current package, - // or t.PkgPath because it includes full import path, - // - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "*" - t = t.Elem() - } - if tn := t.Name(); tn != "" { - return ptrPfx + x.genTypeNamePrim(t) - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) - case reflect.Slice: - return ptrPfx + "[]" + x.genTypeName(t.Elem()) - case reflect.Array: - return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) - case reflect.Chan: - return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) - default: - if t == intfTyp { - return ptrPfx + "interface{}" - } else { - return ptrPfx + x.genTypeNamePrim(t) - } - } -} - -func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { - if t.Name() == "" { - return t.String() - } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { - return t.Name() - } else { - return x.imn[genImportPath(t)] + "." 
+ t.Name() - // return t.String() // best way to get the package name inclusive - } -} - -func (x *genRunner) genZeroValueR(t reflect.Type) string { - // if t is a named type, w - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, - reflect.Slice, reflect.Map, reflect.Invalid: - return "nil" - case reflect.Bool: - return "false" - case reflect.String: - return `""` - case reflect.Struct, reflect.Array: - return x.genTypeName(t) + "{}" - default: // all numbers - return "0" - } -} - -func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { - return genMethodNameT(t, x.tc) -} - -func (x *genRunner) selfer(encode bool) { - t := x.tc - t0 := t - // always make decode use a pointer receiver, - // and structs always use a ptr receiver (encode|decode) - isptr := !encode || t.Kind() == reflect.Struct - x.varsfxreset() - fnSigPfx := "func (x " - if isptr { - fnSigPfx += "*" - } - fnSigPfx += x.genTypeName(t) - - x.out(fnSigPfx) - if isptr { - t = reflect.PtrTo(t) - } - if encode { - x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") - x.genRequiredMethodVars(true) - // x.enc(genTopLevelVarName, t) - x.encVar(genTopLevelVarName, t) - } else { - x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - // do not use decVar, as there is no need to check TryDecodeAsNil - // or way to elegantly handle that, and also setting it to a - // non-nil value doesn't affect the pointer passed. 
- // x.decVar(genTopLevelVarName, t, false) - x.dec(genTopLevelVarName, t0) - } - x.line("}") - x.line("") - - if encode || t0.Kind() != reflect.Struct { - return - } - - // write is containerMap - if genUseOneFunctionForDecStructMap { - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated) - x.line("}") - x.line("") - } else { - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix) - x.line("}") - x.line("") - - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak) - x.line("}") - x.line("") - } - - // write containerArray - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0) - x.line("}") - x.line("") - -} - -// used for chan, array, slice, map -func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) { - if encode { - x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname) - } else { - x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname) - } - x.registerXtraT(t) -} - -func (x *genRunner) registerXtraT(t reflect.Type) { - // recursively register the types - if _, ok := x.tm[t]; ok { - return - } - var tkey reflect.Type - switch t.Kind() { - case reflect.Chan, reflect.Slice, reflect.Array: - case reflect.Map: - tkey = t.Key() - default: - return - } - x.tm[t] = struct{}{} - x.ts = append(x.ts, t) - // check if this refers to any xtra types eg. 
a slice of array: add the array - x.registerXtraT(t.Elem()) - if tkey != nil { - x.registerXtraT(tkey) - } -} - -// encVar will encode a variable. -// The parameter, t, is the reflect.Type of the variable itself -func (x *genRunner) encVar(varname string, t reflect.Type) { - // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) - var checkNil bool - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: - checkNil = true - } - if checkNil { - x.linef("if %s == nil { r.EncodeNil() } else { ", varname) - } - switch t.Kind() { - case reflect.Ptr: - switch t.Elem().Kind() { - case reflect.Struct, reflect.Array: - x.enc(varname, genNonPtr(t)) - default: - i := x.varsfx() - x.line(genTempVarPfx + i + " := *" + varname) - x.enc(genTempVarPfx+i, genNonPtr(t)) - } - case reflect.Struct, reflect.Array: - i := x.varsfx() - x.line(genTempVarPfx + i + " := &" + varname) - x.enc(genTempVarPfx+i, t) - default: - x.enc(varname, t) - } - - if checkNil { - x.line("}") - } - -} - -// enc will encode a variable (varname) of type t, -// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying) -func (x *genRunner) enc(varname string, t reflect.Type) { - rtid := rt2id(t) - // We call CodecEncodeSelf if one of the following are honored: - // - the type already implements Selfer, call that - // - the type has a Selfer implementation just created, use that - // - the type is in the list of the ones we will generate for, but it is not currently being generated - - mi := x.varsfx() - tptr := reflect.PtrTo(t) - tk := t.Kind() - if x.checkForSelfer(t, varname) { - if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T - if tptr.Implements(selferTyp) || t.Implements(selferTyp) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - } else { // varname is of type T - if t.Implements(selferTyp) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } else if 
tptr.Implements(selferTyp) { - x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) - x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) - return - } - } - - if _, ok := x.te[rtid]; ok { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.te[rtid] = true - rtidAdded = true - } - - // check if - // - type is RawExt, Raw - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi) - x.linef("_ = %sm%s", genTempVarPfx, mi) - x.line("if false {") //start if block - defer func() { x.line("}") }() //end if block - - if t == rawTyp { - x.linef("} else { z.EncRaw(%v)", varname) - return - } - if t == rawExtTyp { - x.linef("} else { r.EncodeRawExt(%v, e)", varname) - return - } - // HACK: Support for Builtins. - // Currently, only Binc supports builtins, and the only builtin type is time.Time. - // Have a method that returns the rtid for time.Time if Handle is Binc. - if t == timeTyp { - vrtid := genTempVarPfx + "m" + x.varsfx() - x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) - x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname) - } - // only check for extensions if the type is named, and has a packagePath. 
- if !x.nx && genImportPath(t) != "" && t.Name() != "" { - // first check if extensions are configued, before doing the interface conversion - x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname) - } - if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T - if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) - } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) - } - } else { // varname is of type T - if t.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) - } else if tptr.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) - } else if tptr.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname) - } else if t.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) - } else if tptr.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname) - } - } - x.line("} else {") - - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(int64(" + varname + "))") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(uint64(" + varname + "))") - case reflect.Float32: - 
x.line("r.EncodeFloat32(float32(" + varname + "))") - case reflect.Float64: - x.line("r.EncodeFloat64(float64(" + varname + "))") - case reflect.Bool: - x.line("r.EncodeBool(bool(" + varname + "))") - case reflect.String: - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))") - case reflect.Chan: - x.xtraSM(varname, true, t) - // x.encListFallback(varname, rtid, t) - case reflect.Array: - x.xtraSM(varname, true, t) - case reflect.Slice: - // if nil, call dedicated function - // if a []uint8, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if rtid == uint8SliceTypId { - x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))") - } else if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") - } else { - x.xtraSM(varname, true, t) - // x.encListFallback(varname, rtid, t) - } - case reflect.Map: - // if nil, call dedicated function - // if a known fastpath map, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") - if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." 
+ g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") - } else { - x.xtraSM(varname, true, t) - // x.encMapFallback(varname, rtid, t) - } - case reflect.Struct: - if !inlist { - delete(x.te, rtid) - x.line("z.EncFallback(" + varname + ")") - break - } - x.encStruct(varname, rtid, t) - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.EncFallback(" + varname + ")") - } -} - -func (x *genRunner) encZero(t reflect.Type) { - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(0)") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(0)") - case reflect.Float32: - x.line("r.EncodeFloat32(0)") - case reflect.Float64: - x.line("r.EncodeFloat64(0)") - case reflect.Bool: - x.line("r.EncodeBool(false)") - case reflect.String: - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`) - default: - x.line("r.EncodeNil()") - } -} - -func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { - // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) - // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it - - // if t === type currently running selfer on, do for all - ti := x.ti.get(rtid, t) - i := x.varsfx() - sepVarname := genTempVarPfx + "sep" + i - numfieldsvar := genTempVarPfx + "q" + i - ti2arrayvar := genTempVarPfx + "r" + i - struct2arrvar := genTempVarPfx + "2arr" + i - - x.line(sepVarname + " := !z.EncBinary()") - x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - // due to omitEmpty, we need to calculate the - // number of non-empty things we write out first. - // This is required as we need to pre-determine the size of the container, - // to support length-prefixing. 
- if ti.anyOmitEmpty { - x.linef("var %s [%v]bool", numfieldsvar, len(tisfi)) - x.linef("_ = %s", numfieldsvar) - } - x.linef("_, _ = %s, %s", sepVarname, struct2arrvar) - x.linef("const %s bool = %v", ti2arrayvar, ti.toArray) - var nn int - if ti.anyOmitEmpty { - for j, si := range tisfi { - if !si.omitEmpty { - nn++ - continue - } - var t2 reflect.StructField - var omitline string - { - t2typ := t - varname3 := varname - for ij, ix := range si.is { - if uint8(ij) == si.nis { - break - } - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(int(ix)) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - omitline += varname3 + " != nil && " - } - } - } - // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. - // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) - switch t2.Type.Kind() { - case reflect.Struct: - omitline += " true" - case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: - omitline += "len(" + varname + "." + t2.Name + ") != 0" - default: - omitline += varname + "." 
+ t2.Name + " != " + x.genZeroValueR(t2.Type) - } - x.linef("%s[%v] = %s", numfieldsvar, j, omitline) - } - } - // x.linef("var %snn%s int", genTempVarPfx, i) - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.linef("r.WriteArrayStart(%d)", len(tisfi)) - x.linef("} else {") // if not ti.toArray - if ti.anyOmitEmpty { - x.linef("var %snn%s = %v", genTempVarPfx, i, nn) - x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) - x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i) - x.linef("%snn%s = %v", genTempVarPfx, i, 0) - } else { - x.linef("r.WriteMapStart(%d)", len(tisfi)) - } - x.line("}") // close if not StructToArray - - for j, si := range tisfi { - i := x.varsfx() - isNilVarName := genTempVarPfx + "n" + i - var labelUsed bool - var t2 reflect.StructField - { - t2typ := t - varname3 := varname - for ij, ix := range si.is { - if uint8(ij) == si.nis { - break - } - // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix) - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(int(ix)) - t2typ = t2.Type - varname3 = varname3 + "." 
+ t2.Name - if t2typ.Kind() == reflect.Ptr { - if !labelUsed { - x.line("var " + isNilVarName + " bool") - } - x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") - x.line("goto LABEL" + i) - x.line("}") - labelUsed = true - // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") - } - } - // t2 = t.FieldByIndex(si.is) - } - if labelUsed { - x.line("LABEL" + i + ":") - } - // if the type of the field is a Selfer, or one of the ones - - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray - if labelUsed { - x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName) - // x.linef("if %s { z.EncSendContainerState(codecSelfer_containerArrayElem%s); r.EncodeNil() } else { ", isNilVarName, x.xs) - } - x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - if si.omitEmpty { - x.linef("if %s[%v] {", numfieldsvar, j) - } - x.encVar(varname+"."+t2.Name, t2.Type) - if si.omitEmpty { - x.linef("} else {") - x.encZero(t2.Type) - x.linef("}") - } - if labelUsed { - x.line("}") - } - - x.linef("} else {") // if not ti.toArray - - if si.omitEmpty { - x.linef("if %s[%v] {", numfieldsvar, j) - } - x.line("r.WriteMapElemKey()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))") - x.line("r.WriteMapElemValue()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - if labelUsed { - x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") - x.encVar(varname+"."+t2.Name, t2.Type) - x.line("}") - } else { - x.encVar(varname+"."+t2.Name, t2.Type) - } - if si.omitEmpty { - x.line("}") - } - x.linef("} ") // end if/else ti.toArray - } - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.line("r.WriteArrayEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) - x.line("} else {") - 
x.line("r.WriteMapEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) - x.line("}") - -} - -func (x *genRunner) encListFallback(varname string, t reflect.Type) { - if t.AssignableTo(uint8SliceTyp) { - x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname) - return - } - if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { - x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname) - return - } - i := x.varsfx() - g := genTempVarPfx - x.line("r.WriteArrayStart(len(" + varname + "))") - if t.Kind() == reflect.Chan { - x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i) - x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.linef("%sv%s := <-%s", g, i, varname) - } else { - // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) - x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) - x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - } - x.encVar(genTempVarPfx+"v"+i, t.Elem()) - x.line("}") - x.line("r.WriteArrayEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) -} - -func (x *genRunner) encMapFallback(varname string, t reflect.Type) { - // TODO: expand this to handle canonical. 
- i := x.varsfx() - x.line("r.WriteMapStart(len(" + varname + "))") - x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) - // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {") - x.line("r.WriteMapElemKey()") // f("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.encVar(genTempVarPfx+"k"+i, t.Key()) - x.line("r.WriteMapElemValue()") // f("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - x.encVar(genTempVarPfx+"v"+i, t.Elem()) - x.line("}") - x.line("r.WriteMapEnd()") // f("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) -} - -func (x *genRunner) decVar(varname, decodedNilVarname string, t reflect.Type, canBeNil bool) { - // We only encode as nil if a nillable value. - // This removes some of the wasted checks for TryDecodeAsNil. - // We need to think about this more, to see what happens if omitempty, etc - // cause a nil value to be stored when something is expected. - // This could happen when decoding from a struct encoded as an array. - // For that, decVar should be called with canNil=true, to force true as its value. 
- i := x.varsfx() - if !canBeNil { - canBeNil = genAnythingCanBeNil || !genIsImmutable(t) - } - if canBeNil { - x.line("if r.TryDecodeAsNil() {") - if decodedNilVarname != "" { - x.line(decodedNilVarname + " = true") - } else if t.Kind() == reflect.Ptr { - x.line("if " + varname + " != nil { ") - - // if varname is a field of a struct (has a dot in it), - // then just set it to nil - if strings.IndexByte(varname, '.') != -1 { - x.line(varname + " = nil") - } else { - x.line("*" + varname + " = " + x.genZeroValueR(t.Elem())) - } - x.line("}") - } else { - x.line(varname + " = " + x.genZeroValueR(t)) - } - x.line("} else {") - } else { - x.line("// cannot be nil") - } - if t.Kind() != reflect.Ptr { - if x.decTryAssignPrimitive(varname, t) { - x.line(genTempVarPfx + "v" + i + " := &" + varname) - x.dec(genTempVarPfx+"v"+i, t) - } - } else { - x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) - // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). - // There's a chance of a **T in here which is nil. - var ptrPfx string - for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { - ptrPfx += "*" - x.linef("if %s%s == nil { %s%s = new(%s)}", - ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) - } - // if varname has [ in it, then create temp variable for this ptr thingie - if strings.Index(varname, "[") >= 0 { - varname2 := genTempVarPfx + "w" + i - x.line(varname2 + " := " + varname) - varname = varname2 - } - - if ptrPfx == "" { - x.dec(varname, t) - } else { - x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname) - x.dec(genTempVarPfx+"z"+i, t) - } - - } - - if canBeNil { - x.line("} ") - } -} - -// dec will decode a variable (varname) of type ptrTo(t). -// t is always a basetype (i.e. not of kind reflect.Ptr). -func (x *genRunner) dec(varname string, t reflect.Type) { - // assumptions: - // - the varname is to a pointer already. 
No need to take address of it - // - t is always a baseType T (not a *T, etc). - rtid := rt2id(t) - tptr := reflect.PtrTo(t) - if x.checkForSelfer(t, varname) { - if t.Implements(selferTyp) || tptr.Implements(selferTyp) { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - if _, ok := x.td[rtid]; ok { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.td[rtid] = true - rtidAdded = true - } - - // check if - // - type is Raw, RawExt - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - mi := x.varsfx() - x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) - x.linef("_ = %sm%s", genTempVarPfx, mi) - x.line("if false {") //start if block - defer func() { x.line("}") }() //end if block - - if t == rawTyp { - x.linef("} else { *%v = z.DecRaw()", varname) - return - } - if t == rawExtTyp { - x.linef("} else { r.DecodeExt(%v, 0, nil)", varname) - return - } - - // HACK: Support for Builtins. - // Currently, only Binc supports builtins, and the only builtin type is time.Time. - // Have a method that returns the rtid for time.Time if Handle is Binc. - if t == timeTyp { - vrtid := genTempVarPfx + "m" + x.varsfx() - x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) - x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname) - } - // only check for extensions if the type is named, and has a packagePath. 
- if !x.nx && genImportPath(t) != "" && t.Name() != "" { - // first check if extensions are configued, before doing the interface conversion - x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) - } - - if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) { - x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname) - } else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) { - x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname) - } - - x.line("} else {") - - // Since these are pointers, we cannot share, and have to use them one by one - switch t.Kind() { - case reflect.Int: - x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))") - // x.line("z.DecInt((*int)(" + varname + "))") - case reflect.Int8: - x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))") - // x.line("z.DecInt8((*int8)(" + varname + "))") - case reflect.Int16: - x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))") - // x.line("z.DecInt16((*int16)(" + varname + "))") - case reflect.Int32: - x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))") - // x.line("z.DecInt32((*int32)(" + varname + "))") - case reflect.Int64: - x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))") - // x.line("z.DecInt64((*int64)(" + varname + "))") - - case reflect.Uint: - x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))") - // x.line("z.DecUint((*uint)(" + varname + "))") - case reflect.Uint8: - x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))") - // x.line("z.DecUint8((*uint8)(" + varname + "))") - case reflect.Uint16: - x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))") - 
//x.line("z.DecUint16((*uint16)(" + varname + "))") - case reflect.Uint32: - x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))") - //x.line("z.DecUint32((*uint32)(" + varname + "))") - case reflect.Uint64: - x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))") - //x.line("z.DecUint64((*uint64)(" + varname + "))") - case reflect.Uintptr: - x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))") - - case reflect.Float32: - x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))") - //x.line("z.DecFloat32((*float32)(" + varname + "))") - case reflect.Float64: - x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))") - // x.line("z.DecFloat64((*float64)(" + varname + "))") - - case reflect.Bool: - x.line("*((*bool)(" + varname + ")) = r.DecodeBool()") - // x.line("z.DecBool((*bool)(" + varname + "))") - case reflect.String: - x.line("*((*string)(" + varname + ")) = r.DecodeString()") - // x.line("z.DecString((*string)(" + varname + "))") - case reflect.Array, reflect.Chan: - x.xtraSM(varname, false, t) - // x.decListFallback(varname, rtid, true, t) - case reflect.Slice: - // if a []uint8, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if rtid == uint8SliceTypId { - x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false)") - } else if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", d)") - } else { - x.xtraSM(varname, false, t) - // x.decListFallback(varname, rtid, false, t) - } - case reflect.Map: - // if a known fastpath map, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. 
- // - else call Encoder.encode(XXX) on it. - if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", d)") - } else { - x.xtraSM(varname, false, t) - // x.decMapFallback(varname, rtid, t) - } - case reflect.Struct: - if inlist { - x.decStruct(varname, rtid, t) - } else { - // delete(x.td, rtid) - x.line("z.DecFallback(" + varname + ", false)") - } - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.DecFallback(" + varname + ", true)") - } -} - -func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) { - // This should only be used for exact primitives (ie un-named types). - // Named types may be implementations of Selfer, Unmarshaler, etc. - // They should be handled by dec(...) - - if t.Name() != "" { - tryAsPtr = true - return - } - - switch t.Kind() { - case reflect.Int: - x.linef("%s = r.DecodeInt(codecSelferBitsize%s)", varname, x.xs) - case reflect.Int8: - x.linef("%s = r.DecodeInt(8)", varname) - case reflect.Int16: - x.linef("%s = r.DecodeInt(16)", varname) - case reflect.Int32: - x.linef("%s = r.DecodeInt(32)", varname) - case reflect.Int64: - x.linef("%s = r.DecodeInt(64)", varname) - - case reflect.Uint: - x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) - case reflect.Uint8: - x.linef("%s = r.DecodeUint(8)", varname) - case reflect.Uint16: - x.linef("%s = r.DecodeUint(16)", varname) - case reflect.Uint32: - x.linef("%s = r.DecodeUint(32)", varname) - case reflect.Uint64: - x.linef("%s = r.DecodeUint(64)", varname) - case reflect.Uintptr: - x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) - - case reflect.Float32: - x.linef("%s = r.DecodeFloat(true)", varname) - case reflect.Float64: - x.linef("%s = r.DecodeFloat(false)", varname) - - case reflect.Bool: - x.linef("%s = r.DecodeBool()", varname) - case reflect.String: - x.linef("%s = r.DecodeString()", varname) - default: - tryAsPtr = true - } - return -} - 
-func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { - if t.AssignableTo(uint8SliceTyp) { - x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)") - return - } - if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { - x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], true)", t.Len(), varname) - return - } - type tstruc struct { - TempVar string - Rand string - Varname string - CTyp string - Typ string - Immutable bool - Size int - } - telem := t.Elem() - ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} - - funcs := make(template.FuncMap) - - funcs["decLineVar"] = func(varname string) string { - x.decVar(varname, "", telem, false) - return "" - } - // funcs["decLine"] = func(pfx string) string { - // x.decVar(ts.TempVar+pfx+ts.Rand, "", reflect.PtrTo(telem), false) - // return "" - // } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - funcs["zero"] = func() string { - return x.genZeroValueR(telem) - } - funcs["isArray"] = func() bool { - return t.Kind() == reflect.Array - } - funcs["isSlice"] = func() bool { - return t.Kind() == reflect.Slice - } - funcs["isChan"] = func() bool { - return t.Kind() == reflect.Chan - } - tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) - if err != nil { - panic(err) - } - if err = tm.Execute(x.w, &ts); err != nil { - panic(err) - } -} - -func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { - type tstruc struct { - TempVar string - Sfx string - Rand string - Varname string - KTyp string - Typ string - Size int - } - telem := t.Elem() - tkey := t.Key() - ts := tstruc{ - genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), - x.genTypeName(telem), int(telem.Size() + tkey.Size()), - } - - funcs := make(template.FuncMap) - funcs["decElemZero"] = func() string { - return x.genZeroValueR(telem) - } - 
funcs["decElemKindImmutable"] = func() bool { - return genIsImmutable(telem) - } - funcs["decElemKindPtr"] = func() bool { - return telem.Kind() == reflect.Ptr - } - funcs["decElemKindIntf"] = func() bool { - return telem.Kind() == reflect.Interface - } - funcs["decLineVarK"] = func(varname string) string { - x.decVar(varname, "", tkey, false) - return "" - } - funcs["decLineVar"] = func(varname, decodedNilVarname string) string { - x.decVar(varname, decodedNilVarname, telem, false) - return "" - } - // funcs["decLineK"] = func(pfx string) string { - // x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false) - // return "" - // } - // funcs["decLine"] = func(pfx string) string { - // x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) - // return "" - // } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - - tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) - if err != nil { - panic(err) - } - if err = tm.Execute(x.w, &ts); err != nil { - panic(err) - } -} - -func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { - ti := x.ti.get(rtid, t) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - x.line("switch (" + kName + ") {") - for _, si := range tisfi { - x.line("case \"" + si.encName + "\":") - var t2 reflect.StructField - { - //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. - // t2 = t.FieldByIndex(si.is) - t2typ := t - varname3 := varname - for ij, ix := range si.is { - if uint8(ij) == si.nis { - break - } - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(int(ix)) - t2typ = t2.Type - varname3 = varname3 + "." 
+ t2.Name - if t2typ.Kind() == reflect.Ptr { - x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) - } - } - } - x.decVar(varname+"."+t2.Name, "", t2.Type, false) - } - x.line("default:") - // pass the slice here, so that the string will not escape, and maybe save allocation - x.line("z.DecStructFieldNotFound(-1, " + kName + ")") - x.line("} // end switch " + kName) -} - -func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { - tpfx := genTempVarPfx - i := x.varsfx() - kName := tpfx + "s" + i - - // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into") - // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into") - // use the scratch buffer to avoid allocation (most field names are < 32). - - x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into") - - x.line("_ = " + kName + "Slc") - switch style { - case genStructMapStyleLenPrefix: - x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) - case genStructMapStyleCheckBreak: - x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) - default: // 0, otherwise. - x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) - x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) - x.line("} else { if r.CheckBreak() { break }; }") - } - x.line("r.ReadMapElemKey()") // f("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.line(kName + "Slc = r.DecodeStringAsBytes()") - // let string be scoped to this loop alone, so it doesn't escape. 
- x.line(kName + " := string(" + kName + "Slc)") - x.line("r.ReadMapElemValue()") // f("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - x.decStructMapSwitch(kName, varname, rtid, t) - - x.line("} // end for " + tpfx + "j" + i) - x.line("r.ReadMapEnd()") // f("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) -} - -func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { - tpfx := genTempVarPfx - i := x.varsfx() - ti := x.ti.get(rtid, t) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - x.linef("var %sj%s int", tpfx, i) - x.linef("var %sb%s bool", tpfx, i) // break - x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - for _, si := range tisfi { - var t2 reflect.StructField - { - //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. - // t2 = t.FieldByIndex(si.is) - t2typ := t - varname3 := varname - for ij, ix := range si.is { - if uint8(ij) == si.nis { - break - } - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(int(ix)) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) - } - } - } - - x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", - tpfx, i, tpfx, i, tpfx, i, - tpfx, i, lenvarname, tpfx, i) - x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString) - // x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", tpfx, i, x.xs, breakString) - x.line("r.ReadArrayElem()") // f("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.decVar(varname+"."+t2.Name, "", t2.Type, true) - } - // read remaining values and throw away. 
- x.line("for {") - x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", - tpfx, i, tpfx, i, tpfx, i, - tpfx, i, lenvarname, tpfx, i) - x.linef("if %sb%s { break }", tpfx, i) - x.line("r.ReadArrayElem()") // f("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) - x.line("}") - x.line("r.ReadArrayEnd()") // f("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) -} - -func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { - // if container is map - i := x.varsfx() - x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) - x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) - x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") - x.linef("if %sl%s == 0 {", genTempVarPfx, i) - x.line("r.ReadMapEnd()") // f("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) - if genUseOneFunctionForDecStructMap { - x.line("} else { ") - x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i) - } else { - x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") - x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") - x.line("} else {") - x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") - } - x.line("}") - - // else if container is array - x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) - x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") - x.linef("if %sl%s == 0 {", genTempVarPfx, i) - x.line("r.ReadArrayEnd()") // f("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) - x.line("} else { ") - x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i) - x.line("}") - // else panic - x.line("} else { ") - x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")") - x.line("} ") -} - -// -------- - -type genV struct { - // genV is either a primitive (Primitive 
!= "") or a map (MapKey != "") or a slice - MapKey string - Elem string - Primitive string - Size int -} - -func (x *genRunner) newGenV(t reflect.Type) (v genV) { - switch t.Kind() { - case reflect.Slice, reflect.Array: - te := t.Elem() - v.Elem = x.genTypeName(te) - v.Size = int(te.Size()) - case reflect.Map: - te, tk := t.Elem(), t.Key() - v.Elem = x.genTypeName(te) - v.MapKey = x.genTypeName(tk) - v.Size = int(te.Size() + tk.Size()) - default: - panic("unexpected type for newGenV. Requires map or slice type") - } - return -} - -func (x *genV) MethodNamePfx(prefix string, prim bool) string { - var name []byte - if prefix != "" { - name = append(name, prefix...) - } - if prim { - name = append(name, genTitleCaseName(x.Primitive)...) - } else { - if x.MapKey == "" { - name = append(name, "Slice"...) - } else { - name = append(name, "Map"...) - name = append(name, genTitleCaseName(x.MapKey)...) - } - name = append(name, genTitleCaseName(x.Elem)...) - } - return string(name) - -} - -// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. -// -// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, -// where PkgPath returns the full path, including the vendoring pre-fix that should have been stripped. -// We strip it here. -func genImportPath(t reflect.Type) (s string) { - s = t.PkgPath() - if genCheckVendor { - // HACK: always handle vendoring. It should be typically on in go 1.6, 1.7 - s = stripVendor(s) - } - return -} - -// A go identifier is (letter|_)[letter|number|_]* -func genGoIdentifier(s string, checkFirstChar bool) string { - b := make([]byte, 0, len(s)) - t := make([]byte, 4) - var n int - for i, r := range s { - if checkFirstChar && i == 0 && !unicode.IsLetter(r) { - b = append(b, '_') - } - // r must be unicode_letter, unicode_digit or _ - if unicode.IsLetter(r) || unicode.IsDigit(r) { - n = utf8.EncodeRune(t, r) - b = append(b, t[:n]...) 
- } else { - b = append(b, '_') - } - } - return string(b) -} - -func genNonPtr(t reflect.Type) reflect.Type { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t -} - -func genTitleCaseName(s string) string { - switch s { - case "interface{}", "interface {}": - return "Intf" - default: - return strings.ToUpper(s[0:1]) + s[1:] - } -} - -func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "Ptrto" - t = t.Elem() - } - tstr := t.String() - if tn := t.Name(); tn != "" { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - return ptrPfx + tn - } else { - if genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) - case reflect.Slice: - return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) - case reflect.Array: - return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) - case reflect.Chan: - var cx string - switch t.ChanDir() { - case reflect.SendDir: - cx = "ChanSend" - case reflect.RecvDir: - cx = "ChanRecv" - default: - cx = "Chan" - } - return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) - default: - if t == intfTyp { - return ptrPfx + "Interface" - } else { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - if t.Name() != "" { - return ptrPfx + t.Name() - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } else { - // best way to get the package name inclusive - // return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr)) - if t.Name() != "" && genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - } -} - -// 
genCustomNameForType base64encodes the t.String() value in such a way -// that it can be used within a function name. -func genCustomTypeName(tstr string) string { - len2 := genBase64enc.EncodedLen(len(tstr)) - bufx := make([]byte, len2) - genBase64enc.Encode(bufx, []byte(tstr)) - for i := len2 - 1; i >= 0; i-- { - if bufx[i] == '=' { - len2-- - } else { - break - } - } - return string(bufx[:len2]) -} - -func genIsImmutable(t reflect.Type) (v bool) { - return isImmutableKind(t.Kind()) -} - -type genInternal struct { - Version int - Values []genV -} - -func (x genInternal) FastpathLen() (l int) { - for _, v := range x.Values { - if v.Primitive == "" { - l++ - } - } - return -} - -func genInternalZeroValue(s string) string { - switch s { - case "interface{}", "interface {}": - return "nil" - case "bool": - return "false" - case "string": - return `""` - default: - return "0" - } -} - -func genInternalNonZeroValue(s string) string { - switch s { - case "interface{}", "interface {}": - return `"string-is-an-interface"` // return string, to remove ambiguity - case "bool": - return "true" - case "string": - return `"some-string"` - case "float32", "float64", "float", "double": - return "10.1" - default: - return "10" - } -} - -func genInternalEncCommandAsString(s string, vname string) string { - switch s { - case "uint", "uint8", "uint16", "uint32", "uint64": - return "ee.EncodeUint(uint64(" + vname + "))" - case "int", "int8", "int16", "int32", "int64": - return "ee.EncodeInt(int64(" + vname + "))" - case "string": - return "ee.EncodeString(c_UTF8, " + vname + ")" - case "float32": - return "ee.EncodeFloat32(" + vname + ")" - case "float64": - return "ee.EncodeFloat64(" + vname + ")" - case "bool": - return "ee.EncodeBool(" + vname + ")" - case "symbol": - return "ee.EncodeSymbol(" + vname + ")" - default: - return "e.encode(" + vname + ")" - } -} - -func genInternalDecCommandAsString(s string) string { - switch s { - case "uint": - return 
"uint(dd.DecodeUint(uintBitsize))" - case "uint8": - return "uint8(dd.DecodeUint(8))" - case "uint16": - return "uint16(dd.DecodeUint(16))" - case "uint32": - return "uint32(dd.DecodeUint(32))" - case "uint64": - return "dd.DecodeUint(64)" - case "uintptr": - return "uintptr(dd.DecodeUint(uintBitsize))" - case "int": - return "int(dd.DecodeInt(intBitsize))" - case "int8": - return "int8(dd.DecodeInt(8))" - case "int16": - return "int16(dd.DecodeInt(16))" - case "int32": - return "int32(dd.DecodeInt(32))" - case "int64": - return "dd.DecodeInt(64)" - - case "string": - return "dd.DecodeString()" - case "float32": - return "float32(dd.DecodeFloat(true))" - case "float64": - return "dd.DecodeFloat(false)" - case "bool": - return "dd.DecodeBool()" - default: - panic(errors.New("gen internal: unknown type for decode: " + s)) - } -} - -func genInternalSortType(s string, elem bool) string { - for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { - if strings.HasPrefix(s, v) { - if elem { - if v == "int" || v == "uint" || v == "float" { - return v + "64" - } else { - return v - } - } - return v + "Slice" - } - } - panic("sorttype: unexpected type: " + s) -} - -func stripVendor(s string) string { - // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. - // if s contains /vendor/ OR startsWith vendor/, then return everything after it. 
- const vendorStart = "vendor/" - const vendorInline = "/vendor/" - if i := strings.LastIndex(s, vendorInline); i >= 0 { - s = s[i+len(vendorInline):] - } else if strings.HasPrefix(s, vendorStart) { - s = s[len(vendorStart):] - } - return s -} - -// var genInternalMu sync.Mutex -var genInternalV = genInternal{Version: genVersion} -var genInternalTmplFuncs template.FuncMap -var genInternalOnce sync.Once - -func genInternalInit() { - types := [...]string{ - "interface{}", - "string", - "float32", - "float64", - "uint", - "uint8", - "uint16", - "uint32", - "uint64", - "uintptr", - "int", - "int8", - "int16", - "int32", - "int64", - "bool", - } - // keep as slice, so it is in specific iteration order. - // Initial order was uint64, string, interface{}, int, int64 - mapvaltypes := [...]string{ - "interface{}", - "string", - "uint", - "uint8", - "uint16", - "uint32", - "uint64", - "uintptr", - "int", - "int8", - "int16", - "int32", - "int64", - "float32", - "float64", - "bool", - } - wordSizeBytes := int(intBitsize) / 8 - - mapvaltypes2 := map[string]int{ - "interface{}": 2 * wordSizeBytes, - "string": 2 * wordSizeBytes, - "uint": 1 * wordSizeBytes, - "uint8": 1, - "uint16": 2, - "uint32": 4, - "uint64": 8, - "uintptr": 1 * wordSizeBytes, - "int": 1 * wordSizeBytes, - "int8": 1, - "int16": 2, - "int32": 4, - "int64": 8, - "float32": 4, - "float64": 8, - "bool": 1, - } - var gt = genInternal{Version: genVersion} - - // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function - for _, s := range types { - gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) - if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. 
- gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) - } - if _, ok := mapvaltypes2[s]; !ok { - gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) - } - for _, ms := range mapvaltypes { - gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) - } - } - - funcs := make(template.FuncMap) - // funcs["haspfx"] = strings.HasPrefix - funcs["encmd"] = genInternalEncCommandAsString - funcs["decmd"] = genInternalDecCommandAsString - funcs["zerocmd"] = genInternalZeroValue - funcs["nonzerocmd"] = genInternalNonZeroValue - funcs["hasprefix"] = strings.HasPrefix - funcs["sorttype"] = genInternalSortType - - genInternalV = gt - genInternalTmplFuncs = funcs -} - -// genInternalGoFile is used to generate source files from templates. -// It is run by the program author alone. -// Unfortunately, it has to be exported so that it can be called from a command line tool. -// *** DO NOT USE *** -func genInternalGoFile(r io.Reader, w io.Writer) (err error) { - genInternalOnce.Do(genInternalInit) - - gt := genInternalV - - t := template.New("").Funcs(genInternalTmplFuncs) - - tmplstr, err := ioutil.ReadAll(r) - if err != nil { - return - } - - if t, err = t.Parse(string(tmplstr)); err != nil { - return - } - - var out bytes.Buffer - err = t.Execute(&out, gt) - if err != nil { - return - } - - bout, err := format.Source(out.Bytes()) - if err != nil { - w.Write(out.Bytes()) // write out if error, so we can still see. - // w.Write(bout) // write out if error, as much as possible, so we can still see. - return - } - w.Write(bout) - return -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go deleted file mode 100644 index 7567e2c07b4..00000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
-// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.5 - -package codec - -import "reflect" - -const reflectArrayOfSupported = true - -func reflectArrayOf(count int, elem reflect.Type) reflect.Type { - return reflect.ArrayOf(count, elem) -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go deleted file mode 100644 index ec94bd0c0af..00000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build !go1.5 - -package codec - -import "reflect" - -const reflectArrayOfSupported = false - -func reflectArrayOf(count int, elem reflect.Type) reflect.Type { - panic("codec: reflect.ArrayOf unsupported in this go version") -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go deleted file mode 100644 index 51fe40e5bf2..00000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.9 - -package codec - -import "reflect" - -func makeMapReflect(t reflect.Type, size int) reflect.Value { - if size < 0 { - return reflect.MakeMapWithSize(t, 4) - } - return reflect.MakeMapWithSize(t, size) -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go deleted file mode 100644 index d4b9c2c8d9a..00000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
-// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build !go1.9 - -package codec - -import "reflect" - -func makeMapReflect(t reflect.Type, size int) reflect.Value { - return reflect.MakeMap(t) -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go deleted file mode 100644 index dcd8c3d11ce..00000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build !go1.4 - -package codec - -// This codec package will only work for go1.4 and above. -// This is for the following reasons: -// - go 1.4 was released in 2014 -// - go runtime is written fully in go -// - interface only holds pointers -// - reflect.Value is stabilized as 3 words - -func init() { - panic("codec: go 1.3 and below are not supported") -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go deleted file mode 100644 index 68626e1ce74..00000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.5,!go1.6 - -package codec - -import "os" - -var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go deleted file mode 100644 index 344f5967bee..00000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
-// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.6,!go1.7 - -package codec - -import "os" - -var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go deleted file mode 100644 index de91d29407f..00000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.7 - -package codec - -const genCheckVendor = true diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go deleted file mode 100644 index 9d007bfed4c..00000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build !go1.5 - -package codec - -var genCheckVendor = false diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go deleted file mode 100644 index df2febbf801..00000000000 --- a/vendor/github.com/ugorji/go/codec/helper.go +++ /dev/null @@ -1,1963 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// Contains code shared by both encode and decode. - -// Some shared ideas around encoding/decoding -// ------------------------------------------ -// -// If an interface{} is passed, we first do a type assertion to see if it is -// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. 
-// -// If we start with a reflect.Value, we are already in reflect.Value land and -// will try to grab the function for the underlying Type and directly call that function. -// This is more performant than calling reflect.Value.Interface(). -// -// This still helps us bypass many layers of reflection, and give best performance. -// -// Containers -// ------------ -// Containers in the stream are either associative arrays (key-value pairs) or -// regular arrays (indexed by incrementing integers). -// -// Some streams support indefinite-length containers, and use a breaking -// byte-sequence to denote that the container has come to an end. -// -// Some streams also are text-based, and use explicit separators to denote the -// end/beginning of different values. -// -// During encode, we use a high-level condition to determine how to iterate through -// the container. That decision is based on whether the container is text-based (with -// separators) or binary (without separators). If binary, we do not even call the -// encoding of separators. -// -// During decode, we use a different high-level condition to determine how to iterate -// through the containers. That decision is based on whether the stream contained -// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that -// it has to be binary, and we do not even try to read separators. -// -// Philosophy -// ------------ -// On decode, this codec will update containers appropriately: -// - If struct, update fields from stream into fields of struct. -// If field in stream not found in struct, handle appropriately (based on option). -// If a struct field has no corresponding value in the stream, leave it AS IS. -// If nil in stream, set value to nil/zero value. -// - If map, update map from stream. -// If the stream value is NIL, set the map to nil. -// - if slice, try to update up to length of array in stream. 
-// if container len is less than stream array length, -// and container cannot be expanded, handled (based on option). -// This means you can decode 4-element stream array into 1-element array. -// -// ------------------------------------ -// On encode, user can specify omitEmpty. This means that the value will be omitted -// if the zero value. The problem may occur during decode, where omitted values do not affect -// the value being decoded into. This means that if decoding into a struct with an -// int field with current value=5, and the field is omitted in the stream, then after -// decoding, the value will still be 5 (not 0). -// omitEmpty only works if you guarantee that you always decode into zero-values. -// -// ------------------------------------ -// We could have truncated a map to remove keys not available in the stream, -// or set values in the struct which are not in the stream to their zero values. -// We decided against it because there is no efficient way to do it. -// We may introduce it as an option later. -// However, that will require enabling it for both runtime and code generation modes. -// -// To support truncate, we need to do 2 passes over the container: -// map -// - first collect all keys (e.g. in k1) -// - for each key in stream, mark k1 that the key should not be removed -// - after updating map, do second pass and call delete for all keys in k1 which are not marked -// struct: -// - for each field, track the *typeInfo s1 -// - iterate through all s1, and for each one not marked, set value to zero -// - this involves checking the possible anonymous fields which are nil ptrs. -// too much work. -// -// ------------------------------------------ -// Error Handling is done within the library using panic. -// -// This way, the code doesn't have to keep checking if an error has happened, -// and we don't have to keep sending the error value along with each call -// or storing it in the En|Decoder and checking it constantly along the way. 
-// -// The disadvantage is that small functions which use panics cannot be inlined. -// The code accounts for that by only using panics behind an interface; -// since interface calls cannot be inlined, this is irrelevant. -// -// We considered storing the error is En|Decoder. -// - once it has its err field set, it cannot be used again. -// - panicing will be optional, controlled by const flag. -// - code should always check error first and return early. -// We eventually decided against it as it makes the code clumsier to always -// check for these error conditions. - -import ( - "bytes" - "encoding" - "encoding/binary" - "errors" - "fmt" - "math" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "time" -) - -const ( - scratchByteArrayLen = 32 - // initCollectionCap = 16 // 32 is defensive. 16 is preferred. - - // Support encoding.(Binary|Text)(Unm|M)arshaler. - // This constant flag will enable or disable it. - supportMarshalInterfaces = true - - // for debugging, set this to false, to catch panic traces. - // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. - recoverPanicToErr = true - - // arrayCacheLen is the length of the cache used in encoder or decoder for - // allowing zero-alloc initialization. - arrayCacheLen = 8 - - // We tried an optimization, where we detect if a type is one of the known types - // we optimized for (e.g. int, []uint64, etc). - // - // However, we notice some worse performance when using this optimization. - // So we hide it behind a flag, to turn on if needed. - useLookupRecognizedTypes = false - - // using recognized allows us to do d.decode(interface{}) instead of d.decodeValue(reflect.Value) - // when we can infer that the kind of the interface{} is one of the ones hard-coded in the - // type switch for known types or the ones defined by fast-path. - // - // However, it seems we get better performance when we don't recognize, and just let - // reflection handle it. 
- // - // Reasoning is as below: - // typeswitch is a binary search with a branch to a code-point. - // getdecfn is a binary search with a call to a function pointer. - // - // both are about the same. - // - // so: why prefer typeswitch? - // - // is recognized does the following: - // - lookup rtid - // - check if in sorted list - // - calls decode(type switch) - // - 1 or 2 binary search to a point in code - // - branch there - // - // vs getdecfn - // - lookup rtid - // - check in sorted list for a function pointer - // - calls it to decode using reflection (optimized) - - // always set xDebug = false before releasing software - xDebug = true -) - -var ( - oneByteArr = [1]byte{0} - zeroByteSlice = oneByteArr[:0:0] -) - -var refBitset bitset32 - -var pool pooler - -func init() { - pool.init() - - refBitset.set(byte(reflect.Map)) - refBitset.set(byte(reflect.Ptr)) - refBitset.set(byte(reflect.Func)) - refBitset.set(byte(reflect.Chan)) -} - -// type findCodecFnMode uint8 - -// const ( -// findCodecFnModeMap findCodecFnMode = iota -// findCodecFnModeBinarySearch -// findCodecFnModeLinearSearch -// ) - -type charEncoding uint8 - -const ( - c_RAW charEncoding = iota - c_UTF8 - c_UTF16LE - c_UTF16BE - c_UTF32LE - c_UTF32BE -) - -// valueType is the stream type -type valueType uint8 - -const ( - valueTypeUnset valueType = iota - valueTypeNil - valueTypeInt - valueTypeUint - valueTypeFloat - valueTypeBool - valueTypeString - valueTypeSymbol - valueTypeBytes - valueTypeMap - valueTypeArray - valueTypeTimestamp - valueTypeExt - - // valueTypeInvalid = 0xff -) - -func (x valueType) String() string { - switch x { - case valueTypeNil: - return "Nil" - case valueTypeInt: - return "Int" - case valueTypeUint: - return "Uint" - case valueTypeFloat: - return "Float" - case valueTypeBool: - return "Bool" - case valueTypeString: - return "String" - case valueTypeSymbol: - return "Symbol" - case valueTypeBytes: - return "Bytes" - case valueTypeMap: - return "Map" - case 
valueTypeArray: - return "Array" - case valueTypeTimestamp: - return "Timestamp" - case valueTypeExt: - return "Ext" - } - return strconv.FormatInt(int64(x), 10) -} - -type seqType uint8 - -const ( - _ seqType = iota - seqTypeArray - seqTypeSlice - seqTypeChan -) - -// note that containerMapStart and containerArraySend are not sent. -// This is because the ReadXXXStart and EncodeXXXStart already does these. -type containerState uint8 - -const ( - _ containerState = iota - - containerMapStart // slot left open, since Driver method already covers it - containerMapKey - containerMapValue - containerMapEnd - containerArrayStart // slot left open, since Driver methods already cover it - containerArrayElem - containerArrayEnd -) - -// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo -type sfiIdx struct { - name string - index int -} - -// do not recurse if a containing type refers to an embedded type -// which refers back to its containing type (via a pointer). -// The second time this back-reference happens, break out, -// so as not to cause an infinite loop. -const rgetMaxRecursion = 2 - -// Anecdotally, we believe most types have <= 12 fields. -// Java's PMD rules set TooManyFields threshold to 15. 
-const typeInfoLoadArrayLen = 12 - -type typeInfoLoad struct { - fNames []string - encNames []string - etypes []uintptr - sfis []*structFieldInfo -} - -type typeInfoLoadArray struct { - fNames [typeInfoLoadArrayLen]string - encNames [typeInfoLoadArrayLen]string - etypes [typeInfoLoadArrayLen]uintptr - sfis [typeInfoLoadArrayLen]*structFieldInfo - sfiidx [typeInfoLoadArrayLen]sfiIdx -} - -// type containerStateRecv interface { -// sendContainerState(containerState) -// } - -// mirror json.Marshaler and json.Unmarshaler here, -// so we don't import the encoding/json package -type jsonMarshaler interface { - MarshalJSON() ([]byte, error) -} -type jsonUnmarshaler interface { - UnmarshalJSON([]byte) error -} - -// type byteAccepter func(byte) bool - -var ( - bigen = binary.BigEndian - structInfoFieldName = "_struct" - - mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) - intfSliceTyp = reflect.TypeOf([]interface{}(nil)) - intfTyp = intfSliceTyp.Elem() - - stringTyp = reflect.TypeOf("") - timeTyp = reflect.TypeOf(time.Time{}) - rawExtTyp = reflect.TypeOf(RawExt{}) - rawTyp = reflect.TypeOf(Raw{}) - uint8SliceTyp = reflect.TypeOf([]uint8(nil)) - - mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() - - binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() - binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() - - textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - - jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() - jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() - - selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() - - uint8SliceTypId = rt2id(uint8SliceTyp) - rawExtTypId = rt2id(rawExtTyp) - rawTypId = rt2id(rawTyp) - intfTypId = rt2id(intfTyp) - timeTypId = rt2id(timeTyp) - stringTypId = 
rt2id(stringTyp) - - mapStrIntfTypId = rt2id(mapStrIntfTyp) - mapIntfIntfTypId = rt2id(mapIntfIntfTyp) - intfSliceTypId = rt2id(intfSliceTyp) - // mapBySliceTypId = rt2id(mapBySliceTyp) - - intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) - uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) - - bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} - bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} - - chkOvf checkOverflow - - noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo") -) - -var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) - -var immutableKindsSet = [32]bool{ - // reflect.Invalid: , - reflect.Bool: true, - reflect.Int: true, - reflect.Int8: true, - reflect.Int16: true, - reflect.Int32: true, - reflect.Int64: true, - reflect.Uint: true, - reflect.Uint8: true, - reflect.Uint16: true, - reflect.Uint32: true, - reflect.Uint64: true, - reflect.Uintptr: true, - reflect.Float32: true, - reflect.Float64: true, - reflect.Complex64: true, - reflect.Complex128: true, - // reflect.Array - // reflect.Chan - // reflect.Func: true, - // reflect.Interface - // reflect.Map - // reflect.Ptr - // reflect.Slice - reflect.String: true, - // reflect.Struct - // reflect.UnsafePointer -} - -var recognizedRtids []uintptr -var recognizedRtidPtrs []uintptr -var recognizedRtidOrPtrs []uintptr - -func init() { - if !useLookupRecognizedTypes { - return - } - for _, v := range [...]interface{}{ - float32(0), - float64(0), - uintptr(0), - uint(0), - uint8(0), - uint16(0), - uint32(0), - uint64(0), - uintptr(0), - int(0), - int8(0), - int16(0), - int32(0), - int64(0), - bool(false), - string(""), - Raw{}, - []byte(nil), - } { - rt := reflect.TypeOf(v) - recognizedRtids = append(recognizedRtids, rt2id(rt)) - recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(rt))) - } -} - -func containsU(s []uintptr, v uintptr) bool { - // return false // TODO: REMOVE - h, i, j := 0, 0, len(s) - for i < j { - h = i + 
(j-i)/2 - if s[h] < v { - i = h + 1 - } else { - j = h - } - } - if i < len(s) && s[i] == v { - return true - } - return false -} - -func isRecognizedRtid(rtid uintptr) bool { - return containsU(recognizedRtids, rtid) -} - -func isRecognizedRtidPtr(rtid uintptr) bool { - return containsU(recognizedRtidPtrs, rtid) -} - -func isRecognizedRtidOrPtr(rtid uintptr) bool { - return containsU(recognizedRtidOrPtrs, rtid) -} - -// Selfer defines methods by which a value can encode or decode itself. -// -// Any type which implements Selfer will be able to encode or decode itself. -// Consequently, during (en|de)code, this takes precedence over -// (text|binary)(M|Unm)arshal or extension support. -type Selfer interface { - CodecEncodeSelf(*Encoder) - CodecDecodeSelf(*Decoder) -} - -// MapBySlice represents a slice which should be encoded as a map in the stream. -// The slice contains a sequence of key-value pairs. -// This affords storing a map in a specific sequence in the stream. -// -// The support of MapBySlice affords the following: -// - A slice type which implements MapBySlice will be encoded as a map -// - A slice can be decoded from a map in the stream -type MapBySlice interface { - MapBySlice() -} - -// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. -// -// BasicHandle encapsulates the common options and extension functions. -type BasicHandle struct { - // TypeInfos is used to get the type info for any type. - // - // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json - TypeInfos *TypeInfos - - extHandle - EncodeOptions - DecodeOptions - noBuiltInTypeChecker -} - -func (x *BasicHandle) getBasicHandle() *BasicHandle { - return x -} - -func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - if x.TypeInfos == nil { - return defTypeInfos.get(rtid, rt) - } - return x.TypeInfos.get(rtid, rt) -} - -// Handle is the interface for a specific encoding format. 
-// -// Typically, a Handle is pre-configured before first time use, -// and not modified while in use. Such a pre-configured Handle -// is safe for concurrent access. -type Handle interface { - getBasicHandle() *BasicHandle - newEncDriver(w *Encoder) encDriver - newDecDriver(r *Decoder) decDriver - isBinary() bool - hasElemSeparators() bool - IsBuiltinType(rtid uintptr) bool -} - -// Raw represents raw formatted bytes. -// We "blindly" store it during encode and store the raw bytes during decode. -// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set. -type Raw []byte - -// RawExt represents raw unprocessed extension data. -// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag. -// -// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value. -type RawExt struct { - Tag uint64 - // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value. - // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types - Data []byte - // Value represents the extension, if Data is nil. - // Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. - Value interface{} -} - -// BytesExt handles custom (de)serialization of types to/from []byte. -// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. -type BytesExt interface { - // WriteExt converts a value to a []byte. - // - // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. - WriteExt(v interface{}) []byte - - // ReadExt updates a value from a []byte. - ReadExt(dst interface{}, src []byte) -} - -// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. -// The Encoder or Decoder will then handle the further (de)serialization of that known type. 
-// -// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. -type InterfaceExt interface { - // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64. - // - // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. - ConvertExt(v interface{}) interface{} - - // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time. - UpdateExt(dst interface{}, src interface{}) -} - -// Ext handles custom (de)serialization of custom types / extensions. -type Ext interface { - BytesExt - InterfaceExt -} - -// addExtWrapper is a wrapper implementation to support former AddExt exported method. -type addExtWrapper struct { - encFn func(reflect.Value) ([]byte, error) - decFn func(reflect.Value, []byte) error -} - -func (x addExtWrapper) WriteExt(v interface{}) []byte { - bs, err := x.encFn(reflect.ValueOf(v)) - if err != nil { - panic(err) - } - return bs -} - -func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { - if err := x.decFn(reflect.ValueOf(v), bs); err != nil { - panic(err) - } -} - -func (x addExtWrapper) ConvertExt(v interface{}) interface{} { - return x.WriteExt(v) -} - -func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { - x.ReadExt(dest, v.([]byte)) -} - -type setExtWrapper struct { - b BytesExt - i InterfaceExt -} - -func (x *setExtWrapper) WriteExt(v interface{}) []byte { - if x.b == nil { - panic("BytesExt.WriteExt is not supported") - } - return x.b.WriteExt(v) -} - -func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) { - if x.b == nil { - panic("BytesExt.WriteExt is not supported") - - } - x.b.ReadExt(v, bs) -} - -func (x *setExtWrapper) ConvertExt(v interface{}) interface{} { - if x.i == nil { - panic("InterfaceExt.ConvertExt is not supported") - - } - return x.i.ConvertExt(v) -} - -func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) { - if 
x.i == nil { - panic("InterfaceExxt.UpdateExt is not supported") - - } - x.i.UpdateExt(dest, v) -} - -type binaryEncodingType struct{} - -func (_ binaryEncodingType) isBinary() bool { return true } - -type textEncodingType struct{} - -func (_ textEncodingType) isBinary() bool { return false } - -// noBuiltInTypes is embedded into many types which do not support builtins -// e.g. msgpack, simple, cbor. - -type noBuiltInTypeChecker struct{} - -func (_ noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false } - -type noBuiltInTypes struct{ noBuiltInTypeChecker } - -func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} -func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} - -// type noStreamingCodec struct{} -// func (_ noStreamingCodec) CheckBreak() bool { return false } -// func (_ noStreamingCodec) hasElemSeparators() bool { return false } - -type noElemSeparators struct{} - -func (_ noElemSeparators) hasElemSeparators() (v bool) { return } - -// bigenHelper. -// Users must already slice the x completely, because we will not reslice. -type bigenHelper struct { - x []byte // must be correctly sliced to appropriate len. slicing is a cost. - w encWriter -} - -func (z bigenHelper) writeUint16(v uint16) { - bigen.PutUint16(z.x, v) - z.w.writeb(z.x) -} - -func (z bigenHelper) writeUint32(v uint32) { - bigen.PutUint32(z.x, v) - z.w.writeb(z.x) -} - -func (z bigenHelper) writeUint64(v uint64) { - bigen.PutUint64(z.x, v) - z.w.writeb(z.x) -} - -type extTypeTagFn struct { - rtid uintptr - rt reflect.Type - tag uint64 - ext Ext -} - -type extHandle []extTypeTagFn - -// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. -// -// AddExt registes an encode and decode function for a reflect.Type. -// AddExt internally calls SetExt. -// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. 
-func (o *extHandle) AddExt( - rt reflect.Type, tag byte, - encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error, -) (err error) { - if encfn == nil || decfn == nil { - return o.SetExt(rt, uint64(tag), nil) - } - return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) -} - -// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. -// -// Note that the type must be a named type, and specifically not -// a pointer or Interface. An error is returned if that is not honored. -// -// To Deregister an ext, call SetExt with nil Ext -func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { - // o is a pointer, because we may need to initialize it - if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { - err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T", - reflect.Zero(rt).Interface()) - return - } - - rtid := rt2id(rt) - for _, v := range *o { - if v.rtid == rtid { - v.tag, v.ext = tag, ext - return - } - } - - if *o == nil { - *o = make([]extTypeTagFn, 0, 4) - } - *o = append(*o, extTypeTagFn{rtid, rt, tag, ext}) - return -} - -func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { - var v *extTypeTagFn - for i := range o { - v = &o[i] - if v.rtid == rtid { - return v - } - } - return nil -} - -func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { - var v *extTypeTagFn - for i := range o { - v = &o[i] - if v.tag == tag { - return v - } - } - return nil -} - -const maxLevelsEmbedding = 16 - -type structFieldInfo struct { - encName string // encode name - fieldName string // field name - - is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct - nis uint8 // num levels of embedding. if 1, then it's not embedded. - omitEmpty bool - toArray bool // if field is _struct, is the toArray set? 
-} - -func (si *structFieldInfo) setToZeroValue(v reflect.Value) { - if v, valid := si.field(v, false); valid { - v.Set(reflect.Zero(v.Type())) - } -} - -// rv returns the field of the struct. -// If anonymous, it returns an Invalid -func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) { - // replicate FieldByIndex - for i, x := range si.is { - if uint8(i) == si.nis { - break - } - if v, valid = baseStructRv(v, update); !valid { - return - } - v = v.Field(int(x)) - } - - return v, true -} - -func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value { - v, _ = si.field(v, update) - return v -} - -func parseStructFieldInfo(fname string, stag string) *structFieldInfo { - // if fname == "" { - // panic(noFieldNameToStructFieldInfoErr) - // } - si := structFieldInfo{ - encName: fname, - } - - if stag != "" { - for i, s := range strings.Split(stag, ",") { - if i == 0 { - if s != "" { - si.encName = s - } - } else { - if s == "omitempty" { - si.omitEmpty = true - } else if s == "toarray" { - si.toArray = true - } - } - } - } - // si.encNameBs = []byte(si.encName) - return &si -} - -type sfiSortedByEncName []*structFieldInfo - -func (p sfiSortedByEncName) Len() int { - return len(p) -} - -func (p sfiSortedByEncName) Less(i, j int) bool { - return p[i].encName < p[j].encName -} - -func (p sfiSortedByEncName) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -const structFieldNodeNumToCache = 4 - -type structFieldNodeCache struct { - rv [structFieldNodeNumToCache]reflect.Value - idx [structFieldNodeNumToCache]uint32 - num uint8 -} - -func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) { - // defer func() { fmt.Printf(">>>> found in cache2? 
%v\n", valid) }() - for i, k := range &x.idx { - if uint8(i) == x.num { - return // break - } - if key == k { - return x.rv[i], true - } - } - return -} - -func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) { - if x.num < structFieldNodeNumToCache { - x.rv[x.num] = fv - x.idx[x.num] = key - x.num++ - return - } -} - -type structFieldNode struct { - v reflect.Value - cache2 structFieldNodeCache - cache3 structFieldNodeCache - update bool -} - -func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) { - // return si.fieldval(x.v, x.update) - // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding - // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc. - var valid bool - switch si.nis { - case 1: - fv = x.v.Field(int(si.is[0])) - case 2: - if fv, valid = x.cache2.get(uint32(si.is[0])); valid { - fv = fv.Field(int(si.is[1])) - return - } - fv = x.v.Field(int(si.is[0])) - if fv, valid = baseStructRv(fv, x.update); !valid { - return - } - x.cache2.tryAdd(fv, uint32(si.is[0])) - fv = fv.Field(int(si.is[1])) - case 3: - var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1]) - if fv, valid = x.cache3.get(key); valid { - fv = fv.Field(int(si.is[2])) - return - } - fv = x.v.Field(int(si.is[0])) - if fv, valid = baseStructRv(fv, x.update); !valid { - return - } - fv = fv.Field(int(si.is[1])) - if fv, valid = baseStructRv(fv, x.update); !valid { - return - } - x.cache3.tryAdd(fv, key) - fv = fv.Field(int(si.is[2])) - default: - fv, _ = si.field(x.v, x.update) - } - return -} - -func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - if !update { - return - } - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - return v, true -} - -// typeInfo keeps information about each type referenced in the encode/decode sequence. 
-// -// During an encode/decode sequence, we work as below: -// - If base is a built in type, en/decode base value -// - If base is registered as an extension, en/decode base value -// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method -// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method -// - Else decode appropriately based on the reflect.Kind -type typeInfo struct { - sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. - sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. - - rt reflect.Type - rtid uintptr - // rv0 reflect.Value // saved zero value, used if immutableKind - - numMeth uint16 // number of methods - - // baseId gives pointer to the base reflect.Type, after deferencing - // the pointers. E.g. base type of ***time.Time is time.Time. - base reflect.Type - baseId uintptr - baseIndir int8 // number of indirections to get to base - - anyOmitEmpty bool - - mbs bool // base type (T or *T) is a MapBySlice - - bm bool // base type (T or *T) is a binaryMarshaler - bunm bool // base type (T or *T) is a binaryUnmarshaler - bmIndir int8 // number of indirections to get to binaryMarshaler type - bunmIndir int8 // number of indirections to get to binaryUnmarshaler type - - tm bool // base type (T or *T) is a textMarshaler - tunm bool // base type (T or *T) is a textUnmarshaler - tmIndir int8 // number of indirections to get to textMarshaler type - tunmIndir int8 // number of indirections to get to textUnmarshaler type - - jm bool // base type (T or *T) is a jsonMarshaler - junm bool // base type (T or *T) is a jsonUnmarshaler - jmIndir int8 // number of indirections to get to jsonMarshaler type - junmIndir int8 // number of indirections to get to jsonUnmarshaler type - - cs bool // base type (T or *T) is a Selfer - csIndir int8 // number of indirections to get to Selfer type - - toArray bool // whether this (struct) type should be encoded as an array -} - -// define length beyond which we do a binary 
search instead of a linear search. -// From our testing, linear search seems faster than binary search up to 16-field structs. -// However, we set to 8 similar to what python does for hashtables. -const indexForEncNameBinarySearchThreshold = 8 - -func (ti *typeInfo) indexForEncName(name string) int { - // NOTE: name may be a stringView, so don't pass it to another function. - //tisfi := ti.sfi - sfilen := len(ti.sfi) - if sfilen < indexForEncNameBinarySearchThreshold { - for i, si := range ti.sfi { - if si.encName == name { - return i - } - } - return -1 - } - // binary search. adapted from sort/search.go. - h, i, j := 0, 0, sfilen - for i < j { - h = i + (j-i)/2 - if ti.sfi[h].encName < name { - i = h + 1 - } else { - j = h - } - } - if i < sfilen && ti.sfi[i].encName == name { - return i - } - return -1 -} - -type rtid2ti struct { - rtid uintptr - ti *typeInfo -} - -// TypeInfos caches typeInfo for each type on first inspection. -// -// It is configured with a set of tag keys, which are used to get -// configuration for the type. -type TypeInfos struct { - infos atomicTypeInfoSlice // formerly map[uintptr]*typeInfo, now *[]rtid2ti - mu sync.Mutex - tags []string -} - -// NewTypeInfos creates a TypeInfos given a set of struct tags keys. -// -// This allows users customize the struct tag keys which contain configuration -// of their types. -func NewTypeInfos(tags []string) *TypeInfos { - return &TypeInfos{tags: tags} -} - -func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { - // check for tags: codec, json, in that order. - // this allows seamless support for many configured structs. - for _, x := range x.tags { - s = t.Get(x) - if s != "" { - return s - } - } - return -} - -func (x *TypeInfos) find(sp *[]rtid2ti, rtid uintptr) (idx int, ti *typeInfo) { - // binary search. adapted from sort/search.go. 
- // if sp == nil { - // return -1, nil - // } - s := *sp - h, i, j := 0, 0, len(s) - for i < j { - h = i + (j-i)/2 - if s[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - } - if i < len(s) && s[i].rtid == rtid { - return i, s[i].ti - } - return i, nil -} - -func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - sp := x.infos.load() - var idx int - if sp != nil { - idx, pti = x.find(sp, rtid) - if pti != nil { - return - } - } - - // do not hold lock while computing this. - // it may lead to duplication, but that's ok. - ti := typeInfo{rt: rt, rtid: rtid} - // ti.rv0 = reflect.Zero(rt) - - ti.numMeth = uint16(rt.NumMethod()) - var ok bool - var indir int8 - if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { - ti.bm, ti.bmIndir = true, indir - } - if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { - ti.bunm, ti.bunmIndir = true, indir - } - if ok, indir = implementsIntf(rt, textMarshalerTyp); ok { - ti.tm, ti.tmIndir = true, indir - } - if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok { - ti.tunm, ti.tunmIndir = true, indir - } - if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok { - ti.jm, ti.jmIndir = true, indir - } - if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok { - ti.junm, ti.junmIndir = true, indir - } - if ok, indir = implementsIntf(rt, selferTyp); ok { - ti.cs, ti.csIndir = true, indir - } - if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { - ti.mbs = true - } - - pt := rt - var ptIndir int8 - // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } - for pt.Kind() == reflect.Ptr { - pt = pt.Elem() - ptIndir++ - } - if ptIndir == 0 { - ti.base = rt - ti.baseId = rtid - } else { - ti.base = pt - ti.baseId = rt2id(pt) - ti.baseIndir = ptIndir - } - - if rt.Kind() == reflect.Struct { - var omitEmpty bool - if f, ok := rt.FieldByName(structInfoFieldName); ok { - siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) - ti.toArray = siInfo.toArray - omitEmpty 
= siInfo.omitEmpty - } - pp, pi := pool.tiLoad() - pv := pi.(*typeInfoLoadArray) - pv.etypes[0] = ti.baseId - vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} - x.rget(rt, rtid, omitEmpty, nil, &vv) - ti.sfip, ti.sfi, ti.anyOmitEmpty = rgetResolveSFI(vv.sfis, pv.sfiidx[:0]) - pp.Put(pi) - } - // sfi = sfip - - var vs []rtid2ti - x.mu.Lock() - sp = x.infos.load() - if sp == nil { - pti = &ti - vs = []rtid2ti{{rtid, pti}} - x.infos.store(&vs) - } else { - idx, pti = x.find(sp, rtid) - if pti == nil { - s := *sp - pti = &ti - vs = make([]rtid2ti, len(s)+1) - copy(vs, s[:idx]) - vs[idx] = rtid2ti{rtid, pti} - copy(vs[idx+1:], s[idx:]) - x.infos.store(&vs) - } - } - x.mu.Unlock() - return -} - -func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, - indexstack []uint16, pv *typeInfoLoad, -) { - // Read up fields and store how to access the value. - // - // It uses go's rules for message selectors, - // which say that the field with the shallowest depth is selected. - // - // Note: we consciously use slices, not a map, to simulate a set. - // Typically, types have < 16 fields, - // and iteration using equals is faster than maps there - flen := rt.NumField() - if flen > (1< maxLevelsEmbedding-1 { - panic(fmt.Errorf("codec: only supports up to %v depth of embedding - type has %v depth", maxLevelsEmbedding-1, len(indexstack))) - } - si.nis = uint8(len(indexstack)) + 1 - copy(si.is[:], indexstack) - si.is[len(indexstack)] = j - - if omitEmpty { - si.omitEmpty = true - } - pv.sfis = append(pv.sfis, si) - } -} - -// resolves the struct field info got from a call to rget. -// Returns a trimmed, unsorted and sorted []*structFieldInfo. -func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo, anyOmitEmpty bool) { - var n int - for i, v := range x { - xn := v.encName // TODO: fieldName or encName? use encName for now. 
- var found bool - for j, k := range pv { - if k.name == xn { - // one of them must be reset to nil, and the index updated appropriately to the other one - if v.nis == x[k.index].nis { - } else if v.nis < x[k.index].nis { - pv[j].index = i - if x[k.index] != nil { - x[k.index] = nil - n++ - } - } else { - if x[i] != nil { - x[i] = nil - n++ - } - } - found = true - break - } - } - if !found { - pv = append(pv, sfiIdx{xn, i}) - } - } - - // remove all the nils - y = make([]*structFieldInfo, len(x)-n) - n = 0 - for _, v := range x { - if v == nil { - continue - } - if !anyOmitEmpty && v.omitEmpty { - anyOmitEmpty = true - } - y[n] = v - n++ - } - - z = make([]*structFieldInfo, len(y)) - copy(z, y) - sort.Sort(sfiSortedByEncName(z)) - return -} - -func xprintf(format string, a ...interface{}) { - if xDebug { - fmt.Fprintf(os.Stderr, format, a...) - } -} - -func panicToErr(err *error) { - if recoverPanicToErr { - if x := recover(); x != nil { - // if false && xDebug { - // fmt.Printf("panic'ing with: %v\n", x) - // debug.PrintStack() - // } - panicValToErr(x, err) - } - } -} - -func panicToErrs2(err1, err2 *error) { - if recoverPanicToErr { - if x := recover(); x != nil { - panicValToErr(x, err1) - panicValToErr(x, err2) - } - } -} - -// func doPanic(tag string, format string, params ...interface{}) { -// params2 := make([]interface{}, len(params)+1) -// params2[0] = tag -// copy(params2[1:], params) -// panic(fmt.Errorf("%s: "+format, params2...)) -// } - -func isImmutableKind(k reflect.Kind) (v bool) { - return immutableKindsSet[k] - // return false || - // k == reflect.Int || - // k == reflect.Int8 || - // k == reflect.Int16 || - // k == reflect.Int32 || - // k == reflect.Int64 || - // k == reflect.Uint || - // k == reflect.Uint8 || - // k == reflect.Uint16 || - // k == reflect.Uint32 || - // k == reflect.Uint64 || - // k == reflect.Uintptr || - // k == reflect.Float32 || - // k == reflect.Float64 || - // k == reflect.Bool || - // k == reflect.String -} - -// ---- - 
-type codecFnInfo struct { - ti *typeInfo - xfFn Ext - xfTag uint64 - seq seqType - addr bool -} - -// codecFn encapsulates the captured variables and the encode function. -// This way, we only do some calculations one times, and pass to the -// code block that should be called (encapsulated in a function) -// instead of executing the checks every time. -type codecFn struct { - i codecFnInfo - fe func(*Encoder, *codecFnInfo, reflect.Value) - fd func(*Decoder, *codecFnInfo, reflect.Value) -} - -type codecRtidFn struct { - rtid uintptr - fn codecFn -} - -type codecFner struct { - hh Handle - h *BasicHandle - cs [arrayCacheLen]*[arrayCacheLen]codecRtidFn - s []*[arrayCacheLen]codecRtidFn - sn uint32 - be bool - js bool - cf [arrayCacheLen]codecRtidFn -} - -func (c *codecFner) reset(hh Handle) { - c.hh = hh - c.h = hh.getBasicHandle() - _, c.js = hh.(*JsonHandle) - c.be = hh.isBinary() -} - -func (c *codecFner) get(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) { - rtid := rt2id(rt) - var j uint32 - var sn uint32 = c.sn - if sn == 0 { - c.s = c.cs[:1] - c.s[0] = &c.cf - c.cf[0].rtid = rtid - fn = &(c.cf[0].fn) - c.sn = 1 - } else { - LOOP1: - for _, x := range c.s { - for i := range x { - if j == sn { - break LOOP1 - } - if x[i].rtid == rtid { - fn = &(x[i].fn) - return - } - j++ - } - } - sx, sy := sn/arrayCacheLen, sn%arrayCacheLen - if sy == 0 { - c.s = append(c.s, &[arrayCacheLen]codecRtidFn{}) - } - c.s[sx][sy].rtid = rtid - fn = &(c.s[sx][sy].fn) - c.sn++ - } - - ti := c.h.getTypeInfo(rtid, rt) - fi := &(fn.i) - fi.ti = ti - - if checkCodecSelfer && ti.cs { - fn.fe = (*Encoder).selferMarshal - fn.fd = (*Decoder).selferUnmarshal - } else if rtid == rawTypId { - fn.fe = (*Encoder).raw - fn.fd = (*Decoder).raw - } else if rtid == rawExtTypId { - fn.fe = (*Encoder).rawExt - fn.fd = (*Decoder).rawExt - fn.i.addr = true - } else if c.hh.IsBuiltinType(rtid) { - fn.fe = (*Encoder).builtin - fn.fd = (*Decoder).builtin - fn.i.addr = true - } else if 
xfFn := c.h.getExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext - fn.fe = (*Encoder).ext - fn.fd = (*Decoder).ext - fn.i.addr = true - } else if supportMarshalInterfaces && c.be && ti.bm && ti.bunm { - fn.fe = (*Encoder).binaryMarshal - fn.fd = (*Decoder).binaryUnmarshal - } else if supportMarshalInterfaces && !c.be && c.js && ti.jm && ti.junm { - //If JSON, we should check JSONMarshal before textMarshal - fn.fe = (*Encoder).jsonMarshal - fn.fd = (*Decoder).jsonUnmarshal - } else if supportMarshalInterfaces && !c.be && ti.tm && ti.tunm { - fn.fe = (*Encoder).textMarshal - fn.fd = (*Decoder).textUnmarshal - } else { - rk := rt.Kind() - if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if rt.PkgPath() == "" { // un-named slice or map - if idx := fastpathAV.index(rtid); idx != -1 { - fn.fe = fastpathAV[idx].encfn - fn.fd = fastpathAV[idx].decfn - fn.i.addr = true - } - } else { - // use mapping for underlying type if there - var rtu reflect.Type - if rk == reflect.Map { - rtu = reflect.MapOf(rt.Key(), rt.Elem()) - } else { - rtu = reflect.SliceOf(rt.Elem()) - } - rtuid := rt2id(rtu) - if idx := fastpathAV.index(rtuid); idx != -1 { - xfnf := fastpathAV[idx].encfn - xrt := fastpathAV[idx].rt - fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { - xfnf(e, xf, xrv.Convert(xrt)) - } - fn.i.addr = true - xfnf2 := fastpathAV[idx].decfn - fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { - xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt))) - } - } - } - } - if fn.fe == nil && fn.fd == nil { - switch rk { - case reflect.Bool: - fn.fe = (*Encoder).kBool - fn.fd = (*Decoder).kBool - case reflect.String: - fn.fe = (*Encoder).kString - fn.fd = (*Decoder).kString - case reflect.Int: - fn.fd = (*Decoder).kInt - fn.fe = (*Encoder).kInt - case reflect.Int8: - fn.fe = (*Encoder).kInt - fn.fd = (*Decoder).kInt8 - case reflect.Int16: - fn.fe = (*Encoder).kInt - fn.fd = (*Decoder).kInt16 - case reflect.Int32: - fn.fe 
= (*Encoder).kInt - fn.fd = (*Decoder).kInt32 - case reflect.Int64: - fn.fe = (*Encoder).kInt - fn.fd = (*Decoder).kInt64 - case reflect.Uint: - fn.fd = (*Decoder).kUint - fn.fe = (*Encoder).kUint - case reflect.Uint8: - fn.fe = (*Encoder).kUint - fn.fd = (*Decoder).kUint8 - case reflect.Uint16: - fn.fe = (*Encoder).kUint - fn.fd = (*Decoder).kUint16 - case reflect.Uint32: - fn.fe = (*Encoder).kUint - fn.fd = (*Decoder).kUint32 - case reflect.Uint64: - fn.fe = (*Encoder).kUint - fn.fd = (*Decoder).kUint64 - // case reflect.Ptr: - // fn.fd = (*Decoder).kPtr - case reflect.Uintptr: - fn.fe = (*Encoder).kUint - fn.fd = (*Decoder).kUintptr - case reflect.Float32: - fn.fe = (*Encoder).kFloat32 - fn.fd = (*Decoder).kFloat32 - case reflect.Float64: - fn.fe = (*Encoder).kFloat64 - fn.fd = (*Decoder).kFloat64 - case reflect.Invalid: - fn.fe = (*Encoder).kInvalid - case reflect.Chan: - fi.seq = seqTypeChan - fn.fe = (*Encoder).kSlice - fn.fd = (*Decoder).kSlice - case reflect.Slice: - fi.seq = seqTypeSlice - fn.fe = (*Encoder).kSlice - fn.fd = (*Decoder).kSlice - case reflect.Array: - fi.seq = seqTypeArray - fn.fe = (*Encoder).kSlice - fi.addr = false - rt2 := reflect.SliceOf(rt.Elem()) - fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { - // println(">>>>>> decoding an array ... ") - d.cf.get(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len())) - // println(">>>>>> decoding an array ... 
DONE") - } - // fn.fd = (*Decoder).kArray - case reflect.Struct: - if ti.anyOmitEmpty { - fn.fe = (*Encoder).kStruct - } else { - fn.fe = (*Encoder).kStructNoOmitempty - } - fn.fd = (*Decoder).kStruct - // reflect.Ptr and reflect.Interface are handled already by preEncodeValue - // case reflect.Ptr: - // fn.fe = (*Encoder).kPtr - // case reflect.Interface: - // fn.fe = (*Encoder).kInterface - case reflect.Map: - fn.fe = (*Encoder).kMap - fn.fd = (*Decoder).kMap - case reflect.Interface: - // encode: reflect.Interface are handled already by preEncodeValue - fn.fd = (*Decoder).kInterface - default: - fn.fe = (*Encoder).kErr - fn.fd = (*Decoder).kErr - } - } - } - - return -} - -// ---- - -// these functions must be inlinable, and not call anybody -type checkOverflow struct{} - -func (_ checkOverflow) Float32(f float64) (overflow bool) { - if f < 0 { - f = -f - } - return math.MaxFloat32 < f && f <= math.MaxFloat64 -} - -func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { - if bitsize == 0 || bitsize >= 64 || v == 0 { - return - } - if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { - overflow = true - } - return -} - -func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { - if bitsize == 0 || bitsize >= 64 || v == 0 { - return - } - if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { - overflow = true - } - return -} - -func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) { - //e.g. 
-127 to 128 for int8 - pos := (v >> 63) == 0 - ui2 := v & 0x7fffffffffffffff - if pos { - if ui2 > math.MaxInt64 { - overflow = true - return - } - } else { - if ui2 > math.MaxInt64-1 { - overflow = true - return - } - } - i = int64(v) - return -} - -// ------------------ SORT ----------------- - -func isNaN(f float64) bool { return f != f } - -// ----------------------- - -type intSlice []int64 -type uintSlice []uint64 -type uintptrSlice []uintptr -type floatSlice []float64 -type boolSlice []bool -type stringSlice []string -type bytesSlice [][]byte - -func (p intSlice) Len() int { return len(p) } -func (p intSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p uintSlice) Len() int { return len(p) } -func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p uintptrSlice) Len() int { return len(p) } -func (p uintptrSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p uintptrSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p floatSlice) Len() int { return len(p) } -func (p floatSlice) Less(i, j int) bool { - return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j]) -} -func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p stringSlice) Len() int { return len(p) } -func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p bytesSlice) Len() int { return len(p) } -func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 } -func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p boolSlice) Len() int { return len(p) } -func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] } -func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// --------------------- - -type intRv struct { - v int64 - r reflect.Value -} -type intRvSlice []intRv -type uintRv struct { - v 
uint64 - r reflect.Value -} -type uintRvSlice []uintRv -type floatRv struct { - v float64 - r reflect.Value -} -type floatRvSlice []floatRv -type boolRv struct { - v bool - r reflect.Value -} -type boolRvSlice []boolRv -type stringRv struct { - v string - r reflect.Value -} -type stringRvSlice []stringRv -type bytesRv struct { - v []byte - r reflect.Value -} -type bytesRvSlice []bytesRv - -func (p intRvSlice) Len() int { return len(p) } -func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p uintRvSlice) Len() int { return len(p) } -func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p floatRvSlice) Len() int { return len(p) } -func (p floatRvSlice) Less(i, j int) bool { - return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v) -} -func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p stringRvSlice) Len() int { return len(p) } -func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p bytesRvSlice) Len() int { return len(p) } -func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } -func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p boolRvSlice) Len() int { return len(p) } -func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v } -func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// ----------------- - -type bytesI struct { - v []byte - i interface{} -} - -type bytesISlice []bytesI - -func (p bytesISlice) Len() int { return len(p) } -func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } -func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// ----------------- - -type set []uintptr - -func (s *set) add(v uintptr) (exists bool) { - // e.ci is 
always nil, or len >= 1 - x := *s - if x == nil { - x = make([]uintptr, 1, 8) - x[0] = v - *s = x - return - } - // typically, length will be 1. make this perform. - if len(x) == 1 { - if j := x[0]; j == 0 { - x[0] = v - } else if j == v { - exists = true - } else { - x = append(x, v) - *s = x - } - return - } - // check if it exists - for _, j := range x { - if j == v { - exists = true - return - } - } - // try to replace a "deleted" slot - for i, j := range x { - if j == 0 { - x[i] = v - return - } - } - // if unable to replace deleted slot, just append it. - x = append(x, v) - *s = x - return -} - -func (s *set) remove(v uintptr) (exists bool) { - x := *s - if len(x) == 0 { - return - } - if len(x) == 1 { - if x[0] == v { - x[0] = 0 - } - return - } - for i, j := range x { - if j == v { - exists = true - x[i] = 0 // set it to 0, as way to delete it. - // copy(x[i:], x[i+1:]) - // x = x[:len(x)-1] - return - } - } - return -} - -// ------ - -// bitset types are better than [256]bool, because they permit the whole -// bitset array being on a single cache line and use less memory. - -// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1). 
-// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7 - -type bitset256 [32]byte - -func (x *bitset256) set(pos byte) { - x[pos>>3] |= (1 << (pos & 7)) -} -func (x *bitset256) unset(pos byte) { - x[pos>>3] &^= (1 << (pos & 7)) -} -func (x *bitset256) isset(pos byte) bool { - return x[pos>>3]&(1<<(pos&7)) != 0 -} - -type bitset128 [16]byte - -func (x *bitset128) set(pos byte) { - x[pos>>3] |= (1 << (pos & 7)) -} -func (x *bitset128) unset(pos byte) { - x[pos>>3] &^= (1 << (pos & 7)) -} -func (x *bitset128) isset(pos byte) bool { - return x[pos>>3]&(1<<(pos&7)) != 0 -} - -type bitset32 [4]byte - -func (x *bitset32) set(pos byte) { - x[pos>>3] |= (1 << (pos & 7)) -} -func (x *bitset32) unset(pos byte) { - x[pos>>3] &^= (1 << (pos & 7)) -} -func (x *bitset32) isset(pos byte) bool { - return x[pos>>3]&(1<<(pos&7)) != 0 -} - -// ------------ - -type pooler struct { - // for stringRV - strRv8, strRv16, strRv32, strRv64, strRv128 sync.Pool - // for the decNaked - dn sync.Pool - tiload sync.Pool -} - -func (p *pooler) init() { - p.strRv8.New = func() interface{} { return new([8]stringRv) } - p.strRv16.New = func() interface{} { return new([16]stringRv) } - p.strRv32.New = func() interface{} { return new([32]stringRv) } - p.strRv64.New = func() interface{} { return new([64]stringRv) } - p.strRv128.New = func() interface{} { return new([128]stringRv) } - p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x } - p.tiload.New = func() interface{} { return new(typeInfoLoadArray) } -} - -func (p *pooler) stringRv8() (sp *sync.Pool, v interface{}) { - return &p.strRv8, p.strRv8.Get() -} -func (p *pooler) stringRv16() (sp *sync.Pool, v interface{}) { - return &p.strRv16, p.strRv16.Get() -} -func (p *pooler) stringRv32() (sp *sync.Pool, v interface{}) { - return &p.strRv32, p.strRv32.Get() -} -func (p *pooler) stringRv64() (sp *sync.Pool, v interface{}) { - return &p.strRv64, p.strRv64.Get() -} -func (p *pooler) stringRv128() (sp 
*sync.Pool, v interface{}) { - return &p.strRv128, p.strRv128.Get() -} -func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) { - return &p.dn, p.dn.Get() -} -func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) { - return &p.tiload, p.tiload.Get() -} diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go deleted file mode 100644 index eb18e2ccaee..00000000000 --- a/vendor/github.com/ugorji/go/codec/helper_internal.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// All non-std package dependencies live in this file, -// so porting to different environment is easy (just update functions). - -import ( - "errors" - "fmt" - "math" - "reflect" -) - -func panicValToErr(panicVal interface{}, err *error) { - if panicVal == nil { - return - } - // case nil - switch xerr := panicVal.(type) { - case error: - *err = xerr - case string: - *err = errors.New(xerr) - default: - *err = fmt.Errorf("%v", panicVal) - } - return -} - -func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { - switch v.Kind() { - case reflect.Invalid: - return true - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if deref { - if v.IsNil() { - return true - } - return hIsEmptyValue(v.Elem(), deref, checkStruct) - } else { - return v.IsNil() - } - case reflect.Struct: - if !checkStruct { - return false - } - // return true if all fields are empty. else return false. 
- // we cannot use equality check, because some fields may be maps/slices/etc - // and consequently the structs are not comparable. - // return v.Interface() == reflect.Zero(v.Type()).Interface() - for i, n := 0, v.NumField(); i < n; i++ { - if !hIsEmptyValue(v.Field(i), deref, checkStruct) { - return false - } - } - return true - } - return false -} - -func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool { - return hIsEmptyValue(v, deref, checkStruct) -} - -func pruneSignExt(v []byte, pos bool) (n int) { - if len(v) < 2 { - } else if pos && v[0] == 0 { - for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { - } - } else if !pos && v[0] == 0xff { - for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { - } - } - return -} - -func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { - if typ == nil { - return - } - rt := typ - // The type might be a pointer and we need to keep - // dereferencing to the base type until we find an implementation. - for { - if rt.Implements(iTyp) { - return true, indir - } - if p := rt; p.Kind() == reflect.Ptr { - indir++ - if indir >= math.MaxInt8 { // insane number of indirections - return false, 0 - } - rt = p.Elem() - continue - } - break - } - // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. - if typ.Kind() != reflect.Ptr { - // Not a pointer, but does the pointer work? - if reflect.PtrTo(typ).Implements(iTyp) { - return true, -1 - } - } - return false, 0 -} - -// validate that this function is correct ... 
-// culled from OGRE (Object-Oriented Graphics Rendering Engine) -// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) -func halfFloatToFloatBits(yy uint16) (d uint32) { - y := uint32(yy) - s := (y >> 15) & 0x01 - e := (y >> 10) & 0x1f - m := y & 0x03ff - - if e == 0 { - if m == 0 { // plu or minus 0 - return s << 31 - } else { // Denormalized number -- renormalize it - for (m & 0x00000400) == 0 { - m <<= 1 - e -= 1 - } - e += 1 - const zz uint32 = 0x0400 - m &= ^zz - } - } else if e == 31 { - if m == 0 { // Inf - return (s << 31) | 0x7f800000 - } else { // NaN - return (s << 31) | 0x7f800000 | (m << 13) - } - } - e = e + (127 - 15) - m = m << 13 - return (s << 31) | (e << 23) | m -} - -// GrowCap will return a new capacity for a slice, given the following: -// - oldCap: current capacity -// - unit: in-memory size of an element -// - num: number of elements to add -func growCap(oldCap, unit, num int) (newCap int) { - // appendslice logic (if cap < 1024, *2, else *1.25): - // leads to many copy calls, especially when copying bytes. - // bytes.Buffer model (2*cap + n): much better for bytes. - // smarter way is to take the byte-size of the appended element(type) into account - - // maintain 3 thresholds: - // t1: if cap <= t1, newcap = 2x - // t2: if cap <= t2, newcap = 1.75x - // t3: if cap <= t3, newcap = 1.5x - // else newcap = 1.25x - // - // t1, t2, t3 >= 1024 always. - // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) - // - // With this, appending for bytes increase by: - // 100% up to 4K - // 75% up to 8K - // 50% up to 16K - // 25% beyond that - - // unit can be 0 e.g. 
for struct{}{}; handle that appropriately - var t1, t2, t3 int // thresholds - if unit <= 1 { - t1, t2, t3 = 4*1024, 8*1024, 16*1024 - } else if unit < 16 { - t3 = 16 / unit * 1024 - t1 = t3 * 1 / 4 - t2 = t3 * 2 / 4 - } else { - t1, t2, t3 = 1024, 1024, 1024 - } - - var x int // temporary variable - - // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively - if oldCap <= t1 { // [0,t1] - x = 8 - } else if oldCap > t3 { // (t3,infinity] - x = 5 - } else if oldCap <= t2 { // (t1,t2] - x = 7 - } else { // (t2,t3] - x = 6 - } - newCap = x * oldCap / 4 - - if num > 0 { - newCap += num - } - - // ensure newCap is a multiple of 64 (if it is > 64) or 16. - if newCap > 64 { - if x = newCap % 64; x != 0 { - x = newCap / 64 - newCap = 64 * (x + 1) - } - } else { - if x = newCap % 16; x != 0 { - x = newCap / 16 - newCap = 16 * (x + 1) - } - } - return -} diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go deleted file mode 100644 index ef5b73f49d2..00000000000 --- a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go +++ /dev/null @@ -1,160 +0,0 @@ -// +build !go1.7 safe appengine - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "reflect" - "sync/atomic" -) - -const safeMode = true - -// stringView returns a view of the []byte as a string. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -// -// Usage: Always maintain a reference to v while result of this call is in use, -// and call keepAlive4BytesView(v) at point where done with view. -func stringView(v []byte) string { - return string(v) -} - -// bytesView returns a view of the string as a []byte. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. 
-// In regular safe mode, it is an allocation and copy. -// -// Usage: Always maintain a reference to v while result of this call is in use, -// and call keepAlive4BytesView(v) at point where done with view. -func bytesView(v string) []byte { - return []byte(v) -} - -func definitelyNil(v interface{}) bool { - // this is a best-effort option. - // We just return false, so we don't unneessarily incur the cost of reflection this early. - return false - // rv := reflect.ValueOf(v) - // switch rv.Kind() { - // case reflect.Invalid: - // return true - // case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func: - // return rv.IsNil() - // default: - // return false - // } -} - -// // keepAlive4BytesView maintains a reference to the input parameter for bytesView. -// // -// // Usage: call this at point where done with the bytes view. -// func keepAlive4BytesView(v string) {} - -// // keepAlive4BytesView maintains a reference to the input parameter for stringView. -// // -// // Usage: call this at point where done with the string view. 
-// func keepAlive4StringView(v []byte) {} - -func rv2i(rv reflect.Value) interface{} { - return rv.Interface() -} - -func rt2id(rt reflect.Type) uintptr { - return reflect.ValueOf(rt).Pointer() -} - -func rv2rtid(rv reflect.Value) uintptr { - return reflect.ValueOf(rv.Type()).Pointer() -} - -// -------------------------- -// type ptrToRvMap struct{} - -// func (_ *ptrToRvMap) init() {} -// func (_ *ptrToRvMap) get(i interface{}) reflect.Value { -// return reflect.ValueOf(i).Elem() -// } - -// -------------------------- -type atomicTypeInfoSlice struct { - v atomic.Value -} - -func (x *atomicTypeInfoSlice) load() *[]rtid2ti { - i := x.v.Load() - if i == nil { - return nil - } - return i.(*[]rtid2ti) -} - -func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) { - x.v.Store(p) -} - -// -------------------------- -func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { - rv.SetBytes(d.rawBytes()) -} - -func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { - rv.SetString(d.d.DecodeString()) -} - -func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { - rv.SetBool(d.d.DecodeBool()) -} - -func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { - rv.SetFloat(d.d.DecodeFloat(true)) -} - -func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { - rv.SetFloat(d.d.DecodeFloat(false)) -} - -func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { - rv.SetInt(d.d.DecodeInt(intBitsize)) -} - -func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { - rv.SetInt(d.d.DecodeInt(8)) -} - -func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { - rv.SetInt(d.d.DecodeInt(16)) -} - -func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { - rv.SetInt(d.d.DecodeInt(32)) -} - -func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { - rv.SetInt(d.d.DecodeInt(64)) -} - -func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { - rv.SetUint(d.d.DecodeUint(uintBitsize)) -} - -func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { - 
rv.SetUint(d.d.DecodeUint(uintBitsize)) -} - -func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { - rv.SetUint(d.d.DecodeUint(8)) -} - -func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { - rv.SetUint(d.d.DecodeUint(16)) -} - -func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { - rv.SetUint(d.d.DecodeUint(32)) -} - -func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { - rv.SetUint(d.d.DecodeUint(64)) -} diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go deleted file mode 100644 index e2c4afeceea..00000000000 --- a/vendor/github.com/ugorji/go/codec/helper_unsafe.go +++ /dev/null @@ -1,431 +0,0 @@ -// +build !safe -// +build !appengine -// +build go1.7 - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -// This file has unsafe variants of some helper methods. -// NOTE: See helper_not_unsafe.go for the usage information. 
- -// var zeroRTv [4]uintptr - -const safeMode = false -const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go - -type unsafeString struct { - Data uintptr - Len int -} - -type unsafeSlice struct { - Data uintptr - Len int - Cap int -} - -type unsafeIntf struct { - typ unsafe.Pointer - word unsafe.Pointer -} - -type unsafeReflectValue struct { - typ unsafe.Pointer - ptr unsafe.Pointer - flag uintptr -} - -func stringView(v []byte) string { - if len(v) == 0 { - return "" - } - - bx := (*unsafeSlice)(unsafe.Pointer(&v)) - sx := unsafeString{bx.Data, bx.Len} - return *(*string)(unsafe.Pointer(&sx)) -} - -func bytesView(v string) []byte { - if len(v) == 0 { - return zeroByteSlice - } - - sx := (*unsafeString)(unsafe.Pointer(&v)) - bx := unsafeSlice{sx.Data, sx.Len, sx.Len} - return *(*[]byte)(unsafe.Pointer(&bx)) -} - -func definitelyNil(v interface{}) bool { - // There is no global way of checking if an interface is nil. - // For true references (map, ptr, func, chan), you can just look - // at the word of the interface. However, for slices, you have to dereference - // the word, and get a pointer to the 3-word interface value. - - // var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v)) - // var word unsafe.Pointer = ui.word - // // fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n", v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil) - // return word == nil // || *((*unsafe.Pointer)(word)) == nil - return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil -} - -// func keepAlive4BytesView(v string) { -// runtime.KeepAlive(v) -// } - -// func keepAlive4StringView(v []byte) { -// runtime.KeepAlive(v) -// } - -// TODO: consider a more generally-known optimization for reflect.Value ==> Interface -// -// Currently, we use this fragile method that taps into implememtation details from -// the source go stdlib reflect/value.go, -// and trims the implementation. 
-func rv2i(rv reflect.Value) interface{} { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir - var ptr unsafe.Pointer - // kk := reflect.Kind(urv.flag & (1<<5 - 1)) - // if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 { - if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 { - ptr = *(*unsafe.Pointer)(urv.ptr) - } else { - ptr = urv.ptr - } - return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) - // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) - // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -} - -func rt2id(rt reflect.Type) uintptr { - return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) -} - -func rv2rtid(rv reflect.Value) uintptr { - return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ) -} - -// func rv0t(rt reflect.Type) reflect.Value { -// ut := (*unsafeIntf)(unsafe.Pointer(&rt)) -// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr -// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())} -// return *(*reflect.Value)(unsafe.Pointer(&uv}) -// } - -// -------------------------- -type atomicTypeInfoSlice struct { - v unsafe.Pointer -} - -func (x *atomicTypeInfoSlice) load() *[]rtid2ti { - return (*[]rtid2ti)(atomic.LoadPointer(&x.v)) -} - -func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) { - atomic.StorePointer(&x.v, unsafe.Pointer(p)) -} - -// -------------------------- -func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - // if urv.flag&unsafeFlagIndir != 0 { - // urv.ptr = *(*unsafe.Pointer)(urv.ptr) - // } - *(*[]byte)(urv.ptr) = d.rawBytes() -} - -func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { - urv := 
(*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*string)(urv.ptr) = d.d.DecodeString() -} - -func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*bool)(urv.ptr) = d.d.DecodeBool() -} - -func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*float32)(urv.ptr) = float32(d.d.DecodeFloat(true)) -} - -func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*float64)(urv.ptr) = d.d.DecodeFloat(false) -} - -func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int)(urv.ptr) = int(d.d.DecodeInt(intBitsize)) -} - -func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int8)(urv.ptr) = int8(d.d.DecodeInt(8)) -} - -func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int16)(urv.ptr) = int16(d.d.DecodeInt(16)) -} - -func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int32)(urv.ptr) = int32(d.d.DecodeInt(32)) -} - -func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int64)(urv.ptr) = d.d.DecodeInt(64) -} - -func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint)(urv.ptr) = uint(d.d.DecodeUint(uintBitsize)) -} - -func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uintptr)(urv.ptr) = uintptr(d.d.DecodeUint(uintBitsize)) -} - -func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint8)(urv.ptr) = uint8(d.d.DecodeUint(8)) -} - -func (d *Decoder) kUint16(f *codecFnInfo, rv 
reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint16)(urv.ptr) = uint16(d.d.DecodeUint(16)) -} - -func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint32)(urv.ptr) = uint32(d.d.DecodeUint(32)) -} - -func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint64)(urv.ptr) = d.d.DecodeUint(64) -} - -// ------------ - -// func rt2id(rt reflect.Type) uintptr { -// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) -// // var i interface{} = rt -// // // ui := (*unsafeIntf)(unsafe.Pointer(&i)) -// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word -// } - -// func rv2i(rv reflect.Value) interface{} { -// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) -// // non-reference type: already indir -// // reference type: depend on flagIndir property ('cos maybe was double-referenced) -// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 ) -// // rvk := reflect.Kind(urv.flag & (1<<5 - 1)) -// // if (rvk == reflect.Chan || -// // rvk == reflect.Func || -// // rvk == reflect.Interface || -// // rvk == reflect.Map || -// // rvk == reflect.Ptr || -// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 { -// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) -// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) -// // } -// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 { -// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) -// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) -// } -// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type()) -// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -// } - -// const ( -// unsafeRvFlagKindMask = 1<<5 - 1 -// 
unsafeRvKindDirectIface = 1 << 5 -// unsafeRvFlagIndir = 1 << 7 -// unsafeRvFlagAddr = 1 << 8 -// unsafeRvFlagMethod = 1 << 9 - -// _USE_RV_INTERFACE bool = false -// _UNSAFE_RV_DEBUG = true -// ) - -// type unsafeRtype struct { -// _ [2]uintptr -// _ uint32 -// _ uint8 -// _ uint8 -// _ uint8 -// kind uint8 -// _ [2]uintptr -// _ int32 -// } - -// func _rv2i(rv reflect.Value) interface{} { -// // Note: From use, -// // - it's never an interface -// // - the only calls here are for ifaceIndir types. -// // (though that conditional is wrong) -// // To know for sure, we need the value of t.kind (which is not exposed). -// // -// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct) -// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string -// // - Type Direct, Value indirect: ==> map??? -// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map -// // -// // TRANSLATES TO: -// // if typeIndirect { } else if valueIndirect { } else { } -// // -// // Since we don't deal with funcs, then "flagNethod" is unset, and can be ignored. 
- -// if _USE_RV_INTERFACE { -// return rv.Interface() -// } -// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - -// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { -// // println("***** IS flag method or interface: delegating to rv.Interface()") -// // return rv.Interface() -// // } - -// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { -// // println("***** IS Interface: delegate to rv.Interface") -// // return rv.Interface() -// // } -// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 { -// // if urv.flag&unsafeRvFlagAddr == 0 { -// // println("***** IS ifaceIndir typ") -// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ} -// // // return *(*interface{})(unsafe.Pointer(&ui)) -// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -// // } -// // } else if urv.flag&unsafeRvFlagIndir != 0 { -// // println("***** IS flagindir") -// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) -// // } else { -// // println("***** NOT flagindir") -// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -// // } -// // println("***** default: delegate to rv.Interface") - -// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ)) -// if _UNSAFE_RV_DEBUG { -// fmt.Printf(">>>> start: %v: ", rv.Type()) -// fmt.Printf("%v - %v\n", *urv, *urt) -// } -// if urt.kind&unsafeRvKindDirectIface == 0 { -// if _UNSAFE_RV_DEBUG { -// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type()) -// } -// // println("***** IS ifaceIndir typ") -// // if true || urv.flag&unsafeRvFlagAddr == 0 { -// // // println(" ***** IS NOT addr") -// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -// // } -// } else if urv.flag&unsafeRvFlagIndir != 0 { -// if _UNSAFE_RV_DEBUG { -// fmt.Printf("**** +flagIndir type: %v\n", rv.Type()) -// } -// // println("***** IS 
flagindir") -// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) -// } else { -// if _UNSAFE_RV_DEBUG { -// fmt.Printf("**** -flagIndir type: %v\n", rv.Type()) -// } -// // println("***** NOT flagindir") -// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -// } -// // println("***** default: delegating to rv.Interface()") -// // return rv.Interface() -// } - -// var staticM0 = make(map[string]uint64) -// var staticI0 = (int32)(-5) - -// func staticRv2iTest() { -// i0 := (int32)(-5) -// m0 := make(map[string]uint16) -// m0["1"] = 1 -// for _, i := range []interface{}{ -// (int)(7), -// (uint)(8), -// (int16)(-9), -// (uint16)(19), -// (uintptr)(77), -// (bool)(true), -// float32(-32.7), -// float64(64.9), -// complex(float32(19), 5), -// complex(float64(-32), 7), -// [4]uint64{1, 2, 3, 4}, -// (chan<- int)(nil), // chan, -// rv2i, // func -// io.Writer(ioutil.Discard), -// make(map[string]uint), -// (map[string]uint)(nil), -// staticM0, -// m0, -// &m0, -// i0, -// &i0, -// &staticI0, -// &staticM0, -// []uint32{6, 7, 8}, -// "abc", -// Raw{}, -// RawExt{}, -// &Raw{}, -// &RawExt{}, -// unsafe.Pointer(&i0), -// } { -// i2 := rv2i(reflect.ValueOf(i)) -// eq := reflect.DeepEqual(i, i2) -// fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq) -// } -// // os.Exit(0) -// } - -// func init() { -// staticRv2iTest() -// } - -// func rv2i(rv reflect.Value) interface{} { -// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() { -// return rv.Interface() -// } -// // var i interface{} -// // ui := (*unsafeIntf)(unsafe.Pointer(&i)) -// var ui unsafeIntf -// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) -// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr)) -// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 { -// if urv.flag&unsafeRvFlagAddr != 0 { -// println("***** indirect and addressable! 
Needs typed move - delegate to rv.Interface()") -// return rv.Interface() -// } -// println("****** indirect type/kind") -// ui.word = urv.ptr -// } else if urv.flag&unsafeRvFlagIndir != 0 { -// println("****** unsafe rv flag indir") -// ui.word = *(*unsafe.Pointer)(urv.ptr) -// } else { -// println("****** default: assign prt to word directly") -// ui.word = urv.ptr -// } -// // ui.word = urv.ptr -// ui.typ = urv.typ -// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word) -// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word)) -// return *(*interface{})(unsafe.Pointer(&ui)) -// // return i -// } diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go deleted file mode 100644 index a2276070d51..00000000000 --- a/vendor/github.com/ugorji/go/codec/json.go +++ /dev/null @@ -1,1167 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// By default, this json support uses base64 encoding for bytes, because you cannot -// store and read any arbitrary string in json (only unicode). -// However, the user can configre how to encode/decode bytes. -// -// This library specifically supports UTF-8 for encoding and decoding only. -// -// Note that the library will happily encode/decode things which are not valid -// json e.g. a map[int64]string. We do it for consistency. With valid json, -// we will encode and decode appropriately. -// Users can specify their map type if necessary to force it. -// -// Note: -// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. -// We implement it here. -// - Also, strconv.ParseXXX for floats and integers -// - only works on strings resulting in unnecessary allocation and []byte-string conversion. -// - it does a lot of redundant checks, because json numbers are simpler that what it supports. 
-// - We parse numbers (floats and integers) directly here. -// We only delegate parsing floats if it is a hairy float which could cause a loss of precision. -// In that case, we delegate to strconv.ParseFloat. -// -// Note: -// - encode does not beautify. There is no whitespace when encoding. -// - rpc calls which take single integer arguments or write single numeric arguments will need care. - -// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver -// MUST not call one-another. - -import ( - "bytes" - "encoding/base64" - "reflect" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -//-------------------------------- - -var jsonLiterals = [...]byte{ - '"', - 't', 'r', 'u', 'e', - '"', - '"', - 'f', 'a', 'l', 's', 'e', - '"', - '"', - 'n', 'u', 'l', 'l', - '"', -} - -const ( - jsonLitTrueQ = 0 - jsonLitTrue = 1 - jsonLitFalseQ = 6 - jsonLitFalse = 7 - jsonLitNullQ = 13 - jsonLitNull = 14 -) - -var ( - // jsonFloat64Pow10 = [...]float64{ - // 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - // 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, - // 1e20, 1e21, 1e22, - // } - - // jsonUint64Pow10 = [...]uint64{ - // 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - // 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, - // } - - // jsonTabs and jsonSpaces are used as caches for indents - jsonTabs, jsonSpaces string - - jsonCharHtmlSafeSet bitset128 - jsonCharSafeSet bitset128 - jsonCharWhitespaceSet bitset256 - jsonNumSet bitset256 - // jsonIsFloatSet bitset256 - - jsonU4Set [256]byte -) - -const ( - // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: - // - If we see first character of null, false or true, - // do not validate subsequent characters. - // - e.g. if we see a n, assume null and skip next 3 characters, - // and do not validate they are ull. - // P.S. Do not expect a significant decoding boost from this. 
- jsonValidateSymbols = true - - jsonSpacesOrTabsLen = 128 - - jsonU4SetErrVal = 128 - - jsonAlwaysReturnInternString = false -) - -func init() { - var bs [jsonSpacesOrTabsLen]byte - for i := 0; i < jsonSpacesOrTabsLen; i++ { - bs[i] = ' ' - } - jsonSpaces = string(bs[:]) - - for i := 0; i < jsonSpacesOrTabsLen; i++ { - bs[i] = '\t' - } - jsonTabs = string(bs[:]) - - // populate the safe values as true: note: ASCII control characters are (0-31) - // jsonCharSafeSet: all true except (0-31) " \ - // jsonCharHtmlSafeSet: all true except (0-31) " \ < > & - var i byte - for i = 32; i < utf8.RuneSelf; i++ { - switch i { - case '"', '\\': - case '<', '>', '&': - jsonCharSafeSet.set(i) // = true - default: - jsonCharSafeSet.set(i) - jsonCharHtmlSafeSet.set(i) - } - } - for i = 0; i <= utf8.RuneSelf; i++ { - switch i { - case ' ', '\t', '\r', '\n': - jsonCharWhitespaceSet.set(i) - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-': - jsonNumSet.set(i) - } - } - for j := range jsonU4Set { - switch i = byte(j); i { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - jsonU4Set[i] = i - '0' - case 'a', 'b', 'c', 'd', 'e', 'f': - jsonU4Set[i] = i - 'a' + 10 - case 'A', 'B', 'C', 'D', 'E', 'F': - jsonU4Set[i] = i - 'A' + 10 - default: - jsonU4Set[i] = jsonU4SetErrVal - } - // switch i = byte(j); i { - // case 'e', 'E', '.': - // jsonIsFloatSet.set(i) - // } - } - // jsonU4Set[255] = jsonU4SetErrVal -} - -type jsonEncDriver struct { - e *Encoder - w encWriter - h *JsonHandle - b [64]byte // scratch - bs []byte // scratch - se setExtWrapper - ds string // indent string - dl uint16 // indent level - dt bool // indent using tabs - d bool // indent - c containerState - noBuiltInTypes -} - -// indent is done as below: -// - newline and indent are added before each mapKey or arrayElem -// - newline and indent are added before each ending, -// except there was no entry (so we can have {} or []) - -func (e *jsonEncDriver) WriteArrayStart(length int) { - 
if e.d { - e.dl++ - } - e.w.writen1('[') - e.c = containerArrayStart -} - -func (e *jsonEncDriver) WriteArrayElem() { - if e.c != containerArrayStart { - e.w.writen1(',') - } - if e.d { - e.writeIndent() - } - e.c = containerArrayElem -} - -func (e *jsonEncDriver) WriteArrayEnd() { - if e.d { - e.dl-- - if e.c != containerArrayStart { - e.writeIndent() - } - } - e.w.writen1(']') - e.c = containerArrayEnd -} - -func (e *jsonEncDriver) WriteMapStart(length int) { - if e.d { - e.dl++ - } - e.w.writen1('{') - e.c = containerMapStart -} - -func (e *jsonEncDriver) WriteMapElemKey() { - if e.c != containerMapStart { - e.w.writen1(',') - } - if e.d { - e.writeIndent() - } - e.c = containerMapKey -} - -func (e *jsonEncDriver) WriteMapElemValue() { - if e.d { - e.w.writen2(':', ' ') - } else { - e.w.writen1(':') - } - e.c = containerMapValue -} - -func (e *jsonEncDriver) WriteMapEnd() { - if e.d { - e.dl-- - if e.c != containerMapStart { - e.writeIndent() - } - } - e.w.writen1('}') - e.c = containerMapEnd -} - -func (e *jsonEncDriver) writeIndent() { - e.w.writen1('\n') - if x := len(e.ds) * int(e.dl); x <= jsonSpacesOrTabsLen { - if e.dt { - e.w.writestr(jsonTabs[:x]) - } else { - e.w.writestr(jsonSpaces[:x]) - } - } else { - for i := uint16(0); i < e.dl; i++ { - e.w.writestr(e.ds) - } - } -} - -func (e *jsonEncDriver) EncodeNil() { - // We always encode nil as just null (never in quotes) - // This allows us to easily decode if a nil in the json stream - // ie if initial token is n. 
- e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) - - // if e.h.MapKeyAsString && e.c == containerMapKey { - // e.w.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6]) - // } else { - // e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) - // } -} - -func (e *jsonEncDriver) EncodeBool(b bool) { - if e.h.MapKeyAsString && e.c == containerMapKey { - if b { - e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6]) - } else { - e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7]) - } - } else { - if b { - e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) - } else { - e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) - } - } -} - -func (e *jsonEncDriver) EncodeFloat32(f float32) { - e.encodeFloat(float64(f), 32) -} - -func (e *jsonEncDriver) EncodeFloat64(f float64) { - e.encodeFloat(f, 64) -} - -func (e *jsonEncDriver) encodeFloat(f float64, numbits int) { - var blen int - var x []byte - if e.h.MapKeyAsString && e.c == containerMapKey { - e.b[0] = '"' - x = strconv.AppendFloat(e.b[1:1], f, 'G', -1, numbits) - blen = 1 + len(x) - if jsonIsFloatBytesB2(x) { - e.b[blen] = '"' - blen += 1 - } else { - e.b[blen] = '.' - e.b[blen+1] = '0' - e.b[blen+2] = '"' - blen += 3 - } - } else { - x = strconv.AppendFloat(e.b[:0], f, 'G', -1, numbits) - blen = len(x) - if !jsonIsFloatBytesB2(x) { - e.b[blen] = '.' 
- e.b[blen+1] = '0' - blen += 2 - } - } - e.w.writeb(e.b[:blen]) -} - -func (e *jsonEncDriver) EncodeInt(v int64) { - x := e.h.IntegerAsString - if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.h.MapKeyAsString && e.c == containerMapKey) { - blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) - e.b[0] = '"' - e.b[blen-1] = '"' - e.w.writeb(e.b[:blen]) - return - } - e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) -} - -func (e *jsonEncDriver) EncodeUint(v uint64) { - x := e.h.IntegerAsString - if x == 'A' || x == 'L' && v > 1<<53 || (e.h.MapKeyAsString && e.c == containerMapKey) { - blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) - e.b[0] = '"' - e.b[blen-1] = '"' - e.w.writeb(e.b[:blen]) - return - } - e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) -} - -func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { - if v := ext.ConvertExt(rv); v == nil { - e.EncodeNil() - } else { - en.encode(v) - } -} - -func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { - // only encodes re.Value (never re.Data) - if re.Value == nil { - e.EncodeNil() - } else { - en.encode(re.Value) - } -} - -func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { - e.quoteStr(v) -} - -func (e *jsonEncDriver) EncodeSymbol(v string) { - e.quoteStr(v) -} - -func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - // if encoding raw bytes and RawBytesExt is configured, use it to encode - if c == c_RAW && e.se.i != nil { - e.EncodeExt(v, 0, &e.se, e.e) - return - } - if c == c_RAW { - slen := base64.StdEncoding.EncodedLen(len(v)) - if cap(e.bs) >= slen { - e.bs = e.bs[:slen] - } else { - e.bs = make([]byte, slen) - } - base64.StdEncoding.Encode(e.bs, v) - e.w.writen1('"') - e.w.writeb(e.bs) - e.w.writen1('"') - } else { - e.quoteStr(stringView(v)) - } -} - -func (e *jsonEncDriver) EncodeAsis(v []byte) { - e.w.writeb(v) -} - -func (e *jsonEncDriver) quoteStr(s string) { - // adapted from std pkg encoding/json - const hex = 
"0123456789abcdef" - w := e.w - w.writen1('"') - var start int - for i, slen := 0, len(s); i < slen; { - // encode all bytes < 0x20 (except \r, \n). - // also encode < > & to prevent security holes when served to some browsers. - if b := s[i]; b < utf8.RuneSelf { - // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - if jsonCharHtmlSafeSet.isset(b) || (e.h.HTMLCharsAsIs && jsonCharSafeSet.isset(b)) { - i++ - continue - } - if start < i { - w.writestr(s[start:i]) - } - switch b { - case '\\', '"': - w.writen2('\\', b) - case '\n': - w.writen2('\\', 'n') - case '\r': - w.writen2('\\', 'r') - case '\b': - w.writen2('\\', 'b') - case '\f': - w.writen2('\\', 'f') - case '\t': - w.writen2('\\', 't') - default: - w.writestr(`\u00`) - w.writen2(hex[b>>4], hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - w.writestr(s[start:i]) - } - w.writestr(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. - // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. - if c == '\u2028' || c == '\u2029' { - if start < i { - w.writestr(s[start:i]) - } - w.writestr(`\u202`) - w.writen1(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - w.writestr(s[start:]) - } - w.writen1('"') -} - -func (e *jsonEncDriver) atEndOfEncode() { - if e.h.TermWhitespace { - if e.d { - e.w.writen1('\n') - } else { - e.w.writen1(' ') - } - } -} - -type jsonDecDriver struct { - noBuiltInTypes - d *Decoder - h *JsonHandle - r decReader - - c containerState - // tok is used to store the token read right after skipWhiteSpace. 
- tok uint8 - - fnull bool // found null from appendStringAsBytes - - bstr [8]byte // scratch used for string \UXXX parsing - b [64]byte // scratch, used for parsing strings or numbers - b2 [64]byte // scratch, used only for decodeBytes (after base64) - bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. - - se setExtWrapper - - // n jsonNum -} - -func jsonIsWS(b byte) bool { - // return b == ' ' || b == '\t' || b == '\r' || b == '\n' - return jsonCharWhitespaceSet.isset(b) -} - -func (d *jsonDecDriver) uncacheRead() { - if d.tok != 0 { - d.r.unreadn1() - d.tok = 0 - } -} - -func (d *jsonDecDriver) ReadMapStart() int { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if d.tok != '{' { - d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok) - } - d.tok = 0 - d.c = containerMapStart - return -1 -} - -func (d *jsonDecDriver) ReadArrayStart() int { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if d.tok != '[' { - d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok) - } - d.tok = 0 - d.c = containerArrayStart - return -1 -} - -func (d *jsonDecDriver) CheckBreak() bool { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - return d.tok == '}' || d.tok == ']' -} - -func (d *jsonDecDriver) ReadArrayElem() { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if d.c != containerArrayStart { - const xc uint8 = ',' - if d.tok != xc { - d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - } - d.c = containerArrayElem -} - -func (d *jsonDecDriver) ReadArrayEnd() { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - const xc uint8 = ']' - if d.tok != xc { - d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - d.c = containerArrayEnd -} - -func (d *jsonDecDriver) ReadMapElemKey() { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if d.c != containerMapStart { - const 
xc uint8 = ',' - if d.tok != xc { - d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - } - d.c = containerMapKey -} - -func (d *jsonDecDriver) ReadMapElemValue() { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - const xc uint8 = ':' - if d.tok != xc { - d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - d.c = containerMapValue -} - -func (d *jsonDecDriver) ReadMapEnd() { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - const xc uint8 = '}' - if d.tok != xc { - d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - d.c = containerMapEnd -} - -// func (d *jsonDecDriver) readContainerState(c containerState, xc uint8, check bool) { -// if d.tok == 0 { -// d.tok = d.r.skip(&jsonCharWhitespaceSet) -// } -// if check { -// if d.tok != xc { -// d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) -// } -// d.tok = 0 -// } -// d.c = c -// } - -func (d *jsonDecDriver) readLit(length, fromIdx uint8) { - bs := d.r.readx(int(length)) - d.tok = 0 - if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) { - d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs) - return - } -} - -func (d *jsonDecDriver) TryDecodeAsNil() bool { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - // TODO: we shouldn't try to see if "null" was here, right? 
- // only "null" denotes a nil - if d.tok == 'n' { - d.readLit(3, jsonLitNull+1) // ull - return true - } - return false -} - -func (d *jsonDecDriver) DecodeBool() (v bool) { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - fquot := d.c == containerMapKey && d.tok == '"' - if fquot { - d.tok = d.r.readn1() - } - switch d.tok { - case 'f': - d.readLit(4, jsonLitFalse+1) // alse - // v = false - case 't': - d.readLit(3, jsonLitTrue+1) // rue - v = true - default: - d.d.errorf("json: decode bool: got first char %c", d.tok) - // v = false // "unreachable" - } - if fquot { - d.r.readn1() - } - return -} - -func (d *jsonDecDriver) ContainerType() (vt valueType) { - // check container type by checking the first char - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if b := d.tok; b == '{' { - return valueTypeMap - } else if b == '[' { - return valueTypeArray - } else if b == 'n' { - return valueTypeNil - } else if b == '"' { - return valueTypeString - } - return valueTypeUnset - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - // return false // "unreachable" -} - -func (d *jsonDecDriver) decNumBytes() (bs []byte) { - // stores num bytes in d.bs - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if d.tok == '"' { - bs = d.r.readUntil(d.b2[:0], '"') - bs = bs[:len(bs)-1] - } else { - d.r.unreadn1() - bs = d.r.readTo(d.bs[:0], &jsonNumSet) - } - d.tok = 0 - return bs -} - -func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) { - bs := d.decNumBytes() - u, err := strconv.ParseUint(stringView(bs), 10, int(bitsize)) - if err != nil { - d.d.errorf("json: decode uint from %s: %v", bs, err) - return - } - return -} - -func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) { - bs := d.decNumBytes() - i, err := strconv.ParseInt(stringView(bs), 10, int(bitsize)) - if err != nil { - d.d.errorf("json: decode int from %s: %v", bs, err) - return - } - return -} - -func (d *jsonDecDriver) 
DecodeFloat(chkOverflow32 bool) (f float64) { - bs := d.decNumBytes() - bitsize := 64 - if chkOverflow32 { - bitsize = 32 - } - f, err := strconv.ParseFloat(stringView(bs), bitsize) - if err != nil { - d.d.errorf("json: decode float from %s: %v", bs, err) - return - } - return -} - -func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if ext == nil { - re := rv.(*RawExt) - re.Tag = xtag - d.d.decode(&re.Value) - } else { - var v interface{} - d.d.decode(&v) - ext.UpdateExt(rv, v) - } - return -} - -func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { - // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. - if d.se.i != nil { - bsOut = bs - d.DecodeExt(&bsOut, 0, &d.se) - return - } - d.appendStringAsBytes() - // base64 encodes []byte{} as "", and we encode nil []byte as null. - // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}. - // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs. - // However, it sets a fnull field to true, so we can check if a null was found. 
- if len(d.bs) == 0 { - if d.fnull { - return nil - } - return []byte{} - } - bs0 := d.bs - slen := base64.StdEncoding.DecodedLen(len(bs0)) - if slen <= cap(bs) { - bsOut = bs[:slen] - } else if zerocopy && slen <= cap(d.b2) { - bsOut = d.b2[:slen] - } else { - bsOut = make([]byte, slen) - } - slen2, err := base64.StdEncoding.Decode(bsOut, bs0) - if err != nil { - d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err) - return nil - } - if slen != slen2 { - bsOut = bsOut[:slen2] - } - return -} - -func (d *jsonDecDriver) DecodeString() (s string) { - d.appendStringAsBytes() - return d.bsToString() -} - -func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) { - d.appendStringAsBytes() - return d.bs -} - -func (d *jsonDecDriver) appendStringAsBytes() { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - - d.fnull = false - if d.tok != '"' { - // d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) - // handle non-string scalar: null, true, false or a number - switch d.tok { - case 'n': - d.readLit(3, jsonLitNull+1) // ull - d.bs = d.bs[:0] - d.fnull = true - case 'f': - d.readLit(4, jsonLitFalse+1) // alse - d.bs = d.bs[:5] - copy(d.bs, "false") - case 't': - d.readLit(3, jsonLitTrue+1) // rue - d.bs = d.bs[:4] - copy(d.bs, "true") - default: - // try to parse a valid number - bs := d.decNumBytes() - d.bs = d.bs[:len(bs)] - copy(d.bs, bs) - } - return - } - - d.tok = 0 - r := d.r - var cs = r.readUntil(d.b2[:0], '"') - var cslen = len(cs) - var c uint8 - v := d.bs[:0] - // append on each byte seen can be expensive, so we just - // keep track of where we last read a contiguous set of - // non-special bytes (using cursor variable), - // and when we see a special byte - // e.g. end-of-slice, " or \, - // we will append the full range into the v slice before proceeding - for i, cursor := 0, 0; ; { - if i == cslen { - v = append(v, cs[cursor:]...) 
- cs = r.readUntil(d.b2[:0], '"') - cslen = len(cs) - i, cursor = 0, 0 - } - c = cs[i] - if c == '"' { - v = append(v, cs[cursor:i]...) - break - } - if c != '\\' { - i++ - continue - } - v = append(v, cs[cursor:i]...) - i++ - c = cs[i] - switch c { - case '"', '\\', '/', '\'': - v = append(v, c) - case 'b': - v = append(v, '\b') - case 'f': - v = append(v, '\f') - case 'n': - v = append(v, '\n') - case 'r': - v = append(v, '\r') - case 't': - v = append(v, '\t') - case 'u': - var r rune - var rr uint32 - if len(cs) < i+4 { // may help reduce bounds-checking - d.d.errorf(`json: need at least 4 more bytes for unicode sequence`) - } - // c = cs[i+4] // may help reduce bounds-checking - for j := 1; j < 5; j++ { - c = jsonU4Set[cs[i+j]] - if c == jsonU4SetErrVal { - // d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, c) - r = unicode.ReplacementChar - i += 4 - goto encode_rune - } - rr = rr*16 + uint32(c) - } - r = rune(rr) - i += 4 - if utf16.IsSurrogate(r) { - if len(cs) >= i+6 && cs[i+2] == 'u' && cs[i+1] == '\\' { - i += 2 - // c = cs[i+4] // may help reduce bounds-checking - var rr1 uint32 - for j := 1; j < 5; j++ { - c = jsonU4Set[cs[i+j]] - if c == jsonU4SetErrVal { - // d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, c) - r = unicode.ReplacementChar - i += 4 - goto encode_rune - } - rr1 = rr1*16 + uint32(c) - } - r = utf16.DecodeRune(r, rune(rr1)) - i += 4 - } else { - r = unicode.ReplacementChar - goto encode_rune - } - } - encode_rune: - w2 := utf8.EncodeRune(d.bstr[:], r) - v = append(v, d.bstr[:w2]...) - default: - d.d.errorf("json: unsupported escaped value: %c", c) - } - i++ - cursor = i - } - d.bs = v -} - -func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) { - if d.h.PreferFloat || jsonIsFloatBytesB3(bs) { // bytes.IndexByte(bs, '.') != -1 ||... 
- // } else if d.h.PreferFloat || bytes.ContainsAny(bs, ".eE") { - z.v = valueTypeFloat - z.f, err = strconv.ParseFloat(stringView(bs), 64) - } else if d.h.SignedInteger || bs[0] == '-' { - z.v = valueTypeInt - z.i, err = strconv.ParseInt(stringView(bs), 10, 64) - } else { - z.v = valueTypeUint - z.u, err = strconv.ParseUint(stringView(bs), 10, 64) - } - if err != nil && z.v != valueTypeFloat { - if v, ok := err.(*strconv.NumError); ok && (v.Err == strconv.ErrRange || v.Err == strconv.ErrSyntax) { - z.v = valueTypeFloat - z.f, err = strconv.ParseFloat(stringView(bs), 64) - } - } - return -} - -func (d *jsonDecDriver) bsToString() string { - // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key - if jsonAlwaysReturnInternString || d.c == containerMapKey { - return d.d.string(d.bs) - } - return string(d.bs) -} - -func (d *jsonDecDriver) DecodeNaked() { - z := d.d.n - // var decodeFurther bool - - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - switch d.tok { - case 'n': - d.readLit(3, jsonLitNull+1) // ull - z.v = valueTypeNil - case 'f': - d.readLit(4, jsonLitFalse+1) // alse - z.v = valueTypeBool - z.b = false - case 't': - d.readLit(3, jsonLitTrue+1) // rue - z.v = valueTypeBool - z.b = true - case '{': - z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart - case '[': - z.v = valueTypeArray // don't consume. 
kInterfaceNaked will call ReadArrayStart - case '"': - // if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first - d.appendStringAsBytes() - if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString { - switch stringView(d.bs) { - case "null": - z.v = valueTypeNil - case "true": - z.v = valueTypeBool - z.b = true - case "false": - z.v = valueTypeBool - z.b = false - default: - // check if a number: float, int or uint - if err := d.nakedNum(z, d.bs); err != nil { - z.v = valueTypeString - z.s = d.bsToString() - } - } - } else { - z.v = valueTypeString - z.s = d.bsToString() - } - default: // number - bs := d.decNumBytes() - if len(bs) == 0 { - d.d.errorf("json: decode number from empty string") - return - } - if err := d.nakedNum(z, bs); err != nil { - d.d.errorf("json: decode number from %s: %v", bs, err) - return - } - } - // if decodeFurther { - // d.s.sc.retryRead() - // } - return -} - -//---------------------- - -// JsonHandle is a handle for JSON encoding format. -// -// Json is comprehensively supported: -// - decodes numbers into interface{} as int, uint or float64 -// - configurable way to encode/decode []byte . -// by default, encodes and decodes []byte using base64 Std Encoding -// - UTF-8 support for encoding and decoding -// -// It has better performance than the json library in the standard library, -// by leveraging the performance improvements of the codec library and -// minimizing allocations. -// -// In addition, it doesn't read more bytes than necessary during a decode, which allows -// reading multiple values from a stream containing json and non-json content. -// For example, a user can read a json value, then a cbor value, then a msgpack value, -// all from the same stream in sequence. -// -// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs -// are not treated as an error. -// Instead, they are replaced by the Unicode replacement character U+FFFD. 
-type JsonHandle struct { - textEncodingType - BasicHandle - - // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. - // If not configured, raw bytes are encoded to/from base64 text. - RawBytesExt InterfaceExt - - // Indent indicates how a value is encoded. - // - If positive, indent by that number of spaces. - // - If negative, indent by that number of tabs. - Indent int8 - - // IntegerAsString controls how integers (signed and unsigned) are encoded. - // - // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. - // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. - // This can be mitigated by configuring how to encode integers. - // - // IntegerAsString interpretes the following values: - // - if 'L', then encode integers > 2^53 as a json string. - // - if 'A', then encode all integers as a json string - // containing the exact integer representation as a decimal. - // - else encode all integers as a json number (default) - IntegerAsString uint8 - - // HTMLCharsAsIs controls how to encode some special characters to html: < > & - // - // By default, we encode them as \uXXX - // to prevent security holes when served from some browsers. - HTMLCharsAsIs bool - - // PreferFloat says that we will default to decoding a number as a float. - // If not set, we will examine the characters of the number and decode as an - // integer type if it doesn't have any of the characters [.eE]. - PreferFloat bool - - // TermWhitespace says that we add a whitespace character - // at the end of an encoding. - // - // The whitespace is important, especially if using numbers in a context - // where multiple items are written to a stream. - TermWhitespace bool - - // MapKeyAsString says to encode all map keys as strings. - // - // Use this to enforce strict json output. 
- // The only caveat is that nil value is ALWAYS written as null (never as "null") - MapKeyAsString bool -} - -func (h *JsonHandle) hasElemSeparators() bool { return true } - -func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{i: ext}) -} - -func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { - hd := jsonEncDriver{e: e, h: h} - hd.bs = hd.b[:0] - - hd.reset() - - return &hd -} - -func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { - // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} - hd := jsonDecDriver{d: d, h: h} - hd.bs = hd.b[:0] - hd.reset() - return &hd -} - -func (e *jsonEncDriver) reset() { - e.w = e.e.w - e.se.i = e.h.RawBytesExt - if e.bs != nil { - e.bs = e.bs[:0] - } - e.d, e.dt, e.dl, e.ds = false, false, 0, "" - e.c = 0 - if e.h.Indent > 0 { - e.d = true - e.ds = jsonSpaces[:e.h.Indent] - } else if e.h.Indent < 0 { - e.d = true - e.dt = true - e.ds = jsonTabs[:-(e.h.Indent)] - } -} - -func (d *jsonDecDriver) reset() { - d.r = d.d.r - d.se.i = d.h.RawBytesExt - if d.bs != nil { - d.bs = d.bs[:0] - } - d.c, d.tok = 0, 0 - // d.n.reset() -} - -// func jsonIsFloatBytes(bs []byte) bool { -// for _, v := range bs { -// // if v == '.' 
|| v == 'e' || v == 'E' { -// if jsonIsFloatSet.isset(v) { -// return true -// } -// } -// return false -// } - -func jsonIsFloatBytesB2(bs []byte) bool { - return bytes.IndexByte(bs, '.') != -1 || - bytes.IndexByte(bs, 'E') != -1 -} - -func jsonIsFloatBytesB3(bs []byte) bool { - return bytes.IndexByte(bs, '.') != -1 || - bytes.IndexByte(bs, 'E') != -1 || - bytes.IndexByte(bs, 'e') != -1 -} - -var _ decDriver = (*jsonDecDriver)(nil) -var _ encDriver = (*jsonEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl deleted file mode 100644 index 9a46f138143..00000000000 --- a/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from mammoth-test.go.tmpl -// ************************************************************ - -package codec - -import "testing" -import "fmt" - -// TestMammoth has all the different paths optimized in fast-path -// It has all the primitives, slices and maps. -// -// For each of those types, it has a pointer and a non-pointer field. 
- -func init() { _ = fmt.Printf } // so we can include fmt as needed - -type TestMammoth struct { - -{{range .Values }}{{if .Primitive }}{{/* -*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} -{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} -{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* -*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} -{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} -{{end}}{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* -*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} -{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} -{{end}}{{end}}{{end}} - -} - -{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* -*/}} type {{ .MethodNamePfx "type" false }} []{{ .Elem }} -func (_ {{ .MethodNamePfx "type" false }}) MapBySlice() { } -{{end}}{{end}}{{end}} - -func doTestMammothSlices(t *testing.T, h Handle) { -{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* -*/}} - for _, v := range [][]{{ .Elem }}{ nil, []{{ .Elem }}{}, []{{ .Elem }}{ {{ nonzerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { - // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v) - var v{{$i}}v1, v{{$i}}v2, v{{$i}}v3, v{{$i}}v4 []{{ .Elem }} - v{{$i}}v1 = v - bs{{$i}}, _ := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}") - if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } - testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}") - bs{{$i}}, _ = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p") - v{{$i}}v2 = nil - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p") - // ... 
- v{{$i}}v2 = nil - if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } - v{{$i}}v3 = {{ .MethodNamePfx "type" false }}(v{{$i}}v1) - bs{{$i}}, _ = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom") - v{{$i}}v4 = {{ .MethodNamePfx "type" false }}(v{{$i}}v2) - testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom") - testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom") - v{{$i}}v2 = nil - bs{{$i}}, _ = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p") - v{{$i}}v4 = {{ .MethodNamePfx "type" false }}(v{{$i}}v2) - testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p") - testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p") - } -{{end}}{{end}}{{end}} -} - -func doTestMammothMaps(t *testing.T, h Handle) { -{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* -*/}} - for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, map[{{ .MapKey }}]{{ .Elem }}{}, map[{{ .MapKey }}]{{ .Elem }}{ {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} } } { - // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) - var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }} - v{{$i}}v1 = v - bs{{$i}}, _ := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}") - if v != nil { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } - testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}") - bs{{$i}}, _ = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p") - v{{$i}}v2 = nil - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p") - } -{{end}}{{end}}{{end}} - -} - -func doTestMammothMapsAndSlices(t *testing.T, h Handle) { - doTestMammothSlices(t, h) - doTestMammothMaps(t, h) -} diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go deleted file mode 100644 index 
fcc3177aaab..00000000000 --- a/vendor/github.com/ugorji/go/codec/msgpack.go +++ /dev/null @@ -1,899 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -/* -MSGPACK - -Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. -We need to maintain compatibility with it and how it encodes integer values -without caring about the type. - -For compatibility with behaviour of msgpack-c reference implementation: - - Go intX (>0) and uintX - IS ENCODED AS - msgpack +ve fixnum, unsigned - - Go intX (<0) - IS ENCODED AS - msgpack -ve fixnum, signed - -*/ -package codec - -import ( - "fmt" - "io" - "math" - "net/rpc" - "reflect" -) - -const ( - mpPosFixNumMin byte = 0x00 - mpPosFixNumMax = 0x7f - mpFixMapMin = 0x80 - mpFixMapMax = 0x8f - mpFixArrayMin = 0x90 - mpFixArrayMax = 0x9f - mpFixStrMin = 0xa0 - mpFixStrMax = 0xbf - mpNil = 0xc0 - _ = 0xc1 - mpFalse = 0xc2 - mpTrue = 0xc3 - mpFloat = 0xca - mpDouble = 0xcb - mpUint8 = 0xcc - mpUint16 = 0xcd - mpUint32 = 0xce - mpUint64 = 0xcf - mpInt8 = 0xd0 - mpInt16 = 0xd1 - mpInt32 = 0xd2 - mpInt64 = 0xd3 - - // extensions below - mpBin8 = 0xc4 - mpBin16 = 0xc5 - mpBin32 = 0xc6 - mpExt8 = 0xc7 - mpExt16 = 0xc8 - mpExt32 = 0xc9 - mpFixExt1 = 0xd4 - mpFixExt2 = 0xd5 - mpFixExt4 = 0xd6 - mpFixExt8 = 0xd7 - mpFixExt16 = 0xd8 - - mpStr8 = 0xd9 // new - mpStr16 = 0xda - mpStr32 = 0xdb - - mpArray16 = 0xdc - mpArray32 = 0xdd - - mpMap16 = 0xde - mpMap32 = 0xdf - - mpNegFixNumMin = 0xe0 - mpNegFixNumMax = 0xff -) - -// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec -// that the backend RPC service takes multiple arguments, which have been arranged -// in sequence in the slice. -// -// The Codec then passes it AS-IS to the rpc service (without wrapping it in an -// array of 1 element). 
-type MsgpackSpecRpcMultiArgs []interface{} - -// A MsgpackContainer type specifies the different types of msgpackContainers. -type msgpackContainerType struct { - fixCutoff int - bFixMin, b8, b16, b32 byte - hasFixMin, has8, has8Always bool -} - -var ( - msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} - msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} - msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} - msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} -) - -//--------------------------------------------- - -type msgpackEncDriver struct { - noBuiltInTypes - encDriverNoopContainerWriter - // encNoSeparator - e *Encoder - w encWriter - h *MsgpackHandle - x [8]byte -} - -func (e *msgpackEncDriver) EncodeNil() { - e.w.writen1(mpNil) -} - -func (e *msgpackEncDriver) EncodeInt(i int64) { - if i >= 0 { - e.EncodeUint(uint64(i)) - } else if i >= -32 { - e.w.writen1(byte(i)) - } else if i >= math.MinInt8 { - e.w.writen2(mpInt8, byte(i)) - } else if i >= math.MinInt16 { - e.w.writen1(mpInt16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) - } else if i >= math.MinInt32 { - e.w.writen1(mpInt32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) - } else { - e.w.writen1(mpInt64) - bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) EncodeUint(i uint64) { - if i <= math.MaxInt8 { - e.w.writen1(byte(i)) - } else if i <= math.MaxUint8 { - e.w.writen2(mpUint8, byte(i)) - } else if i <= math.MaxUint16 { - e.w.writen1(mpUint16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) - } else if i <= math.MaxUint32 { - e.w.writen1(mpUint32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) - } else { - e.w.writen1(mpUint64) - bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) EncodeBool(b bool) { - if b { - 
e.w.writen1(mpTrue) - } else { - e.w.writen1(mpFalse) - } -} - -func (e *msgpackEncDriver) EncodeFloat32(f float32) { - e.w.writen1(mpFloat) - bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *msgpackEncDriver) EncodeFloat64(f float64) { - e.w.writen1(mpDouble) - bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(v) - if bs == nil { - e.EncodeNil() - return - } - if e.h.WriteExt { - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) - } else { - e.EncodeStringBytes(c_RAW, bs) - } -} - -func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - -func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { - if l == 1 { - e.w.writen2(mpFixExt1, xtag) - } else if l == 2 { - e.w.writen2(mpFixExt2, xtag) - } else if l == 4 { - e.w.writen2(mpFixExt4, xtag) - } else if l == 8 { - e.w.writen2(mpFixExt8, xtag) - } else if l == 16 { - e.w.writen2(mpFixExt16, xtag) - } else if l < 256 { - e.w.writen2(mpExt8, byte(l)) - e.w.writen1(xtag) - } else if l < 65536 { - e.w.writen1(mpExt16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) - e.w.writen1(xtag) - } else { - e.w.writen1(mpExt32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) - e.w.writen1(xtag) - } -} - -func (e *msgpackEncDriver) WriteArrayStart(length int) { - e.writeContainerLen(msgpackContainerList, length) -} - -func (e *msgpackEncDriver) WriteMapStart(length int) { - e.writeContainerLen(msgpackContainerMap, length) -} - -func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { - slen := len(s) - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, slen) - } else { - e.writeContainerLen(msgpackContainerStr, slen) - } - if slen > 0 { - e.w.writestr(s) - } -} - -func (e *msgpackEncDriver) EncodeSymbol(v string) { - e.EncodeString(c_UTF8, v) 
-} - -func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { - slen := len(bs) - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, slen) - } else { - e.writeContainerLen(msgpackContainerStr, slen) - } - if slen > 0 { - e.w.writeb(bs) - } -} - -func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { - if ct.hasFixMin && l < ct.fixCutoff { - e.w.writen1(ct.bFixMin | byte(l)) - } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) { - e.w.writen2(ct.b8, uint8(l)) - } else if l < 65536 { - e.w.writen1(ct.b16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) - } else { - e.w.writen1(ct.b32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) - } -} - -//--------------------------------------------- - -type msgpackDecDriver struct { - d *Decoder - r decReader // *Decoder decReader decReaderT - h *MsgpackHandle - b [scratchByteArrayLen]byte - bd byte - bdRead bool - br bool // bytes reader - noBuiltInTypes - // noStreamingCodec - // decNoSeparator - decDriverNoopContainerReader -} - -// Note: This returns either a primitive (int, bool, etc) for non-containers, -// or a containerType, or a specific type denoting nil or extension. -// It is called when a nil interface{} is passed, leaving it up to the DecDriver -// to introspect the stream and decide how best to decode. -// It deciphers the value by looking at the stream first. 
-func (d *msgpackDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - bd := d.bd - n := d.d.n - var decodeFurther bool - - switch bd { - case mpNil: - n.v = valueTypeNil - d.bdRead = false - case mpFalse: - n.v = valueTypeBool - n.b = false - case mpTrue: - n.v = valueTypeBool - n.b = true - - case mpFloat: - n.v = valueTypeFloat - n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - case mpDouble: - n.v = valueTypeFloat - n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - - case mpUint8: - n.v = valueTypeUint - n.u = uint64(d.r.readn1()) - case mpUint16: - n.v = valueTypeUint - n.u = uint64(bigen.Uint16(d.r.readx(2))) - case mpUint32: - n.v = valueTypeUint - n.u = uint64(bigen.Uint32(d.r.readx(4))) - case mpUint64: - n.v = valueTypeUint - n.u = uint64(bigen.Uint64(d.r.readx(8))) - - case mpInt8: - n.v = valueTypeInt - n.i = int64(int8(d.r.readn1())) - case mpInt16: - n.v = valueTypeInt - n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) - case mpInt32: - n.v = valueTypeInt - n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) - case mpInt64: - n.v = valueTypeInt - n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) - - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - // positive fixnum (always signed) - n.v = valueTypeInt - n.i = int64(int8(bd)) - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - // negative fixnum - n.v = valueTypeInt - n.i = int64(int8(bd)) - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - n.v = valueTypeString - n.s = d.DecodeString() - } else { - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false) - } - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false) - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - n.v = valueTypeArray - decodeFurther = true - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - n.v = 
valueTypeMap - decodeFurther = true - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - n.v = valueTypeExt - clen := d.readExtLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(clen) - default: - d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - } - if !decodeFurther { - d.bdRead = false - } - if n.v == valueTypeUint && d.h.SignedInteger { - n.v = valueTypeInt - n.i = int64(n.u) - } - return -} - -// int can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case mpUint8: - i = int64(uint64(d.r.readn1())) - case mpUint16: - i = int64(uint64(bigen.Uint16(d.r.readx(2)))) - case mpUint32: - i = int64(uint64(bigen.Uint32(d.r.readx(4)))) - case mpUint64: - i = int64(bigen.Uint64(d.r.readx(8))) - case mpInt8: - i = int64(int8(d.r.readn1())) - case mpInt16: - i = int64(int16(bigen.Uint16(d.r.readx(2)))) - case mpInt32: - i = int64(int32(bigen.Uint32(d.r.readx(4)))) - case mpInt64: - i = int64(bigen.Uint64(d.r.readx(8))) - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - i = int64(int8(d.bd)) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - i = int64(int8(d.bd)) - default: - d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - return - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { - d.d.errorf("Overflow int value: %v", i) - return - } - } - d.bdRead = false - return -} - -// uint can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case mpUint8: - ui = uint64(d.r.readn1()) - case mpUint16: - ui = uint64(bigen.Uint16(d.r.readx(2))) - case mpUint32: - ui = 
uint64(bigen.Uint32(d.r.readx(4))) - case mpUint64: - ui = bigen.Uint64(d.r.readx(8)) - case mpInt8: - if i := int64(int8(d.r.readn1())); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt16: - if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt32: - if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt64: - if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - ui = uint64(d.bd) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd)) - return - default: - d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - return - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - d.d.errorf("Overflow uint value: %v", ui) - return - } - } - d.bdRead = false - return -} - -// float can either be decoded from msgpack type: float, double or intX -func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpFloat { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if d.bd == mpDouble { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else { - f = float64(d.DecodeInt(0)) - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("msgpack: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool 
can be decoded from bool, fixnum 0 or 1. -func (d *msgpackDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpFalse || d.bd == 0 { - // b = false - } else if d.bd == mpTrue || d.bd == 1 { - b = true - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - - // DecodeBytes could be from: bin str fixstr fixarray array ... - var clen int - vt := d.ContainerType() - switch vt { - case valueTypeBytes: - // valueTypeBytes may be a mpBin or an mpStr container - if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { - clen = d.readContainerLen(msgpackContainerBin) - } else { - clen = d.readContainerLen(msgpackContainerStr) - } - case valueTypeString: - clen = d.readContainerLen(msgpackContainerStr) - case valueTypeArray: - clen = d.readContainerLen(msgpackContainerList) - // ensure everything after is one byte each - for i := 0; i < clen; i++ { - d.readNextBd() - if d.bd == mpNil { - bs = append(bs, 0) - } else if d.bd == mpUint8 { - bs = append(bs, d.r.readn1()) - } else { - d.d.errorf("cannot read non-byte into a byte array") - return - } - } - d.bdRead = false - return bs - default: - d.d.errorf("invalid container type: expecting bin|str|array") - return - } - - // these are (bin|str)(8|16|32) - // println("DecodeBytes: clen: ", clen) - d.bdRead = false - // bytes may be nil, so handle it. if nil, clen=-1. 
- if clen < 0 { - return nil - } - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) -} - -func (d *msgpackDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true)) -} - -func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { - return d.DecodeBytes(d.b[:], true) -} - -func (d *msgpackDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *msgpackDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *msgpackDecDriver) ContainerType() (vt valueType) { - if !d.bdRead { - d.readNextBd() - } - bd := d.bd - if bd == mpNil { - return valueTypeNil - } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 || - (!d.h.RawToString && - (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) { - return valueTypeBytes - } else if d.h.RawToString && - (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) { - return valueTypeString - } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { - return valueTypeArray - } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpNil { - d.bdRead = false - v = true - } - return -} - -func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { - bd := d.bd - if bd == mpNil { - clen = -1 // to represent nil - } else if bd == ct.b8 { - clen = int(d.r.readn1()) - } else if bd == ct.b16 { - clen = int(bigen.Uint16(d.r.readx(2))) - } else if bd == ct.b32 { - clen = int(bigen.Uint32(d.r.readx(4))) - } else if (ct.bFixMin & bd) == ct.bFixMin { 
- clen = int(ct.bFixMin ^ bd) - } else { - d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) - return - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) ReadMapStart() int { - if !d.bdRead { - d.readNextBd() - } - return d.readContainerLen(msgpackContainerMap) -} - -func (d *msgpackDecDriver) ReadArrayStart() int { - if !d.bdRead { - d.readNextBd() - } - return d.readContainerLen(msgpackContainerList) -} - -func (d *msgpackDecDriver) readExtLen() (clen int) { - switch d.bd { - case mpNil: - clen = -1 // to represent nil - case mpFixExt1: - clen = 1 - case mpFixExt2: - clen = 2 - case mpFixExt4: - clen = 4 - case mpFixExt8: - clen = 8 - case mpFixExt16: - clen = 16 - case mpExt8: - clen = int(d.r.readn1()) - case mpExt16: - clen = int(bigen.Uint16(d.r.readx(2))) - case mpExt32: - clen = int(bigen.Uint32(d.r.readx(4))) - default: - d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) - return - } - return -} - -func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - xbd := d.bd - if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { - xbs = d.DecodeBytes(nil, true) - } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || - (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { - xbs = d.DecodeStringAsBytes() - } else { - clen := d.readExtLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - d.d.errorf("Wrong extension tag. Got %b. 
Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(clen) - } - d.bdRead = false - return -} - -//-------------------------------------------------- - -//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. -type MsgpackHandle struct { - BasicHandle - - // RawToString controls how raw bytes are decoded into a nil interface{}. - RawToString bool - - // WriteExt flag supports encoding configured extensions with extension tags. - // It also controls whether other elements of the new spec are encoded (ie Str8). - // - // With WriteExt=false, configured extensions are serialized as raw bytes - // and Str8 is not encoded. - // - // A stream can still be decoded into a typed value, provided an appropriate value - // is provided, but the type cannot be inferred from the stream. If no appropriate - // type is provided (e.g. decoding into a nil interface{}), you get back - // a []byte or string based on the setting of RawToString. - WriteExt bool - binaryEncodingType - noElemSeparators -} - -func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { - return &msgpackEncDriver{e: e, w: e.w, h: h} -} - -func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { - return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes} -} - -func (e *msgpackEncDriver) reset() { - e.w = e.e.w -} - -func (d *msgpackDecDriver) reset() { - d.r, d.br = d.d.r, d.d.bytes - d.bd, d.bdRead = 0, false -} - -//-------------------------------------------------- - -type msgpackSpecRpcCodec struct { - rpcCodec -} - -// /////////////// Spec RPC Codec /////////////////// -func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // WriteRequest can write to both a Go service, and other services that do - // not abide by the 1 argument rule of a Go service. 
- // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs - var bodyArr []interface{} - if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { - bodyArr = ([]interface{})(m) - } else { - bodyArr = []interface{}{body} - } - r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - var moe interface{} - if r.Error != "" { - moe = r.Error - } - if moe != nil && body != nil { - body = nil - } - r2 := []interface{}{1, uint32(r.Seq), moe, body} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.parseCustomHeader(1, &r.Seq, &r.Error) -} - -func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) -} - -func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { - if body == nil { // read and discard - return c.read(nil) - } - bodyArr := []interface{}{body} - return c.read(&bodyArr) -} - -func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { - - if c.isClosed() { - return io.EOF - } - - // We read the response header by hand - // so that the body can be decoded on its own from the stream at a later time. - - const fia byte = 0x94 //four item array descriptor value - // Not sure why the panic of EOF is swallowed above. - // if bs1 := c.dec.r.readn1(); bs1 != fia { - // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) - // return - // } - var b byte - b, err = c.br.ReadByte() - if err != nil { - return - } - if b != fia { - err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. 
Received %v", fia, b) - return - } - - if err = c.read(&b); err != nil { - return - } - if b != expectTypeByte { - err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) - return - } - if err = c.read(msgid); err != nil { - return - } - if err = c.read(methodOrError); err != nil { - return - } - return -} - -//-------------------------------------------------- - -// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol -// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md -type msgpackSpecRpc struct{} - -// MsgpackSpecRpc implements Rpc using the communication protocol defined in -// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. -var MsgpackSpecRpc msgpackSpecRpc - -func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -var _ decDriver = (*msgpackDecDriver)(nil) -var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go deleted file mode 100644 index 3aa06fc7bb6..00000000000 --- a/vendor/github.com/ugorji/go/codec/rpc.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "bufio" - "errors" - "io" - "net/rpc" - "sync" -) - -// Rpc provides a rpc Server or Client Codec for rpc communication. 
-type Rpc interface { - ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec - ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec -} - -// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer -// used by the rpc connection. It accommodates use-cases where the connection -// should be used by rpc and non-rpc functions, e.g. streaming a file after -// sending an rpc response. -type RpcCodecBuffered interface { - BufferedReader() *bufio.Reader - BufferedWriter() *bufio.Writer -} - -// ------------------------------------- - -// rpcCodec defines the struct members and common methods. -type rpcCodec struct { - rwc io.ReadWriteCloser - dec *Decoder - enc *Encoder - bw *bufio.Writer - br *bufio.Reader - mu sync.Mutex - h Handle - - cls bool - clsmu sync.RWMutex -} - -func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { - bw := bufio.NewWriter(conn) - br := bufio.NewReader(conn) - - // defensive: ensure that jsonH has TermWhitespace turned on. - if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace { - panic(errors.New("rpc requires a JsonHandle with TermWhitespace set to true")) - } - - return rpcCodec{ - rwc: conn, - bw: bw, - br: br, - enc: NewEncoder(bw, h), - dec: NewDecoder(br, h), - h: h, - } -} - -func (c *rpcCodec) BufferedReader() *bufio.Reader { - return c.br -} - -func (c *rpcCodec) BufferedWriter() *bufio.Writer { - return c.bw -} - -func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { - if c.isClosed() { - return io.EOF - } - if err = c.enc.Encode(obj1); err != nil { - return - } - if writeObj2 { - if err = c.enc.Encode(obj2); err != nil { - return - } - } - if doFlush { - return c.bw.Flush() - } - return -} - -func (c *rpcCodec) read(obj interface{}) (err error) { - if c.isClosed() { - return io.EOF - } - //If nil is passed in, we should still attempt to read content to nowhere. 
- if obj == nil { - var obj2 interface{} - return c.dec.Decode(&obj2) - } - return c.dec.Decode(obj) -} - -func (c *rpcCodec) isClosed() bool { - c.clsmu.RLock() - x := c.cls - c.clsmu.RUnlock() - return x -} - -func (c *rpcCodec) Close() error { - if c.isClosed() { - return io.EOF - } - c.clsmu.Lock() - c.cls = true - c.clsmu.Unlock() - return c.rwc.Close() -} - -func (c *rpcCodec) ReadResponseBody(body interface{}) error { - return c.read(body) -} - -// ------------------------------------- - -type goRpcCodec struct { - rpcCodec -} - -func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // Must protect for concurrent access as per API - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) -} - -func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) -} - -func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.read(r) -} - -func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.read(r) -} - -func (c *goRpcCodec) ReadRequestBody(body interface{}) error { - return c.read(body) -} - -// ------------------------------------- - -// goRpc is the implementation of Rpc that uses the communication protocol -// as defined in net/rpc package. -type goRpc struct{} - -// GoRpc implements Rpc using the communication protocol defined in net/rpc package. -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. 
-var GoRpc goRpc - -func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &goRpcCodec{newRPCCodec(conn, h)} -} - -func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &goRpcCodec{newRPCCodec(conn, h)} -} - -var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go deleted file mode 100644 index b69a15e75a3..00000000000 --- a/vendor/github.com/ugorji/go/codec/simple.go +++ /dev/null @@ -1,541 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math" - "reflect" -) - -const ( - _ uint8 = iota - simpleVdNil = 1 - simpleVdFalse = 2 - simpleVdTrue = 3 - simpleVdFloat32 = 4 - simpleVdFloat64 = 5 - - // each lasts for 4 (ie n, n+1, n+2, n+3) - simpleVdPosInt = 8 - simpleVdNegInt = 12 - - // containers: each lasts for 4 (ie n, n+1, n+2, ... 
n+7) - simpleVdString = 216 - simpleVdByteArray = 224 - simpleVdArray = 232 - simpleVdMap = 240 - simpleVdExt = 248 -) - -type simpleEncDriver struct { - noBuiltInTypes - encDriverNoopContainerWriter - // encNoSeparator - e *Encoder - h *SimpleHandle - w encWriter - b [8]byte -} - -func (e *simpleEncDriver) EncodeNil() { - e.w.writen1(simpleVdNil) -} - -func (e *simpleEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(simpleVdTrue) - } else { - e.w.writen1(simpleVdFalse) - } -} - -func (e *simpleEncDriver) EncodeFloat32(f float32) { - e.w.writen1(simpleVdFloat32) - bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *simpleEncDriver) EncodeFloat64(f float64) { - e.w.writen1(simpleVdFloat64) - bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *simpleEncDriver) EncodeInt(v int64) { - if v < 0 { - e.encUint(uint64(-v), simpleVdNegInt) - } else { - e.encUint(uint64(v), simpleVdPosInt) - } -} - -func (e *simpleEncDriver) EncodeUint(v uint64) { - e.encUint(v, simpleVdPosInt) -} - -func (e *simpleEncDriver) encUint(v uint64, bd uint8) { - if v <= math.MaxUint8 { - e.w.writen2(bd, uint8(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd + 1) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.w.writen1(bd + 2) - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) - } else { // if v <= math.MaxUint64 { - e.w.writen1(bd + 3) - bigenHelper{e.b[:8], e.w}.writeUint64(v) - } -} - -func (e *simpleEncDriver) encLen(bd byte, length int) { - if length == 0 { - e.w.writen1(bd) - } else if length <= math.MaxUint8 { - e.w.writen1(bd + 1) - e.w.writen1(uint8(length)) - } else if length <= math.MaxUint16 { - e.w.writen1(bd + 2) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length)) - } else if int64(length) <= math.MaxUint32 { - e.w.writen1(bd + 3) - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length)) - } else { - e.w.writen1(bd + 4) - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length)) - } -} - 
-func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(rv) - if bs == nil { - e.EncodeNil() - return - } - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) -} - -func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - -func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(simpleVdExt, length) - e.w.writen1(xtag) -} - -func (e *simpleEncDriver) WriteArrayStart(length int) { - e.encLen(simpleVdArray, length) -} - -func (e *simpleEncDriver) WriteMapStart(length int) { - e.encLen(simpleVdMap, length) -} - -func (e *simpleEncDriver) EncodeString(c charEncoding, v string) { - e.encLen(simpleVdString, len(v)) - e.w.writestr(v) -} - -func (e *simpleEncDriver) EncodeSymbol(v string) { - e.EncodeString(c_UTF8, v) -} - -func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - e.encLen(simpleVdByteArray, len(v)) - e.w.writeb(v) -} - -//------------------------------------ - -type simpleDecDriver struct { - d *Decoder - h *SimpleHandle - r decReader - bdRead bool - bd byte - br bool // bytes reader - b [scratchByteArrayLen]byte - noBuiltInTypes - // noStreamingCodec - decDriverNoopContainerReader -} - -func (d *simpleDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *simpleDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *simpleDecDriver) ContainerType() (vt valueType) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdNil { - return valueTypeNil - } else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 || - d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 { - return valueTypeBytes - } else if d.bd == simpleVdString || d.bd == simpleVdString+1 || - d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 { - return 
valueTypeString - } else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 || - d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 { - return valueTypeArray - } else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 || - d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *simpleDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdNil { - d.bdRead = false - return true - } - return false -} - -func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case simpleVdPosInt: - ui = uint64(d.r.readn1()) - case simpleVdPosInt + 1: - ui = uint64(bigen.Uint16(d.r.readx(2))) - case simpleVdPosInt + 2: - ui = uint64(bigen.Uint32(d.r.readx(4))) - case simpleVdPosInt + 3: - ui = uint64(bigen.Uint64(d.r.readx(8))) - case simpleVdNegInt: - ui = uint64(d.r.readn1()) - neg = true - case simpleVdNegInt + 1: - ui = uint64(bigen.Uint16(d.r.readx(2))) - neg = true - case simpleVdNegInt + 2: - ui = uint64(bigen.Uint32(d.r.readx(4))) - neg = true - case simpleVdNegInt + 3: - ui = uint64(bigen.Uint64(d.r.readx(8))) - neg = true - default: - d.d.errorf("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) - return - } - // don't do this check, because callers may only want the unsigned value. 
- // if ui > math.MaxInt64 { - // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) - // return - // } - return -} - -func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) { - ui, neg := d.decCheckInteger() - i, overflow := chkOvf.SignedInt(ui) - if overflow { - d.d.errorf("simple: overflow converting %v to signed integer", ui) - return - } - if neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("simple: overflow integer: %v", i) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - ui, neg := d.decCheckInteger() - if neg { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdFloat32 { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if d.bd == simpleVdFloat64 { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else { - if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { - f = float64(d.DecodeInt(64)) - } else { - d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd) - return - } - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("msgpack: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). 
-func (d *simpleDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdTrue { - b = true - } else if d.bd == simpleVdFalse { - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) ReadMapStart() (length int) { - if !d.bdRead { - d.readNextBd() - } - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) ReadArrayStart() (length int) { - if !d.bdRead { - d.readNextBd() - } - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) decLen() int { - switch d.bd % 8 { - case 0: - return 0 - case 1: - return int(d.r.readn1()) - case 2: - return int(bigen.Uint16(d.r.readx(2))) - case 3: - ui := uint64(bigen.Uint32(d.r.readx(4))) - if chkOvf.Uint(ui, intBitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return 0 - } - return int(ui) - case 4: - ui := bigen.Uint64(d.r.readx(8)) - if chkOvf.Uint(ui, intBitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return 0 - } - return int(ui) - } - d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. 
Got: %d", d.bd%8) - return -1 -} - -func (d *simpleDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true)) -} - -func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) { - return d.DecodeBytes(d.b[:], true) -} - -func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdNil { - d.bdRead = false - return - } - clen := d.decLen() - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) -} - -func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(l) - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - xbs = d.DecodeBytes(nil, true) - default: - d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := d.d.n - var decodeFurther bool - - switch d.bd { - case simpleVdNil: - n.v = valueTypeNil - case simpleVdFalse: - n.v = valueTypeBool - n.b = false - case simpleVdTrue: - n.v = valueTypeBool - n.b = true - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - if d.h.SignedInteger { - n.v = valueTypeInt - n.i = d.DecodeInt(64) - } else { - n.v = valueTypeUint - n.u = d.DecodeUint(64) - } - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - n.v = valueTypeInt - n.i = d.DecodeInt(64) - case simpleVdFloat32: - n.v = valueTypeFloat - n.f = d.DecodeFloat(true) - case simpleVdFloat64: - n.v = valueTypeFloat - n.f = d.DecodeFloat(false) - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - n.v = valueTypeString - n.s = d.DecodeString() - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false) - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - n.v = valueTypeExt - l := d.decLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(l) - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - n.v = valueTypeArray - decodeFurther = true - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - n.v = valueTypeMap - decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -//------------------------------------ - -// SimpleHandle is a Handle for a very simple encoding format. -// -// simple is a simplistic codec similar to binc, but not as compact. 
-// - Encoding of a value is always preceded by the descriptor byte (bd) -// - True, false, nil are encoded fully in 1 byte (the descriptor) -// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). -// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. -// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) -// - Lenght of containers (strings, bytes, array, map, extensions) -// are encoded in 0, 1, 2, 4 or 8 bytes. -// Zero-length containers have no length encoded. -// For others, the number of bytes is given by pow(2, bd%3) -// - maps are encoded as [bd] [length] [[key][value]]... -// - arrays are encoded as [bd] [length] [value]... -// - extensions are encoded as [bd] [length] [tag] [byte]... -// - strings/bytearrays are encoded as [bd] [length] [byte]... -// -// The full spec will be published soon. -type SimpleHandle struct { - BasicHandle - binaryEncodingType - noElemSeparators -} - -func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { - return &simpleEncDriver{e: e, w: e.w, h: h} -} - -func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { - return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes} -} - -func (e *simpleEncDriver) reset() { - e.w = e.e.w -} - -func (d *simpleDecDriver) reset() { - d.r, d.br = d.d.r, d.d.bytes - d.bd, d.bdRead = 0, false -} - -var _ decDriver = (*simpleDecDriver)(nil) -var _ encDriver = (*simpleEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json deleted file mode 100644 index 9028586711e..00000000000 --- a/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json +++ /dev/null @@ -1,639 +0,0 @@ -[ - { - "cbor": "AA==", - "hex": "00", - "roundtrip": true, - "decoded": 0 - }, - { - "cbor": "AQ==", - "hex": 
"01", - "roundtrip": true, - "decoded": 1 - }, - { - "cbor": "Cg==", - "hex": "0a", - "roundtrip": true, - "decoded": 10 - }, - { - "cbor": "Fw==", - "hex": "17", - "roundtrip": true, - "decoded": 23 - }, - { - "cbor": "GBg=", - "hex": "1818", - "roundtrip": true, - "decoded": 24 - }, - { - "cbor": "GBk=", - "hex": "1819", - "roundtrip": true, - "decoded": 25 - }, - { - "cbor": "GGQ=", - "hex": "1864", - "roundtrip": true, - "decoded": 100 - }, - { - "cbor": "GQPo", - "hex": "1903e8", - "roundtrip": true, - "decoded": 1000 - }, - { - "cbor": "GgAPQkA=", - "hex": "1a000f4240", - "roundtrip": true, - "decoded": 1000000 - }, - { - "cbor": "GwAAAOjUpRAA", - "hex": "1b000000e8d4a51000", - "roundtrip": true, - "decoded": 1000000000000 - }, - { - "cbor": "G///////////", - "hex": "1bffffffffffffffff", - "roundtrip": true, - "decoded": 18446744073709551615 - }, - { - "cbor": "wkkBAAAAAAAAAAA=", - "hex": "c249010000000000000000", - "roundtrip": true, - "decoded": 18446744073709551616 - }, - { - "cbor": "O///////////", - "hex": "3bffffffffffffffff", - "roundtrip": true, - "decoded": -18446744073709551616, - "skip": true - }, - { - "cbor": "w0kBAAAAAAAAAAA=", - "hex": "c349010000000000000000", - "roundtrip": true, - "decoded": -18446744073709551617 - }, - { - "cbor": "IA==", - "hex": "20", - "roundtrip": true, - "decoded": -1 - }, - { - "cbor": "KQ==", - "hex": "29", - "roundtrip": true, - "decoded": -10 - }, - { - "cbor": "OGM=", - "hex": "3863", - "roundtrip": true, - "decoded": -100 - }, - { - "cbor": "OQPn", - "hex": "3903e7", - "roundtrip": true, - "decoded": -1000 - }, - { - "cbor": "+QAA", - "hex": "f90000", - "roundtrip": true, - "decoded": 0.0 - }, - { - "cbor": "+YAA", - "hex": "f98000", - "roundtrip": true, - "decoded": -0.0 - }, - { - "cbor": "+TwA", - "hex": "f93c00", - "roundtrip": true, - "decoded": 1.0 - }, - { - "cbor": "+z/xmZmZmZma", - "hex": "fb3ff199999999999a", - "roundtrip": true, - "decoded": 1.1 - }, - { - "cbor": "+T4A", - "hex": "f93e00", - 
"roundtrip": true, - "decoded": 1.5 - }, - { - "cbor": "+Xv/", - "hex": "f97bff", - "roundtrip": true, - "decoded": 65504.0 - }, - { - "cbor": "+kfDUAA=", - "hex": "fa47c35000", - "roundtrip": true, - "decoded": 100000.0 - }, - { - "cbor": "+n9///8=", - "hex": "fa7f7fffff", - "roundtrip": true, - "decoded": 3.4028234663852886e+38 - }, - { - "cbor": "+3435DyIAHWc", - "hex": "fb7e37e43c8800759c", - "roundtrip": true, - "decoded": 1.0e+300 - }, - { - "cbor": "+QAB", - "hex": "f90001", - "roundtrip": true, - "decoded": 5.960464477539063e-08 - }, - { - "cbor": "+QQA", - "hex": "f90400", - "roundtrip": true, - "decoded": 6.103515625e-05 - }, - { - "cbor": "+cQA", - "hex": "f9c400", - "roundtrip": true, - "decoded": -4.0 - }, - { - "cbor": "+8AQZmZmZmZm", - "hex": "fbc010666666666666", - "roundtrip": true, - "decoded": -4.1 - }, - { - "cbor": "+XwA", - "hex": "f97c00", - "roundtrip": true, - "diagnostic": "Infinity" - }, - { - "cbor": "+X4A", - "hex": "f97e00", - "roundtrip": true, - "diagnostic": "NaN" - }, - { - "cbor": "+fwA", - "hex": "f9fc00", - "roundtrip": true, - "diagnostic": "-Infinity" - }, - { - "cbor": "+n+AAAA=", - "hex": "fa7f800000", - "roundtrip": false, - "diagnostic": "Infinity" - }, - { - "cbor": "+n/AAAA=", - "hex": "fa7fc00000", - "roundtrip": false, - "diagnostic": "NaN" - }, - { - "cbor": "+v+AAAA=", - "hex": "faff800000", - "roundtrip": false, - "diagnostic": "-Infinity" - }, - { - "cbor": "+3/wAAAAAAAA", - "hex": "fb7ff0000000000000", - "roundtrip": false, - "diagnostic": "Infinity" - }, - { - "cbor": "+3/4AAAAAAAA", - "hex": "fb7ff8000000000000", - "roundtrip": false, - "diagnostic": "NaN" - }, - { - "cbor": "+//wAAAAAAAA", - "hex": "fbfff0000000000000", - "roundtrip": false, - "diagnostic": "-Infinity" - }, - { - "cbor": "9A==", - "hex": "f4", - "roundtrip": true, - "decoded": false - }, - { - "cbor": "9Q==", - "hex": "f5", - "roundtrip": true, - "decoded": true - }, - { - "cbor": "9g==", - "hex": "f6", - "roundtrip": true, - "decoded": null - 
}, - { - "cbor": "9w==", - "hex": "f7", - "roundtrip": true, - "diagnostic": "undefined" - }, - { - "cbor": "8A==", - "hex": "f0", - "roundtrip": true, - "diagnostic": "simple(16)" - }, - { - "cbor": "+Bg=", - "hex": "f818", - "roundtrip": true, - "diagnostic": "simple(24)" - }, - { - "cbor": "+P8=", - "hex": "f8ff", - "roundtrip": true, - "diagnostic": "simple(255)" - }, - { - "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==", - "hex": "c074323031332d30332d32315432303a30343a30305a", - "roundtrip": true, - "diagnostic": "0(\"2013-03-21T20:04:00Z\")" - }, - { - "cbor": "wRpRS2ew", - "hex": "c11a514b67b0", - "roundtrip": true, - "diagnostic": "1(1363896240)" - }, - { - "cbor": "wftB1FLZ7CAAAA==", - "hex": "c1fb41d452d9ec200000", - "roundtrip": true, - "diagnostic": "1(1363896240.5)" - }, - { - "cbor": "10QBAgME", - "hex": "d74401020304", - "roundtrip": true, - "diagnostic": "23(h'01020304')" - }, - { - "cbor": "2BhFZElFVEY=", - "hex": "d818456449455446", - "roundtrip": true, - "diagnostic": "24(h'6449455446')" - }, - { - "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==", - "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d", - "roundtrip": true, - "diagnostic": "32(\"http://www.example.com\")" - }, - { - "cbor": "QA==", - "hex": "40", - "roundtrip": true, - "diagnostic": "h''" - }, - { - "cbor": "RAECAwQ=", - "hex": "4401020304", - "roundtrip": true, - "diagnostic": "h'01020304'" - }, - { - "cbor": "YA==", - "hex": "60", - "roundtrip": true, - "decoded": "" - }, - { - "cbor": "YWE=", - "hex": "6161", - "roundtrip": true, - "decoded": "a" - }, - { - "cbor": "ZElFVEY=", - "hex": "6449455446", - "roundtrip": true, - "decoded": "IETF" - }, - { - "cbor": "YiJc", - "hex": "62225c", - "roundtrip": true, - "decoded": "\"\\" - }, - { - "cbor": "YsO8", - "hex": "62c3bc", - "roundtrip": true, - "decoded": "ü" - }, - { - "cbor": "Y+awtA==", - "hex": "63e6b0b4", - "roundtrip": true, - "decoded": "水" - }, - { - "cbor": "ZPCQhZE=", - "hex": "64f0908591", - "roundtrip": true, - 
"decoded": "𐅑" - }, - { - "cbor": "gA==", - "hex": "80", - "roundtrip": true, - "decoded": [ - - ] - }, - { - "cbor": "gwECAw==", - "hex": "83010203", - "roundtrip": true, - "decoded": [ - 1, - 2, - 3 - ] - }, - { - "cbor": "gwGCAgOCBAU=", - "hex": "8301820203820405", - "roundtrip": true, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=", - "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819", - "roundtrip": true, - "decoded": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25 - ] - }, - { - "cbor": "oA==", - "hex": "a0", - "roundtrip": true, - "decoded": { - } - }, - { - "cbor": "ogECAwQ=", - "hex": "a201020304", - "roundtrip": true, - "skip": true, - "diagnostic": "{1: 2, 3: 4}" - }, - { - "cbor": "omFhAWFiggID", - "hex": "a26161016162820203", - "roundtrip": true, - "decoded": { - "a": 1, - "b": [ - 2, - 3 - ] - } - }, - { - "cbor": "gmFhoWFiYWM=", - "hex": "826161a161626163", - "roundtrip": true, - "decoded": [ - "a", - { - "b": "c" - } - ] - }, - { - "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF", - "hex": "a56161614161626142616361436164614461656145", - "roundtrip": true, - "decoded": { - "a": "A", - "b": "B", - "c": "C", - "d": "D", - "e": "E" - } - }, - { - "cbor": "X0IBAkMDBAX/", - "hex": "5f42010243030405ff", - "roundtrip": false, - "skip": true, - "diagnostic": "(_ h'0102', h'030405')" - }, - { - "cbor": "f2VzdHJlYWRtaW5n/w==", - "hex": "7f657374726561646d696e67ff", - "roundtrip": false, - "decoded": "streaming" - }, - { - "cbor": "n/8=", - "hex": "9fff", - "roundtrip": false, - "decoded": [ - - ] - }, - { - "cbor": "nwGCAgOfBAX//w==", - "hex": "9f018202039f0405ffff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "nwGCAgOCBAX/", - "hex": "9f01820203820405ff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - 
] - }, - { - "cbor": "gwGCAgOfBAX/", - "hex": "83018202039f0405ff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "gwGfAgP/ggQF", - "hex": "83019f0203ff820405", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=", - "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff", - "roundtrip": false, - "decoded": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25 - ] - }, - { - "cbor": "v2FhAWFinwID//8=", - "hex": "bf61610161629f0203ffff", - "roundtrip": false, - "decoded": { - "a": 1, - "b": [ - 2, - 3 - ] - } - }, - { - "cbor": "gmFhv2FiYWP/", - "hex": "826161bf61626163ff", - "roundtrip": false, - "decoded": [ - "a", - { - "b": "c" - } - ] - }, - { - "cbor": "v2NGdW71Y0FtdCH/", - "hex": "bf6346756ef563416d7421ff", - "roundtrip": false, - "decoded": { - "Fun": true, - "Amt": -2 - } - } -] diff --git a/vendor/github.com/ugorji/go/codec/test.py b/vendor/github.com/ugorji/go/codec/test.py deleted file mode 100755 index 800376f6841..00000000000 --- a/vendor/github.com/ugorji/go/codec/test.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env python - -# This will create golden files in a directory passed to it. -# A Test calls this internally to create the golden files -# So it can process them (so we don't have to checkin the files). 
- -# Ensure msgpack-python and cbor are installed first, using: -# sudo apt-get install python-dev -# sudo apt-get install python-pip -# pip install --user msgpack-python msgpack-rpc-python cbor - -# Ensure all "string" keys are utf strings (else encoded as bytes) - -import cbor, msgpack, msgpackrpc, sys, os, threading - -def get_test_data_list(): - # get list with all primitive types, and a combo type - l0 = [ - -8, - -1616, - -32323232, - -6464646464646464, - 192, - 1616, - 32323232, - 6464646464646464, - 192, - -3232.0, - -6464646464.0, - 3232.0, - 6464.0, - 6464646464.0, - False, - True, - u"null", - None, - u"some&day>some 0 - if stopTimeSec > 0: - def myStopRpcServer(): - server.stop() - t = threading.Timer(stopTimeSec, myStopRpcServer) - t.start() - server.start() - -def doRpcClientToPythonSvc(port): - address = msgpackrpc.Address('127.0.0.1', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("Echo123", "A1", "B2", "C3") - print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doRpcClientToGoSvc(port): - # print ">>>> port: ", port, " <<<<<" - address = msgpackrpc.Address('127.0.0.1', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) - print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doMain(args): - if len(args) == 2 and args[0] == "testdata": - build_test_data(args[1]) - elif len(args) == 3 and args[0] == "rpc-server": - doRpcServer(int(args[1]), int(args[2])) - elif len(args) == 2 and args[0] == "rpc-client-python-service": - doRpcClientToPythonSvc(int(args[1])) - elif len(args) == 2 and args[0] == "rpc-client-go-service": - doRpcClientToGoSvc(int(args[1])) - else: - print("Usage: test.py " + - "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") - -if __name__ == "__main__": - doMain(sys.argv[1:]) - diff --git a/vendor/github.com/ugorji/go/codec/time.go 
b/vendor/github.com/ugorji/go/codec/time.go deleted file mode 100644 index 55841d4cac6..00000000000 --- a/vendor/github.com/ugorji/go/codec/time.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "fmt" - "time" -) - -var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} - -type timeExt struct{} - -func (x timeExt) WriteExt(v interface{}) (bs []byte) { - switch v2 := v.(type) { - case time.Time: - bs = encodeTime(v2) - case *time.Time: - bs = encodeTime(*v2) - default: - panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2)) - } - return -} -func (x timeExt) ReadExt(v interface{}, bs []byte) { - tt, err := decodeTime(bs) - if err != nil { - panic(err) - } - *(v.(*time.Time)) = tt -} - -func (x timeExt) ConvertExt(v interface{}) interface{} { - return x.WriteExt(v) -} -func (x timeExt) UpdateExt(v interface{}, src interface{}) { - x.ReadExt(v, src.([]byte)) -} - -// EncodeTime encodes a time.Time as a []byte, including -// information on the instant in time and UTC offset. -// -// Format Description -// -// A timestamp is composed of 3 components: -// -// - secs: signed integer representing seconds since unix epoch -// - nsces: unsigned integer representing fractional seconds as a -// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 -// - tz: signed integer representing timezone offset in minutes east of UTC, -// and a dst (daylight savings time) flag -// -// When encoding a timestamp, the first byte is the descriptor, which -// defines which components are encoded and how many bytes are used to -// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it -// is not encoded in the byte array explicitly*. -// -// Descriptor 8 bits are of the form `A B C DDD EE`: -// A: Is secs component encoded? 
1 = true -// B: Is nsecs component encoded? 1 = true -// C: Is tz component encoded? 1 = true -// DDD: Number of extra bytes for secs (range 0-7). -// If A = 1, secs encoded in DDD+1 bytes. -// If A = 0, secs is not encoded, and is assumed to be 0. -// If A = 1, then we need at least 1 byte to encode secs. -// DDD says the number of extra bytes beyond that 1. -// E.g. if DDD=0, then secs is represented in 1 byte. -// if DDD=2, then secs is represented in 3 bytes. -// EE: Number of extra bytes for nsecs (range 0-3). -// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) -// -// Following the descriptor bytes, subsequent bytes are: -// -// secs component encoded in `DDD + 1` bytes (if A == 1) -// nsecs component encoded in `EE + 1` bytes (if B == 1) -// tz component encoded in 2 bytes (if C == 1) -// -// secs and nsecs components are integers encoded in a BigEndian -// 2-complement encoding format. -// -// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to -// Least significant bit 0 are described below: -// -// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). -// Bit 15 = have\_dst: set to 1 if we set the dst flag. -// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. -// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. 
-// -func encodeTime(t time.Time) []byte { - //t := rv.Interface().(time.Time) - tsecs, tnsecs := t.Unix(), t.Nanosecond() - var ( - bd byte - btmp [8]byte - bs [16]byte - i int = 1 - ) - l := t.Location() - if l == time.UTC { - l = nil - } - if tsecs != 0 { - bd = bd | 0x80 - bigen.PutUint64(btmp[:], uint64(tsecs)) - f := pruneSignExt(btmp[:], tsecs >= 0) - bd = bd | (byte(7-f) << 2) - copy(bs[i:], btmp[f:]) - i = i + (8 - f) - } - if tnsecs != 0 { - bd = bd | 0x40 - bigen.PutUint32(btmp[:4], uint32(tnsecs)) - f := pruneSignExt(btmp[:4], true) - bd = bd | byte(3-f) - copy(bs[i:], btmp[f:4]) - i = i + (4 - f) - } - if l != nil { - bd = bd | 0x20 - // Note that Go Libs do not give access to dst flag. - _, zoneOffset := t.Zone() - //zoneName, zoneOffset := t.Zone() - zoneOffset /= 60 - z := uint16(zoneOffset) - bigen.PutUint16(btmp[:2], z) - // clear dst flags - bs[i] = btmp[0] & 0x3f - bs[i+1] = btmp[1] - i = i + 2 - } - bs[0] = bd - return bs[0:i] -} - -// DecodeTime decodes a []byte into a time.Time. -func decodeTime(bs []byte) (tt time.Time, err error) { - bd := bs[0] - var ( - tsec int64 - tnsec uint32 - tz uint16 - i byte = 1 - i2 byte - n byte - ) - if bd&(1<<7) != 0 { - var btmp [8]byte - n = ((bd >> 2) & 0x7) + 1 - i2 = i + n - copy(btmp[8-n:], bs[i:i2]) - //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) - if bs[i]&(1<<7) != 0 { - copy(btmp[0:8-n], bsAll0xff) - //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } - } - i = i2 - tsec = int64(bigen.Uint64(btmp[:])) - } - if bd&(1<<6) != 0 { - var btmp [4]byte - n = (bd & 0x3) + 1 - i2 = i + n - copy(btmp[4-n:], bs[i:i2]) - i = i2 - tnsec = bigen.Uint32(btmp[:]) - } - if bd&(1<<5) == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - return - } - // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. - // However, we need name here, so it can be shown when time is printed. - // Zone name is in form: UTC-08:00. 
- // Note that Go Libs do not give access to dst flag, so we ignore dst bits - - i2 = i + 2 - tz = bigen.Uint16(bs[i:i2]) - i = i2 - // sign extend sign bit into top 2 MSB (which were dst bits): - if tz&(1<<13) == 0 { // positive - tz = tz & 0x3fff //clear 2 MSBs: dst bits - } else { // negative - tz = tz | 0xc000 //set 2 MSBs: dst bits - //tzname[3] = '-' (TODO: verify. this works here) - } - tzint := int16(tz) - if tzint == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - } else { - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. - // var zoneName = timeLocUTCName(tzint) - tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) - } - return -} - -// func timeLocUTCName(tzint int16) string { -// if tzint == 0 { -// return "UTC" -// } -// var tzname = []byte("UTC+00:00") -// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. -// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first -// var tzhr, tzmin int16 -// if tzint < 0 { -// tzname[3] = '-' // (TODO: verify. this works here) -// tzhr, tzmin = -tzint/60, (-tzint)%60 -// } else { -// tzhr, tzmin = tzint/60, tzint%60 -// } -// tzname[4] = timeDigits[tzhr/10] -// tzname[5] = timeDigits[tzhr%10] -// tzname[7] = timeDigits[tzmin/10] -// tzname[8] = timeDigits[tzmin%10] -// return string(tzname) -// //return time.FixedZone(string(tzname), int(tzint)*60) -// } diff --git a/vendor/github.com/ugorji/go/codec/z.go b/vendor/github.com/ugorji/go/codec/z.go deleted file mode 100644 index b6ac0769a05..00000000000 --- a/vendor/github.com/ugorji/go/codec/z.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -package codec - -import "sort" - -// TODO: this is brittle, as it depends on z.go's init() being called last. -// The current build tools all honor that files are passed in lexical order. -// However, we should consider using an init_channel, -// that each person doing init will write to. - -func init() { - if !useLookupRecognizedTypes { - return - } - sort.Sort(uintptrSlice(recognizedRtids)) - sort.Sort(uintptrSlice(recognizedRtidPtrs)) - recognizedRtidOrPtrs = make([]uintptr, len(recognizedRtids)+len(recognizedRtidPtrs)) - copy(recognizedRtidOrPtrs, recognizedRtids) - copy(recognizedRtidOrPtrs[len(recognizedRtids):], recognizedRtidPtrs) - sort.Sort(uintptrSlice(recognizedRtidOrPtrs)) -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 606cf1f9726..00000000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. 
- if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go deleted file mode 100644 index 926870cc23f..00000000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.7 - -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -func nop() {} - -var ( - testHookContextDoneBeforeHeaders = nop - testHookDoReturned = nop - testHookDidBodyClose = nop -) - -// Do sends an HTTP request with the provided http.Client and returns an HTTP response. -// If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - // TODO(djd): Respect any existing value of req.Cancel. - cancel := make(chan struct{}) - req.Cancel = cancel - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - // Make local copies of test hooks closed over by goroutines below. - // Prevents data races in tests. - testHookDoReturned := testHookDoReturned - testHookDidBodyClose := testHookDidBodyClose - - go func() { - resp, err := client.Do(req) - testHookDoReturned() - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - testHookContextDoneBeforeHeaders() - close(cancel) - // Clean up after the goroutine calling client.Do: - go func() { - if r := <-result; r.resp != nil { - testHookDidBodyClose() - r.resp.Body.Close() - } - }() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { - select { - case <-ctx.Done(): - close(cancel) - case <-c: - // The response's Body is closed. - } - }() - resp.Body = ¬ifyingReader{resp.Body, c} - - return resp, nil -} - -// Get issues a GET request via the Do function. 
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. -type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd746c..00000000000 --- a/vendor/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. 
-# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md deleted file mode 100644 index 46aa2b12dda..00000000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e9680..00000000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE deleted file mode 100644 index 6a66aea5eaf..00000000000 --- a/vendor/golang.org/x/oauth2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md deleted file mode 100644 index eb8dcee179e..00000000000 --- a/vendor/golang.org/x/oauth2/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# OAuth2 for Go - -[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) -[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) - -oauth2 package contains a client implementation for OAuth 2.0 spec. - -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - -See godoc for further documentation and examples. - -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) - - -## App Engine - -In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor -of the [`context.Context`](https://golang.org/x/net/context#Context) type from -the `golang.org/x/net/context` package - -This means it's no longer possible to use the "Classic App Engine" -`appengine.Context` type with the `oauth2` package. (You're using -Classic App Engine if you import the package `"appengine"`.) - -To work around this, you may use the new `"google.golang.org/appengine"` -package. This package has almost the same API as the `"appengine"` package, -but it can be fetched with `go get` and used on "Managed VMs" and well as -Classic App Engine. - -See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) -for information on updating your app. - -If you don't want to update your entire app to use the new App Engine packages, -you may use both sets of packages in parallel, using only the new packages -with the `oauth2` package. 
- -```go -import ( - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - newappengine "google.golang.org/appengine" - newurlfetch "google.golang.org/appengine/urlfetch" - - "appengine" -) - -func handler(w http.ResponseWriter, r *http.Request) { - var c appengine.Context = appengine.NewContext(r) - c.Infof("Logging a message with the old package") - - var ctx context.Context = newappengine.NewContext(r) - client := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(ctx, "scope"), - Base: &newurlfetch.Transport{Context: ctx}, - }, - } - client.Get("...") -} -``` - -## Report Issues / Send Patches - -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - -The main issue tracker for the oauth2 repository is located at -https://github.com/golang/oauth2/issues. diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go deleted file mode 100644 index 50d918b8788..00000000000 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "sort" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" -) - -// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex. -var appengineFlex bool - -// Set at init time by appengine_hook.go. If nil, we're not on App Engine. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// Set at init time by appengine_hook.go. If nil, we're not on App Engine. 
-var appengineAppIDFunc func(c context.Context) string - -// AppEngineTokenSource returns a token source that fetches tokens -// issued to the current App Engine application's service account. -// If you are implementing a 3-legged OAuth 2.0 flow on App Engine -// that involves user accounts, see oauth2.Config instead. -// -// The provided context must have come from appengine.NewContext. -func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &appEngineTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. -var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type appEngineTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) 
- if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go deleted file mode 100644 index 56669eaa98d..00000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_hook.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine appenginevm - -package google - -import "google.golang.org/appengine" - -func init() { - appengineTokenFunc = appengine.AccessToken - appengineAppIDFunc = appengine.AppID -} diff --git a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go deleted file mode 100644 index 5d0231af2dd..00000000000 --- a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appenginevm - -package google - -func init() { - appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server. -} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go deleted file mode 100644 index b4b62745c45..00000000000 --- a/vendor/golang.org/x/oauth2/google/default.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package google - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "runtime" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/net/context" - "golang.org/x/oauth2" -) - -// DefaultCredentials holds "Application Default Credentials". -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -type DefaultCredentials struct { - ProjectID string // may be empty - TokenSource oauth2.TokenSource - - // JSON contains the raw bytes from a JSON credentials file. - // This field may be nil if authentication is provided by the - // environment and not with a credentials file, e.g. when code is - // running on Google Cloud Platform. - JSON []byte -} - -// DefaultClient returns an HTTP Client that uses the -// DefaultTokenSource to obtain authentication credentials. -func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { - ts, err := DefaultTokenSource(ctx, scope...) - if err != nil { - return nil, err - } - return oauth2.NewClient(ctx, ts), nil -} - -// DefaultTokenSource returns the token source for -// "Application Default Credentials". -// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. -func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { - creds, err := FindDefaultCredentials(ctx, scope...) - if err != nil { - return nil, err - } - return creds.TokenSource, nil -} - -// FindDefaultCredentials searches for "Application Default Credentials". -// -// It looks for credentials in the following places, -// preferring the first location found: -// -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. 
On Google App Engine it uses the appengine.AccessToken function. -// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches -// credentials from the metadata server. -// (In this final case any provided scopes are ignored.) -func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) { - // First, try the environment variable. - const envVar = "GOOGLE_APPLICATION_CREDENTIALS" - if filename := os.Getenv(envVar); filename != "" { - creds, err := readCredentialsFile(ctx, filename, scope) - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) - } - return creds, nil - } - - // Second, try a well-known file. - filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, scope); err == nil { - return creds, nil - } else if !os.IsNotExist(err) { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) - } - - // Third, if we're on Google App Engine use those credentials. - if appengineTokenFunc != nil && !appengineFlex { - return &DefaultCredentials{ - ProjectID: appengineAppIDFunc(ctx), - TokenSource: AppEngineTokenSource(ctx, scope...), - }, nil - } - - // Fourth, if we're on Google Compute Engine use the metadata server. - if metadata.OnGCE() { - id, _ := metadata.ProjectID() - return &DefaultCredentials{ - ProjectID: id, - TokenSource: ComputeTokenSource(""), - }, nil - } - - // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information.", url) -} - -func wellKnownFile() string { - const f = "application_default_credentials.json" - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) - } - return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) -} - -func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - var f credentialsFile - if err := json.Unmarshal(b, &f); err != nil { - return nil, err - } - ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) - if err != nil { - return nil, err - } - return &DefaultCredentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: b, - }, nil -} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go deleted file mode 100644 index 66a8b0e1812..00000000000 --- a/vendor/golang.org/x/oauth2/google/google.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package google provides support for making OAuth2 authorized and -// authenticated HTTP requests to Google APIs. -// It supports the Web server flow, client-side credentials, service accounts, -// Google Compute Engine service accounts, and Google App Engine service -// accounts. -// -// For more information, please read -// https://developers.google.com/accounts/docs/OAuth2 -// and -// https://developers.google.com/accounts/docs/application-default-credentials. -package google // import "golang.org/x/oauth2/google" - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" -) - -// Endpoint is Google's OAuth 2.0 endpoint. 
-var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", -} - -// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. -const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" - -// ConfigFromJSON uses a Google Developers Console client_credentials.json -// file to construct a config. -// client_credentials.json can be downloaded from -// https://console.developers.google.com, under "Credentials". Download the Web -// application credentials in the JSON format and provide the contents of the -// file as jsonKey. -func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { - type cred struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - RedirectURIs []string `json:"redirect_uris"` - AuthURI string `json:"auth_uri"` - TokenURI string `json:"token_uri"` - } - var j struct { - Web *cred `json:"web"` - Installed *cred `json:"installed"` - } - if err := json.Unmarshal(jsonKey, &j); err != nil { - return nil, err - } - var c *cred - switch { - case j.Web != nil: - c = j.Web - case j.Installed != nil: - c = j.Installed - default: - return nil, fmt.Errorf("oauth2/google: no credentials found") - } - if len(c.RedirectURIs) < 1 { - return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") - } - return &oauth2.Config{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - RedirectURL: c.RedirectURIs[0], - Scopes: scope, - Endpoint: oauth2.Endpoint{ - AuthURL: c.AuthURI, - TokenURL: c.TokenURI, - }, - }, nil -} - -// JWTConfigFromJSON uses a Google Developers service account JSON key file to read -// the credentials that authorize and authenticate the requests. -// Create a service account on "Credentials" for your project at -// https://console.developers.google.com to download a JSON key file. 
-func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { - var f credentialsFile - if err := json.Unmarshal(jsonKey, &f); err != nil { - return nil, err - } - if f.Type != serviceAccountKey { - return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) - } - scope = append([]string(nil), scope...) // copy - return f.jwtConfig(scope), nil -} - -// JSON key file types. -const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" -) - -// credentialsFile is the unmarshalled representation of a credentials file. -type credentialsFile struct { - Type string `json:"type"` // serviceAccountKey or userCredentialsKey - - // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - TokenURL string `json:"token_uri"` - ProjectID string `json:"project_id"` - - // User Credential fields - // (These typically come from gcloud auth.) 
- ClientSecret string `json:"client_secret"` - ClientID string `json:"client_id"` - RefreshToken string `json:"refresh_token"` -} - -func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { - cfg := &jwt.Config{ - Email: f.ClientEmail, - PrivateKey: []byte(f.PrivateKey), - PrivateKeyID: f.PrivateKeyID, - Scopes: scopes, - TokenURL: f.TokenURL, - } - if cfg.TokenURL == "" { - cfg.TokenURL = JWTTokenURL - } - return cfg -} - -func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { - switch f.Type { - case serviceAccountKey: - cfg := f.jwtConfig(scopes) - return cfg.TokenSource(ctx), nil - case userCredentialsKey: - cfg := &oauth2.Config{ - ClientID: f.ClientID, - ClientSecret: f.ClientSecret, - Scopes: scopes, - Endpoint: Endpoint, - } - tok := &oauth2.Token{RefreshToken: f.RefreshToken} - return cfg.TokenSource(ctx, tok), nil - case "": - return nil, errors.New("missing 'type' field in credentials") - default: - return nil, fmt.Errorf("unknown credential type: %q", f.Type) - } -} - -// ComputeTokenSource returns a token source that fetches access tokens -// from Google Compute Engine (GCE)'s metadata server. It's only valid to use -// this token source if your program is running on a GCE instance. -// If no account is specified, "default" is used. -// Further information about retrieving access tokens from the GCE metadata -// server can be found at https://cloud.google.com/compute/docs/authentication. 
-func ComputeTokenSource(account string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account}) -} - -type computeSource struct { - account string -} - -func (cs computeSource) Token() (*oauth2.Token, error) { - if !metadata.OnGCE() { - return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") - } - acct := cs.account - if acct == "" { - acct = "default" - } - tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") - if err != nil { - return nil, err - } - var res struct { - AccessToken string `json:"access_token"` - ExpiresInSec int `json:"expires_in"` - TokenType string `json:"token_type"` - } - err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) - if err != nil { - return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) - } - if res.ExpiresInSec == 0 || res.AccessToken == "" { - return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") - } - return &oauth2.Token{ - AccessToken: res.AccessToken, - TokenType: res.TokenType, - Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - }, nil -} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go deleted file mode 100644 index b0fdb3a888a..00000000000 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package google - -import ( - "crypto/rsa" - "fmt" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON -// key file to read the credentials that authorize and authenticate the -// requests, and returns a TokenSource that does not use any OAuth2 flow but -// instead creates a JWT and sends that as the access token. -// The audience is typically a URL that specifies the scope of the credentials. -// -// Note that this is not a standard OAuth flow, but rather an -// optimization supported by a few Google services. -// Unless you know otherwise, you should use JWTConfigFromJSON instead. -func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { - cfg, err := JWTConfigFromJSON(jsonKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse JSON key: %v", err) - } - pk, err := internal.ParseKey(cfg.PrivateKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse key: %v", err) - } - ts := &jwtAccessTokenSource{ - email: cfg.Email, - audience: audience, - pk: pk, - pkID: cfg.PrivateKeyID, - } - tok, err := ts.Token() - if err != nil { - return nil, err - } - return oauth2.ReuseTokenSource(tok, ts), nil -} - -type jwtAccessTokenSource struct { - email, audience string - pk *rsa.PrivateKey - pkID string -} - -func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { - iat := time.Now() - exp := iat.Add(time.Hour) - cs := &jws.ClaimSet{ - Iss: ts.email, - Sub: ts.email, - Aud: ts.audience, - Iat: iat.Unix(), - Exp: exp.Unix(), - } - hdr := &jws.Header{ - Algorithm: "RS256", - Typ: "JWT", - KeyID: string(ts.pkID), - } - msg, err := jws.Encode(hdr, cs, ts.pk) - if err != nil { - return nil, fmt.Errorf("google: could not encode JWT: %v", err) - } - return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil -} diff --git 
a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go deleted file mode 100644 index bdc18084b1f..00000000000 --- a/vendor/golang.org/x/oauth2/google/sdk.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" -) - -type sdkCredentials struct { - Data []struct { - Credential struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - TokenExpiry *time.Time `json:"token_expiry"` - } `json:"credential"` - Key struct { - Account string `json:"account"` - Scope string `json:"scope"` - } `json:"key"` - } -} - -// An SDKConfig provides access to tokens from an account already -// authorized via the Google Cloud SDK. -type SDKConfig struct { - conf oauth2.Config - initialToken *oauth2.Token -} - -// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK -// account. If account is empty, the account currently active in -// Google Cloud SDK properties is used. -// Google Cloud SDK credentials must be created by running `gcloud auth` -// before using this function. -// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
-func NewSDKConfig(account string) (*SDKConfig, error) { - configPath, err := sdkConfigPath() - if err != nil { - return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) - } - credentialsPath := filepath.Join(configPath, "credentials") - f, err := os.Open(credentialsPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) - } - defer f.Close() - - var c sdkCredentials - if err := json.NewDecoder(f).Decode(&c); err != nil { - return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) - } - if len(c.Data) == 0 { - return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) - } - if account == "" { - propertiesPath := filepath.Join(configPath, "properties") - f, err := os.Open(propertiesPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) - } - defer f.Close() - ini, err := internal.ParseINI(f) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) - } - core, ok := ini["core"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) - } - active, ok := core["account"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) - } - account = active - } - - for _, d := range c.Data { - if account == "" || d.Key.Account == account { - if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { - return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) - } - var expiry time.Time - if d.Credential.TokenExpiry != nil { - expiry = *d.Credential.TokenExpiry - } - return &SDKConfig{ - conf: oauth2.Config{ - ClientID: d.Credential.ClientID, - ClientSecret: d.Credential.ClientSecret, - Scopes: strings.Split(d.Key.Scope, " "), - Endpoint: Endpoint, - 
RedirectURL: "oob", - }, - initialToken: &oauth2.Token{ - AccessToken: d.Credential.AccessToken, - RefreshToken: d.Credential.RefreshToken, - Expiry: expiry, - }, - }, nil - } - } - return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) -} - -// Client returns an HTTP client using Google Cloud SDK credentials to -// authorize requests. The token will auto-refresh as necessary. The -// underlying http.RoundTripper will be obtained using the provided -// context. The returned client and its Transport should not be -// modified. -func (c *SDKConfig) Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &oauth2.Transport{ - Source: c.TokenSource(ctx), - }, - } -} - -// TokenSource returns an oauth2.TokenSource that retrieve tokens from -// Google Cloud SDK credentials using the provided context. -// It will returns the current access token stored in the credentials, -// and refresh it when it expires, but it won't update the credentials -// with the new access token. -func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { - return c.conf.TokenSource(ctx, c.initialToken) -} - -// Scopes are the OAuth 2.0 scopes the current account is authorized for. -func (c *SDKConfig) Scopes() []string { - return c.conf.Scopes -} - -// sdkConfigPath tries to guess where the gcloud config is located. -// It can be overridden during tests. 
-var sdkConfigPath = func() (string, error) { - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil - } - homeDir := guessUnixHomeDir() - if homeDir == "" { - return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") - } - return filepath.Join(homeDir, ".config", "gcloud"), nil -} - -func guessUnixHomeDir() string { - // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 - if v := os.Getenv("HOME"); v != "" { - return v - } - // Else, fall back to user.Current: - if u, err := user.Current(); err == nil { - return u.HomeDir - } - return "" -} diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go deleted file mode 100644 index 03265e888af..00000000000 --- a/vendor/golang.org/x/oauth2/internal/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go deleted file mode 100644 index 6978192a99c..00000000000 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "bufio" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io" - "strings" -) - -// ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase. 
-func ParseKey(key []byte) (*rsa.PrivateKey, error) { - block, _ := pem.Decode(key) - if block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") - } - return parsed, nil -} - -func ParseINI(ini io.Reader) (map[string]map[string]string, error) { - result := map[string]map[string]string{ - "": {}, // root section - } - scanner := bufio.NewScanner(ini) - currentSection := "" - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, ";") { - // comment. - continue - } - if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { - currentSection = strings.TrimSpace(line[1 : len(line)-1]) - result[currentSection] = map[string]string{} - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) == 2 && parts[0] != "" { - result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) - } - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning ini: %v", err) - } - return result, nil -} - -func CondVal(v string) []string { - if v == "" { - return nil - } - return []string{v} -} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go deleted file mode 100644 index cf959ea69f6..00000000000 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package internal - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" -) - -// Token represents the crendentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// This type is a mirror of oauth2.Token and exists to break -// an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time - - // Raw optionally contains extra metadata from the server - // when updating a token. - Raw interface{} -} - -// tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. 
-type tokenJSON struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - RefreshToken string `json:"refresh_token"` - ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number - Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in -} - -func (e *tokenJSON) expiry() (t time.Time) { - if v := e.ExpiresIn; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - if v := e.Expires; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - return -} - -type expirationTime int32 - -func (e *expirationTime) UnmarshalJSON(b []byte) error { - var n json.Number - err := json.Unmarshal(b, &n) - if err != nil { - return err - } - i, err := n.Int64() - if err != nil { - return err - } - *e = expirationTime(i) - return nil -} - -var brokenAuthHeaderProviders = []string{ - "https://accounts.google.com/", - "https://api.codeswholesale.com/oauth/token", - "https://api.dropbox.com/", - "https://api.dropboxapi.com/", - "https://api.instagram.com/", - "https://api.netatmo.net/", - "https://api.odnoklassniki.ru/", - "https://api.pushbullet.com/", - "https://api.soundcloud.com/", - "https://api.twitch.tv/", - "https://app.box.com/", - "https://connect.stripe.com/", - "https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214 - "https://login.microsoftonline.com/", - "https://login.salesforce.com/", - "https://login.windows.net", - "https://oauth.sandbox.trainingpeaks.com/", - "https://oauth.trainingpeaks.com/", - "https://oauth.vk.com/", - "https://openapi.baidu.com/", - "https://slack.com/", - "https://test-sandbox.auth.corp.google.com", - "https://test.salesforce.com/", - "https://user.gini.net/", - "https://www.douban.com/", - "https://www.googleapis.com/", - "https://www.linkedin.com/", - "https://www.strava.com/oauth/", - "https://www.wunderlist.com/oauth/", - "https://api.patreon.com/", - 
"https://sandbox.codeswholesale.com/oauth/token", - "https://api.sipgate.com/v1/authorization/oauth", -} - -// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. -var brokenAuthHeaderDomains = []string{ - ".force.com", - ".myshopify.com", - ".okta.com", - ".oktapreview.com", -} - -func RegisterBrokenAuthHeaderProvider(tokenURL string) { - brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) -} - -// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL -// implements the OAuth2 spec correctly -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. -// In summary: -// - Reddit only accepts client secret in the Authorization header -// - Dropbox accepts either it in URL param or Auth header, but not both. -// - Google only accepts URL param (not spec compliant?), not Auth header -// - Stripe only accepts client secret in Auth header with Bearer method, not Basic -func providerAuthHeaderWorks(tokenURL string) bool { - for _, s := range brokenAuthHeaderProviders { - if strings.HasPrefix(tokenURL, s) { - // Some sites fail to implement the OAuth2 spec fully. - return false - } - } - - if u, err := url.Parse(tokenURL); err == nil { - for _, s := range brokenAuthHeaderDomains { - if strings.HasSuffix(u.Host, s) { - return false - } - } - } - - // Assume the provider implements the spec properly - // otherwise. We can add more exceptions as they're - // discovered. We will _not_ be adding configurable hooks - // to this package to let users select server bugs. 
- return true -} - -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { - hc, err := ContextClient(ctx) - if err != nil { - return nil, err - } - bustedAuth := !providerAuthHeaderWorks(tokenURL) - if bustedAuth { - if clientID != "" { - v.Set("client_id", clientID) - } - if clientSecret != "" { - v.Set("client_secret", clientSecret) - } - } - req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - if !bustedAuth { - req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) - } - r, err := ctxhttp.Do(ctx, hc, req) - if err != nil { - return nil, err - } - defer r.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) - } - - var token *Token - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) - switch content { - case "application/x-www-form-urlencoded", "text/plain": - vals, err := url.ParseQuery(string(body)) - if err != nil { - return nil, err - } - token = &Token{ - AccessToken: vals.Get("access_token"), - TokenType: vals.Get("token_type"), - RefreshToken: vals.Get("refresh_token"), - Raw: vals, - } - e := vals.Get("expires_in") - if e == "" { - // TODO(jbd): Facebook's OAuth2 implementation is broken and - // returns expires_in field in expires. Remove the fallback to expires, - // when Facebook fixes their implementation. 
- e = vals.Get("expires") - } - expires, _ := strconv.Atoi(e) - if expires != 0 { - token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) - } - default: - var tj tokenJSON - if err = json.Unmarshal(body, &tj); err != nil { - return nil, err - } - token = &Token{ - AccessToken: tj.AccessToken, - TokenType: tj.TokenType, - RefreshToken: tj.RefreshToken, - Expiry: tj.expiry(), - Raw: make(map[string]interface{}), - } - json.Unmarshal(body, &token.Raw) // no error checks for optional fields - } - // Don't overwrite `RefreshToken` with an empty value - // if this was a token refreshing request. - if token.RefreshToken == "" { - token.RefreshToken = v.Get("refresh_token") - } - return token, nil -} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go deleted file mode 100644 index 783bd98c8b4..00000000000 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient ContextKey - -// ContextKey is just an empty struct. It exists so HTTPClient can be -// an immutable public variable with a unique type. It's immutable -// because nobody else can create a ContextKey, being unexported. -type ContextKey struct{} - -// ContextClientFunc is a func which tries to return an *http.Client -// given a Context value. If it returns an error, the search stops -// with that error. If it returns (nil, nil), the search continues -// down the list of registered funcs. 
-type ContextClientFunc func(context.Context) (*http.Client, error) - -var contextClientFuncs []ContextClientFunc - -func RegisterContextClientFunc(fn ContextClientFunc) { - contextClientFuncs = append(contextClientFuncs, fn) -} - -func ContextClient(ctx context.Context) (*http.Client, error) { - if ctx != nil { - if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc, nil - } - } - for _, fn := range contextClientFuncs { - c, err := fn(ctx) - if err != nil { - return nil, err - } - if c != nil { - return c, nil - } - } - return http.DefaultClient, nil -} - -func ContextTransport(ctx context.Context) http.RoundTripper { - hc, err := ContextClient(ctx) - // This is a rare error case (somebody using nil on App Engine). - if err != nil { - return ErrorTransport{err} - } - return hc.Transport -} - -// ErrorTransport returns the specified error on RoundTrip. -// This RoundTripper should be used in rare error cases where -// error handling can be postponed to response handling time. -type ErrorTransport struct{ Err error } - -func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { - return nil, t.Err -} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go deleted file mode 100644 index 683d2d271a3..00000000000 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jws provides a partial implementation -// of JSON Web Signature encoding and decoding. -// It exists to support the golang.org/x/oauth2 package. -// -// See RFC 7515. -// -// Deprecated: this package is not intended for public use and might be -// removed in the future. It exists for internal use only. -// Please switch to another JWS package or copy this package into your own -// source tree. 
-package jws // import "golang.org/x/oauth2/jws" - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" -) - -// ClaimSet contains information about the JWT signature including the -// permissions being requested (scopes), the target of the token, the issuer, -// the time the token was issued, and the lifetime of the token. -type ClaimSet struct { - Iss string `json:"iss"` // email address of the client_id of the application making the access token request - Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests - Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). - Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) - Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) - Typ string `json:"typ,omitempty"` // token type (Optional). - - // Email for which the application is requesting delegated access (Optional). - Sub string `json:"sub,omitempty"` - - // The old name of Sub. Client keeps setting Prn to be - // complaint with legacy OAuth 2.0 providers. (Optional) - Prn string `json:"prn,omitempty"` - - // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 - // This array is marshalled using custom code (see (c *ClaimSet) encode()). - PrivateClaims map[string]interface{} `json:"-"` -} - -func (c *ClaimSet) encode() (string, error) { - // Reverting time back for machines whose time is not perfectly in sync. - // If client machine's time is in the future according - // to Google servers, an access token will not be issued. 
- now := time.Now().Add(-10 * time.Second) - if c.Iat == 0 { - c.Iat = now.Unix() - } - if c.Exp == 0 { - c.Exp = now.Add(time.Hour).Unix() - } - if c.Exp < c.Iat { - return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) - } - - b, err := json.Marshal(c) - if err != nil { - return "", err - } - - if len(c.PrivateClaims) == 0 { - return base64.RawURLEncoding.EncodeToString(b), nil - } - - // Marshal private claim set and then append it to b. - prv, err := json.Marshal(c.PrivateClaims) - if err != nil { - return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) - } - - // Concatenate public and private claim JSON objects. - if !bytes.HasSuffix(b, []byte{'}'}) { - return "", fmt.Errorf("jws: invalid JSON %s", b) - } - if !bytes.HasPrefix(prv, []byte{'{'}) { - return "", fmt.Errorf("jws: invalid JSON %s", prv) - } - b[len(b)-1] = ',' // Replace closing curly brace with a comma. - b = append(b, prv[1:]...) // Append private claims. - return base64.RawURLEncoding.EncodeToString(b), nil -} - -// Header represents the header for the signed JWS payloads. -type Header struct { - // The algorithm used for signature. - Algorithm string `json:"alg"` - - // Represents the token type. - Typ string `json:"typ"` - - // The optional hint of which key is being used. - KeyID string `json:"kid,omitempty"` -} - -func (h *Header) encode() (string, error) { - b, err := json.Marshal(h) - if err != nil { - return "", err - } - return base64.RawURLEncoding.EncodeToString(b), nil -} - -// Decode decodes a claim set from a JWS payload. -func Decode(payload string) (*ClaimSet, error) { - // decode returned id token to get expiry - s := strings.Split(payload, ".") - if len(s) < 2 { - // TODO(jbd): Provide more context about the error. 
- return nil, errors.New("jws: invalid token received") - } - decoded, err := base64.RawURLEncoding.DecodeString(s[1]) - if err != nil { - return nil, err - } - c := &ClaimSet{} - err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) - return c, err -} - -// Signer returns a signature for the given data. -type Signer func(data []byte) (sig []byte, err error) - -// EncodeWithSigner encodes a header and claim set with the provided signer. -func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { - head, err := header.encode() - if err != nil { - return "", err - } - cs, err := c.encode() - if err != nil { - return "", err - } - ss := fmt.Sprintf("%s.%s", head, cs) - sig, err := sg([]byte(ss)) - if err != nil { - return "", err - } - return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil -} - -// Encode encodes a signed JWS with provided header and claim set. -// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. -func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { - sg := func(data []byte) (sig []byte, err error) { - h := sha256.New() - h.Write(data) - return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) - } - return EncodeWithSigner(header, c, sg) -} - -// Verify tests whether the provided JWT token's signature was produced by the private key -// associated with the supplied public key. -func Verify(token string, key *rsa.PublicKey) error { - parts := strings.Split(token, ".") - if len(parts) != 3 { - return errors.New("jws: invalid token received, token must have 3 parts") - } - - signedContent := parts[0] + "." 
+ parts[1] - signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) - if err != nil { - return err - } - - h := sha256.New() - h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) -} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go deleted file mode 100644 index e016db42178..00000000000 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly -// known as "two-legged OAuth 2.0". -// -// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 -package jwt - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -var ( - defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" - defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} -) - -// Config is the configuration for using JWT to fetch tokens, -// commonly known as "two-legged OAuth 2.0". -type Config struct { - // Email is the OAuth client identifier used when communicating with - // the configured OAuth provider. - Email string - - // PrivateKey contains the contents of an RSA private key or the - // contents of a PEM file that contains a private key. The provided - // private key is used to sign JWT payloads. - // PEM containers with a passphrase are not supported. - // Use the following command to convert a PKCS 12 file into a PEM. - // - // $ openssl pkcs12 -in key.p12 -out key.pem -nodes - // - PrivateKey []byte - - // PrivateKeyID contains an optional hint indicating which key is being - // used. 
- PrivateKeyID string - - // Subject is the optional user to impersonate. - Subject string - - // Scopes optionally specifies a list of requested permission scopes. - Scopes []string - - // TokenURL is the endpoint required to complete the 2-legged JWT flow. - TokenURL string - - // Expires optionally specifies how long the token is valid for. - Expires time.Duration -} - -// TokenSource returns a JWT TokenSource using the configuration -// in c and the HTTP client from the provided context. -func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) -} - -// Client returns an HTTP client wrapping the context's -// HTTP transport and adding Authorization headers with tokens -// obtained from c. -// -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context) *http.Client { - return oauth2.NewClient(ctx, c.TokenSource(ctx)) -} - -// jwtSource is a source that always does a signed JWT request for a token. -// It should typically be wrapped with a reuseTokenSource. -type jwtSource struct { - ctx context.Context - conf *Config -} - -func (js jwtSource) Token() (*oauth2.Token, error) { - pk, err := internal.ParseKey(js.conf.PrivateKey) - if err != nil { - return nil, err - } - hc := oauth2.NewClient(js.ctx, nil) - claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, - } - if subject := js.conf.Subject; subject != "" { - claimSet.Sub = subject - // prn is the old name of sub. Keep setting it - // to be compatible with legacy OAuth 2.0 providers. 
- claimSet.Prn = subject - } - if t := js.conf.Expires; t > 0 { - claimSet.Exp = time.Now().Add(t).Unix() - } - h := *defaultHeader - h.KeyID = js.conf.PrivateKeyID - payload, err := jws.Encode(&h, claimSet, pk) - if err != nil { - return nil, err - } - v := url.Values{} - v.Set("grant_type", defaultGrantType) - v.Set("assertion", payload) - resp, err := hc.PostForm(js.conf.TokenURL, v) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) - } - // tokenRes is the JSON response body. - var tokenRes struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - ExpiresIn int64 `json:"expires_in"` // relative seconds from now - } - if err := json.Unmarshal(body, &tokenRes); err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - token := &oauth2.Token{ - AccessToken: tokenRes.AccessToken, - TokenType: tokenRes.TokenType, - } - raw := make(map[string]interface{}) - json.Unmarshal(body, &raw) // no error checks for optional fields - token = token.WithExtra(raw) - - if secs := tokenRes.ExpiresIn; secs > 0 { - token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) - } - if v := tokenRes.IDToken; v != "" { - // decode returned id token to get expiry - claimSet, err := jws.Decode(v) - if err != nil { - return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) - } - token.Expiry = time.Unix(claimSet.Exp, 0) - } - return token, nil -} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go deleted file mode 100644 index 4bafe873d0d..00000000000 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ 
/dev/null @@ -1,344 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package oauth2 provides support for making -// OAuth2 authorized and authenticated HTTP requests. -// It can additionally grant authorization with Bearer JWT. -package oauth2 // import "golang.org/x/oauth2" - -import ( - "bytes" - "errors" - "net/http" - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" -) - -// NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). -// -// Deprecated: Use context.Background() or context.TODO() instead. -var NoContext = context.TODO() - -// RegisterBrokenAuthHeaderProvider registers an OAuth2 server -// identified by the tokenURL prefix as an OAuth2 implementation -// which doesn't support the HTTP Basic authentication -// scheme to authenticate with the authorization server. -// Once a server is registered, credentials (client_id and client_secret) -// will be passed as query parameters rather than being present -// in the Authorization header. -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. -func RegisterBrokenAuthHeaderProvider(tokenURL string) { - internal.RegisterBrokenAuthHeaderProvider(tokenURL) -} - -// Config describes a typical 3-legged OAuth2 flow, with both the -// client application information and the server's endpoint URLs. -// For the client credentials 2-legged OAuth2 flow, see the clientcredentials -// package (https://golang.org/x/oauth2/clientcredentials). -type Config struct { - // ClientID is the application's ID. - ClientID string - - // ClientSecret is the application's secret. - ClientSecret string - - // Endpoint contains the resource server's token endpoint - // URLs. 
These are constants specific to each server and are - // often available via site-specific packages, such as - // google.Endpoint or github.Endpoint. - Endpoint Endpoint - - // RedirectURL is the URL to redirect users going through - // the OAuth flow, after the resource owner's URLs. - RedirectURL string - - // Scope specifies optional requested permissions. - Scopes []string -} - -// A TokenSource is anything that can return a token. -type TokenSource interface { - // Token returns a token or an error. - // Token must be safe for concurrent use by multiple goroutines. - // The returned Token must not be modified. - Token() (*Token, error) -} - -// Endpoint contains the OAuth 2.0 provider's authorization and token -// endpoint URLs. -type Endpoint struct { - AuthURL string - TokenURL string -} - -var ( - // AccessTypeOnline and AccessTypeOffline are options passed - // to the Options.AuthCodeURL method. They modify the - // "access_type" field that gets sent in the URL returned by - // AuthCodeURL. - // - // Online is the default if neither is specified. If your - // application needs to refresh access tokens when the user - // is not present at the browser, then use offline. This will - // result in your application obtaining a refresh token the - // first time your application exchanges an authorization - // code for a user. - AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") - AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") - - // ApprovalForce forces the users to view the consent dialog - // and confirm the permissions request at the URL returned - // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") -) - -// An AuthCodeOption is passed to Config.AuthCodeURL. 
-type AuthCodeOption interface { - setValue(url.Values) -} - -type setParam struct{ k, v string } - -func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } - -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters -// to a provider's authorization endpoint. -func SetAuthURLParam(key, value string) AuthCodeOption { - return setParam{key, value} -} - -// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page -// that asks for permissions for the required scopes explicitly. -// -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-zero string and validate that it matches the -// the state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. -// -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. -func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer - buf.WriteString(c.Endpoint.AuthURL) - v := url.Values{ - "response_type": {"code"}, - "client_id": {c.ClientID}, - "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - "state": internal.CondVal(state), - } - for _, opt := range opts { - opt.setValue(v) - } - if strings.Contains(c.Endpoint.AuthURL, "?") { - buf.WriteByte('&') - } else { - buf.WriteByte('?') - } - buf.WriteString(v.Encode()) - return buf.String() -} - -// PasswordCredentialsToken converts a resource owner username and password -// pair into a token. -// -// Per the RFC, this grant type should only be used "when there is a high -// degree of trust between the resource owner and the client (e.g., the client -// is part of the device operating system or a highly privileged application), -// and when other authorization grant types are not available." -// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. -// -// The HTTP client to use is derived from the context. 
-// If nil, http.DefaultClient is used. -func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"password"}, - "username": {username}, - "password": {password}, - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - }) -} - -// Exchange converts an authorization code into a token. -// -// It is used after a resource provider redirects the user back -// to the Redirect URI (the URL obtained from AuthCodeURL). -// -// The HTTP client to use is derived from the context. -// If a client is not provided via the context, http.DefaultClient is used. -// -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). -func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"authorization_code"}, - "code": {code}, - "redirect_uri": internal.CondVal(c.RedirectURL), - }) -} - -// Client returns an HTTP client using the provided token. -// The token will auto-refresh as necessary. The underlying -// HTTP transport will be obtained using the provided context. -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context, t *Token) *http.Client { - return NewClient(ctx, c.TokenSource(ctx, t)) -} - -// TokenSource returns a TokenSource that returns t until t expires, -// automatically refreshing it as necessary using the provided context. -// -// Most users will use Config.Client instead. -func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { - tkr := &tokenRefresher{ - ctx: ctx, - conf: c, - } - if t != nil { - tkr.refreshToken = t.RefreshToken - } - return &reuseTokenSource{ - t: t, - new: tkr, - } -} - -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" -// HTTP requests to renew a token using a RefreshToken. 
-type tokenRefresher struct { - ctx context.Context // used to get HTTP requests - conf *Config - refreshToken string -} - -// WARNING: Token is not safe for concurrent access, as it -// updates the tokenRefresher's refreshToken field. -// Within this package, it is used by reuseTokenSource which -// synchronizes calls to this method with its own mutex. -func (tf *tokenRefresher) Token() (*Token, error) { - if tf.refreshToken == "" { - return nil, errors.New("oauth2: token expired and refresh token is not set") - } - - tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ - "grant_type": {"refresh_token"}, - "refresh_token": {tf.refreshToken}, - }) - - if err != nil { - return nil, err - } - if tf.refreshToken != tk.RefreshToken { - tf.refreshToken = tk.RefreshToken - } - return tk, err -} - -// reuseTokenSource is a TokenSource that holds a single token in memory -// and validates its expiry before each call to retrieve it with -// Token. If it's expired, it will be auto-refreshed using the -// new TokenSource. -type reuseTokenSource struct { - new TokenSource // called when t is expired. - - mu sync.Mutex // guards t - t *Token -} - -// Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. -func (s *reuseTokenSource) Token() (*Token, error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.t.Valid() { - return s.t, nil - } - t, err := s.new.Token() - if err != nil { - return nil, err - } - s.t = t - return t, nil -} - -// StaticTokenSource returns a TokenSource that always returns the same token. -// Because the provided token t is never refreshed, StaticTokenSource is only -// useful for tokens that never expire. -func StaticTokenSource(t *Token) TokenSource { - return staticTokenSource{t} -} - -// staticTokenSource is a TokenSource that always returns the same Token. 
-type staticTokenSource struct { - t *Token -} - -func (s staticTokenSource) Token() (*Token, error) { - return s.t, nil -} - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient internal.ContextKey - -// NewClient creates an *http.Client from a Context and TokenSource. -// The returned client is not valid beyond the lifetime of the context. -// -// Note that if a custom *http.Client is provided via the Context it -// is used only for token acquisition and is not used to configure the -// *http.Client returned from NewClient. -// -// As a special case, if src is nil, a non-OAuth2 client is returned -// using the provided context. This exists to support related OAuth2 -// packages. -func NewClient(ctx context.Context, src TokenSource) *http.Client { - if src == nil { - c, err := internal.ContextClient(ctx) - if err != nil { - return &http.Client{Transport: internal.ErrorTransport{Err: err}} - } - return c - } - return &http.Client{ - Transport: &Transport{ - Base: internal.ContextTransport(ctx), - Source: ReuseTokenSource(nil, src), - }, - } -} - -// ReuseTokenSource returns a TokenSource which repeatedly returns the -// same token as long as it's valid, starting with t. -// When its cached token is invalid, a new token is obtained from src. -// -// ReuseTokenSource is typically used to reuse tokens from a cache -// (such as a file on disk) between runs of a program, rather than -// obtaining new tokens unnecessarily. -// -// The initial token t may be nil, in which case the TokenSource is -// wrapped in a caching version if it isn't one already. This also -// means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. -func ReuseTokenSource(t *Token, src TokenSource) TokenSource { - // Don't wrap a reuseTokenSource in itself. That would work, - // but cause an unnecessary number of mutex operations. 
- // Just build the equivalent one. - if rt, ok := src.(*reuseTokenSource); ok { - if t == nil { - // Just use it directly. - return rt - } - src = rt.new - } - return &reuseTokenSource{ - t: t, - new: src, - } -} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go deleted file mode 100644 index 7a3167f15b0..00000000000 --- a/vendor/golang.org/x/oauth2/token.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" -) - -// expiryDelta determines how earlier a token should be considered -// expired than its actual expiration time. It is used to avoid late -// expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second - -// Token represents the crendentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// Most users of this package should not access fields of Token -// directly. They're exported mostly for use by related packages -// implementing derivative OAuth2 flows. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string `json:"access_token"` - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string `json:"token_type,omitempty"` - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string `json:"refresh_token,omitempty"` - - // Expiry is the optional expiration time of the access token. 
- // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time `json:"expiry,omitempty"` - - // raw optionally contains extra metadata from the server - // when updating a token. - raw interface{} -} - -// Type returns t.TokenType if non-empty, else "Bearer". -func (t *Token) Type() string { - if strings.EqualFold(t.TokenType, "bearer") { - return "Bearer" - } - if strings.EqualFold(t.TokenType, "mac") { - return "MAC" - } - if strings.EqualFold(t.TokenType, "basic") { - return "Basic" - } - if t.TokenType != "" { - return t.TokenType - } - return "Bearer" -} - -// SetAuthHeader sets the Authorization header to r using the access -// token in t. -// -// This method is unnecessary when using Transport or an HTTP Client -// returned by this package. -func (t *Token) SetAuthHeader(r *http.Request) { - r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) -} - -// WithExtra returns a new Token that's a clone of t, but using the -// provided raw extra map. This is only intended for use by packages -// implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { - t2 := new(Token) - *t2 = *t - t2.raw = extra - return t2 -} - -// Extra returns an extra field. -// Extra fields are key-value pairs returned by the server as a -// part of the token retrieval response. 
-func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { - return raw[key] - } - - vals, ok := t.raw.(url.Values) - if !ok { - return nil - } - - v := vals.Get(key) - switch s := strings.TrimSpace(v); strings.Count(s, ".") { - case 0: // Contains no "."; try to parse as int - if i, err := strconv.ParseInt(s, 10, 64); err == nil { - return i - } - case 1: // Contains a single "."; try to parse as float - if f, err := strconv.ParseFloat(s, 64); err == nil { - return f - } - } - - return v -} - -// expired reports whether the token is expired. -// t must be non-nil. -func (t *Token) expired() bool { - if t.Expiry.IsZero() { - return false - } - return t.Expiry.Add(-expiryDelta).Before(time.Now()) -} - -// Valid reports whether t is non-nil, has an AccessToken, and is not expired. -func (t *Token) Valid() bool { - return t != nil && t.AccessToken != "" && !t.expired() -} - -// tokenFromInternal maps an *internal.Token struct into -// a *Token struct. -func tokenFromInternal(t *internal.Token) *Token { - if t == nil { - return nil - } - return &Token{ - AccessToken: t.AccessToken, - TokenType: t.TokenType, - RefreshToken: t.RefreshToken, - Expiry: t.Expiry, - raw: t.Raw, - } -} - -// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. -// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. -func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) - if err != nil { - return nil, err - } - return tokenFromInternal(tk), nil -} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go deleted file mode 100644 index 92ac7e2531f..00000000000 --- a/vendor/golang.org/x/oauth2/transport.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "errors" - "io" - "net/http" - "sync" -) - -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. -// -// Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. -type Transport struct { - // Source supplies the token to add to outgoing requests' - // Authorization headers. - Source TokenSource - - // Base is the base RoundTripper used to make HTTP requests. - // If nil, http.DefaultTransport is used. - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - if t.Source == nil { - return nil, errors.New("oauth2: Transport's Source is nil") - } - token, err := t.Source.Token() - if err != nil { - return nil, err - } - - req2 := cloneRequest(req) // per RoundTripper contract - token.SetAuthHeader(req2) - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. 
-func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *Transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE deleted file mode 100644 index 263aa7a0c12..00000000000 --- a/vendor/google.golang.org/api/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 Google Inc. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/google.golang.org/api/gensupport/backoff.go deleted file mode 100644 index 1356140472a..00000000000 --- a/vendor/google.golang.org/api/gensupport/backoff.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package gensupport - -import ( - "math/rand" - "time" -) - -type BackoffStrategy interface { - // Pause returns the duration of the next pause and true if the operation should be - // retried, or false if no further retries should be attempted. - Pause() (time.Duration, bool) - - // Reset restores the strategy to its initial state. - Reset() -} - -// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff. -// The initial pause time is given by Base. -// Once the total pause time exceeds Max, Pause will indicate no further retries. -type ExponentialBackoff struct { - Base time.Duration - Max time.Duration - total time.Duration - n uint -} - -func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { - if eb.total > eb.Max { - return 0, false - } - - // The next pause is selected from randomly from [0, 2^n * Base). - d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base))) - eb.total += d - eb.n++ - return d, true -} - -func (eb *ExponentialBackoff) Reset() { - eb.n = 0 - eb.total = 0 -} diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go deleted file mode 100644 index 99210491153..00000000000 --- a/vendor/google.golang.org/api/gensupport/buffer.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "bytes" - "io" - - "google.golang.org/api/googleapi" -) - -// MediaBuffer buffers data from an io.Reader to support uploading media in retryable chunks. -type MediaBuffer struct { - media io.Reader - - chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. - err error // Any error generated when populating chunk by reading media. - - // The absolute position of chunk in the underlying media. 
- off int64 -} - -func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer { - return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)} -} - -// Chunk returns the current buffered chunk, the offset in the underlying media -// from which the chunk is drawn, and the size of the chunk. -// Successive calls to Chunk return the same chunk between calls to Next. -func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { - // There may already be data in chunk if Next has not been called since the previous call to Chunk. - if mb.err == nil && len(mb.chunk) == 0 { - mb.err = mb.loadChunk() - } - return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err -} - -// loadChunk will read from media into chunk, up to the capacity of chunk. -func (mb *MediaBuffer) loadChunk() error { - bufSize := cap(mb.chunk) - mb.chunk = mb.chunk[:bufSize] - - read := 0 - var err error - for err == nil && read < bufSize { - var n int - n, err = mb.media.Read(mb.chunk[read:]) - read += n - } - mb.chunk = mb.chunk[:read] - return err -} - -// Next advances to the next chunk, which will be returned by the next call to Chunk. -// Calls to Next without a corresponding prior call to Chunk will have no effect. -func (mb *MediaBuffer) Next() { - mb.off += int64(len(mb.chunk)) - mb.chunk = mb.chunk[0:0] -} - -type readerTyper struct { - io.Reader - googleapi.ContentTyper -} - -// ReaderAtToReader adapts a ReaderAt to be used as a Reader. -// If ra implements googleapi.ContentTyper, then the returned reader -// will also implement googleapi.ContentTyper, delegating to ra. 
-func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { - r := io.NewSectionReader(ra, 0, size) - if typer, ok := ra.(googleapi.ContentTyper); ok { - return readerTyper{r, typer} - } - return r -} diff --git a/vendor/google.golang.org/api/gensupport/doc.go b/vendor/google.golang.org/api/gensupport/doc.go deleted file mode 100644 index 752c4b411b2..00000000000 --- a/vendor/google.golang.org/api/gensupport/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gensupport is an internal implementation detail used by code -// generated by the google-api-go-generator tool. -// -// This package may be modified at any time without regard for backwards -// compatibility. It should not be used directly by API users. -package gensupport diff --git a/vendor/google.golang.org/api/gensupport/header.go b/vendor/google.golang.org/api/gensupport/header.go deleted file mode 100644 index cb5e67c77a2..00000000000 --- a/vendor/google.golang.org/api/gensupport/header.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "fmt" - "runtime" - "strings" -) - -// GoogleClientHeader returns the value to use for the x-goog-api-client -// header, which is used internally by Google. 
-func GoogleClientHeader(generatorVersion, clientElement string) string { - elts := []string{"gl-go/" + strings.Replace(runtime.Version(), " ", "_", -1)} - if clientElement != "" { - elts = append(elts, clientElement) - } - elts = append(elts, fmt.Sprintf("gdcl/%s", generatorVersion)) - return strings.Join(elts, " ") -} diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/gensupport/json.go deleted file mode 100644 index c01e32189f4..00000000000 --- a/vendor/google.golang.org/api/gensupport/json.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" -) - -// MarshalJSON returns a JSON encoding of schema containing only selected fields. -// A field is selected if any of the following is true: -// * it has a non-empty value -// * its field name is present in forceSendFields and it is not a nil pointer or nil interface -// * its field name is present in nullFields. -// The JSON key for each selected field is taken from the field's json: struct tag. 
-func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { - if len(forceSendFields) == 0 && len(nullFields) == 0 { - return json.Marshal(schema) - } - - mustInclude := make(map[string]bool) - for _, f := range forceSendFields { - mustInclude[f] = true - } - useNull := make(map[string]bool) - useNullMaps := make(map[string]map[string]bool) - for _, nf := range nullFields { - parts := strings.SplitN(nf, ".", 2) - field := parts[0] - if len(parts) == 1 { - useNull[field] = true - } else { - if useNullMaps[field] == nil { - useNullMaps[field] = map[string]bool{} - } - useNullMaps[field][parts[1]] = true - } - } - - dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps) - if err != nil { - return nil, err - } - return json.Marshal(dataMap) -} - -func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { - m := make(map[string]interface{}) - s := reflect.ValueOf(schema) - st := s.Type() - - for i := 0; i < s.NumField(); i++ { - jsonTag := st.Field(i).Tag.Get("json") - if jsonTag == "" { - continue - } - tag, err := parseJSONTag(jsonTag) - if err != nil { - return nil, err - } - if tag.ignore { - continue - } - - v := s.Field(i) - f := st.Field(i) - - if useNull[f.Name] { - if !isEmptyValue(v) { - return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name) - } - m[tag.apiName] = nil - continue - } - - if !includeField(v, f, mustInclude) { - continue - } - - // If map fields are explicitly set to null, use a map[string]interface{}. 
- if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { - ms, ok := v.Interface().(map[string]string) - if !ok { - return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name) - } - mi := map[string]interface{}{} - for k, v := range ms { - mi[k] = v - } - for k := range useNullMaps[f.Name] { - mi[k] = nil - } - m[tag.apiName] = mi - continue - } - - // nil maps are treated as empty maps. - if f.Type.Kind() == reflect.Map && v.IsNil() { - m[tag.apiName] = map[string]string{} - continue - } - - // nil slices are treated as empty slices. - if f.Type.Kind() == reflect.Slice && v.IsNil() { - m[tag.apiName] = []bool{} - continue - } - - if tag.stringFormat { - m[tag.apiName] = formatAsString(v, f.Type.Kind()) - } else { - m[tag.apiName] = v.Interface() - } - } - return m, nil -} - -// formatAsString returns a string representation of v, dereferencing it first if possible. -func formatAsString(v reflect.Value, kind reflect.Kind) string { - if kind == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - - return fmt.Sprintf("%v", v.Interface()) -} - -// jsonTag represents a restricted version of the struct tag format used by encoding/json. -// It is used to describe the JSON encoding of fields in a Schema struct. -type jsonTag struct { - apiName string - stringFormat bool - ignore bool -} - -// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. -// The format of the tag must match that generated by the Schema.writeSchemaStruct method -// in the api generator. 
-func parseJSONTag(val string) (jsonTag, error) { - if val == "-" { - return jsonTag{ignore: true}, nil - } - - var tag jsonTag - - i := strings.Index(val, ",") - if i == -1 || val[:i] == "" { - return tag, fmt.Errorf("malformed json tag: %s", val) - } - - tag = jsonTag{ - apiName: val[:i], - } - - switch val[i+1:] { - case "omitempty": - case "omitempty,string": - tag.stringFormat = true - default: - return tag, fmt.Errorf("malformed json tag: %s", val) - } - - return tag, nil -} - -// Reports whether the struct field "f" with value "v" should be included in JSON output. -func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool { - // The regular JSON encoding of a nil pointer is "null", which means "delete this field". - // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. - // However, many fields are not pointers, so there would be no way to delete these fields. - // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. - // Deletion will be handled by a separate mechanism. - if f.Type.Kind() == reflect.Ptr && v.IsNil() { - return false - } - - // The "any" type is represented as an interface{}. If this interface - // is nil, there is no reasonable representation to send. We ignore - // these fields, for the same reasons as given above for pointers. - if f.Type.Kind() == reflect.Interface && v.IsNil() { - return false - } - - return mustInclude[f.Name] || !isEmptyValue(v) -} - -// isEmptyValue reports whether v is the empty value for its type. This -// implementation is based on that of the encoding/json package, but its -// correctness does not depend on it being identical. What's important is that -// this function return false in situations where v should not be sent as part -// of a PATCH operation. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} diff --git a/vendor/google.golang.org/api/gensupport/jsonfloat.go b/vendor/google.golang.org/api/gensupport/jsonfloat.go deleted file mode 100644 index cb02335d22d..00000000000 --- a/vendor/google.golang.org/api/gensupport/jsonfloat.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gensupport - -import ( - "encoding/json" - "errors" - "fmt" - "math" -) - -// JSONFloat64 is a float64 that supports proper unmarshaling of special float -// values in JSON, according to -// https://developers.google.com/protocol-buffers/docs/proto3#json. Although -// that is a proto-to-JSON spec, it applies to all Google APIs. -// -// The jsonpb package -// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has -// similar functionality, but only for direct translation from proto messages -// to JSON. 
-type JSONFloat64 float64 - -func (f *JSONFloat64) UnmarshalJSON(data []byte) error { - var ff float64 - if err := json.Unmarshal(data, &ff); err == nil { - *f = JSONFloat64(ff) - return nil - } - var s string - if err := json.Unmarshal(data, &s); err == nil { - switch s { - case "NaN": - ff = math.NaN() - case "Infinity": - ff = math.Inf(1) - case "-Infinity": - ff = math.Inf(-1) - default: - return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s) - } - *f = JSONFloat64(ff) - return nil - } - return errors.New("google.golang.org/api/internal: data not float or string") -} diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go deleted file mode 100644 index f3e77fc5297..00000000000 --- a/vendor/google.golang.org/api/gensupport/media.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/textproto" - - "google.golang.org/api/googleapi" -) - -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. -type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. - err error // set to any error encountered while reading bytes to be sniffed. - - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. 
- _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed. -func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. - cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// If the content type is already known, it can be specified via ctype. -// Otherwise, the content of media will be sniffed to determine the content type. -// If media implements googleapi.ContentTyper (deprecated), this will be used -// instead of sniffing the content. -// After calling DetectContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. -func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { - // Note: callers could avoid calling DetectContentType if ctype != "", - // but doing the check inside this function reduces the amount of - // generated code. - if ctype != "" { - return media, ctype - } - - // For backwards compatability, allow clients to set content - // type by providing a ContentTyper for media. 
- if typer, ok := media.(googleapi.ContentTyper); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} - -type typeReader struct { - io.Reader - typ string -} - -// multipartReader combines the contents of multiple readers to creat a multipart/related HTTP body. -// Close must be called if reads from the multipartReader are abandoned before reaching EOF. -type multipartReader struct { - pr *io.PipeReader - pipeOpen bool - ctype string -} - -func newMultipartReader(parts []typeReader) *multipartReader { - mp := &multipartReader{pipeOpen: true} - var pw *io.PipeWriter - mp.pr, pw = io.Pipe() - mpw := multipart.NewWriter(pw) - mp.ctype = "multipart/related; boundary=" + mpw.Boundary() - go func() { - for _, part := range parts { - w, err := mpw.CreatePart(typeHeader(part.typ)) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err)) - return - } - _, err = io.Copy(w, part.Reader) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err)) - return - } - } - - mpw.Close() - pw.Close() - }() - return mp -} - -func (mp *multipartReader) Read(data []byte) (n int, err error) { - return mp.pr.Read(data) -} - -func (mp *multipartReader) Close() error { - if !mp.pipeOpen { - return nil - } - mp.pipeOpen = false - return mp.pr.Close() -} - -// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body. -// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary. -// -// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. 
-func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { - mp := newMultipartReader([]typeReader{ - {body, bodyContentType}, - {media, mediaContentType}, - }) - return mp, mp.ctype -} - -func typeHeader(contentType string) textproto.MIMEHeader { - h := make(textproto.MIMEHeader) - if contentType != "" { - h.Set("Content-Type", contentType) - } - return h -} - -// PrepareUpload determines whether the data in the supplied reader should be -// uploaded in a single request, or in sequential chunks. -// chunkSize is the size of the chunk that media should be split into. -// -// If chunkSize is zero, media is returned as the first value, and the other -// two return values are nil, true. -// -// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the -// contents of media fit in a single chunk. -// -// After PrepareUpload has been called, media should no longer be used: the -// media content should be accessed via one of the return values. -func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) { - if chunkSize == 0 { // do not chunk - return media, nil, true - } - mb = NewMediaBuffer(media, chunkSize) - _, _, _, err := mb.Chunk() - // If err is io.EOF, we can upload this in a single request. Otherwise, err is - // either nil or a non-EOF error. If it is the latter, then the next call to - // mb.Chunk will return the same error. Returning a MediaBuffer ensures that this - // error will be handled at some point. - return nil, mb, err == io.EOF -} - -// MediaInfo holds information for media uploads. It is intended for use by generated -// code only. -type MediaInfo struct { - // At most one of Media and MediaBuffer will be set. - media io.Reader - buffer *MediaBuffer - singleChunk bool - mType string - size int64 // mediaSize, if known. Used only for calls to progressUpdater_. 
- progressUpdater googleapi.ProgressUpdater -} - -// NewInfoFromMedia should be invoked from the Media method of a call. It returns a -// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer -// if needed. -func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { - mi := &MediaInfo{} - opts := googleapi.ProcessMediaOptions(options) - if !opts.ForceEmptyContentType { - r, mi.mType = DetermineContentType(r, opts.ContentType) - } - mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) - return mi -} - -// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a -// call. It returns a MediaInfo using the given reader, size and media type. -func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { - rdr := ReaderAtToReader(r, size) - rdr, mType := DetermineContentType(rdr, mediaType) - return &MediaInfo{ - size: size, - mType: mType, - buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize), - media: nil, - singleChunk: false, - } -} - -func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) { - if mi != nil { - mi.progressUpdater = pu - } -} - -// UploadType determines the type of upload: a single request, or a resumable -// series of requests. -func (mi *MediaInfo) UploadType() string { - if mi.singleChunk { - return "multipart" - } - return "resumable" -} - -// UploadRequest sets up an HTTP request for media upload. It adds headers -// as necessary, and returns a replacement for the body. -func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, cleanup func()) { - cleanup = func() {} - if mi == nil { - return body, cleanup - } - var media io.Reader - if mi.media != nil { - // This only happens when the caller has turned off chunking. In that - // case, we write all of media in a single non-retryable request. 
- media = mi.media - } else if mi.singleChunk { - // The data fits in a single chunk, which has now been read into the MediaBuffer. - // We obtain that chunk so we can write it in a single request. The request can - // be retried because the data is stored in the MediaBuffer. - media, _, _, _ = mi.buffer.Chunk() - } - if media != nil { - combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) - cleanup = func() { combined.Close() } - reqHeaders.Set("Content-Type", ctype) - body = combined - } - if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { - reqHeaders.Set("X-Upload-Content-Type", mi.mType) - } - return body, cleanup -} - -// ResumableUpload returns an appropriately configured ResumableUpload value if the -// upload is resumable, or nil otherwise. -func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { - if mi == nil || mi.singleChunk { - return nil - } - return &ResumableUpload{ - URI: locURI, - Media: mi.buffer, - MediaType: mi.mType, - Callback: func(curr int64) { - if mi.progressUpdater != nil { - mi.progressUpdater(curr, mi.size) - } - }, - } -} diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/gensupport/params.go deleted file mode 100644 index 3b3c743967e..00000000000 --- a/vendor/google.golang.org/api/gensupport/params.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "net/url" - - "google.golang.org/api/googleapi" -) - -// URLParams is a simplified replacement for url.Values -// that safely builds up URL parameters for encoding. -type URLParams map[string][]string - -// Get returns the first value for the given key, or "". -func (u URLParams) Get(key string) string { - vs := u[key] - if len(vs) == 0 { - return "" - } - return vs[0] -} - -// Set sets the key to value. 
-// It replaces any existing values. -func (u URLParams) Set(key, value string) { - u[key] = []string{value} -} - -// SetMulti sets the key to an array of values. -// It replaces any existing values. -// Note that values must not be modified after calling SetMulti -// so the caller is responsible for making a copy if necessary. -func (u URLParams) SetMulti(key string, values []string) { - u[key] = values -} - -// Encode encodes the values into ``URL encoded'' form -// ("bar=baz&foo=quux") sorted by key. -func (u URLParams) Encode() string { - return url.Values(u).Encode() -} - -func SetOptions(u URLParams, opts ...googleapi.CallOption) { - for _, o := range opts { - u.Set(o.Get()) - } -} diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go deleted file mode 100644 index dcd591f7ff6..00000000000 --- a/vendor/google.golang.org/api/gensupport/resumable.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "errors" - "fmt" - "io" - "net/http" - "sync" - "time" - - "golang.org/x/net/context" -) - -const ( - // statusTooManyRequests is returned by the storage API if the - // per-project limits have been temporarily exceeded. The request - // should be retried. - // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes - statusTooManyRequests = 429 -) - -// ResumableUpload is used by the generated APIs to provide resumable uploads. -// It is not used by developers directly. -type ResumableUpload struct { - Client *http.Client - // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". - URI string - UserAgent string // User-Agent for header of the request - // Media is the object being uploaded. 
- Media *MediaBuffer - // MediaType defines the media type, e.g. "image/jpeg". - MediaType string - - mu sync.Mutex // guards progress - progress int64 // number of bytes uploaded so far - - // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. - Callback func(int64) - - // If not specified, a default exponential backoff strategy will be used. - Backoff BackoffStrategy -} - -// Progress returns the number of bytes uploaded at this point. -func (rx *ResumableUpload) Progress() int64 { - rx.mu.Lock() - defer rx.mu.Unlock() - return rx.progress -} - -// doUploadRequest performs a single HTTP request to upload data. -// off specifies the offset in rx.Media from which data is drawn. -// size is the number of bytes in data. -// final specifies whether data is the final chunk to be uploaded. -func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) { - req, err := http.NewRequest("POST", rx.URI, data) - if err != nil { - return nil, err - } - - req.ContentLength = size - var contentRange string - if final { - if size == 0 { - contentRange = fmt.Sprintf("bytes */%v", off) - } else { - contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size) - } - } else { - contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1) - } - req.Header.Set("Content-Range", contentRange) - req.Header.Set("Content-Type", rx.MediaType) - req.Header.Set("User-Agent", rx.UserAgent) - - // Google's upload endpoint uses status code 308 for a - // different purpose than the "308 Permanent Redirect" - // since-standardized in RFC 7238. Because of the conflict in - // semantics, Google added this new request header which - // causes it to not use "308" and instead reply with 200 OK - // and sets the upload-specific "X-HTTP-Status-Code-Override: - // 308" response header. 
- req.Header.Set("X-GUploader-No-308", "yes") - - return SendRequest(ctx, rx.Client, req) -} - -func statusResumeIncomplete(resp *http.Response) bool { - // This is how the server signals "status resume incomplete" - // when X-GUploader-No-308 is set to "yes": - return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308" -} - -// reportProgress calls a user-supplied callback to report upload progress. -// If old==updated, the callback is not called. -func (rx *ResumableUpload) reportProgress(old, updated int64) { - if updated-old == 0 { - return - } - rx.mu.Lock() - rx.progress = updated - rx.mu.Unlock() - if rx.Callback != nil { - rx.Callback(updated) - } -} - -// transferChunk performs a single HTTP request to upload a single chunk from rx.Media. -func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) { - chunk, off, size, err := rx.Media.Chunk() - - done := err == io.EOF - if !done && err != nil { - return nil, err - } - - res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done) - if err != nil { - return res, err - } - - // We sent "X-GUploader-No-308: yes" (see comment elsewhere in - // this file), so we don't expect to get a 308. - if res.StatusCode == 308 { - return nil, errors.New("unexpected 308 response status code") - } - - if res.StatusCode == http.StatusOK { - rx.reportProgress(off, off+int64(size)) - } - - if statusResumeIncomplete(res) { - rx.Media.Next() - } - return res, nil -} - -func contextDone(ctx context.Context) bool { - select { - case <-ctx.Done(): - return true - default: - return false - } -} - -// Upload starts the process of a resumable upload with a cancellable context. -// It retries using the provided back off strategy until cancelled or the -// strategy indicates to stop retrying. -// It is called from the auto-generated API code and is not visible to the user. 
-// Before sending an HTTP request, Upload calls any registered hook functions, -// and calls the returned functions after the request returns (see send.go). -// rx is private to the auto-generated API code. -// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. -func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { - var pause time.Duration - backoff := rx.Backoff - if backoff == nil { - backoff = DefaultBackoffStrategy() - } - - for { - // Ensure that we return in the case of cancelled context, even if pause is 0. - if contextDone(ctx) { - return nil, ctx.Err() - } - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-time.After(pause): - } - - resp, err = rx.transferChunk(ctx) - - var status int - if resp != nil { - status = resp.StatusCode - } - - // Check if we should retry the request. - if shouldRetry(status, err) { - var retry bool - pause, retry = backoff.Pause() - if retry { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - continue - } - } - - // If the chunk was uploaded successfully, but there's still - // more to go, upload the next chunk without any delay. - if statusResumeIncomplete(resp) { - pause = 0 - backoff.Reset() - resp.Body.Close() - continue - } - - // It's possible for err and resp to both be non-nil here, but we expose a simpler - // contract to our callers: exactly one of resp and err will be non-nil. This means - // that any response body must be closed here before returning a non-nil error. - if err != nil { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return nil, err - } - - return resp, nil - } -} diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go deleted file mode 100644 index c60b3c394b3..00000000000 --- a/vendor/google.golang.org/api/gensupport/retry.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gensupport - -import ( - "io" - "net" - "net/http" - "time" - - "golang.org/x/net/context" -) - -// Retry invokes the given function, retrying it multiple times if the connection failed or -// the HTTP status response indicates the request should be attempted again. ctx may be nil. -func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) { - for { - resp, err := f() - - var status int - if resp != nil { - status = resp.StatusCode - } - - // Return if we shouldn't retry. - pause, retry := backoff.Pause() - if !shouldRetry(status, err) || !retry { - return resp, err - } - - // Ensure the response body is closed, if any. - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - - // Pause, but still listen to ctx.Done if context is not nil. - var done <-chan struct{} - if ctx != nil { - done = ctx.Done() - } - select { - case <-done: - return nil, ctx.Err() - case <-time.After(pause): - } - } -} - -// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests. -func DefaultBackoffStrategy() BackoffStrategy { - return &ExponentialBackoff{ - Base: 250 * time.Millisecond, - Max: 16 * time.Second, - } -} - -// shouldRetry returns true if the HTTP response / error indicates that the -// request should be attempted again. 
-func shouldRetry(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if status == statusTooManyRequests { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - if err, ok := err.(net.Error); ok { - return err.Temporary() - } - return false -} diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go deleted file mode 100644 index 092044f448c..00000000000 --- a/vendor/google.golang.org/api/gensupport/send.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "errors" - "net/http" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" -) - -// Hook is the type of a function that is called once before each HTTP request -// that is sent by a generated API. It returns a function that is called after -// the request returns. -// Hooks are not called if the context is nil. -type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response) - -var hooks []Hook - -// RegisterHook registers a Hook to be called before each HTTP request by a -// generated API. Hooks are called in the order they are registered. Each -// hook can return a function; if it is non-nil, it is called after the HTTP -// request returns. These functions are called in the reverse order. -// RegisterHook should not be called concurrently with itself or SendRequest. -func RegisterHook(h Hook) { - hooks = append(hooks, h) -} - -// SendRequest sends a single HTTP request using the given client. -// If ctx is non-nil, it calls all hooks, then sends the request with -// ctxhttp.Do, then calls any functions returned by the hooks in reverse order. 
-func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - // Disallow Accept-Encoding because it interferes with the automatic gzip handling - // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. - if _, ok := req.Header["Accept-Encoding"]; ok { - return nil, errors.New("google api: custom Accept-Encoding headers not allowed") - } - if ctx == nil { - return client.Do(req) - } - // Call hooks in order of registration, store returned funcs. - post := make([]func(resp *http.Response), len(hooks)) - for i, h := range hooks { - fn := h(ctx, req) - post[i] = fn - } - - // Send request. - resp, err := ctxhttp.Do(ctx, client, req) - - // Call returned funcs in reverse order. - for i := len(post) - 1; i >= 0; i-- { - if fn := post[i]; fn != nil { - fn(resp) - } - } - return resp, err -} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go deleted file mode 100644 index f6e15be35da..00000000000 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package googleapi contains the common code shared by all Google API -// libraries. -package googleapi // import "google.golang.org/api/googleapi" - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "google.golang.org/api/googleapi/internal/uritemplates" -) - -// ContentTyper is an interface for Readers which know (or would like -// to override) their Content-Type. If a media body doesn't implement -// ContentTyper, the type is sniffed from the content using -// http.DetectContentType. -type ContentTyper interface { - ContentType() string -} - -// A SizeReaderAt is a ReaderAt with a Size method. 
-// An io.SectionReader implements SizeReaderAt. -type SizeReaderAt interface { - io.ReaderAt - Size() int64 -} - -// ServerResponse is embedded in each Do response and -// provides the HTTP status code and header sent by the server. -type ServerResponse struct { - // HTTPStatusCode is the server's response status code. - // When using a resource method's Do call, this will always be in the 2xx range. - HTTPStatusCode int - // Header contains the response header fields from the server. - Header http.Header -} - -const ( - Version = "0.5" - - // UserAgent is the header string used to identify this package. - UserAgent = "google-api-go-client/" + Version - - // The default chunk size to use for resumable uploads if not specified by the user. - DefaultUploadChunkSize = 8 * 1024 * 1024 - - // The minimum chunk size that can be used for resumable uploads. All - // user-specified chunk sizes must be multiple of this value. - MinUploadChunkSize = 256 * 1024 -) - -// Error contains an error response from the server. -type Error struct { - // Code is the HTTP response status code and will always be populated. - Code int `json:"code"` - // Message is the server response message and is only populated when - // explicitly referenced by the JSON server response. - Message string `json:"message"` - // Body is the raw response returned by the server. - // It is often but not always JSON, depending on how the request fails. - Body string - // Header contains the response header fields from the server. - Header http.Header - - Errors []ErrorItem -} - -// ErrorItem is a detailed error code & message from the Google API frontend. -type ErrorItem struct { - // Reason is the typed error code. For example: "some_example". - Reason string `json:"reason"` - // Message is the human-readable description of the error. 
- Message string `json:"message"` -} - -func (e *Error) Error() string { - if len(e.Errors) == 0 && e.Message == "" { - return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body) - } - var buf bytes.Buffer - fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code) - if e.Message != "" { - fmt.Fprintf(&buf, "%s", e.Message) - } - if len(e.Errors) == 0 { - return strings.TrimSpace(buf.String()) - } - if len(e.Errors) == 1 && e.Errors[0].Message == e.Message { - fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason) - return buf.String() - } - fmt.Fprintln(&buf, "\nMore details:") - for _, v := range e.Errors { - fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message) - } - return buf.String() -} - -type errorReply struct { - Error *Error `json:"error"` -} - -// CheckResponse returns an error (of type *Error) if the response -// status code is not 2xx. -func CheckResponse(res *http.Response) error { - if res.StatusCode >= 200 && res.StatusCode <= 299 { - return nil - } - slurp, err := ioutil.ReadAll(res.Body) - if err == nil { - jerr := new(errorReply) - err = json.Unmarshal(slurp, jerr) - if err == nil && jerr.Error != nil { - if jerr.Error.Code == 0 { - jerr.Error.Code = res.StatusCode - } - jerr.Error.Body = string(slurp) - return jerr.Error - } - } - return &Error{ - Code: res.StatusCode, - Body: string(slurp), - Header: res.Header, - } -} - -// IsNotModified reports whether err is the result of the -// server replying with http.StatusNotModified. -// Such error values are sometimes returned by "Do" methods -// on calls when If-None-Match is used. -func IsNotModified(err error) bool { - if err == nil { - return false - } - ae, ok := err.(*Error) - return ok && ae.Code == http.StatusNotModified -} - -// CheckMediaResponse returns an error (of type *Error) if the response -// status code is not 2xx. Unlike CheckResponse it does not assume the -// body is a JSON error document. -// It is the caller's responsibility to close res.Body. 
-func CheckMediaResponse(res *http.Response) error { - if res.StatusCode >= 200 && res.StatusCode <= 299 { - return nil - } - slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) - return &Error{ - Code: res.StatusCode, - Body: string(slurp), - } -} - -type MarshalStyle bool - -var WithDataWrapper = MarshalStyle(true) -var WithoutDataWrapper = MarshalStyle(false) - -func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { - buf := new(bytes.Buffer) - if wrap { - buf.Write([]byte(`{"data": `)) - } - err := json.NewEncoder(buf).Encode(v) - if err != nil { - return nil, err - } - if wrap { - buf.Write([]byte(`}`)) - } - return buf, nil -} - -// endingWithErrorReader from r until it returns an error. If the -// final error from r is io.EOF and e is non-nil, e is used instead. -type endingWithErrorReader struct { - r io.Reader - e error -} - -func (er endingWithErrorReader) Read(p []byte) (n int, err error) { - n, err = er.r.Read(p) - if err == io.EOF && er.e != nil { - err = er.e - } - return -} - -// countingWriter counts the number of bytes it receives to write, but -// discards them. -type countingWriter struct { - n *int64 -} - -func (w countingWriter) Write(p []byte) (int, error) { - *w.n += int64(len(p)) - return len(p), nil -} - -// ProgressUpdater is a function that is called upon every progress update of a resumable upload. -// This is the only part of a resumable upload (from googleapi) that is usable by the developer. -// The remaining usable pieces of resumable uploads is exposed in each auto-generated API. -type ProgressUpdater func(current, total int64) - -type MediaOption interface { - setOptions(o *MediaOptions) -} - -type contentTypeOption string - -func (ct contentTypeOption) setOptions(o *MediaOptions) { - o.ContentType = string(ct) - if o.ContentType == "" { - o.ForceEmptyContentType = true - } -} - -// ContentType returns a MediaOption which sets the Content-Type header for media uploads. 
-// If ctype is empty, the Content-Type header will be omitted. -func ContentType(ctype string) MediaOption { - return contentTypeOption(ctype) -} - -type chunkSizeOption int - -func (cs chunkSizeOption) setOptions(o *MediaOptions) { - size := int(cs) - if size%MinUploadChunkSize != 0 { - size += MinUploadChunkSize - (size % MinUploadChunkSize) - } - o.ChunkSize = size -} - -// ChunkSize returns a MediaOption which sets the chunk size for media uploads. -// size will be rounded up to the nearest multiple of 256K. -// Media which contains fewer than size bytes will be uploaded in a single request. -// Media which contains size bytes or more will be uploaded in separate chunks. -// If size is zero, media will be uploaded in a single request. -func ChunkSize(size int) MediaOption { - return chunkSizeOption(size) -} - -// MediaOptions stores options for customizing media upload. It is not used by developers directly. -type MediaOptions struct { - ContentType string - ForceEmptyContentType bool - - ChunkSize int -} - -// ProcessMediaOptions stores options from opts in a MediaOptions. -// It is not used by developers directly. -func ProcessMediaOptions(opts []MediaOption) *MediaOptions { - mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize} - for _, o := range opts { - o.setOptions(mo) - } - return mo -} - -func ResolveRelative(basestr, relstr string) string { - u, _ := url.Parse(basestr) - rel, _ := url.Parse(relstr) - u = u.ResolveReference(rel) - us := u.String() - us = strings.Replace(us, "%7B", "{", -1) - us = strings.Replace(us, "%7D", "}", -1) - return us -} - -// Expand subsitutes any {encoded} strings in the URL passed in using -// the map supplied. -// -// This calls SetOpaque to avoid encoding of the parameters in the URL path. 
-func Expand(u *url.URL, expansions map[string]string) { - escaped, unescaped, err := uritemplates.Expand(u.Path, expansions) - if err == nil { - u.Path = unescaped - u.RawPath = escaped - } -} - -// CloseBody is used to close res.Body. -// Prior to calling Close, it also tries to Read a small amount to see an EOF. -// Not seeing an EOF can prevent HTTP Transports from reusing connections. -func CloseBody(res *http.Response) { - if res == nil || res.Body == nil { - return - } - // Justification for 3 byte reads: two for up to "\r\n" after - // a JSON/XML document, and then 1 to see EOF if we haven't yet. - // TODO(bradfitz): detect Go 1.3+ and skip these reads. - // See https://codereview.appspot.com/58240043 - // and https://codereview.appspot.com/49570044 - buf := make([]byte, 1) - for i := 0; i < 3; i++ { - _, err := res.Body.Read(buf) - if err != nil { - break - } - } - res.Body.Close() - -} - -// VariantType returns the type name of the given variant. -// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. -// This is used to support "variant" APIs that can return one of a number of different types. -func VariantType(t map[string]interface{}) string { - s, _ := t["type"].(string) - return s -} - -// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. -// This is used to support "variant" APIs that can return one of a number of different types. -// It reports whether the conversion was successful. -func ConvertVariant(v map[string]interface{}, dst interface{}) bool { - var buf bytes.Buffer - err := json.NewEncoder(&buf).Encode(v) - if err != nil { - return false - } - return json.Unmarshal(buf.Bytes(), dst) == nil -} - -// A Field names a field to be retrieved with a partial response. 
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// -// Partial responses can dramatically reduce the amount of data that must be sent to your application. -// In order to request partial responses, you can specify the full list of fields -// that your application needs by adding the Fields option to your request. -// -// Field strings use camelCase with leading lower-case characters to identify fields within the response. -// -// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, -// you could request just those fields like this: -// -// svc.Events.List().Fields("nextPageToken", "items/id").Do() -// -// or if you were also interested in each Item's "Updated" field, you can combine them like this: -// -// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() -// -// More information about field formatting can be found here: -// https://developers.google.com/+/api/#fields-syntax -// -// Another way to find field names is through the Google API explorer: -// https://developers.google.com/apis-explorer/#p/ -type Field string - -// CombineFields combines fields into a single string. -func CombineFields(s []Field) string { - r := make([]string, len(s)) - for i, v := range s { - r[i] = string(v) - } - return strings.Join(r, ",") -} - -// A CallOption is an optional argument to an API call. -// It should be treated as an opaque value by users of Google APIs. -// -// A CallOption is something that configures an API call in a way that is -// not specific to that API; for instance, controlling the quota user for -// an API call is common across many APIs, and is thus a CallOption. -type CallOption interface { - Get() (key, value string) -} - -// QuotaUser returns a CallOption that will set the quota user for a call. -// The quota user can be used by server-side applications to control accounting. 
-// It can be an arbitrary string up to 40 characters, and will override UserIP -// if both are provided. -func QuotaUser(u string) CallOption { return quotaUser(u) } - -type quotaUser string - -func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } - -// UserIP returns a CallOption that will set the "userIp" parameter of a call. -// This should be the IP address of the originating request. -func UserIP(ip string) CallOption { return userIP(ip) } - -type userIP string - -func (i userIP) Get() (string, string) { return "userIp", string(i) } - -// Trace returns a CallOption that enables diagnostic tracing for a call. -// traceToken is an ID supplied by Google support. -func Trace(traceToken string) CallOption { return traceTok(traceToken) } - -type traceTok string - -func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } - -// TODO: Fields too diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE deleted file mode 100644 index de9c88cb65c..00000000000 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright (c) 2013 Joshua Tacoma - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go deleted file mode 100644 index 63bf0538301..00000000000 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2013 Joshua Tacoma. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uritemplates is a level 3 implementation of RFC 6570 (URI -// Template, http://tools.ietf.org/html/rfc6570). -// uritemplates does not support composite values (in Go: slices or maps) -// and so does not qualify as a level 4 implementation. -package uritemplates - -import ( - "bytes" - "errors" - "regexp" - "strconv" - "strings" -) - -var ( - unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") - reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") - validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") - hex = []byte("0123456789ABCDEF") -) - -func pctEncode(src []byte) []byte { - dst := make([]byte, len(src)*3) - for i, b := range src { - buf := dst[i*3 : i*3+3] - buf[0] = 0x25 - buf[1] = hex[b/16] - buf[2] = hex[b%16] - } - return dst -} - -// pairWriter is a convenience struct which allows escaped and unescaped -// versions of the template to be written in parallel. 
-type pairWriter struct { - escaped, unescaped bytes.Buffer -} - -// Write writes the provided string directly without any escaping. -func (w *pairWriter) Write(s string) { - w.escaped.WriteString(s) - w.unescaped.WriteString(s) -} - -// Escape writes the provided string, escaping the string for the -// escaped output. -func (w *pairWriter) Escape(s string, allowReserved bool) { - w.unescaped.WriteString(s) - if allowReserved { - w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode)) - } else { - w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) - } -} - -// Escaped returns the escaped string. -func (w *pairWriter) Escaped() string { - return w.escaped.String() -} - -// Unescaped returns the unescaped string. -func (w *pairWriter) Unescaped() string { - return w.unescaped.String() -} - -// A uriTemplate is a parsed representation of a URI template. -type uriTemplate struct { - raw string - parts []templatePart -} - -// parse parses a URI template string into a uriTemplate object. 
-func parse(rawTemplate string) (*uriTemplate, error) { - split := strings.Split(rawTemplate, "{") - parts := make([]templatePart, len(split)*2-1) - for i, s := range split { - if i == 0 { - if strings.Contains(s, "}") { - return nil, errors.New("unexpected }") - } - parts[i].raw = s - continue - } - subsplit := strings.Split(s, "}") - if len(subsplit) != 2 { - return nil, errors.New("malformed template") - } - expression := subsplit[0] - var err error - parts[i*2-1], err = parseExpression(expression) - if err != nil { - return nil, err - } - parts[i*2].raw = subsplit[1] - } - return &uriTemplate{ - raw: rawTemplate, - parts: parts, - }, nil -} - -type templatePart struct { - raw string - terms []templateTerm - first string - sep string - named bool - ifemp string - allowReserved bool -} - -type templateTerm struct { - name string - explode bool - truncate int -} - -func parseExpression(expression string) (result templatePart, err error) { - switch expression[0] { - case '+': - result.sep = "," - result.allowReserved = true - expression = expression[1:] - case '.': - result.first = "." - result.sep = "." - expression = expression[1:] - case '/': - result.first = "/" - result.sep = "/" - expression = expression[1:] - case ';': - result.first = ";" - result.sep = ";" - result.named = true - expression = expression[1:] - case '?': - result.first = "?" 
- result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '&': - result.first = "&" - result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '#': - result.first = "#" - result.sep = "," - result.allowReserved = true - expression = expression[1:] - default: - result.sep = "," - } - rawterms := strings.Split(expression, ",") - result.terms = make([]templateTerm, len(rawterms)) - for i, raw := range rawterms { - result.terms[i], err = parseTerm(raw) - if err != nil { - break - } - } - return result, err -} - -func parseTerm(term string) (result templateTerm, err error) { - // TODO(djd): Remove "*" suffix parsing once we check that no APIs have - // mistakenly used that attribute. - if strings.HasSuffix(term, "*") { - result.explode = true - term = term[:len(term)-1] - } - split := strings.Split(term, ":") - if len(split) == 1 { - result.name = term - } else if len(split) == 2 { - result.name = split[0] - var parsed int64 - parsed, err = strconv.ParseInt(split[1], 10, 0) - result.truncate = int(parsed) - } else { - err = errors.New("multiple colons in same term") - } - if !validname.MatchString(result.name) { - err = errors.New("not a valid name: " + result.name) - } - if result.explode && result.truncate > 0 { - err = errors.New("both explode and prefix modifers on same term") - } - return result, err -} - -// Expand expands a URI template with a set of values to produce the -// resultant URI. Two forms of the result are returned: one with all the -// elements escaped, and one with the elements unescaped. 
-func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) { - var w pairWriter - for _, p := range t.parts { - p.expand(&w, values) - } - return w.Escaped(), w.Unescaped() -} - -func (tp *templatePart) expand(w *pairWriter, values map[string]string) { - if len(tp.raw) > 0 { - w.Write(tp.raw) - return - } - var first = true - for _, term := range tp.terms { - value, exists := values[term.name] - if !exists { - continue - } - if first { - w.Write(tp.first) - first = false - } else { - w.Write(tp.sep) - } - tp.expandString(w, term, value) - } -} - -func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) { - if tp.named { - w.Write(name) - if empty { - w.Write(tp.ifemp) - } else { - w.Write("=") - } - } -} - -func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) { - if len(s) > t.truncate && t.truncate > 0 { - s = s[:t.truncate] - } - tp.expandName(w, t.name, len(s) == 0) - w.Escape(s, tp.allowReserved) -} diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go deleted file mode 100644 index 2e70b81543d..00000000000 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uritemplates - -// Expand parses then expands a URI template with a set of values to produce -// the resultant URI. Two forms of the result are returned: one with all the -// elements escaped, and one with the elements unescaped. 
-func Expand(path string, values map[string]string) (escaped, unescaped string, err error) { - template, err := parse(path) - if err != nil { - return "", "", err - } - escaped, unescaped = template.Expand(values) - return escaped, unescaped, nil -} diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go deleted file mode 100644 index eca1ea25077..00000000000 --- a/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package transport contains HTTP transports used to make -// authenticated API requests. -package transport - -import ( - "errors" - "net/http" -) - -// APIKey is an HTTP Transport which wraps an underlying transport and -// appends an API Key "key" parameter to the URL of outgoing requests. -type APIKey struct { - // Key is the API Key to set on requests. - Key string - - // Transport is the underlying HTTP transport. - // If nil, http.DefaultTransport is used. - Transport http.RoundTripper -} - -func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { - rt := t.Transport - if rt == nil { - rt = http.DefaultTransport - if rt == nil { - return nil, errors.New("googleapi/transport: no Transport specified or available") - } - } - newReq := *req - args := newReq.URL.Query() - args.Set("key", t.Key) - newReq.URL.RawQuery = args.Encode() - return rt.RoundTrip(&newReq) -} diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go deleted file mode 100644 index c8fdd541611..00000000000 --- a/vendor/google.golang.org/api/googleapi/types.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package googleapi - -import ( - "encoding/json" - "errors" - "strconv" -) - -// Int64s is a slice of int64s that marshal as quoted strings in JSON. -type Int64s []int64 - -func (q *Int64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return err - } - *q = append(*q, int64(v)) - } - return nil -} - -// Int32s is a slice of int32s that marshal as quoted strings in JSON. -type Int32s []int32 - -func (q *Int32s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return err - } - *q = append(*q, int32(v)) - } - return nil -} - -// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. -type Uint64s []uint64 - -func (q *Uint64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return err - } - *q = append(*q, uint64(v)) - } - return nil -} - -// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. -type Uint32s []uint32 - -func (q *Uint32s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return err - } - *q = append(*q, uint32(v)) - } - return nil -} - -// Float64s is a slice of float64s that marshal as quoted strings in JSON. 
-type Float64s []float64 - -func (q *Float64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseFloat(s, 64) - if err != nil { - return err - } - *q = append(*q, float64(v)) - } - return nil -} - -func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { - dst := make([]byte, 0, 2+n*10) // somewhat arbitrary - dst = append(dst, '[') - for i := 0; i < n; i++ { - if i > 0 { - dst = append(dst, ',') - } - dst = append(dst, '"') - dst = fn(dst, i) - dst = append(dst, '"') - } - dst = append(dst, ']') - return dst, nil -} - -func (s Int64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, s[i], 10) - }) -} - -func (s Int32s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, int64(s[i]), 10) - }) -} - -func (s Uint64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, s[i], 10) - }) -} - -func (s Uint32s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, uint64(s[i]), 10) - }) -} - -func (s Float64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendFloat(dst, s[i], 'g', -1, 64) - }) -} - -// RawMessage is a raw encoded JSON value. -// It is identical to json.RawMessage, except it does not suffer from -// https://golang.org/issue/14493. -type RawMessage []byte - -// MarshalJSON returns m. -func (m RawMessage) MarshalJSON() ([]byte, error) { - return m, nil -} - -// UnmarshalJSON sets *m to a copy of data. 
-func (m *RawMessage) UnmarshalJSON(data []byte) error { - if m == nil { - return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer") - } - *m = append((*m)[:0], data...) - return nil -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { return &v } - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { return &v } - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { return &v } - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { return &v } - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go deleted file mode 100644 index b546b63b472..00000000000 --- a/vendor/google.golang.org/api/internal/creds.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" -) - -// Creds returns credential information obtained from DialSettings, or if none, then -// it returns default credential information. -func Creds(ctx context.Context, ds *DialSettings) (*google.DefaultCredentials, error) { - if ds.CredentialsFile != "" { - return credFileTokenSource(ctx, ds.CredentialsFile, ds.Scopes...) - } - if ds.TokenSource != nil { - return &google.DefaultCredentials{TokenSource: ds.TokenSource}, nil - } - return google.FindDefaultCredentials(ctx, ds.Scopes...) -} - -// credFileTokenSource reads a refresh token file or a service account and returns -// a TokenSource constructed from the config. -func credFileTokenSource(ctx context.Context, filename string, scope ...string) (*google.DefaultCredentials, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("cannot read credentials file: %v", err) - } - // See if it is a refresh token credentials file first. - ts, ok, err := refreshTokenTokenSource(ctx, data, scope...) - if err != nil { - return nil, err - } - if ok { - return &google.DefaultCredentials{ - TokenSource: ts, - JSON: data, - }, nil - } - - // If not, it should be a service account. - cfg, err := google.JWTConfigFromJSON(data, scope...) - if err != nil { - return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err) - } - // jwt.Config does not expose the project ID, so re-unmarshal to get it. 
- var pid struct { - ProjectID string `json:"project_id"` - } - if err := json.Unmarshal(data, &pid); err != nil { - return nil, err - } - return &google.DefaultCredentials{ - ProjectID: pid.ProjectID, - TokenSource: cfg.TokenSource(ctx), - JSON: data, - }, nil -} - -func refreshTokenTokenSource(ctx context.Context, data []byte, scope ...string) (oauth2.TokenSource, bool, error) { - var c cred - if err := json.Unmarshal(data, &c); err != nil { - return nil, false, fmt.Errorf("cannot unmarshal credentials file: %v", err) - } - if c.ClientID == "" || c.ClientSecret == "" || c.RefreshToken == "" || c.Type != "authorized_user" { - return nil, false, nil - } - cfg := &oauth2.Config{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - Endpoint: google.Endpoint, - RedirectURL: "urn:ietf:wg:oauth:2.0:oob", - Scopes: scope, - } - return cfg.TokenSource(ctx, &oauth2.Token{ - RefreshToken: c.RefreshToken, - Expiry: time.Now(), - }), true, nil -} - -type cred struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - RefreshToken string `json:"refresh_token"` - Type string `json:"type"` -} diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go deleted file mode 100644 index 4150feb6bb0..00000000000 --- a/vendor/google.golang.org/api/internal/pool.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package internal - -import ( - "errors" - "google.golang.org/grpc/naming" -) - -// PoolResolver provides a fixed list of addresses to load balance between -// and does not provide further updates. -type PoolResolver struct { - poolSize int - dialOpt *DialSettings - ch chan []*naming.Update -} - -// NewPoolResolver returns a PoolResolver -// This is an EXPERIMENTAL API and may be changed or removed in the future. -func NewPoolResolver(size int, o *DialSettings) *PoolResolver { - return &PoolResolver{poolSize: size, dialOpt: o} -} - -// Resolve returns a Watcher for the endpoint defined by the DialSettings -// provided to NewPoolResolver. -func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { - if r.dialOpt.Endpoint == "" { - return nil, errors.New("No endpoint configured") - } - addrs := make([]*naming.Update, 0, r.poolSize) - for i := 0; i < r.poolSize; i++ { - addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) - } - r.ch = make(chan []*naming.Update, 1) - r.ch <- addrs - return r, nil -} - -// Next returns a static list of updates on the first call, -// and blocks indefinitely until Close is called on subsequent calls. 
-func (r *PoolResolver) Next() ([]*naming.Update, error) { - return <-r.ch, nil -} - -func (r *PoolResolver) Close() { - close(r.ch) -} diff --git a/vendor/google.golang.org/api/internal/service-account.json b/vendor/google.golang.org/api/internal/service-account.json deleted file mode 100644 index 2cb54c292e0..00000000000 --- a/vendor/google.golang.org/api/internal/service-account.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "type": "service_account", - "project_id": "project_id", - "private_key_id": "private_key_id", - "private_key": "private_key", - "client_email": "xyz@developer.gserviceaccount.com", - "client_id": "123", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://accounts.google.com/o/oauth2/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com" -} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go deleted file mode 100644 index dde7a62496c..00000000000 --- a/vendor/google.golang.org/api/internal/settings.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internal supports the options and transport packages. 
-package internal - -import ( - "errors" - "net/http" - - "golang.org/x/oauth2" - "google.golang.org/grpc" -) - -// DialSettings holds information needed to establish a connection with a -// Google API service. -type DialSettings struct { - Endpoint string - Scopes []string - TokenSource oauth2.TokenSource - CredentialsFile string // if set, Token Source is ignored. - UserAgent string - APIKey string - HTTPClient *http.Client - GRPCDialOpts []grpc.DialOption - GRPCConn *grpc.ClientConn - NoAuth bool -} - -// Validate reports an error if ds is invalid. -func (ds *DialSettings) Validate() error { - hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" - if ds.NoAuth && hasCreds { - return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") - } - if ds.HTTPClient != nil && ds.GRPCConn != nil { - return errors.New("WithHTTPClient is incompatible with WithGRPCConn") - } - if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { - return errors.New("WithHTTPClient is incompatible with gRPC dial options") - } - return nil -} diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go deleted file mode 100644 index 0640c82311a..00000000000 --- a/vendor/google.golang.org/api/iterator/iterator.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package iterator provides support for standard Google API iterators. -// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. -package iterator - -import ( - "errors" - "fmt" - "reflect" -) - -// Done is returned by an iterator's Next method when the iteration is -// complete; when there are no more items to return. -var Done = errors.New("no more items in iterator") - -// We don't support mixed calls to Next and NextPage because they play -// with the paging state in incompatible ways. -var errMixed = errors.New("iterator: Next and NextPage called on same iterator") - -// PageInfo contains information about an iterator's paging state. -type PageInfo struct { - // Token is the token used to retrieve the next page of items from the - // API. You may set Token immediately after creating an iterator to - // begin iteration at a particular point. If Token is the empty string, - // the iterator will begin with the first eligible item. - // - // The result of setting Token after the first call to Next is undefined. - // - // After the underlying API method is called to retrieve a page of items, - // Token is set to the next-page token in the response. - Token string - - // MaxSize is the maximum number of items returned by a call to the API. - // Set MaxSize as a hint to optimize the buffering behavior of the iterator. - // If zero, the page size is determined by the underlying service. - // - // Use Pager to retrieve a page of a specific, exact size. - MaxSize int - - // The error state of the iterator. Manipulated by PageInfo.next and Pager. - // This is a latch: it starts as nil, and once set should never change. - err error - - // If true, no more calls to fetch should be made. Set to true when fetch - // returns an empty page token. The iterator is Done when this is true AND - // the buffer is empty. - atEnd bool - - // Function that fetches a page from the underlying service. 
It should pass - // the pageSize and pageToken arguments to the service, fill the buffer - // with the results from the call, and return the next-page token returned - // by the service. The function must not remove any existing items from the - // buffer. If the underlying RPC takes an int32 page size, pageSize should - // be silently truncated. - fetch func(pageSize int, pageToken string) (nextPageToken string, err error) - - // Function that clears the iterator's buffer, returning any currently buffered items. - bufLen func() int - - // Function that returns the buffer, after setting the buffer variable to nil. - takeBuf func() interface{} - - // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check - // for calls to both Next and NextPage with the same iterator. - nextCalled, nextPageCalled bool -} - -// NewPageInfo exposes internals for iterator implementations. -// It is not a stable interface. -var NewPageInfo = newPageInfo - -// If an iterator can support paging, its iterator-creating method should call -// this (via the NewPageInfo variable above). -// -// The fetch, bufLen and takeBuf arguments provide access to the -// iterator's internal slice of buffered items. They behave as described in -// PageInfo, above. -// -// The return value is the PageInfo.next method bound to the returned PageInfo value. -// (Returning it avoids exporting PageInfo.next.) -func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) { - pi := &PageInfo{ - fetch: fetch, - bufLen: bufLen, - takeBuf: takeBuf, - } - return pi, pi.next -} - -// Remaining returns the number of items available before the iterator makes another API call. -func (pi *PageInfo) Remaining() int { return pi.bufLen() } - -// next provides support for an iterator's Next function. 
An iterator's Next -// should return the error returned by next if non-nil; else it can assume -// there is at least one item in its buffer, and it should return that item and -// remove it from the buffer. -func (pi *PageInfo) next() error { - pi.nextCalled = true - if pi.err != nil { // Once we get an error, always return it. - // TODO(jba): fix so users can retry on transient errors? Probably not worth it. - return pi.err - } - if pi.nextPageCalled { - pi.err = errMixed - return pi.err - } - // Loop until we get some items or reach the end. - for pi.bufLen() == 0 && !pi.atEnd { - if err := pi.fill(pi.MaxSize); err != nil { - pi.err = err - return pi.err - } - if pi.Token == "" { - pi.atEnd = true - } - } - // Either the buffer is non-empty or pi.atEnd is true (or both). - if pi.bufLen() == 0 { - // The buffer is empty and pi.atEnd is true, i.e. the service has no - // more items. - pi.err = Done - } - return pi.err -} - -// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the -// next-page token returned by the call. -// If fill returns a non-nil error, the buffer will be empty. -func (pi *PageInfo) fill(size int) error { - tok, err := pi.fetch(size, pi.Token) - if err != nil { - pi.takeBuf() // clear the buffer - return err - } - pi.Token = tok - return nil -} - -// Pageable is implemented by iterators that support paging. -type Pageable interface { - // PageInfo returns paging information associated with the iterator. - PageInfo() *PageInfo -} - -// Pager supports retrieving iterator items a page at a time. -type Pager struct { - pageInfo *PageInfo - pageSize int -} - -// NewPager returns a pager that uses iter. Calls to its NextPage method will -// obtain exactly pageSize items, unless fewer remain. The pageToken argument -// indicates where to start the iteration. Pass the empty string to start at -// the beginning, or pass a token retrieved from a call to Pager.NextPage. 
-// -// If you use an iterator with a Pager, you must not call Next on the iterator. -func NewPager(iter Pageable, pageSize int, pageToken string) *Pager { - p := &Pager{ - pageInfo: iter.PageInfo(), - pageSize: pageSize, - } - p.pageInfo.Token = pageToken - if pageSize <= 0 { - p.pageInfo.err = errors.New("iterator: page size must be positive") - } - return p -} - -// NextPage retrieves a sequence of items from the iterator and appends them -// to slicep, which must be a pointer to a slice of the iterator's item type. -// Exactly p.pageSize items will be appended, unless fewer remain. -// -// The first return value is the page token to use for the next page of items. -// If empty, there are no more pages. Aside from checking for the end of the -// iteration, the returned page token is only needed if the iteration is to be -// resumed a later time, in another context (possibly another process). -// -// The second return value is non-nil if an error occurred. It will never be -// the special iterator sentinel value Done. To recognize the end of the -// iteration, compare nextPageToken to the empty string. -// -// It is possible for NextPage to return a single zero-length page along with -// an empty page token when there are no more items in the iteration. -func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) { - p.pageInfo.nextPageCalled = true - if p.pageInfo.err != nil { - return "", p.pageInfo.err - } - if p.pageInfo.nextCalled { - p.pageInfo.err = errMixed - return "", p.pageInfo.err - } - if p.pageInfo.bufLen() > 0 { - return "", errors.New("must call NextPage with an empty buffer") - } - // The buffer must be empty here, so takeBuf is a no-op. We call it just to get - // the buffer's type. 
- wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type()) - if slicep == nil { - return "", errors.New("nil passed to Pager.NextPage") - } - vslicep := reflect.ValueOf(slicep) - if vslicep.Type() != wantSliceType { - return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep) - } - for p.pageInfo.bufLen() < p.pageSize { - if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil { - p.pageInfo.err = err - return "", p.pageInfo.err - } - if p.pageInfo.Token == "" { - break - } - } - e := vslicep.Elem() - e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf()))) - return p.pageInfo.Token, nil -} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go deleted file mode 100644 index ffbee329511..00000000000 --- a/vendor/google.golang.org/api/option/option.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package option contains options for Google API clients. -package option - -import ( - "net/http" - - "golang.org/x/oauth2" - "google.golang.org/api/internal" - "google.golang.org/grpc" -) - -// A ClientOption is an option for a Google API client. -type ClientOption interface { - Apply(*internal.DialSettings) -} - -// WithTokenSource returns a ClientOption that specifies an OAuth2 token -// source to be used as the basis for authentication. 
-func WithTokenSource(s oauth2.TokenSource) ClientOption { - return withTokenSource{s} -} - -type withTokenSource struct{ ts oauth2.TokenSource } - -func (w withTokenSource) Apply(o *internal.DialSettings) { - o.TokenSource = w.ts -} - -type withCredFile string - -func (w withCredFile) Apply(o *internal.DialSettings) { - o.CredentialsFile = string(w) -} - -// WithCredentialsFile returns a ClientOption that authenticates -// API calls with the given service account or refresh token JSON -// credentials file. -func WithCredentialsFile(filename string) ClientOption { - return withCredFile(filename) -} - -// WithServiceAccountFile returns a ClientOption that uses a Google service -// account credentials file to authenticate. -// -// Deprecated: Use WithCredentialsFile instead. -func WithServiceAccountFile(filename string) ClientOption { - return WithCredentialsFile(filename) -} - -// WithEndpoint returns a ClientOption that overrides the default endpoint -// to be used for a service. -func WithEndpoint(url string) ClientOption { - return withEndpoint(url) -} - -type withEndpoint string - -func (w withEndpoint) Apply(o *internal.DialSettings) { - o.Endpoint = string(w) -} - -// WithScopes returns a ClientOption that overrides the default OAuth2 scopes -// to be used for a service. -func WithScopes(scope ...string) ClientOption { - return withScopes(scope) -} - -type withScopes []string - -func (w withScopes) Apply(o *internal.DialSettings) { - s := make([]string, len(w)) - copy(s, w) - o.Scopes = s -} - -// WithUserAgent returns a ClientOption that sets the User-Agent. -func WithUserAgent(ua string) ClientOption { - return withUA(ua) -} - -type withUA string - -func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) } - -// WithHTTPClient returns a ClientOption that specifies the HTTP client to use -// as the basis of communications. This option may only be used with services -// that support HTTP as their communication transport. 
When used, the -// WithHTTPClient option takes precedent over all other supplied options. -func WithHTTPClient(client *http.Client) ClientOption { - return withHTTPClient{client} -} - -type withHTTPClient struct{ client *http.Client } - -func (w withHTTPClient) Apply(o *internal.DialSettings) { - o.HTTPClient = w.client -} - -// WithGRPCConn returns a ClientOption that specifies the gRPC client -// connection to use as the basis of communications. This option many only be -// used with services that support gRPC as their communication transport. When -// used, the WithGRPCConn option takes precedent over all other supplied -// options. -func WithGRPCConn(conn *grpc.ClientConn) ClientOption { - return withGRPCConn{conn} -} - -type withGRPCConn struct{ conn *grpc.ClientConn } - -func (w withGRPCConn) Apply(o *internal.DialSettings) { - o.GRPCConn = w.conn -} - -// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption -// to an underlying gRPC dial. It does not work with WithGRPCConn. -func WithGRPCDialOption(opt grpc.DialOption) ClientOption { - return withGRPCDialOption{opt} -} - -type withGRPCDialOption struct{ opt grpc.DialOption } - -func (w withGRPCDialOption) Apply(o *internal.DialSettings) { - o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt) -} - -// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC -// connections that requests will be balanced between. -// This is an EXPERIMENTAL API and may be changed or removed in the future. -func WithGRPCConnectionPool(size int) ClientOption { - return withGRPCConnectionPool(size) -} - -type withGRPCConnectionPool int - -func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { - balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o)) - o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer)) -} - -// WithAPIKey returns a ClientOption that specifies an API key to be used -// as the basis for authentication. 
-func WithAPIKey(apiKey string) ClientOption { - return withAPIKey(apiKey) -} - -type withAPIKey string - -func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) } - -// WithoutAuthentication returns a ClientOption that specifies that no -// authentication should be used. It is suitable only for testing and for -// accessing public resources, like public Google Cloud Storage buckets. -// It is an error to provide both WithoutAuthentication and any of WithAPIKey, -// WithTokenSource, WithCredentialsFile or WithServiceAccountFile. -func WithoutAuthentication() ClientOption { - return withoutAuthentication{} -} - -type withoutAuthentication struct{} - -func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json deleted file mode 100644 index f49c93f7857..00000000000 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ /dev/null @@ -1,3711 +0,0 @@ -{ - "kind": "discovery#restDescription", - "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/akxawO6Ey81E_n6KZZ_RFctOG6Q\"", - "discoveryVersion": "v1", - "id": "storage:v1", - "name": "storage", - "version": "v1", - "revision": "20171011", - "title": "Cloud Storage JSON API", - "description": "Stores and retrieves potentially large, immutable data objects.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", - "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" - }, - "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "labels": [ - "labs" - ], - "protocol": "rest", - "baseUrl": "https://www.googleapis.com/storage/v1/", - "basePath": "/storage/v1/", - "rootUrl": "https://www.googleapis.com/", - "servicePath": "storage/v1/", - "batchPath": "batch", - "parameters": { - "alt": { - "type": "string", - 
"description": "Data format for the response.", - "default": "json", - "enum": [ - "json" - ], - "enumDescriptions": [ - "Responses with Content-Type of application/json" - ], - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", - "location": "query" - }, - "userIp": { - "type": "string", - "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/cloud-platform.read-only": { - "description": "View your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/devstorage.full_control": { - "description": "Manage your data and permissions in Google Cloud Storage" - }, - "https://www.googleapis.com/auth/devstorage.read_only": { - "description": "View your data in Google Cloud Storage" - }, - "https://www.googleapis.com/auth/devstorage.read_write": { - "description": "Manage your data in Google Cloud Storage" - } - } - } - }, - "schemas": { - "Bucket": { - "id": "Bucket", - "type": "object", - "description": "A bucket.", - "properties": { - "acl": { - "type": "array", - "description": "Access controls on the bucket.", - "items": { - "$ref": "BucketAccessControl" - }, - "annotations": { - "required": [ - "storage.buckets.update" - ] - } - }, - "billing": { - "type": "object", - "description": "The bucket's billing configuration.", - "properties": { - "requesterPays": { - "type": "boolean", - "description": "When set to true, bucket is requester pays." 
- } - } - }, - "cors": { - "type": "array", - "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.", - "items": { - "type": "object", - "properties": { - "maxAgeSeconds": { - "type": "integer", - "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.", - "format": "int32" - }, - "method": { - "type": "array", - "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".", - "items": { - "type": "string" - } - }, - "origin": { - "type": "array", - "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".", - "items": { - "type": "string" - } - }, - "responseHeader": { - "type": "array", - "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.", - "items": { - "type": "string" - } - } - } - } - }, - "defaultObjectAcl": { - "type": "array", - "description": "Default access controls to apply to new objects when no ACL is provided.", - "items": { - "$ref": "ObjectAccessControl" - } - }, - "encryption": { - "type": "object", - "description": "Encryption configuration used by default for newly inserted objects, when no encryption config is specified.", - "properties": { - "defaultKmsKeyName": { - "type": "string" - } - } - }, - "etag": { - "type": "string", - "description": "HTTP 1.1 Entity tag for the bucket." - }, - "id": { - "type": "string", - "description": "The ID of the bucket. For buckets, the id and name properities are the same." - }, - "kind": { - "type": "string", - "description": "The kind of item this is. 
For buckets, this is always storage#bucket.", - "default": "storage#bucket" - }, - "labels": { - "type": "object", - "description": "User-provided labels, in key/value pairs.", - "additionalProperties": { - "type": "string", - "description": "An individual label entry." - } - }, - "lifecycle": { - "type": "object", - "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", - "properties": { - "rule": { - "type": "array", - "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", - "items": { - "type": "object", - "properties": { - "action": { - "type": "object", - "description": "The action to take.", - "properties": { - "storageClass": { - "type": "string", - "description": "Target storage class. Required iff the type of the action is SetStorageClass." - }, - "type": { - "type": "string", - "description": "Type of the action. Currently, only Delete and SetStorageClass are supported." - } - } - }, - "condition": { - "type": "object", - "description": "The condition(s) under which the action will be taken.", - "properties": { - "age": { - "type": "integer", - "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.", - "format": "int32" - }, - "createdBefore": { - "type": "string", - "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.", - "format": "date" - }, - "isLive": { - "type": "boolean", - "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects." - }, - "matchesStorageClass": { - "type": "array", - "description": "Objects having any of the storage classes specified by this condition will be matched. 
Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.", - "items": { - "type": "string" - } - }, - "numNewerVersions": { - "type": "integer", - "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", - "format": "int32" - } - } - } - } - } - } - } - }, - "location": { - "type": "string", - "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list." - }, - "logging": { - "type": "object", - "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", - "properties": { - "logBucket": { - "type": "string", - "description": "The destination bucket where the current bucket's logs should be placed." - }, - "logObjectPrefix": { - "type": "string", - "description": "A prefix for log object names." - } - } - }, - "metageneration": { - "type": "string", - "description": "The metadata generation of this bucket.", - "format": "int64" - }, - "name": { - "type": "string", - "description": "The name of the bucket.", - "annotations": { - "required": [ - "storage.buckets.insert" - ] - } - }, - "owner": { - "type": "object", - "description": "The owner of the bucket. This is always the project team's owner group.", - "properties": { - "entity": { - "type": "string", - "description": "The entity, in the form project-owner-projectId." - }, - "entityId": { - "type": "string", - "description": "The ID for the entity." - } - } - }, - "projectNumber": { - "type": "string", - "description": "The project number of the project the bucket belongs to.", - "format": "uint64" - }, - "selfLink": { - "type": "string", - "description": "The URI of this bucket." 
- }, - "storageClass": { - "type": "string", - "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes." - }, - "timeCreated": { - "type": "string", - "description": "The creation time of the bucket in RFC 3339 format.", - "format": "date-time" - }, - "updated": { - "type": "string", - "description": "The modification time of the bucket in RFC 3339 format.", - "format": "date-time" - }, - "versioning": { - "type": "object", - "description": "The bucket's versioning configuration.", - "properties": { - "enabled": { - "type": "boolean", - "description": "While set to true, versioning is fully enabled for this bucket." - } - } - }, - "website": { - "type": "object", - "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.", - "properties": { - "mainPageSuffix": { - "type": "string", - "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages." - }, - "notFoundPage": { - "type": "string", - "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result." 
- } - } - } - } - }, - "BucketAccessControl": { - "id": "BucketAccessControl", - "type": "object", - "description": "An access-control entry.", - "properties": { - "bucket": { - "type": "string", - "description": "The name of the bucket." - }, - "domain": { - "type": "string", - "description": "The domain associated with the entity, if any." - }, - "email": { - "type": "string", - "description": "The email address associated with the entity, if any." - }, - "entity": { - "type": "string", - "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", - "annotations": { - "required": [ - "storage.bucketAccessControls.insert" - ] - } - }, - "entityId": { - "type": "string", - "description": "The ID for the entity, if any." - }, - "etag": { - "type": "string", - "description": "HTTP 1.1 Entity tag for the access-control entry." - }, - "id": { - "type": "string", - "description": "The ID of the access-control entry." - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.", - "default": "storage#bucketAccessControl" - }, - "projectTeam": { - "type": "object", - "description": "The project team associated with the entity, if any.", - "properties": { - "projectNumber": { - "type": "string", - "description": "The project number." - }, - "team": { - "type": "string", - "description": "The team." 
- } - } - }, - "role": { - "type": "string", - "description": "The access permission for the entity.", - "annotations": { - "required": [ - "storage.bucketAccessControls.insert" - ] - } - }, - "selfLink": { - "type": "string", - "description": "The link to this access-control entry." - } - } - }, - "BucketAccessControls": { - "id": "BucketAccessControls", - "type": "object", - "description": "An access-control list.", - "properties": { - "items": { - "type": "array", - "description": "The list of items.", - "items": { - "$ref": "BucketAccessControl" - } - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.", - "default": "storage#bucketAccessControls" - } - } - }, - "Buckets": { - "id": "Buckets", - "type": "object", - "description": "A list of buckets.", - "properties": { - "items": { - "type": "array", - "description": "The list of items.", - "items": { - "$ref": "Bucket" - } - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.", - "default": "storage#buckets" - }, - "nextPageToken": { - "type": "string", - "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." - } - } - }, - "Channel": { - "id": "Channel", - "type": "object", - "description": "An notification channel used to watch for resource changes.", - "properties": { - "address": { - "type": "string", - "description": "The address where notifications are delivered for this channel." - }, - "expiration": { - "type": "string", - "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", - "format": "int64" - }, - "id": { - "type": "string", - "description": "A UUID or similar unique string that identifies this channel." 
- }, - "kind": { - "type": "string", - "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".", - "default": "api#channel" - }, - "params": { - "type": "object", - "description": "Additional parameters controlling delivery channel behavior. Optional.", - "additionalProperties": { - "type": "string", - "description": "Declares a new parameter by name." - } - }, - "payload": { - "type": "boolean", - "description": "A Boolean value to indicate whether payload is wanted. Optional." - }, - "resourceId": { - "type": "string", - "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions." - }, - "resourceUri": { - "type": "string", - "description": "A version-specific identifier for the watched resource." - }, - "token": { - "type": "string", - "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional." - }, - "type": { - "type": "string", - "description": "The type of delivery mechanism used for this channel." - } - } - }, - "ComposeRequest": { - "id": "ComposeRequest", - "type": "object", - "description": "A Compose request.", - "properties": { - "destination": { - "$ref": "Object", - "description": "Properties of the resulting object." - }, - "kind": { - "type": "string", - "description": "The kind of item this is.", - "default": "storage#composeRequest" - }, - "sourceObjects": { - "type": "array", - "description": "The list of source objects that will be concatenated into a single object.", - "items": { - "type": "object", - "properties": { - "generation": { - "type": "string", - "description": "The generation of this object to use as the source.", - "format": "int64" - }, - "name": { - "type": "string", - "description": "The source object's name. 
The source object's bucket is implicitly the destination bucket.", - "annotations": { - "required": [ - "storage.objects.compose" - ] - } - }, - "objectPreconditions": { - "type": "object", - "description": "Conditions that must be met for this operation to execute.", - "properties": { - "ifGenerationMatch": { - "type": "string", - "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.", - "format": "int64" - } - } - } - } - }, - "annotations": { - "required": [ - "storage.objects.compose" - ] - } - } - } - }, - "Notification": { - "id": "Notification", - "type": "object", - "description": "A subscription to receive Google PubSub notifications.", - "properties": { - "custom_attributes": { - "type": "object", - "description": "An optional list of additional attributes to attach to each Cloud PubSub message published for this notification subscription.", - "additionalProperties": { - "type": "string" - } - }, - "etag": { - "type": "string", - "description": "HTTP 1.1 Entity tag for this subscription notification." - }, - "event_types": { - "type": "array", - "description": "If present, only send notifications about listed event types. If empty, sent notifications for all event types.", - "items": { - "type": "string" - } - }, - "id": { - "type": "string", - "description": "The ID of the notification." - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For notifications, this is always storage#notification.", - "default": "storage#notification" - }, - "object_name_prefix": { - "type": "string", - "description": "If present, only apply this notification configuration to object names that begin with this prefix." 
- }, - "payload_format": { - "type": "string", - "description": "The desired content of the Payload.", - "default": "JSON_API_V1" - }, - "selfLink": { - "type": "string", - "description": "The canonical URL of this notification." - }, - "topic": { - "type": "string", - "description": "The Cloud PubSub topic to which this subscription publishes. Formatted as: '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'", - "annotations": { - "required": [ - "storage.notifications.insert" - ] - } - } - } - }, - "Notifications": { - "id": "Notifications", - "type": "object", - "description": "A list of notification subscriptions.", - "properties": { - "items": { - "type": "array", - "description": "The list of items.", - "items": { - "$ref": "Notification" - } - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For lists of notifications, this is always storage#notifications.", - "default": "storage#notifications" - } - } - }, - "Object": { - "id": "Object", - "type": "object", - "description": "An object.", - "properties": { - "acl": { - "type": "array", - "description": "Access controls on the object.", - "items": { - "$ref": "ObjectAccessControl" - }, - "annotations": { - "required": [ - "storage.objects.update" - ] - } - }, - "bucket": { - "type": "string", - "description": "The name of the bucket containing this object." - }, - "cacheControl": { - "type": "string", - "description": "Cache-Control directive for the object data. If omitted, and the object is accessible to all anonymous users, the default will be public, max-age=3600." - }, - "componentCount": { - "type": "integer", - "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.", - "format": "int32" - }, - "contentDisposition": { - "type": "string", - "description": "Content-Disposition of the object data." 
- }, - "contentEncoding": { - "type": "string", - "description": "Content-Encoding of the object data." - }, - "contentLanguage": { - "type": "string", - "description": "Content-Language of the object data." - }, - "contentType": { - "type": "string", - "description": "Content-Type of the object data. If an object is stored without a Content-Type, it is served as application/octet-stream." - }, - "crc32c": { - "type": "string", - "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices." - }, - "customerEncryption": { - "type": "object", - "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", - "properties": { - "encryptionAlgorithm": { - "type": "string", - "description": "The encryption algorithm." - }, - "keySha256": { - "type": "string", - "description": "SHA256 hash value of the encryption key." - } - } - }, - "etag": { - "type": "string", - "description": "HTTP 1.1 Entity tag for the object." - }, - "generation": { - "type": "string", - "description": "The content generation of this object. Used for object versioning.", - "format": "int64" - }, - "id": { - "type": "string", - "description": "The ID of the object, including the bucket name, object name, and generation number." - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For objects, this is always storage#object.", - "default": "storage#object" - }, - "kmsKeyName": { - "type": "string", - "description": "Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key." - }, - "md5Hash": { - "type": "string", - "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices." - }, - "mediaLink": { - "type": "string", - "description": "Media download link." 
- }, - "metadata": { - "type": "object", - "description": "User-provided metadata, in key/value pairs.", - "additionalProperties": { - "type": "string", - "description": "An individual metadata entry." - } - }, - "metageneration": { - "type": "string", - "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.", - "format": "int64" - }, - "name": { - "type": "string", - "description": "The name of the object. Required if not specified by URL parameter." - }, - "owner": { - "type": "object", - "description": "The owner of the object. This will always be the uploader of the object.", - "properties": { - "entity": { - "type": "string", - "description": "The entity, in the form user-userId." - }, - "entityId": { - "type": "string", - "description": "The ID for the entity." - } - } - }, - "selfLink": { - "type": "string", - "description": "The link to this object." - }, - "size": { - "type": "string", - "description": "Content-Length of the data in bytes.", - "format": "uint64" - }, - "storageClass": { - "type": "string", - "description": "Storage class of the object." - }, - "timeCreated": { - "type": "string", - "description": "The creation time of the object in RFC 3339 format.", - "format": "date-time" - }, - "timeDeleted": { - "type": "string", - "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", - "format": "date-time" - }, - "timeStorageClassUpdated": { - "type": "string", - "description": "The time at which the object's storage class was last changed. 
When the object is initially created, it will be set to timeCreated.", - "format": "date-time" - }, - "updated": { - "type": "string", - "description": "The modification time of the object metadata in RFC 3339 format.", - "format": "date-time" - } - } - }, - "ObjectAccessControl": { - "id": "ObjectAccessControl", - "type": "object", - "description": "An access-control entry.", - "properties": { - "bucket": { - "type": "string", - "description": "The name of the bucket." - }, - "domain": { - "type": "string", - "description": "The domain associated with the entity, if any." - }, - "email": { - "type": "string", - "description": "The email address associated with the entity, if any." - }, - "entity": { - "type": "string", - "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", - "annotations": { - "required": [ - "storage.defaultObjectAccessControls.insert", - "storage.objectAccessControls.insert" - ] - } - }, - "entityId": { - "type": "string", - "description": "The ID for the entity, if any." - }, - "etag": { - "type": "string", - "description": "HTTP 1.1 Entity tag for the access-control entry." - }, - "generation": { - "type": "string", - "description": "The content generation of the object, if applied to an object.", - "format": "int64" - }, - "id": { - "type": "string", - "description": "The ID of the access-control entry." - }, - "kind": { - "type": "string", - "description": "The kind of item this is. 
For object access control entries, this is always storage#objectAccessControl.", - "default": "storage#objectAccessControl" - }, - "object": { - "type": "string", - "description": "The name of the object, if applied to an object." - }, - "projectTeam": { - "type": "object", - "description": "The project team associated with the entity, if any.", - "properties": { - "projectNumber": { - "type": "string", - "description": "The project number." - }, - "team": { - "type": "string", - "description": "The team." - } - } - }, - "role": { - "type": "string", - "description": "The access permission for the entity.", - "annotations": { - "required": [ - "storage.defaultObjectAccessControls.insert", - "storage.objectAccessControls.insert" - ] - } - }, - "selfLink": { - "type": "string", - "description": "The link to this access-control entry." - } - } - }, - "ObjectAccessControls": { - "id": "ObjectAccessControls", - "type": "object", - "description": "An access-control list.", - "properties": { - "items": { - "type": "array", - "description": "The list of items.", - "items": { - "$ref": "ObjectAccessControl" - } - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.", - "default": "storage#objectAccessControls" - } - } - }, - "Objects": { - "id": "Objects", - "type": "object", - "description": "A list of objects.", - "properties": { - "items": { - "type": "array", - "description": "The list of items.", - "items": { - "$ref": "Object" - } - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For lists of objects, this is always storage#objects.", - "default": "storage#objects" - }, - "nextPageToken": { - "type": "string", - "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." 
- }, - "prefixes": { - "type": "array", - "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.", - "items": { - "type": "string" - } - } - } - }, - "Policy": { - "id": "Policy", - "type": "object", - "description": "A bucket/object IAM policy.", - "properties": { - "bindings": { - "type": "array", - "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.", - "items": { - "type": "object", - "properties": { - "condition": { - "type": "any" - }, - "members": { - "type": "array", - "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. 
For example, projectViewer:my-example-project", - "items": { - "type": "string" - }, - "annotations": { - "required": [ - "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" - ] - } - }, - "role": { - "type": "string", - "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. \n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. 
Equivalent to an ACL entry on a bucket with the OWNER role.", - "annotations": { - "required": [ - "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" - ] - } - } - } - }, - "annotations": { - "required": [ - "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" - ] - } - }, - "etag": { - "type": "string", - "description": "HTTP 1.1 Entity tag for the policy.", - "format": "byte" - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For policies, this is always storage#policy. This field is ignored on input.", - "default": "storage#policy" - }, - "resourceId": { - "type": "string", - "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input." - } - } - }, - "RewriteResponse": { - "id": "RewriteResponse", - "type": "object", - "description": "A rewrite response.", - "properties": { - "done": { - "type": "boolean", - "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response." - }, - "kind": { - "type": "string", - "description": "The kind of item this is.", - "default": "storage#rewriteResponse" - }, - "objectSize": { - "type": "string", - "description": "The total size of the object being copied in bytes. This property is always present in the response.", - "format": "int64" - }, - "resource": { - "$ref": "Object", - "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes." 
- }, - "rewriteToken": { - "type": "string", - "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy." - }, - "totalBytesRewritten": { - "type": "string", - "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.", - "format": "int64" - } - } - }, - "ServiceAccount": { - "id": "ServiceAccount", - "type": "object", - "description": "A subscription to receive Google PubSub notifications.", - "properties": { - "email_address": { - "type": "string", - "description": "The ID of the notification." - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For notifications, this is always storage#notification.", - "default": "storage#serviceAccount" - } - } - }, - "TestIamPermissionsResponse": { - "id": "TestIamPermissionsResponse", - "type": "object", - "description": "A storage.(buckets|objects).testIamPermissions response.", - "properties": { - "kind": { - "type": "string", - "description": "The kind of item this is.", - "default": "storage#testIamPermissionsResponse" - }, - "permissions": { - "type": "array", - "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. 
\n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata.", - "items": { - "type": "string" - } - } - } - } - }, - "resources": { - "bucketAccessControls": { - "methods": { - "delete": { - "id": "storage.bucketAccessControls.delete", - "path": "b/{bucket}/acl/{entity}", - "httpMethod": "DELETE", - "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "get": { - "id": "storage.bucketAccessControls.get", - "path": "b/{bucket}/acl/{entity}", - "httpMethod": "GET", - "description": "Returns the ACL entry for the specified entity on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "insert": { - "id": "storage.bucketAccessControls.insert", - "path": "b/{bucket}/acl", - "httpMethod": "POST", - "description": "Creates a new ACL entry on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "BucketAccessControl" - }, - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "list": { - "id": "storage.bucketAccessControls.list", - "path": "b/{bucket}/acl", - "httpMethod": "GET", - "description": "Retrieves ACL entries on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "response": { - "$ref": "BucketAccessControls" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "patch": { - "id": "storage.bucketAccessControls.patch", - "path": "b/{bucket}/acl/{entity}", - "httpMethod": "PATCH", - "description": "Updates an ACL entry on the specified bucket. 
This method supports patch semantics.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "request": { - "$ref": "BucketAccessControl" - }, - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "update": { - "id": "storage.bucketAccessControls.update", - "path": "b/{bucket}/acl/{entity}", - "httpMethod": "PUT", - "description": "Updates an ACL entry on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "request": { - "$ref": "BucketAccessControl" - }, - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "buckets": { - "methods": { - "delete": { - "id": "storage.buckets.delete", - "path": "b/{bucket}", - "httpMethod": "DELETE", - "description": "Permanently deletes an empty bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "If set, only deletes the bucket if its metageneration matches this value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "If set, only deletes the bucket if its metageneration does not match this value.", - "format": "int64", - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "get": { - "id": "storage.buckets.get", - "path": "b/{bucket}", - "httpMethod": "GET", - "description": "Returns metadata for the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit owner, acl and defaultObjectAcl properties." - ], - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "getIamPolicy": { - "id": "storage.buckets.getIamPolicy", - "path": "b/{bucket}/iam", - "httpMethod": "GET", - "description": "Returns an IAM policy for the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "insert": { - "id": "storage.buckets.insert", - "path": "b", - "httpMethod": "POST", - "description": "Creates a new bucket.", - "parameters": { - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this bucket.", - "enum": [ - "authenticatedRead", - "private", - "projectPrivate", - "publicRead", - "publicReadWrite" - ], - "enumDescriptions": [ - "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - "Project team owners get OWNER access.", - "Project team members get access according to their roles.", - "Project team owners get OWNER access, and allUsers get READER 
access.", - "Project team owners get OWNER access, and allUsers get WRITER access." - ], - "location": "query" - }, - "predefinedDefaultObjectAcl": { - "type": "string", - "description": "Apply a predefined set of default object access controls to this bucket.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "project": { - "type": "string", - "description": "A valid API project identifier.", - "required": true, - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit owner, acl and defaultObjectAcl properties." 
- ], - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request.", - "location": "query" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "Bucket" - }, - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "list": { - "id": "storage.buckets.list", - "path": "b", - "httpMethod": "GET", - "description": "Retrieves a list of buckets for a given project.", - "parameters": { - "maxResults": { - "type": "integer", - "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", - "default": "1000", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "A previously-returned page token representing part of the larger set of results to view.", - "location": "query" - }, - "prefix": { - "type": "string", - "description": "Filter results to buckets whose names begin with this prefix.", - "location": "query" - }, - "project": { - "type": "string", - "description": "A valid API project identifier.", - "required": true, - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit owner, acl and defaultObjectAcl properties." 
- ], - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request.", - "location": "query" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "Buckets" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "patch": { - "id": "storage.buckets.patch", - "path": "b/{bucket}", - "httpMethod": "PATCH", - "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. This method supports patch semantics.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this bucket.", - "enum": [ - "authenticatedRead", - "private", - "projectPrivate", - "publicRead", - "publicReadWrite" - ], - "enumDescriptions": [ - "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - "Project team owners get OWNER access.", - "Project team members get access according to their roles.", - "Project team owners get OWNER access, and allUsers get READER access.", 
- "Project team owners get OWNER access, and allUsers get WRITER access." - ], - "location": "query" - }, - "predefinedDefaultObjectAcl": { - "type": "string", - "description": "Apply a predefined set of default object access controls to this bucket.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit owner, acl and defaultObjectAcl properties." - ], - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "Bucket" - }, - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "setIamPolicy": { - "id": "storage.buckets.setIamPolicy", - "path": "b/{bucket}/iam", - "httpMethod": "PUT", - "description": "Updates an IAM policy for the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "Policy" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "testIamPermissions": { - "id": "storage.buckets.testIamPermissions", - "path": "b/{bucket}/iam/testPermissions", - "httpMethod": "GET", - "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "permissions": { - "type": "string", - "description": "Permissions to test.", - "required": true, - "repeated": true, - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "permissions" - ], - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "update": { - "id": "storage.buckets.update", - "path": "b/{bucket}", - "httpMethod": "PUT", - "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this bucket.", - "enum": [ - "authenticatedRead", - "private", - "projectPrivate", - "publicRead", - "publicReadWrite" - ], - "enumDescriptions": [ - "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - "Project team owners get OWNER access.", - "Project team members get access according to their roles.", - "Project team owners get OWNER access, and allUsers get READER access.", - "Project team owners get OWNER access, and allUsers get WRITER access." 
- ], - "location": "query" - }, - "predefinedDefaultObjectAcl": { - "type": "string", - "description": "Apply a predefined set of default object access controls to this bucket.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit owner, acl and defaultObjectAcl properties." - ], - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "Bucket" - }, - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "channels": { - "methods": { - "stop": { - "id": "storage.channels.stop", - "path": "channels/stop", - "httpMethod": "POST", - "description": "Stop watching resources through this channel", - "request": { - "$ref": "Channel", - "parameterName": "resource" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - } - } - }, - "defaultObjectAccessControls": { - "methods": { - "delete": { - "id": "storage.defaultObjectAccessControls.delete", - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "httpMethod": "DELETE", - "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "get": { - "id": "storage.defaultObjectAccessControls.get", - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "httpMethod": "GET", - "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "insert": { - "id": "storage.defaultObjectAccessControls.insert", - "path": "b/{bucket}/defaultObjectAcl", - "httpMethod": "POST", - "description": "Creates a new default object ACL entry on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "list": { - "id": "storage.defaultObjectAccessControls.list", - "path": "b/{bucket}/defaultObjectAcl", - "httpMethod": "GET", - "description": "Retrieves default object ACL entries on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "response": { - "$ref": "ObjectAccessControls" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "patch": { - "id": "storage.defaultObjectAccessControls.patch", - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "httpMethod": "PATCH", - "description": "Updates a default object ACL entry on the specified bucket. 
This method supports patch semantics.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "update": { - "id": "storage.defaultObjectAccessControls.update", - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "httpMethod": "PUT", - "description": "Updates a default object ACL entry on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "notifications": { - "methods": { - "delete": { - "id": "storage.notifications.delete", - "path": "b/{bucket}/notificationConfigs/{notification}", - "httpMethod": "DELETE", - "description": "Permanently deletes a notification subscription.", - "parameters": { - "bucket": { - "type": "string", - "description": "The parent bucket of the notification.", - "required": true, - "location": "path" - }, - "notification": { - "type": "string", - "description": "ID of the notification to delete.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "notification" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "get": { - "id": "storage.notifications.get", - "path": "b/{bucket}/notificationConfigs/{notification}", - "httpMethod": "GET", - "description": "View a notification configuration.", - "parameters": { - "bucket": { - "type": "string", - "description": "The parent bucket of the notification.", - "required": true, - "location": "path" - }, - "notification": { - "type": "string", - "description": "Notification ID", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "notification" - ], - "response": { - "$ref": "Notification" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "insert": { - "id": "storage.notifications.insert", - "path": "b/{bucket}/notificationConfigs", - "httpMethod": "POST", - "description": "Creates a notification subscription for a given bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "The parent bucket of the notification.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "Notification" - }, - "response": { - "$ref": "Notification" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "list": { - "id": "storage.notifications.list", - "path": "b/{bucket}/notificationConfigs", - "httpMethod": "GET", - "description": "Retrieves a list of notification subscriptions for a given bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a Google Cloud Storage bucket.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "response": { - "$ref": "Notifications" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - } - } - }, - "objectAccessControls": { - "methods": { - "delete": { - "id": "storage.objectAccessControls.delete", - "path": "b/{bucket}/o/{object}/acl/{entity}", - "httpMethod": "DELETE", - "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "get": { - "id": "storage.objectAccessControls.get", - "path": "b/{bucket}/o/{object}/acl/{entity}", - "httpMethod": "GET", - "description": "Returns the ACL entry for the specified entity on the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "insert": { - "id": "storage.objectAccessControls.insert", - "path": "b/{bucket}/o/{object}/acl", - "httpMethod": "POST", - "description": "Creates a new ACL entry on the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "list": { - "id": "storage.objectAccessControls.list", - "path": "b/{bucket}/o/{object}/acl", - "httpMethod": "GET", - "description": "Retrieves ACL entries on the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "response": { - "$ref": "ObjectAccessControls" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "patch": { - "id": "storage.objectAccessControls.patch", - "path": "b/{bucket}/o/{object}/acl/{entity}", - "httpMethod": "PATCH", - "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "update": { - "id": "storage.objectAccessControls.update", - "path": "b/{bucket}/o/{object}/acl/{entity}", - "httpMethod": "PUT", - "description": "Updates an ACL entry on the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "objects": { - "methods": { - "compose": { - "id": "storage.objects.compose", - "path": "b/{destinationBucket}/o/{destinationObject}/compose", - "httpMethod": "POST", - "description": "Concatenates a list of existing objects into a new object in the same bucket.", - "parameters": { - "destinationBucket": { - "type": "string", - "description": "Name of the bucket in which to store the new object.", - "required": true, - "location": "path" - }, - "destinationObject": { - "type": "string", - "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "destinationPredefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to the destination object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "kmsKeyName": { - "type": "string", - "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "destinationBucket", - "destinationObject" - ], - "request": { - "$ref": "ComposeRequest" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true - }, - "copy": { - "id": "storage.objects.copy", - "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", - "httpMethod": "POST", - "description": "Copies a source object to a destination object. Optionally overrides metadata.", - "parameters": { - "destinationBucket": { - "type": "string", - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "destinationObject": { - "type": "string", - "description": "Name of the new object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.", - "required": true, - "location": "path" - }, - "destinationPredefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to the destination object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." 
- ], - "location": "query" - }, - "sourceBucket": { - "type": "string", - "description": "Name of the bucket in which to find the source object.", - "required": true, - "location": "path" - }, - "sourceGeneration": { - "type": "string", - "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "sourceObject": { - "type": "string", - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "sourceBucket", - "sourceObject", - "destinationBucket", - "destinationObject" - ], - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true - }, - "delete": { - "id": "storage.objects.delete", - "path": "b/{bucket}/o/{object}", - "httpMethod": "DELETE", - "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which the object resides.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "get": { - "id": "storage.objects.get", - "path": "b/{bucket}/o/{object}", - "httpMethod": "GET", - "description": "Retrieves an object or its metadata.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which the object resides.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true - }, - "getIamPolicy": { - "id": "storage.objects.getIamPolicy", - "path": "b/{bucket}/o/{object}/iam", - "httpMethod": "GET", - "description": "Returns an IAM policy for the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which the object resides.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "insert": { - "id": "storage.objects.insert", - "path": "b/{bucket}/o", - "httpMethod": "POST", - "description": "Stores a new object and metadata.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - "required": true, - "location": "path" - }, - "contentEncoding": { - "type": "string", - "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "kmsKeyName": { - "type": "string", - "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - "location": "query" - }, - "name": { - "type": "string", - "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "query" - }, - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." 
- ], - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true, - "supportsMediaUpload": true, - "mediaUpload": { - "accept": [ - "*/*" - ], - "protocols": { - "simple": { - "multipart": true, - "path": "/upload/storage/v1/b/{bucket}/o" - }, - "resumable": { - "multipart": true, - "path": "/resumable/upload/storage/v1/b/{bucket}/o" - } - } - } - }, - "list": { - "id": "storage.objects.list", - "path": "b/{bucket}/o", - "httpMethod": "GET", - "description": "Retrieves a list of objects matching the criteria.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which to look for objects.", - "required": true, - "location": "path" - }, - "delimiter": { - "type": "string", - "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. 
Duplicate prefixes are omitted.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - "default": "1000", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "A previously-returned page token representing part of the larger set of results to view.", - "location": "query" - }, - "prefix": { - "type": "string", - "description": "Filter results to objects whose names begin with this prefix.", - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - }, - "versions": { - "type": "boolean", - "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "response": { - "$ref": "Objects" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsSubscription": true - }, - "patch": { - "id": "storage.objects.patch", - "path": "b/{bucket}/o/{object}", - "httpMethod": "PATCH", - "description": "Updates an object's metadata. 
This method supports patch semantics.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which the object resides.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "rewrite": { - "id": "storage.objects.rewrite", - "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", - "httpMethod": "POST", - "description": "Rewrites a source object to a destination object. 
Optionally overrides metadata.", - "parameters": { - "destinationBucket": { - "type": "string", - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - "required": true, - "location": "path" - }, - "destinationKmsKeyName": { - "type": "string", - "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - "location": "query" - }, - "destinationObject": { - "type": "string", - "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "destinationPredefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to the destination object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "maxBytesRewrittenPerCall": { - "type": "string", - "description": "The maximum 
number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", - "format": "int64", - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query" - }, - "rewriteToken": { - "type": "string", - "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", - "location": "query" - }, - "sourceBucket": { - "type": "string", - "description": "Name of the bucket in which to find the source object.", - "required": true, - "location": "path" - }, - "sourceGeneration": { - "type": "string", - "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "sourceObject": { - "type": "string", - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "sourceBucket", - "sourceObject", - "destinationBucket", - "destinationObject" - ], - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "RewriteResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "setIamPolicy": { - "id": "storage.objects.setIamPolicy", - "path": "b/{bucket}/o/{object}/iam", - "httpMethod": "PUT", - "description": "Updates an IAM policy for the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which the object resides.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "request": { - "$ref": "Policy" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "testIamPermissions": { - "id": "storage.objects.testIamPermissions", - "path": "b/{bucket}/o/{object}/iam/testPermissions", - "httpMethod": "GET", - "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which the object resides.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "permissions": { - "type": "string", - "description": "Permissions to test.", - "required": true, - "repeated": true, - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object", - "permissions" - ], - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "update": { - "id": "storage.objects.update", - "path": "b/{bucket}/o/{object}", - "httpMethod": "PUT", - "description": "Updates an object's metadata.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which the object resides.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true - }, - "watchAll": { - "id": "storage.objects.watchAll", - "path": "b/{bucket}/o/watch", - "httpMethod": "POST", - "description": "Watch for changes on all objects in a bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which to look for objects.", - "required": true, - "location": "path" - }, - "delimiter": { - "type": "string", - "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - "default": "1000", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "A previously-returned page token representing part of the larger set of results to view.", - "location": "query" - }, - "prefix": { - "type": "string", - "description": "Filter results to objects whose names begin with this prefix.", - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. 
Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query" - }, - "versions": { - "type": "boolean", - "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "Channel", - "parameterName": "resource" - }, - "response": { - "$ref": "Channel" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsSubscription": true - } - } - }, - "projects": { - "resources": { - "serviceAccount": { - "methods": { - "get": { - "id": "storage.projects.serviceAccount.get", - "path": "projects/{projectId}/serviceAccount", - "httpMethod": "GET", - "description": "Get the email address of this project's Google Cloud Storage service account.", - "parameters": { - "projectId": { - "type": "string", - "description": "Project ID", - "required": true, - "location": "path" - }, - "userProject": { - "type": "string", - "description": "The project to be billed for this request.", - "location": "query" - } - }, - "parameterOrder": [ - "projectId" - ], - "response": { - "$ref": "ServiceAccount" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - 
"https://www.googleapis.com/auth/devstorage.read_write" - ] - } - } - } - } - } - } -} diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go deleted file mode 100644 index 62a627594c2..00000000000 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ /dev/null @@ -1,10976 +0,0 @@ -// Package storage provides access to the Cloud Storage JSON API. -// -// See https://developers.google.com/storage/docs/json_api/ -// -// Usage example: -// -// import "google.golang.org/api/storage/v1" -// ... -// storageService, err := storage.New(oauthHttpClient) -package storage // import "google.golang.org/api/storage/v1" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" - "io" - "net/http" - "net/url" - "strconv" - "strings" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = ctxhttp.Do - -const apiId = "storage:v1" -const apiName = "storage" -const apiVersion = "v1" -const basePath = "https://www.googleapis.com/storage/v1/" - -// OAuth2 scopes used by this API. 
-const ( - // View and manage your data across Google Cloud Platform services - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - - // View your data across Google Cloud Platform services - CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" - - // Manage your data and permissions in Google Cloud Storage - DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" - - // View your data in Google Cloud Storage - DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" - - // Manage your data in Google Cloud Storage - DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" -) - -func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.BucketAccessControls = NewBucketAccessControlsService(s) - s.Buckets = NewBucketsService(s) - s.Channels = NewChannelsService(s) - s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) - s.Notifications = NewNotificationsService(s) - s.ObjectAccessControls = NewObjectAccessControlsService(s) - s.Objects = NewObjectsService(s) - s.Projects = NewProjectsService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - BucketAccessControls *BucketAccessControlsService - - Buckets *BucketsService - - Channels *ChannelsService - - DefaultObjectAccessControls *DefaultObjectAccessControlsService - - Notifications *NotificationsService - - ObjectAccessControls *ObjectAccessControlsService - - Objects *ObjectsService - - Projects *ProjectsService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewBucketAccessControlsService(s *Service) 
*BucketAccessControlsService { - rs := &BucketAccessControlsService{s: s} - return rs -} - -type BucketAccessControlsService struct { - s *Service -} - -func NewBucketsService(s *Service) *BucketsService { - rs := &BucketsService{s: s} - return rs -} - -type BucketsService struct { - s *Service -} - -func NewChannelsService(s *Service) *ChannelsService { - rs := &ChannelsService{s: s} - return rs -} - -type ChannelsService struct { - s *Service -} - -func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService { - rs := &DefaultObjectAccessControlsService{s: s} - return rs -} - -type DefaultObjectAccessControlsService struct { - s *Service -} - -func NewNotificationsService(s *Service) *NotificationsService { - rs := &NotificationsService{s: s} - return rs -} - -type NotificationsService struct { - s *Service -} - -func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService { - rs := &ObjectAccessControlsService{s: s} - return rs -} - -type ObjectAccessControlsService struct { - s *Service -} - -func NewObjectsService(s *Service) *ObjectsService { - rs := &ObjectsService{s: s} - return rs -} - -type ObjectsService struct { - s *Service -} - -func NewProjectsService(s *Service) *ProjectsService { - rs := &ProjectsService{s: s} - rs.ServiceAccount = NewProjectsServiceAccountService(s) - return rs -} - -type ProjectsService struct { - s *Service - - ServiceAccount *ProjectsServiceAccountService -} - -func NewProjectsServiceAccountService(s *Service) *ProjectsServiceAccountService { - rs := &ProjectsServiceAccountService{s: s} - return rs -} - -type ProjectsServiceAccountService struct { - s *Service -} - -// Bucket: A bucket. -type Bucket struct { - // Acl: Access controls on the bucket. - Acl []*BucketAccessControl `json:"acl,omitempty"` - - // Billing: The bucket's billing configuration. - Billing *BucketBilling `json:"billing,omitempty"` - - // Cors: The bucket's Cross-Origin Resource Sharing (CORS) - // configuration. 
- Cors []*BucketCors `json:"cors,omitempty"` - - // DefaultObjectAcl: Default access controls to apply to new objects - // when no ACL is provided. - DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` - - // Encryption: Encryption configuration used by default for newly - // inserted objects, when no encryption config is specified. - Encryption *BucketEncryption `json:"encryption,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the bucket. - Etag string `json:"etag,omitempty"` - - // Id: The ID of the bucket. For buckets, the id and name properities - // are the same. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For buckets, this is always - // storage#bucket. - Kind string `json:"kind,omitempty"` - - // Labels: User-provided labels, in key/value pairs. - Labels map[string]string `json:"labels,omitempty"` - - // Lifecycle: The bucket's lifecycle configuration. See lifecycle - // management for more information. - Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` - - // Location: The location of the bucket. Object data for objects in the - // bucket resides in physical storage within this region. Defaults to - // US. See the developer's guide for the authoritative list. - Location string `json:"location,omitempty"` - - // Logging: The bucket's logging configuration, which defines the - // destination bucket and optional name prefix for the current bucket's - // logs. - Logging *BucketLogging `json:"logging,omitempty"` - - // Metageneration: The metadata generation of this bucket. - Metageneration int64 `json:"metageneration,omitempty,string"` - - // Name: The name of the bucket. - Name string `json:"name,omitempty"` - - // Owner: The owner of the bucket. This is always the project team's - // owner group. - Owner *BucketOwner `json:"owner,omitempty"` - - // ProjectNumber: The project number of the project the bucket belongs - // to. 
- ProjectNumber uint64 `json:"projectNumber,omitempty,string"` - - // SelfLink: The URI of this bucket. - SelfLink string `json:"selfLink,omitempty"` - - // StorageClass: The bucket's default storage class, used whenever no - // storageClass is specified for a newly-created object. This defines - // how objects in the bucket are stored and determines the SLA and the - // cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, - // NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value - // is not specified when the bucket is created, it will default to - // STANDARD. For more information, see storage classes. - StorageClass string `json:"storageClass,omitempty"` - - // TimeCreated: The creation time of the bucket in RFC 3339 format. - TimeCreated string `json:"timeCreated,omitempty"` - - // Updated: The modification time of the bucket in RFC 3339 format. - Updated string `json:"updated,omitempty"` - - // Versioning: The bucket's versioning configuration. - Versioning *BucketVersioning `json:"versioning,omitempty"` - - // Website: The bucket's website configuration, controlling how the - // service behaves when accessing bucket contents as a web site. See the - // Static Website Examples for more information. - Website *BucketWebsite `json:"website,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Acl") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Acl") to include in API - // requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Bucket) MarshalJSON() ([]byte, error) { - type noMethod Bucket - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketBilling: The bucket's billing configuration. -type BucketBilling struct { - // RequesterPays: When set to true, bucket is requester pays. - RequesterPays bool `json:"requesterPays,omitempty"` - - // ForceSendFields is a list of field names (e.g. "RequesterPays") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "RequesterPays") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketBilling) MarshalJSON() ([]byte, error) { - type noMethod BucketBilling - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type BucketCors struct { - // MaxAgeSeconds: The value, in seconds, to return in the - // Access-Control-Max-Age header used in preflight responses. 
- MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` - - // Method: The list of HTTP methods on which to include CORS response - // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list - // of methods, and means "any method". - Method []string `json:"method,omitempty"` - - // Origin: The list of Origins eligible to receive CORS response - // headers. Note: "*" is permitted in the list of origins, and means - // "any Origin". - Origin []string `json:"origin,omitempty"` - - // ResponseHeader: The list of HTTP headers other than the simple - // response headers to give permission for the user-agent to share - // across domains. - ResponseHeader []string `json:"responseHeader,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MaxAgeSeconds") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketCors) MarshalJSON() ([]byte, error) { - type noMethod BucketCors - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketEncryption: Encryption configuration used by default for newly -// inserted objects, when no encryption config is specified. 
-type BucketEncryption struct { - DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DefaultKmsKeyName") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *BucketEncryption) MarshalJSON() ([]byte, error) { - type noMethod BucketEncryption - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle -// management for more information. -type BucketLifecycle struct { - // Rule: A lifecycle management rule, which is made of an action to take - // and the condition(s) under which the action will be taken. - Rule []*BucketLifecycleRule `json:"rule,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Rule") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Rule") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { - type noMethod BucketLifecycle - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type BucketLifecycleRule struct { - // Action: The action to take. - Action *BucketLifecycleRuleAction `json:"action,omitempty"` - - // Condition: The condition(s) under which the action will be taken. - Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Action") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Action") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { - type noMethod BucketLifecycleRule - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketLifecycleRuleAction: The action to take. -type BucketLifecycleRuleAction struct { - // StorageClass: Target storage class. Required iff the type of the - // action is SetStorageClass. - StorageClass string `json:"storageClass,omitempty"` - - // Type: Type of the action. Currently, only Delete and SetStorageClass - // are supported. - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "StorageClass") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "StorageClass") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { - type noMethod BucketLifecycleRuleAction - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketLifecycleRuleCondition: The condition(s) under which the action -// will be taken. -type BucketLifecycleRuleCondition struct { - // Age: Age of an object (in days). This condition is satisfied when an - // object reaches the specified age. 
- Age int64 `json:"age,omitempty"` - - // CreatedBefore: A date in RFC 3339 format with only the date part (for - // instance, "2013-01-15"). This condition is satisfied when an object - // is created before midnight of the specified date in UTC. - CreatedBefore string `json:"createdBefore,omitempty"` - - // IsLive: Relevant only for versioned objects. If the value is true, - // this condition matches live objects; if the value is false, it - // matches archived objects. - IsLive *bool `json:"isLive,omitempty"` - - // MatchesStorageClass: Objects having any of the storage classes - // specified by this condition will be matched. Values include - // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and - // DURABLE_REDUCED_AVAILABILITY. - MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` - - // NumNewerVersions: Relevant only for versioned objects. If the value - // is N, this condition is satisfied when there are at least N versions - // (including the live version) newer than this version of the object. - NumNewerVersions int64 `json:"numNewerVersions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Age") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Age") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { - type noMethod BucketLifecycleRuleCondition - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketLogging: The bucket's logging configuration, which defines the -// destination bucket and optional name prefix for the current bucket's -// logs. -type BucketLogging struct { - // LogBucket: The destination bucket where the current bucket's logs - // should be placed. - LogBucket string `json:"logBucket,omitempty"` - - // LogObjectPrefix: A prefix for log object names. - LogObjectPrefix string `json:"logObjectPrefix,omitempty"` - - // ForceSendFields is a list of field names (e.g. "LogBucket") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LogBucket") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketLogging) MarshalJSON() ([]byte, error) { - type noMethod BucketLogging - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketOwner: The owner of the bucket. This is always the project -// team's owner group. -type BucketOwner struct { - // Entity: The entity, in the form project-owner-projectId. 
- Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity. - EntityId string `json:"entityId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Entity") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Entity") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketOwner) MarshalJSON() ([]byte, error) { - type noMethod BucketOwner - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketVersioning: The bucket's versioning configuration. -type BucketVersioning struct { - // Enabled: While set to true, versioning is fully enabled for this - // bucket. - Enabled bool `json:"enabled,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"Enabled") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketVersioning) MarshalJSON() ([]byte, error) { - type noMethod BucketVersioning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketWebsite: The bucket's website configuration, controlling how -// the service behaves when accessing bucket contents as a web site. See -// the Static Website Examples for more information. -type BucketWebsite struct { - // MainPageSuffix: If the requested object path is missing, the service - // will ensure the path has a trailing '/', append this suffix, and - // attempt to retrieve the resulting object. This allows the creation of - // index.html objects to represent directory pages. - MainPageSuffix string `json:"mainPageSuffix,omitempty"` - - // NotFoundPage: If the requested object path is missing, and any - // mainPageSuffix object is missing, if applicable, the service will - // return the named object from this bucket as the content for a 404 Not - // Found result. - NotFoundPage string `json:"notFoundPage,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"MainPageSuffix") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *BucketWebsite) MarshalJSON() ([]byte, error) { - type noMethod BucketWebsite - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketAccessControl: An access-control entry. -type BucketAccessControl struct { - // Bucket: The name of the bucket. - Bucket string `json:"bucket,omitempty"` - - // Domain: The domain associated with the entity, if any. - Domain string `json:"domain,omitempty"` - - // Email: The email address associated with the entity, if any. - Email string `json:"email,omitempty"` - - // Entity: The entity holding the permission, in one of the following - // forms: - // - user-userId - // - user-email - // - group-groupId - // - group-email - // - domain-domain - // - project-team-projectId - // - allUsers - // - allAuthenticatedUsers Examples: - // - The user liz@example.com would be user-liz@example.com. - // - The group example@googlegroups.com would be - // group-example@googlegroups.com. - // - To refer to all members of the Google Apps for Business domain - // example.com, the entity would be domain-example.com. - Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity, if any. - EntityId string `json:"entityId,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the access-control entry. - Etag string `json:"etag,omitempty"` - - // Id: The ID of the access-control entry. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For bucket access control entries, - // this is always storage#bucketAccessControl. 
- Kind string `json:"kind,omitempty"` - - // ProjectTeam: The project team associated with the entity, if any. - ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` - - // Role: The access permission for the entity. - Role string `json:"role,omitempty"` - - // SelfLink: The link to this access-control entry. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bucket") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Bucket") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { - type noMethod BucketAccessControl - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketAccessControlProjectTeam: The project team associated with the -// entity, if any. -type BucketAccessControlProjectTeam struct { - // ProjectNumber: The project number. - ProjectNumber string `json:"projectNumber,omitempty"` - - // Team: The team. - Team string `json:"team,omitempty"` - - // ForceSendFields is a list of field names (e.g. 
"ProjectNumber") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ProjectNumber") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { - type noMethod BucketAccessControlProjectTeam - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketAccessControls: An access-control list. -type BucketAccessControls struct { - // Items: The list of items. - Items []*BucketAccessControl `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of bucket access control - // entries, this is always storage#bucketAccessControls. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { - type noMethod BucketAccessControls - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Buckets: A list of buckets. -type Buckets struct { - // Items: The list of items. - Items []*Bucket `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of buckets, this is always - // storage#buckets. - Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Buckets) MarshalJSON() ([]byte, error) { - type noMethod Buckets - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Channel: An notification channel used to watch for resource changes. -type Channel struct { - // Address: The address where notifications are delivered for this - // channel. - Address string `json:"address,omitempty"` - - // Expiration: Date and time of notification channel expiration, - // expressed as a Unix timestamp, in milliseconds. Optional. - Expiration int64 `json:"expiration,omitempty,string"` - - // Id: A UUID or similar unique string that identifies this channel. - Id string `json:"id,omitempty"` - - // Kind: Identifies this as a notification channel used to watch for - // changes to a resource. Value: the fixed string "api#channel". - Kind string `json:"kind,omitempty"` - - // Params: Additional parameters controlling delivery channel behavior. - // Optional. - Params map[string]string `json:"params,omitempty"` - - // Payload: A Boolean value to indicate whether payload is wanted. - // Optional. - Payload bool `json:"payload,omitempty"` - - // ResourceId: An opaque ID that identifies the resource being watched - // on this channel. Stable across different API versions. - ResourceId string `json:"resourceId,omitempty"` - - // ResourceUri: A version-specific identifier for the watched resource. - ResourceUri string `json:"resourceUri,omitempty"` - - // Token: An arbitrary string delivered to the target address with each - // notification delivered over this channel. Optional. - Token string `json:"token,omitempty"` - - // Type: The type of delivery mechanism used for this channel. 
- Type string `json:"type,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Address") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Address") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Channel) MarshalJSON() ([]byte, error) { - type noMethod Channel - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ComposeRequest: A Compose request. -type ComposeRequest struct { - // Destination: Properties of the resulting object. - Destination *Object `json:"destination,omitempty"` - - // Kind: The kind of item this is. - Kind string `json:"kind,omitempty"` - - // SourceObjects: The list of source objects that will be concatenated - // into a single object. - SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Destination") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Destination") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ComposeRequest) MarshalJSON() ([]byte, error) { - type noMethod ComposeRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ComposeRequestSourceObjects struct { - // Generation: The generation of this object to use as the source. - Generation int64 `json:"generation,omitempty,string"` - - // Name: The source object's name. The source object's bucket is - // implicitly the destination bucket. - Name string `json:"name,omitempty"` - - // ObjectPreconditions: Conditions that must be met for this operation - // to execute. - ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Generation") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"Generation") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { - type noMethod ComposeRequestSourceObjects - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must -// be met for this operation to execute. -type ComposeRequestSourceObjectsObjectPreconditions struct { - // IfGenerationMatch: Only perform the composition if the generation of - // the source object that would be used matches this value. If this - // value and a generation are both specified, they must be the same - // value or the call will fail. - IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` - - // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IfGenerationMatch") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. 
This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { - type noMethod ComposeRequestSourceObjectsObjectPreconditions - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Notification: A subscription to receive Google PubSub notifications. -type Notification struct { - // CustomAttributes: An optional list of additional attributes to attach - // to each Cloud PubSub message published for this notification - // subscription. - CustomAttributes map[string]string `json:"custom_attributes,omitempty"` - - // Etag: HTTP 1.1 Entity tag for this subscription notification. - Etag string `json:"etag,omitempty"` - - // EventTypes: If present, only send notifications about listed event - // types. If empty, sent notifications for all event types. - EventTypes []string `json:"event_types,omitempty"` - - // Id: The ID of the notification. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For notifications, this is always - // storage#notification. - Kind string `json:"kind,omitempty"` - - // ObjectNamePrefix: If present, only apply this notification - // configuration to object names that begin with this prefix. - ObjectNamePrefix string `json:"object_name_prefix,omitempty"` - - // PayloadFormat: The desired content of the Payload. - PayloadFormat string `json:"payload_format,omitempty"` - - // SelfLink: The canonical URL of this notification. - SelfLink string `json:"selfLink,omitempty"` - - // Topic: The Cloud PubSub topic to which this subscription publishes. - // Formatted as: - // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topi - // c}' - Topic string `json:"topic,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CustomAttributes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CustomAttributes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Notification) MarshalJSON() ([]byte, error) { - type noMethod Notification - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Notifications: A list of notification subscriptions. -type Notifications struct { - // Items: The list of items. - Items []*Notification `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of notifications, this is - // always storage#notifications. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Notifications) MarshalJSON() ([]byte, error) { - type noMethod Notifications - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Object: An object. -type Object struct { - // Acl: Access controls on the object. - Acl []*ObjectAccessControl `json:"acl,omitempty"` - - // Bucket: The name of the bucket containing this object. - Bucket string `json:"bucket,omitempty"` - - // CacheControl: Cache-Control directive for the object data. If - // omitted, and the object is accessible to all anonymous users, the - // default will be public, max-age=3600. - CacheControl string `json:"cacheControl,omitempty"` - - // ComponentCount: Number of underlying components that make up this - // object. Components are accumulated by compose operations. - ComponentCount int64 `json:"componentCount,omitempty"` - - // ContentDisposition: Content-Disposition of the object data. - ContentDisposition string `json:"contentDisposition,omitempty"` - - // ContentEncoding: Content-Encoding of the object data. - ContentEncoding string `json:"contentEncoding,omitempty"` - - // ContentLanguage: Content-Language of the object data. - ContentLanguage string `json:"contentLanguage,omitempty"` - - // ContentType: Content-Type of the object data. If an object is stored - // without a Content-Type, it is served as application/octet-stream. 
- ContentType string `json:"contentType,omitempty"` - - // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; - // encoded using base64 in big-endian byte order. For more information - // about using the CRC32c checksum, see Hashes and ETags: Best - // Practices. - Crc32c string `json:"crc32c,omitempty"` - - // CustomerEncryption: Metadata of customer-supplied encryption key, if - // the object is encrypted by such a key. - CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the object. - Etag string `json:"etag,omitempty"` - - // Generation: The content generation of this object. Used for object - // versioning. - Generation int64 `json:"generation,omitempty,string"` - - // Id: The ID of the object, including the bucket name, object name, and - // generation number. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For objects, this is always - // storage#object. - Kind string `json:"kind,omitempty"` - - // KmsKeyName: Cloud KMS Key used to encrypt this object, if the object - // is encrypted by such a key. - KmsKeyName string `json:"kmsKeyName,omitempty"` - - // Md5Hash: MD5 hash of the data; encoded using base64. For more - // information about using the MD5 hash, see Hashes and ETags: Best - // Practices. - Md5Hash string `json:"md5Hash,omitempty"` - - // MediaLink: Media download link. - MediaLink string `json:"mediaLink,omitempty"` - - // Metadata: User-provided metadata, in key/value pairs. - Metadata map[string]string `json:"metadata,omitempty"` - - // Metageneration: The version of the metadata for this object at this - // generation. Used for preconditions and for detecting changes in - // metadata. A metageneration number is only meaningful in the context - // of a particular generation of a particular object. - Metageneration int64 `json:"metageneration,omitempty,string"` - - // Name: The name of the object. 
Required if not specified by URL - // parameter. - Name string `json:"name,omitempty"` - - // Owner: The owner of the object. This will always be the uploader of - // the object. - Owner *ObjectOwner `json:"owner,omitempty"` - - // SelfLink: The link to this object. - SelfLink string `json:"selfLink,omitempty"` - - // Size: Content-Length of the data in bytes. - Size uint64 `json:"size,omitempty,string"` - - // StorageClass: Storage class of the object. - StorageClass string `json:"storageClass,omitempty"` - - // TimeCreated: The creation time of the object in RFC 3339 format. - TimeCreated string `json:"timeCreated,omitempty"` - - // TimeDeleted: The deletion time of the object in RFC 3339 format. Will - // be returned if and only if this version of the object has been - // deleted. - TimeDeleted string `json:"timeDeleted,omitempty"` - - // TimeStorageClassUpdated: The time at which the object's storage class - // was last changed. When the object is initially created, it will be - // set to timeCreated. - TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"` - - // Updated: The modification time of the object metadata in RFC 3339 - // format. - Updated string `json:"updated,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Acl") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Acl") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Object) MarshalJSON() ([]byte, error) { - type noMethod Object - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ObjectCustomerEncryption: Metadata of customer-supplied encryption -// key, if the object is encrypted by such a key. -type ObjectCustomerEncryption struct { - // EncryptionAlgorithm: The encryption algorithm. - EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` - - // KeySha256: SHA256 hash value of the encryption key. - KeySha256 string `json:"keySha256,omitempty"` - - // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "EncryptionAlgorithm") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { - type noMethod ObjectCustomerEncryption - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ObjectOwner: The owner of the object. This will always be the -// uploader of the object. -type ObjectOwner struct { - // Entity: The entity, in the form user-userId. - Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity. - EntityId string `json:"entityId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Entity") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Entity") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ObjectOwner) MarshalJSON() ([]byte, error) { - type noMethod ObjectOwner - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ObjectAccessControl: An access-control entry. -type ObjectAccessControl struct { - // Bucket: The name of the bucket. - Bucket string `json:"bucket,omitempty"` - - // Domain: The domain associated with the entity, if any. - Domain string `json:"domain,omitempty"` - - // Email: The email address associated with the entity, if any. 
- Email string `json:"email,omitempty"` - - // Entity: The entity holding the permission, in one of the following - // forms: - // - user-userId - // - user-email - // - group-groupId - // - group-email - // - domain-domain - // - project-team-projectId - // - allUsers - // - allAuthenticatedUsers Examples: - // - The user liz@example.com would be user-liz@example.com. - // - The group example@googlegroups.com would be - // group-example@googlegroups.com. - // - To refer to all members of the Google Apps for Business domain - // example.com, the entity would be domain-example.com. - Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity, if any. - EntityId string `json:"entityId,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the access-control entry. - Etag string `json:"etag,omitempty"` - - // Generation: The content generation of the object, if applied to an - // object. - Generation int64 `json:"generation,omitempty,string"` - - // Id: The ID of the access-control entry. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For object access control entries, - // this is always storage#objectAccessControl. - Kind string `json:"kind,omitempty"` - - // Object: The name of the object, if applied to an object. - Object string `json:"object,omitempty"` - - // ProjectTeam: The project team associated with the entity, if any. - ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` - - // Role: The access permission for the entity. - Role string `json:"role,omitempty"` - - // SelfLink: The link to this access-control entry. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bucket") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Bucket") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { - type noMethod ObjectAccessControl - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ObjectAccessControlProjectTeam: The project team associated with the -// entity, if any. -type ObjectAccessControlProjectTeam struct { - // ProjectNumber: The project number. - ProjectNumber string `json:"projectNumber,omitempty"` - - // Team: The team. - Team string `json:"team,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ProjectNumber") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ProjectNumber") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { - type noMethod ObjectAccessControlProjectTeam - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ObjectAccessControls: An access-control list. -type ObjectAccessControls struct { - // Items: The list of items. - Items []*ObjectAccessControl `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of object access control - // entries, this is always storage#objectAccessControls. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { - type noMethod ObjectAccessControls - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Objects: A list of objects. 
-type Objects struct { - // Items: The list of items. - Items []*Object `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of objects, this is always - // storage#objects. - Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Prefixes: The list of prefixes of objects matching-but-not-listed up - // to and including the requested delimiter. - Prefixes []string `json:"prefixes,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Objects) MarshalJSON() ([]byte, error) { - type noMethod Objects - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Policy: A bucket/object IAM policy. 
-type Policy struct { - // Bindings: An association between a role, which comes with a set of - // permissions, and members who may assume that role. - Bindings []*PolicyBindings `json:"bindings,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the policy. - Etag string `json:"etag,omitempty"` - - // Kind: The kind of item this is. For policies, this is always - // storage#policy. This field is ignored on input. - Kind string `json:"kind,omitempty"` - - // ResourceId: The ID of the resource to which this policy belongs. Will - // be of the form projects/_/buckets/bucket for buckets, and - // projects/_/buckets/bucket/objects/object for objects. A specific - // generation may be specified by appending #generationNumber to the end - // of the object name, e.g. - // projects/_/buckets/my-bucket/objects/data.txt#17. The current - // generation can be denoted with #0. This field is ignored on input. - ResourceId string `json:"resourceId,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bindings") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Bindings") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *Policy) MarshalJSON() ([]byte, error) { - type noMethod Policy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type PolicyBindings struct { - Condition interface{} `json:"condition,omitempty"` - - // Members: A collection of identifiers for members who may assume the - // provided role. Recognized identifiers are as follows: - // - allUsers — A special identifier that represents anyone on the - // internet; with or without a Google account. - // - allAuthenticatedUsers — A special identifier that represents - // anyone who is authenticated with a Google account or a service - // account. - // - user:emailid — An email address that represents a specific - // account. For example, user:alice@gmail.com or user:joe@example.com. - // - // - serviceAccount:emailid — An email address that represents a - // service account. For example, - // serviceAccount:my-other-app@appspot.gserviceaccount.com . - // - group:emailid — An email address that represents a Google group. - // For example, group:admins@example.com. - // - domain:domain — A Google Apps domain name that represents all the - // users of that domain. For example, domain:google.com or - // domain:example.com. - // - projectOwner:projectid — Owners of the given project. For - // example, projectOwner:my-example-project - // - projectEditor:projectid — Editors of the given project. For - // example, projectEditor:my-example-project - // - projectViewer:projectid — Viewers of the given project. For - // example, projectViewer:my-example-project - Members []string `json:"members,omitempty"` - - // Role: The role to which members belong. Two types of roles are - // supported: new IAM roles, which grant permissions that do not map - // directly to those provided by ACLs, and legacy IAM roles, which do - // map directly to ACL permissions. All roles are of the format - // roles/storage.specificRole. 
- // The new IAM roles are: - // - roles/storage.admin — Full control of Google Cloud Storage - // resources. - // - roles/storage.objectViewer — Read-Only access to Google Cloud - // Storage objects. - // - roles/storage.objectCreator — Access to create objects in Google - // Cloud Storage. - // - roles/storage.objectAdmin — Full control of Google Cloud Storage - // objects. The legacy IAM roles are: - // - roles/storage.legacyObjectReader — Read-only access to objects - // without listing. Equivalent to an ACL entry on an object with the - // READER role. - // - roles/storage.legacyObjectOwner — Read/write access to existing - // objects without listing. Equivalent to an ACL entry on an object with - // the OWNER role. - // - roles/storage.legacyBucketReader — Read access to buckets with - // object listing. Equivalent to an ACL entry on a bucket with the - // READER role. - // - roles/storage.legacyBucketWriter — Read access to buckets with - // object listing/creation/deletion. Equivalent to an ACL entry on a - // bucket with the WRITER role. - // - roles/storage.legacyBucketOwner — Read and write access to - // existing buckets with object listing/creation/deletion. Equivalent to - // an ACL entry on a bucket with the OWNER role. - Role string `json:"role,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Condition") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Condition") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PolicyBindings) MarshalJSON() ([]byte, error) { - type noMethod PolicyBindings - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RewriteResponse: A rewrite response. -type RewriteResponse struct { - // Done: true if the copy is finished; otherwise, false if the copy is - // in progress. This property is always present in the response. - Done bool `json:"done,omitempty"` - - // Kind: The kind of item this is. - Kind string `json:"kind,omitempty"` - - // ObjectSize: The total size of the object being copied in bytes. This - // property is always present in the response. - ObjectSize int64 `json:"objectSize,omitempty,string"` - - // Resource: A resource containing the metadata for the copied-to - // object. This property is present in the response only when copying - // completes. - Resource *Object `json:"resource,omitempty"` - - // RewriteToken: A token to use in subsequent requests to continue - // copying data. This token is present in the response only when there - // is more data to copy. - RewriteToken string `json:"rewriteToken,omitempty"` - - // TotalBytesRewritten: The total bytes written so far, which can be - // used to provide a waiting user with a progress indicator. This - // property is always present in the response. - TotalBytesRewritten int64 `json:"totalBytesRewritten,omitempty,string"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Done") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Done") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RewriteResponse) MarshalJSON() ([]byte, error) { - type noMethod RewriteResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ServiceAccount: A subscription to receive Google PubSub -// notifications. -type ServiceAccount struct { - // EmailAddress: The ID of the notification. - EmailAddress string `json:"email_address,omitempty"` - - // Kind: The kind of item this is. For notifications, this is always - // storage#notification. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "EmailAddress") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "EmailAddress") to include - // in API requests with the JSON null value. 
By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { - type noMethod ServiceAccount - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TestIamPermissionsResponse: A -// storage.(buckets|objects).testIamPermissions response. -type TestIamPermissionsResponse struct { - // Kind: The kind of item this is. - Kind string `json:"kind,omitempty"` - - // Permissions: The permissions held by the caller. Permissions are - // always of the format storage.resource.capability, where resource is - // one of buckets or objects. The supported permissions are as follows: - // - // - storage.buckets.delete — Delete bucket. - // - storage.buckets.get — Read bucket metadata. - // - storage.buckets.getIamPolicy — Read bucket IAM policy. - // - storage.buckets.create — Create bucket. - // - storage.buckets.list — List buckets. - // - storage.buckets.setIamPolicy — Update bucket IAM policy. - // - storage.buckets.update — Update bucket metadata. - // - storage.objects.delete — Delete object. - // - storage.objects.get — Read object data and metadata. - // - storage.objects.getIamPolicy — Read object IAM policy. - // - storage.objects.create — Create object. - // - storage.objects.list — List objects. - // - storage.objects.setIamPolicy — Update object IAM policy. - // - storage.objects.update — Update object metadata. - Permissions []string `json:"permissions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. 
"Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { - type noMethod TestIamPermissionsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// method id "storage.bucketAccessControls.delete": - -type BucketAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified bucket. -func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { - c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. 
-func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketAccessControlsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.delete" call. -func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.bucketAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.get": - -type BucketAccessControlsGetCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the ACL entry for the specified entity on the specified -// bucket. -func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { - c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. 
-func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketAccessControlsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.get" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.insert": - -type BucketAccessControlsInsertCall struct { - s *Service - bucket string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a new ACL entry on the specified bucket. -func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { - c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketAccessControlsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.insert" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new ACL entry on the specified bucket.", - // "httpMethod": "POST", - // "id": "storage.bucketAccessControls.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.list": - -type BucketAccessControlsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves ACL entries on the specified bucket. 
-func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { - c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BucketAccessControlsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.list" call. -// Exactly one of *BucketAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BucketAccessControls{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves ACL entries on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl", - // "response": { - // "$ref": "BucketAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.patch": - -type BucketAccessControlsPatchCall struct { - s *Service - bucket string - entity string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates an ACL entry on the specified bucket. This method -// supports patch semantics. 
-func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { - c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BucketAccessControlsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.patch" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "storage.bucketAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.update": - -type BucketAccessControlsUpdateCall struct { - s *Service - bucket string - entity string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an ACL entry on the specified bucket. -func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { - c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketAccessControlsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.update" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.bucketAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.buckets.delete": - -type BucketsDeleteCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Permanently deletes an empty bucket. -func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { - c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If set, only deletes the bucket if its -// metageneration matches this value. -func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If set, only deletes the bucket if its -// metageneration does not match this value. -func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.delete" call. -func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Permanently deletes an empty bucket.", - // "httpMethod": "DELETE", - // "id": "storage.buckets.delete", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "If set, only deletes the bucket if its metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If set, only deletes the bucket if its metageneration does not match this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.get": - -type BucketsGetCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns metadata for the specified bucket. 
-func (r *BucketsService) Get(bucket string) *BucketsGetCall { - c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.get" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns metadata for the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.buckets.get", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. 
Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.getIamPolicy": - -type BucketsGetIamPolicyCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Returns an IAM policy for the specified bucket. -func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { - c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns an IAM policy for the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.buckets.getIamPolicy", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/iam", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.insert": - -type BucketsInsertCall struct { - s *Service - bucket *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a new bucket. -func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { - c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.urlParams_.Set("project", projectid) - c.bucket = bucket - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and -// allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers -// get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and -// allUsers get WRITER access. -func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. 
-// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. -func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the bucket resource -// specifies acl or defaultObjectAcl properties, when it defaults to -// full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.insert" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new bucket.", - // "httpMethod": "POST", - // "id": "storage.buckets.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.list": - -type BucketsListCall struct { - s *Service - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of buckets for a given project. -func (r *BucketsService) List(projectid string) *BucketsListCall { - c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.urlParams_.Set("project", projectid) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of buckets to return in a single response. The service will use this -// parameter or 1,000 items, whichever is smaller. -func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// buckets whose names begin with this prefix. -func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. 
-// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsListCall) Projection(projection string) *BucketsListCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BucketsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.list" call. -// Exactly one of *Buckets or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Buckets.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Buckets{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of buckets for a given project.", - // "httpMethod": "GET", - // "id": "storage.buckets.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to buckets whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b", - // "response": { - // "$ref": "Buckets" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "storage.buckets.patch": - -type BucketsPatchCall struct { - s *Service - bucket string - bucket2 *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates a bucket. Changes to the bucket will be readable -// immediately after writing, but configuration changes may take time to -// propagate. This method supports patch semantics. 
-func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { - c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.bucket2 = bucket2 - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and -// allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers -// get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and -// allUsers get WRITER access. -func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. 
-// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. -func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.patch" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "storage.buckets.patch", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project 
team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.buckets.setIamPolicy": - -type BucketsSetIamPolicyCall struct { - s *Service - bucket string - policy *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Updates an IAM policy for the specified bucket. -func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { - c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.policy = policy - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BucketsSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an IAM policy for the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.buckets.setIamPolicy", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/iam", - // "request": { - // "$ref": "Policy" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.testIamPermissions": - -type BucketsTestIamPermissionsCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Tests a set of permissions on the given bucket to -// see which, if any, are held by the caller. 
-func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall { - c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BucketsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", - // "httpMethod": "GET", - // "id": "storage.buckets.testIamPermissions", - // "parameterOrder": [ - // "bucket", - // "permissions" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "permissions": { - // "description": "Permissions to test.", - // "location": "query", - // "repeated": true, - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/iam/testPermissions", - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.update": - -type BucketsUpdateCall struct { - s *Service - bucket string - bucket2 *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a bucket. Changes to the bucket will be readable -// immediately after writing, but configuration changes may take time to -// propagate. -func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { - c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.bucket2 = bucket2 - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. 
-func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and -// allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers -// get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and -// allUsers get WRITER access. -func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. 
-func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BucketsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.update" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - // "httpMethod": "PUT", - // "id": "storage.buckets.update", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // 
"Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.channels.stop": - -type ChannelsStopCall struct { - s *Service - channel *Channel - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Stop: Stop watching resources through this channel -func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { - c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.channel = channel - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ChannelsStopCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.channels.stop" call. -func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Stop watching resources through this channel", - // "httpMethod": "POST", - // "id": "storage.channels.stop", - // "path": "channels/stop", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.delete": - -type DefaultObjectAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - 
-// Delete: Permanently deletes the default object ACL entry for the -// specified entity on the specified bucket. -func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { - c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.delete" call. -func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.defaultObjectAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.get": - -type DefaultObjectAccessControlsGetCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the default object ACL entry for the specified entity on -// the specified bucket. -func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { - c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.get" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.insert": - -type DefaultObjectAccessControlsInsertCall struct { - s *Service - bucket string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a new default object ACL entry on the specified -// bucket. -func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { - c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.insert" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new default object ACL entry on the specified bucket.", - // "httpMethod": "POST", - // "id": "storage.defaultObjectAccessControls.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.list": - -type DefaultObjectAccessControlsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves default object ACL entries on the specified bucket. 
-func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { - c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If present, only return default ACL listing -// if the bucket's current metageneration matches this value. -func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If present, only return default ACL -// listing if the bucket's current metageneration does not match the -// given value. -func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DefaultObjectAccessControlsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControls{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves default object ACL entries on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl", - // "response": { - // "$ref": "ObjectAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.patch": - -type DefaultObjectAccessControlsPatchCall struct { - s *Service - bucket string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates a default object ACL entry on the specified bucket. -// This method supports patch semantics. -func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { - c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "storage.defaultObjectAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.update": - -type DefaultObjectAccessControlsUpdateCall struct { - s *Service - bucket string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a default object ACL entry on the specified bucket. -func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { - c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a default object ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.defaultObjectAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.notifications.delete": - -type NotificationsDeleteCall struct { - s *Service - bucket string - notification string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Permanently deletes a notification subscription. -func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall { - c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.notification = notification - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *NotificationsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "notification": c.notification, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.notifications.delete" call. -func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Permanently deletes a notification subscription.", - // "httpMethod": "DELETE", - // "id": "storage.notifications.delete", - // "parameterOrder": [ - // "bucket", - // "notification" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the notification.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "notification": { - // "description": "ID of the notification to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs/{notification}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.notifications.get": - -type NotificationsGetCall struct { - s *Service - bucket string - notification string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: View a notification configuration. -func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall { - c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.notification = notification - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NotificationsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "notification": c.notification, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.notifications.get" call. -// Exactly one of *Notification or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notification.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Notification{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "View a notification configuration.", - // "httpMethod": "GET", - // "id": "storage.notifications.get", - // "parameterOrder": [ - // "bucket", - // "notification" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the notification.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "notification": { - // "description": "Notification ID", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs/{notification}", - // "response": { - // "$ref": "Notification" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.notifications.insert": - -type NotificationsInsertCall struct { - s *Service - bucket string - notification *Notification - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a notification subscription for a given bucket. -func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall { - c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.notification = notification - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NotificationsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.notifications.insert" call. -// Exactly one of *Notification or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notification.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Notification{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a notification subscription for a given bucket.", - // "httpMethod": "POST", - // "id": "storage.notifications.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the notification.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs", - // "request": { - // "$ref": "Notification" - // }, - // "response": { - // "$ref": "Notification" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.notifications.list": - -type NotificationsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of notification subscriptions for a given -// bucket. 
-func (r *NotificationsService) List(bucket string) *NotificationsListCall { - c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *NotificationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.notifications.list" call. -// Exactly one of *Notifications or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notifications.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Notifications{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of notification subscriptions for a given bucket.", - // "httpMethod": "GET", - // "id": "storage.notifications.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a Google Cloud Storage bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs", - // "response": { - // "$ref": "Notifications" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objectAccessControls.delete": - -type ObjectAccessControlsDeleteCall struct { - s *Service - bucket string - object string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified object. -func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { - c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectAccessControlsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.delete" call. -func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", - // "httpMethod": "DELETE", - // "id": "storage.objectAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.get": - -type ObjectAccessControlsGetCall struct { - s *Service - bucket string - object string - entity string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the ACL entry for the specified entity on the specified -// object. -func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { - c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectAccessControlsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.get" call. 
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the ACL entry for the specified entity on the specified object.", - // "httpMethod": "GET", - // "id": "storage.objectAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.insert": - -type ObjectAccessControlsInsertCall struct { - s *Service - bucket string - object string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a new ACL entry on the specified object. -func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { - c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). 
-func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectAccessControlsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.insert" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new ACL entry on the specified object.", - // "httpMethod": "POST", - // "id": "storage.objectAccessControls.insert", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest 
version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.list": - -type ObjectAccessControlsListCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves ACL entries on the specified object. -func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { - c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. 
-func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectAccessControlsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControls{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves ACL entries on the specified object.", - // "httpMethod": "GET", - // "id": "storage.objectAccessControls.list", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest 
version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl", - // "response": { - // "$ref": "ObjectAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.patch": - -type ObjectAccessControlsPatchCall struct { - s *Service - bucket string - object string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates an ACL entry on the specified object. This method -// supports patch semantics. -func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { - c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). 
-func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectAccessControlsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an ACL entry on the specified object. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "storage.objectAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.update": - -type ObjectAccessControlsUpdateCall struct { - s *Service - bucket string - object string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an ACL entry on the specified object. 
-func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { - c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ObjectAccessControlsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an ACL entry on the specified object.", - // "httpMethod": "PUT", - // "id": "storage.objectAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objects.compose": - -type ObjectsComposeCall struct { - s *Service - destinationBucket string - destinationObject string - composerequest *ComposeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Compose: Concatenates a list of existing objects into a new object in -// the same bucket. -func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { - c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.composerequest = composerequest - return c -} - -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. 
-func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object -// metadata's kms_key_name value, if any. -func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { - c.urlParams_.Set("kmsKeyName", kmsKeyName) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. -func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsComposeCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value. If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsComposeCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, err - } - return res, nil -} - -// Do executes the "storage.objects.compose" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Concatenates a list of existing objects into a new object in the same bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.compose", - // "parameterOrder": [ - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{destinationBucket}/o/{destinationObject}/compose", - // "request": { - // "$ref": "ComposeRequest" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true - // } - -} - -// method id "storage.objects.copy": - -type ObjectsCopyCall struct { - s *Service - sourceBucket string - sourceObject string - destinationBucket string - destinationObject string - object *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Copy: Copies a source object to a destination object. Optionally -// overrides metadata. -func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { - c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sourceBucket = sourceBucket - c.sourceObject = sourceObject - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.object = object - return c -} - -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. 
-// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. -func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the destination object's -// current generation matches the given value. Setting to 0 makes the -// operation succeed only if there are no live versions of the object. -func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the destination object's current generation does not match the given -// value. If no live object exists, the precondition fails. Setting to 0 -// makes the operation succeed only if there is a live version of the -// object. -func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. 
-func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. -func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's current generation matches the given value. -func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) - return c -} - -// IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's current generation does not match the -// given value. -func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) - return c -} - -// IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. 
-func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) - return c -} - -// IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. -func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { - c.urlParams_.Set("projection", projection) - return c -} - -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). -func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { - c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. -func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsCopyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "sourceBucket": c.sourceBucket, - "sourceObject": c.sourceObject, - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value. If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsCopyCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, err - } - return res, nil -} - -// Do executes the "storage.objects.copy" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Copies a source object to a destination object. Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.copy", - // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. 
Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. 
Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true - // } - -} - -// method id "storage.objects.delete": - -type ObjectsDeleteCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes an object and its metadata. 
Deletions are permanent -// if versioning is not enabled for the bucket, or if the generation -// parameter is used. -func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { - c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// permanently deletes a specific revision of this object (as opposed to -// the latest version, the default). -func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. 
-func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ObjectsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.delete" call. -func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", - // "httpMethod": "DELETE", - // "id": "storage.objects.delete", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.get": - -type ObjectsGetCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Retrieves an object or its metadata. 
-func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { - c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. 
-func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. -func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value. If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, err - } - return res, nil -} - -// Do executes the "storage.objects.get" call. 
-// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an object or its metadata.", - // "httpMethod": "GET", - // "id": "storage.objects.get", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true - // } - -} - -// method id "storage.objects.getIamPolicy": - -type ObjectsGetIamPolicyCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Returns an IAM policy for the specified object. -func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { - c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. 
Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns an IAM policy for the specified object.", - // "httpMethod": "GET", - // "id": "storage.objects.getIamPolicy", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/iam", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.insert": - -type ObjectsInsertCall struct { - s *Service - bucket string - object *Object - urlParams_ gensupport.URLParams - mediaInfo_ *gensupport.MediaInfo - ctx_ context.Context - header_ http.Header -} - -// Insert: Stores a new object and metadata. -func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { - c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// ContentEncoding sets the optional parameter "contentEncoding": If -// set, sets the contentEncoding property of the final object to this -// value. Setting this parameter is equivalent to setting the -// contentEncoding metadata property. This can be useful when uploading -// an object with uploadType=media to indicate the encoding of the -// content being uploaded. 
-func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { - c.urlParams_.Set("contentEncoding", contentEncoding) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. 
-func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object -// metadata's kms_key_name value, if any. -func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { - c.urlParams_.Set("kmsKeyName", kmsKeyName) - return c -} - -// Name sets the optional parameter "name": Name of the object. Required -// when the object metadata is not otherwise provided. Overrides the -// object metadata's name value, if any. For information about how to -// URL encode object names to be path safe, see Encoding URI Path Parts. -func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { - c.urlParams_.Set("name", name) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. 
-func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Media specifies the media to upload in one or more chunks. The chunk -// size may be controlled by supplying a MediaOption generated by -// googleapi.ChunkSize. The chunk size defaults to -// googleapi.DefaultUploadChunkSize.The Content-Type header used in the -// upload request will be determined by sniffing the contents of r, -// unless a MediaOption generated by googleapi.ContentType is -// supplied. -// At most one of Media and ResumableMedia may be set. -func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { - if ct := c.object.ContentType; ct != "" { - options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...) - } - c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options) - return c -} - -// ResumableMedia specifies the media to upload in chunks and can be -// canceled with ctx. -// -// Deprecated: use Media instead. -// -// At most one of Media and ResumableMedia may be set. mediaType -// identifies the MIME media type of the upload, such as "image/png". If -// mediaType is "", it will be auto-detected. 
The provided ctx will -// supersede any context previously provided to the Context method. -func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { - c.ctx_ = ctx - c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) - return c -} - -// ProgressUpdater provides a callback function that will be called -// after every chunk. It should be a low-latency function in order to -// not slow down the upload operation. This should only be called when -// using ResumableMedia (as opposed to Media). -func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { - c.mediaInfo_.SetProgressUpdater(pu) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -// This context will supersede any context previously provided to the -// ResumableMedia method. -func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ObjectsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") - if c.mediaInfo_ != nil { - urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) - c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) - } - if body == nil { - body = new(bytes.Buffer) - reqHeaders.Set("Content-Type", "application/json") - } - body, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) - defer cleanup() - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.insert" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) - if rx != nil { - rx.Client = c.s.client - rx.UserAgent = c.s.userAgent() - ctx := c.ctx_ - if ctx == nil { - ctx = context.TODO() - } - res, err = rx.Upload(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Stores a new object and metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.insert", - // "mediaUpload": { - // "accept": [ - // "*/*" - // ], - // "protocols": { - // "resumable": { - // "multipart": true, - // "path": "/resumable/upload/storage/v1/b/{bucket}/o" - // }, - // "simple": { - // "multipart": true, - // "path": "/upload/storage/v1/b/{bucket}/o" - // } - // } - // }, - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "contentEncoding": { - // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. 
This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "supportsMediaUpload": true, - // "useMediaDownloadService": true - // } - -} - -// method id "storage.objects.list": - -type ObjectsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of objects matching the criteria. -func (r *ObjectsService) List(bucket string) *ObjectsListCall { - c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. -func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { - c.urlParams_.Set("delimiter", delimiter) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return in a single page of responses. As -// duplicate prefixes are omitted, fewer total results may be returned -// than requested. The service will use this parameter or 1,000 items, -// whichever is smaller. 
-func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. -func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. -func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { - c.urlParams_.Set("versions", fmt.Sprint(versions)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.list" call. -// Exactly one of *Objects or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Objects.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Objects{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of objects matching the criteria.", - // "httpMethod": "GET", - // "id": "storage.objects.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to look for objects.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of items plus prefixes to return in a single page of responses. 
As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "b/{bucket}/o", - // "response": { - // "$ref": "Objects" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "storage.objects.patch": - -type ObjectsPatchCall struct { - s *Service - bucket string - object string - object2 *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates an object's metadata. This method supports patch -// semantics. -func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { - c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.object2 = object2 - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. 
Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. 
-func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ObjectsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.patch" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an object's metadata. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "storage.objects.patch", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objects.rewrite": - -type ObjectsRewriteCall struct { - s *Service - sourceBucket string - sourceObject string - destinationBucket string - destinationObject string - object *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Rewrite: Rewrites a source object to a destination object. Optionally -// overrides metadata. -func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { - c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sourceBucket = sourceBucket - c.sourceObject = sourceObject - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.object = object - return c -} - -// DestinationKmsKeyName sets the optional parameter -// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the -// form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object -// metadata's kms_key_name value, if any. 
-func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { - c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) - return c -} - -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. -func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. 
-func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. -func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. -func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's current generation matches the given value. -func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) - return c -} - -// IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's current generation does not match the -// given value. 
-func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) - return c -} - -// IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. -func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) - return c -} - -// IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. -func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) - return c -} - -// MaxBytesRewrittenPerCall sets the optional parameter -// "maxBytesRewrittenPerCall": The maximum number of bytes that will be -// rewritten per rewrite request. Most callers shouldn't need to specify -// this parameter - it is primarily in place to support testing. If -// specified the value must be an integral multiple of 1 MiB (1048576). -// Also, this only applies to requests where the source and destination -// span locations and/or storage classes. Finally, this value must not -// change across rewrite calls else you'll get an error that the -// rewriteToken is invalid. 
-func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { - c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { - c.urlParams_.Set("projection", projection) - return c -} - -// RewriteToken sets the optional parameter "rewriteToken": Include this -// field (from the previous rewrite response) on each rewrite request -// after the first one, until the rewrite response 'done' flag is true. -// Calls that provide a rewriteToken can omit all other request fields, -// but if included those fields must match the values provided in the -// first rewrite request. -func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { - c.urlParams_.Set("rewriteToken", rewriteToken) - return c -} - -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). -func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { - c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsRewriteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "sourceBucket": c.sourceBucket, - "sourceObject": c.sourceObject, - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.rewrite" call. -// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *RewriteResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RewriteResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.rewrite", - // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationKmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. 
Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "maxBytesRewrittenPerCall": { - // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). 
Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "rewriteToken": { - // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "RewriteResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.setIamPolicy": - -type ObjectsSetIamPolicyCall struct { - s *Service - bucket string - object string - policy *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Updates an IAM policy for the specified object. -func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { - c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.policy = policy - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an IAM policy for the specified object.", - // "httpMethod": "PUT", - // "id": "storage.objects.setIamPolicy", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/iam", - // "request": { - // "$ref": "Policy" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.testIamPermissions": - -type ObjectsTestIamPermissionsCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Tests a set of permissions on the given object to -// see which, if any, are held by the caller. -func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { - c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.testIamPermissions" call. 
-// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", - // "httpMethod": "GET", - // "id": "storage.objects.testIamPermissions", - // "parameterOrder": [ - // "bucket", - // "object", - // "permissions" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "permissions": { - // "description": "Permissions to test.", - // "location": "query", - // "repeated": true, - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/iam/testPermissions", - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.update": - -type ObjectsUpdateCall struct { - s *Service - bucket string - object string - object2 *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an object's metadata. -func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { - c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.object2 = object2 - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). 
-func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. 
-func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. -func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. -func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value. If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsUpdateCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, err - } - return res, nil -} - -// Do executes the "storage.objects.update" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an object's metadata.", - // "httpMethod": "PUT", - // "id": "storage.objects.update", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // 
"description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true - // } - -} - -// method id "storage.objects.watchAll": - -type ObjectsWatchAllCall struct { - s *Service - bucket string - channel *Channel - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// WatchAll: Watch for changes on all objects in a bucket. -func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { - c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.channel = channel - return c -} - -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. -func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { - c.urlParams_.Set("delimiter", delimiter) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return in a single page of responses. As -// duplicate prefixes are omitted, fewer total results may be returned -// than requested. The service will use this parameter or 1,000 items, -// whichever is smaller. 
-func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. -func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. -func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { - c.urlParams_.Set("versions", fmt.Sprint(versions)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsWatchAllCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.watchAll" call. -// Exactly one of *Channel or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Channel.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Channel{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Watch for changes on all objects in a bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.watchAll", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to look for objects.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. 
The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "b/{bucket}/o/watch", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, - // "response": { - // "$ref": "Channel" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true - // } - -} - -// method id "storage.projects.serviceAccount.get": - -type ProjectsServiceAccountGetCall struct { - s *Service - projectId string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Get the email address of this project's Google Cloud Storage -// service account. -func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall { - c := &ProjectsServiceAccountGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.projectId = projectId - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsServiceAccountGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsServiceAccountGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsServiceAccountGetCall) Context(ctx context.Context) *ProjectsServiceAccountGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsServiceAccountGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "projectId": c.projectId, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.projects.serviceAccount.get" call. -// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ServiceAccount.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ServiceAccount{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Get the email address of this project's Google Cloud Storage service account.", - // "httpMethod": "GET", - // "id": "storage.projects.serviceAccount.get", - // "parameterOrder": [ - // "projectId" - // ], - // "parameters": { - // "projectId": { - // "description": "Project ID", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/serviceAccount", - // "response": { - // "$ref": "ServiceAccount" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} diff --git a/vendor/google.golang.org/api/transport/http/dial.go 
b/vendor/google.golang.org/api/transport/http/dial.go deleted file mode 100644 index eda6e5eddcc..00000000000 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package transport/http supports network connections to HTTP servers. -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. -package http - -import ( - "errors" - "net/http" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "google.golang.org/api/googleapi/transport" - "google.golang.org/api/internal" - "google.golang.org/api/option" -) - -// NewClient returns an HTTP client for use communicating with a Google cloud -// service, configured with the given ClientOptions. It also returns the endpoint -// for the service as specified in the options. -func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { - var o internal.DialSettings - for _, opt := range opts { - opt.Apply(&o) - } - if err := o.Validate(); err != nil { - return nil, "", err - } - if o.GRPCConn != nil { - return nil, "", errors.New("unsupported gRPC connection specified") - } - // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? 
- if o.HTTPClient != nil { - return o.HTTPClient, o.Endpoint, nil - } - uat := userAgentTransport{ - base: baseTransport(ctx), - userAgent: o.UserAgent, - } - var hc *http.Client - switch { - case o.NoAuth: - hc = &http.Client{Transport: uat} - case o.APIKey != "": - hc = &http.Client{ - Transport: &transport.APIKey{ - Key: o.APIKey, - Transport: uat, - }, - } - default: - creds, err := internal.Creds(ctx, &o) - if err != nil { - return nil, "", err - } - hc = &http.Client{ - Transport: &oauth2.Transport{ - Source: creds.TokenSource, - Base: uat, - }, - } - } - return hc, o.Endpoint, nil -} - -type userAgentTransport struct { - userAgent string - base http.RoundTripper -} - -func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) { - rt := t.base - if rt == nil { - return nil, errors.New("transport: no Transport specified") - } - if t.userAgent == "" { - return rt.RoundTrip(req) - } - newReq := *req - newReq.Header = make(http.Header) - for k, vv := range req.Header { - newReq.Header[k] = vv - } - // TODO(cbro): append to existing User-Agent header? - newReq.Header["User-Agent"] = []string{t.userAgent} - return rt.RoundTrip(&newReq) -} - -// Set at init time by dial_appengine.go. If nil, we're not on App Engine. -var appengineUrlfetchHook func(context.Context) http.RoundTripper - -// baseTransport returns the base HTTP transport. -// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. -func baseTransport(ctx context.Context) http.RoundTripper { - if appengineUrlfetchHook != nil { - return appengineUrlfetchHook(ctx) - } - return http.DefaultTransport -} diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md deleted file mode 100644 index ffc29852085..00000000000 --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ /dev/null @@ -1,90 +0,0 @@ -# Contributing - -1. Sign one of the contributor license agreements below. -1. 
Get the package: - - `go get -d google.golang.org/appengine` -1. Change into the checked out source: - - `cd $GOPATH/src/google.golang.org/appengine` -1. Fork the repo. -1. Set your fork as a remote: - - `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git` -1. Make changes, commit to your fork. -1. Send a pull request with your changes. - The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request. - -# Testing - -## Running system tests - -Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. - -Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. - -Run tests with `goapp test`: - -``` -goapp test -v google.golang.org/appengine/... -``` - -## Contributor License Agreements - -Before we can accept your pull requests you'll need to sign a Contributor -License Agreement (CLA): - -- **If you are an individual writing original source code** and **you own the -intellectual property**, then you'll need to sign an [individual CLA][indvcla]. -- **If you work for a company that wants to allow you to contribute your work**, -then you'll need to sign a [corporate CLA][corpcla]. - -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. - -## Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. 
- -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. 
- -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) - -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/google.golang.org/appengine/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md deleted file mode 100644 index d86768a2c66..00000000000 --- a/vendor/google.golang.org/appengine/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Go App Engine packages - -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) - -This repository supports the Go runtime on *App Engine standard*. -It provides APIs for interacting with App Engine services. -Its canonical import path is `google.golang.org/appengine`. - -See https://cloud.google.com/appengine/docs/go/ -for more information. - -File issue reports and feature requests on the [GitHub's issue -tracker](https://github.com/golang/appengine/issues). - -## Upgrading an App Engine app to the flexible environment - -This package does not work on *App Engine flexible*. - -There are many differences between the App Engine standard environment and -the flexible environment. - -See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading). - -## Directory structure - -The top level directory of this repository is the `appengine` package. It -contains the -basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API -packages are in subdirectories (e.g. `datastore`). 
- -There is an `internal` subdirectory that contains service protocol buffers, -plus packages required for connectivity to make API calls. App Engine apps -should not directly import any package under `internal`. - -## Updating from legacy (`import "appengine"`) packages - -If you're currently using the bare `appengine` packages -(that is, not these ones, imported via `google.golang.org/appengine`), -then you can use the `aefix` tool to help automate an upgrade to these packages. - -Run `go get google.golang.org/appengine/cmd/aefix` to install it. - -### 1. Update import paths - -The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`. -You will need to update your code to use import paths starting with that; for instance, -code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`. - -### 2. Update code using deprecated, removed or modified APIs - -Most App Engine services are available with exactly the same API. -A few APIs were cleaned up, and there are some differences: - -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. -* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. -* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. -* `appengine.Datacenter` now takes a `context.Context` argument. -* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels. -* `delay.Call` now returns an error. -* `search.FieldLoadSaver` now handles document metadata. -* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the - `context.Context` instead. -* `aetest` no longer declares its own Context type, and uses the standard one instead. -* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been - deprecated and unused for a long time. 
-* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature. - Use `appengine.ModuleHostname`and `appengine.ModuleName` instead. -* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated. - Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the - feature you require is not present in the new - [blobstore package](https://google.golang.org/appengine/blobstore). -* `appengine/socket` is not required on App Engine flexible environment / Managed VMs. - Use the standard `net` package instead. diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go deleted file mode 100644 index d4f808442b7..00000000000 --- a/vendor/google.golang.org/appengine/appengine.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package appengine provides basic functionality for Google App Engine. -// -// For more information on how to write Go apps for Google App Engine, see: -// https://cloud.google.com/appengine/docs/go/ -package appengine // import "google.golang.org/appengine" - -import ( - "net/http" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// The gophers party all night; the rabbits provide the beats. - -// Main is the principal entry point for an app running in App Engine. -// -// On App Engine Flexible it installs a trivial health checker if one isn't -// already registered, and starts listening on port 8080 (overridden by the -// $PORT environment variable). -// -// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests -// for details on how to do your own health checking. -// -// On App Engine Standard it ensures the server has started and is prepared to -// receive requests. 
-// -// Main never returns. -// -// Main is designed so that the app's main package looks like this: -// -// package main -// -// import ( -// "google.golang.org/appengine" -// -// _ "myapp/package0" -// _ "myapp/package1" -// ) -// -// func main() { -// appengine.Main() -// } -// -// The "myapp/packageX" packages are expected to register HTTP handlers -// in their init functions. -func Main() { - internal.Main() -} - -// IsDevAppServer reports whether the App Engine app is running in the -// development App Server. -func IsDevAppServer() bool { - return internal.IsDevAppServer() -} - -// NewContext returns a context for an in-flight HTTP request. -// This function is cheap. -func NewContext(req *http.Request) context.Context { - return WithContext(context.Background(), req) -} - -// WithContext returns a copy of the parent context -// and associates it with an in-flight HTTP request. -// This function is cheap. -func WithContext(parent context.Context, req *http.Request) context.Context { - return internal.WithContext(parent, req) -} - -// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. - -// BlobKey is a key for a blobstore blob. -// -// Conceptually, this type belongs in the blobstore package, but it lives in -// the appengine package to avoid a circular dependency: blobstore depends on -// datastore, and datastore needs to refer to the BlobKey type. -type BlobKey string - -// GeoPoint represents a location as latitude/longitude in degrees. -type GeoPoint struct { - Lat, Lng float64 -} - -// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. -func (g GeoPoint) Valid() bool { - return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 -} - -// APICallFunc defines a function type for handling an API call. -// See WithCallOverride. 
-type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error - -// WithAPICallFunc returns a copy of the parent context -// that will cause API calls to invoke f instead of their normal operation. -// -// This is intended for advanced users only. -func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { - return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) -} - -// APICall performs an API call. -// -// This is not intended for general use; it is exported for use in conjunction -// with WithAPICallFunc. -func APICall(ctx context.Context, service, method string, in, out proto.Message) error { - return internal.Call(ctx, service, method, in, out) -} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go deleted file mode 100644 index f4b645aad3b..00000000000 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package appengine - -import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". -func BackgroundContext() context.Context { - return internal.BackgroundContext() -} diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go deleted file mode 100644 index 16d0772e2a4..00000000000 --- a/vendor/google.golang.org/appengine/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// This file provides error functions for common API failure modes. - -package appengine - -import ( - "fmt" - - "google.golang.org/appengine/internal" -) - -// IsOverQuota reports whether err represents an API call failure -// due to insufficient available quota. -func IsOverQuota(err error) bool { - callErr, ok := err.(*internal.CallError) - return ok && callErr.Code == 4 -} - -// MultiError is returned by batch operations when there are errors with -// particular elements. Errors will be in a one-to-one correspondence with -// the input elements; successful elements will have a nil entry. -type MultiError []error - -func (m MultiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go deleted file mode 100644 index b8dcf8f3619..00000000000 --- a/vendor/google.golang.org/appengine/identity.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "time" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/app_identity" - modpb "google.golang.org/appengine/internal/modules" -) - -// AppID returns the application ID for the current application. -// The string will be a plain application ID (e.g. "appid"), with a -// domain prefix for custom domain deployments (e.g. "example.com:appid"). -func AppID(c context.Context) string { return internal.AppID(c) } - -// DefaultVersionHostname returns the standard hostname of the default version -// of the current application (e.g. 
"my-app.appspot.com"). This is suitable for -// use in constructing URLs. -func DefaultVersionHostname(c context.Context) string { - return internal.DefaultVersionHostname(c) -} - -// ModuleName returns the module name of the current instance. -func ModuleName(c context.Context) string { - return internal.ModuleName(c) -} - -// ModuleHostname returns a hostname of a module instance. -// If module is the empty string, it refers to the module of the current instance. -// If version is empty, it refers to the version of the current instance if valid, -// or the default version of the module of the current instance. -// If instance is empty, ModuleHostname returns the load-balancing hostname. -func ModuleHostname(c context.Context, module, version, instance string) (string, error) { - req := &modpb.GetHostnameRequest{} - if module != "" { - req.Module = &module - } - if version != "" { - req.Version = &version - } - if instance != "" { - req.Instance = &instance - } - res := &modpb.GetHostnameResponse{} - if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { - return "", err - } - return *res.Hostname, nil -} - -// VersionID returns the version ID for the current application. -// It will be of the form "X.Y", where X is specified in app.yaml, -// and Y is a number generated when each version of the app is uploaded. -// It does not include a module name. -func VersionID(c context.Context) string { return internal.VersionID(c) } - -// InstanceID returns a mostly-unique identifier for this instance. -func InstanceID() string { return internal.InstanceID() } - -// Datacenter returns an identifier for the datacenter that the instance is running in. -func Datacenter(c context.Context) string { return internal.Datacenter(c) } - -// ServerSoftware returns the App Engine release version. -// In production, it looks like "Google App Engine/X.Y.Z". -// In the development appserver, it looks like "Development/X.Y". 
-func ServerSoftware() string { return internal.ServerSoftware() } - -// RequestID returns a string that uniquely identifies the request. -func RequestID(c context.Context) string { return internal.RequestID(c) } - -// AccessToken generates an OAuth2 access token for the specified scopes on -// behalf of service account of this application. This token will expire after -// the returned time. -func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { - req := &pb.GetAccessTokenRequest{Scope: scopes} - res := &pb.GetAccessTokenResponse{} - - err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) - if err != nil { - return "", time.Time{}, err - } - return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil -} - -// Certificate represents a public certificate for the app. -type Certificate struct { - KeyName string - Data []byte // PEM-encoded X.509 certificate -} - -// PublicCertificates retrieves the public certificates for the app. -// They can be used to verify a signature returned by SignBytes. -func PublicCertificates(c context.Context) ([]Certificate, error) { - req := &pb.GetPublicCertificateForAppRequest{} - res := &pb.GetPublicCertificateForAppResponse{} - if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { - return nil, err - } - var cs []Certificate - for _, pc := range res.PublicCertificateList { - cs = append(cs, Certificate{ - KeyName: pc.GetKeyName(), - Data: []byte(pc.GetX509CertificatePem()), - }) - } - return cs, nil -} - -// ServiceAccount returns a string representing the service account name, in -// the form of an email address (typically app_id@appspot.gserviceaccount.com). 
-func ServiceAccount(c context.Context) (string, error) { - req := &pb.GetServiceAccountNameRequest{} - res := &pb.GetServiceAccountNameResponse{} - - err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) - if err != nil { - return "", err - } - return res.GetServiceAccountName(), err -} - -// SignBytes signs bytes using a private key unique to your application. -func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { - req := &pb.SignForAppRequest{BytesToSign: bytes} - res := &pb.SignForAppResponse{} - - if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { - return "", nil, err - } - return res.GetKeyName(), res.GetSignatureBytes(), nil -} - -func init() { - internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) - internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go deleted file mode 100644 index efee06090fc..00000000000 --- a/vendor/google.golang.org/appengine/internal/api.go +++ /dev/null @@ -1,677 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" -) - -var ( - // Incoming headers. 
- ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - - // Outgoing headers. - apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - }, - } - - defaultTicketOnce sync.Once - defaultTicket string -) - -func apiURL() *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - stopFlushing := make(chan int) - - ctxs.Lock() - ctxs.m[r] = c - ctxs.Unlock() - defer func() { - ctxs.Lock() - delete(ctxs.m, r) - ctxs.Unlock() - }() - - // Patch up RemoteAddr so it looks reasonable. 
- if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - go c.flushLog(false) - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } -} - -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - http.DefaultServeMux.ServeHTTP(c, r) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. 
- const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. - return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -var ctxs = struct { - sync.Mutex - m map[*http.Request]*context - bg *context // background context, lazily initialized - // dec is used by tests to decorate the netcontext.Context returned - // for a given request. This allows tests to add overrides (such as - // WithAppIDOverride) to the context. The map is nil outside tests. - dec map[*http.Request]func(netcontext.Context) netcontext.Context -}{ - m: make(map[*http.Request]*context), -} - -// context represents the context of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } - - apiURL *url.URL -} - -var contextKey = "holds a *context" - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. 
-func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) - return c -} - -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - ctxs.Lock() - c := ctxs.m[req] - d := ctxs.dec[req] - ctxs.Unlock() - - if d != nil { - parent = d(parent) - } - - if c == nil { - // Someone passed in an http.Request that is not in-flight. - // We panic here rather than panicking at a later point - // so that stack traces will be more sensible. - log.Panic("appengine: NewContext passed an unknown http.Request") - } - return withContext(parent, c) -} - -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - ctxs.Lock() - defer ctxs.Unlock() - - if ctxs.bg != nil { - return toContext(ctxs.bg) - } - - // Compute background security ticket. 
- ticket := DefaultTicket() - - ctxs.bg = &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go ctxs.bg.logFlusher(make(chan int)) - - return toContext(ctxs.bg) -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. -// It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { - c := &context{ - req: req, - apiURL: apiURL, - } - ctxs.Lock() - defer ctxs.Unlock() - if _, ok := ctxs.m[req]; ok { - log.Panic("req already associated with context") - } - if _, ok := ctxs.dec[req]; ok { - log.Panic("req already associated with context") - } - if ctxs.dec == nil { - ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) - } - ctxs.m[req] = c - ctxs.dec[req] = decorate - - return func() { - ctxs.Lock() - delete(ctxs.m, req) - delete(ctxs.dec, req) - ctxs.Unlock() - } -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *context) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). 
-func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *context) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) - return len(b), nil -} - -func (c *context) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { - hreq := &http.Request{ - Method: "POST", - URL: c.apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. 
- if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. - timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. 
- if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix - } - } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := c.post(hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *context) Request() *http.Request { - return c.req -} - -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. 
- const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *context, level int64, format string, args ...interface{}) { - if c == nil { - panic("not an App Engine context") - } - s := fmt.Sprintf(format, args...) - s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - log.Print(logLevelName[level] + ": " + s) -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
- c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *context) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) -} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go deleted file mode 100644 index e0c0b214b72..00000000000 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -package internal - -import ( - "errors" - "os" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -var errNotAppEngineContext = errors.New("not an App Engine context") - -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error - -var callOverrideKey = "holds []CallOverrideFunc" - -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { - // We avoid appending to any existing call override - // so we don't risk overwriting a popped stack below. - var cofs []CallOverrideFunc - if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { - cofs = append(cofs, uf...) - } - cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) -} - -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { - cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) - if len(cofs) == 0 { - return nil, nil, false - } - // We found a list of overrides; grab the last, and reconstitute a - // context that will hide it. 
- f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) - return f, ctx, true -} - -type logOverrideFunc func(level int64, format string, args ...interface{}) - -var logOverrideKey = "holds a logOverrideFunc" - -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) -} - -var appIDOverrideKey = "holds a string, being the full app ID" - -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) -} - -var namespaceKey = "holds the namespace string" - -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) -} - -func NamespaceFromContext(ctx netcontext.Context) string { - // If there's no namespace, return the empty string. - ns, _ := ctx.Value(&namespaceKey).(string) - return ns -} - -// FullyQualifiedAppID returns the fully-qualified application ID. -// This may contain a partition prefix (e.g. "s~" for High Replication apps), -// or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx netcontext.Context) string { - if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { - return id - } - return fullyQualifiedAppID(ctx) -} - -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { - if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { - f(level, format, args...) - return - } - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - logf(c, level, format, args...) -} - -// NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { - return withNamespace(ctx, namespace) -} - -// SetTestEnv sets the env variables for testing background ticket in Flex. 
-func SetTestEnv() func() { - var environ = []struct { - key, value string - }{ - {"GAE_LONG_APP_ID", "my-app-id"}, - {"GAE_MINOR_VERSION", "067924799508853122"}, - {"GAE_MODULE_INSTANCE", "0"}, - {"GAE_MODULE_NAME", "default"}, - {"GAE_MODULE_VERSION", "20150612t184001"}, - } - - for _, v := range environ { - old := os.Getenv(v.key) - os.Setenv(v.key, v.value) - v.value = old - } - return func() { // Restore old environment after the test completes. - for _, v := range environ { - if v.value == "" { - os.Unsetenv(v.key) - continue - } - os.Setenv(v.key, v.value) - } - } -} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go deleted file mode 100644 index 11df8c07b53..00000000000 --- a/vendor/google.golang.org/appengine/internal/app_id.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "strings" -) - -func parseFullAppID(appid string) (partition, domain, displayID string) { - if i := strings.Index(appid, "~"); i != -1 { - partition, appid = appid[:i], appid[i+1:] - } - if i := strings.Index(appid, ":"); i != -1 { - domain, appid = appid[:i], appid[i+1:] - } - return partition, domain, appid -} - -// appID returns "appid" or "domain.com:appid". -func appID(fullAppID string) string { - _, dom, dis := parseFullAppID(fullAppID) - if dom != "" { - return dom + ":" + dis - } - return dis -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go deleted file mode 100644 index 87d9701b8df..00000000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go +++ /dev/null @@ -1,296 +0,0 @@ -// Code generated by protoc-gen-go. 
-// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto -// DO NOT EDIT! - -/* -Package app_identity is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/app_identity/app_identity_service.proto - -It has these top-level messages: - AppIdentityServiceError - SignForAppRequest - SignForAppResponse - GetPublicCertificateForAppRequest - PublicCertificate - GetPublicCertificateForAppResponse - GetServiceAccountNameRequest - GetServiceAccountNameResponse - GetAccessTokenRequest - GetAccessTokenResponse - GetDefaultGcsBucketNameRequest - GetDefaultGcsBucketNameResponse -*/ -package app_identity - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type AppIdentityServiceError_ErrorCode int32 - -const ( - AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 - AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 - AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 - AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 - AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 - AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 - AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 - AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 -) - -var AppIdentityServiceError_ErrorCode_name = map[int32]string{ - 0: "SUCCESS", - 9: "UNKNOWN_SCOPE", - 1000: "BLOB_TOO_LARGE", - 1001: "DEADLINE_EXCEEDED", - 1002: "NOT_A_VALID_APP", - 1003: "UNKNOWN_ERROR", - 1005: "NOT_ALLOWED", - 1006: "NOT_IMPLEMENTED", -} -var AppIdentityServiceError_ErrorCode_value = map[string]int32{ - "SUCCESS": 0, - "UNKNOWN_SCOPE": 9, - 
"BLOB_TOO_LARGE": 1000, - "DEADLINE_EXCEEDED": 1001, - "NOT_A_VALID_APP": 1002, - "UNKNOWN_ERROR": 1003, - "NOT_ALLOWED": 1005, - "NOT_IMPLEMENTED": 1006, -} - -func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { - p := new(AppIdentityServiceError_ErrorCode) - *p = x - return p -} -func (x AppIdentityServiceError_ErrorCode) String() string { - return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) -} -func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") - if err != nil { - return err - } - *x = AppIdentityServiceError_ErrorCode(value) - return nil -} - -type AppIdentityServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } -func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } -func (*AppIdentityServiceError) ProtoMessage() {} - -type SignForAppRequest struct { - BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } -func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } -func (*SignForAppRequest) ProtoMessage() {} - -func (m *SignForAppRequest) GetBytesToSign() []byte { - if m != nil { - return m.BytesToSign - } - return nil -} - -type SignForAppResponse struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` - SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } -func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } -func (*SignForAppResponse) 
ProtoMessage() {} - -func (m *SignForAppResponse) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *SignForAppResponse) GetSignatureBytes() []byte { - if m != nil { - return m.SignatureBytes - } - return nil -} - -type GetPublicCertificateForAppRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } -func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppRequest) ProtoMessage() {} - -type PublicCertificate struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` - X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } -func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } -func (*PublicCertificate) ProtoMessage() {} - -func (m *PublicCertificate) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *PublicCertificate) GetX509CertificatePem() string { - if m != nil && m.X509CertificatePem != nil { - return *m.X509CertificatePem - } - return "" -} - -type GetPublicCertificateForAppResponse struct { - PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"` - MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } -func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } -func 
(*GetPublicCertificateForAppResponse) ProtoMessage() {} - -func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { - if m != nil { - return m.PublicCertificateList - } - return nil -} - -func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { - if m != nil && m.MaxClientCacheTimeInSecond != nil { - return *m.MaxClientCacheTimeInSecond - } - return 0 -} - -type GetServiceAccountNameRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } -func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameRequest) ProtoMessage() {} - -type GetServiceAccountNameResponse struct { - ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } -func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameResponse) ProtoMessage() {} - -func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenRequest struct { - Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` - ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"` - ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } -func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenRequest) ProtoMessage() {} - -func (m 
*GetAccessTokenRequest) GetScope() []string { - if m != nil { - return m.Scope - } - return nil -} - -func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { - if m != nil && m.ServiceAccountId != nil { - return *m.ServiceAccountId - } - return 0 -} - -func (m *GetAccessTokenRequest) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenResponse struct { - AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"` - ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } -func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenResponse) ProtoMessage() {} - -func (m *GetAccessTokenResponse) GetAccessToken() string { - if m != nil && m.AccessToken != nil { - return *m.AccessToken - } - return "" -} - -func (m *GetAccessTokenResponse) GetExpirationTime() int64 { - if m != nil && m.ExpirationTime != nil { - return *m.ExpirationTime - } - return 0 -} - -type GetDefaultGcsBucketNameRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } -func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} - -type GetDefaultGcsBucketNameResponse struct { - DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } -func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameResponse) 
ProtoMessage() {} - -func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { - if m != nil && m.DefaultGcsBucketName != nil { - return *m.DefaultGcsBucketName - } - return "" -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto deleted file mode 100644 index 19610ca5b75..00000000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "app_identity"; - -package appengine; - -message AppIdentityServiceError { - enum ErrorCode { - SUCCESS = 0; - UNKNOWN_SCOPE = 9; - BLOB_TOO_LARGE = 1000; - DEADLINE_EXCEEDED = 1001; - NOT_A_VALID_APP = 1002; - UNKNOWN_ERROR = 1003; - NOT_ALLOWED = 1005; - NOT_IMPLEMENTED = 1006; - } -} - -message SignForAppRequest { - optional bytes bytes_to_sign = 1; -} - -message SignForAppResponse { - optional string key_name = 1; - optional bytes signature_bytes = 2; -} - -message GetPublicCertificateForAppRequest { -} - -message PublicCertificate { - optional string key_name = 1; - optional string x509_certificate_pem = 2; -} - -message GetPublicCertificateForAppResponse { - repeated PublicCertificate public_certificate_list = 1; - optional int64 max_client_cache_time_in_second = 2; -} - -message GetServiceAccountNameRequest { -} - -message GetServiceAccountNameResponse { - optional string service_account_name = 1; -} - -message GetAccessTokenRequest { - repeated string scope = 1; - optional int64 service_account_id = 2; - optional string service_account_name = 3; -} - -message GetAccessTokenResponse { - optional string access_token = 1; - optional int64 expiration_time = 2; -} - -message GetDefaultGcsBucketNameRequest { -} - -message GetDefaultGcsBucketNameResponse { - optional string default_gcs_bucket_name = 1; -} diff --git 
a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go deleted file mode 100644 index 36a195650a9..00000000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go +++ /dev/null @@ -1,133 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/base/api_base.proto -// DO NOT EDIT! - -/* -Package base is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/base/api_base.proto - -It has these top-level messages: - StringProto - Integer32Proto - Integer64Proto - BoolProto - DoubleProto - BytesProto - VoidProto -*/ -package base - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type StringProto struct { - Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StringProto) Reset() { *m = StringProto{} } -func (m *StringProto) String() string { return proto.CompactTextString(m) } -func (*StringProto) ProtoMessage() {} - -func (m *StringProto) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Integer32Proto struct { - Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } -func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } -func (*Integer32Proto) ProtoMessage() {} - -func (m *Integer32Proto) GetValue() int32 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Integer64Proto struct { - Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m 
*Integer64Proto) Reset() { *m = Integer64Proto{} } -func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } -func (*Integer64Proto) ProtoMessage() {} - -func (m *Integer64Proto) GetValue() int64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BoolProto struct { - Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BoolProto) Reset() { *m = BoolProto{} } -func (m *BoolProto) String() string { return proto.CompactTextString(m) } -func (*BoolProto) ProtoMessage() {} - -func (m *BoolProto) GetValue() bool { - if m != nil && m.Value != nil { - return *m.Value - } - return false -} - -type DoubleProto struct { - Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DoubleProto) Reset() { *m = DoubleProto{} } -func (m *DoubleProto) String() string { return proto.CompactTextString(m) } -func (*DoubleProto) ProtoMessage() {} - -func (m *DoubleProto) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BytesProto struct { - Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BytesProto) Reset() { *m = BytesProto{} } -func (m *BytesProto) String() string { return proto.CompactTextString(m) } -func (*BytesProto) ProtoMessage() {} - -func (m *BytesProto) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type VoidProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *VoidProto) Reset() { *m = VoidProto{} } -func (m *VoidProto) String() string { return proto.CompactTextString(m) } -func (*VoidProto) ProtoMessage() {} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto deleted file mode 100644 index 56cd7a3cad0..00000000000 --- 
a/vendor/google.golang.org/appengine/internal/base/api_base.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Built-in base types for API calls. Primarily useful as return types. - -syntax = "proto2"; -option go_package = "base"; - -package appengine.base; - -message StringProto { - required string value = 1; -} - -message Integer32Proto { - required int32 value = 1; -} - -message Integer64Proto { - required int64 value = 1; -} - -message BoolProto { - required bool value = 1; -} - -message DoubleProto { - required double value = 1; -} - -message BytesProto { - required bytes value = 1 [ctype=CORD]; -} - -message VoidProto { -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go deleted file mode 100644 index 8613cb7311a..00000000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go +++ /dev/null @@ -1,2778 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto -// DO NOT EDIT! - -/* -Package datastore is a generated protocol buffer package. 
- -It is generated from these files: - google.golang.org/appengine/internal/datastore/datastore_v3.proto - -It has these top-level messages: - Action - PropertyValue - Property - Path - Reference - User - EntityProto - CompositeProperty - Index - CompositeIndex - IndexPostfix - IndexPosition - Snapshot - InternalHeader - Transaction - Query - CompiledQuery - CompiledCursor - Cursor - Error - Cost - GetRequest - GetResponse - PutRequest - PutResponse - TouchRequest - TouchResponse - DeleteRequest - DeleteResponse - NextRequest - QueryResult - AllocateIdsRequest - AllocateIdsResponse - CompositeIndices - AddActionsRequest - AddActionsResponse - BeginTransactionRequest - CommitResponse -*/ -package datastore - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type Property_Meaning int32 - -const ( - Property_NO_MEANING Property_Meaning = 0 - Property_BLOB Property_Meaning = 14 - Property_TEXT Property_Meaning = 15 - Property_BYTESTRING Property_Meaning = 16 - Property_ATOM_CATEGORY Property_Meaning = 1 - Property_ATOM_LINK Property_Meaning = 2 - Property_ATOM_TITLE Property_Meaning = 3 - Property_ATOM_CONTENT Property_Meaning = 4 - Property_ATOM_SUMMARY Property_Meaning = 5 - Property_ATOM_AUTHOR Property_Meaning = 6 - Property_GD_WHEN Property_Meaning = 7 - Property_GD_EMAIL Property_Meaning = 8 - Property_GEORSS_POINT Property_Meaning = 9 - Property_GD_IM Property_Meaning = 10 - Property_GD_PHONENUMBER Property_Meaning = 11 - Property_GD_POSTALADDRESS Property_Meaning = 12 - Property_GD_RATING Property_Meaning = 13 - Property_BLOBKEY Property_Meaning = 17 - Property_ENTITY_PROTO Property_Meaning = 19 - Property_INDEX_VALUE Property_Meaning = 18 -) - -var Property_Meaning_name = map[int32]string{ - 0: "NO_MEANING", - 14: "BLOB", - 15: "TEXT", - 16: "BYTESTRING", - 1: "ATOM_CATEGORY", - 2: 
"ATOM_LINK", - 3: "ATOM_TITLE", - 4: "ATOM_CONTENT", - 5: "ATOM_SUMMARY", - 6: "ATOM_AUTHOR", - 7: "GD_WHEN", - 8: "GD_EMAIL", - 9: "GEORSS_POINT", - 10: "GD_IM", - 11: "GD_PHONENUMBER", - 12: "GD_POSTALADDRESS", - 13: "GD_RATING", - 17: "BLOBKEY", - 19: "ENTITY_PROTO", - 18: "INDEX_VALUE", -} -var Property_Meaning_value = map[string]int32{ - "NO_MEANING": 0, - "BLOB": 14, - "TEXT": 15, - "BYTESTRING": 16, - "ATOM_CATEGORY": 1, - "ATOM_LINK": 2, - "ATOM_TITLE": 3, - "ATOM_CONTENT": 4, - "ATOM_SUMMARY": 5, - "ATOM_AUTHOR": 6, - "GD_WHEN": 7, - "GD_EMAIL": 8, - "GEORSS_POINT": 9, - "GD_IM": 10, - "GD_PHONENUMBER": 11, - "GD_POSTALADDRESS": 12, - "GD_RATING": 13, - "BLOBKEY": 17, - "ENTITY_PROTO": 19, - "INDEX_VALUE": 18, -} - -func (x Property_Meaning) Enum() *Property_Meaning { - p := new(Property_Meaning) - *p = x - return p -} -func (x Property_Meaning) String() string { - return proto.EnumName(Property_Meaning_name, int32(x)) -} -func (x *Property_Meaning) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") - if err != nil { - return err - } - *x = Property_Meaning(value) - return nil -} - -type Property_FtsTokenizationOption int32 - -const ( - Property_HTML Property_FtsTokenizationOption = 1 - Property_ATOM Property_FtsTokenizationOption = 2 -) - -var Property_FtsTokenizationOption_name = map[int32]string{ - 1: "HTML", - 2: "ATOM", -} -var Property_FtsTokenizationOption_value = map[string]int32{ - "HTML": 1, - "ATOM": 2, -} - -func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { - p := new(Property_FtsTokenizationOption) - *p = x - return p -} -func (x Property_FtsTokenizationOption) String() string { - return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) -} -func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, 
"Property_FtsTokenizationOption") - if err != nil { - return err - } - *x = Property_FtsTokenizationOption(value) - return nil -} - -type EntityProto_Kind int32 - -const ( - EntityProto_GD_CONTACT EntityProto_Kind = 1 - EntityProto_GD_EVENT EntityProto_Kind = 2 - EntityProto_GD_MESSAGE EntityProto_Kind = 3 -) - -var EntityProto_Kind_name = map[int32]string{ - 1: "GD_CONTACT", - 2: "GD_EVENT", - 3: "GD_MESSAGE", -} -var EntityProto_Kind_value = map[string]int32{ - "GD_CONTACT": 1, - "GD_EVENT": 2, - "GD_MESSAGE": 3, -} - -func (x EntityProto_Kind) Enum() *EntityProto_Kind { - p := new(EntityProto_Kind) - *p = x - return p -} -func (x EntityProto_Kind) String() string { - return proto.EnumName(EntityProto_Kind_name, int32(x)) -} -func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") - if err != nil { - return err - } - *x = EntityProto_Kind(value) - return nil -} - -type Index_Property_Direction int32 - -const ( - Index_Property_ASCENDING Index_Property_Direction = 1 - Index_Property_DESCENDING Index_Property_Direction = 2 -) - -var Index_Property_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Index_Property_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Index_Property_Direction) Enum() *Index_Property_Direction { - p := new(Index_Property_Direction) - *p = x - return p -} -func (x Index_Property_Direction) String() string { - return proto.EnumName(Index_Property_Direction_name, int32(x)) -} -func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") - if err != nil { - return err - } - *x = Index_Property_Direction(value) - return nil -} - -type CompositeIndex_State int32 - -const ( - CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 - CompositeIndex_READ_WRITE CompositeIndex_State 
= 2 - CompositeIndex_DELETED CompositeIndex_State = 3 - CompositeIndex_ERROR CompositeIndex_State = 4 -) - -var CompositeIndex_State_name = map[int32]string{ - 1: "WRITE_ONLY", - 2: "READ_WRITE", - 3: "DELETED", - 4: "ERROR", -} -var CompositeIndex_State_value = map[string]int32{ - "WRITE_ONLY": 1, - "READ_WRITE": 2, - "DELETED": 3, - "ERROR": 4, -} - -func (x CompositeIndex_State) Enum() *CompositeIndex_State { - p := new(CompositeIndex_State) - *p = x - return p -} -func (x CompositeIndex_State) String() string { - return proto.EnumName(CompositeIndex_State_name, int32(x)) -} -func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") - if err != nil { - return err - } - *x = CompositeIndex_State(value) - return nil -} - -type Snapshot_Status int32 - -const ( - Snapshot_INACTIVE Snapshot_Status = 0 - Snapshot_ACTIVE Snapshot_Status = 1 -) - -var Snapshot_Status_name = map[int32]string{ - 0: "INACTIVE", - 1: "ACTIVE", -} -var Snapshot_Status_value = map[string]int32{ - "INACTIVE": 0, - "ACTIVE": 1, -} - -func (x Snapshot_Status) Enum() *Snapshot_Status { - p := new(Snapshot_Status) - *p = x - return p -} -func (x Snapshot_Status) String() string { - return proto.EnumName(Snapshot_Status_name, int32(x)) -} -func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") - if err != nil { - return err - } - *x = Snapshot_Status(value) - return nil -} - -type Query_Hint int32 - -const ( - Query_ORDER_FIRST Query_Hint = 1 - Query_ANCESTOR_FIRST Query_Hint = 2 - Query_FILTER_FIRST Query_Hint = 3 -) - -var Query_Hint_name = map[int32]string{ - 1: "ORDER_FIRST", - 2: "ANCESTOR_FIRST", - 3: "FILTER_FIRST", -} -var Query_Hint_value = map[string]int32{ - "ORDER_FIRST": 1, - "ANCESTOR_FIRST": 2, - "FILTER_FIRST": 3, -} - -func (x Query_Hint) Enum() *Query_Hint { - p := new(Query_Hint) - 
*p = x - return p -} -func (x Query_Hint) String() string { - return proto.EnumName(Query_Hint_name, int32(x)) -} -func (x *Query_Hint) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") - if err != nil { - return err - } - *x = Query_Hint(value) - return nil -} - -type Query_Filter_Operator int32 - -const ( - Query_Filter_LESS_THAN Query_Filter_Operator = 1 - Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 - Query_Filter_GREATER_THAN Query_Filter_Operator = 3 - Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 - Query_Filter_EQUAL Query_Filter_Operator = 5 - Query_Filter_IN Query_Filter_Operator = 6 - Query_Filter_EXISTS Query_Filter_Operator = 7 -) - -var Query_Filter_Operator_name = map[int32]string{ - 1: "LESS_THAN", - 2: "LESS_THAN_OR_EQUAL", - 3: "GREATER_THAN", - 4: "GREATER_THAN_OR_EQUAL", - 5: "EQUAL", - 6: "IN", - 7: "EXISTS", -} -var Query_Filter_Operator_value = map[string]int32{ - "LESS_THAN": 1, - "LESS_THAN_OR_EQUAL": 2, - "GREATER_THAN": 3, - "GREATER_THAN_OR_EQUAL": 4, - "EQUAL": 5, - "IN": 6, - "EXISTS": 7, -} - -func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { - p := new(Query_Filter_Operator) - *p = x - return p -} -func (x Query_Filter_Operator) String() string { - return proto.EnumName(Query_Filter_Operator_name, int32(x)) -} -func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") - if err != nil { - return err - } - *x = Query_Filter_Operator(value) - return nil -} - -type Query_Order_Direction int32 - -const ( - Query_Order_ASCENDING Query_Order_Direction = 1 - Query_Order_DESCENDING Query_Order_Direction = 2 -) - -var Query_Order_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Query_Order_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Query_Order_Direction) Enum() 
*Query_Order_Direction { - p := new(Query_Order_Direction) - *p = x - return p -} -func (x Query_Order_Direction) String() string { - return proto.EnumName(Query_Order_Direction_name, int32(x)) -} -func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") - if err != nil { - return err - } - *x = Query_Order_Direction(value) - return nil -} - -type Error_ErrorCode int32 - -const ( - Error_BAD_REQUEST Error_ErrorCode = 1 - Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 - Error_INTERNAL_ERROR Error_ErrorCode = 3 - Error_NEED_INDEX Error_ErrorCode = 4 - Error_TIMEOUT Error_ErrorCode = 5 - Error_PERMISSION_DENIED Error_ErrorCode = 6 - Error_BIGTABLE_ERROR Error_ErrorCode = 7 - Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 - Error_CAPABILITY_DISABLED Error_ErrorCode = 9 - Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 - Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 -) - -var Error_ErrorCode_name = map[int32]string{ - 1: "BAD_REQUEST", - 2: "CONCURRENT_TRANSACTION", - 3: "INTERNAL_ERROR", - 4: "NEED_INDEX", - 5: "TIMEOUT", - 6: "PERMISSION_DENIED", - 7: "BIGTABLE_ERROR", - 8: "COMMITTED_BUT_STILL_APPLYING", - 9: "CAPABILITY_DISABLED", - 10: "TRY_ALTERNATE_BACKEND", - 11: "SAFE_TIME_TOO_OLD", -} -var Error_ErrorCode_value = map[string]int32{ - "BAD_REQUEST": 1, - "CONCURRENT_TRANSACTION": 2, - "INTERNAL_ERROR": 3, - "NEED_INDEX": 4, - "TIMEOUT": 5, - "PERMISSION_DENIED": 6, - "BIGTABLE_ERROR": 7, - "COMMITTED_BUT_STILL_APPLYING": 8, - "CAPABILITY_DISABLED": 9, - "TRY_ALTERNATE_BACKEND": 10, - "SAFE_TIME_TOO_OLD": 11, -} - -func (x Error_ErrorCode) Enum() *Error_ErrorCode { - p := new(Error_ErrorCode) - *p = x - return p -} -func (x Error_ErrorCode) String() string { - return proto.EnumName(Error_ErrorCode_name, int32(x)) -} -func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, 
"Error_ErrorCode") - if err != nil { - return err - } - *x = Error_ErrorCode(value) - return nil -} - -type PutRequest_AutoIdPolicy int32 - -const ( - PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 - PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 -) - -var PutRequest_AutoIdPolicy_name = map[int32]string{ - 0: "CURRENT", - 1: "SEQUENTIAL", -} -var PutRequest_AutoIdPolicy_value = map[string]int32{ - "CURRENT": 0, - "SEQUENTIAL": 1, -} - -func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { - p := new(PutRequest_AutoIdPolicy) - *p = x - return p -} -func (x PutRequest_AutoIdPolicy) String() string { - return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) -} -func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") - if err != nil { - return err - } - *x = PutRequest_AutoIdPolicy(value) - return nil -} - -type Action struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Action) Reset() { *m = Action{} } -func (m *Action) String() string { return proto.CompactTextString(m) } -func (*Action) ProtoMessage() {} - -type PropertyValue struct { - Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` - BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` - StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` - Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"` - Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"` - Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue) Reset() { *m = 
PropertyValue{} } -func (m *PropertyValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue) ProtoMessage() {} - -func (m *PropertyValue) GetInt64Value() int64 { - if m != nil && m.Int64Value != nil { - return *m.Int64Value - } - return 0 -} - -func (m *PropertyValue) GetBooleanValue() bool { - if m != nil && m.BooleanValue != nil { - return *m.BooleanValue - } - return false -} - -func (m *PropertyValue) GetStringValue() string { - if m != nil && m.StringValue != nil { - return *m.StringValue - } - return "" -} - -func (m *PropertyValue) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { - if m != nil { - return m.Pointvalue - } - return nil -} - -func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { - if m != nil { - return m.Uservalue - } - return nil -} - -func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { - if m != nil { - return m.Referencevalue - } - return nil -} - -type PropertyValue_PointValue struct { - X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` - Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } -func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_PointValue) ProtoMessage() {} - -func (m *PropertyValue_PointValue) GetX() float64 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *PropertyValue_PointValue) GetY() float64 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type PropertyValue_UserValue struct { - Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"` - Nickname *string 
`protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } -func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_UserValue) ProtoMessage() {} - -func (m *PropertyValue_UserValue) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *PropertyValue_UserValue) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *PropertyValue_UserValue) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type PropertyValue_ReferenceValue struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` - Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } -func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue) ProtoMessage() {} - -func (m *PropertyValue_ReferenceValue) GetApp() string { - if m != nil && m.App != nil { - 
return *m.App - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { - if m != nil { - return m.Pathelement - } - return nil -} - -type PropertyValue_ReferenceValue_PathElement struct { - Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_ReferenceValue_PathElement) Reset() { - *m = PropertyValue_ReferenceValue_PathElement{} -} -func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} - -func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Property struct { - Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` - MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"` - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` - Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` - Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` - FtsTokenizationOption *Property_FtsTokenizationOption 
`protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` - Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Property) Reset() { *m = Property{} } -func (m *Property) String() string { return proto.CompactTextString(m) } -func (*Property) ProtoMessage() {} - -const Default_Property_Meaning Property_Meaning = Property_NO_MEANING -const Default_Property_Searchable bool = false -const Default_Property_Locale string = "en" - -func (m *Property) GetMeaning() Property_Meaning { - if m != nil && m.Meaning != nil { - return *m.Meaning - } - return Default_Property_Meaning -} - -func (m *Property) GetMeaningUri() string { - if m != nil && m.MeaningUri != nil { - return *m.MeaningUri - } - return "" -} - -func (m *Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Property) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -func (m *Property) GetMultiple() bool { - if m != nil && m.Multiple != nil { - return *m.Multiple - } - return false -} - -func (m *Property) GetSearchable() bool { - if m != nil && m.Searchable != nil { - return *m.Searchable - } - return Default_Property_Searchable -} - -func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { - if m != nil && m.FtsTokenizationOption != nil { - return *m.FtsTokenizationOption - } - return Property_HTML -} - -func (m *Property) GetLocale() string { - if m != nil && m.Locale != nil { - return *m.Locale - } - return Default_Property_Locale -} - -type Path struct { - Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} - -func (m 
*Path) GetElement() []*Path_Element { - if m != nil { - return m.Element - } - return nil -} - -type Path_Element struct { - Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Path_Element) Reset() { *m = Path_Element{} } -func (m *Path_Element) String() string { return proto.CompactTextString(m) } -func (*Path_Element) ProtoMessage() {} - -func (m *Path_Element) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *Path_Element) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *Path_Element) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Reference struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` - Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Reference) Reset() { *m = Reference{} } -func (m *Reference) String() string { return proto.CompactTextString(m) } -func (*Reference) ProtoMessage() {} - -func (m *Reference) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Reference) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Reference) GetPath() *Path { - if m != nil { - return m.Path - } - return nil -} - -type User struct { - Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string 
`protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} - -func (m *User) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *User) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *User) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *User) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *User) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type EntityProto struct { - Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` - EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"` - Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` - Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` - KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"` - Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EntityProto) Reset() { *m = EntityProto{} } -func (m *EntityProto) String() string { return proto.CompactTextString(m) } -func (*EntityProto) ProtoMessage() {} - -func (m *EntityProto) GetKey() 
*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *EntityProto) GetEntityGroup() *Path { - if m != nil { - return m.EntityGroup - } - return nil -} - -func (m *EntityProto) GetOwner() *User { - if m != nil { - return m.Owner - } - return nil -} - -func (m *EntityProto) GetKind() EntityProto_Kind { - if m != nil && m.Kind != nil { - return *m.Kind - } - return EntityProto_GD_CONTACT -} - -func (m *EntityProto) GetKindUri() string { - if m != nil && m.KindUri != nil { - return *m.KindUri - } - return "" -} - -func (m *EntityProto) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -func (m *EntityProto) GetRawProperty() []*Property { - if m != nil { - return m.RawProperty - } - return nil -} - -func (m *EntityProto) GetRank() int32 { - if m != nil && m.Rank != nil { - return *m.Rank - } - return 0 -} - -type CompositeProperty struct { - IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"` - Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } -func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } -func (*CompositeProperty) ProtoMessage() {} - -func (m *CompositeProperty) GetIndexId() int64 { - if m != nil && m.IndexId != nil { - return *m.IndexId - } - return 0 -} - -func (m *CompositeProperty) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Index struct { - EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"` - Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` - Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Index) Reset() { *m = Index{} } -func (m *Index) String() string { return proto.CompactTextString(m) } -func (*Index) 
ProtoMessage() {} - -func (m *Index) GetEntityType() string { - if m != nil && m.EntityType != nil { - return *m.EntityType - } - return "" -} - -func (m *Index) GetAncestor() bool { - if m != nil && m.Ancestor != nil { - return *m.Ancestor - } - return false -} - -func (m *Index) GetProperty() []*Index_Property { - if m != nil { - return m.Property - } - return nil -} - -type Index_Property struct { - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Index_Property) Reset() { *m = Index_Property{} } -func (m *Index_Property) String() string { return proto.CompactTextString(m) } -func (*Index_Property) ProtoMessage() {} - -const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING - -func (m *Index_Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Index_Property) GetDirection() Index_Property_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Index_Property_Direction -} - -type CompositeIndex struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` - Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` - State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` - OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } -func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } -func (*CompositeIndex) ProtoMessage() {} - -const 
Default_CompositeIndex_OnlyUseIfRequired bool = false - -func (m *CompositeIndex) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CompositeIndex) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *CompositeIndex) GetDefinition() *Index { - if m != nil { - return m.Definition - } - return nil -} - -func (m *CompositeIndex) GetState() CompositeIndex_State { - if m != nil && m.State != nil { - return *m.State - } - return CompositeIndex_WRITE_ONLY -} - -func (m *CompositeIndex) GetOnlyUseIfRequired() bool { - if m != nil && m.OnlyUseIfRequired != nil { - return *m.OnlyUseIfRequired - } - return Default_CompositeIndex_OnlyUseIfRequired -} - -type IndexPostfix struct { - IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"` - Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } -func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix) ProtoMessage() {} - -const Default_IndexPostfix_Before bool = true - -func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { - if m != nil { - return m.IndexValue - } - return nil -} - -func (m *IndexPostfix) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *IndexPostfix) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPostfix_Before -} - -type IndexPostfix_IndexValue struct { - PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"` - Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPostfix_IndexValue) Reset() { *m = 
IndexPostfix_IndexValue{} } -func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix_IndexValue) ProtoMessage() {} - -func (m *IndexPostfix_IndexValue) GetPropertyName() string { - if m != nil && m.PropertyName != nil { - return *m.PropertyName - } - return "" -} - -func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type IndexPosition struct { - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPosition) Reset() { *m = IndexPosition{} } -func (m *IndexPosition) String() string { return proto.CompactTextString(m) } -func (*IndexPosition) ProtoMessage() {} - -const Default_IndexPosition_Before bool = true - -func (m *IndexPosition) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *IndexPosition) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPosition_Before -} - -type Snapshot struct { - Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} - -func (m *Snapshot) GetTs() int64 { - if m != nil && m.Ts != nil { - return *m.Ts - } - return 0 -} - -type InternalHeader struct { - Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InternalHeader) Reset() { *m = InternalHeader{} } -func (m *InternalHeader) String() string { return proto.CompactTextString(m) } -func (*InternalHeader) ProtoMessage() {} - -func (m *InternalHeader) GetQos() string { - if m != nil && m.Qos != nil { - return *m.Qos - } - return "" -} - -type Transaction 
struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` - App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` - MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Transaction) Reset() { *m = Transaction{} } -func (m *Transaction) String() string { return proto.CompactTextString(m) } -func (*Transaction) ProtoMessage() {} - -const Default_Transaction_MarkChanges bool = false - -func (m *Transaction) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Transaction) GetHandle() uint64 { - if m != nil && m.Handle != nil { - return *m.Handle - } - return 0 -} - -func (m *Transaction) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Transaction) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_Transaction_MarkChanges -} - -type Query struct { - Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"` - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` - Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"` - SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"` - Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"` - Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` - Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` - Offset *int32 
`protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` - EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"` - RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"` - KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"` - Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` - Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` - FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` - PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"` - GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"` - Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` - MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"` - SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"` - PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} - -const Default_Query_Offset int32 = 0 -const Default_Query_RequirePerfectPlan bool = false 
-const Default_Query_KeysOnly bool = false -const Default_Query_Compile bool = false -const Default_Query_PersistOffset bool = false - -func (m *Query) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Query) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Query) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Query) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *Query) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -func (m *Query) GetFilter() []*Query_Filter { - if m != nil { - return m.Filter - } - return nil -} - -func (m *Query) GetSearchQuery() string { - if m != nil && m.SearchQuery != nil { - return *m.SearchQuery - } - return "" -} - -func (m *Query) GetOrder() []*Query_Order { - if m != nil { - return m.Order - } - return nil -} - -func (m *Query) GetHint() Query_Hint { - if m != nil && m.Hint != nil { - return *m.Hint - } - return Query_ORDER_FIRST -} - -func (m *Query) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *Query) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_Query_Offset -} - -func (m *Query) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *Query) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *Query) GetEndCompiledCursor() *CompiledCursor { - if m != nil { - return m.EndCompiledCursor - } - return nil -} - -func (m *Query) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *Query) GetRequirePerfectPlan() bool { - if m != nil && m.RequirePerfectPlan != nil { - return *m.RequirePerfectPlan - } - return 
Default_Query_RequirePerfectPlan -} - -func (m *Query) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return Default_Query_KeysOnly -} - -func (m *Query) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *Query) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_Query_Compile -} - -func (m *Query) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *Query) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *Query) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *Query) GetGroupByPropertyName() []string { - if m != nil { - return m.GroupByPropertyName - } - return nil -} - -func (m *Query) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return false -} - -func (m *Query) GetMinSafeTimeSeconds() int64 { - if m != nil && m.MinSafeTimeSeconds != nil { - return *m.MinSafeTimeSeconds - } - return 0 -} - -func (m *Query) GetSafeReplicaName() []string { - if m != nil { - return m.SafeReplicaName - } - return nil -} - -func (m *Query) GetPersistOffset() bool { - if m != nil && m.PersistOffset != nil { - return *m.PersistOffset - } - return Default_Query_PersistOffset -} - -type Query_Filter struct { - Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query_Filter) Reset() { *m = Query_Filter{} } -func (m *Query_Filter) String() string { return proto.CompactTextString(m) } -func (*Query_Filter) ProtoMessage() {} - -func (m *Query_Filter) GetOp() Query_Filter_Operator { - if m != nil && m.Op != nil { - return *m.Op - } - return 
Query_Filter_LESS_THAN -} - -func (m *Query_Filter) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -type Query_Order struct { - Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` - Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query_Order) Reset() { *m = Query_Order{} } -func (m *Query_Order) String() string { return proto.CompactTextString(m) } -func (*Query_Order) ProtoMessage() {} - -const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING - -func (m *Query_Order) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *Query_Order) GetDirection() Query_Order_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Query_Order_Direction -} - -type CompiledQuery struct { - Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"` - Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"` - IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"` - Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` - KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"` - PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"` - DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"` - Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery) Reset() { *m 
= CompiledQuery{} } -func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery) ProtoMessage() {} - -const Default_CompiledQuery_Offset int32 = 0 - -func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { - if m != nil { - return m.Primaryscan - } - return nil -} - -func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { - if m != nil { - return m.Mergejoinscan - } - return nil -} - -func (m *CompiledQuery) GetIndexDef() *Index { - if m != nil { - return m.IndexDef - } - return nil -} - -func (m *CompiledQuery) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_CompiledQuery_Offset -} - -func (m *CompiledQuery) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *CompiledQuery) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *CompiledQuery) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *CompiledQuery) GetDistinctInfixSize() int32 { - if m != nil && m.DistinctInfixSize != nil { - return *m.DistinctInfixSize - } - return 0 -} - -func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { - if m != nil { - return m.Entityfilter - } - return nil -} - -type CompiledQuery_PrimaryScan struct { - IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` - StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` - StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"` - EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"` - EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"` - StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"` - EndPostfixValue 
[]string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"` - EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } -func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_PrimaryScan) ProtoMessage() {} - -func (m *CompiledQuery_PrimaryScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return false -} - -func (m *CompiledQuery_PrimaryScan) GetEndKey() string { - if m != nil && m.EndKey != nil { - return *m.EndKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { - if m != nil && m.EndInclusive != nil { - return *m.EndInclusive - } - return false -} - -func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { - if m != nil { - return m.StartPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { - if m != nil { - return m.EndPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { - if m != nil && m.EndUnappliedLogTimestampUs != nil { - return *m.EndUnappliedLogTimestampUs - } - return 0 -} - -type CompiledQuery_MergeJoinScan struct { - IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"` - PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"` - ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" 
json:"value_prefix,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } -func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} - -const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false - -func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { - if m != nil { - return m.PrefixValue - } - return nil -} - -func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { - if m != nil && m.ValuePrefix != nil { - return *m.ValuePrefix - } - return Default_CompiledQuery_MergeJoinScan_ValuePrefix -} - -type CompiledQuery_EntityFilter struct { - Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` - Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } -func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_EntityFilter) ProtoMessage() {} - -const Default_CompiledQuery_EntityFilter_Distinct bool = false - -func (m *CompiledQuery_EntityFilter) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return Default_CompiledQuery_EntityFilter_Distinct -} - -func (m *CompiledQuery_EntityFilter) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -type CompiledCursor struct { - Position *CompiledCursor_Position 
`protobuf:"group,2,opt,name=Position" json:"position,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } -func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor) ProtoMessage() {} - -func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { - if m != nil { - return m.Position - } - return nil -} - -type CompiledCursor_Position struct { - StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"` - Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"` - Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` - StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } -func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position) ProtoMessage() {} - -const Default_CompiledCursor_Position_StartInclusive bool = true - -func (m *CompiledCursor_Position) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { - if m != nil { - return m.Indexvalue - } - return nil -} - -func (m *CompiledCursor_Position) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *CompiledCursor_Position) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return Default_CompiledCursor_Position_StartInclusive -} - -type CompiledCursor_Position_IndexValue struct { - Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` - Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` - 
XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } -func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} - -func (m *CompiledCursor_Position_IndexValue) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type Cursor struct { - Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` - App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cursor) Reset() { *m = Cursor{} } -func (m *Cursor) String() string { return proto.CompactTextString(m) } -func (*Cursor) ProtoMessage() {} - -func (m *Cursor) GetCursor() uint64 { - if m != nil && m.Cursor != nil { - return *m.Cursor - } - return 0 -} - -func (m *Cursor) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -type Error struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Error) Reset() { *m = Error{} } -func (m *Error) String() string { return proto.CompactTextString(m) } -func (*Error) ProtoMessage() {} - -type Cost struct { - IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"` - IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"` - EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"` - EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"` - Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"` - ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" 
json:"approximate_storage_delta,omitempty"` - IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cost) Reset() { *m = Cost{} } -func (m *Cost) String() string { return proto.CompactTextString(m) } -func (*Cost) ProtoMessage() {} - -func (m *Cost) GetIndexWrites() int32 { - if m != nil && m.IndexWrites != nil { - return *m.IndexWrites - } - return 0 -} - -func (m *Cost) GetIndexWriteBytes() int32 { - if m != nil && m.IndexWriteBytes != nil { - return *m.IndexWriteBytes - } - return 0 -} - -func (m *Cost) GetEntityWrites() int32 { - if m != nil && m.EntityWrites != nil { - return *m.EntityWrites - } - return 0 -} - -func (m *Cost) GetEntityWriteBytes() int32 { - if m != nil && m.EntityWriteBytes != nil { - return *m.EntityWriteBytes - } - return 0 -} - -func (m *Cost) GetCommitcost() *Cost_CommitCost { - if m != nil { - return m.Commitcost - } - return nil -} - -func (m *Cost) GetApproximateStorageDelta() int32 { - if m != nil && m.ApproximateStorageDelta != nil { - return *m.ApproximateStorageDelta - } - return 0 -} - -func (m *Cost) GetIdSequenceUpdates() int32 { - if m != nil && m.IdSequenceUpdates != nil { - return *m.IdSequenceUpdates - } - return 0 -} - -type Cost_CommitCost struct { - RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"` - RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } -func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } -func (*Cost_CommitCost) ProtoMessage() {} - -func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { - if m != nil && m.RequestedEntityPuts != nil { - return *m.RequestedEntityPuts - } - return 0 -} - -func (m *Cost_CommitCost) 
GetRequestedEntityDeletes() int32 { - if m != nil && m.RequestedEntityDeletes != nil { - return *m.RequestedEntityDeletes - } - return 0 -} - -type GetRequest struct { - Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` - AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} - -const Default_GetRequest_AllowDeferred bool = false - -func (m *GetRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *GetRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *GetRequest) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *GetRequest) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *GetRequest) GetAllowDeferred() bool { - if m != nil && m.AllowDeferred != nil { - return *m.AllowDeferred - } - return Default_GetRequest_AllowDeferred -} - -type GetResponse struct { - Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"` - Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"` - InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"` - 
XXX_unrecognized []byte `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} - -const Default_GetResponse_InOrder bool = true - -func (m *GetResponse) GetEntity() []*GetResponse_Entity { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse) GetDeferred() []*Reference { - if m != nil { - return m.Deferred - } - return nil -} - -func (m *GetResponse) GetInOrder() bool { - if m != nil && m.InOrder != nil { - return *m.InOrder - } - return Default_GetResponse_InOrder -} - -type GetResponse_Entity struct { - Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` - Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` - Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } -func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } -func (*GetResponse_Entity) ProtoMessage() {} - -func (m *GetResponse_Entity) GetEntity() *EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse_Entity) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetResponse_Entity) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -type PutRequest struct { - Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` - Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool 
`protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} - -const Default_PutRequest_Trusted bool = false -const Default_PutRequest_Force bool = false -const Default_PutRequest_MarkChanges bool = false -const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT - -func (m *PutRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *PutRequest) GetEntity() []*EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *PutRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *PutRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return Default_PutRequest_Trusted -} - -func (m *PutRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_PutRequest_Force -} - -func (m *PutRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_PutRequest_MarkChanges -} - -func (m *PutRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { - if m != nil && m.AutoIdPolicy != nil { - return *m.AutoIdPolicy - } - 
return Default_PutRequest_AutoIdPolicy -} - -type PutResponse struct { - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} - -func (m *PutResponse) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *PutResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *PutResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type TouchRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"` - Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TouchRequest) Reset() { *m = TouchRequest{} } -func (m *TouchRequest) String() string { return proto.CompactTextString(m) } -func (*TouchRequest) ProtoMessage() {} - -const Default_TouchRequest_Force bool = false - -func (m *TouchRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *TouchRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *TouchRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return 
Default_TouchRequest_Force -} - -func (m *TouchRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type TouchResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TouchResponse) Reset() { *m = TouchResponse{} } -func (m *TouchResponse) String() string { return proto.CompactTextString(m) } -func (*TouchResponse) ProtoMessage() {} - -func (m *TouchResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type DeleteRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} - -const Default_DeleteRequest_Trusted bool = false -const Default_DeleteRequest_Force bool = false -const Default_DeleteRequest_MarkChanges bool = false - -func (m *DeleteRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DeleteRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *DeleteRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *DeleteRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return 
*m.Trusted - } - return Default_DeleteRequest_Trusted -} - -func (m *DeleteRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_DeleteRequest_Force -} - -func (m *DeleteRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_DeleteRequest_MarkChanges -} - -func (m *DeleteRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type DeleteResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } -func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteResponse) ProtoMessage() {} - -func (m *DeleteResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *DeleteResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type NextRequest struct { - Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` - Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` - Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` - Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NextRequest) Reset() { *m = NextRequest{} } -func (m *NextRequest) String() string { return proto.CompactTextString(m) } -func (*NextRequest) ProtoMessage() {} - -const Default_NextRequest_Offset int32 = 0 -const Default_NextRequest_Compile bool = false - -func (m *NextRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *NextRequest) GetCursor() *Cursor { - if m != nil 
{ - return m.Cursor - } - return nil -} - -func (m *NextRequest) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *NextRequest) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_NextRequest_Offset -} - -func (m *NextRequest) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_NextRequest_Compile -} - -type QueryResult struct { - Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"` - Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` - SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"` - MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"` - KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"` - IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"` - SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"` - CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` - Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` - Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *QueryResult) Reset() { *m = QueryResult{} } -func (m *QueryResult) String() string { return proto.CompactTextString(m) } -func (*QueryResult) ProtoMessage() {} - -func (m *QueryResult) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *QueryResult) GetResult() []*EntityProto { - if m != nil { - return m.Result - } - return nil -} - -func (m *QueryResult) GetSkippedResults() int32 { - if m != nil && m.SkippedResults != nil { - 
return *m.SkippedResults - } - return 0 -} - -func (m *QueryResult) GetMoreResults() bool { - if m != nil && m.MoreResults != nil { - return *m.MoreResults - } - return false -} - -func (m *QueryResult) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *QueryResult) GetIndexOnly() bool { - if m != nil && m.IndexOnly != nil { - return *m.IndexOnly - } - return false -} - -func (m *QueryResult) GetSmallOps() bool { - if m != nil && m.SmallOps != nil { - return *m.SmallOps - } - return false -} - -func (m *QueryResult) GetCompiledQuery() *CompiledQuery { - if m != nil { - return m.CompiledQuery - } - return nil -} - -func (m *QueryResult) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *QueryResult) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -func (m *QueryResult) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type AllocateIdsRequest struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"` - Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` - Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` - Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } -func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsRequest) ProtoMessage() {} - -func (m *AllocateIdsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AllocateIdsRequest) GetModelKey() *Reference { - if m != nil { - return m.ModelKey - } - return nil -} - -func (m *AllocateIdsRequest) GetSize() int64 { - if m != nil && 
m.Size != nil { - return *m.Size - } - return 0 -} - -func (m *AllocateIdsRequest) GetMax() int64 { - if m != nil && m.Max != nil { - return *m.Max - } - return 0 -} - -func (m *AllocateIdsRequest) GetReserve() []*Reference { - if m != nil { - return m.Reserve - } - return nil -} - -type AllocateIdsResponse struct { - Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` - End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` - Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } -func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsResponse) ProtoMessage() {} - -func (m *AllocateIdsResponse) GetStart() int64 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *AllocateIdsResponse) GetEnd() int64 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *AllocateIdsResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type CompositeIndices struct { - Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } -func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } -func (*CompositeIndices) ProtoMessage() {} - -func (m *CompositeIndices) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -type AddActionsRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` - Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } -func (m 
*AddActionsRequest) String() string { return proto.CompactTextString(m) } -func (*AddActionsRequest) ProtoMessage() {} - -func (m *AddActionsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AddActionsRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *AddActionsRequest) GetAction() []*Action { - if m != nil { - return m.Action - } - return nil -} - -type AddActionsResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } -func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } -func (*AddActionsResponse) ProtoMessage() {} - -type BeginTransactionRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } -func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } -func (*BeginTransactionRequest) ProtoMessage() {} - -const Default_BeginTransactionRequest_AllowMultipleEg bool = false - -func (m *BeginTransactionRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *BeginTransactionRequest) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { - if m != nil && m.AllowMultipleEg != nil { - return *m.AllowMultipleEg - } - return Default_BeginTransactionRequest_AllowMultipleEg -} - -type CommitResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" 
json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitResponse) Reset() { *m = CommitResponse{} } -func (m *CommitResponse) String() string { return proto.CompactTextString(m) } -func (*CommitResponse) ProtoMessage() {} - -func (m *CommitResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *CommitResponse) GetVersion() []*CommitResponse_Version { - if m != nil { - return m.Version - } - return nil -} - -type CommitResponse_Version struct { - RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"` - Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } -func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } -func (*CommitResponse_Version) ProtoMessage() {} - -func (m *CommitResponse_Version) GetRootEntityKey() *Reference { - if m != nil { - return m.RootEntityKey - } - return nil -} - -func (m *CommitResponse_Version) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto deleted file mode 100755 index e76f126ff7c..00000000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto +++ /dev/null @@ -1,541 +0,0 @@ -syntax = "proto2"; -option go_package = "datastore"; - -package appengine; - -message Action{} - -message PropertyValue { - optional int64 int64Value = 1; - optional bool booleanValue = 2; - optional string stringValue = 3; - optional double doubleValue = 4; - - optional group PointValue = 5 { - required double x = 6; - required double y = 7; - } - - optional group UserValue = 8 { - required string email = 9; - required string 
auth_domain = 10; - optional string nickname = 11; - optional string federated_identity = 21; - optional string federated_provider = 22; - } - - optional group ReferenceValue = 12 { - required string app = 13; - optional string name_space = 20; - repeated group PathElement = 14 { - required string type = 15; - optional int64 id = 16; - optional string name = 17; - } - } -} - -message Property { - enum Meaning { - NO_MEANING = 0; - BLOB = 14; - TEXT = 15; - BYTESTRING = 16; - - ATOM_CATEGORY = 1; - ATOM_LINK = 2; - ATOM_TITLE = 3; - ATOM_CONTENT = 4; - ATOM_SUMMARY = 5; - ATOM_AUTHOR = 6; - - GD_WHEN = 7; - GD_EMAIL = 8; - GEORSS_POINT = 9; - GD_IM = 10; - - GD_PHONENUMBER = 11; - GD_POSTALADDRESS = 12; - - GD_RATING = 13; - - BLOBKEY = 17; - ENTITY_PROTO = 19; - - INDEX_VALUE = 18; - }; - - optional Meaning meaning = 1 [default = NO_MEANING]; - optional string meaning_uri = 2; - - required string name = 3; - - required PropertyValue value = 5; - - required bool multiple = 4; - - optional bool searchable = 6 [default=false]; - - enum FtsTokenizationOption { - HTML = 1; - ATOM = 2; - } - - optional FtsTokenizationOption fts_tokenization_option = 8; - - optional string locale = 9 [default = "en"]; -} - -message Path { - repeated group Element = 1 { - required string type = 2; - optional int64 id = 3; - optional string name = 4; - } -} - -message Reference { - required string app = 13; - optional string name_space = 20; - required Path path = 14; -} - -message User { - required string email = 1; - required string auth_domain = 2; - optional string nickname = 3; - optional string federated_identity = 6; - optional string federated_provider = 7; -} - -message EntityProto { - required Reference key = 13; - required Path entity_group = 16; - optional User owner = 17; - - enum Kind { - GD_CONTACT = 1; - GD_EVENT = 2; - GD_MESSAGE = 3; - } - optional Kind kind = 4; - optional string kind_uri = 5; - - repeated Property property = 14; - repeated Property raw_property = 15; - - 
optional int32 rank = 18; -} - -message CompositeProperty { - required int64 index_id = 1; - repeated string value = 2; -} - -message Index { - required string entity_type = 1; - required bool ancestor = 5; - repeated group Property = 2 { - required string name = 3; - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - optional Direction direction = 4 [default = ASCENDING]; - } -} - -message CompositeIndex { - required string app_id = 1; - required int64 id = 2; - required Index definition = 3; - - enum State { - WRITE_ONLY = 1; - READ_WRITE = 2; - DELETED = 3; - ERROR = 4; - } - required State state = 4; - - optional bool only_use_if_required = 6 [default = false]; -} - -message IndexPostfix { - message IndexValue { - required string property_name = 1; - required PropertyValue value = 2; - } - - repeated IndexValue index_value = 1; - - optional Reference key = 2; - - optional bool before = 3 [default=true]; -} - -message IndexPosition { - optional string key = 1; - - optional bool before = 2 [default=true]; -} - -message Snapshot { - enum Status { - INACTIVE = 0; - ACTIVE = 1; - } - - required int64 ts = 1; -} - -message InternalHeader { - optional string qos = 1; -} - -message Transaction { - optional InternalHeader header = 4; - required fixed64 handle = 1; - required string app = 2; - optional bool mark_changes = 3 [default = false]; -} - -message Query { - optional InternalHeader header = 39; - - required string app = 1; - optional string name_space = 29; - - optional string kind = 3; - optional Reference ancestor = 17; - - repeated group Filter = 4 { - enum Operator { - LESS_THAN = 1; - LESS_THAN_OR_EQUAL = 2; - GREATER_THAN = 3; - GREATER_THAN_OR_EQUAL = 4; - EQUAL = 5; - IN = 6; - EXISTS = 7; - } - - required Operator op = 6; - repeated Property property = 14; - } - - optional string search_query = 8; - - repeated group Order = 9 { - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - - required string property = 10; - optional Direction 
direction = 11 [default = ASCENDING]; - } - - enum Hint { - ORDER_FIRST = 1; - ANCESTOR_FIRST = 2; - FILTER_FIRST = 3; - } - optional Hint hint = 18; - - optional int32 count = 23; - - optional int32 offset = 12 [default = 0]; - - optional int32 limit = 16; - - optional CompiledCursor compiled_cursor = 30; - optional CompiledCursor end_compiled_cursor = 31; - - repeated CompositeIndex composite_index = 19; - - optional bool require_perfect_plan = 20 [default = false]; - - optional bool keys_only = 21 [default = false]; - - optional Transaction transaction = 22; - - optional bool compile = 25 [default = false]; - - optional int64 failover_ms = 26; - - optional bool strong = 32; - - repeated string property_name = 33; - - repeated string group_by_property_name = 34; - - optional bool distinct = 24; - - optional int64 min_safe_time_seconds = 35; - - repeated string safe_replica_name = 36; - - optional bool persist_offset = 37 [default=false]; -} - -message CompiledQuery { - required group PrimaryScan = 1 { - optional string index_name = 2; - - optional string start_key = 3; - optional bool start_inclusive = 4; - optional string end_key = 5; - optional bool end_inclusive = 6; - - repeated string start_postfix_value = 22; - repeated string end_postfix_value = 23; - - optional int64 end_unapplied_log_timestamp_us = 19; - } - - repeated group MergeJoinScan = 7 { - required string index_name = 8; - - repeated string prefix_value = 9; - - optional bool value_prefix = 20 [default=false]; - } - - optional Index index_def = 21; - - optional int32 offset = 10 [default = 0]; - - optional int32 limit = 11; - - required bool keys_only = 12; - - repeated string property_name = 24; - - optional int32 distinct_infix_size = 25; - - optional group EntityFilter = 13 { - optional bool distinct = 14 [default=false]; - - optional string kind = 17; - optional Reference ancestor = 18; - } -} - -message CompiledCursor { - optional group Position = 2 { - optional string start_key = 27; - - 
repeated group IndexValue = 29 { - optional string property = 30; - required PropertyValue value = 31; - } - - optional Reference key = 32; - - optional bool start_inclusive = 28 [default=true]; - } -} - -message Cursor { - required fixed64 cursor = 1; - - optional string app = 2; -} - -message Error { - enum ErrorCode { - BAD_REQUEST = 1; - CONCURRENT_TRANSACTION = 2; - INTERNAL_ERROR = 3; - NEED_INDEX = 4; - TIMEOUT = 5; - PERMISSION_DENIED = 6; - BIGTABLE_ERROR = 7; - COMMITTED_BUT_STILL_APPLYING = 8; - CAPABILITY_DISABLED = 9; - TRY_ALTERNATE_BACKEND = 10; - SAFE_TIME_TOO_OLD = 11; - } -} - -message Cost { - optional int32 index_writes = 1; - optional int32 index_write_bytes = 2; - optional int32 entity_writes = 3; - optional int32 entity_write_bytes = 4; - optional group CommitCost = 5 { - optional int32 requested_entity_puts = 6; - optional int32 requested_entity_deletes = 7; - }; - optional int32 approximate_storage_delta = 8; - optional int32 id_sequence_updates = 9; -} - -message GetRequest { - optional InternalHeader header = 6; - - repeated Reference key = 1; - optional Transaction transaction = 2; - - optional int64 failover_ms = 3; - - optional bool strong = 4; - - optional bool allow_deferred = 5 [default=false]; -} - -message GetResponse { - repeated group Entity = 1 { - optional EntityProto entity = 2; - optional Reference key = 4; - - optional int64 version = 3; - } - - repeated Reference deferred = 5; - - optional bool in_order = 6 [default=true]; -} - -message PutRequest { - optional InternalHeader header = 11; - - repeated EntityProto entity = 1; - optional Transaction transaction = 2; - repeated CompositeIndex composite_index = 3; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; - - enum AutoIdPolicy { - CURRENT = 0; - SEQUENTIAL = 1; - } - optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; -} - 
-message PutResponse { - repeated Reference key = 1; - optional Cost cost = 2; - repeated int64 version = 3; -} - -message TouchRequest { - optional InternalHeader header = 10; - - repeated Reference key = 1; - repeated CompositeIndex composite_index = 2; - optional bool force = 3 [default = false]; - repeated Snapshot snapshot = 9; -} - -message TouchResponse { - optional Cost cost = 1; -} - -message DeleteRequest { - optional InternalHeader header = 10; - - repeated Reference key = 6; - optional Transaction transaction = 5; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; -} - -message DeleteResponse { - optional Cost cost = 1; - repeated int64 version = 3; -} - -message NextRequest { - optional InternalHeader header = 5; - - required Cursor cursor = 1; - optional int32 count = 2; - - optional int32 offset = 4 [default = 0]; - - optional bool compile = 3 [default = false]; -} - -message QueryResult { - optional Cursor cursor = 1; - - repeated EntityProto result = 2; - - optional int32 skipped_results = 7; - - required bool more_results = 3; - - optional bool keys_only = 4; - - optional bool index_only = 9; - - optional bool small_ops = 10; - - optional CompiledQuery compiled_query = 5; - - optional CompiledCursor compiled_cursor = 6; - - repeated CompositeIndex index = 8; - - repeated int64 version = 11; -} - -message AllocateIdsRequest { - optional InternalHeader header = 4; - - optional Reference model_key = 1; - - optional int64 size = 2; - - optional int64 max = 3; - - repeated Reference reserve = 5; -} - -message AllocateIdsResponse { - required int64 start = 1; - required int64 end = 2; - optional Cost cost = 3; -} - -message CompositeIndices { - repeated CompositeIndex index = 1; -} - -message AddActionsRequest { - optional InternalHeader header = 3; - - required Transaction transaction = 1; - repeated Action action = 2; -} 
- -message AddActionsResponse { -} - -message BeginTransactionRequest { - optional InternalHeader header = 3; - - required string app = 1; - optional bool allow_multiple_eg = 2 [default = false]; -} - -message CommitResponse { - optional Cost cost = 1; - - repeated group Version = 3 { - required Reference root_entity_key = 4; - required int64 version = 5; - } -} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go deleted file mode 100644 index d538701ab3b..00000000000 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import netcontext "golang.org/x/net/context" - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. - -func AppID(c netcontext.Context) string { - return appID(FullyQualifiedAppID(c)) -} diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go deleted file mode 100644 index d5fa75be78e..00000000000 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "net/http" - "os" - - netcontext "golang.org/x/net/context" -) - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. 
- -const ( - hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" - hRequestLogId = "X-AppEngine-Request-Log-Id" - hDatacenter = "X-AppEngine-Datacenter" -) - -func ctxHeaders(ctx netcontext.Context) http.Header { - c := fromContext(ctx) - if c == nil { - return nil - } - return c.Request().Header -} - -func DefaultVersionHostname(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDefaultVersionHostname) -} - -func RequestID(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hRequestLogId) -} - -func Datacenter(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDatacenter) -} - -func ServerSoftware() string { - // TODO(dsymonds): Remove fallback when we've verified this. - if s := os.Getenv("SERVER_SOFTWARE"); s != "" { - return s - } - return "Google App Engine/1.x.x" -} - -// TODO(dsymonds): Remove the metadata fetches. - -func ModuleName(_ netcontext.Context) string { - if s := os.Getenv("GAE_MODULE_NAME"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_name")) -} - -func VersionID(_ netcontext.Context) string { - if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) -} - -func InstanceID() string { - if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_instance")) -} - -func partitionlessAppID() string { - // gae_project has everything except the partition prefix. 
- appID := os.Getenv("GAE_LONG_APP_ID") - if appID == "" { - appID = string(mustGetMetadata("instance/attributes/gae_project")) - } - return appID -} - -func fullyQualifiedAppID(_ netcontext.Context) string { - appID := partitionlessAppID() - - part := os.Getenv("GAE_PARTITION") - if part == "" { - part = string(mustGetMetadata("instance/attributes/gae_partition")) - } - - if part != "" { - appID = part + "~" + appID - } - return appID -} - -func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" -} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go deleted file mode 100644 index 051ea3980ab..00000000000 --- a/vendor/google.golang.org/appengine/internal/internal.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package internal provides support for package appengine. -// -// Programs should not use this package directly. Its API is not stable. -// Use packages appengine and appengine/* instead. -package internal - -import ( - "fmt" - - "github.com/golang/protobuf/proto" - - remotepb "google.golang.org/appengine/internal/remote_api" -) - -// errorCodeMaps is a map of service name to the error code map for the service. -var errorCodeMaps = make(map[string]map[int32]string) - -// RegisterErrorCodeMap is called from API implementations to register their -// error code map. This should only be called from init functions. -func RegisterErrorCodeMap(service string, m map[int32]string) { - errorCodeMaps[service] = m -} - -type timeoutCodeKey struct { - service string - code int32 -} - -// timeoutCodes is the set of service+code pairs that represent timeouts. 
-var timeoutCodes = make(map[timeoutCodeKey]bool) - -func RegisterTimeoutErrorCode(service string, code int32) { - timeoutCodes[timeoutCodeKey{service, code}] = true -} - -// APIError is the type returned by appengine.Context's Call method -// when an API call fails in an API-specific way. This may be, for instance, -// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. -type APIError struct { - Service string - Detail string - Code int32 // API-specific error code -} - -func (e *APIError) Error() string { - if e.Code == 0 { - if e.Detail == "" { - return "APIError " - } - return e.Detail - } - s := fmt.Sprintf("API error %d", e.Code) - if m, ok := errorCodeMaps[e.Service]; ok { - s += " (" + e.Service + ": " + m[e.Code] + ")" - } else { - // Shouldn't happen, but provide a bit more detail if it does. - s = e.Service + " " + s - } - if e.Detail != "" { - s += ": " + e.Detail - } - return s -} - -func (e *APIError) IsTimeout() bool { - return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] -} - -// CallError is the type returned by appengine.Context's Call method when an -// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. -type CallError struct { - Detail string - Code int32 - // TODO: Remove this if we get a distinguishable error code. - Timeout bool -} - -func (e *CallError) Error() string { - var msg string - switch remotepb.RpcError_ErrorCode(e.Code) { - case remotepb.RpcError_UNKNOWN: - return e.Detail - case remotepb.RpcError_OVER_QUOTA: - msg = "Over quota" - case remotepb.RpcError_CAPABILITY_DISABLED: - msg = "Capability disabled" - case remotepb.RpcError_CANCELLED: - msg = "Canceled" - default: - msg = fmt.Sprintf("Call error %d", e.Code) - } - s := msg + ": " + e.Detail - if e.Timeout { - s += " (timeout)" - } - return s -} - -func (e *CallError) IsTimeout() bool { - return e.Timeout -} - -// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. 
-// The function should be prepared to be called on the same message more than once; it should only modify the -// RPC request the first time. -var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go deleted file mode 100644 index 20c595be30a..00000000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go +++ /dev/null @@ -1,899 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/log/log_service.proto -// DO NOT EDIT! - -/* -Package log is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/log/log_service.proto - -It has these top-level messages: - LogServiceError - UserAppLogLine - UserAppLogGroup - FlushRequest - SetStatusRequest - LogOffset - LogLine - RequestLog - LogModuleVersion - LogReadRequest - LogReadResponse - LogUsageRecord - LogUsageRequest - LogUsageResponse -*/ -package log - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type LogServiceError_ErrorCode int32 - -const ( - LogServiceError_OK LogServiceError_ErrorCode = 0 - LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 - LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 -) - -var LogServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_REQUEST", - 2: "STORAGE_ERROR", -} -var LogServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_REQUEST": 1, - "STORAGE_ERROR": 2, -} - -func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { - p := new(LogServiceError_ErrorCode) - *p = x - return p -} -func (x LogServiceError_ErrorCode) String() string { - return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) -} -func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") - if err != nil { - return err - } - *x = LogServiceError_ErrorCode(value) - return nil -} - -type LogServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogServiceError) Reset() { *m = LogServiceError{} } -func (m *LogServiceError) String() string { return proto.CompactTextString(m) } -func (*LogServiceError) ProtoMessage() {} - -type UserAppLogLine struct { - TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"` - Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } -func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } -func (*UserAppLogLine) ProtoMessage() {} - -func (m *UserAppLogLine) GetTimestampUsec() int64 { - if m != nil && m.TimestampUsec != nil { - return *m.TimestampUsec - } - return 0 -} - -func (m *UserAppLogLine) 
GetLevel() int64 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *UserAppLogLine) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -type UserAppLogGroup struct { - LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } -func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } -func (*UserAppLogGroup) ProtoMessage() {} - -func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { - if m != nil { - return m.LogLine - } - return nil -} - -type FlushRequest struct { - Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FlushRequest) Reset() { *m = FlushRequest{} } -func (m *FlushRequest) String() string { return proto.CompactTextString(m) } -func (*FlushRequest) ProtoMessage() {} - -func (m *FlushRequest) GetLogs() []byte { - if m != nil { - return m.Logs - } - return nil -} - -type SetStatusRequest struct { - Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } -func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } -func (*SetStatusRequest) ProtoMessage() {} - -func (m *SetStatusRequest) GetStatus() string { - if m != nil && m.Status != nil { - return *m.Status - } - return "" -} - -type LogOffset struct { - RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogOffset) Reset() { *m = LogOffset{} } -func (m *LogOffset) String() string { return proto.CompactTextString(m) } -func (*LogOffset) ProtoMessage() {} - -func (m *LogOffset) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -type 
LogLine struct { - Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` - Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogLine) Reset() { *m = LogLine{} } -func (m *LogLine) String() string { return proto.CompactTextString(m) } -func (*LogLine) ProtoMessage() {} - -func (m *LogLine) GetTime() int64 { - if m != nil && m.Time != nil { - return *m.Time - } - return 0 -} - -func (m *LogLine) GetLevel() int32 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *LogLine) GetLogMessage() string { - if m != nil && m.LogMessage != nil { - return *m.LogMessage - } - return "" -} - -type RequestLog struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"` - RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"` - Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` - Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` - Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` - StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"` - Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` - Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` - Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` - Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` - HttpVersion *string `protobuf:"bytes,12,req,name=http_version" 
json:"http_version,omitempty"` - Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` - ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"` - Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` - UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"` - UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"` - Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` - ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"` - Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` - Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` - TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"` - TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"` - WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"` - PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"` - Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"` - CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"` - Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` - LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"` - AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"` - ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"` - WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" 
json:"was_throttled_for_time,omitempty"` - WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"` - ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"` - ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RequestLog) Reset() { *m = RequestLog{} } -func (m *RequestLog) String() string { return proto.CompactTextString(m) } -func (*RequestLog) ProtoMessage() {} - -const Default_RequestLog_ModuleId string = "default" -const Default_RequestLog_ReplicaIndex int32 = -1 -const Default_RequestLog_Finished bool = true - -func (m *RequestLog) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *RequestLog) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_RequestLog_ModuleId -} - -func (m *RequestLog) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *RequestLog) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *RequestLog) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *RequestLog) GetIp() string { - if m != nil && m.Ip != nil { - return *m.Ip - } - return "" -} - -func (m *RequestLog) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *RequestLog) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *RequestLog) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *RequestLog) GetLatency() int64 { - if m != nil && m.Latency != nil { - return *m.Latency - } - return 0 -} - -func (m *RequestLog) GetMcycles() int64 { - if m != nil && m.Mcycles != 
nil { - return *m.Mcycles - } - return 0 -} - -func (m *RequestLog) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *RequestLog) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -func (m *RequestLog) GetHttpVersion() string { - if m != nil && m.HttpVersion != nil { - return *m.HttpVersion - } - return "" -} - -func (m *RequestLog) GetStatus() int32 { - if m != nil && m.Status != nil { - return *m.Status - } - return 0 -} - -func (m *RequestLog) GetResponseSize() int64 { - if m != nil && m.ResponseSize != nil { - return *m.ResponseSize - } - return 0 -} - -func (m *RequestLog) GetReferrer() string { - if m != nil && m.Referrer != nil { - return *m.Referrer - } - return "" -} - -func (m *RequestLog) GetUserAgent() string { - if m != nil && m.UserAgent != nil { - return *m.UserAgent - } - return "" -} - -func (m *RequestLog) GetUrlMapEntry() string { - if m != nil && m.UrlMapEntry != nil { - return *m.UrlMapEntry - } - return "" -} - -func (m *RequestLog) GetCombined() string { - if m != nil && m.Combined != nil { - return *m.Combined - } - return "" -} - -func (m *RequestLog) GetApiMcycles() int64 { - if m != nil && m.ApiMcycles != nil { - return *m.ApiMcycles - } - return 0 -} - -func (m *RequestLog) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *RequestLog) GetCost() float64 { - if m != nil && m.Cost != nil { - return *m.Cost - } - return 0 -} - -func (m *RequestLog) GetTaskQueueName() string { - if m != nil && m.TaskQueueName != nil { - return *m.TaskQueueName - } - return "" -} - -func (m *RequestLog) GetTaskName() string { - if m != nil && m.TaskName != nil { - return *m.TaskName - } - return "" -} - -func (m *RequestLog) GetWasLoadingRequest() bool { - if m != nil && m.WasLoadingRequest != nil { - return *m.WasLoadingRequest - } - return false -} - -func (m *RequestLog) GetPendingTime() int64 { - if m 
!= nil && m.PendingTime != nil { - return *m.PendingTime - } - return 0 -} - -func (m *RequestLog) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return Default_RequestLog_ReplicaIndex -} - -func (m *RequestLog) GetFinished() bool { - if m != nil && m.Finished != nil { - return *m.Finished - } - return Default_RequestLog_Finished -} - -func (m *RequestLog) GetCloneKey() []byte { - if m != nil { - return m.CloneKey - } - return nil -} - -func (m *RequestLog) GetLine() []*LogLine { - if m != nil { - return m.Line - } - return nil -} - -func (m *RequestLog) GetLinesIncomplete() bool { - if m != nil && m.LinesIncomplete != nil { - return *m.LinesIncomplete - } - return false -} - -func (m *RequestLog) GetAppEngineRelease() []byte { - if m != nil { - return m.AppEngineRelease - } - return nil -} - -func (m *RequestLog) GetExitReason() int32 { - if m != nil && m.ExitReason != nil { - return *m.ExitReason - } - return 0 -} - -func (m *RequestLog) GetWasThrottledForTime() bool { - if m != nil && m.WasThrottledForTime != nil { - return *m.WasThrottledForTime - } - return false -} - -func (m *RequestLog) GetWasThrottledForRequests() bool { - if m != nil && m.WasThrottledForRequests != nil { - return *m.WasThrottledForRequests - } - return false -} - -func (m *RequestLog) GetThrottledTime() int64 { - if m != nil && m.ThrottledTime != nil { - return *m.ThrottledTime - } - return 0 -} - -func (m *RequestLog) GetServerName() []byte { - if m != nil { - return m.ServerName - } - return nil -} - -type LogModuleVersion struct { - ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } -func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } -func (*LogModuleVersion) 
ProtoMessage() {} - -const Default_LogModuleVersion_ModuleId string = "default" - -func (m *LogModuleVersion) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_LogModuleVersion_ModuleId -} - -func (m *LogModuleVersion) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -type LogReadRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` - ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"` - StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` - Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` - RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"` - MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"` - IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"` - Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` - CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"` - HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"` - IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"` - AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"` - IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"` - IncludeAll *bool 
`protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"` - CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"` - NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } -func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } -func (*LogReadRequest) ProtoMessage() {} - -func (m *LogReadRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogReadRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { - if m != nil { - return m.ModuleVersion - } - return nil -} - -func (m *LogReadRequest) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogReadRequest) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogReadRequest) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadRequest) GetRequestId() [][]byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *LogReadRequest) GetMinimumLogLevel() int32 { - if m != nil && m.MinimumLogLevel != nil { - return *m.MinimumLogLevel - } - return 0 -} - -func (m *LogReadRequest) GetIncludeIncomplete() bool { - if m != nil && m.IncludeIncomplete != nil { - return *m.IncludeIncomplete - } - return false -} - -func (m *LogReadRequest) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogReadRequest) GetCombinedLogRegex() string { - if m != nil && m.CombinedLogRegex != nil { - return *m.CombinedLogRegex - } - return "" -} - -func (m *LogReadRequest) GetHostRegex() string { - if m != nil && m.HostRegex != nil { - return 
*m.HostRegex - } - return "" -} - -func (m *LogReadRequest) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return 0 -} - -func (m *LogReadRequest) GetIncludeAppLogs() bool { - if m != nil && m.IncludeAppLogs != nil { - return *m.IncludeAppLogs - } - return false -} - -func (m *LogReadRequest) GetAppLogsPerRequest() int32 { - if m != nil && m.AppLogsPerRequest != nil { - return *m.AppLogsPerRequest - } - return 0 -} - -func (m *LogReadRequest) GetIncludeHost() bool { - if m != nil && m.IncludeHost != nil { - return *m.IncludeHost - } - return false -} - -func (m *LogReadRequest) GetIncludeAll() bool { - if m != nil && m.IncludeAll != nil { - return *m.IncludeAll - } - return false -} - -func (m *LogReadRequest) GetCacheIterator() bool { - if m != nil && m.CacheIterator != nil { - return *m.CacheIterator - } - return false -} - -func (m *LogReadRequest) GetNumShards() int32 { - if m != nil && m.NumShards != nil { - return *m.NumShards - } - return 0 -} - -type LogReadResponse struct { - Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` - Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` - LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } -func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } -func (*LogReadResponse) ProtoMessage() {} - -func (m *LogReadResponse) GetLog() []*RequestLog { - if m != nil { - return m.Log - } - return nil -} - -func (m *LogReadResponse) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadResponse) GetLastEndTime() int64 { - if m != nil && m.LastEndTime != nil { - return *m.LastEndTime - } - return 0 -} - -type LogUsageRecord struct { - VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"` 
- StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"` - Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"` - Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } -func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } -func (*LogUsageRecord) ProtoMessage() {} - -func (m *LogUsageRecord) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *LogUsageRecord) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRecord) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRecord) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogUsageRecord) GetTotalSize() int64 { - if m != nil && m.TotalSize != nil { - return *m.TotalSize - } - return 0 -} - -func (m *LogUsageRecord) GetRecords() int32 { - if m != nil && m.Records != nil { - return *m.Records - } - return 0 -} - -type LogUsageRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` - ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"` - CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"` - UsageVersion 
*int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"` - VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } -func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } -func (*LogUsageRequest) ProtoMessage() {} - -const Default_LogUsageRequest_ResolutionHours uint32 = 1 - -func (m *LogUsageRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogUsageRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogUsageRequest) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRequest) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRequest) GetResolutionHours() uint32 { - if m != nil && m.ResolutionHours != nil { - return *m.ResolutionHours - } - return Default_LogUsageRequest_ResolutionHours -} - -func (m *LogUsageRequest) GetCombineVersions() bool { - if m != nil && m.CombineVersions != nil { - return *m.CombineVersions - } - return false -} - -func (m *LogUsageRequest) GetUsageVersion() int32 { - if m != nil && m.UsageVersion != nil { - return *m.UsageVersion - } - return 0 -} - -func (m *LogUsageRequest) GetVersionsOnly() bool { - if m != nil && m.VersionsOnly != nil { - return *m.VersionsOnly - } - return false -} - -type LogUsageResponse struct { - Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` - Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } -func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } -func (*LogUsageResponse) 
ProtoMessage() {} - -func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { - if m != nil { - return m.Usage - } - return nil -} - -func (m *LogUsageResponse) GetSummary() *LogUsageRecord { - if m != nil { - return m.Summary - } - return nil -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto deleted file mode 100644 index 8981dc47577..00000000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto2"; -option go_package = "log"; - -package appengine; - -message LogServiceError { - enum ErrorCode { - OK = 0; - INVALID_REQUEST = 1; - STORAGE_ERROR = 2; - } -} - -message UserAppLogLine { - required int64 timestamp_usec = 1; - required int64 level = 2; - required string message = 3; -} - -message UserAppLogGroup { - repeated UserAppLogLine log_line = 2; -} - -message FlushRequest { - optional bytes logs = 1; -} - -message SetStatusRequest { - required string status = 1; -} - - -message LogOffset { - optional bytes request_id = 1; -} - -message LogLine { - required int64 time = 1; - required int32 level = 2; - required string log_message = 3; -} - -message RequestLog { - required string app_id = 1; - optional string module_id = 37 [default="default"]; - required string version_id = 2; - required bytes request_id = 3; - optional LogOffset offset = 35; - required string ip = 4; - optional string nickname = 5; - required int64 start_time = 6; - required int64 end_time = 7; - required int64 latency = 8; - required int64 mcycles = 9; - required string method = 10; - required string resource = 11; - required string http_version = 12; - required int32 status = 13; - required int64 response_size = 14; - optional string referrer = 15; - optional string user_agent = 16; - required string url_map_entry = 17; - required string combined = 18; - optional int64 api_mcycles = 19; - optional string 
host = 20; - optional double cost = 21; - - optional string task_queue_name = 22; - optional string task_name = 23; - - optional bool was_loading_request = 24; - optional int64 pending_time = 25; - optional int32 replica_index = 26 [default = -1]; - optional bool finished = 27 [default = true]; - optional bytes clone_key = 28; - - repeated LogLine line = 29; - - optional bool lines_incomplete = 36; - optional bytes app_engine_release = 38; - - optional int32 exit_reason = 30; - optional bool was_throttled_for_time = 31; - optional bool was_throttled_for_requests = 32; - optional int64 throttled_time = 33; - - optional bytes server_name = 34; -} - -message LogModuleVersion { - optional string module_id = 1 [default="default"]; - optional string version_id = 2; -} - -message LogReadRequest { - required string app_id = 1; - repeated string version_id = 2; - repeated LogModuleVersion module_version = 19; - - optional int64 start_time = 3; - optional int64 end_time = 4; - optional LogOffset offset = 5; - repeated bytes request_id = 6; - - optional int32 minimum_log_level = 7; - optional bool include_incomplete = 8; - optional int64 count = 9; - - optional string combined_log_regex = 14; - optional string host_regex = 15; - optional int32 replica_index = 16; - - optional bool include_app_logs = 10; - optional int32 app_logs_per_request = 17; - optional bool include_host = 11; - optional bool include_all = 12; - optional bool cache_iterator = 13; - optional int32 num_shards = 18; -} - -message LogReadResponse { - repeated RequestLog log = 1; - optional LogOffset offset = 2; - optional int64 last_end_time = 3; -} - -message LogUsageRecord { - optional string version_id = 1; - optional int32 start_time = 2; - optional int32 end_time = 3; - optional int64 count = 4; - optional int64 total_size = 5; - optional int32 records = 6; -} - -message LogUsageRequest { - required string app_id = 1; - repeated string version_id = 2; - optional int32 start_time = 3; - optional int32 
end_time = 4; - optional uint32 resolution_hours = 5 [default = 1]; - optional bool combine_versions = 6; - optional int32 usage_version = 7; - optional bool versions_only = 8; -} - -message LogUsageResponse { - repeated LogUsageRecord usage = 1; - optional LogUsageRecord summary = 2; -} diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go deleted file mode 100644 index 822e784a458..00000000000 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "io" - "log" - "net/http" - "net/url" - "os" -) - -func Main() { - installHealthChecker(http.DefaultServeMux) - - port := "8080" - if s := os.Getenv("PORT"); s != "" { - port = s - } - - host := "" - if IsDevAppServer() { - host = "127.0.0.1" - } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { - log.Fatalf("http.ListenAndServe: %v", err) - } -} - -func installHealthChecker(mux *http.ServeMux) { - // If no health check handler has been installed by this point, add a trivial one. - const healthPath = "/_ah/health" - hreq := &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: healthPath, - }, - } - if _, pat := mux.Handler(hreq); pat != healthPath { - mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, "ok") - }) - } -} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go deleted file mode 100644 index 9cc1f71d104..00000000000 --- a/vendor/google.golang.org/appengine/internal/metadata.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file has code for accessing metadata. -// -// References: -// https://cloud.google.com/compute/docs/metadata - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" -) - -const ( - metadataHost = "metadata" - metadataPath = "/computeMetadata/v1/" -) - -var ( - metadataRequestHeaders = http.Header{ - "Metadata-Flavor": []string{"Google"}, - } -) - -// TODO(dsymonds): Do we need to support default values, like Python? -func mustGetMetadata(key string) []byte { - b, err := getMetadata(key) - if err != nil { - log.Fatalf("Metadata fetch failed: %v", err) - } - return b -} - -func getMetadata(key string) ([]byte, error) { - // TODO(dsymonds): May need to use url.Parse to support keys with query args. - req := &http.Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "http", - Host: metadataHost, - Path: metadataPath + key, - }, - Header: metadataRequestHeaders, - Host: metadataHost, - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) - } - return ioutil.ReadAll(resp.Body) -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go deleted file mode 100644 index a0145ed317c..00000000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go +++ /dev/null @@ -1,375 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/modules/modules_service.proto -// DO NOT EDIT! - -/* -Package modules is a generated protocol buffer package. 
- -It is generated from these files: - google.golang.org/appengine/internal/modules/modules_service.proto - -It has these top-level messages: - ModulesServiceError - GetModulesRequest - GetModulesResponse - GetVersionsRequest - GetVersionsResponse - GetDefaultVersionRequest - GetDefaultVersionResponse - GetNumInstancesRequest - GetNumInstancesResponse - SetNumInstancesRequest - SetNumInstancesResponse - StartModuleRequest - StartModuleResponse - StopModuleRequest - StopModuleResponse - GetHostnameRequest - GetHostnameResponse -*/ -package modules - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type ModulesServiceError_ErrorCode int32 - -const ( - ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 - ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 - ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 - ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 - ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 - ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 -) - -var ModulesServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_MODULE", - 2: "INVALID_VERSION", - 3: "INVALID_INSTANCES", - 4: "TRANSIENT_ERROR", - 5: "UNEXPECTED_STATE", -} -var ModulesServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_MODULE": 1, - "INVALID_VERSION": 2, - "INVALID_INSTANCES": 3, - "TRANSIENT_ERROR": 4, - "UNEXPECTED_STATE": 5, -} - -func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { - p := new(ModulesServiceError_ErrorCode) - *p = x - return p -} -func (x ModulesServiceError_ErrorCode) String() string { - return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) -} -func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) 
error { - value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") - if err != nil { - return err - } - *x = ModulesServiceError_ErrorCode(value) - return nil -} - -type ModulesServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } -func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } -func (*ModulesServiceError) ProtoMessage() {} - -type GetModulesRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } -func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } -func (*GetModulesRequest) ProtoMessage() {} - -type GetModulesResponse struct { - Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } -func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } -func (*GetModulesResponse) ProtoMessage() {} - -func (m *GetModulesResponse) GetModule() []string { - if m != nil { - return m.Module - } - return nil -} - -type GetVersionsRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } -func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetVersionsRequest) ProtoMessage() {} - -func (m *GetVersionsRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetVersionsResponse struct { - Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } -func (m *GetVersionsResponse) String() string { 
return proto.CompactTextString(m) } -func (*GetVersionsResponse) ProtoMessage() {} - -func (m *GetVersionsResponse) GetVersion() []string { - if m != nil { - return m.Version - } - return nil -} - -type GetDefaultVersionRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } -func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionRequest) ProtoMessage() {} - -func (m *GetDefaultVersionRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetDefaultVersionResponse struct { - Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } -func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionResponse) ProtoMessage() {} - -func (m *GetDefaultVersionResponse) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } -func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesRequest) ProtoMessage() {} - -func (m *GetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesResponse struct { - 
Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } -func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesResponse) ProtoMessage() {} - -func (m *GetNumInstancesResponse) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } -func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesRequest) ProtoMessage() {} - -func (m *SetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *SetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *SetNumInstancesRequest) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } -func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesResponse) ProtoMessage() {} - -type StartModuleRequest struct { - Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StartModuleRequest) 
Reset() { *m = StartModuleRequest{} } -func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StartModuleRequest) ProtoMessage() {} - -func (m *StartModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StartModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StartModuleResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } -func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StartModuleResponse) ProtoMessage() {} - -type StopModuleRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } -func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StopModuleRequest) ProtoMessage() {} - -func (m *StopModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StopModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StopModuleResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } -func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StopModuleResponse) ProtoMessage() {} - -type GetHostnameRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m 
*GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } -func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } -func (*GetHostnameRequest) ProtoMessage() {} - -func (m *GetHostnameRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetHostnameRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GetHostnameRequest) GetInstance() string { - if m != nil && m.Instance != nil { - return *m.Instance - } - return "" -} - -type GetHostnameResponse struct { - Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } -func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } -func (*GetHostnameResponse) ProtoMessage() {} - -func (m *GetHostnameResponse) GetHostname() string { - if m != nil && m.Hostname != nil { - return *m.Hostname - } - return "" -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto deleted file mode 100644 index d29f0065a2f..00000000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto2"; -option go_package = "modules"; - -package appengine; - -message ModulesServiceError { - enum ErrorCode { - OK = 0; - INVALID_MODULE = 1; - INVALID_VERSION = 2; - INVALID_INSTANCES = 3; - TRANSIENT_ERROR = 4; - UNEXPECTED_STATE = 5; - } -} - -message GetModulesRequest { -} - -message GetModulesResponse { - repeated string module = 1; -} - -message GetVersionsRequest { - optional string module = 1; -} - -message GetVersionsResponse { - repeated string version = 1; -} - -message GetDefaultVersionRequest { - optional string module = 1; -} - -message 
GetDefaultVersionResponse { - required string version = 1; -} - -message GetNumInstancesRequest { - optional string module = 1; - optional string version = 2; -} - -message GetNumInstancesResponse { - required int64 instances = 1; -} - -message SetNumInstancesRequest { - optional string module = 1; - optional string version = 2; - required int64 instances = 3; -} - -message SetNumInstancesResponse {} - -message StartModuleRequest { - required string module = 1; - required string version = 2; -} - -message StartModuleResponse {} - -message StopModuleRequest { - optional string module = 1; - optional string version = 2; -} - -message StopModuleResponse {} - -message GetHostnameRequest { - optional string module = 1; - optional string version = 2; - optional string instance = 3; -} - -message GetHostnameResponse { - required string hostname = 1; -} - diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go deleted file mode 100644 index 3b94cf0c6a8..00000000000 --- a/vendor/google.golang.org/appengine/internal/net.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements a network dialer that limits the number of concurrent connections. -// It is only used for API calls. - -import ( - "log" - "net" - "runtime" - "sync" - "time" -) - -var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. - -func limitRelease() { - // non-blocking - select { - case <-limitSem: - default: - // This should not normally happen. - log.Print("appengine: unbalanced limitSem release!") - } -} - -func limitDial(network, addr string) (net.Conn, error) { - limitSem <- 1 - - // Dial with a timeout in case the API host is MIA. - // The connection should normally be very fast. 
- conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) - if err != nil { - limitRelease() - return nil, err - } - lc := &limitConn{Conn: conn} - runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required - return lc, nil -} - -type limitConn struct { - close sync.Once - net.Conn -} - -func (lc *limitConn) Close() error { - defer lc.close.Do(func() { - limitRelease() - runtime.SetFinalizer(lc, nil) - }) - return lc.Conn.Close() -} diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh deleted file mode 100755 index 2fdb546a633..00000000000 --- a/vendor/google.golang.org/appengine/internal/regen.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -e -# -# This script rebuilds the generated code for the protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. - -PKG=google.golang.org/appengine - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd $base - -# Run protoc once per package. -for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. $dir/*.proto -done - -for f in $(find $PKG/internal -name '*.pb.go'); do - # Remove proto.RegisterEnum calls. - # These cause duplicate registration panics when these packages - # are used on classic App Engine. proto.RegisterEnum only affects - # parsing the text format; we don't care about that. 
- # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 - sed -i '/proto.RegisterEnum/d' $f -done diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go deleted file mode 100644 index 526bd39e6d1..00000000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go +++ /dev/null @@ -1,231 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/remote_api/remote_api.proto -// DO NOT EDIT! - -/* -Package remote_api is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/remote_api/remote_api.proto - -It has these top-level messages: - Request - ApplicationError - RpcError - Response -*/ -package remote_api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type RpcError_ErrorCode int32 - -const ( - RpcError_UNKNOWN RpcError_ErrorCode = 0 - RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 - RpcError_PARSE_ERROR RpcError_ErrorCode = 2 - RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 - RpcError_OVER_QUOTA RpcError_ErrorCode = 4 - RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 - RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 - RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 - RpcError_BAD_REQUEST RpcError_ErrorCode = 8 - RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 - RpcError_CANCELLED RpcError_ErrorCode = 10 - RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 - RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 -) - -var RpcError_ErrorCode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CALL_NOT_FOUND", - 2: "PARSE_ERROR", - 3: "SECURITY_VIOLATION", - 4: "OVER_QUOTA", - 5: "REQUEST_TOO_LARGE", - 6: "CAPABILITY_DISABLED", - 7: "FEATURE_DISABLED", - 8: "BAD_REQUEST", - 9: "RESPONSE_TOO_LARGE", - 10: "CANCELLED", - 11: "REPLAY_ERROR", - 12: "DEADLINE_EXCEEDED", -} -var RpcError_ErrorCode_value = map[string]int32{ - "UNKNOWN": 0, - "CALL_NOT_FOUND": 1, - "PARSE_ERROR": 2, - "SECURITY_VIOLATION": 3, - "OVER_QUOTA": 4, - "REQUEST_TOO_LARGE": 5, - "CAPABILITY_DISABLED": 6, - "FEATURE_DISABLED": 7, - "BAD_REQUEST": 8, - "RESPONSE_TOO_LARGE": 9, - "CANCELLED": 10, - "REPLAY_ERROR": 11, - "DEADLINE_EXCEEDED": 12, -} - -func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { - p := new(RpcError_ErrorCode) - *p = x - return p -} -func (x RpcError_ErrorCode) String() string { - return proto.EnumName(RpcError_ErrorCode_name, int32(x)) -} -func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") - if err != nil { - return err - } - *x = RpcError_ErrorCode(value) - return nil -} - -type Request struct { - ServiceName *string 
`protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"` - Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` - Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` - RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} - -func (m *Request) GetServiceName() string { - if m != nil && m.ServiceName != nil { - return *m.ServiceName - } - return "" -} - -func (m *Request) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *Request) GetRequest() []byte { - if m != nil { - return m.Request - } - return nil -} - -func (m *Request) GetRequestId() string { - if m != nil && m.RequestId != nil { - return *m.RequestId - } - return "" -} - -type ApplicationError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ApplicationError) Reset() { *m = ApplicationError{} } -func (m *ApplicationError) String() string { return proto.CompactTextString(m) } -func (*ApplicationError) ProtoMessage() {} - -func (m *ApplicationError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *ApplicationError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type RpcError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RpcError) Reset() { *m = RpcError{} } -func (m *RpcError) String() string { return proto.CompactTextString(m) } -func (*RpcError) 
ProtoMessage() {} - -func (m *RpcError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *RpcError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type Response struct { - Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` - Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` - ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"` - JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"` - RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} - -func (m *Response) GetResponse() []byte { - if m != nil { - return m.Response - } - return nil -} - -func (m *Response) GetException() []byte { - if m != nil { - return m.Exception - } - return nil -} - -func (m *Response) GetApplicationError() *ApplicationError { - if m != nil { - return m.ApplicationError - } - return nil -} - -func (m *Response) GetJavaException() []byte { - if m != nil { - return m.JavaException - } - return nil -} - -func (m *Response) GetRpcError() *RpcError { - if m != nil { - return m.RpcError - } - return nil -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto deleted file mode 100644 index f21763a4e23..00000000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto2"; -option go_package = "remote_api"; - -package remote_api; - -message Request { - required string service_name = 2; - required string method = 
3; - required bytes request = 4; - optional string request_id = 5; -} - -message ApplicationError { - required int32 code = 1; - required string detail = 2; -} - -message RpcError { - enum ErrorCode { - UNKNOWN = 0; - CALL_NOT_FOUND = 1; - PARSE_ERROR = 2; - SECURITY_VIOLATION = 3; - OVER_QUOTA = 4; - REQUEST_TOO_LARGE = 5; - CAPABILITY_DISABLED = 6; - FEATURE_DISABLED = 7; - BAD_REQUEST = 8; - RESPONSE_TOO_LARGE = 9; - CANCELLED = 10; - REPLAY_ERROR = 11; - DEADLINE_EXCEEDED = 12; - } - required int32 code = 1; - optional string detail = 2; -} - -message Response { - optional bytes response = 1; - optional bytes exception = 2; - optional ApplicationError application_error = 3; - optional bytes java_exception = 4; - optional RpcError rpc_error = 5; -} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go deleted file mode 100644 index 28a6d181206..00000000000 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements hooks for applying datastore transactions. - -import ( - "errors" - "reflect" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - pb "google.golang.org/appengine/internal/datastore" -) - -var transactionSetters = make(map[reflect.Type]reflect.Value) - -// RegisterTransactionSetter registers a function that sets transaction information -// in a protocol buffer message. f should be a function with two arguments, -// the first being a protocol buffer type, and the second being *datastore.Transaction. 
-func RegisterTransactionSetter(f interface{}) { - v := reflect.ValueOf(f) - transactionSetters[v.Type().In(0)] = v -} - -// applyTransaction applies the transaction t to message pb -// by using the relevant setter passed to RegisterTransactionSetter. -func applyTransaction(pb proto.Message, t *pb.Transaction) { - v := reflect.ValueOf(pb) - if f, ok := transactionSetters[v.Type()]; ok { - f.Call([]reflect.Value{v, reflect.ValueOf(t)}) - } -} - -var transactionKey = "used for *Transaction" - -func transactionFromContext(ctx netcontext.Context) *transaction { - t, _ := ctx.Value(&transactionKey).(*transaction) - return t -} - -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) -} - -type transaction struct { - transaction pb.Transaction - finished bool -} - -var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") - -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error { - if transactionFromContext(c) != nil { - return errors.New("nested transactions are not supported") - } - - // Begin the transaction. - t := &transaction{} - req := &pb.BeginTransactionRequest{ - App: proto.String(FullyQualifiedAppID(c)), - } - if xg { - req.AllowMultipleEg = proto.Bool(true) - } - if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { - return err - } - - // Call f, rolling back the transaction if f returns a non-nil error, or panics. - // The panic is not recovered. - defer func() { - if t.finished { - return - } - t.finished = true - // Ignore the error return value, since we are already returning a non-nil - // error (or we're panicking). - Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) - }() - if err := f(withTransaction(c, t)); err != nil { - return err - } - t.finished = true - - // Commit the transaction. 
- res := &pb.CommitResponse{} - err := Call(c, "datastore_v3", "Commit", &t.transaction, res) - if ae, ok := err.(*APIError); ok { - /* TODO: restore this conditional - if appengine.IsDevAppServer() { - */ - // The Python Dev AppServer raises an ApplicationError with error code 2 (which is - // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". - if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." { - return ErrConcurrentTransaction - } - if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { - return ErrConcurrentTransaction - } - } - return err -} diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go deleted file mode 100644 index 21860ca0822..00000000000 --- a/vendor/google.golang.org/appengine/namespace.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "fmt" - "regexp" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// Namespace returns a replacement context that operates within the given namespace. -func Namespace(c context.Context, namespace string) (context.Context, error) { - if !validNamespace.MatchString(namespace) { - return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) - } - return internal.NamespacedContext(c, namespace), nil -} - -// validNamespace matches valid namespace names. -var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go deleted file mode 100644 index 05642a992a3..00000000000 --- a/vendor/google.golang.org/appengine/timeout.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import "golang.org/x/net/context" - -// IsTimeoutError reports whether err is a timeout error. -func IsTimeoutError(err error) bool { - if err == context.DeadlineExceeded { - return true - } - if t, ok := err.(interface { - IsTimeout() bool - }); ok { - return t.IsTimeout() - } - return false -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go deleted file mode 100644 index 53d57f67a53..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/annotations.proto - -/* -Package annotations is a generated protocol buffer package. - -It is generated from these files: - google/api/annotations.proto - google/api/http.proto - -It has these top-level messages: - Http - HttpRule - CustomHttpPattern -*/ -package annotations - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -var E_Http = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MethodOptions)(nil), - ExtensionType: (*HttpRule)(nil), - Field: 72295728, - Name: "google.api.http", - Tag: "bytes,72295728,opt,name=http", - Filename: "google/api/annotations.proto", -} - -func init() { - proto.RegisterExtension(E_Http) -} - -func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 208 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, - 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, - 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, - 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, - 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, - 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, - 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, - 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, - 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, - 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, - 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, - 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go 
deleted file mode 100644 index f91c604620b..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ /dev/null @@ -1,566 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/http.proto - -package annotations - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Defines the HTTP configuration for a service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. -type Http struct { - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` -} - -func (m *Http) Reset() { *m = Http{} } -func (m *Http) String() string { return proto.CompactTextString(m) } -func (*Http) ProtoMessage() {} -func (*Http) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } - -func (m *Http) GetRules() []*HttpRule { - if m != nil { - return m.Rules - } - return nil -} - -// `HttpRule` defines the mapping of an RPC method to one or more HTTP -// REST APIs. The mapping determines what portions of the request -// message are populated from the path, query parameters, or body of -// the HTTP request. The mapping is typically specified as an -// `google.api.http` annotation, see "google/api/annotations.proto" -// for details. -// -// The mapping consists of a field specifying the path template and -// method kind. 
The path template can refer to fields in the request -// message, as in the example below which describes a REST GET -// operation on a resource collection of messages: -// -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // mapped to the URL -// SubMessage sub = 2; // `sub.subfield` is url-mapped -// } -// message Message { -// string text = 1; // content of the resource -// } -// -// The same http annotation can alternatively be expressed inside the -// `GRPC API Configuration` YAML file. -// -// http: -// rules: -// - selector: .Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// This definition enables an automatic, bidrectional mapping of HTTP -// JSON to RPC. Example: -// -// HTTP | RPC -// -----|----- -// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` -// -// In general, not only fields but also field paths can be referenced -// from a path pattern. Fields mapped to the path pattern cannot be -// repeated and must have a primitive (non-message) type. -// -// Any fields in the request message which are not bound by the path -// pattern automatically become (optional) HTTP query -// parameters. 
Assume the following definition of the request message: -// -// -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // mapped to the URL -// int64 revision = 2; // becomes a parameter -// SubMessage sub = 3; // `sub.subfield` becomes a parameter -// } -// -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | RPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` -// -// Note that fields which are mapped to HTTP parameters must have a -// primitive type or a repeated primitive type. Message types are not -// allowed. In the case of a repeated type, the parameter can be -// repeated in the URL, as in `...?param=A¶m=B`. -// -// For HTTP method kinds which allow a request body, the `body` field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// put: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// -// The following HTTP JSON to RPC mapping is enabled, where the -// representation of the JSON in the request body is determined by -// protos JSON encoding: -// -// HTTP | RPC -// -----|----- -// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define that -// every field not bound by the path template should be mapped to the -// request body. 
This enables the following alternative definition of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// put: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | RPC -// -----|----- -// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice of -// defining REST APIs. The common usage of `*` is in custom methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by using -// the `additional_bindings` option. Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// -// This enables the following two alternative HTTP JSON to RPC -// mappings: -// -// HTTP | RPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` -// -// # Rules for HTTP mapping -// -// The rules for mapping HTTP path, query parameters, and body fields -// to the request message are as follows: -// -// 1. The `body` field specifies either `*` or a field path, or is -// omitted. If omitted, it assumes there is no HTTP body. -// 2. 
Leaf fields (recursive expansion of nested messages in the -// request) can be classified into three types: -// (a) Matched in the URL template. -// (b) Covered by body (if body is `*`, everything except (a) fields; -// else everything under the body field) -// (c) All other fields. -// 3. URL query parameters found in the HTTP request are mapped to (c) fields. -// 4. Any body sent with an HTTP request can contain only (b) fields. -// -// The syntax of the path template is as follows: -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single path segment. It follows the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -// Expansion. -// -// The syntax `**` matches zero or more path segments. It follows the semantics -// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved -// Expansion. NOTE: it must be the last segment in the path except the Verb. -// -// The syntax `LITERAL` matches literal text in the URL path. -// -// The syntax `Variable` matches the entire path as specified by its template; -// this nested template must not contain further variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// NOTE: the field paths in variables and in the `body` must not refer to -// repeated fields or map fields. -// -// Use CustomHttpPattern to specify any HTTP method that is not included in the -// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for -// a given URL path rule. The wild-card rule is useful for services that provide -// content to Web (HTML) clients. -type HttpRule struct { - // Selects methods to which this rule applies. 
- // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. A custom method - // can be defined using the 'custom' field. - // - // Types that are valid to be assigned to Pattern: - // *HttpRule_Get - // *HttpRule_Put - // *HttpRule_Post - // *HttpRule_Delete - // *HttpRule_Patch - // *HttpRule_Custom - Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` - // The name of the request field whose value is mapped to the HTTP body, or - // `*` for mapping all fields not captured by the path pattern to the HTTP - // body. NOTE: the referred field must not be a repeated field and must be - // present at the top-level of request message type. - Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"` - // Additional HTTP bindings for the selector. Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). 
- AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"` -} - -func (m *HttpRule) Reset() { *m = HttpRule{} } -func (m *HttpRule) String() string { return proto.CompactTextString(m) } -func (*HttpRule) ProtoMessage() {} -func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } - -type isHttpRule_Pattern interface { - isHttpRule_Pattern() -} - -type HttpRule_Get struct { - Get string `protobuf:"bytes,2,opt,name=get,oneof"` -} -type HttpRule_Put struct { - Put string `protobuf:"bytes,3,opt,name=put,oneof"` -} -type HttpRule_Post struct { - Post string `protobuf:"bytes,4,opt,name=post,oneof"` -} -type HttpRule_Delete struct { - Delete string `protobuf:"bytes,5,opt,name=delete,oneof"` -} -type HttpRule_Patch struct { - Patch string `protobuf:"bytes,6,opt,name=patch,oneof"` -} -type HttpRule_Custom struct { - Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"` -} - -func (*HttpRule_Get) isHttpRule_Pattern() {} -func (*HttpRule_Put) isHttpRule_Pattern() {} -func (*HttpRule_Post) isHttpRule_Pattern() {} -func (*HttpRule_Delete) isHttpRule_Pattern() {} -func (*HttpRule_Patch) isHttpRule_Pattern() {} -func (*HttpRule_Custom) isHttpRule_Pattern() {} - -func (m *HttpRule) GetPattern() isHttpRule_Pattern { - if m != nil { - return m.Pattern - } - return nil -} - -func (m *HttpRule) GetSelector() string { - if m != nil { - return m.Selector - } - return "" -} - -func (m *HttpRule) GetGet() string { - if x, ok := m.GetPattern().(*HttpRule_Get); ok { - return x.Get - } - return "" -} - -func (m *HttpRule) GetPut() string { - if x, ok := m.GetPattern().(*HttpRule_Put); ok { - return x.Put - } - return "" -} - -func (m *HttpRule) GetPost() string { - if x, ok := m.GetPattern().(*HttpRule_Post); ok { - return x.Post - } - return "" -} - -func (m *HttpRule) GetDelete() string { - if x, ok := m.GetPattern().(*HttpRule_Delete); ok { - return x.Delete - } - 
return "" -} - -func (m *HttpRule) GetPatch() string { - if x, ok := m.GetPattern().(*HttpRule_Patch); ok { - return x.Patch - } - return "" -} - -func (m *HttpRule) GetCustom() *CustomHttpPattern { - if x, ok := m.GetPattern().(*HttpRule_Custom); ok { - return x.Custom - } - return nil -} - -func (m *HttpRule) GetBody() string { - if m != nil { - return m.Body - } - return "" -} - -func (m *HttpRule) GetAdditionalBindings() []*HttpRule { - if m != nil { - return m.AdditionalBindings - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{ - (*HttpRule_Get)(nil), - (*HttpRule_Put)(nil), - (*HttpRule_Post)(nil), - (*HttpRule_Delete)(nil), - (*HttpRule_Patch)(nil), - (*HttpRule_Custom)(nil), - } -} - -func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*HttpRule) - // pattern - switch x := m.Pattern.(type) { - case *HttpRule_Get: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Get) - case *HttpRule_Put: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Put) - case *HttpRule_Post: - b.EncodeVarint(4<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Post) - case *HttpRule_Delete: - b.EncodeVarint(5<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Delete) - case *HttpRule_Patch: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Patch) - case *HttpRule_Custom: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Custom); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x) - } - return nil -} - -func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, 
error) { - m := msg.(*HttpRule) - switch tag { - case 2: // pattern.get - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Get{x} - return true, err - case 3: // pattern.put - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Put{x} - return true, err - case 4: // pattern.post - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Post{x} - return true, err - case 5: // pattern.delete - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Delete{x} - return true, err - case 6: // pattern.patch - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Patch{x} - return true, err - case 8: // pattern.custom - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(CustomHttpPattern) - err := b.DecodeMessage(msg) - m.Pattern = &HttpRule_Custom{msg} - return true, err - default: - return false, nil - } -} - -func _HttpRule_OneofSizer(msg proto.Message) (n int) { - m := msg.(*HttpRule) - // pattern - switch x := m.Pattern.(type) { - case *HttpRule_Get: - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Get))) - n += len(x.Get) - case *HttpRule_Put: - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Put))) - n += len(x.Put) - case *HttpRule_Post: - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Post))) - n += len(x.Post) - case *HttpRule_Delete: - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Delete))) - n += len(x.Delete) - case *HttpRule_Patch: - n += proto.SizeVarint(6<<3 | proto.WireBytes) 
- n += proto.SizeVarint(uint64(len(x.Patch))) - n += len(x.Patch) - case *HttpRule_Custom: - s := proto.Size(x.Custom) - n += proto.SizeVarint(8<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// A custom pattern is used for defining custom HTTP verb. -type CustomHttpPattern struct { - // The name of this custom HTTP verb. - Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` - // The path matched by this custom verb. - Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` -} - -func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } -func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } -func (*CustomHttpPattern) ProtoMessage() {} -func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } - -func (m *CustomHttpPattern) GetKind() string { - if m != nil { - return m.Kind - } - return "" -} - -func (m *CustomHttpPattern) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -func init() { - proto.RegisterType((*Http)(nil), "google.api.Http") - proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") - proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") -} - -func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 359 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x6a, 0xe3, 0x30, - 0x10, 0xc6, 0xd7, 0x89, 0xe3, 0x24, 0x13, 0x58, 0x58, 0x6d, 0x76, 0x11, 0x85, 0x42, 0xc8, 0x29, - 0xf4, 0x60, 0x43, 0x7a, 0xe8, 0x21, 0xa7, 0xb8, 0x94, 0xa6, 0xb7, 0xe0, 0x63, 0x2f, 0x45, 0xb1, - 0x85, 0xa2, 0xd6, 0x91, 0x84, 0x3d, 0x3e, 0xf4, 0x75, 0xfa, 0x0e, 0x7d, 0xb7, 0x1e, 0x8b, 0xfe, - 0xa4, 0x09, 0x14, 0x7a, 0x9b, 0xef, 0x37, 0x9f, 0x34, 0xa3, 0x19, 0xc1, 0x3f, 0xa1, 0xb5, 0xa8, - 
0x79, 0xc6, 0x8c, 0xcc, 0xf6, 0x88, 0x26, 0x35, 0x8d, 0x46, 0x4d, 0xc0, 0xe3, 0x94, 0x19, 0x39, - 0x5f, 0x42, 0xbc, 0x41, 0x34, 0xe4, 0x0a, 0x06, 0x4d, 0x57, 0xf3, 0x96, 0x46, 0xb3, 0xfe, 0x62, - 0xb2, 0x9c, 0xa6, 0x27, 0x4f, 0x6a, 0x0d, 0x45, 0x57, 0xf3, 0xc2, 0x5b, 0xe6, 0xef, 0x3d, 0x18, - 0x1d, 0x19, 0xb9, 0x80, 0x51, 0xcb, 0x6b, 0x5e, 0xa2, 0x6e, 0x68, 0x34, 0x8b, 0x16, 0xe3, 0xe2, - 0x4b, 0x13, 0x02, 0x7d, 0xc1, 0x91, 0xf6, 0x2c, 0xde, 0xfc, 0x2a, 0xac, 0xb0, 0xcc, 0x74, 0x48, - 0xfb, 0x47, 0x66, 0x3a, 0x24, 0x53, 0x88, 0x8d, 0x6e, 0x91, 0xc6, 0x01, 0x3a, 0x45, 0x28, 0x24, - 0x15, 0xaf, 0x39, 0x72, 0x3a, 0x08, 0x3c, 0x68, 0xf2, 0x1f, 0x06, 0x86, 0x61, 0xb9, 0xa7, 0x49, - 0x48, 0x78, 0x49, 0x6e, 0x20, 0x29, 0xbb, 0x16, 0xf5, 0x81, 0x8e, 0x66, 0xd1, 0x62, 0xb2, 0xbc, - 0x3c, 0x7f, 0xc5, 0xad, 0xcb, 0xd8, 0xbe, 0xb7, 0x0c, 0x91, 0x37, 0xca, 0x5e, 0xe8, 0xed, 0x84, - 0x40, 0xbc, 0xd3, 0xd5, 0x2b, 0x1d, 0xba, 0x07, 0xb8, 0x98, 0xdc, 0xc1, 0x5f, 0x56, 0x55, 0x12, - 0xa5, 0x56, 0xac, 0x7e, 0xda, 0x49, 0x55, 0x49, 0x25, 0x5a, 0x3a, 0xf9, 0x61, 0x3e, 0xe4, 0x74, - 0x20, 0x0f, 0xfe, 0x7c, 0x0c, 0x43, 0xe3, 0xeb, 0xcd, 0x57, 0xf0, 0xe7, 0x5b, 0x13, 0xb6, 0xf4, - 0x8b, 0x54, 0x55, 0x98, 0x9d, 0x8b, 0x2d, 0x33, 0x0c, 0xf7, 0x7e, 0x70, 0x85, 0x8b, 0xf3, 0x67, - 0xf8, 0x5d, 0xea, 0xc3, 0x59, 0xd9, 0x7c, 0xec, 0xae, 0xb1, 0x1b, 0xdd, 0x46, 0x8f, 0xeb, 0x90, - 0x10, 0xba, 0x66, 0x4a, 0xa4, 0xba, 0x11, 0x99, 0xe0, 0xca, 0xed, 0x3b, 0xf3, 0x29, 0x66, 0x64, - 0xeb, 0x7e, 0x02, 0x53, 0x4a, 0x23, 0xb3, 0x6d, 0xb6, 0xab, 0xb3, 0xf8, 0x23, 0x8a, 0xde, 0x7a, - 0xf1, 0xfd, 0x7a, 0xfb, 0xb0, 0x4b, 0xdc, 0xb9, 0xeb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, - 0x15, 0x60, 0x5b, 0x40, 0x02, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go deleted file mode 100644 index 2f481a39642..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go +++ 
/dev/null @@ -1,337 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/iam/v1/iam_policy.proto - -/* -Package iam is a generated protocol buffer package. - -It is generated from these files: - google/iam/v1/iam_policy.proto - google/iam/v1/policy.proto - -It has these top-level messages: - SetIamPolicyRequest - GetIamPolicyRequest - TestIamPermissionsRequest - TestIamPermissionsResponse - Policy - Binding - PolicyDelta - BindingDelta -*/ -package iam - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "google.golang.org/genproto/googleapis/api/annotations" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Request message for `SetIamPolicy` method. -type SetIamPolicyRequest struct { - // REQUIRED: The resource for which the policy is being specified. - // `resource` is usually specified as a path. For example, a Project - // resource is specified as `projects/{project}`. - Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` - // REQUIRED: The complete policy to be applied to the `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as Projects) - // might reject them. 
- Policy *Policy `protobuf:"bytes,2,opt,name=policy" json:"policy,omitempty"` -} - -func (m *SetIamPolicyRequest) Reset() { *m = SetIamPolicyRequest{} } -func (m *SetIamPolicyRequest) String() string { return proto.CompactTextString(m) } -func (*SetIamPolicyRequest) ProtoMessage() {} -func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *SetIamPolicyRequest) GetResource() string { - if m != nil { - return m.Resource - } - return "" -} - -func (m *SetIamPolicyRequest) GetPolicy() *Policy { - if m != nil { - return m.Policy - } - return nil -} - -// Request message for `GetIamPolicy` method. -type GetIamPolicyRequest struct { - // REQUIRED: The resource for which the policy is being requested. - // `resource` is usually specified as a path. For example, a Project - // resource is specified as `projects/{project}`. - Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` -} - -func (m *GetIamPolicyRequest) Reset() { *m = GetIamPolicyRequest{} } -func (m *GetIamPolicyRequest) String() string { return proto.CompactTextString(m) } -func (*GetIamPolicyRequest) ProtoMessage() {} -func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *GetIamPolicyRequest) GetResource() string { - if m != nil { - return m.Resource - } - return "" -} - -// Request message for `TestIamPermissions` method. -type TestIamPermissionsRequest struct { - // REQUIRED: The resource for which the policy detail is being requested. - // `resource` is usually specified as a path. For example, a Project - // resource is specified as `projects/{project}`. - Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` - // The set of permissions to check for the `resource`. Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For more - // information see - // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). 
- Permissions []string `protobuf:"bytes,2,rep,name=permissions" json:"permissions,omitempty"` -} - -func (m *TestIamPermissionsRequest) Reset() { *m = TestIamPermissionsRequest{} } -func (m *TestIamPermissionsRequest) String() string { return proto.CompactTextString(m) } -func (*TestIamPermissionsRequest) ProtoMessage() {} -func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *TestIamPermissionsRequest) GetResource() string { - if m != nil { - return m.Resource - } - return "" -} - -func (m *TestIamPermissionsRequest) GetPermissions() []string { - if m != nil { - return m.Permissions - } - return nil -} - -// Response message for `TestIamPermissions` method. -type TestIamPermissionsResponse struct { - // A subset of `TestPermissionsRequest.permissions` that the caller is - // allowed. - Permissions []string `protobuf:"bytes,1,rep,name=permissions" json:"permissions,omitempty"` -} - -func (m *TestIamPermissionsResponse) Reset() { *m = TestIamPermissionsResponse{} } -func (m *TestIamPermissionsResponse) String() string { return proto.CompactTextString(m) } -func (*TestIamPermissionsResponse) ProtoMessage() {} -func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *TestIamPermissionsResponse) GetPermissions() []string { - if m != nil { - return m.Permissions - } - return nil -} - -func init() { - proto.RegisterType((*SetIamPolicyRequest)(nil), "google.iam.v1.SetIamPolicyRequest") - proto.RegisterType((*GetIamPolicyRequest)(nil), "google.iam.v1.GetIamPolicyRequest") - proto.RegisterType((*TestIamPermissionsRequest)(nil), "google.iam.v1.TestIamPermissionsRequest") - proto.RegisterType((*TestIamPermissionsResponse)(nil), "google.iam.v1.TestIamPermissionsResponse") -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for IAMPolicy service - -type IAMPolicyClient interface { - // Sets the access control policy on the specified resource. Replaces any - // existing policy. - SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) - // Gets the access control policy for a resource. - // Returns an empty policy if the resource exists and does not have a policy - // set. - GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) - // Returns permissions that a caller has on the specified resource. - // If the resource does not exist, this will return an empty set of - // permissions, not a NOT_FOUND error. - TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) -} - -type iAMPolicyClient struct { - cc *grpc.ClientConn -} - -func NewIAMPolicyClient(cc *grpc.ClientConn) IAMPolicyClient { - return &iAMPolicyClient{cc} -} - -func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { - out := new(Policy) - err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { - out := new(Policy) - err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) { - out := new(TestIamPermissionsResponse) - err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for IAMPolicy service - -type IAMPolicyServer interface { - // Sets the access control policy on the specified resource. Replaces any - // existing policy. - SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) - // Gets the access control policy for a resource. - // Returns an empty policy if the resource exists and does not have a policy - // set. - GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) - // Returns permissions that a caller has on the specified resource. - // If the resource does not exist, this will return an empty set of - // permissions, not a NOT_FOUND error. 
- TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) -} - -func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { - s.RegisterService(&_IAMPolicy_serviceDesc, srv) -} - -func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetIamPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IAMPolicyServer).SetIamPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetIamPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IAMPolicyServer).GetIamPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TestIamPermissionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IAMPolicyServer).TestIamPermissions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/google.iam.v1.IAMPolicy/TestIamPermissions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _IAMPolicy_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.iam.v1.IAMPolicy", - HandlerType: (*IAMPolicyServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SetIamPolicy", - Handler: _IAMPolicy_SetIamPolicy_Handler, - }, - { - MethodName: "GetIamPolicy", - Handler: _IAMPolicy_GetIamPolicy_Handler, - }, - { - MethodName: "TestIamPermissions", - Handler: _IAMPolicy_TestIamPermissions_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/iam/v1/iam_policy.proto", -} - -func init() { proto.RegisterFile("google/iam/v1/iam_policy.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 396 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcf, 0x4a, 0xe3, 0x40, - 0x18, 0x67, 0x52, 0x28, 0xdb, 0xe9, 0xee, 0xc2, 0xa6, 0x2c, 0xd4, 0x20, 0x25, 0x8c, 0x1e, 0xd2, - 0x80, 0x13, 0x53, 0x6f, 0x15, 0x05, 0xeb, 0x21, 0xf4, 0x20, 0x94, 0x2a, 0x82, 0x5e, 0x74, 0xac, - 0x43, 0x18, 0x48, 0x32, 0x31, 0x33, 0x2d, 0x88, 0x78, 0xf1, 0x15, 0xf4, 0xe4, 0x23, 0xf8, 0x3a, - 0xbe, 0x82, 0x0f, 0xe1, 0x51, 0x92, 0x89, 0x35, 0x6d, 0xaa, 0x54, 0xf0, 0x54, 0x3a, 0xf3, 0xfb, - 0xf7, 0xfd, 0xbe, 0x0c, 0x6c, 0xf9, 0x9c, 0xfb, 0x01, 0x75, 0x18, 0x09, 0x9d, 0x89, 0x9b, 0xfe, - 0x9c, 0xc5, 0x3c, 0x60, 0xa3, 0x6b, 0x1c, 0x27, 0x5c, 0x72, 0xfd, 0x8f, 0xba, 0xc7, 0x8c, 0x84, - 0x78, 0xe2, 0x1a, 0xab, 0x39, 0x9c, 0xc4, 0xcc, 0x21, 0x51, 0xc4, 0x25, 0x91, 0x8c, 0x47, 0x42, - 0x81, 0x0d, 0x63, 0x56, 0xac, 0x28, 0x84, 0xce, 0x61, 0xe3, 0x90, 0xca, 0x3e, 0x09, 0x07, 0xd9, - 0xe9, 0x90, 0x5e, 0x8d, 0xa9, 0x90, 0xba, 0x01, 0x7f, 0x25, 0x54, 0xf0, 0x71, 0x32, 0xa2, 0x4d, - 0x60, 0x02, 0xab, 0x36, 0x9c, 0xfe, 0xd7, 0x37, 0x60, 0x55, 0x49, 
0x34, 0x35, 0x13, 0x58, 0xf5, - 0xce, 0x7f, 0x3c, 0x13, 0x06, 0xe7, 0x4a, 0x39, 0x08, 0xb9, 0xb0, 0xe1, 0x7d, 0xcf, 0x01, 0x9d, - 0xc0, 0x95, 0x23, 0x2a, 0x32, 0x0e, 0x4d, 0x42, 0x26, 0x44, 0x3a, 0xcc, 0x32, 0xd1, 0x4c, 0x58, - 0x8f, 0x3f, 0x18, 0x4d, 0xcd, 0xac, 0x58, 0xb5, 0x61, 0xf1, 0x08, 0xed, 0x42, 0x63, 0x91, 0xb4, - 0x88, 0x79, 0x24, 0x4a, 0x7c, 0x50, 0xe2, 0x77, 0x1e, 0x2a, 0xb0, 0xd6, 0xdf, 0x3b, 0x50, 0xb3, - 0xe8, 0x12, 0xfe, 0x2e, 0xb6, 0xa7, 0xa3, 0xb9, 0x2a, 0x16, 0x54, 0x6b, 0x2c, 0xae, 0x0b, 0xb5, - 0xef, 0x9e, 0x5f, 0xee, 0xb5, 0x35, 0xd4, 0x4a, 0x57, 0x74, 0xf3, 0x3e, 0xd1, 0x8e, 0x6d, 0xdf, - 0x76, 0x45, 0x41, 0xa5, 0x0b, 0xec, 0xd4, 0xd5, 0xfb, 0xca, 0xd5, 0xfb, 0x11, 0x57, 0x7f, 0xce, - 0xf5, 0x11, 0x40, 0xbd, 0x5c, 0x9d, 0x6e, 0xcd, 0x09, 0x7f, 0xba, 0x38, 0xa3, 0xbd, 0x04, 0x52, - 0xed, 0x01, 0x39, 0x59, 0xac, 0x36, 0x5a, 0x2f, 0xc7, 0x92, 0x25, 0x56, 0x17, 0xd8, 0xbd, 0x18, - 0xfe, 0x1b, 0xf1, 0x70, 0xd6, 0xa0, 0xf7, 0x77, 0x9a, 0x7f, 0x90, 0x7e, 0xeb, 0x03, 0x70, 0xba, - 0x99, 0x03, 0x7c, 0x1e, 0x90, 0xc8, 0xc7, 0x3c, 0xf1, 0x1d, 0x9f, 0x46, 0xd9, 0x4b, 0x70, 0xd4, - 0x15, 0x89, 0x99, 0xc8, 0x1f, 0xca, 0x36, 0x23, 0xe1, 0x2b, 0x00, 0x4f, 0x5a, 0xc3, 0x53, 0xac, - 0xfd, 0x80, 0x8f, 0x2f, 0x71, 0x9f, 0x84, 0xf8, 0xd8, 0xbd, 0xa8, 0x66, 0xac, 0xad, 0xb7, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x6c, 0x3a, 0x2b, 0x4d, 0xaa, 0x03, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go deleted file mode 100644 index a22ae91bebb..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ /dev/null @@ -1,269 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: google/iam/v1/policy.proto - -package iam - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "google.golang.org/genproto/googleapis/api/annotations" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// The type of action performed on a Binding in a policy. -type BindingDelta_Action int32 - -const ( - // Unspecified. - BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0 - // Addition of a Binding. - BindingDelta_ADD BindingDelta_Action = 1 - // Removal of a Binding. - BindingDelta_REMOVE BindingDelta_Action = 2 -) - -var BindingDelta_Action_name = map[int32]string{ - 0: "ACTION_UNSPECIFIED", - 1: "ADD", - 2: "REMOVE", -} -var BindingDelta_Action_value = map[string]int32{ - "ACTION_UNSPECIFIED": 0, - "ADD": 1, - "REMOVE": 2, -} - -func (x BindingDelta_Action) String() string { - return proto.EnumName(BindingDelta_Action_name, int32(x)) -} -func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} } - -// Defines an Identity and Access Management (IAM) policy. It is used to -// specify access control policies for Cloud Platform resources. -// -// -// A `Policy` consists of a list of `bindings`. A `Binding` binds a list of -// `members` to a `role`, where the members can be user accounts, Google groups, -// Google domains, and service accounts. A `role` is a named list of permissions -// defined by IAM. -// -// **Example** -// -// { -// "bindings": [ -// { -// "role": "roles/owner", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-other-app@appspot.gserviceaccount.com", -// ] -// }, -// { -// "role": "roles/viewer", -// "members": ["user:sean@example.com"] -// } -// ] -// } -// -// For a description of IAM and its features, see the -// [IAM developer's guide](https://cloud.google.com/iam). 
-type Policy struct { - // Version of the `Policy`. The default version is 0. - Version int32 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` - // Associates a list of `members` to a `role`. - // Multiple `bindings` must not be specified for the same `role`. - // `bindings` with no members will result in an error. - Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings" json:"bindings,omitempty"` - // `etag` is used for optimistic concurrency control as a way to help - // prevent simultaneous updates of a policy from overwriting each other. - // It is strongly suggested that systems make use of the `etag` in the - // read-modify-write cycle to perform policy updates in order to avoid race - // conditions: An `etag` is returned in the response to `getIamPolicy`, and - // systems are expected to put that etag in the request to `setIamPolicy` to - // ensure that their change will be applied to the same version of the policy. - // - // If no `etag` is provided in the call to `setIamPolicy`, then the existing - // policy is overwritten blindly. - Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` -} - -func (m *Policy) Reset() { *m = Policy{} } -func (m *Policy) String() string { return proto.CompactTextString(m) } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } - -func (m *Policy) GetVersion() int32 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *Policy) GetBindings() []*Binding { - if m != nil { - return m.Bindings - } - return nil -} - -func (m *Policy) GetEtag() []byte { - if m != nil { - return m.Etag - } - return nil -} - -// Associates `members` with a `role`. -type Binding struct { - // Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. 
- // Required - Role string `protobuf:"bytes,1,opt,name=role" json:"role,omitempty"` - // Specifies the identities requesting access for a Cloud Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. - // - // * `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@gmail.com` or `joe@example.com`. - // - // - // * `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. - // - // * `domain:{domain}`: A Google Apps domain name that represents all the - // users of that domain. For example, `google.com` or `example.com`. - // - // - Members []string `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` -} - -func (m *Binding) Reset() { *m = Binding{} } -func (m *Binding) String() string { return proto.CompactTextString(m) } -func (*Binding) ProtoMessage() {} -func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } - -func (m *Binding) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -func (m *Binding) GetMembers() []string { - if m != nil { - return m.Members - } - return nil -} - -// The difference delta between two policies. -type PolicyDelta struct { - // The delta for Bindings between two policies. 
- BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas" json:"binding_deltas,omitempty"` -} - -func (m *PolicyDelta) Reset() { *m = PolicyDelta{} } -func (m *PolicyDelta) String() string { return proto.CompactTextString(m) } -func (*PolicyDelta) ProtoMessage() {} -func (*PolicyDelta) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } - -func (m *PolicyDelta) GetBindingDeltas() []*BindingDelta { - if m != nil { - return m.BindingDeltas - } - return nil -} - -// One delta entry for Binding. Each individual change (only one member in each -// entry) to a binding will be a separate entry. -type BindingDelta struct { - // The action that was performed on a Binding. - // Required - Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"` - // Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. - // Required - Role string `protobuf:"bytes,2,opt,name=role" json:"role,omitempty"` - // A single identity requesting access for a Cloud Platform resource. - // Follows the same format of Binding.members. 
- // Required - Member string `protobuf:"bytes,3,opt,name=member" json:"member,omitempty"` -} - -func (m *BindingDelta) Reset() { *m = BindingDelta{} } -func (m *BindingDelta) String() string { return proto.CompactTextString(m) } -func (*BindingDelta) ProtoMessage() {} -func (*BindingDelta) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } - -func (m *BindingDelta) GetAction() BindingDelta_Action { - if m != nil { - return m.Action - } - return BindingDelta_ACTION_UNSPECIFIED -} - -func (m *BindingDelta) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -func (m *BindingDelta) GetMember() string { - if m != nil { - return m.Member - } - return "" -} - -func init() { - proto.RegisterType((*Policy)(nil), "google.iam.v1.Policy") - proto.RegisterType((*Binding)(nil), "google.iam.v1.Binding") - proto.RegisterType((*PolicyDelta)(nil), "google.iam.v1.PolicyDelta") - proto.RegisterType((*BindingDelta)(nil), "google.iam.v1.BindingDelta") - proto.RegisterEnum("google.iam.v1.BindingDelta_Action", BindingDelta_Action_name, BindingDelta_Action_value) -} - -func init() { proto.RegisterFile("google/iam/v1/policy.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 387 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4d, 0x8f, 0xd3, 0x30, - 0x10, 0xc5, 0xed, 0x92, 0xd2, 0xd9, 0x0f, 0x15, 0x23, 0x55, 0xd1, 0xc2, 0xa1, 0xca, 0x29, 0x27, - 0x87, 0x16, 0x21, 0x24, 0x38, 0x35, 0x4d, 0x40, 0x39, 0xb0, 0x1b, 0x0c, 0xec, 0x81, 0xcb, 0xca, - 0x69, 0x2d, 0xcb, 0x28, 0xb6, 0xa3, 0x24, 0x54, 0xe2, 0x2f, 0x21, 0xf1, 0xff, 0x38, 0xa2, 0xd8, - 0xee, 0xaa, 0x95, 0x10, 0xb7, 0x79, 0x79, 0xef, 0x65, 0xde, 0xcc, 0x18, 0xae, 0x85, 0x31, 0xa2, - 0xe6, 0x89, 0x64, 0x2a, 0xd9, 0x2f, 0x93, 0xc6, 0xd4, 0x72, 0xfb, 0x93, 0x34, 0xad, 0xe9, 0x0d, - 0xbe, 0x74, 0x1c, 0x91, 0x4c, 0x91, 0xfd, 0xf2, 0xfa, 0x85, 0x97, 0xb2, 0x46, 0x26, 0x4c, 0x6b, - 0xd3, 0xb3, 0x5e, 0x1a, 0xdd, 0x39, 0x71, 0xf4, 
0x1d, 0x82, 0xd2, 0x9a, 0x71, 0x08, 0x93, 0x3d, - 0x6f, 0x3b, 0x69, 0x74, 0x88, 0x16, 0x28, 0x7e, 0x4c, 0x0f, 0x10, 0xaf, 0xe0, 0x49, 0x25, 0xf5, - 0x4e, 0x6a, 0xd1, 0x85, 0x67, 0x8b, 0x71, 0x7c, 0xbe, 0x9a, 0x93, 0x93, 0x1e, 0x24, 0x75, 0x34, - 0x7d, 0xd0, 0x61, 0x0c, 0x67, 0xbc, 0x67, 0x22, 0x1c, 0x2f, 0x50, 0x7c, 0x41, 0x6d, 0x1d, 0xbd, - 0x81, 0x89, 0x17, 0x0e, 0x74, 0x6b, 0x6a, 0x6e, 0x3b, 0x4d, 0xa9, 0xad, 0x87, 0x00, 0x8a, 0xab, - 0x8a, 0xb7, 0x5d, 0x38, 0x5a, 0x8c, 0xe3, 0x29, 0x3d, 0xc0, 0xe8, 0x13, 0x9c, 0xbb, 0x90, 0x19, - 0xaf, 0x7b, 0x86, 0x53, 0xb8, 0xf2, 0x7d, 0xee, 0x77, 0xc3, 0x87, 0x2e, 0x44, 0x36, 0xd5, 0xf3, - 0x7f, 0xa7, 0xb2, 0x26, 0x7a, 0x59, 0x1d, 0xa1, 0x2e, 0xfa, 0x8d, 0xe0, 0xe2, 0x98, 0xc7, 0x6f, - 0x21, 0x60, 0xdb, 0xfe, 0x30, 0xfd, 0xd5, 0x2a, 0xfa, 0xcf, 0xcf, 0xc8, 0xda, 0x2a, 0xa9, 0x77, - 0x3c, 0x4c, 0x33, 0x3a, 0x9a, 0x66, 0x0e, 0x81, 0x8b, 0x6f, 0x57, 0x30, 0xa5, 0x1e, 0x45, 0xaf, - 0x21, 0x70, 0x6e, 0x3c, 0x07, 0xbc, 0xde, 0x7c, 0x29, 0x6e, 0x6f, 0xee, 0xbf, 0xde, 0x7c, 0x2e, - 0xf3, 0x4d, 0xf1, 0xbe, 0xc8, 0xb3, 0xd9, 0x23, 0x3c, 0x81, 0xf1, 0x3a, 0xcb, 0x66, 0x08, 0x03, - 0x04, 0x34, 0xff, 0x78, 0x7b, 0x97, 0xcf, 0x46, 0xa9, 0x82, 0xa7, 0x5b, 0xa3, 0x4e, 0x33, 0xa5, - 0x7e, 0x2b, 0xe5, 0x70, 0xc9, 0x12, 0x7d, 0x7b, 0xe9, 0x59, 0x61, 0x6a, 0xa6, 0x05, 0x31, 0xad, - 0x48, 0x04, 0xd7, 0xf6, 0xce, 0x89, 0xa3, 0x58, 0x23, 0x3b, 0xff, 0x66, 0xde, 0x49, 0xa6, 0xfe, - 0x20, 0xf4, 0x6b, 0xf4, 0xec, 0x83, 0x73, 0x6d, 0x6a, 0xf3, 0x63, 0x47, 0x0a, 0xa6, 0xc8, 0xdd, - 0xb2, 0x0a, 0xac, 0xeb, 0xd5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x4a, 0x85, 0x10, 0x68, - 0x02, 0x00, 0x00, -} diff --git a/vendor/vendor.json b/vendor/vendor.json index 0e7ee7d8133..0023824ee6a 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -2,96 +2,12 @@ "comment": "", "ignore": "appengine test github.com/hashicorp/nomad/ github.com/hashicorp/terraform/backend", "package": [ - { - "checksumSHA1": "AH7jcN7pvaPDU6UjHdpT081DDGk=", - "path": 
"cloud.google.com/go/compute/metadata", - "revision": "36dc44d65c859022eaa611ad10621be885d3643a", - "revisionTime": "2017-10-25T01:01:36Z" - }, - { - "checksumSHA1": "/ixPd+hSgsbAjBI/fPqmHtTFRM8=", - "path": "cloud.google.com/go/iam", - "revision": "8c4ed1f54434ff9ea67929c91a4a10db57a52780", - "revisionTime": "2017-11-02T20:51:44Z" - }, - { - "checksumSHA1": "+2A2Mazq65iiT8xIDgSh5cypBSQ=", - "path": "cloud.google.com/go/internal", - "revision": "8c4ed1f54434ff9ea67929c91a4a10db57a52780", - "revisionTime": "2017-11-02T20:51:44Z" - }, - { - "checksumSHA1": "MCns2LLZtUZEx6JWyYBrcbSuTXg=", - "path": "cloud.google.com/go/internal/optional", - "revision": "8c4ed1f54434ff9ea67929c91a4a10db57a52780", - "revisionTime": "2017-11-02T20:51:44Z" - }, - { - "checksumSHA1": "QXE70x1YpmwfX8bqcncO5LxjeEA=", - "path": "cloud.google.com/go/internal/version", - "revision": "8c4ed1f54434ff9ea67929c91a4a10db57a52780", - "revisionTime": "2017-11-02T20:51:44Z" - }, - { - "checksumSHA1": "OjwUqj0+fsSmPGrenGRxHeUeM2o=", - "path": "cloud.google.com/go/storage", - "revision": "8c4ed1f54434ff9ea67929c91a4a10db57a52780", - "revisionTime": "2017-11-02T20:51:44Z" - }, - { - "checksumSHA1": "mUr+CfHry/vyDz3SeO2f7UXI/PI=", - "path": "github.com/Azure/azure-sdk-for-go/arm/storage", - "revision": "f7bb4db3ea4c73dc58bd284c38ea644a79324be0", - "revisionTime": "2017-10-23T18:42:54Z" - }, - { - "checksumSHA1": "KzijvZt+5kIVOsdv8GJkUK9njpc=", - "path": "github.com/Azure/azure-sdk-for-go/storage", - "revision": "f7bb4db3ea4c73dc58bd284c38ea644a79324be0", - "revisionTime": "2017-10-23T18:42:54Z" - }, - { - "checksumSHA1": "Xd5xJ9A8KynuExnf30qfukN3RR0=", - "path": "github.com/Azure/go-autorest/autorest", - "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", - "revisionTime": "2017-10-20T21:14:44Z" - }, - { - "checksumSHA1": "Ktj3H1WpOqxnC9kdAA+F7Ol7/RQ=", - "path": "github.com/Azure/go-autorest/autorest/adal", - "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", - "revisionTime": 
"2017-10-20T21:14:44Z" - }, - { - "checksumSHA1": "Pt0rzDhmiWpNeGvXT73kra89guI=", - "path": "github.com/Azure/go-autorest/autorest/azure", - "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", - "revisionTime": "2017-10-20T21:14:44Z" - }, - { - "checksumSHA1": "9nXCi9qQsYjxCeajJKWttxgEt0I=", - "path": "github.com/Azure/go-autorest/autorest/date", - "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", - "revisionTime": "2017-10-20T21:14:44Z" - }, - { - "checksumSHA1": "HfqZyKllcHQDvTwgCaYL1jUPmW0=", - "path": "github.com/Azure/go-autorest/autorest/validation", - "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", - "revisionTime": "2017-10-20T21:14:44Z" - }, { "checksumSHA1": "PYNaEEt9v8iAvGVUD4do0YIeR1A=", "path": "github.com/Azure/go-ntlmssp", "revision": "c92175d540060095c69ced311f76aea56c83ecdb", "revisionTime": "2017-08-03T03:49:30Z" }, - { - "checksumSHA1": "IrtvVIFBTQmk0+vM7g2xtka5SFg=", - "path": "github.com/Unknwon/com", - "revision": "7677a1d7c1137cd3dd5ba7a076d0c898a1ef4520", - "revisionTime": "2017-08-19T22:39:52Z" - }, { "checksumSHA1": "jQh1fnoKPKMURvKkpdRjN695nAQ=", "path": "github.com/agext/levenshtein", @@ -1018,102 +934,12 @@ "revision": "41eea22f717c616615e1e59aa06cf831f9901f35", "revisionTime": "2017-03-13T23:49:21Z" }, - { - "checksumSHA1": "7BC2/27NId9xaPDB5w3nWN2mn9A=", - "path": "github.com/coreos/etcd/auth/authpb", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "XtVAwbJWD12FGuZrIkxpe8t9TB8=", - "path": "github.com/coreos/etcd/client", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "4m3qRCZfmTyRSzd4eH8ovMb5OXY=", - "path": "github.com/coreos/etcd/clientv3", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "LpOgTec6cz2Tf3zDav7VkqMHmBM=", - "path": "github.com/coreos/etcd/clientv3/concurrency", - 
"revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "VMC9J0rMVk3Fv8r8Bj7qqLlXc3E=", - "path": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "c0ltvGUOnk8qaEshFwc0PDH5nbc=", - "path": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "JAkX9DfIBrSe0vUa07xl5cikxVQ=", - "path": "github.com/coreos/etcd/mvcc/mvccpb", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "mKIXx1kDwmVmdIpZ3pJtRBuUKso=", - "path": "github.com/coreos/etcd/pkg/pathutil", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "z+C4BtPa8wbOUKW5dmHyhNnTulg=", - "path": "github.com/coreos/etcd/pkg/srv", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "rMyIh9PsSvPs6Yd+YgKITQzQJx8=", - "path": "github.com/coreos/etcd/pkg/tlsutil", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "agofzi+YZ7VYbxCldLaHYHAtlpc=", - "path": "github.com/coreos/etcd/pkg/transport", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "gx1gJIMU6T0UNQ0bPZ/drQ8cpCI=", - "path": "github.com/coreos/etcd/pkg/types", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - "checksumSHA1": "sp2FkEyaIGiQFOEZCTDkBZgyHOs=", - "path": "github.com/coreos/etcd/version", - "revision": "20f2914e1301abde233b2039e110f2be57b91f89", - "revisionTime": "2017-10-26T00:52:00Z" - }, - { - 
"checksumSHA1": "97BsbXOiZ8+Kr+LIuZkQFtSj7H4=", - "path": "github.com/coreos/go-semver/semver", - "revision": "1817cd4bea52af76542157eeabd74b057d1a199e", - "revisionTime": "2017-06-13T09:22:38Z" - }, { "checksumSHA1": "dvabztWVQX8f6oMLRyv4dLH+TGY=", "path": "github.com/davecgh/go-spew/spew", "revision": "346938d642f2ec3594ed81d874461961cd0faa76", "revisionTime": "2016-10-29T20:57:26Z" }, - { - "checksumSHA1": "+TKtBzv23ywvmmqRiGEjUba4YmI=", - "path": "github.com/dgrijalva/jwt-go", - "revision": "dbeaa9332f19a944acb5736b4456cfcc02140e29", - "revisionTime": "2017-10-19T21:57:19Z" - }, { "checksumSHA1": "GCskdwYAPW2S34918Z5CgNMJ2Wc=", "path": "github.com/dylanmei/iso8601", @@ -1146,12 +972,6 @@ "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", "revisionTime": "2017-10-21T04:39:52Z" }, - { - "checksumSHA1": "XNHQiRltA7NQJV0RvUroY+cf+zg=", - "path": "github.com/golang/protobuf/protoc-gen-go/descriptor", - "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", - "revisionTime": "2017-10-21T04:39:52Z" - }, { "checksumSHA1": "VfkiItDBFFkZluaAMAzJipDXNBY=", "path": "github.com/golang/protobuf/ptypes", @@ -1176,342 +996,6 @@ "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", "revisionTime": "2017-10-21T04:39:52Z" }, - { - "checksumSHA1": "y1/eOdw+BOXCuT83J7mP3ReXaf8=", - "path": "github.com/googleapis/gax-go", - "revision": "317e0006254c44a0ac427cc52a0e083ff0b9622f", - "revisionTime": "2017-09-15T02:47:31Z" - }, - { - "checksumSHA1": "Zcapl92yLsRGDnDfmD5BMZrU0i8=", - "path": "github.com/gophercloud/gophercloud", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "b7g9TcU1OmW7e2UySYeOAmcfHpY=", - "path": "github.com/gophercloud/gophercloud/internal", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "0rCdHaLp8s8+UK/Uk/7E3qsLtPI=", - "path": "github.com/gophercloud/gophercloud/openstack", - "revision": 
"22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "xO3K2ugrp3x0VQQ+230flEPMFLs=", - "path": "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "lfD3gq0G0BLLrwS3/7IE2SabPXI=", - "path": "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "B4IXSmq364HcBruvvV0QjDFxZgc=", - "path": "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "HXIWMjezz3bJS/U+AczZ5YkDT3E=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "kCHEEeRVZeR1LhbvNP+WyvB8z2s=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "vFS5BwnCdQIfKm1nNWrR+ijsAZA=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "jOyPWAJGRqLHofGLL/aiYBZzJR0=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "+hlElX7o8ULWTc0r7oGyDlOnwWM=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints", - "revision": 
"22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "Z8X3+qVpl9EImYGJ5tJ9Pxn3qqc=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "PWbrce9A6LMWIGr0fK04AEEuQec=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "qfVZltu1fYTYXS97WbjeLuLPgUc=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "+Gif+WFd0WVjefjvmlR7jyTrdzQ=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "TMkBTIrYeL78yJ6wZNJQqG0tHR4=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "NwHRS4gvS7xcXlWRT8WpJbBbvbs=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "CSnfH01hSas0bdc/3m/f5Rt6SFY=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/images", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "BzQbqNKKDkIHN+1G6sRsMeWz4Zs=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/servers", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", 
- "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "hBVpoXsfRy6beVIhE6tcmtvgx+s=", - "path": "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "VxvzmFE0VrKXbmVvSN5dZ+w2zPE=", - "path": "github.com/gophercloud/gophercloud/openstack/dns/v2/zones", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "oOJkelRgWx0NzUmxuI3kTS27gM0=", - "path": "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "z5NsqMZX3TLMzpmwzOOXE4M5D9w=", - "path": "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "ov1ZPU9SK5I1l733CprPgALenhc=", - "path": "github.com/gophercloud/gophercloud/openstack/identity/v3/groups", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "nasom1r/2W2OnR//mipNfasWv5U=", - "path": "github.com/gophercloud/gophercloud/openstack/identity/v3/projects", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "HzVyehPh0jvNZpL5iodoWpUd46k=", - "path": "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "qEjy/34oEk+X1l96ii8ot8vK47g=", - "path": "github.com/gophercloud/gophercloud/openstack/identity/v3/users", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "I7QtHxTYPCZybsi+u/J5AQaNxxQ=", - "path": 
"github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "gnDqkD2hdbV0ek3m2eNgsc4opdg=", - "path": "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "eLeGhmXLNp+j9PBTjLhpHY+3YxE=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "QuUP9C0vhUMQx6OUgekqjn6/3xs=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "FDgt1WaVGbp8w6vyCJbtkbsbWBc=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "hanDdNBxWh1gdmyKl6QhytbprYc=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "I6Q1xkqwqx6yL9MU1oMKfSozH2U=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "QZx6LgtiiVOYViHwIUo8SvBGDW4=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": 
"HiGzZXGIxrItijPCH+L7EbbCb9w=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "KZJLk70d5SX8TQO1q0MLAxGEBCU=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "KXKo/TByIHelRDES1IB01nlQMoM=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "3QfwJnsJHohoYV16/BOtDXpNEYI=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "yFJA+pHR+xjch66bvrKhQqVFlf0=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "HPfnRxqVgYpr3xeGjBhfDcA5wEw=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "jNrZgB/ulP49gM7LOl4tkXQNlv8=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "eHPa8JiswxlLbASzyur/kPn8ewc=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - 
"revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "eUvwdXvz/Zkckr/isvP+2nXY2J8=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "fxOov7uyY0I8Y/LlwoFVfVnWrbA=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "ja7FbTK5eFj577HM5YxeQ8qWQ+I=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "hmTj0vghsx3PEnkk3b3NUXWC2Ew=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/networks", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "68RL/R3cHJvkHPNZmu0GW4xd4cY=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/ports", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "P6Wvbidr+AvcKL0qDC7Dmpy8nyM=", - "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "Q9VHGSztw2B4PmN4kyvY9TcJD6c=", - "path": "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "/ooNeOLNs+Z2Ez6O9oqnfY5n1H8=", - "path": "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": 
"2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "F1gaBPZj2MGwEF7u8GARRa6XlvQ=", - "path": "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "aODoF15ZwA4XJOWciguaAJRq/6o=", - "path": "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "hOTxf5bdm8+cwSTO+OO/maDz6ss=", - "path": "github.com/gophercloud/gophercloud/openstack/utils", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, - { - "checksumSHA1": "YspETi3tOMvawKIT91HyuqaA5lM=", - "path": "github.com/gophercloud/gophercloud/pagination", - "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", - "revisionTime": "2017-10-26T02:45:19Z" - }, { "checksumSHA1": "FUiF2WLrih0JdHsUTMMDz3DRokw=", "path": "github.com/hashicorp/atlas-go/archive", @@ -1524,12 +1008,6 @@ "revision": "1ea2527d6a49b4c9f18753aa21627aae6a7c26a1", "revisionTime": "2017-06-19T22:02:35Z" }, - { - "checksumSHA1": "4hc6jGp9/1m7dp5ACRcGnspjO5E=", - "path": "github.com/hashicorp/consul/api", - "revision": "b31cfaaf2ac5ddd6ddee1d2e1041466e939dc097", - "revisionTime": "2017-10-25T22:34:43Z" - }, { "checksumSHA1": "cdOCt0Yb+hdErz8NAQqayxPmRsY=", "path": "github.com/hashicorp/errwrap", @@ -1576,12 +1054,6 @@ "revision": "a5174f84d7f8ff00fb07ab4ef1f380d32eee0e63", "revisionTime": "2017-08-16T15:18:19Z" }, - { - "checksumSHA1": "yzoWV7yrS/TvOrKy5ZrdUjsYaOA=", - "path": "github.com/hashicorp/go-retryablehttp", - "revision": "794af36148bf63c118d6db80eb902a136b907e71", - "revisionTime": "2017-08-24T18:08:59Z" - }, { "checksumSHA1": "A1PcINvF3UiwHRKn8UcgARgvGRs=", "path": "github.com/hashicorp/go-rootcerts", @@ -1731,96 +1203,12 @@ "revision": "0dc08b1671f34c4250ce212759ebd880f743d883", "revisionTime": 
"2015-06-09T07:04:31Z" }, - { - "checksumSHA1": "mS15CkImPzXYsgNwl3Mt9Gh3Vb0=", - "path": "github.com/hashicorp/serf/coordinate", - "revision": "c20a0b1b1ea9eb8168bcdec0116688fa9254e449", - "revisionTime": "2017-10-22T02:00:50Z" - }, { "checksumSHA1": "QOrITO2Dm4mWEYA8RXP1x0UeiUw=", "path": "github.com/hashicorp/terraform", "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", "revisionTime": "2017-12-20T14:20:43Z" }, - { - "checksumSHA1": "nKKclpNMMvJrUbtMNDujRtJcR6I=", - "path": "github.com/hashicorp/terraform/backend", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": "ZWqZhZxaT2AMNy4dzCcvMKc46GY=", - "path": "github.com/hashicorp/terraform/backend/atlas", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": "StxVDAMzeMUdXUcRbjcuDxm8GD0=", - "path": "github.com/hashicorp/terraform/backend/init", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": "ISwgLoSPkcEYAcwFoYu5FNsMDD0=", - "path": "github.com/hashicorp/terraform/backend/legacy", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": "1bm/jLoSRCTVo1eu6c3u7dwRQ28=", - "path": "github.com/hashicorp/terraform/backend/local", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": "dL2tWGJpT3ohSID91w/6wQaFhX0=", - "path": "github.com/hashicorp/terraform/backend/remote-state/azure", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": "hs39fP+wdfuvpN/lsMpYwUZUV8I=", - "path": "github.com/hashicorp/terraform/backend/remote-state/consul", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": 
"SllujprNPMotiPKfcPsQRF/7r64=", - "path": "github.com/hashicorp/terraform/backend/remote-state/etcdv3", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": "RolC84+BZoeoV8Nf8pdLFv2sUz8=", - "path": "github.com/hashicorp/terraform/backend/remote-state/gcs", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": "uJi6XL6OFIzU0r3G0YX0L3YzxRE=", - "path": "github.com/hashicorp/terraform/backend/remote-state/inmem", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": "xH9qq/2HWzIPk4E9AY0PY0AQf2Q=", - "path": "github.com/hashicorp/terraform/backend/remote-state/manta", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": "Px1bBSMVKEsHyXL655w3LX3clRM=", - "path": "github.com/hashicorp/terraform/backend/remote-state/s3", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, - { - "checksumSHA1": "l0SZPCxWxxlYHOedkUCZUCWw4R0=", - "path": "github.com/hashicorp/terraform/backend/remote-state/swift", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, { "checksumSHA1": "AELhei0e6ZKXmc7Gl3zeOvKK6Ko=", "path": "github.com/hashicorp/terraform/builtin/providers/terraform", @@ -1857,12 +1245,6 @@ "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", "revisionTime": "2017-12-20T14:20:43Z" }, - { - "checksumSHA1": "z2TjmtULJ0grkdY1eQ0pGB6look=", - "path": "github.com/hashicorp/terraform/command", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, { "checksumSHA1": "HWbnuaEFdfRFeKxZdlYUWZm+DU0=", "path": "github.com/hashicorp/terraform/command/clistate", @@ -2029,14 +1411,6 @@ "version": "master", "versionExact": "master" }, - { - 
"checksumSHA1": "1yCGh/Wl4H4ODBBRmIRFcV025b0=", - "path": "github.com/hashicorp/terraform/helper/shadow", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z", - "version": "master", - "versionExact": "master" - }, { "checksumSHA1": "eQ6F8nDi/R+F/SX51xCEY8iPZOE=", "path": "github.com/hashicorp/terraform/helper/slowmessage", @@ -2135,12 +1509,6 @@ "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", "revisionTime": "2017-12-20T14:20:43Z" }, - { - "checksumSHA1": "xe9XpHd/H/N6fkZ4iAL8MiHFnKs=", - "path": "github.com/hashicorp/terraform/state/remote", - "revision": "a262a0e046c288f3b3565aaabc2d2e55db97708b", - "revisionTime": "2017-12-20T14:20:43Z" - }, { "checksumSHA1": "VXlzRRDVOqeMvnnrbUcR9H64OA4=", "path": "github.com/hashicorp/terraform/svchost", @@ -2216,72 +1584,6 @@ "revision": "bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d", "revisionTime": "2016-08-03T19:07:31Z" }, - { - "checksumSHA1": "NOwNdnb70M6s9LvhaPFabBVwlBs=", - "path": "github.com/joyent/gocommon", - "revision": "b78708995d1c2ebdb64a3061b0bca5d8ccdf0fc2", - "revisionTime": "2016-12-02T19:23:17Z" - }, - { - "checksumSHA1": "O0WFPpYSMzeDL11yO84IYBSXrmk=", - "path": "github.com/joyent/gocommon/client", - "revision": "b78708995d1c2ebdb64a3061b0bca5d8ccdf0fc2", - "revisionTime": "2016-12-02T19:23:17Z" - }, - { - "checksumSHA1": "fsnHUI4h0h1BUeiWNhkRJhzZD/s=", - "path": "github.com/joyent/gocommon/errors", - "revision": "b78708995d1c2ebdb64a3061b0bca5d8ccdf0fc2", - "revisionTime": "2016-12-02T19:23:17Z" - }, - { - "checksumSHA1": "cz4amcSofbyq0dH1sdOHNUvznWw=", - "path": "github.com/joyent/gocommon/http", - "revision": "b78708995d1c2ebdb64a3061b0bca5d8ccdf0fc2", - "revisionTime": "2016-12-02T19:23:17Z" - }, - { - "checksumSHA1": "yMJdr6tmhoSgkjLjfyeDZL6vuXo=", - "path": "github.com/joyent/gocommon/jpc", - "revision": "b78708995d1c2ebdb64a3061b0bca5d8ccdf0fc2", - "revisionTime": "2016-12-02T19:23:17Z" - }, - { - "checksumSHA1": "ysHPYOB/xV0dMIAES8ZOQu9KwQQ=", - 
"path": "github.com/joyent/gomanta/manta", - "revision": "41c8bc3faef9789454337062b5fac84699f75834", - "revisionTime": "2016-12-02T19:23:08Z" - }, - { - "checksumSHA1": "N0NRIcJF7aj1wd56DA1N9GpYq/4=", - "path": "github.com/joyent/gosign/auth", - "revision": "9abcee278795b82b36858cdfc857c8a0e7de797c", - "revisionTime": "2016-11-14T19:17:44Z" - }, - { - "checksumSHA1": "4z6Ouu/tFbbGSQ0b4P9a/mAgv/8=", - "path": "github.com/joyent/triton-go", - "revision": "8f217b9dcc618ec8ca755a027a3666c021dd0d16", - "revisionTime": "2017-11-02T14:59:11Z" - }, - { - "checksumSHA1": "Cth7NCLH/HaeKh9ZMRpQtudTEQQ=", - "path": "github.com/joyent/triton-go/authentication", - "revision": "8f217b9dcc618ec8ca755a027a3666c021dd0d16", - "revisionTime": "2017-11-02T14:59:11Z" - }, - { - "checksumSHA1": "D/DrtmftcDSlshuOShCxA6SiVMI=", - "path": "github.com/joyent/triton-go/client", - "revision": "8f217b9dcc618ec8ca755a027a3666c021dd0d16", - "revisionTime": "2017-11-02T14:59:11Z" - }, - { - "checksumSHA1": "9VONvM4aQL088cLPgg+Z0K0dshc=", - "path": "github.com/joyent/triton-go/storage", - "revision": "8f217b9dcc618ec8ca755a027a3666c021dd0d16", - "revisionTime": "2017-11-02T14:59:11Z" - }, { "checksumSHA1": "gEjGS03N1eysvpQ+FCHTxPcbxXc=", "path": "github.com/kardianos/osext", @@ -2342,12 +1644,6 @@ "revision": "93f5b35093ba15e0f86e412cc5c767d5c10c15fd", "revisionTime": "2016-10-04T15:35:44Z" }, - { - "checksumSHA1": "90/rTD6BDXVZZSY3Qeqnrg9Wb10=", - "path": "github.com/lusis/go-artifactory/src/artifactory.v401", - "revision": "da4b7cd952125757a375908730ba9445adc5ba99", - "revisionTime": "2017-10-20T17:55:49Z" - }, { "checksumSHA1": "DlpjiTUHFFoU39yh4FIKS8g7L7A=", "path": "github.com/masterzen/azure-sdk-for-go/core/http", @@ -2533,12 +1829,6 @@ "revision": "b061729afc07e77a8aa4fad0a2fd840958f1942a", "revisionTime": "2016-09-27T10:08:44Z" }, - { - "checksumSHA1": "q12HCybmDTjU2efDmyBOStiZ9/4=", - "path": "github.com/terraform-providers/terraform-provider-openstack/openstack", - "revision": 
"635bc49af8a8270ec645f085e0652ea3b1bb200e", - "revisionTime": "2017-10-23T15:34:37Z" - }, { "checksumSHA1": "EUR26b2t3XDPxiEMwDBtn8Ajp8A=", "path": "github.com/terraform-providers/terraform-provider-template/template", @@ -2555,12 +1845,6 @@ "version": "v0.1.0", "versionExact": "v0.1.0" }, - { - "checksumSHA1": "IJYn37+lvgZhQcErB0J4s1qvBds=", - "path": "github.com/ugorji/go/codec", - "revision": "16373bd7016380b084e8a4e8f9560efe82cbc98c", - "revisionTime": "2017-10-26T02:16:19Z" - }, { "checksumSHA1": "qgMa75aMGbkFY0jIqqqgVnCUoNA=", "path": "github.com/ulikunitz/xz", @@ -2735,12 +2019,6 @@ "revision": "4b14673ba32bee7f5ac0f990a48f033919fd418b", "revisionTime": "2017-09-15T10:16:46Z" }, - { - "checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=", - "path": "golang.org/x/net/context/ctxhttp", - "revision": "4b14673ba32bee7f5ac0f990a48f033919fd418b", - "revisionTime": "2017-09-15T10:16:46Z" - }, { "checksumSHA1": "vqc3a+oTUGX8PmD0TS+qQ7gmN8I=", "path": "golang.org/x/net/html", @@ -2789,36 +2067,6 @@ "revision": "4b14673ba32bee7f5ac0f990a48f033919fd418b", "revisionTime": "2017-09-15T10:16:46Z" }, - { - "checksumSHA1": "6AmgST478RkzWypvthanPXFDTrw=", - "path": "golang.org/x/oauth2", - "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", - "revisionTime": "2017-09-28T00:25:42Z" - }, - { - "checksumSHA1": "JTBn9MQUhwHtjwv7rC9Zg4KRN7g=", - "path": "golang.org/x/oauth2/google", - "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", - "revisionTime": "2017-09-28T00:25:42Z" - }, - { - "checksumSHA1": "4XCNzl3cgGtdd0SrUZHSBdsW+m4=", - "path": "golang.org/x/oauth2/internal", - "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", - "revisionTime": "2017-09-28T00:25:42Z" - }, - { - "checksumSHA1": "huVltYnXdRFDJLgp/ZP9IALzG7g=", - "path": "golang.org/x/oauth2/jws", - "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", - "revisionTime": "2017-09-28T00:25:42Z" - }, - { - "checksumSHA1": "/eV4E08BY+f1ZikiR7OOMJAj3m0=", - "path": "golang.org/x/oauth2/jwt", - "revision": 
"bb50c06baba3d0c76f9d125c0719093e315b5b44", - "revisionTime": "2017-09-28T00:25:42Z" - }, { "checksumSHA1": "NqlOcIqzDg46JUFFXqZdKUER0B0=", "path": "golang.org/x/sys/unix", @@ -2849,120 +2097,6 @@ "revision": "6eab0e8f74e86c598ec3b6fad4888e0c11482d48", "revisionTime": "2017-09-27T13:34:20Z" }, - { - "checksumSHA1": "/y0saWnM+kTnSvZrNlvoNOgj0Uo=", - "path": "google.golang.org/api/gensupport", - "revision": "c5d94a018003dc535877ebf36b0ed720c690202a", - "revisionTime": "2017-10-26T00:03:36Z" - }, - { - "checksumSHA1": "BWKmb7kGYbfbvXO6E7tCpTh9zKE=", - "path": "google.golang.org/api/googleapi", - "revision": "c5d94a018003dc535877ebf36b0ed720c690202a", - "revisionTime": "2017-10-26T00:03:36Z" - }, - { - "checksumSHA1": "1K0JxrUfDqAB3MyRiU1LKjfHyf4=", - "path": "google.golang.org/api/googleapi/internal/uritemplates", - "revision": "c5d94a018003dc535877ebf36b0ed720c690202a", - "revisionTime": "2017-10-26T00:03:36Z" - }, - { - "checksumSHA1": "Mr2fXhMRzlQCgANFm91s536pG7E=", - "path": "google.golang.org/api/googleapi/transport", - "revision": "9fdb53e0c633e09f7df19c7745ee0e39ce3f5e6a", - "revisionTime": "2017-11-06T00:03:11Z" - }, - { - "checksumSHA1": "CpjSGeyQJbLLPxVl/CWs5o9p+jU=", - "path": "google.golang.org/api/internal", - "revision": "9fdb53e0c633e09f7df19c7745ee0e39ce3f5e6a", - "revisionTime": "2017-11-06T00:03:11Z" - }, - { - "checksumSHA1": "slcGOTGSdukEPPSN81Q5WZGmhog=", - "path": "google.golang.org/api/iterator", - "revision": "9fdb53e0c633e09f7df19c7745ee0e39ce3f5e6a", - "revisionTime": "2017-11-06T00:03:11Z" - }, - { - "checksumSHA1": "Z9LQvCPO0WV9PdjgIXlfVOGZRlM=", - "path": "google.golang.org/api/option", - "revision": "9fdb53e0c633e09f7df19c7745ee0e39ce3f5e6a", - "revisionTime": "2017-11-06T00:03:11Z" - }, - { - "checksumSHA1": "Zd7ojgrWPn3j7fLx9HB6/Oub8lE=", - "path": "google.golang.org/api/storage/v1", - "revision": "c5d94a018003dc535877ebf36b0ed720c690202a", - "revisionTime": "2017-10-26T00:03:36Z" - }, - { - "checksumSHA1": 
"6uZUOs+hU9PFsKbqrzqS3KF72Fg=", - "path": "google.golang.org/api/transport/http", - "revision": "9fdb53e0c633e09f7df19c7745ee0e39ce3f5e6a", - "revisionTime": "2017-11-06T00:03:11Z" - }, - { - "checksumSHA1": "WPEbk80NB3Esdh4Yk0PXr2K7xVU=", - "path": "google.golang.org/appengine", - "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", - "revisionTime": "2017-10-11T21:50:12Z" - }, - { - "checksumSHA1": "/XD6hF+tqSNrfPrFmukDeHKFVVA=", - "path": "google.golang.org/appengine/internal", - "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", - "revisionTime": "2017-10-11T21:50:12Z" - }, - { - "checksumSHA1": "x6Thdfyasqd68dWZWqzWWeIfAfI=", - "path": "google.golang.org/appengine/internal/app_identity", - "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", - "revisionTime": "2017-10-11T21:50:12Z" - }, - { - "checksumSHA1": "TsNO8P0xUlLNyh3Ic/tzSp/fDWM=", - "path": "google.golang.org/appengine/internal/base", - "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", - "revisionTime": "2017-10-11T21:50:12Z" - }, - { - "checksumSHA1": "5QsV5oLGSfKZqTCVXP6NRz5T4Tw=", - "path": "google.golang.org/appengine/internal/datastore", - "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", - "revisionTime": "2017-10-11T21:50:12Z" - }, - { - "checksumSHA1": "Gep2T9zmVYV8qZfK2gu3zrmG6QE=", - "path": "google.golang.org/appengine/internal/log", - "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", - "revisionTime": "2017-10-11T21:50:12Z" - }, - { - "checksumSHA1": "eLZVX1EHLclFtQnjDIszsdyWRHo=", - "path": "google.golang.org/appengine/internal/modules", - "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", - "revisionTime": "2017-10-11T21:50:12Z" - }, - { - "checksumSHA1": "a1XY7rz3BieOVqVI2Et6rKiwQCk=", - "path": "google.golang.org/appengine/internal/remote_api", - "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", - "revisionTime": "2017-10-11T21:50:12Z" - }, - { - "checksumSHA1": "B22iMMY2vi1Q9kseWb/ZznpW8lQ=", - "path": 
"google.golang.org/genproto/googleapis/api/annotations", - "revision": "11c7f9e547da6db876260ce49ea7536985904c9b", - "revisionTime": "2017-11-03T03:06:25Z" - }, - { - "checksumSHA1": "m5IWVQJ4fVYc3b+5OrZ7BdNlvkA=", - "path": "google.golang.org/genproto/googleapis/iam/v1", - "revision": "11c7f9e547da6db876260ce49ea7536985904c9b", - "revisionTime": "2017-11-03T03:06:25Z" - }, { "checksumSHA1": "Tc3BU26zThLzcyqbVtiSEp7EpU8=", "path": "google.golang.org/genproto/googleapis/rpc/status", From 0cb3568983d0f224f32c28623f6db0bce0c9deef Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 20 Dec 2017 10:07:30 -0500 Subject: [PATCH 152/350] don't build tests twice in makefile There's no reason to spend time installing the test dependencies separately from running the tests themselves. Remove `grep -v vendor`, the `./...` wildcard already excludes it. --- GNUmakefile | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 5d5ed69f0cf..f5dda561479 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -1,5 +1,4 @@ SWEEP?=us-east-1,us-west-2 -TEST?=$$(go list ./... |grep -v 'vendor') GOFMT_FILES?=$$(find . -name '*.go' |grep -v vendor) default: build @@ -9,15 +8,13 @@ build: fmtcheck sweep: @echo "WARNING: This will destroy infrastructure. Use only in development accounts." - go test $(TEST) -v -sweep=$(SWEEP) $(SWEEPARGS) + go test ./... -v -sweep=$(SWEEP) $(SWEEPARGS) test: fmtcheck - go test -i $(TEST) || exit 1 - echo $(TEST) | \ - xargs -t -n4 go test $(TESTARGS) -timeout=30s -parallel=4 + go test ./... -timeout=30s -parallel=4 testacc: fmtcheck - TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m + TF_ACC=1 go test ./... -v $(TESTARGS) -timeout 120m vet: @echo "go vet ." From 65b06012e6708f41dbf2e854ad307d7a4562de6c Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 10 Jan 2018 10:59:50 -0500 Subject: [PATCH 153/350] update terraform to v0.11.2 Get the tagged version of terraform core. 
Removed a few more unused packages in the process. --- vendor/github.com/Azure/go-ntlmssp/LICENSE | 21 - vendor/github.com/Azure/go-ntlmssp/README.md | 29 - .../Azure/go-ntlmssp/authenticate_message.go | 128 - .../github.com/Azure/go-ntlmssp/authheader.go | 37 - vendor/github.com/Azure/go-ntlmssp/avids.go | 17 - .../Azure/go-ntlmssp/challenge_message.go | 82 - .../Azure/go-ntlmssp/messageheader.go | 21 - .../Azure/go-ntlmssp/negotiate_flags.go | 44 - .../Azure/go-ntlmssp/negotiate_message.go | 31 - .../github.com/Azure/go-ntlmssp/negotiator.go | 124 - vendor/github.com/Azure/go-ntlmssp/nlmp.go | 51 - vendor/github.com/Azure/go-ntlmssp/unicode.go | 29 - .../github.com/Azure/go-ntlmssp/varfield.go | 40 - vendor/github.com/armon/circbuf/LICENSE | 20 - vendor/github.com/armon/circbuf/README.md | 28 - vendor/github.com/armon/circbuf/circbuf.go | 92 - .../aws/aws-sdk-go/service/appsync/api.go | 5417 ----- .../aws/aws-sdk-go/service/appsync/doc.go | 29 - .../aws/aws-sdk-go/service/appsync/errors.go | 63 - .../aws/aws-sdk-go/service/appsync/service.go | 97 - .../aws/aws-sdk-go/service/budgets/api.go | 3169 --- .../aws/aws-sdk-go/service/budgets/doc.go | 26 - .../aws/aws-sdk-go/service/budgets/errors.go | 53 - .../aws/aws-sdk-go/service/budgets/service.go | 95 - .../aws/aws-sdk-go/service/dax/api.go | 4677 ---- .../aws/aws-sdk-go/service/dax/doc.go | 33 - .../aws/aws-sdk-go/service/dax/errors.go | 160 - .../aws/aws-sdk-go/service/dax/service.go | 95 - .../aws/aws-sdk-go/service/gamelift/api.go | 19092 ---------------- .../aws/aws-sdk-go/service/gamelift/doc.go | 304 - .../aws/aws-sdk-go/service/gamelift/errors.go | 102 - .../aws-sdk-go/service/gamelift/service.go | 95 - .../aws/aws-sdk-go/service/glue/api.go | 18679 --------------- .../aws/aws-sdk-go/service/glue/doc.go | 28 - .../aws/aws-sdk-go/service/glue/errors.go | 120 - .../aws/aws-sdk-go/service/glue/service.go | 95 - .../service/lexmodelbuildingservice/api.go | 10328 --------- 
.../service/lexmodelbuildingservice/doc.go | 30 - .../service/lexmodelbuildingservice/errors.go | 61 - .../lexmodelbuildingservice/service.go | 97 - .../aws-sdk-go/service/mediaconvert/api.go | 14536 ------------ .../aws-sdk-go/service/mediaconvert/doc.go | 28 - .../aws-sdk-go/service/mediaconvert/errors.go | 30 - .../service/mediaconvert/service.go | 97 - .../aws/aws-sdk-go/service/medialive/api.go | 10820 --------- .../aws/aws-sdk-go/service/medialive/doc.go | 28 - .../aws-sdk-go/service/medialive/errors.go | 42 - .../aws-sdk-go/service/medialive/service.go | 97 - .../aws-sdk-go/service/mediapackage/api.go | 3242 --- .../aws-sdk-go/service/mediapackage/doc.go | 28 - .../aws-sdk-go/service/mediapackage/errors.go | 30 - .../service/mediapackage/service.go | 97 - .../aws-sdk-go/service/mediastoredata/api.go | 1103 - .../aws-sdk-go/service/mediastoredata/doc.go | 30 - .../service/mediastoredata/errors.go | 30 - .../service/mediastoredata/service.go | 96 - .../aws/aws-sdk-go/service/sagemaker/api.go | 7569 ------ .../aws/aws-sdk-go/service/sagemaker/doc.go | 28 - .../aws-sdk-go/service/sagemaker/errors.go | 25 - .../aws-sdk-go/service/sagemaker/service.go | 98 - .../aws-sdk-go/service/sagemaker/waiters.go | 331 - .../aws/aws-sdk-go/service/shield/api.go | 2171 -- .../aws/aws-sdk-go/service/shield/doc.go | 32 - .../aws/aws-sdk-go/service/shield/errors.go | 69 - .../aws/aws-sdk-go/service/shield/service.go | 95 - .../aws/aws-sdk-go/service/swf/api.go | 15446 ------------- .../aws/aws-sdk-go/service/swf/doc.go | 38 - .../aws/aws-sdk-go/service/swf/errors.go | 80 - .../aws/aws-sdk-go/service/swf/service.go | 95 - .../aws/aws-sdk-go/service/workspaces/api.go | 3686 --- .../aws/aws-sdk-go/service/workspaces/doc.go | 29 - .../aws-sdk-go/service/workspaces/errors.go | 56 - .../aws-sdk-go/service/workspaces/service.go | 95 - .../github.com/chzyer/readline/CHANGELOG.md | 58 - vendor/github.com/chzyer/readline/LICENSE | 22 - vendor/github.com/chzyer/readline/README.md | 114 - 
.../chzyer/readline/ansi_windows.go | 246 - vendor/github.com/chzyer/readline/complete.go | 283 - .../chzyer/readline/complete_helper.go | 165 - .../chzyer/readline/complete_segment.go | 82 - vendor/github.com/chzyer/readline/history.go | 312 - .../github.com/chzyer/readline/operation.go | 504 - vendor/github.com/chzyer/readline/password.go | 32 - .../chzyer/readline/rawreader_windows.go | 125 - vendor/github.com/chzyer/readline/readline.go | 288 - vendor/github.com/chzyer/readline/remote.go | 474 - vendor/github.com/chzyer/readline/runebuf.go | 572 - vendor/github.com/chzyer/readline/runes.go | 223 - vendor/github.com/chzyer/readline/search.go | 164 - vendor/github.com/chzyer/readline/std.go | 133 - .../github.com/chzyer/readline/std_windows.go | 9 - vendor/github.com/chzyer/readline/term.go | 123 - vendor/github.com/chzyer/readline/term_bsd.go | 29 - .../github.com/chzyer/readline/term_linux.go | 33 - .../chzyer/readline/term_solaris.go | 32 - .../github.com/chzyer/readline/term_unix.go | 24 - .../chzyer/readline/term_windows.go | 171 - vendor/github.com/chzyer/readline/terminal.go | 232 - vendor/github.com/chzyer/readline/utils.go | 276 - .../github.com/chzyer/readline/utils_unix.go | 83 - .../chzyer/readline/utils_windows.go | 41 - vendor/github.com/chzyer/readline/vim.go | 174 - .../github.com/chzyer/readline/windows_api.go | 152 - vendor/github.com/dylanmei/iso8601/LICENSE | 21 - vendor/github.com/dylanmei/iso8601/README.md | 9 - .../github.com/dylanmei/iso8601/duration.go | 96 - vendor/github.com/dylanmei/winrmtest/LICENSE | 22 - .../github.com/dylanmei/winrmtest/README.md | 48 - .../github.com/dylanmei/winrmtest/remote.go | 79 - vendor/github.com/dylanmei/winrmtest/wsman.go | 170 - vendor/github.com/hashicorp/atlas-go/LICENSE | 353 - .../hashicorp/atlas-go/archive/archive.go | 528 - .../hashicorp/atlas-go/archive/vcs.go | 365 - .../hashicorp/atlas-go/v1/application.go | 164 - .../hashicorp/atlas-go/v1/artifact.go | 248 - 
.../hashicorp/atlas-go/v1/authentication.go | 88 - .../hashicorp/atlas-go/v1/build_config.go | 193 - .../hashicorp/atlas-go/v1/client.go | 339 - .../hashicorp/atlas-go/v1/terraform.go | 106 - .../github.com/hashicorp/atlas-go/v1/util.go | 22 - .../hashicorp/go-checkpoint/LICENSE | 354 - .../hashicorp/go-checkpoint/README.md | 22 - .../hashicorp/go-checkpoint/checkpoint.go | 464 - .../github.com/hashicorp/go-rootcerts/LICENSE | 363 - .../hashicorp/go-rootcerts/Makefile | 8 - .../hashicorp/go-rootcerts/README.md | 43 - .../github.com/hashicorp/go-rootcerts/doc.go | 9 - .../hashicorp/go-rootcerts/rootcerts.go | 103 - .../hashicorp/go-rootcerts/rootcerts_base.go | 12 - .../go-rootcerts/rootcerts_darwin.go | 48 - .../hashicorp/hcl/hcl/fmtcmd/fmtcmd.go | 162 - .../hashicorp/hcl/hcl/printer/nodes.go | 779 - .../hashicorp/hcl/hcl/printer/printer.go | 66 - .../hashicorp/terraform/BUILDING.md | 56 - .../hashicorp/terraform/CHANGELOG.md | 4599 ---- .../github.com/hashicorp/terraform/Dockerfile | 24 - vendor/github.com/hashicorp/terraform/LICENSE | 354 - .../github.com/hashicorp/terraform/Makefile | 105 - .../github.com/hashicorp/terraform/README.md | 170 - .../hashicorp/terraform/Vagrantfile | 92 - .../providers/terraform/data_source_state.go | 132 - .../builtin/providers/terraform/flatten.go | 76 - .../builtin/providers/terraform/provider.go | 21 - .../provisioners/chef/linux_provisioner.go | 115 - .../provisioners/chef/resource_provisioner.go | 819 - .../provisioners/chef/windows_provisioner.go | 84 - .../provisioners/file/resource_provisioner.go | 164 - .../local-exec/resource_provisioner.go | 130 - .../remote-exec/resource_provisioner.go | 322 - .../salt-masterless/resource_provisioner.go | 533 - .../hashicorp/terraform/checkpoint.go | 81 - .../terraform/command/clistate/state.go | 94 - .../terraform/command/format/diagnostic.go | 65 - .../terraform/command/format/format.go | 8 - .../terraform/command/format/plan.go | 349 - .../terraform/command/format/state.go | 265 - 
.../hashicorp/terraform/commands.go | 399 - .../terraform/communicator/communicator.go | 53 - .../communicator/communicator_mock.go | 91 - .../terraform/communicator/remote/command.go | 67 - .../terraform/communicator/shared/shared.go | 17 - .../communicator/ssh/communicator.go | 706 - .../terraform/communicator/ssh/password.go | 28 - .../terraform/communicator/ssh/provisioner.go | 274 - .../communicator/winrm/communicator.go | 218 - .../communicator/winrm/provisioner.go | 123 - .../github.com/hashicorp/terraform/config.go | 312 - .../terraform/config/interpolate_funcs.go | 2 +- .../terraform/config/module/inode.go | 2 +- .../terraform/config/module/storage.go | 10 +- .../hashicorp/terraform/config_unix.go | 58 - .../hashicorp/terraform/config_windows.go | 46 - vendor/github.com/hashicorp/terraform/help.go | 90 - .../terraform/helper/experiment/experiment.go | 154 - .../terraform/helper/experiment/id.go | 34 - .../terraform/helper/schema/resource.go | 1 - .../terraform/helper/schema/schema.go | 1 - .../helper/slowmessage/slowmessage.go | 34 - .../terraform/helper/variables/flag.go | 38 - .../terraform/helper/variables/flag_any.go | 25 - .../terraform/helper/variables/flag_file.go | 65 - .../terraform/helper/variables/merge.go | 66 - .../terraform/helper/variables/parse.go | 118 - .../terraform/helper/variables/variables.go | 3 - .../helper/wrappedreadline/wrappedreadline.go | 77 - .../wrappedreadline/wrappedreadline_unix.go | 46 - .../wrappedreadline_windows.go | 8 - .../helper/wrappedstreams/streams.go | 52 - .../helper/wrappedstreams/streams_other.go | 14 - .../helper/wrappedstreams/streams_windows.go | 52 - vendor/github.com/hashicorp/terraform/main.go | 354 - .../github.com/hashicorp/terraform/panic.go | 67 - .../github.com/hashicorp/terraform/plugins.go | 29 - .../hashicorp/terraform/repl/format.go | 92 - .../hashicorp/terraform/repl/repl.go | 4 - .../hashicorp/terraform/repl/session.go | 95 - .../hashicorp/terraform/signal_unix.go | 11 - 
.../hashicorp/terraform/signal_windows.go | 10 - .../hashicorp/terraform/state/backup.go | 85 - .../hashicorp/terraform/state/inmem.go | 108 - .../hashicorp/terraform/state/local.go | 301 - .../terraform/state/local_lock_unix.go | 34 - .../terraform/state/local_lock_windows.go | 108 - .../hashicorp/terraform/state/lock.go | 38 - .../hashicorp/terraform/state/state.go | 242 - .../hashicorp/terraform/state/testing.go | 159 - .../terraform/synchronized_writers.go | 31 - .../hashicorp/terraform/terraform/context.go | 9 +- .../terraform/eval_check_prevent_refresh.go | 49 - .../terraform/terraform/eval_validate.go | 1 + .../terraform/terraform/interpolate.go | 3 - .../terraform/module_dependencies.go | 1 - .../terraform/resource_provider_mock.go | 4 +- .../hashicorp/terraform/terraform/state.go | 21 + .../terraform/terraform/transform_provider.go | 8 +- .../github.com/hashicorp/terraform/version.go | 12 - .../hashicorp/terraform/version/version.go | 2 +- vendor/github.com/kardianos/osext/LICENSE | 27 - vendor/github.com/kardianos/osext/README.md | 21 - vendor/github.com/kardianos/osext/osext.go | 33 - .../github.com/kardianos/osext/osext_go18.go | 9 - .../github.com/kardianos/osext/osext_plan9.go | 22 - .../kardianos/osext/osext_procfs.go | 36 - .../kardianos/osext/osext_sysctl.go | 126 - .../kardianos/osext/osext_windows.go | 36 - .../masterzen/azure-sdk-for-go/LICENSE | 202 - .../azure-sdk-for-go/core/http/chunked.go | 203 - .../azure-sdk-for-go/core/http/client.go | 487 - .../azure-sdk-for-go/core/http/cookie.go | 363 - .../azure-sdk-for-go/core/http/doc.go | 80 - .../core/http/filetransport.go | 123 - .../azure-sdk-for-go/core/http/fs.go | 549 - .../azure-sdk-for-go/core/http/header.go | 211 - .../azure-sdk-for-go/core/http/jar.go | 27 - .../azure-sdk-for-go/core/http/lex.go | 96 - .../azure-sdk-for-go/core/http/race.go | 11 - .../azure-sdk-for-go/core/http/request.go | 875 - .../azure-sdk-for-go/core/http/response.go | 291 - .../azure-sdk-for-go/core/http/server.go 
| 2052 -- .../azure-sdk-for-go/core/http/sniff.go | 214 - .../azure-sdk-for-go/core/http/status.go | 120 - .../azure-sdk-for-go/core/http/transfer.go | 730 - .../azure-sdk-for-go/core/http/transport.go | 1208 - .../azure-sdk-for-go/core/tls/alert.go | 77 - .../core/tls/cipher_suites.go | 270 - .../azure-sdk-for-go/core/tls/common.go | 438 - .../azure-sdk-for-go/core/tls/conn.go | 1026 - .../core/tls/handshake_client.go | 411 - .../core/tls/handshake_messages.go | 1304 -- .../core/tls/handshake_server.go | 638 - .../core/tls/key_agreement.go | 400 - .../azure-sdk-for-go/core/tls/prf.go | 291 - .../azure-sdk-for-go/core/tls/ticket.go | 182 - .../azure-sdk-for-go/core/tls/tls.go | 225 - vendor/github.com/masterzen/simplexml/LICENSE | 202 - .../masterzen/simplexml/dom/document.go | 35 - .../masterzen/simplexml/dom/element.go | 200 - .../masterzen/simplexml/dom/namespace.go | 10 - vendor/github.com/masterzen/winrm/LICENSE | 202 - vendor/github.com/masterzen/winrm/Makefile | 34 - vendor/github.com/masterzen/winrm/README.md | 223 - vendor/github.com/masterzen/winrm/auth.go | 106 - vendor/github.com/masterzen/winrm/client.go | 187 - vendor/github.com/masterzen/winrm/command.go | 239 - vendor/github.com/masterzen/winrm/endpoint.go | 63 - vendor/github.com/masterzen/winrm/error.go | 13 - vendor/github.com/masterzen/winrm/http.go | 101 - vendor/github.com/masterzen/winrm/ntlm.go | 23 - .../github.com/masterzen/winrm/parameters.go | 24 - .../github.com/masterzen/winrm/powershell.go | 23 - vendor/github.com/masterzen/winrm/request.go | 155 - vendor/github.com/masterzen/winrm/response.go | 125 - vendor/github.com/masterzen/winrm/shell.go | 36 - .../github.com/masterzen/winrm/soap/header.go | 181 - .../masterzen/winrm/soap/message.go | 73 - .../masterzen/winrm/soap/namespaces.go | 37 - vendor/github.com/masterzen/xmlpath/LICENSE | 185 - vendor/github.com/masterzen/xmlpath/doc.go | 95 - vendor/github.com/masterzen/xmlpath/parser.go | 233 - 
vendor/github.com/masterzen/xmlpath/path.go | 642 - vendor/github.com/mattn/go-colorable/LICENSE | 21 - .../github.com/mattn/go-colorable/README.md | 48 - .../mattn/go-colorable/colorable_others.go | 30 - .../mattn/go-colorable/colorable_windows.go | 914 - .../mattn/go-colorable/noncolorable.go | 55 - vendor/github.com/mattn/go-shellwords/LICENSE | 21 - .../github.com/mattn/go-shellwords/README.md | 47 - .../mattn/go-shellwords/shellwords.go | 145 - .../mattn/go-shellwords/util_posix.go | 22 - .../mattn/go-shellwords/util_windows.go | 20 - .../github.com/mitchellh/colorstring/LICENSE | 21 - .../mitchellh/colorstring/README.md | 30 - .../mitchellh/colorstring/colorstring.go | 244 - .../mitchellh/go-linereader/LICENSE.md | 21 - .../mitchellh/go-linereader/README.md | 30 - .../mitchellh/go-linereader/linereader.go | 118 - vendor/github.com/mitchellh/panicwrap/LICENSE | 21 - .../github.com/mitchellh/panicwrap/README.md | 108 - .../mitchellh/panicwrap/panicwrap.go | 359 - .../github.com/mitchellh/prefixedio/LICENSE | 19 - .../github.com/mitchellh/prefixedio/README.md | 30 - .../github.com/mitchellh/prefixedio/reader.go | 220 - vendor/github.com/nu7hatch/gouuid/COPYING | 19 - vendor/github.com/nu7hatch/gouuid/README.md | 21 - vendor/github.com/nu7hatch/gouuid/uuid.go | 173 - .../packer-community/winrmcp/LICENSE | 22 - .../packer-community/winrmcp/winrmcp/cp.go | 224 - .../winrmcp/winrmcp/endpoint.go | 58 - .../packer-community/winrmcp/winrmcp/ls.go | 70 - .../packer-community/winrmcp/winrmcp/path.go | 18 - .../winrmcp/winrmcp/psobject.go | 15 - .../winrmcp/winrmcp/winrmcp.go | 128 - .../rancher/go-rancher/Dockerfile.dapper | 30 - vendor/github.com/rancher/go-rancher/LICENSE | 177 - vendor/github.com/rancher/go-rancher/Makefile | 23 - .../github.com/rancher/go-rancher/README.md | 55 - vendor/github.com/rancher/go-rancher/main.go | 10 - .../github.com/rancher/go-rancher/trash.conf | 6 - vendor/github.com/ryanuber/columnize/LICENSE | 20 - 
.../github.com/ryanuber/columnize/README.md | 71 - .../ryanuber/columnize/columnize.go | 191 - vendor/github.com/xanzy/ssh-agent/LICENSE | 202 - vendor/github.com/xanzy/ssh-agent/README.md | 23 - .../xanzy/ssh-agent/pageant_windows.go | 146 - vendor/github.com/xanzy/ssh-agent/sshagent.go | 49 - .../xanzy/ssh-agent/sshagent_windows.go | 80 - vendor/github.com/xlab/treeprint/LICENSE | 20 - vendor/github.com/xlab/treeprint/README.md | 126 - vendor/github.com/xlab/treeprint/helpers.go | 47 - vendor/github.com/xlab/treeprint/struct.go | 340 - vendor/github.com/xlab/treeprint/treeprint.go | 184 - vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 11 +- vendor/golang.org/x/crypto/blowfish/cipher.go | 2 +- vendor/golang.org/x/crypto/blowfish/const.go | 2 +- .../x/crypto/curve25519/const_amd64.h | 2 +- .../x/crypto/curve25519/const_amd64.s | 2 +- .../x/crypto/curve25519/cswap_amd64.s | 131 +- .../x/crypto/curve25519/curve25519.go | 25 +- vendor/golang.org/x/crypto/curve25519/doc.go | 2 +- .../x/crypto/curve25519/freeze_amd64.s | 2 +- .../x/crypto/curve25519/ladderstep_amd64.s | 2 +- .../x/crypto/curve25519/mul_amd64.s | 2 +- .../x/crypto/curve25519/square_amd64.s | 2 +- vendor/golang.org/x/crypto/ed25519/ed25519.go | 8 +- vendor/golang.org/x/crypto/md4/md4.go | 118 - vendor/golang.org/x/crypto/md4/md4block.go | 89 - vendor/golang.org/x/crypto/openpgp/keys.go | 3 +- .../golang.org/x/crypto/ssh/agent/client.go | 683 - .../golang.org/x/crypto/ssh/agent/forward.go | 103 - .../golang.org/x/crypto/ssh/agent/keyring.go | 215 - .../golang.org/x/crypto/ssh/agent/server.go | 523 - vendor/golang.org/x/crypto/ssh/buffer.go | 5 +- vendor/golang.org/x/crypto/ssh/certs.go | 40 +- vendor/golang.org/x/crypto/ssh/channel.go | 142 +- vendor/golang.org/x/crypto/ssh/cipher.go | 14 +- vendor/golang.org/x/crypto/ssh/client.go | 77 +- vendor/golang.org/x/crypto/ssh/client_auth.go | 77 +- vendor/golang.org/x/crypto/ssh/common.go | 18 +- vendor/golang.org/x/crypto/ssh/connection.go | 2 +- 
vendor/golang.org/x/crypto/ssh/doc.go | 3 + vendor/golang.org/x/crypto/ssh/handshake.go | 59 +- vendor/golang.org/x/crypto/ssh/kex.go | 32 +- vendor/golang.org/x/crypto/ssh/keys.go | 226 +- vendor/golang.org/x/crypto/ssh/messages.go | 38 +- vendor/golang.org/x/crypto/ssh/mux.go | 6 +- vendor/golang.org/x/crypto/ssh/server.go | 125 +- vendor/golang.org/x/crypto/ssh/session.go | 22 +- vendor/golang.org/x/crypto/ssh/streamlocal.go | 115 + vendor/golang.org/x/crypto/ssh/tcpip.go | 198 +- vendor/golang.org/x/crypto/ssh/transport.go | 16 +- vendor/vendor.json | 756 +- 371 files changed, 1177 insertions(+), 172970 deletions(-) delete mode 100644 vendor/github.com/Azure/go-ntlmssp/LICENSE delete mode 100644 vendor/github.com/Azure/go-ntlmssp/README.md delete mode 100644 vendor/github.com/Azure/go-ntlmssp/authenticate_message.go delete mode 100644 vendor/github.com/Azure/go-ntlmssp/authheader.go delete mode 100644 vendor/github.com/Azure/go-ntlmssp/avids.go delete mode 100644 vendor/github.com/Azure/go-ntlmssp/challenge_message.go delete mode 100644 vendor/github.com/Azure/go-ntlmssp/messageheader.go delete mode 100644 vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go delete mode 100644 vendor/github.com/Azure/go-ntlmssp/negotiate_message.go delete mode 100644 vendor/github.com/Azure/go-ntlmssp/negotiator.go delete mode 100644 vendor/github.com/Azure/go-ntlmssp/nlmp.go delete mode 100644 vendor/github.com/Azure/go-ntlmssp/unicode.go delete mode 100644 vendor/github.com/Azure/go-ntlmssp/varfield.go delete mode 100755 vendor/github.com/armon/circbuf/LICENSE delete mode 100644 vendor/github.com/armon/circbuf/README.md delete mode 100644 vendor/github.com/armon/circbuf/circbuf.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/appsync/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/appsync/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/appsync/errors.go delete mode 100644 
vendor/github.com/aws/aws-sdk-go/service/appsync/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/budgets/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/budgets/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/budgets/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/dax/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/dax/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/dax/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/dax/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/glue/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/glue/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/glue/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/glue/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediaconvert/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediaconvert/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go delete mode 100644 
vendor/github.com/aws/aws-sdk-go/service/medialive/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/medialive/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/medialive/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/medialive/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediapackage/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediapackage/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediastoredata/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediastoredata/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sagemaker/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sagemaker/waiters.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/shield/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/shield/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/shield/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/shield/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/swf/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/swf/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/swf/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/swf/service.go delete mode 100644 
vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/workspaces/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/workspaces/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go delete mode 100644 vendor/github.com/chzyer/readline/CHANGELOG.md delete mode 100644 vendor/github.com/chzyer/readline/LICENSE delete mode 100644 vendor/github.com/chzyer/readline/README.md delete mode 100644 vendor/github.com/chzyer/readline/ansi_windows.go delete mode 100644 vendor/github.com/chzyer/readline/complete.go delete mode 100644 vendor/github.com/chzyer/readline/complete_helper.go delete mode 100644 vendor/github.com/chzyer/readline/complete_segment.go delete mode 100644 vendor/github.com/chzyer/readline/history.go delete mode 100644 vendor/github.com/chzyer/readline/operation.go delete mode 100644 vendor/github.com/chzyer/readline/password.go delete mode 100644 vendor/github.com/chzyer/readline/rawreader_windows.go delete mode 100644 vendor/github.com/chzyer/readline/readline.go delete mode 100644 vendor/github.com/chzyer/readline/remote.go delete mode 100644 vendor/github.com/chzyer/readline/runebuf.go delete mode 100644 vendor/github.com/chzyer/readline/runes.go delete mode 100644 vendor/github.com/chzyer/readline/search.go delete mode 100644 vendor/github.com/chzyer/readline/std.go delete mode 100644 vendor/github.com/chzyer/readline/std_windows.go delete mode 100644 vendor/github.com/chzyer/readline/term.go delete mode 100644 vendor/github.com/chzyer/readline/term_bsd.go delete mode 100644 vendor/github.com/chzyer/readline/term_linux.go delete mode 100644 vendor/github.com/chzyer/readline/term_solaris.go delete mode 100644 vendor/github.com/chzyer/readline/term_unix.go delete mode 100644 vendor/github.com/chzyer/readline/term_windows.go delete mode 100644 vendor/github.com/chzyer/readline/terminal.go delete mode 100644 
vendor/github.com/chzyer/readline/utils.go delete mode 100644 vendor/github.com/chzyer/readline/utils_unix.go delete mode 100644 vendor/github.com/chzyer/readline/utils_windows.go delete mode 100644 vendor/github.com/chzyer/readline/vim.go delete mode 100644 vendor/github.com/chzyer/readline/windows_api.go delete mode 100644 vendor/github.com/dylanmei/iso8601/LICENSE delete mode 100644 vendor/github.com/dylanmei/iso8601/README.md delete mode 100644 vendor/github.com/dylanmei/iso8601/duration.go delete mode 100644 vendor/github.com/dylanmei/winrmtest/LICENSE delete mode 100644 vendor/github.com/dylanmei/winrmtest/README.md delete mode 100644 vendor/github.com/dylanmei/winrmtest/remote.go delete mode 100644 vendor/github.com/dylanmei/winrmtest/wsman.go delete mode 100644 vendor/github.com/hashicorp/atlas-go/LICENSE delete mode 100644 vendor/github.com/hashicorp/atlas-go/archive/archive.go delete mode 100644 vendor/github.com/hashicorp/atlas-go/archive/vcs.go delete mode 100644 vendor/github.com/hashicorp/atlas-go/v1/application.go delete mode 100644 vendor/github.com/hashicorp/atlas-go/v1/artifact.go delete mode 100644 vendor/github.com/hashicorp/atlas-go/v1/authentication.go delete mode 100644 vendor/github.com/hashicorp/atlas-go/v1/build_config.go delete mode 100644 vendor/github.com/hashicorp/atlas-go/v1/client.go delete mode 100644 vendor/github.com/hashicorp/atlas-go/v1/terraform.go delete mode 100644 vendor/github.com/hashicorp/atlas-go/v1/util.go delete mode 100644 vendor/github.com/hashicorp/go-checkpoint/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-checkpoint/README.md delete mode 100644 vendor/github.com/hashicorp/go-checkpoint/checkpoint.go delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/Makefile delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/README.md delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/doc.go delete mode 100644 
vendor/github.com/hashicorp/go-rootcerts/rootcerts.go delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/printer.go delete mode 100644 vendor/github.com/hashicorp/terraform/BUILDING.md delete mode 100644 vendor/github.com/hashicorp/terraform/CHANGELOG.md delete mode 100644 vendor/github.com/hashicorp/terraform/Dockerfile delete mode 100644 vendor/github.com/hashicorp/terraform/LICENSE delete mode 100644 vendor/github.com/hashicorp/terraform/Makefile delete mode 100644 vendor/github.com/hashicorp/terraform/README.md delete mode 100644 vendor/github.com/hashicorp/terraform/Vagrantfile delete mode 100644 vendor/github.com/hashicorp/terraform/builtin/providers/terraform/data_source_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/builtin/providers/terraform/flatten.go delete mode 100644 vendor/github.com/hashicorp/terraform/builtin/providers/terraform/provider.go delete mode 100644 vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/linux_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/resource_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/windows_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/builtin/provisioners/file/resource_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/builtin/provisioners/local-exec/resource_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/builtin/provisioners/remote-exec/resource_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/builtin/provisioners/salt-masterless/resource_provisioner.go delete mode 100644 
vendor/github.com/hashicorp/terraform/checkpoint.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/clistate/state.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/format/diagnostic.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/format/format.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/format/plan.go delete mode 100644 vendor/github.com/hashicorp/terraform/command/format/state.go delete mode 100644 vendor/github.com/hashicorp/terraform/commands.go delete mode 100644 vendor/github.com/hashicorp/terraform/communicator/communicator.go delete mode 100644 vendor/github.com/hashicorp/terraform/communicator/communicator_mock.go delete mode 100644 vendor/github.com/hashicorp/terraform/communicator/remote/command.go delete mode 100644 vendor/github.com/hashicorp/terraform/communicator/shared/shared.go delete mode 100644 vendor/github.com/hashicorp/terraform/communicator/ssh/communicator.go delete mode 100644 vendor/github.com/hashicorp/terraform/communicator/ssh/password.go delete mode 100644 vendor/github.com/hashicorp/terraform/communicator/ssh/provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/communicator/winrm/communicator.go delete mode 100644 vendor/github.com/hashicorp/terraform/communicator/winrm/provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/config.go delete mode 100644 vendor/github.com/hashicorp/terraform/config_unix.go delete mode 100644 vendor/github.com/hashicorp/terraform/config_windows.go delete mode 100644 vendor/github.com/hashicorp/terraform/help.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/experiment/id.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/slowmessage/slowmessage.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/variables/flag.go delete mode 100644 
vendor/github.com/hashicorp/terraform/helper/variables/flag_any.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/variables/flag_file.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/variables/merge.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/variables/parse.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/variables/variables.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline_unix.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline_windows.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams_other.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams_windows.go delete mode 100644 vendor/github.com/hashicorp/terraform/main.go delete mode 100644 vendor/github.com/hashicorp/terraform/panic.go delete mode 100644 vendor/github.com/hashicorp/terraform/plugins.go delete mode 100644 vendor/github.com/hashicorp/terraform/repl/format.go delete mode 100644 vendor/github.com/hashicorp/terraform/repl/repl.go delete mode 100644 vendor/github.com/hashicorp/terraform/repl/session.go delete mode 100644 vendor/github.com/hashicorp/terraform/signal_unix.go delete mode 100644 vendor/github.com/hashicorp/terraform/signal_windows.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/backup.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/inmem.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/local.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/local_lock_unix.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/local_lock_windows.go delete mode 100644 
vendor/github.com/hashicorp/terraform/state/lock.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/state.go delete mode 100644 vendor/github.com/hashicorp/terraform/state/testing.go delete mode 100644 vendor/github.com/hashicorp/terraform/synchronized_writers.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_refresh.go delete mode 100644 vendor/github.com/hashicorp/terraform/version.go delete mode 100644 vendor/github.com/kardianos/osext/LICENSE delete mode 100644 vendor/github.com/kardianos/osext/README.md delete mode 100644 vendor/github.com/kardianos/osext/osext.go delete mode 100644 vendor/github.com/kardianos/osext/osext_go18.go delete mode 100644 vendor/github.com/kardianos/osext/osext_plan9.go delete mode 100644 vendor/github.com/kardianos/osext/osext_procfs.go delete mode 100644 vendor/github.com/kardianos/osext/osext_sysctl.go delete mode 100644 vendor/github.com/kardianos/osext/osext_windows.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/LICENSE delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/chunked.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/client.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/cookie.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/doc.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/filetransport.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/fs.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/header.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/jar.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/lex.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/race.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/request.go delete mode 100644 
vendor/github.com/masterzen/azure-sdk-for-go/core/http/response.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/server.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/sniff.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/status.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/transfer.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/http/transport.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/tls/alert.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/tls/cipher_suites.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/tls/common.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/tls/conn.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_client.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_messages.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_server.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/tls/key_agreement.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/tls/prf.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/tls/ticket.go delete mode 100644 vendor/github.com/masterzen/azure-sdk-for-go/core/tls/tls.go delete mode 100644 vendor/github.com/masterzen/simplexml/LICENSE delete mode 100644 vendor/github.com/masterzen/simplexml/dom/document.go delete mode 100644 vendor/github.com/masterzen/simplexml/dom/element.go delete mode 100644 vendor/github.com/masterzen/simplexml/dom/namespace.go delete mode 100644 vendor/github.com/masterzen/winrm/LICENSE delete mode 100644 vendor/github.com/masterzen/winrm/Makefile delete mode 100644 vendor/github.com/masterzen/winrm/README.md delete mode 100644 vendor/github.com/masterzen/winrm/auth.go delete mode 100644 
vendor/github.com/masterzen/winrm/client.go delete mode 100644 vendor/github.com/masterzen/winrm/command.go delete mode 100644 vendor/github.com/masterzen/winrm/endpoint.go delete mode 100644 vendor/github.com/masterzen/winrm/error.go delete mode 100644 vendor/github.com/masterzen/winrm/http.go delete mode 100644 vendor/github.com/masterzen/winrm/ntlm.go delete mode 100644 vendor/github.com/masterzen/winrm/parameters.go delete mode 100644 vendor/github.com/masterzen/winrm/powershell.go delete mode 100644 vendor/github.com/masterzen/winrm/request.go delete mode 100644 vendor/github.com/masterzen/winrm/response.go delete mode 100644 vendor/github.com/masterzen/winrm/shell.go delete mode 100644 vendor/github.com/masterzen/winrm/soap/header.go delete mode 100644 vendor/github.com/masterzen/winrm/soap/message.go delete mode 100644 vendor/github.com/masterzen/winrm/soap/namespaces.go delete mode 100644 vendor/github.com/masterzen/xmlpath/LICENSE delete mode 100644 vendor/github.com/masterzen/xmlpath/doc.go delete mode 100644 vendor/github.com/masterzen/xmlpath/parser.go delete mode 100644 vendor/github.com/masterzen/xmlpath/path.go delete mode 100644 vendor/github.com/mattn/go-colorable/LICENSE delete mode 100644 vendor/github.com/mattn/go-colorable/README.md delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go delete mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go delete mode 100644 vendor/github.com/mattn/go-shellwords/LICENSE delete mode 100644 vendor/github.com/mattn/go-shellwords/README.md delete mode 100644 vendor/github.com/mattn/go-shellwords/shellwords.go delete mode 100644 vendor/github.com/mattn/go-shellwords/util_posix.go delete mode 100644 vendor/github.com/mattn/go-shellwords/util_windows.go delete mode 100644 vendor/github.com/mitchellh/colorstring/LICENSE delete mode 100644 vendor/github.com/mitchellh/colorstring/README.md delete mode 
100644 vendor/github.com/mitchellh/colorstring/colorstring.go delete mode 100644 vendor/github.com/mitchellh/go-linereader/LICENSE.md delete mode 100644 vendor/github.com/mitchellh/go-linereader/README.md delete mode 100644 vendor/github.com/mitchellh/go-linereader/linereader.go delete mode 100644 vendor/github.com/mitchellh/panicwrap/LICENSE delete mode 100644 vendor/github.com/mitchellh/panicwrap/README.md delete mode 100644 vendor/github.com/mitchellh/panicwrap/panicwrap.go delete mode 100644 vendor/github.com/mitchellh/prefixedio/LICENSE delete mode 100644 vendor/github.com/mitchellh/prefixedio/README.md delete mode 100644 vendor/github.com/mitchellh/prefixedio/reader.go delete mode 100644 vendor/github.com/nu7hatch/gouuid/COPYING delete mode 100644 vendor/github.com/nu7hatch/gouuid/README.md delete mode 100644 vendor/github.com/nu7hatch/gouuid/uuid.go delete mode 100644 vendor/github.com/packer-community/winrmcp/LICENSE delete mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/cp.go delete mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/endpoint.go delete mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/ls.go delete mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/path.go delete mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/psobject.go delete mode 100644 vendor/github.com/packer-community/winrmcp/winrmcp/winrmcp.go delete mode 100644 vendor/github.com/rancher/go-rancher/Dockerfile.dapper delete mode 100644 vendor/github.com/rancher/go-rancher/LICENSE delete mode 100644 vendor/github.com/rancher/go-rancher/Makefile delete mode 100644 vendor/github.com/rancher/go-rancher/README.md delete mode 100644 vendor/github.com/rancher/go-rancher/main.go delete mode 100644 vendor/github.com/rancher/go-rancher/trash.conf delete mode 100644 vendor/github.com/ryanuber/columnize/LICENSE delete mode 100644 vendor/github.com/ryanuber/columnize/README.md delete mode 100644 
vendor/github.com/ryanuber/columnize/columnize.go delete mode 100644 vendor/github.com/xanzy/ssh-agent/LICENSE delete mode 100644 vendor/github.com/xanzy/ssh-agent/README.md delete mode 100644 vendor/github.com/xanzy/ssh-agent/pageant_windows.go delete mode 100644 vendor/github.com/xanzy/ssh-agent/sshagent.go delete mode 100644 vendor/github.com/xanzy/ssh-agent/sshagent_windows.go delete mode 100644 vendor/github.com/xlab/treeprint/LICENSE delete mode 100644 vendor/github.com/xlab/treeprint/README.md delete mode 100644 vendor/github.com/xlab/treeprint/helpers.go delete mode 100644 vendor/github.com/xlab/treeprint/struct.go delete mode 100644 vendor/github.com/xlab/treeprint/treeprint.go delete mode 100644 vendor/golang.org/x/crypto/md4/md4.go delete mode 100644 vendor/golang.org/x/crypto/md4/md4block.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/client.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/forward.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/keyring.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/server.go create mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go diff --git a/vendor/github.com/Azure/go-ntlmssp/LICENSE b/vendor/github.com/Azure/go-ntlmssp/LICENSE deleted file mode 100644 index dc1cf39d135..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the 
Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/Azure/go-ntlmssp/README.md b/vendor/github.com/Azure/go-ntlmssp/README.md deleted file mode 100644 index 55cdcefab70..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# go-ntlmssp -Golang package that provides NTLM/Negotiate authentication over HTTP - -[![GoDoc](https://godoc.org/github.com/Azure/go-ntlmssp?status.svg)](https://godoc.org/github.com/Azure/go-ntlmssp) [![Build Status](https://travis-ci.org/Azure/go-ntlmssp.svg?branch=dev)](https://travis-ci.org/Azure/go-ntlmssp) - -Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx -Implementation hints from http://davenport.sourceforge.net/ntlm.html - -This package only implements authentication, no key exchange or encryption. It -only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding. -This package implements NTLMv2. - -# Usage - -``` -url, user, password := "http://www.example.com/secrets", "robpike", "pw123" -client := &http.Client{ - Transport: ntlmssp.Negotiator{ - RoundTripper:&http.Transport{}, - }, -} - -req, _ := http.NewRequest("GET", url, nil) -req.SetBasicAuth(user, password) -res, _ := client.Do(req) -``` - ------ -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go deleted file mode 100644 index c6fbe444914..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go +++ /dev/null @@ -1,128 +0,0 @@ -package ntlmssp - -import ( - "bytes" - "crypto/rand" - "encoding/binary" - "errors" - "time" -) - -type authenicateMessage struct { - LmChallengeResponse []byte - NtChallengeResponse []byte - - TargetName string - UserName string - - // only set if negotiateFlag_NTLMSSP_NEGOTIATE_KEY_EXCH - EncryptedRandomSessionKey []byte - - NegotiateFlags negotiateFlags - - MIC []byte -} - -type authenticateMessageFields struct { - messageHeader - LmChallengeResponse varField - NtChallengeResponse varField - TargetName varField - UserName varField - Workstation varField - _ [8]byte - NegotiateFlags negotiateFlags -} - -func (m authenicateMessage) MarshalBinary() ([]byte, error) { - if !m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE) { - return nil, errors.New("Only unicode is supported") - } - - target, user := toUnicode(m.TargetName), toUnicode(m.UserName) - workstation := toUnicode("go-ntlmssp") - - ptr := binary.Size(&authenticateMessageFields{}) - f := authenticateMessageFields{ - messageHeader: newMessageHeader(3), - NegotiateFlags: m.NegotiateFlags, - LmChallengeResponse: newVarField(&ptr, len(m.LmChallengeResponse)), - NtChallengeResponse: newVarField(&ptr, len(m.NtChallengeResponse)), - TargetName: newVarField(&ptr, len(target)), - UserName: newVarField(&ptr, len(user)), - Workstation: newVarField(&ptr, len(workstation)), - } - - f.NegotiateFlags.Unset(negotiateFlagNTLMSSPNEGOTIATEVERSION) - - b := bytes.Buffer{} - if err := binary.Write(&b, binary.LittleEndian, &f); 
err != nil { - return nil, err - } - if err := binary.Write(&b, binary.LittleEndian, &m.LmChallengeResponse); err != nil { - return nil, err - } - if err := binary.Write(&b, binary.LittleEndian, &m.NtChallengeResponse); err != nil { - return nil, err - } - if err := binary.Write(&b, binary.LittleEndian, &target); err != nil { - return nil, err - } - if err := binary.Write(&b, binary.LittleEndian, &user); err != nil { - return nil, err - } - if err := binary.Write(&b, binary.LittleEndian, &workstation); err != nil { - return nil, err - } - - return b.Bytes(), nil -} - -//ProcessChallenge crafts an AUTHENTICATE message in response to the CHALLENGE message -//that was received from the server -func ProcessChallenge(challengeMessageData []byte, user, password string) ([]byte, error) { - if user == "" && password == "" { - return nil, errors.New("Anonymous authentication not supported") - } - - var cm challengeMessage - if err := cm.UnmarshalBinary(challengeMessageData); err != nil { - return nil, err - } - - if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) { - return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)") - } - if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) { - return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)") - } - - am := authenicateMessage{ - UserName: user, - TargetName: cm.TargetName, - NegotiateFlags: cm.NegotiateFlags, - } - - timestamp := cm.TargetInfo[avIDMsvAvTimestamp] - if timestamp == nil { // no time sent, take current time - ft := uint64(time.Now().UnixNano()) / 100 - ft += 116444736000000000 // add time between unix & windows offset - timestamp = make([]byte, 8) - binary.LittleEndian.PutUint64(timestamp, ft) - } - - clientChallenge := make([]byte, 8) - rand.Reader.Read(clientChallenge) - - ntlmV2Hash := getNtlmV2Hash(password, user, cm.TargetName) - - am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash, - 
cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw) - - if cm.TargetInfoRaw == nil { - am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash, - cm.ServerChallenge[:], clientChallenge) - } - - return am.MarshalBinary() -} diff --git a/vendor/github.com/Azure/go-ntlmssp/authheader.go b/vendor/github.com/Azure/go-ntlmssp/authheader.go deleted file mode 100644 index aac3f77d100..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/authheader.go +++ /dev/null @@ -1,37 +0,0 @@ -package ntlmssp - -import ( - "encoding/base64" - "strings" -) - -type authheader string - -func (h authheader) IsBasic() bool { - return strings.HasPrefix(string(h), "Basic ") -} - -func (h authheader) IsNegotiate() bool { - return strings.HasPrefix(string(h), "Negotiate") -} - -func (h authheader) IsNTLM() bool { - return strings.HasPrefix(string(h), "NTLM") -} - -func (h authheader) GetData() ([]byte, error) { - p := strings.Split(string(h), " ") - if len(p) < 2 { - return nil, nil - } - return base64.StdEncoding.DecodeString(string(p[1])) -} - -func (h authheader) GetBasicCreds() (username, password string, err error) { - d, err := h.GetData() - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(d), ":", 2) - return parts[0], parts[1], nil -} diff --git a/vendor/github.com/Azure/go-ntlmssp/avids.go b/vendor/github.com/Azure/go-ntlmssp/avids.go deleted file mode 100644 index 196b5f13163..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/avids.go +++ /dev/null @@ -1,17 +0,0 @@ -package ntlmssp - -type avID uint16 - -const ( - avIDMsvAvEOL avID = iota - avIDMsvAvNbComputerName - avIDMsvAvNbDomainName - avIDMsvAvDNSComputerName - avIDMsvAvDNSDomainName - avIDMsvAvDNSTreeName - avIDMsvAvFlags - avIDMsvAvTimestamp - avIDMsvAvSingleHost - avIDMsvAvTargetName - avIDMsvChannelBindings -) diff --git a/vendor/github.com/Azure/go-ntlmssp/challenge_message.go b/vendor/github.com/Azure/go-ntlmssp/challenge_message.go deleted file mode 100644 index 
053b55e4adf..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/challenge_message.go +++ /dev/null @@ -1,82 +0,0 @@ -package ntlmssp - -import ( - "bytes" - "encoding/binary" - "fmt" -) - -type challengeMessageFields struct { - messageHeader - TargetName varField - NegotiateFlags negotiateFlags - ServerChallenge [8]byte - _ [8]byte - TargetInfo varField -} - -func (m challengeMessageFields) IsValid() bool { - return m.messageHeader.IsValid() && m.MessageType == 2 -} - -type challengeMessage struct { - challengeMessageFields - TargetName string - TargetInfo map[avID][]byte - TargetInfoRaw []byte -} - -func (m *challengeMessage) UnmarshalBinary(data []byte) error { - r := bytes.NewReader(data) - err := binary.Read(r, binary.LittleEndian, &m.challengeMessageFields) - if err != nil { - return err - } - if !m.challengeMessageFields.IsValid() { - return fmt.Errorf("Message is not a valid challenge message: %+v", m.challengeMessageFields.messageHeader) - } - - if m.challengeMessageFields.TargetName.Len > 0 { - m.TargetName, err = m.challengeMessageFields.TargetName.ReadStringFrom(data, m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE)) - if err != nil { - return err - } - } - - if m.challengeMessageFields.TargetInfo.Len > 0 { - d, err := m.challengeMessageFields.TargetInfo.ReadFrom(data) - m.TargetInfoRaw = d - if err != nil { - return err - } - m.TargetInfo = make(map[avID][]byte) - r := bytes.NewReader(d) - for { - var id avID - var l uint16 - err = binary.Read(r, binary.LittleEndian, &id) - if err != nil { - return err - } - if id == avIDMsvAvEOL { - break - } - - err = binary.Read(r, binary.LittleEndian, &l) - if err != nil { - return err - } - value := make([]byte, l) - n, err := r.Read(value) - if err != nil { - return err - } - if n != int(l) { - return fmt.Errorf("Expected to read %d bytes, got only %d", l, n) - } - m.TargetInfo[id] = value - } - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ntlmssp/messageheader.go 
b/vendor/github.com/Azure/go-ntlmssp/messageheader.go deleted file mode 100644 index 247e284652c..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/messageheader.go +++ /dev/null @@ -1,21 +0,0 @@ -package ntlmssp - -import ( - "bytes" -) - -var signature = [8]byte{'N', 'T', 'L', 'M', 'S', 'S', 'P', 0} - -type messageHeader struct { - Signature [8]byte - MessageType uint32 -} - -func (h messageHeader) IsValid() bool { - return bytes.Equal(h.Signature[:], signature[:]) && - h.MessageType > 0 && h.MessageType < 4 -} - -func newMessageHeader(messageType uint32) messageHeader { - return messageHeader{signature, messageType} -} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go deleted file mode 100644 index 6c3ce7b014e..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go +++ /dev/null @@ -1,44 +0,0 @@ -package ntlmssp - -type negotiateFlags uint32 - -const ( - /*A*/ negotiateFlagNTLMSSPNEGOTIATEUNICODE negotiateFlags = 1 << 0 - /*B*/ negotiateFlagNTLMNEGOTIATEOEM = 1 << 1 - /*C*/ negotiateFlagNTLMSSPREQUESTTARGET = 1 << 2 - - /*D*/ negotiateFlagNTLMSSPNEGOTIATESIGN = 1 << 4 - /*E*/ negotiateFlagNTLMSSPNEGOTIATESEAL = 1 << 5 - /*F*/ negotiateFlagNTLMSSPNEGOTIATEDATAGRAM = 1 << 6 - /*G*/ negotiateFlagNTLMSSPNEGOTIATELMKEY = 1 << 7 - - /*H*/ negotiateFlagNTLMSSPNEGOTIATENTLM = 1 << 9 - - /*J*/ negotiateFlagANONYMOUS = 1 << 11 - /*K*/ negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED = 1 << 12 - /*L*/ negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED = 1 << 13 - - /*M*/ negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN = 1 << 15 - /*N*/ negotiateFlagNTLMSSPTARGETTYPEDOMAIN = 1 << 16 - /*O*/ negotiateFlagNTLMSSPTARGETTYPESERVER = 1 << 17 - - /*P*/ negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY = 1 << 19 - /*Q*/ negotiateFlagNTLMSSPNEGOTIATEIDENTIFY = 1 << 20 - - /*R*/ negotiateFlagNTLMSSPREQUESTNONNTSESSIONKEY = 1 << 22 - /*S*/ negotiateFlagNTLMSSPNEGOTIATETARGETINFO = 1 << 23 - - /*T*/ 
negotiateFlagNTLMSSPNEGOTIATEVERSION = 1 << 25 - - /*U*/ negotiateFlagNTLMSSPNEGOTIATE128 = 1 << 29 - /*V*/ negotiateFlagNTLMSSPNEGOTIATEKEYEXCH = 1 << 30 - /*W*/ negotiateFlagNTLMSSPNEGOTIATE56 = 1 << 31 -) - -func (field negotiateFlags) Has(flags negotiateFlags) bool { - return field&flags == flags -} - -func (field *negotiateFlags) Unset(flags negotiateFlags) { - *field = *field ^ (*field & flags) -} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go deleted file mode 100644 index 97aa07e8fba..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go +++ /dev/null @@ -1,31 +0,0 @@ -package ntlmssp - -import ( - "bytes" - "encoding/binary" -) - -type negotiateMessageFields struct { - messageHeader - NegotiateFlags negotiateFlags -} - -//NewNegotiateMessage creates a new NEGOTIATE message with the -//flags that this package supports. -func NewNegotiateMessage() []byte { - m := negotiateMessageFields{ - messageHeader: newMessageHeader(1), - } - - m.NegotiateFlags = negotiateFlagNTLMSSPREQUESTTARGET | - negotiateFlagNTLMSSPNEGOTIATENTLM | - negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN | - negotiateFlagNTLMSSPNEGOTIATEUNICODE - - b := bytes.Buffer{} - err := binary.Write(&b, binary.LittleEndian, &m) - if err != nil { - panic(err) - } - return b.Bytes() -} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiator.go b/vendor/github.com/Azure/go-ntlmssp/negotiator.go deleted file mode 100644 index 9ae4586a26a..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/negotiator.go +++ /dev/null @@ -1,124 +0,0 @@ -package ntlmssp - -import ( - "bytes" - "encoding/base64" - "io" - "io/ioutil" - "net/http" -) - -//Negotiator is a http.Roundtripper decorator that automatically -//converts basic authentication to NTLM/Negotiate authentication when appropriate. 
-type Negotiator struct{ http.RoundTripper } - -//RoundTrip sends the request to the server, handling any authentication -//re-sends as needed. -func (l Negotiator) RoundTrip(req *http.Request) (res *http.Response, err error) { - // Use default round tripper if not provided - rt := l.RoundTripper - if rt == nil { - rt = http.DefaultTransport - } - // If it is not basic auth, just round trip the request as usual - reqauth := authheader(req.Header.Get("Authorization")) - if !reqauth.IsBasic() { - return rt.RoundTrip(req) - } - // Save request body - body := bytes.Buffer{} - if req.Body != nil { - _, err = body.ReadFrom(req.Body) - if err != nil { - return nil, err - } - - req.Body.Close() - req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - } - // first try anonymous, in case the server still finds us - // authenticated from previous traffic - req.Header.Del("Authorization") - res, err = rt.RoundTrip(req) - if err != nil { - return nil, err - } - if res.StatusCode != http.StatusUnauthorized { - return res, err - } - - resauth := authheader(res.Header.Get("Www-Authenticate")) - if !resauth.IsNegotiate() && !resauth.IsNTLM() { - // Unauthorized, Negotiate not requested, let's try with basic auth - req.Header.Set("Authorization", string(reqauth)) - io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - - res, err = rt.RoundTrip(req) - if err != nil { - return nil, err - } - if res.StatusCode != http.StatusUnauthorized { - return res, err - } - resauth = authheader(res.Header.Get("Www-Authenticate")) - } - - if resauth.IsNegotiate() || resauth.IsNTLM() { - // 401 with request:Basic and response:Negotiate - io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - - // recycle credentials - u, p, err := reqauth.GetBasicCreds() - if err != nil { - return nil, err - } - - // send negotiate - negotiateMessage := NewNegotiateMessage() - if resauth.IsNTLM() { - req.Header.Set("Authorization", "NTLM 
"+base64.StdEncoding.EncodeToString(negotiateMessage)) - } else { - req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(negotiateMessage)) - } - - req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - - res, err = rt.RoundTrip(req) - if err != nil { - return nil, err - } - - // receive challenge? - resauth = authheader(res.Header.Get("Www-Authenticate")) - challengeMessage, err := resauth.GetData() - if err != nil { - return nil, err - } - if !(resauth.IsNegotiate() || resauth.IsNTLM()) || len(challengeMessage) == 0 { - // Negotiation failed, let client deal with response - return res, nil - } - io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - - // send authenticate - authenticateMessage, err := ProcessChallenge(challengeMessage, u, p) - if err != nil { - return nil, err - } - if resauth.IsNTLM() { - req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(authenticateMessage)) - } else { - req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(authenticateMessage)) - } - - req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - - res, err = rt.RoundTrip(req) - } - - return res, err -} diff --git a/vendor/github.com/Azure/go-ntlmssp/nlmp.go b/vendor/github.com/Azure/go-ntlmssp/nlmp.go deleted file mode 100644 index 1e65abe8b53..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/nlmp.go +++ /dev/null @@ -1,51 +0,0 @@ -// Package ntlmssp provides NTLM/Negotiate authentication over HTTP -// -// Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx, -// implementation hints from http://davenport.sourceforge.net/ntlm.html . -// This package only implements authentication, no key exchange or encryption. It -// only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding. -// This package implements NTLMv2. 
-package ntlmssp - -import ( - "crypto/hmac" - "crypto/md5" - "golang.org/x/crypto/md4" - "strings" -) - -func getNtlmV2Hash(password, username, target string) []byte { - return hmacMd5(getNtlmHash(password), toUnicode(strings.ToUpper(username)+target)) -} - -func getNtlmHash(password string) []byte { - hash := md4.New() - hash.Write(toUnicode(password)) - return hash.Sum(nil) -} - -func computeNtlmV2Response(ntlmV2Hash, serverChallenge, clientChallenge, - timestamp, targetInfo []byte) []byte { - - temp := []byte{1, 1, 0, 0, 0, 0, 0, 0} - temp = append(temp, timestamp...) - temp = append(temp, clientChallenge...) - temp = append(temp, 0, 0, 0, 0) - temp = append(temp, targetInfo...) - temp = append(temp, 0, 0, 0, 0) - - NTProofStr := hmacMd5(ntlmV2Hash, serverChallenge, temp) - return append(NTProofStr, temp...) -} - -func computeLmV2Response(ntlmV2Hash, serverChallenge, clientChallenge []byte) []byte { - return append(hmacMd5(ntlmV2Hash, serverChallenge, clientChallenge), clientChallenge...) 
-} - -func hmacMd5(key []byte, data ...[]byte) []byte { - mac := hmac.New(md5.New, key) - for _, d := range data { - mac.Write(d) - } - return mac.Sum(nil) -} diff --git a/vendor/github.com/Azure/go-ntlmssp/unicode.go b/vendor/github.com/Azure/go-ntlmssp/unicode.go deleted file mode 100644 index 7b4f47163d0..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/unicode.go +++ /dev/null @@ -1,29 +0,0 @@ -package ntlmssp - -import ( - "bytes" - "encoding/binary" - "errors" - "unicode/utf16" -) - -// helper func's for dealing with Windows Unicode (UTF16LE) - -func fromUnicode(d []byte) (string, error) { - if len(d)%2 > 0 { - return "", errors.New("Unicode (UTF 16 LE) specified, but uneven data length") - } - s := make([]uint16, len(d)/2) - err := binary.Read(bytes.NewReader(d), binary.LittleEndian, &s) - if err != nil { - return "", err - } - return string(utf16.Decode(s)), nil -} - -func toUnicode(s string) []byte { - uints := utf16.Encode([]rune(s)) - b := bytes.Buffer{} - binary.Write(&b, binary.LittleEndian, &uints) - return b.Bytes() -} diff --git a/vendor/github.com/Azure/go-ntlmssp/varfield.go b/vendor/github.com/Azure/go-ntlmssp/varfield.go deleted file mode 100644 index 15f9aa113d8..00000000000 --- a/vendor/github.com/Azure/go-ntlmssp/varfield.go +++ /dev/null @@ -1,40 +0,0 @@ -package ntlmssp - -import ( - "errors" -) - -type varField struct { - Len uint16 - MaxLen uint16 - BufferOffset uint32 -} - -func (f varField) ReadFrom(buffer []byte) ([]byte, error) { - if len(buffer) < int(f.BufferOffset+uint32(f.Len)) { - return nil, errors.New("Error reading data, varField extends beyond buffer") - } - return buffer[f.BufferOffset : f.BufferOffset+uint32(f.Len)], nil -} - -func (f varField) ReadStringFrom(buffer []byte, unicode bool) (string, error) { - d, err := f.ReadFrom(buffer) - if err != nil { - return "", err - } - if unicode { // UTF-16LE encoding scheme - return fromUnicode(d) - } - // OEM encoding, close enough to ASCII, since no code page is specified - 
return string(d), err -} - -func newVarField(ptr *int, fieldsize int) varField { - f := varField{ - Len: uint16(fieldsize), - MaxLen: uint16(fieldsize), - BufferOffset: uint32(*ptr), - } - *ptr += fieldsize - return f -} diff --git a/vendor/github.com/armon/circbuf/LICENSE b/vendor/github.com/armon/circbuf/LICENSE deleted file mode 100755 index 106569e542b..00000000000 --- a/vendor/github.com/armon/circbuf/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/circbuf/README.md b/vendor/github.com/armon/circbuf/README.md deleted file mode 100644 index f2e356b8d7a..00000000000 --- a/vendor/github.com/armon/circbuf/README.md +++ /dev/null @@ -1,28 +0,0 @@ -circbuf -======= - -This repository provides the `circbuf` package. This provides a `Buffer` object -which is a circular (or ring) buffer. It has a fixed size, but can be written -to infinitely. 
Only the last `size` bytes are ever retained. The buffer implements -the `io.Writer` interface. - -Documentation -============= - -Full documentation can be found on [Godoc](http://godoc.org/github.com/armon/circbuf) - -Usage -===== - -The `circbuf` package is very easy to use: - -```go -buf, _ := NewBuffer(6) -buf.Write([]byte("hello world")) - -if string(buf.Bytes()) != " world" { - panic("should only have last 6 bytes!") -} - -``` - diff --git a/vendor/github.com/armon/circbuf/circbuf.go b/vendor/github.com/armon/circbuf/circbuf.go deleted file mode 100644 index de3cb94a390..00000000000 --- a/vendor/github.com/armon/circbuf/circbuf.go +++ /dev/null @@ -1,92 +0,0 @@ -package circbuf - -import ( - "fmt" -) - -// Buffer implements a circular buffer. It is a fixed size, -// and new writes overwrite older data, such that for a buffer -// of size N, for any amount of writes, only the last N bytes -// are retained. -type Buffer struct { - data []byte - size int64 - writeCursor int64 - written int64 -} - -// NewBuffer creates a new buffer of a given size. The size -// must be greater than 0. -func NewBuffer(size int64) (*Buffer, error) { - if size <= 0 { - return nil, fmt.Errorf("Size must be positive") - } - - b := &Buffer{ - size: size, - data: make([]byte, size), - } - return b, nil -} - -// Write writes up to len(buf) bytes to the internal ring, -// overriding older data if necessary. 
-func (b *Buffer) Write(buf []byte) (int, error) { - // Account for total bytes written - n := len(buf) - b.written += int64(n) - - // If the buffer is larger than ours, then we only care - // about the last size bytes anyways - if int64(n) > b.size { - buf = buf[int64(n)-b.size:] - } - - // Copy in place - remain := b.size - b.writeCursor - copy(b.data[b.writeCursor:], buf) - if int64(len(buf)) > remain { - copy(b.data, buf[remain:]) - } - - // Update location of the cursor - b.writeCursor = ((b.writeCursor + int64(len(buf))) % b.size) - return n, nil -} - -// Size returns the size of the buffer -func (b *Buffer) Size() int64 { - return b.size -} - -// TotalWritten provides the total number of bytes written -func (b *Buffer) TotalWritten() int64 { - return b.written -} - -// Bytes provides a slice of the bytes written. This -// slice should not be written to. -func (b *Buffer) Bytes() []byte { - switch { - case b.written >= b.size && b.writeCursor == 0: - return b.data - case b.written > b.size: - out := make([]byte, b.size) - copy(out, b.data[b.writeCursor:]) - copy(out[b.size-b.writeCursor:], b.data[:b.writeCursor]) - return out - default: - return b.data[:b.writeCursor] - } -} - -// Reset resets the buffer so it has no content. -func (b *Buffer) Reset() { - b.writeCursor = 0 - b.written = 0 -} - -// String returns the contents of the buffer as a string -func (b *Buffer) String() string { - return string(b.Bytes()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/appsync/api.go b/vendor/github.com/aws/aws-sdk-go/service/appsync/api.go deleted file mode 100644 index a70770acdb0..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/appsync/api.go +++ /dev/null @@ -1,5417 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package appsync - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opCreateApiKey = "CreateApiKey" - -// CreateApiKeyRequest generates a "aws/request.Request" representing the -// client's request for the CreateApiKey operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateApiKey for more information on using the CreateApiKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateApiKeyRequest method. -// req, resp := client.CreateApiKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateApiKey -func (c *AppSync) CreateApiKeyRequest(input *CreateApiKeyInput) (req *request.Request, output *CreateApiKeyOutput) { - op := &request.Operation{ - Name: opCreateApiKey, - HTTPMethod: "POST", - HTTPPath: "/v1/apis/{apiId}/apikeys", - } - - if input == nil { - input = &CreateApiKeyInput{} - } - - output = &CreateApiKeyOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateApiKey API operation for AWS AppSync. -// -// Creates a unique key that you can distribute to clients who are executing -// your API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for AWS AppSync's -// API operation CreateApiKey for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. -// -// * ErrCodeApiKeyLimitExceededException "ApiKeyLimitExceededException" -// The API key exceeded a limit. Try your request again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateApiKey -func (c *AppSync) CreateApiKey(input *CreateApiKeyInput) (*CreateApiKeyOutput, error) { - req, out := c.CreateApiKeyRequest(input) - return out, req.Send() -} - -// CreateApiKeyWithContext is the same as CreateApiKey with the addition of -// the ability to pass a context and additional request options. -// -// See CreateApiKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *AppSync) CreateApiKeyWithContext(ctx aws.Context, input *CreateApiKeyInput, opts ...request.Option) (*CreateApiKeyOutput, error) { - req, out := c.CreateApiKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateDataSource = "CreateDataSource" - -// CreateDataSourceRequest generates a "aws/request.Request" representing the -// client's request for the CreateDataSource operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateDataSource for more information on using the CreateDataSource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateDataSourceRequest method. -// req, resp := client.CreateDataSourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateDataSource -func (c *AppSync) CreateDataSourceRequest(input *CreateDataSourceInput) (req *request.Request, output *CreateDataSourceOutput) { - op := &request.Operation{ - Name: opCreateDataSource, - HTTPMethod: "POST", - HTTPPath: "/v1/apis/{apiId}/datasources", - } - - if input == nil { - input = &CreateDataSourceInput{} - } - - output = &CreateDataSourceOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateDataSource API operation for AWS AppSync. -// -// Creates a DataSource object. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation CreateDataSource for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateDataSource -func (c *AppSync) CreateDataSource(input *CreateDataSourceInput) (*CreateDataSourceOutput, error) { - req, out := c.CreateDataSourceRequest(input) - return out, req.Send() -} - -// CreateDataSourceWithContext is the same as CreateDataSource with the addition of -// the ability to pass a context and additional request options. -// -// See CreateDataSource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *AppSync) CreateDataSourceWithContext(ctx aws.Context, input *CreateDataSourceInput, opts ...request.Option) (*CreateDataSourceOutput, error) { - req, out := c.CreateDataSourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateGraphqlApi = "CreateGraphqlApi" - -// CreateGraphqlApiRequest generates a "aws/request.Request" representing the -// client's request for the CreateGraphqlApi operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateGraphqlApi for more information on using the CreateGraphqlApi -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateGraphqlApiRequest method. -// req, resp := client.CreateGraphqlApiRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateGraphqlApi -func (c *AppSync) CreateGraphqlApiRequest(input *CreateGraphqlApiInput) (req *request.Request, output *CreateGraphqlApiOutput) { - op := &request.Operation{ - Name: opCreateGraphqlApi, - HTTPMethod: "POST", - HTTPPath: "/v1/apis", - } - - if input == nil { - input = &CreateGraphqlApiInput{} - } - - output = &CreateGraphqlApiOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateGraphqlApi API operation for AWS AppSync. -// -// Creates a GraphqlApi object. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation CreateGraphqlApi for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeApiLimitExceededException "ApiLimitExceededException" -// The GraphQL API exceeded a limit. Try your request again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateGraphqlApi -func (c *AppSync) CreateGraphqlApi(input *CreateGraphqlApiInput) (*CreateGraphqlApiOutput, error) { - req, out := c.CreateGraphqlApiRequest(input) - return out, req.Send() -} - -// CreateGraphqlApiWithContext is the same as CreateGraphqlApi with the addition of -// the ability to pass a context and additional request options. -// -// See CreateGraphqlApi for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) CreateGraphqlApiWithContext(ctx aws.Context, input *CreateGraphqlApiInput, opts ...request.Option) (*CreateGraphqlApiOutput, error) { - req, out := c.CreateGraphqlApiRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateResolver = "CreateResolver" - -// CreateResolverRequest generates a "aws/request.Request" representing the -// client's request for the CreateResolver operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateResolver for more information on using the CreateResolver -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateResolverRequest method. -// req, resp := client.CreateResolverRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateResolver -func (c *AppSync) CreateResolverRequest(input *CreateResolverInput) (req *request.Request, output *CreateResolverOutput) { - op := &request.Operation{ - Name: opCreateResolver, - HTTPMethod: "POST", - HTTPPath: "/v1/apis/{apiId}/types/{typeName}/resolvers", - } - - if input == nil { - input = &CreateResolverInput{} - } - - output = &CreateResolverOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateResolver API operation for AWS AppSync. -// -// Creates a Resolver object. 
-// -// A resolver converts incoming requests into a format that a data source can -// understand and converts the data source's responses into GraphQL. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation CreateResolver for usage and error information. -// -// Returned Error Codes: -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateResolver -func (c *AppSync) CreateResolver(input *CreateResolverInput) (*CreateResolverOutput, error) { - req, out := c.CreateResolverRequest(input) - return out, req.Send() -} - -// CreateResolverWithContext is the same as CreateResolver with the addition of -// the ability to pass a context and additional request options. -// -// See CreateResolver for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *AppSync) CreateResolverWithContext(ctx aws.Context, input *CreateResolverInput, opts ...request.Option) (*CreateResolverOutput, error) { - req, out := c.CreateResolverRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateType = "CreateType" - -// CreateTypeRequest generates a "aws/request.Request" representing the -// client's request for the CreateType operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateType for more information on using the CreateType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateTypeRequest method. -// req, resp := client.CreateTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateType -func (c *AppSync) CreateTypeRequest(input *CreateTypeInput) (req *request.Request, output *CreateTypeOutput) { - op := &request.Operation{ - Name: opCreateType, - HTTPMethod: "POST", - HTTPPath: "/v1/apis/{apiId}/types", - } - - if input == nil { - input = &CreateTypeInput{} - } - - output = &CreateTypeOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateType API operation for AWS AppSync. -// -// Creates a Type object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for AWS AppSync's -// API operation CreateType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateType -func (c *AppSync) CreateType(input *CreateTypeInput) (*CreateTypeOutput, error) { - req, out := c.CreateTypeRequest(input) - return out, req.Send() -} - -// CreateTypeWithContext is the same as CreateType with the addition of -// the ability to pass a context and additional request options. -// -// See CreateType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) CreateTypeWithContext(ctx aws.Context, input *CreateTypeInput, opts ...request.Option) (*CreateTypeOutput, error) { - req, out := c.CreateTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteApiKey = "DeleteApiKey" - -// DeleteApiKeyRequest generates a "aws/request.Request" representing the -// client's request for the DeleteApiKey operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteApiKey for more information on using the DeleteApiKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteApiKeyRequest method. -// req, resp := client.DeleteApiKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteApiKey -func (c *AppSync) DeleteApiKeyRequest(input *DeleteApiKeyInput) (req *request.Request, output *DeleteApiKeyOutput) { - op := &request.Operation{ - Name: opDeleteApiKey, - HTTPMethod: "DELETE", - HTTPPath: "/v1/apis/{apiId}/apikeys/{id}", - } - - if input == nil { - input = &DeleteApiKeyInput{} - } - - output = &DeleteApiKeyOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteApiKey API operation for AWS AppSync. -// -// Deletes an API key. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation DeleteApiKey for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. 
For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteApiKey -func (c *AppSync) DeleteApiKey(input *DeleteApiKeyInput) (*DeleteApiKeyOutput, error) { - req, out := c.DeleteApiKeyRequest(input) - return out, req.Send() -} - -// DeleteApiKeyWithContext is the same as DeleteApiKey with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteApiKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) DeleteApiKeyWithContext(ctx aws.Context, input *DeleteApiKeyInput, opts ...request.Option) (*DeleteApiKeyOutput, error) { - req, out := c.DeleteApiKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteDataSource = "DeleteDataSource" - -// DeleteDataSourceRequest generates a "aws/request.Request" representing the -// client's request for the DeleteDataSource operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See DeleteDataSource for more information on using the DeleteDataSource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteDataSourceRequest method. -// req, resp := client.DeleteDataSourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteDataSource -func (c *AppSync) DeleteDataSourceRequest(input *DeleteDataSourceInput) (req *request.Request, output *DeleteDataSourceOutput) { - op := &request.Operation{ - Name: opDeleteDataSource, - HTTPMethod: "DELETE", - HTTPPath: "/v1/apis/{apiId}/datasources/{name}", - } - - if input == nil { - input = &DeleteDataSourceInput{} - } - - output = &DeleteDataSourceOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteDataSource API operation for AWS AppSync. -// -// Deletes a DataSource object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation DeleteDataSource for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. 
-// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteDataSource -func (c *AppSync) DeleteDataSource(input *DeleteDataSourceInput) (*DeleteDataSourceOutput, error) { - req, out := c.DeleteDataSourceRequest(input) - return out, req.Send() -} - -// DeleteDataSourceWithContext is the same as DeleteDataSource with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteDataSource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) DeleteDataSourceWithContext(ctx aws.Context, input *DeleteDataSourceInput, opts ...request.Option) (*DeleteDataSourceOutput, error) { - req, out := c.DeleteDataSourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteGraphqlApi = "DeleteGraphqlApi" - -// DeleteGraphqlApiRequest generates a "aws/request.Request" representing the -// client's request for the DeleteGraphqlApi operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteGraphqlApi for more information on using the DeleteGraphqlApi -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteGraphqlApiRequest method. -// req, resp := client.DeleteGraphqlApiRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteGraphqlApi -func (c *AppSync) DeleteGraphqlApiRequest(input *DeleteGraphqlApiInput) (req *request.Request, output *DeleteGraphqlApiOutput) { - op := &request.Operation{ - Name: opDeleteGraphqlApi, - HTTPMethod: "DELETE", - HTTPPath: "/v1/apis/{apiId}", - } - - if input == nil { - input = &DeleteGraphqlApiInput{} - } - - output = &DeleteGraphqlApiOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteGraphqlApi API operation for AWS AppSync. -// -// Deletes a GraphqlApi object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation DeleteGraphqlApi for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. 
-// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteGraphqlApi -func (c *AppSync) DeleteGraphqlApi(input *DeleteGraphqlApiInput) (*DeleteGraphqlApiOutput, error) { - req, out := c.DeleteGraphqlApiRequest(input) - return out, req.Send() -} - -// DeleteGraphqlApiWithContext is the same as DeleteGraphqlApi with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteGraphqlApi for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) DeleteGraphqlApiWithContext(ctx aws.Context, input *DeleteGraphqlApiInput, opts ...request.Option) (*DeleteGraphqlApiOutput, error) { - req, out := c.DeleteGraphqlApiRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteResolver = "DeleteResolver" - -// DeleteResolverRequest generates a "aws/request.Request" representing the -// client's request for the DeleteResolver operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteResolver for more information on using the DeleteResolver -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the DeleteResolverRequest method. -// req, resp := client.DeleteResolverRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteResolver -func (c *AppSync) DeleteResolverRequest(input *DeleteResolverInput) (req *request.Request, output *DeleteResolverOutput) { - op := &request.Operation{ - Name: opDeleteResolver, - HTTPMethod: "DELETE", - HTTPPath: "/v1/apis/{apiId}/types/{typeName}/resolvers/{fieldName}", - } - - if input == nil { - input = &DeleteResolverInput{} - } - - output = &DeleteResolverOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteResolver API operation for AWS AppSync. -// -// Deletes a Resolver object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation DeleteResolver for usage and error information. -// -// Returned Error Codes: -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteResolver -func (c *AppSync) DeleteResolver(input *DeleteResolverInput) (*DeleteResolverOutput, error) { - req, out := c.DeleteResolverRequest(input) - return out, req.Send() -} - -// DeleteResolverWithContext is the same as DeleteResolver with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteResolver for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) DeleteResolverWithContext(ctx aws.Context, input *DeleteResolverInput, opts ...request.Option) (*DeleteResolverOutput, error) { - req, out := c.DeleteResolverRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteType = "DeleteType" - -// DeleteTypeRequest generates a "aws/request.Request" representing the -// client's request for the DeleteType operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteType for more information on using the DeleteType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteTypeRequest method. 
-// req, resp := client.DeleteTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteType -func (c *AppSync) DeleteTypeRequest(input *DeleteTypeInput) (req *request.Request, output *DeleteTypeOutput) { - op := &request.Operation{ - Name: opDeleteType, - HTTPMethod: "DELETE", - HTTPPath: "/v1/apis/{apiId}/types/{typeName}", - } - - if input == nil { - input = &DeleteTypeInput{} - } - - output = &DeleteTypeOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteType API operation for AWS AppSync. -// -// Deletes a Type object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation DeleteType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteType -func (c *AppSync) DeleteType(input *DeleteTypeInput) (*DeleteTypeOutput, error) { - req, out := c.DeleteTypeRequest(input) - return out, req.Send() -} - -// DeleteTypeWithContext is the same as DeleteType with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) DeleteTypeWithContext(ctx aws.Context, input *DeleteTypeInput, opts ...request.Option) (*DeleteTypeOutput, error) { - req, out := c.DeleteTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetDataSource = "GetDataSource" - -// GetDataSourceRequest generates a "aws/request.Request" representing the -// client's request for the GetDataSource operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetDataSource for more information on using the GetDataSource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetDataSourceRequest method. 
-// req, resp := client.GetDataSourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetDataSource -func (c *AppSync) GetDataSourceRequest(input *GetDataSourceInput) (req *request.Request, output *GetDataSourceOutput) { - op := &request.Operation{ - Name: opGetDataSource, - HTTPMethod: "GET", - HTTPPath: "/v1/apis/{apiId}/datasources/{name}", - } - - if input == nil { - input = &GetDataSourceInput{} - } - - output = &GetDataSourceOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetDataSource API operation for AWS AppSync. -// -// Retrieves a DataSource object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation GetDataSource for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetDataSource -func (c *AppSync) GetDataSource(input *GetDataSourceInput) (*GetDataSourceOutput, error) { - req, out := c.GetDataSourceRequest(input) - return out, req.Send() -} - -// GetDataSourceWithContext is the same as GetDataSource with the addition of -// the ability to pass a context and additional request options. -// -// See GetDataSource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) GetDataSourceWithContext(ctx aws.Context, input *GetDataSourceInput, opts ...request.Option) (*GetDataSourceOutput, error) { - req, out := c.GetDataSourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetGraphqlApi = "GetGraphqlApi" - -// GetGraphqlApiRequest generates a "aws/request.Request" representing the -// client's request for the GetGraphqlApi operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetGraphqlApi for more information on using the GetGraphqlApi -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetGraphqlApiRequest method. 
-// req, resp := client.GetGraphqlApiRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetGraphqlApi -func (c *AppSync) GetGraphqlApiRequest(input *GetGraphqlApiInput) (req *request.Request, output *GetGraphqlApiOutput) { - op := &request.Operation{ - Name: opGetGraphqlApi, - HTTPMethod: "GET", - HTTPPath: "/v1/apis/{apiId}", - } - - if input == nil { - input = &GetGraphqlApiInput{} - } - - output = &GetGraphqlApiOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetGraphqlApi API operation for AWS AppSync. -// -// Retrieves a GraphqlApi object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation GetGraphqlApi for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetGraphqlApi -func (c *AppSync) GetGraphqlApi(input *GetGraphqlApiInput) (*GetGraphqlApiOutput, error) { - req, out := c.GetGraphqlApiRequest(input) - return out, req.Send() -} - -// GetGraphqlApiWithContext is the same as GetGraphqlApi with the addition of -// the ability to pass a context and additional request options. -// -// See GetGraphqlApi for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) GetGraphqlApiWithContext(ctx aws.Context, input *GetGraphqlApiInput, opts ...request.Option) (*GetGraphqlApiOutput, error) { - req, out := c.GetGraphqlApiRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetIntrospectionSchema = "GetIntrospectionSchema" - -// GetIntrospectionSchemaRequest generates a "aws/request.Request" representing the -// client's request for the GetIntrospectionSchema operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetIntrospectionSchema for more information on using the GetIntrospectionSchema -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetIntrospectionSchemaRequest method. 
-// req, resp := client.GetIntrospectionSchemaRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetIntrospectionSchema -func (c *AppSync) GetIntrospectionSchemaRequest(input *GetIntrospectionSchemaInput) (req *request.Request, output *GetIntrospectionSchemaOutput) { - op := &request.Operation{ - Name: opGetIntrospectionSchema, - HTTPMethod: "GET", - HTTPPath: "/v1/apis/{apiId}/schema", - } - - if input == nil { - input = &GetIntrospectionSchemaInput{} - } - - output = &GetIntrospectionSchemaOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetIntrospectionSchema API operation for AWS AppSync. -// -// Retrieves the introspection schema for a GraphQL API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation GetIntrospectionSchema for usage and error information. -// -// Returned Error Codes: -// * ErrCodeGraphQLSchemaException "GraphQLSchemaException" -// The GraphQL schema is not valid. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetIntrospectionSchema -func (c *AppSync) GetIntrospectionSchema(input *GetIntrospectionSchemaInput) (*GetIntrospectionSchemaOutput, error) { - req, out := c.GetIntrospectionSchemaRequest(input) - return out, req.Send() -} - -// GetIntrospectionSchemaWithContext is the same as GetIntrospectionSchema with the addition of -// the ability to pass a context and additional request options. -// -// See GetIntrospectionSchema for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) GetIntrospectionSchemaWithContext(ctx aws.Context, input *GetIntrospectionSchemaInput, opts ...request.Option) (*GetIntrospectionSchemaOutput, error) { - req, out := c.GetIntrospectionSchemaRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetResolver = "GetResolver" - -// GetResolverRequest generates a "aws/request.Request" representing the -// client's request for the GetResolver operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetResolver for more information on using the GetResolver -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetResolverRequest method. 
-// req, resp := client.GetResolverRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetResolver -func (c *AppSync) GetResolverRequest(input *GetResolverInput) (req *request.Request, output *GetResolverOutput) { - op := &request.Operation{ - Name: opGetResolver, - HTTPMethod: "GET", - HTTPPath: "/v1/apis/{apiId}/types/{typeName}/resolvers/{fieldName}", - } - - if input == nil { - input = &GetResolverInput{} - } - - output = &GetResolverOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetResolver API operation for AWS AppSync. -// -// Retrieves a Resolver object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation GetResolver for usage and error information. -// -// Returned Error Codes: -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetResolver -func (c *AppSync) GetResolver(input *GetResolverInput) (*GetResolverOutput, error) { - req, out := c.GetResolverRequest(input) - return out, req.Send() -} - -// GetResolverWithContext is the same as GetResolver with the addition of -// the ability to pass a context and additional request options. -// -// See GetResolver for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) GetResolverWithContext(ctx aws.Context, input *GetResolverInput, opts ...request.Option) (*GetResolverOutput, error) { - req, out := c.GetResolverRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSchemaCreationStatus = "GetSchemaCreationStatus" - -// GetSchemaCreationStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetSchemaCreationStatus operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSchemaCreationStatus for more information on using the GetSchemaCreationStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSchemaCreationStatusRequest method. 
-// req, resp := client.GetSchemaCreationStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetSchemaCreationStatus -func (c *AppSync) GetSchemaCreationStatusRequest(input *GetSchemaCreationStatusInput) (req *request.Request, output *GetSchemaCreationStatusOutput) { - op := &request.Operation{ - Name: opGetSchemaCreationStatus, - HTTPMethod: "GET", - HTTPPath: "/v1/apis/{apiId}/schemacreation", - } - - if input == nil { - input = &GetSchemaCreationStatusInput{} - } - - output = &GetSchemaCreationStatusOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSchemaCreationStatus API operation for AWS AppSync. -// -// Retrieves the current status of a schema creation operation. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation GetSchemaCreationStatus for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetSchemaCreationStatus -func (c *AppSync) GetSchemaCreationStatus(input *GetSchemaCreationStatusInput) (*GetSchemaCreationStatusOutput, error) { - req, out := c.GetSchemaCreationStatusRequest(input) - return out, req.Send() -} - -// GetSchemaCreationStatusWithContext is the same as GetSchemaCreationStatus with the addition of -// the ability to pass a context and additional request options. -// -// See GetSchemaCreationStatus for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) GetSchemaCreationStatusWithContext(ctx aws.Context, input *GetSchemaCreationStatusInput, opts ...request.Option) (*GetSchemaCreationStatusOutput, error) { - req, out := c.GetSchemaCreationStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetType = "GetType" - -// GetTypeRequest generates a "aws/request.Request" representing the -// client's request for the GetType operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetType for more information on using the GetType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetTypeRequest method. 
-// req, resp := client.GetTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetType -func (c *AppSync) GetTypeRequest(input *GetTypeInput) (req *request.Request, output *GetTypeOutput) { - op := &request.Operation{ - Name: opGetType, - HTTPMethod: "GET", - HTTPPath: "/v1/apis/{apiId}/types/{typeName}", - } - - if input == nil { - input = &GetTypeInput{} - } - - output = &GetTypeOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetType API operation for AWS AppSync. -// -// Retrieves a Type object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation GetType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetType -func (c *AppSync) GetType(input *GetTypeInput) (*GetTypeOutput, error) { - req, out := c.GetTypeRequest(input) - return out, req.Send() -} - -// GetTypeWithContext is the same as GetType with the addition of -// the ability to pass a context and additional request options. -// -// See GetType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) GetTypeWithContext(ctx aws.Context, input *GetTypeInput, opts ...request.Option) (*GetTypeOutput, error) { - req, out := c.GetTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListApiKeys = "ListApiKeys" - -// ListApiKeysRequest generates a "aws/request.Request" representing the -// client's request for the ListApiKeys operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListApiKeys for more information on using the ListApiKeys -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListApiKeysRequest method. 
-// req, resp := client.ListApiKeysRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListApiKeys -func (c *AppSync) ListApiKeysRequest(input *ListApiKeysInput) (req *request.Request, output *ListApiKeysOutput) { - op := &request.Operation{ - Name: opListApiKeys, - HTTPMethod: "GET", - HTTPPath: "/v1/apis/{apiId}/apikeys", - } - - if input == nil { - input = &ListApiKeysInput{} - } - - output = &ListApiKeysOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListApiKeys API operation for AWS AppSync. -// -// Lists the API keys for a given API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation ListApiKeys for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListApiKeys -func (c *AppSync) ListApiKeys(input *ListApiKeysInput) (*ListApiKeysOutput, error) { - req, out := c.ListApiKeysRequest(input) - return out, req.Send() -} - -// ListApiKeysWithContext is the same as ListApiKeys with the addition of -// the ability to pass a context and additional request options. -// -// See ListApiKeys for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) ListApiKeysWithContext(ctx aws.Context, input *ListApiKeysInput, opts ...request.Option) (*ListApiKeysOutput, error) { - req, out := c.ListApiKeysRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListDataSources = "ListDataSources" - -// ListDataSourcesRequest generates a "aws/request.Request" representing the -// client's request for the ListDataSources operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListDataSources for more information on using the ListDataSources -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListDataSourcesRequest method. 
-// req, resp := client.ListDataSourcesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListDataSources -func (c *AppSync) ListDataSourcesRequest(input *ListDataSourcesInput) (req *request.Request, output *ListDataSourcesOutput) { - op := &request.Operation{ - Name: opListDataSources, - HTTPMethod: "GET", - HTTPPath: "/v1/apis/{apiId}/datasources", - } - - if input == nil { - input = &ListDataSourcesInput{} - } - - output = &ListDataSourcesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListDataSources API operation for AWS AppSync. -// -// Lists the data sources for a given API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation ListDataSources for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListDataSources -func (c *AppSync) ListDataSources(input *ListDataSourcesInput) (*ListDataSourcesOutput, error) { - req, out := c.ListDataSourcesRequest(input) - return out, req.Send() -} - -// ListDataSourcesWithContext is the same as ListDataSources with the addition of -// the ability to pass a context and additional request options. -// -// See ListDataSources for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) ListDataSourcesWithContext(ctx aws.Context, input *ListDataSourcesInput, opts ...request.Option) (*ListDataSourcesOutput, error) { - req, out := c.ListDataSourcesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListGraphqlApis = "ListGraphqlApis" - -// ListGraphqlApisRequest generates a "aws/request.Request" representing the -// client's request for the ListGraphqlApis operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListGraphqlApis for more information on using the ListGraphqlApis -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListGraphqlApisRequest method. 
-// req, resp := client.ListGraphqlApisRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListGraphqlApis -func (c *AppSync) ListGraphqlApisRequest(input *ListGraphqlApisInput) (req *request.Request, output *ListGraphqlApisOutput) { - op := &request.Operation{ - Name: opListGraphqlApis, - HTTPMethod: "GET", - HTTPPath: "/v1/apis", - } - - if input == nil { - input = &ListGraphqlApisInput{} - } - - output = &ListGraphqlApisOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListGraphqlApis API operation for AWS AppSync. -// -// Lists your GraphQL APIs. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation ListGraphqlApis for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListGraphqlApis -func (c *AppSync) ListGraphqlApis(input *ListGraphqlApisInput) (*ListGraphqlApisOutput, error) { - req, out := c.ListGraphqlApisRequest(input) - return out, req.Send() -} - -// ListGraphqlApisWithContext is the same as ListGraphqlApis with the addition of -// the ability to pass a context and additional request options. -// -// See ListGraphqlApis for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) ListGraphqlApisWithContext(ctx aws.Context, input *ListGraphqlApisInput, opts ...request.Option) (*ListGraphqlApisOutput, error) { - req, out := c.ListGraphqlApisRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListResolvers = "ListResolvers" - -// ListResolversRequest generates a "aws/request.Request" representing the -// client's request for the ListResolvers operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListResolvers for more information on using the ListResolvers -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListResolversRequest method. 
-// req, resp := client.ListResolversRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListResolvers -func (c *AppSync) ListResolversRequest(input *ListResolversInput) (req *request.Request, output *ListResolversOutput) { - op := &request.Operation{ - Name: opListResolvers, - HTTPMethod: "GET", - HTTPPath: "/v1/apis/{apiId}/types/{typeName}/resolvers", - } - - if input == nil { - input = &ListResolversInput{} - } - - output = &ListResolversOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListResolvers API operation for AWS AppSync. -// -// Lists the resolvers for a given API and type. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation ListResolvers for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListResolvers -func (c *AppSync) ListResolvers(input *ListResolversInput) (*ListResolversOutput, error) { - req, out := c.ListResolversRequest(input) - return out, req.Send() -} - -// ListResolversWithContext is the same as ListResolvers with the addition of -// the ability to pass a context and additional request options. -// -// See ListResolvers for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) ListResolversWithContext(ctx aws.Context, input *ListResolversInput, opts ...request.Option) (*ListResolversOutput, error) { - req, out := c.ListResolversRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListTypes = "ListTypes" - -// ListTypesRequest generates a "aws/request.Request" representing the -// client's request for the ListTypes operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTypes for more information on using the ListTypes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListTypesRequest method. 
-// req, resp := client.ListTypesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListTypes -func (c *AppSync) ListTypesRequest(input *ListTypesInput) (req *request.Request, output *ListTypesOutput) { - op := &request.Operation{ - Name: opListTypes, - HTTPMethod: "GET", - HTTPPath: "/v1/apis/{apiId}/types", - } - - if input == nil { - input = &ListTypesInput{} - } - - output = &ListTypesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListTypes API operation for AWS AppSync. -// -// Lists the types for a given API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation ListTypes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListTypes -func (c *AppSync) ListTypes(input *ListTypesInput) (*ListTypesOutput, error) { - req, out := c.ListTypesRequest(input) - return out, req.Send() -} - -// ListTypesWithContext is the same as ListTypes with the addition of -// the ability to pass a context and additional request options. -// -// See ListTypes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) ListTypesWithContext(ctx aws.Context, input *ListTypesInput, opts ...request.Option) (*ListTypesOutput, error) { - req, out := c.ListTypesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStartSchemaCreation = "StartSchemaCreation" - -// StartSchemaCreationRequest generates a "aws/request.Request" representing the -// client's request for the StartSchemaCreation operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartSchemaCreation for more information on using the StartSchemaCreation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StartSchemaCreationRequest method. 
-// req, resp := client.StartSchemaCreationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/StartSchemaCreation -func (c *AppSync) StartSchemaCreationRequest(input *StartSchemaCreationInput) (req *request.Request, output *StartSchemaCreationOutput) { - op := &request.Operation{ - Name: opStartSchemaCreation, - HTTPMethod: "POST", - HTTPPath: "/v1/apis/{apiId}/schemacreation", - } - - if input == nil { - input = &StartSchemaCreationInput{} - } - - output = &StartSchemaCreationOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartSchemaCreation API operation for AWS AppSync. -// -// Adds a new schema to your GraphQL API. -// -// This operation is asynchronous. Use to determine when it has completed. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation StartSchemaCreation for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. 
Try your request again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/StartSchemaCreation -func (c *AppSync) StartSchemaCreation(input *StartSchemaCreationInput) (*StartSchemaCreationOutput, error) { - req, out := c.StartSchemaCreationRequest(input) - return out, req.Send() -} - -// StartSchemaCreationWithContext is the same as StartSchemaCreation with the addition of -// the ability to pass a context and additional request options. -// -// See StartSchemaCreation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) StartSchemaCreationWithContext(ctx aws.Context, input *StartSchemaCreationInput, opts ...request.Option) (*StartSchemaCreationOutput, error) { - req, out := c.StartSchemaCreationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateDataSource = "UpdateDataSource" - -// UpdateDataSourceRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDataSource operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateDataSource for more information on using the UpdateDataSource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateDataSourceRequest method. 
-// req, resp := client.UpdateDataSourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateDataSource -func (c *AppSync) UpdateDataSourceRequest(input *UpdateDataSourceInput) (req *request.Request, output *UpdateDataSourceOutput) { - op := &request.Operation{ - Name: opUpdateDataSource, - HTTPMethod: "POST", - HTTPPath: "/v1/apis/{apiId}/datasources/{name}", - } - - if input == nil { - input = &UpdateDataSourceInput{} - } - - output = &UpdateDataSourceOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateDataSource API operation for AWS AppSync. -// -// Updates a DataSource object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation UpdateDataSource for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateDataSource -func (c *AppSync) UpdateDataSource(input *UpdateDataSourceInput) (*UpdateDataSourceOutput, error) { - req, out := c.UpdateDataSourceRequest(input) - return out, req.Send() -} - -// UpdateDataSourceWithContext is the same as UpdateDataSource with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateDataSource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) UpdateDataSourceWithContext(ctx aws.Context, input *UpdateDataSourceInput, opts ...request.Option) (*UpdateDataSourceOutput, error) { - req, out := c.UpdateDataSourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateGraphqlApi = "UpdateGraphqlApi" - -// UpdateGraphqlApiRequest generates a "aws/request.Request" representing the -// client's request for the UpdateGraphqlApi operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateGraphqlApi for more information on using the UpdateGraphqlApi -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateGraphqlApiRequest method. 
-// req, resp := client.UpdateGraphqlApiRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateGraphqlApi -func (c *AppSync) UpdateGraphqlApiRequest(input *UpdateGraphqlApiInput) (req *request.Request, output *UpdateGraphqlApiOutput) { - op := &request.Operation{ - Name: opUpdateGraphqlApi, - HTTPMethod: "POST", - HTTPPath: "/v1/apis/{apiId}", - } - - if input == nil { - input = &UpdateGraphqlApiInput{} - } - - output = &UpdateGraphqlApiOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateGraphqlApi API operation for AWS AppSync. -// -// Updates a GraphqlApi object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation UpdateGraphqlApi for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateGraphqlApi -func (c *AppSync) UpdateGraphqlApi(input *UpdateGraphqlApiInput) (*UpdateGraphqlApiOutput, error) { - req, out := c.UpdateGraphqlApiRequest(input) - return out, req.Send() -} - -// UpdateGraphqlApiWithContext is the same as UpdateGraphqlApi with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateGraphqlApi for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) UpdateGraphqlApiWithContext(ctx aws.Context, input *UpdateGraphqlApiInput, opts ...request.Option) (*UpdateGraphqlApiOutput, error) { - req, out := c.UpdateGraphqlApiRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateResolver = "UpdateResolver" - -// UpdateResolverRequest generates a "aws/request.Request" representing the -// client's request for the UpdateResolver operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateResolver for more information on using the UpdateResolver -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateResolverRequest method. 
-// req, resp := client.UpdateResolverRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateResolver -func (c *AppSync) UpdateResolverRequest(input *UpdateResolverInput) (req *request.Request, output *UpdateResolverOutput) { - op := &request.Operation{ - Name: opUpdateResolver, - HTTPMethod: "POST", - HTTPPath: "/v1/apis/{apiId}/types/{typeName}/resolvers/{fieldName}", - } - - if input == nil { - input = &UpdateResolverInput{} - } - - output = &UpdateResolverOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateResolver API operation for AWS AppSync. -// -// Updates a Resolver object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation UpdateResolver for usage and error information. -// -// Returned Error Codes: -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateResolver -func (c *AppSync) UpdateResolver(input *UpdateResolverInput) (*UpdateResolverOutput, error) { - req, out := c.UpdateResolverRequest(input) - return out, req.Send() -} - -// UpdateResolverWithContext is the same as UpdateResolver with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateResolver for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) UpdateResolverWithContext(ctx aws.Context, input *UpdateResolverInput, opts ...request.Option) (*UpdateResolverOutput, error) { - req, out := c.UpdateResolverRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateType = "UpdateType" - -// UpdateTypeRequest generates a "aws/request.Request" representing the -// client's request for the UpdateType operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateType for more information on using the UpdateType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateTypeRequest method. 
-// req, resp := client.UpdateTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateType -func (c *AppSync) UpdateTypeRequest(input *UpdateTypeInput) (req *request.Request, output *UpdateTypeOutput) { - op := &request.Operation{ - Name: opUpdateType, - HTTPMethod: "POST", - HTTPPath: "/v1/apis/{apiId}/types/{typeName}", - } - - if input == nil { - input = &UpdateTypeInput{} - } - - output = &UpdateTypeOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateType API operation for AWS AppSync. -// -// Updates a Type object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS AppSync's -// API operation UpdateType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Another modification is being made. That modification must complete before -// you can make your change. -// -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal AWS AppSync error occurred. Try your request again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateType -func (c *AppSync) UpdateType(input *UpdateTypeInput) (*UpdateTypeOutput, error) { - req, out := c.UpdateTypeRequest(input) - return out, req.Send() -} - -// UpdateTypeWithContext is the same as UpdateType with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *AppSync) UpdateTypeWithContext(ctx aws.Context, input *UpdateTypeInput, opts ...request.Option) (*UpdateTypeOutput, error) { - req, out := c.UpdateTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Describes an API key. -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ApiKey -type ApiKey struct { - _ struct{} `type:"structure"` - - // A description of the purpose of the API key. - Description *string `locationName:"description" type:"string"` - - // The time when the API key expires. - Expires *int64 `locationName:"expires" type:"long"` - - // The API key ID. - Id *string `locationName:"id" type:"string"` -} - -// String returns the string representation -func (s ApiKey) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ApiKey) GoString() string { - return s.String() -} - -// SetDescription sets the Description field's value. -func (s *ApiKey) SetDescription(v string) *ApiKey { - s.Description = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *ApiKey) SetExpires(v int64) *ApiKey { - s.Expires = &v - return s -} - -// SetId sets the Id field's value. 
-func (s *ApiKey) SetId(v string) *ApiKey { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateApiKeyRequest -type CreateApiKeyInput struct { - _ struct{} `type:"structure"` - - // The ID for your GraphQL API. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // A description of the purpose of the API key. - Description *string `locationName:"description" type:"string"` -} - -// String returns the string representation -func (s CreateApiKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateApiKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateApiKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateApiKeyInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *CreateApiKeyInput) SetApiId(v string) *CreateApiKeyInput { - s.ApiId = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateApiKeyInput) SetDescription(v string) *CreateApiKeyInput { - s.Description = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateApiKeyResponse -type CreateApiKeyOutput struct { - _ struct{} `type:"structure"` - - // The API key. - ApiKey *ApiKey `locationName:"apiKey" type:"structure"` -} - -// String returns the string representation -func (s CreateApiKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateApiKeyOutput) GoString() string { - return s.String() -} - -// SetApiKey sets the ApiKey field's value. 
-func (s *CreateApiKeyOutput) SetApiKey(v *ApiKey) *CreateApiKeyOutput { - s.ApiKey = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateDataSourceRequest -type CreateDataSourceInput struct { - _ struct{} `type:"structure"` - - // The API ID for the GraphQL API for the DataSource. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // A description of the DataSource. - Description *string `locationName:"description" type:"string"` - - // DynamoDB settings. - DynamodbConfig *DynamodbDataSourceConfig `locationName:"dynamodbConfig" type:"structure"` - - // Amazon Elasticsearch settings. - ElasticsearchConfig *ElasticsearchDataSourceConfig `locationName:"elasticsearchConfig" type:"structure"` - - // AWS Lambda settings. - LambdaConfig *LambdaDataSourceConfig `locationName:"lambdaConfig" type:"structure"` - - // A user-supplied name for the DataSource. - // - // Name is a required field - Name *string `locationName:"name" type:"string" required:"true"` - - // The IAM service role ARN for the data source. The system assumes this role - // when accessing the data source. - ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string"` - - // The type of the DataSource. - // - // Type is a required field - Type *string `locationName:"type" type:"string" required:"true" enum:"DataSourceType"` -} - -// String returns the string representation -func (s CreateDataSourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateDataSourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateDataSourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDataSourceInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - if s.DynamodbConfig != nil { - if err := s.DynamodbConfig.Validate(); err != nil { - invalidParams.AddNested("DynamodbConfig", err.(request.ErrInvalidParams)) - } - } - if s.ElasticsearchConfig != nil { - if err := s.ElasticsearchConfig.Validate(); err != nil { - invalidParams.AddNested("ElasticsearchConfig", err.(request.ErrInvalidParams)) - } - } - if s.LambdaConfig != nil { - if err := s.LambdaConfig.Validate(); err != nil { - invalidParams.AddNested("LambdaConfig", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *CreateDataSourceInput) SetApiId(v string) *CreateDataSourceInput { - s.ApiId = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateDataSourceInput) SetDescription(v string) *CreateDataSourceInput { - s.Description = &v - return s -} - -// SetDynamodbConfig sets the DynamodbConfig field's value. -func (s *CreateDataSourceInput) SetDynamodbConfig(v *DynamodbDataSourceConfig) *CreateDataSourceInput { - s.DynamodbConfig = v - return s -} - -// SetElasticsearchConfig sets the ElasticsearchConfig field's value. -func (s *CreateDataSourceInput) SetElasticsearchConfig(v *ElasticsearchDataSourceConfig) *CreateDataSourceInput { - s.ElasticsearchConfig = v - return s -} - -// SetLambdaConfig sets the LambdaConfig field's value. -func (s *CreateDataSourceInput) SetLambdaConfig(v *LambdaDataSourceConfig) *CreateDataSourceInput { - s.LambdaConfig = v - return s -} - -// SetName sets the Name field's value. 
-func (s *CreateDataSourceInput) SetName(v string) *CreateDataSourceInput { - s.Name = &v - return s -} - -// SetServiceRoleArn sets the ServiceRoleArn field's value. -func (s *CreateDataSourceInput) SetServiceRoleArn(v string) *CreateDataSourceInput { - s.ServiceRoleArn = &v - return s -} - -// SetType sets the Type field's value. -func (s *CreateDataSourceInput) SetType(v string) *CreateDataSourceInput { - s.Type = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateDataSourceResponse -type CreateDataSourceOutput struct { - _ struct{} `type:"structure"` - - // The DataSource object. - DataSource *DataSource `locationName:"dataSource" type:"structure"` -} - -// String returns the string representation -func (s CreateDataSourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateDataSourceOutput) GoString() string { - return s.String() -} - -// SetDataSource sets the DataSource field's value. -func (s *CreateDataSourceOutput) SetDataSource(v *DataSource) *CreateDataSourceOutput { - s.DataSource = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateGraphqlApiRequest -type CreateGraphqlApiInput struct { - _ struct{} `type:"structure"` - - // The authentication type: API key, IAM, or Amazon Cognito User Pools. - // - // AuthenticationType is a required field - AuthenticationType *string `locationName:"authenticationType" type:"string" required:"true" enum:"AuthenticationType"` - - // A user-supplied name for the GraphqlApi. - // - // Name is a required field - Name *string `locationName:"name" type:"string" required:"true"` - - // The Amazon Cognito User Pool configuration. 
- UserPoolConfig *UserPoolConfig `locationName:"userPoolConfig" type:"structure"` -} - -// String returns the string representation -func (s CreateGraphqlApiInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateGraphqlApiInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateGraphqlApiInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateGraphqlApiInput"} - if s.AuthenticationType == nil { - invalidParams.Add(request.NewErrParamRequired("AuthenticationType")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.UserPoolConfig != nil { - if err := s.UserPoolConfig.Validate(); err != nil { - invalidParams.AddNested("UserPoolConfig", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAuthenticationType sets the AuthenticationType field's value. -func (s *CreateGraphqlApiInput) SetAuthenticationType(v string) *CreateGraphqlApiInput { - s.AuthenticationType = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateGraphqlApiInput) SetName(v string) *CreateGraphqlApiInput { - s.Name = &v - return s -} - -// SetUserPoolConfig sets the UserPoolConfig field's value. -func (s *CreateGraphqlApiInput) SetUserPoolConfig(v *UserPoolConfig) *CreateGraphqlApiInput { - s.UserPoolConfig = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateGraphqlApiResponse -type CreateGraphqlApiOutput struct { - _ struct{} `type:"structure"` - - // The GraphqlApi. 
- GraphqlApi *GraphqlApi `locationName:"graphqlApi" type:"structure"` -} - -// String returns the string representation -func (s CreateGraphqlApiOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateGraphqlApiOutput) GoString() string { - return s.String() -} - -// SetGraphqlApi sets the GraphqlApi field's value. -func (s *CreateGraphqlApiOutput) SetGraphqlApi(v *GraphqlApi) *CreateGraphqlApiOutput { - s.GraphqlApi = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateResolverRequest -type CreateResolverInput struct { - _ struct{} `type:"structure"` - - // The ID for the GraphQL API for which the resolver is being created. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The name of the data source for which the resolver is being created. - // - // DataSourceName is a required field - DataSourceName *string `locationName:"dataSourceName" type:"string" required:"true"` - - // The name of the field to attach the resolver to. - // - // FieldName is a required field - FieldName *string `locationName:"fieldName" type:"string" required:"true"` - - // The mapping template to be used for requests. - // - // A resolver use a request mapping template to convert a GraphQL expression - // into a format that a data source can understand. Mapping templates are written - // in Apache Velocity Template Language (VTL). - // - // RequestMappingTemplate is a required field - RequestMappingTemplate *string `locationName:"requestMappingTemplate" type:"string" required:"true"` - - // The mapping template to be used for responses from the data source. - ResponseMappingTemplate *string `locationName:"responseMappingTemplate" type:"string"` - - // The name of the Type. 
- // - // TypeName is a required field - TypeName *string `location:"uri" locationName:"typeName" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateResolverInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateResolverInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateResolverInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateResolverInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.DataSourceName == nil { - invalidParams.Add(request.NewErrParamRequired("DataSourceName")) - } - if s.FieldName == nil { - invalidParams.Add(request.NewErrParamRequired("FieldName")) - } - if s.RequestMappingTemplate == nil { - invalidParams.Add(request.NewErrParamRequired("RequestMappingTemplate")) - } - if s.TypeName == nil { - invalidParams.Add(request.NewErrParamRequired("TypeName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *CreateResolverInput) SetApiId(v string) *CreateResolverInput { - s.ApiId = &v - return s -} - -// SetDataSourceName sets the DataSourceName field's value. -func (s *CreateResolverInput) SetDataSourceName(v string) *CreateResolverInput { - s.DataSourceName = &v - return s -} - -// SetFieldName sets the FieldName field's value. -func (s *CreateResolverInput) SetFieldName(v string) *CreateResolverInput { - s.FieldName = &v - return s -} - -// SetRequestMappingTemplate sets the RequestMappingTemplate field's value. -func (s *CreateResolverInput) SetRequestMappingTemplate(v string) *CreateResolverInput { - s.RequestMappingTemplate = &v - return s -} - -// SetResponseMappingTemplate sets the ResponseMappingTemplate field's value. 
-func (s *CreateResolverInput) SetResponseMappingTemplate(v string) *CreateResolverInput { - s.ResponseMappingTemplate = &v - return s -} - -// SetTypeName sets the TypeName field's value. -func (s *CreateResolverInput) SetTypeName(v string) *CreateResolverInput { - s.TypeName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateResolverResponse -type CreateResolverOutput struct { - _ struct{} `type:"structure"` - - // The Resolver object. - Resolver *Resolver `locationName:"resolver" type:"structure"` -} - -// String returns the string representation -func (s CreateResolverOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateResolverOutput) GoString() string { - return s.String() -} - -// SetResolver sets the Resolver field's value. -func (s *CreateResolverOutput) SetResolver(v *Resolver) *CreateResolverOutput { - s.Resolver = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateTypeRequest -type CreateTypeInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The type definition, in GraphQL Schema Definition Language (SDL) format. - // - // For more information, see the GraphQL SDL documentation (http://graphql.org/learn/schema/). - // - // Definition is a required field - Definition *string `locationName:"definition" type:"string" required:"true"` - - // The type format: SDL or JSON. 
- // - // Format is a required field - Format *string `locationName:"format" type:"string" required:"true" enum:"TypeDefinitionFormat"` -} - -// String returns the string representation -func (s CreateTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateTypeInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Definition == nil { - invalidParams.Add(request.NewErrParamRequired("Definition")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *CreateTypeInput) SetApiId(v string) *CreateTypeInput { - s.ApiId = &v - return s -} - -// SetDefinition sets the Definition field's value. -func (s *CreateTypeInput) SetDefinition(v string) *CreateTypeInput { - s.Definition = &v - return s -} - -// SetFormat sets the Format field's value. -func (s *CreateTypeInput) SetFormat(v string) *CreateTypeInput { - s.Format = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/CreateTypeResponse -type CreateTypeOutput struct { - _ struct{} `type:"structure"` - - // The Type object. - Type *Type `locationName:"type" type:"structure"` -} - -// String returns the string representation -func (s CreateTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateTypeOutput) GoString() string { - return s.String() -} - -// SetType sets the Type field's value. 
-func (s *CreateTypeOutput) SetType(v *Type) *CreateTypeOutput { - s.Type = v - return s -} - -// Describes a data source. -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DataSource -type DataSource struct { - _ struct{} `type:"structure"` - - // The data source ARN. - DataSourceArn *string `locationName:"dataSourceArn" type:"string"` - - // The description of the data source. - Description *string `locationName:"description" type:"string"` - - // DynamoDB settings. - DynamodbConfig *DynamodbDataSourceConfig `locationName:"dynamodbConfig" type:"structure"` - - // Amazon Elasticsearch settings. - ElasticsearchConfig *ElasticsearchDataSourceConfig `locationName:"elasticsearchConfig" type:"structure"` - - // Lambda settings. - LambdaConfig *LambdaDataSourceConfig `locationName:"lambdaConfig" type:"structure"` - - // The name of the data source. - Name *string `locationName:"name" type:"string"` - - // The IAM service role ARN for the data source. The system assumes this role - // when accessing the data source. - ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string"` - - // The type of the data source. - Type *string `locationName:"type" type:"string" enum:"DataSourceType"` -} - -// String returns the string representation -func (s DataSource) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DataSource) GoString() string { - return s.String() -} - -// SetDataSourceArn sets the DataSourceArn field's value. -func (s *DataSource) SetDataSourceArn(v string) *DataSource { - s.DataSourceArn = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *DataSource) SetDescription(v string) *DataSource { - s.Description = &v - return s -} - -// SetDynamodbConfig sets the DynamodbConfig field's value. 
-func (s *DataSource) SetDynamodbConfig(v *DynamodbDataSourceConfig) *DataSource { - s.DynamodbConfig = v - return s -} - -// SetElasticsearchConfig sets the ElasticsearchConfig field's value. -func (s *DataSource) SetElasticsearchConfig(v *ElasticsearchDataSourceConfig) *DataSource { - s.ElasticsearchConfig = v - return s -} - -// SetLambdaConfig sets the LambdaConfig field's value. -func (s *DataSource) SetLambdaConfig(v *LambdaDataSourceConfig) *DataSource { - s.LambdaConfig = v - return s -} - -// SetName sets the Name field's value. -func (s *DataSource) SetName(v string) *DataSource { - s.Name = &v - return s -} - -// SetServiceRoleArn sets the ServiceRoleArn field's value. -func (s *DataSource) SetServiceRoleArn(v string) *DataSource { - s.ServiceRoleArn = &v - return s -} - -// SetType sets the Type field's value. -func (s *DataSource) SetType(v string) *DataSource { - s.Type = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteApiKeyRequest -type DeleteApiKeyInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The ID for the API key. - // - // Id is a required field - Id *string `location:"uri" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteApiKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteApiKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteApiKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteApiKeyInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *DeleteApiKeyInput) SetApiId(v string) *DeleteApiKeyInput { - s.ApiId = &v - return s -} - -// SetId sets the Id field's value. -func (s *DeleteApiKeyInput) SetId(v string) *DeleteApiKeyInput { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteApiKeyResponse -type DeleteApiKeyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteApiKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteApiKeyOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteDataSourceRequest -type DeleteDataSourceInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The name of the data source. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteDataSourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteDataSourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteDataSourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDataSourceInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *DeleteDataSourceInput) SetApiId(v string) *DeleteDataSourceInput { - s.ApiId = &v - return s -} - -// SetName sets the Name field's value. -func (s *DeleteDataSourceInput) SetName(v string) *DeleteDataSourceInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteDataSourceResponse -type DeleteDataSourceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteDataSourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteDataSourceOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteGraphqlApiRequest -type DeleteGraphqlApiInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteGraphqlApiInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteGraphqlApiInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteGraphqlApiInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteGraphqlApiInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *DeleteGraphqlApiInput) SetApiId(v string) *DeleteGraphqlApiInput { - s.ApiId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteGraphqlApiResponse -type DeleteGraphqlApiOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteGraphqlApiOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteGraphqlApiOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteResolverRequest -type DeleteResolverInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The resolver field name. - // - // FieldName is a required field - FieldName *string `location:"uri" locationName:"fieldName" type:"string" required:"true"` - - // The name of the resolver type. - // - // TypeName is a required field - TypeName *string `location:"uri" locationName:"typeName" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteResolverInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteResolverInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteResolverInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteResolverInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.FieldName == nil { - invalidParams.Add(request.NewErrParamRequired("FieldName")) - } - if s.TypeName == nil { - invalidParams.Add(request.NewErrParamRequired("TypeName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *DeleteResolverInput) SetApiId(v string) *DeleteResolverInput { - s.ApiId = &v - return s -} - -// SetFieldName sets the FieldName field's value. -func (s *DeleteResolverInput) SetFieldName(v string) *DeleteResolverInput { - s.FieldName = &v - return s -} - -// SetTypeName sets the TypeName field's value. -func (s *DeleteResolverInput) SetTypeName(v string) *DeleteResolverInput { - s.TypeName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteResolverResponse -type DeleteResolverOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteResolverOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteResolverOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteTypeRequest -type DeleteTypeInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The type name. 
- // - // TypeName is a required field - TypeName *string `location:"uri" locationName:"typeName" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTypeInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.TypeName == nil { - invalidParams.Add(request.NewErrParamRequired("TypeName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *DeleteTypeInput) SetApiId(v string) *DeleteTypeInput { - s.ApiId = &v - return s -} - -// SetTypeName sets the TypeName field's value. -func (s *DeleteTypeInput) SetTypeName(v string) *DeleteTypeInput { - s.TypeName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DeleteTypeResponse -type DeleteTypeOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteTypeOutput) GoString() string { - return s.String() -} - -// Describes a DynamoDB data source configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/DynamodbDataSourceConfig -type DynamodbDataSourceConfig struct { - _ struct{} `type:"structure"` - - // The AWS region. - // - // AwsRegion is a required field - AwsRegion *string `locationName:"awsRegion" type:"string" required:"true"` - - // The table name. 
- // - // TableName is a required field - TableName *string `locationName:"tableName" type:"string" required:"true"` - - // Set to TRUE to use Amazon Cognito credentials with this data source. - UseCallerCredentials *bool `locationName:"useCallerCredentials" type:"boolean"` -} - -// String returns the string representation -func (s DynamodbDataSourceConfig) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DynamodbDataSourceConfig) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DynamodbDataSourceConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DynamodbDataSourceConfig"} - if s.AwsRegion == nil { - invalidParams.Add(request.NewErrParamRequired("AwsRegion")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAwsRegion sets the AwsRegion field's value. -func (s *DynamodbDataSourceConfig) SetAwsRegion(v string) *DynamodbDataSourceConfig { - s.AwsRegion = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *DynamodbDataSourceConfig) SetTableName(v string) *DynamodbDataSourceConfig { - s.TableName = &v - return s -} - -// SetUseCallerCredentials sets the UseCallerCredentials field's value. -func (s *DynamodbDataSourceConfig) SetUseCallerCredentials(v bool) *DynamodbDataSourceConfig { - s.UseCallerCredentials = &v - return s -} - -// Describes an Elasticsearch data source configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ElasticsearchDataSourceConfig -type ElasticsearchDataSourceConfig struct { - _ struct{} `type:"structure"` - - // The AWS region. - // - // AwsRegion is a required field - AwsRegion *string `locationName:"awsRegion" type:"string" required:"true"` - - // The endpoint. 
- // - // Endpoint is a required field - Endpoint *string `locationName:"endpoint" type:"string" required:"true"` -} - -// String returns the string representation -func (s ElasticsearchDataSourceConfig) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ElasticsearchDataSourceConfig) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ElasticsearchDataSourceConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ElasticsearchDataSourceConfig"} - if s.AwsRegion == nil { - invalidParams.Add(request.NewErrParamRequired("AwsRegion")) - } - if s.Endpoint == nil { - invalidParams.Add(request.NewErrParamRequired("Endpoint")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAwsRegion sets the AwsRegion field's value. -func (s *ElasticsearchDataSourceConfig) SetAwsRegion(v string) *ElasticsearchDataSourceConfig { - s.AwsRegion = &v - return s -} - -// SetEndpoint sets the Endpoint field's value. -func (s *ElasticsearchDataSourceConfig) SetEndpoint(v string) *ElasticsearchDataSourceConfig { - s.Endpoint = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetDataSourceRequest -type GetDataSourceInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The name of the data source. 
- // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetDataSourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDataSourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetDataSourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDataSourceInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *GetDataSourceInput) SetApiId(v string) *GetDataSourceInput { - s.ApiId = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetDataSourceInput) SetName(v string) *GetDataSourceInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetDataSourceResponse -type GetDataSourceOutput struct { - _ struct{} `type:"structure"` - - // The DataSource object. - DataSource *DataSource `locationName:"dataSource" type:"structure"` -} - -// String returns the string representation -func (s GetDataSourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDataSourceOutput) GoString() string { - return s.String() -} - -// SetDataSource sets the DataSource field's value. -func (s *GetDataSourceOutput) SetDataSource(v *DataSource) *GetDataSourceOutput { - s.DataSource = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetGraphqlApiRequest -type GetGraphqlApiInput struct { - _ struct{} `type:"structure"` - - // The API ID for the GraphQL API. 
- // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetGraphqlApiInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetGraphqlApiInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetGraphqlApiInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetGraphqlApiInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *GetGraphqlApiInput) SetApiId(v string) *GetGraphqlApiInput { - s.ApiId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetGraphqlApiResponse -type GetGraphqlApiOutput struct { - _ struct{} `type:"structure"` - - // The GraphqlApi object. - GraphqlApi *GraphqlApi `locationName:"graphqlApi" type:"structure"` -} - -// String returns the string representation -func (s GetGraphqlApiOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetGraphqlApiOutput) GoString() string { - return s.String() -} - -// SetGraphqlApi sets the GraphqlApi field's value. -func (s *GetGraphqlApiOutput) SetGraphqlApi(v *GraphqlApi) *GetGraphqlApiOutput { - s.GraphqlApi = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetIntrospectionSchemaRequest -type GetIntrospectionSchemaInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The schema format: SDL or JSON. 
- // - // Format is a required field - Format *string `location:"querystring" locationName:"format" type:"string" required:"true" enum:"OutputType"` -} - -// String returns the string representation -func (s GetIntrospectionSchemaInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetIntrospectionSchemaInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetIntrospectionSchemaInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetIntrospectionSchemaInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *GetIntrospectionSchemaInput) SetApiId(v string) *GetIntrospectionSchemaInput { - s.ApiId = &v - return s -} - -// SetFormat sets the Format field's value. -func (s *GetIntrospectionSchemaInput) SetFormat(v string) *GetIntrospectionSchemaInput { - s.Format = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetIntrospectionSchemaResponse -type GetIntrospectionSchemaOutput struct { - _ struct{} `type:"structure" payload:"Schema"` - - // The schema, in GraphQL Schema Definition Language (SDL) format. - // - // For more information, see the GraphQL SDL documentation (http://graphql.org/learn/schema/). - Schema []byte `locationName:"schema" type:"blob"` -} - -// String returns the string representation -func (s GetIntrospectionSchemaOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetIntrospectionSchemaOutput) GoString() string { - return s.String() -} - -// SetSchema sets the Schema field's value. 
-func (s *GetIntrospectionSchemaOutput) SetSchema(v []byte) *GetIntrospectionSchemaOutput { - s.Schema = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetResolverRequest -type GetResolverInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The resolver field name. - // - // FieldName is a required field - FieldName *string `location:"uri" locationName:"fieldName" type:"string" required:"true"` - - // The resolver type name. - // - // TypeName is a required field - TypeName *string `location:"uri" locationName:"typeName" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetResolverInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetResolverInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetResolverInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetResolverInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.FieldName == nil { - invalidParams.Add(request.NewErrParamRequired("FieldName")) - } - if s.TypeName == nil { - invalidParams.Add(request.NewErrParamRequired("TypeName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *GetResolverInput) SetApiId(v string) *GetResolverInput { - s.ApiId = &v - return s -} - -// SetFieldName sets the FieldName field's value. -func (s *GetResolverInput) SetFieldName(v string) *GetResolverInput { - s.FieldName = &v - return s -} - -// SetTypeName sets the TypeName field's value. 
-func (s *GetResolverInput) SetTypeName(v string) *GetResolverInput { - s.TypeName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetResolverResponse -type GetResolverOutput struct { - _ struct{} `type:"structure"` - - // The Resolver object. - Resolver *Resolver `locationName:"resolver" type:"structure"` -} - -// String returns the string representation -func (s GetResolverOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetResolverOutput) GoString() string { - return s.String() -} - -// SetResolver sets the Resolver field's value. -func (s *GetResolverOutput) SetResolver(v *Resolver) *GetResolverOutput { - s.Resolver = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetSchemaCreationStatusRequest -type GetSchemaCreationStatusInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetSchemaCreationStatusInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSchemaCreationStatusInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSchemaCreationStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSchemaCreationStatusInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. 
-func (s *GetSchemaCreationStatusInput) SetApiId(v string) *GetSchemaCreationStatusInput { - s.ApiId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetSchemaCreationStatusResponse -type GetSchemaCreationStatusOutput struct { - _ struct{} `type:"structure"` - - // Detailed information about the status of the schema creation operation. - Details *string `locationName:"details" type:"string"` - - // The current state of the schema (PROCESSING, ACTIVE, or DELETING). Once the - // schema is in the ACTIVE state, you can add data. - Status *string `locationName:"status" type:"string" enum:"SchemaStatus"` -} - -// String returns the string representation -func (s GetSchemaCreationStatusOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSchemaCreationStatusOutput) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. -func (s *GetSchemaCreationStatusOutput) SetDetails(v string) *GetSchemaCreationStatusOutput { - s.Details = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *GetSchemaCreationStatusOutput) SetStatus(v string) *GetSchemaCreationStatusOutput { - s.Status = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetTypeRequest -type GetTypeInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The type format: SDL or JSON. - // - // Format is a required field - Format *string `location:"querystring" locationName:"format" type:"string" required:"true" enum:"TypeDefinitionFormat"` - - // The type name. 
- // - // TypeName is a required field - TypeName *string `location:"uri" locationName:"typeName" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTypeInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - if s.TypeName == nil { - invalidParams.Add(request.NewErrParamRequired("TypeName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *GetTypeInput) SetApiId(v string) *GetTypeInput { - s.ApiId = &v - return s -} - -// SetFormat sets the Format field's value. -func (s *GetTypeInput) SetFormat(v string) *GetTypeInput { - s.Format = &v - return s -} - -// SetTypeName sets the TypeName field's value. -func (s *GetTypeInput) SetTypeName(v string) *GetTypeInput { - s.TypeName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GetTypeResponse -type GetTypeOutput struct { - _ struct{} `type:"structure"` - - // The Type object. - Type *Type `locationName:"type" type:"structure"` -} - -// String returns the string representation -func (s GetTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTypeOutput) GoString() string { - return s.String() -} - -// SetType sets the Type field's value. -func (s *GetTypeOutput) SetType(v *Type) *GetTypeOutput { - s.Type = v - return s -} - -// Describes a GraphQL API. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/GraphqlApi -type GraphqlApi struct { - _ struct{} `type:"structure"` - - // The API ID. - ApiId *string `locationName:"apiId" type:"string"` - - // The ARN. - Arn *string `locationName:"arn" type:"string"` - - // The authentication type. - AuthenticationType *string `locationName:"authenticationType" type:"string" enum:"AuthenticationType"` - - // The API name. - Name *string `locationName:"name" type:"string"` - - // The URIs. - Uris map[string]*string `locationName:"uris" type:"map"` - - // The Amazon Cognito User Pool configuration. - UserPoolConfig *UserPoolConfig `locationName:"userPoolConfig" type:"structure"` -} - -// String returns the string representation -func (s GraphqlApi) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GraphqlApi) GoString() string { - return s.String() -} - -// SetApiId sets the ApiId field's value. -func (s *GraphqlApi) SetApiId(v string) *GraphqlApi { - s.ApiId = &v - return s -} - -// SetArn sets the Arn field's value. -func (s *GraphqlApi) SetArn(v string) *GraphqlApi { - s.Arn = &v - return s -} - -// SetAuthenticationType sets the AuthenticationType field's value. -func (s *GraphqlApi) SetAuthenticationType(v string) *GraphqlApi { - s.AuthenticationType = &v - return s -} - -// SetName sets the Name field's value. -func (s *GraphqlApi) SetName(v string) *GraphqlApi { - s.Name = &v - return s -} - -// SetUris sets the Uris field's value. -func (s *GraphqlApi) SetUris(v map[string]*string) *GraphqlApi { - s.Uris = v - return s -} - -// SetUserPoolConfig sets the UserPoolConfig field's value. -func (s *GraphqlApi) SetUserPoolConfig(v *UserPoolConfig) *GraphqlApi { - s.UserPoolConfig = v - return s -} - -// Describes a Lambda data source configuration. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/LambdaDataSourceConfig -type LambdaDataSourceConfig struct { - _ struct{} `type:"structure"` - - // The ARN for the Lambda function. - // - // LambdaFunctionArn is a required field - LambdaFunctionArn *string `locationName:"lambdaFunctionArn" type:"string" required:"true"` -} - -// String returns the string representation -func (s LambdaDataSourceConfig) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LambdaDataSourceConfig) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LambdaDataSourceConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LambdaDataSourceConfig"} - if s.LambdaFunctionArn == nil { - invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. -func (s *LambdaDataSourceConfig) SetLambdaFunctionArn(v string) *LambdaDataSourceConfig { - s.LambdaFunctionArn = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListApiKeysRequest -type ListApiKeysInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The maximum number of results you want the request to return. - MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` - - // An identifier that was returned from the previous call to this operation, - // which can be used to return the next set of items in the list. 
- NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListApiKeysInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListApiKeysInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListApiKeysInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListApiKeysInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *ListApiKeysInput) SetApiId(v string) *ListApiKeysInput { - s.ApiId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListApiKeysInput) SetMaxResults(v int64) *ListApiKeysInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListApiKeysInput) SetNextToken(v string) *ListApiKeysInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListApiKeysResponse -type ListApiKeysOutput struct { - _ struct{} `type:"structure"` - - // The ApiKey objects. - ApiKeys []*ApiKey `locationName:"apiKeys" type:"list"` - - // An identifier to be passed in the next request to this operation to return - // the next set of items in the list. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListApiKeysOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListApiKeysOutput) GoString() string { - return s.String() -} - -// SetApiKeys sets the ApiKeys field's value. 
-func (s *ListApiKeysOutput) SetApiKeys(v []*ApiKey) *ListApiKeysOutput { - s.ApiKeys = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListApiKeysOutput) SetNextToken(v string) *ListApiKeysOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListDataSourcesRequest -type ListDataSourcesInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The maximum number of results you want the request to return. - MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` - - // An identifier that was returned from the previous call to this operation, - // which can be used to return the next set of items in the list. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListDataSourcesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListDataSourcesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListDataSourcesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDataSourcesInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *ListDataSourcesInput) SetApiId(v string) *ListDataSourcesInput { - s.ApiId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListDataSourcesInput) SetMaxResults(v int64) *ListDataSourcesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListDataSourcesInput) SetNextToken(v string) *ListDataSourcesInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListDataSourcesResponse -type ListDataSourcesOutput struct { - _ struct{} `type:"structure"` - - // The DataSource objects. - DataSources []*DataSource `locationName:"dataSources" type:"list"` - - // An identifier to be passed in the next request to this operation to return - // the next set of items in the list. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListDataSourcesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListDataSourcesOutput) GoString() string { - return s.String() -} - -// SetDataSources sets the DataSources field's value. -func (s *ListDataSourcesOutput) SetDataSources(v []*DataSource) *ListDataSourcesOutput { - s.DataSources = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListDataSourcesOutput) SetNextToken(v string) *ListDataSourcesOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListGraphqlApisRequest -type ListGraphqlApisInput struct { - _ struct{} `type:"structure"` - - // The maximum number of results you want the request to return. - MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` - - // An identifier that was returned from the previous call to this operation, - // which can be used to return the next set of items in the list. 
- NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListGraphqlApisInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListGraphqlApisInput) GoString() string { - return s.String() -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListGraphqlApisInput) SetMaxResults(v int64) *ListGraphqlApisInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListGraphqlApisInput) SetNextToken(v string) *ListGraphqlApisInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListGraphqlApisResponse -type ListGraphqlApisOutput struct { - _ struct{} `type:"structure"` - - // The GraphqlApi objects. - GraphqlApis []*GraphqlApi `locationName:"graphqlApis" type:"list"` - - // An identifier to be passed in the next request to this operation to return - // the next set of items in the list. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListGraphqlApisOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListGraphqlApisOutput) GoString() string { - return s.String() -} - -// SetGraphqlApis sets the GraphqlApis field's value. -func (s *ListGraphqlApisOutput) SetGraphqlApis(v []*GraphqlApi) *ListGraphqlApisOutput { - s.GraphqlApis = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListGraphqlApisOutput) SetNextToken(v string) *ListGraphqlApisOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListResolversRequest -type ListResolversInput struct { - _ struct{} `type:"structure"` - - // The API ID. 
- // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The maximum number of results you want the request to return. - MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` - - // An identifier that was returned from the previous call to this operation, - // which can be used to return the next set of items in the list. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - - // The type name. - // - // TypeName is a required field - TypeName *string `location:"uri" locationName:"typeName" type:"string" required:"true"` -} - -// String returns the string representation -func (s ListResolversInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListResolversInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListResolversInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListResolversInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.TypeName == nil { - invalidParams.Add(request.NewErrParamRequired("TypeName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *ListResolversInput) SetApiId(v string) *ListResolversInput { - s.ApiId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListResolversInput) SetMaxResults(v int64) *ListResolversInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListResolversInput) SetNextToken(v string) *ListResolversInput { - s.NextToken = &v - return s -} - -// SetTypeName sets the TypeName field's value. 
-func (s *ListResolversInput) SetTypeName(v string) *ListResolversInput { - s.TypeName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListResolversResponse -type ListResolversOutput struct { - _ struct{} `type:"structure"` - - // An identifier to be passed in the next request to this operation to return - // the next set of items in the list. - NextToken *string `locationName:"nextToken" type:"string"` - - // The Resolver objects. - Resolvers []*Resolver `locationName:"resolvers" type:"list"` -} - -// String returns the string representation -func (s ListResolversOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListResolversOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListResolversOutput) SetNextToken(v string) *ListResolversOutput { - s.NextToken = &v - return s -} - -// SetResolvers sets the Resolvers field's value. -func (s *ListResolversOutput) SetResolvers(v []*Resolver) *ListResolversOutput { - s.Resolvers = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListTypesRequest -type ListTypesInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The type format: SDL or JSON. - // - // Format is a required field - Format *string `location:"querystring" locationName:"format" type:"string" required:"true" enum:"TypeDefinitionFormat"` - - // The maximum number of results you want the request to return. - MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` - - // An identifier that was returned from the previous call to this operation, - // which can be used to return the next set of items in the list. 
- NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListTypesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTypesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListTypesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTypesInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *ListTypesInput) SetApiId(v string) *ListTypesInput { - s.ApiId = &v - return s -} - -// SetFormat sets the Format field's value. -func (s *ListTypesInput) SetFormat(v string) *ListTypesInput { - s.Format = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListTypesInput) SetMaxResults(v int64) *ListTypesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTypesInput) SetNextToken(v string) *ListTypesInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListTypesResponse -type ListTypesOutput struct { - _ struct{} `type:"structure"` - - // An identifier to be passed in the next request to this operation to return - // the next set of items in the list. - NextToken *string `locationName:"nextToken" type:"string"` - - // The Type objects. 
- Types []*Type `locationName:"types" type:"list"` -} - -// String returns the string representation -func (s ListTypesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTypesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTypesOutput) SetNextToken(v string) *ListTypesOutput { - s.NextToken = &v - return s -} - -// SetTypes sets the Types field's value. -func (s *ListTypesOutput) SetTypes(v []*Type) *ListTypesOutput { - s.Types = v - return s -} - -// Describes a resolver. -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/Resolver -type Resolver struct { - _ struct{} `type:"structure"` - - // The resolver data source name. - DataSourceName *string `locationName:"dataSourceName" type:"string"` - - // The resolver field name. - FieldName *string `locationName:"fieldName" type:"string"` - - // The request mapping template. - RequestMappingTemplate *string `locationName:"requestMappingTemplate" type:"string"` - - // The resolver ARN. - ResolverArn *string `locationName:"resolverArn" type:"string"` - - // The response mapping template. - ResponseMappingTemplate *string `locationName:"responseMappingTemplate" type:"string"` - - // The resolver type name. - TypeName *string `locationName:"typeName" type:"string"` -} - -// String returns the string representation -func (s Resolver) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Resolver) GoString() string { - return s.String() -} - -// SetDataSourceName sets the DataSourceName field's value. -func (s *Resolver) SetDataSourceName(v string) *Resolver { - s.DataSourceName = &v - return s -} - -// SetFieldName sets the FieldName field's value. -func (s *Resolver) SetFieldName(v string) *Resolver { - s.FieldName = &v - return s -} - -// SetRequestMappingTemplate sets the RequestMappingTemplate field's value. 
-func (s *Resolver) SetRequestMappingTemplate(v string) *Resolver { - s.RequestMappingTemplate = &v - return s -} - -// SetResolverArn sets the ResolverArn field's value. -func (s *Resolver) SetResolverArn(v string) *Resolver { - s.ResolverArn = &v - return s -} - -// SetResponseMappingTemplate sets the ResponseMappingTemplate field's value. -func (s *Resolver) SetResponseMappingTemplate(v string) *Resolver { - s.ResponseMappingTemplate = &v - return s -} - -// SetTypeName sets the TypeName field's value. -func (s *Resolver) SetTypeName(v string) *Resolver { - s.TypeName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/StartSchemaCreationRequest -type StartSchemaCreationInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The schema definition, in GraphQL schema language format. - // - // Definition is automatically base64 encoded/decoded by the SDK. - // - // Definition is a required field - Definition []byte `locationName:"definition" type:"blob" required:"true"` -} - -// String returns the string representation -func (s StartSchemaCreationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartSchemaCreationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StartSchemaCreationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartSchemaCreationInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Definition == nil { - invalidParams.Add(request.NewErrParamRequired("Definition")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. 
-func (s *StartSchemaCreationInput) SetApiId(v string) *StartSchemaCreationInput { - s.ApiId = &v - return s -} - -// SetDefinition sets the Definition field's value. -func (s *StartSchemaCreationInput) SetDefinition(v []byte) *StartSchemaCreationInput { - s.Definition = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/StartSchemaCreationResponse -type StartSchemaCreationOutput struct { - _ struct{} `type:"structure"` - - // The current state of the schema (PROCESSING, ACTIVE, or DELETING). Once the - // schema is in the ACTIVE state, you can add data. - Status *string `locationName:"status" type:"string" enum:"SchemaStatus"` -} - -// String returns the string representation -func (s StartSchemaCreationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartSchemaCreationOutput) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *StartSchemaCreationOutput) SetStatus(v string) *StartSchemaCreationOutput { - s.Status = &v - return s -} - -// Describes a type. -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/Type -type Type struct { - _ struct{} `type:"structure"` - - // The type ARN. - Arn *string `locationName:"arn" type:"string"` - - // The type definition. - Definition *string `locationName:"definition" type:"string"` - - // The type description. - Description *string `locationName:"description" type:"string"` - - // The type format: SDL or JSON. - Format *string `locationName:"format" type:"string" enum:"TypeDefinitionFormat"` - - // The type name. - Name *string `locationName:"name" type:"string"` -} - -// String returns the string representation -func (s Type) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Type) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. 
-func (s *Type) SetArn(v string) *Type { - s.Arn = &v - return s -} - -// SetDefinition sets the Definition field's value. -func (s *Type) SetDefinition(v string) *Type { - s.Definition = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *Type) SetDescription(v string) *Type { - s.Description = &v - return s -} - -// SetFormat sets the Format field's value. -func (s *Type) SetFormat(v string) *Type { - s.Format = &v - return s -} - -// SetName sets the Name field's value. -func (s *Type) SetName(v string) *Type { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateDataSourceRequest -type UpdateDataSourceInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The new description for the data source. - Description *string `locationName:"description" type:"string"` - - // The new DynamoDB configuration. - DynamodbConfig *DynamodbDataSourceConfig `locationName:"dynamodbConfig" type:"structure"` - - // The new Elasticsearch configuration. - ElasticsearchConfig *ElasticsearchDataSourceConfig `locationName:"elasticsearchConfig" type:"structure"` - - // The new Lambda configuration. - LambdaConfig *LambdaDataSourceConfig `locationName:"lambdaConfig" type:"structure"` - - // The new name for the data source. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` - - // The new service role ARN for the data source. - ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string"` - - // The new data source type. 
- // - // Type is a required field - Type *string `locationName:"type" type:"string" required:"true" enum:"DataSourceType"` -} - -// String returns the string representation -func (s UpdateDataSourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateDataSourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateDataSourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDataSourceInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - if s.DynamodbConfig != nil { - if err := s.DynamodbConfig.Validate(); err != nil { - invalidParams.AddNested("DynamodbConfig", err.(request.ErrInvalidParams)) - } - } - if s.ElasticsearchConfig != nil { - if err := s.ElasticsearchConfig.Validate(); err != nil { - invalidParams.AddNested("ElasticsearchConfig", err.(request.ErrInvalidParams)) - } - } - if s.LambdaConfig != nil { - if err := s.LambdaConfig.Validate(); err != nil { - invalidParams.AddNested("LambdaConfig", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *UpdateDataSourceInput) SetApiId(v string) *UpdateDataSourceInput { - s.ApiId = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *UpdateDataSourceInput) SetDescription(v string) *UpdateDataSourceInput { - s.Description = &v - return s -} - -// SetDynamodbConfig sets the DynamodbConfig field's value. 
-func (s *UpdateDataSourceInput) SetDynamodbConfig(v *DynamodbDataSourceConfig) *UpdateDataSourceInput { - s.DynamodbConfig = v - return s -} - -// SetElasticsearchConfig sets the ElasticsearchConfig field's value. -func (s *UpdateDataSourceInput) SetElasticsearchConfig(v *ElasticsearchDataSourceConfig) *UpdateDataSourceInput { - s.ElasticsearchConfig = v - return s -} - -// SetLambdaConfig sets the LambdaConfig field's value. -func (s *UpdateDataSourceInput) SetLambdaConfig(v *LambdaDataSourceConfig) *UpdateDataSourceInput { - s.LambdaConfig = v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateDataSourceInput) SetName(v string) *UpdateDataSourceInput { - s.Name = &v - return s -} - -// SetServiceRoleArn sets the ServiceRoleArn field's value. -func (s *UpdateDataSourceInput) SetServiceRoleArn(v string) *UpdateDataSourceInput { - s.ServiceRoleArn = &v - return s -} - -// SetType sets the Type field's value. -func (s *UpdateDataSourceInput) SetType(v string) *UpdateDataSourceInput { - s.Type = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateDataSourceResponse -type UpdateDataSourceOutput struct { - _ struct{} `type:"structure"` - - // The updated DataSource object. - DataSource *DataSource `locationName:"dataSource" type:"structure"` -} - -// String returns the string representation -func (s UpdateDataSourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateDataSourceOutput) GoString() string { - return s.String() -} - -// SetDataSource sets the DataSource field's value. -func (s *UpdateDataSourceOutput) SetDataSource(v *DataSource) *UpdateDataSourceOutput { - s.DataSource = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateGraphqlApiRequest -type UpdateGraphqlApiInput struct { - _ struct{} `type:"structure"` - - // The API ID. 
- // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The new authentication type for the GraphqlApi object. - AuthenticationType *string `locationName:"authenticationType" type:"string" enum:"AuthenticationType"` - - // The new name for the GraphqlApi object. - // - // Name is a required field - Name *string `locationName:"name" type:"string" required:"true"` - - // The new Amazon Cognito User Pool configuration for the GraphqlApi object. - UserPoolConfig *UserPoolConfig `locationName:"userPoolConfig" type:"structure"` -} - -// String returns the string representation -func (s UpdateGraphqlApiInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateGraphqlApiInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateGraphqlApiInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateGraphqlApiInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.UserPoolConfig != nil { - if err := s.UserPoolConfig.Validate(); err != nil { - invalidParams.AddNested("UserPoolConfig", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *UpdateGraphqlApiInput) SetApiId(v string) *UpdateGraphqlApiInput { - s.ApiId = &v - return s -} - -// SetAuthenticationType sets the AuthenticationType field's value. -func (s *UpdateGraphqlApiInput) SetAuthenticationType(v string) *UpdateGraphqlApiInput { - s.AuthenticationType = &v - return s -} - -// SetName sets the Name field's value. 
-func (s *UpdateGraphqlApiInput) SetName(v string) *UpdateGraphqlApiInput { - s.Name = &v - return s -} - -// SetUserPoolConfig sets the UserPoolConfig field's value. -func (s *UpdateGraphqlApiInput) SetUserPoolConfig(v *UserPoolConfig) *UpdateGraphqlApiInput { - s.UserPoolConfig = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateGraphqlApiResponse -type UpdateGraphqlApiOutput struct { - _ struct{} `type:"structure"` - - // The udpated GraphqlApi object. - GraphqlApi *GraphqlApi `locationName:"graphqlApi" type:"structure"` -} - -// String returns the string representation -func (s UpdateGraphqlApiOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateGraphqlApiOutput) GoString() string { - return s.String() -} - -// SetGraphqlApi sets the GraphqlApi field's value. -func (s *UpdateGraphqlApiOutput) SetGraphqlApi(v *GraphqlApi) *UpdateGraphqlApiOutput { - s.GraphqlApi = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateResolverRequest -type UpdateResolverInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The new data source name. - // - // DataSourceName is a required field - DataSourceName *string `locationName:"dataSourceName" type:"string" required:"true"` - - // The new field name. - // - // FieldName is a required field - FieldName *string `location:"uri" locationName:"fieldName" type:"string" required:"true"` - - // The new request mapping template. - // - // RequestMappingTemplate is a required field - RequestMappingTemplate *string `locationName:"requestMappingTemplate" type:"string" required:"true"` - - // The new response mapping template. - ResponseMappingTemplate *string `locationName:"responseMappingTemplate" type:"string"` - - // The new type name. 
- // - // TypeName is a required field - TypeName *string `location:"uri" locationName:"typeName" type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateResolverInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateResolverInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateResolverInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateResolverInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.DataSourceName == nil { - invalidParams.Add(request.NewErrParamRequired("DataSourceName")) - } - if s.FieldName == nil { - invalidParams.Add(request.NewErrParamRequired("FieldName")) - } - if s.RequestMappingTemplate == nil { - invalidParams.Add(request.NewErrParamRequired("RequestMappingTemplate")) - } - if s.TypeName == nil { - invalidParams.Add(request.NewErrParamRequired("TypeName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *UpdateResolverInput) SetApiId(v string) *UpdateResolverInput { - s.ApiId = &v - return s -} - -// SetDataSourceName sets the DataSourceName field's value. -func (s *UpdateResolverInput) SetDataSourceName(v string) *UpdateResolverInput { - s.DataSourceName = &v - return s -} - -// SetFieldName sets the FieldName field's value. -func (s *UpdateResolverInput) SetFieldName(v string) *UpdateResolverInput { - s.FieldName = &v - return s -} - -// SetRequestMappingTemplate sets the RequestMappingTemplate field's value. -func (s *UpdateResolverInput) SetRequestMappingTemplate(v string) *UpdateResolverInput { - s.RequestMappingTemplate = &v - return s -} - -// SetResponseMappingTemplate sets the ResponseMappingTemplate field's value. 
-func (s *UpdateResolverInput) SetResponseMappingTemplate(v string) *UpdateResolverInput { - s.ResponseMappingTemplate = &v - return s -} - -// SetTypeName sets the TypeName field's value. -func (s *UpdateResolverInput) SetTypeName(v string) *UpdateResolverInput { - s.TypeName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateResolverResponse -type UpdateResolverOutput struct { - _ struct{} `type:"structure"` - - // The updated Resolver object. - Resolver *Resolver `locationName:"resolver" type:"structure"` -} - -// String returns the string representation -func (s UpdateResolverOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateResolverOutput) GoString() string { - return s.String() -} - -// SetResolver sets the Resolver field's value. -func (s *UpdateResolverOutput) SetResolver(v *Resolver) *UpdateResolverOutput { - s.Resolver = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateTypeRequest -type UpdateTypeInput struct { - _ struct{} `type:"structure"` - - // The API ID. - // - // ApiId is a required field - ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - - // The new definition. - Definition *string `locationName:"definition" type:"string"` - - // The new type format: SDL or JSON. - // - // Format is a required field - Format *string `locationName:"format" type:"string" required:"true" enum:"TypeDefinitionFormat"` - - // The new type name. 
- // - // TypeName is a required field - TypeName *string `location:"uri" locationName:"typeName" type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateTypeInput"} - if s.ApiId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiId")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - if s.TypeName == nil { - invalidParams.Add(request.NewErrParamRequired("TypeName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApiId sets the ApiId field's value. -func (s *UpdateTypeInput) SetApiId(v string) *UpdateTypeInput { - s.ApiId = &v - return s -} - -// SetDefinition sets the Definition field's value. -func (s *UpdateTypeInput) SetDefinition(v string) *UpdateTypeInput { - s.Definition = &v - return s -} - -// SetFormat sets the Format field's value. -func (s *UpdateTypeInput) SetFormat(v string) *UpdateTypeInput { - s.Format = &v - return s -} - -// SetTypeName sets the TypeName field's value. -func (s *UpdateTypeInput) SetTypeName(v string) *UpdateTypeInput { - s.TypeName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UpdateTypeResponse -type UpdateTypeOutput struct { - _ struct{} `type:"structure"` - - // The updated Type object. 
- Type *Type `locationName:"type" type:"structure"` -} - -// String returns the string representation -func (s UpdateTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateTypeOutput) GoString() string { - return s.String() -} - -// SetType sets the Type field's value. -func (s *UpdateTypeOutput) SetType(v *Type) *UpdateTypeOutput { - s.Type = v - return s -} - -// Describes an Amazon Cognito User Pool configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/UserPoolConfig -type UserPoolConfig struct { - _ struct{} `type:"structure"` - - // A regular expression for validating the incoming Amazon Cognito User Pool - // app client ID. - AppIdClientRegex *string `locationName:"appIdClientRegex" type:"string"` - - // The AWS region in which the user pool was created. - // - // AwsRegion is a required field - AwsRegion *string `locationName:"awsRegion" type:"string" required:"true"` - - // The action that you want your GraphQL API to take when a request that uses - // Amazon Cognito User Pool authentication doesn't match the Amazon Cognito - // User Pool configuration. - // - // DefaultAction is a required field - DefaultAction *string `locationName:"defaultAction" type:"string" required:"true" enum:"DefaultAction"` - - // The user pool ID. - // - // UserPoolId is a required field - UserPoolId *string `locationName:"userPoolId" type:"string" required:"true"` -} - -// String returns the string representation -func (s UserPoolConfig) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UserPoolConfig) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UserPoolConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UserPoolConfig"} - if s.AwsRegion == nil { - invalidParams.Add(request.NewErrParamRequired("AwsRegion")) - } - if s.DefaultAction == nil { - invalidParams.Add(request.NewErrParamRequired("DefaultAction")) - } - if s.UserPoolId == nil { - invalidParams.Add(request.NewErrParamRequired("UserPoolId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAppIdClientRegex sets the AppIdClientRegex field's value. -func (s *UserPoolConfig) SetAppIdClientRegex(v string) *UserPoolConfig { - s.AppIdClientRegex = &v - return s -} - -// SetAwsRegion sets the AwsRegion field's value. -func (s *UserPoolConfig) SetAwsRegion(v string) *UserPoolConfig { - s.AwsRegion = &v - return s -} - -// SetDefaultAction sets the DefaultAction field's value. -func (s *UserPoolConfig) SetDefaultAction(v string) *UserPoolConfig { - s.DefaultAction = &v - return s -} - -// SetUserPoolId sets the UserPoolId field's value. 
-func (s *UserPoolConfig) SetUserPoolId(v string) *UserPoolConfig { - s.UserPoolId = &v - return s -} - -const ( - // AuthenticationTypeApiKey is a AuthenticationType enum value - AuthenticationTypeApiKey = "API_KEY" - - // AuthenticationTypeAwsIam is a AuthenticationType enum value - AuthenticationTypeAwsIam = "AWS_IAM" - - // AuthenticationTypeAmazonCognitoUserPools is a AuthenticationType enum value - AuthenticationTypeAmazonCognitoUserPools = "AMAZON_COGNITO_USER_POOLS" -) - -const ( - // DataSourceTypeAwsLambda is a DataSourceType enum value - DataSourceTypeAwsLambda = "AWS_LAMBDA" - - // DataSourceTypeAmazonDynamodb is a DataSourceType enum value - DataSourceTypeAmazonDynamodb = "AMAZON_DYNAMODB" - - // DataSourceTypeAmazonElasticsearch is a DataSourceType enum value - DataSourceTypeAmazonElasticsearch = "AMAZON_ELASTICSEARCH" -) - -const ( - // DefaultActionAllow is a DefaultAction enum value - DefaultActionAllow = "ALLOW" - - // DefaultActionDeny is a DefaultAction enum value - DefaultActionDeny = "DENY" -) - -const ( - // OutputTypeSdl is a OutputType enum value - OutputTypeSdl = "SDL" - - // OutputTypeJson is a OutputType enum value - OutputTypeJson = "JSON" -) - -const ( - // SchemaStatusProcessing is a SchemaStatus enum value - SchemaStatusProcessing = "PROCESSING" - - // SchemaStatusActive is a SchemaStatus enum value - SchemaStatusActive = "ACTIVE" - - // SchemaStatusDeleting is a SchemaStatus enum value - SchemaStatusDeleting = "DELETING" -) - -const ( - // TypeDefinitionFormatSdl is a TypeDefinitionFormat enum value - TypeDefinitionFormatSdl = "SDL" - - // TypeDefinitionFormatJson is a TypeDefinitionFormat enum value - TypeDefinitionFormatJson = "JSON" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/appsync/doc.go b/vendor/github.com/aws/aws-sdk-go/service/appsync/doc.go deleted file mode 100644 index 2b6bd39070a..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/appsync/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by 
private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package appsync provides the client and types for making API -// requests to AWS AppSync. -// -// AWS AppSync provides API actions for creating and interacting with data sources -// using GraphQL from your application. -// -// See https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25 for more information on this service. -// -// See appsync package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/appsync/ -// -// Using the Client -// -// To contact AWS AppSync with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS AppSync client AppSync for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/appsync/#New -package appsync diff --git a/vendor/github.com/aws/aws-sdk-go/service/appsync/errors.go b/vendor/github.com/aws/aws-sdk-go/service/appsync/errors.go deleted file mode 100644 index d69bbf0d030..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/appsync/errors.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package appsync - -const ( - - // ErrCodeApiKeyLimitExceededException for service response error code - // "ApiKeyLimitExceededException". - // - // The API key exceeded a limit. Try your request again. - ErrCodeApiKeyLimitExceededException = "ApiKeyLimitExceededException" - - // ErrCodeApiLimitExceededException for service response error code - // "ApiLimitExceededException". - // - // The GraphQL API exceeded a limit. 
Try your request again. - ErrCodeApiLimitExceededException = "ApiLimitExceededException" - - // ErrCodeBadRequestException for service response error code - // "BadRequestException". - // - // The request is not well formed. For example, a value is invalid or a required - // field is missing. Check the field values, and try again. - ErrCodeBadRequestException = "BadRequestException" - - // ErrCodeConcurrentModificationException for service response error code - // "ConcurrentModificationException". - // - // Another modification is being made. That modification must complete before - // you can make your change. - ErrCodeConcurrentModificationException = "ConcurrentModificationException" - - // ErrCodeGraphQLSchemaException for service response error code - // "GraphQLSchemaException". - // - // The GraphQL schema is not valid. - ErrCodeGraphQLSchemaException = "GraphQLSchemaException" - - // ErrCodeInternalFailureException for service response error code - // "InternalFailureException". - // - // An internal AWS AppSync error occurred. Try your request again. - ErrCodeInternalFailureException = "InternalFailureException" - - // ErrCodeLimitExceededException for service response error code - // "LimitExceededException". - // - // The request exceeded a limit. Try your request again. - ErrCodeLimitExceededException = "LimitExceededException" - - // ErrCodeNotFoundException for service response error code - // "NotFoundException". - // - // The resource specified in the request was not found. Check the resource and - // try again. - ErrCodeNotFoundException = "NotFoundException" - - // ErrCodeUnauthorizedException for service response error code - // "UnauthorizedException". - // - // You are not authorized to perform this operation. 
- ErrCodeUnauthorizedException = "UnauthorizedException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go b/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go deleted file mode 100644 index cdc4698e8e4..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go +++ /dev/null @@ -1,97 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package appsync - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/restjson" -) - -// AppSync provides the API operation methods for making requests to -// AWS AppSync. See this package's package overview docs -// for details on the service. -// -// AppSync methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type AppSync struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "appsync" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the AppSync client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a AppSync client from just a session. -// svc := appsync.New(mySession) -// -// // Create a AppSync client with additional configuration -// svc := appsync.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *AppSync { - c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AppSync { - if len(signingName) == 0 { - signingName = "appsync" - } - svc := &AppSync{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2017-07-25", - JSONVersion: "1.1", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a AppSync operation and runs any -// custom request initialization. -func (c *AppSync) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/api.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/api.go deleted file mode 100644 index 2490161b2c1..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/budgets/api.go +++ /dev/null @@ -1,3169 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package budgets - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opCreateBudget = "CreateBudget" - -// CreateBudgetRequest generates a "aws/request.Request" representing the -// client's request for the CreateBudget operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateBudget for more information on using the CreateBudget -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateBudgetRequest method. -// req, resp := client.CreateBudgetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) CreateBudgetRequest(input *CreateBudgetInput) (req *request.Request, output *CreateBudgetOutput) { - op := &request.Operation{ - Name: opCreateBudget, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateBudgetInput{} - } - - output = &CreateBudgetOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateBudget API operation for AWS Budgets. -// -// Create a new budget -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation CreateBudget for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeCreationLimitExceededException "CreationLimitExceededException" -// The exception is thrown when customer tries to create a record (e.g. budget), -// but the number this record already exceeds the limitation. -// -// * ErrCodeDuplicateRecordException "DuplicateRecordException" -// The exception is thrown when customer tries to create a record (e.g. budget) -// that already exists. -// -func (c *Budgets) CreateBudget(input *CreateBudgetInput) (*CreateBudgetOutput, error) { - req, out := c.CreateBudgetRequest(input) - return out, req.Send() -} - -// CreateBudgetWithContext is the same as CreateBudget with the addition of -// the ability to pass a context and additional request options. -// -// See CreateBudget for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) CreateBudgetWithContext(ctx aws.Context, input *CreateBudgetInput, opts ...request.Option) (*CreateBudgetOutput, error) { - req, out := c.CreateBudgetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateNotification = "CreateNotification" - -// CreateNotificationRequest generates a "aws/request.Request" representing the -// client's request for the CreateNotification operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateNotification for more information on using the CreateNotification -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateNotificationRequest method. -// req, resp := client.CreateNotificationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) CreateNotificationRequest(input *CreateNotificationInput) (req *request.Request, output *CreateNotificationOutput) { - op := &request.Operation{ - Name: opCreateNotification, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateNotificationInput{} - } - - output = &CreateNotificationOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateNotification API operation for AWS Budgets. -// -// Create a new Notification with subscribers for a budget -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation CreateNotification for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. 
E.g., if a budget -// id doesn't exist for an account ID. -// -// * ErrCodeCreationLimitExceededException "CreationLimitExceededException" -// The exception is thrown when customer tries to create a record (e.g. budget), -// but the number this record already exceeds the limitation. -// -// * ErrCodeDuplicateRecordException "DuplicateRecordException" -// The exception is thrown when customer tries to create a record (e.g. budget) -// that already exists. -// -func (c *Budgets) CreateNotification(input *CreateNotificationInput) (*CreateNotificationOutput, error) { - req, out := c.CreateNotificationRequest(input) - return out, req.Send() -} - -// CreateNotificationWithContext is the same as CreateNotification with the addition of -// the ability to pass a context and additional request options. -// -// See CreateNotification for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) CreateNotificationWithContext(ctx aws.Context, input *CreateNotificationInput, opts ...request.Option) (*CreateNotificationOutput, error) { - req, out := c.CreateNotificationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateSubscriber = "CreateSubscriber" - -// CreateSubscriberRequest generates a "aws/request.Request" representing the -// client's request for the CreateSubscriber operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See CreateSubscriber for more information on using the CreateSubscriber -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateSubscriberRequest method. -// req, resp := client.CreateSubscriberRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) CreateSubscriberRequest(input *CreateSubscriberInput) (req *request.Request, output *CreateSubscriberOutput) { - op := &request.Operation{ - Name: opCreateSubscriber, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateSubscriberInput{} - } - - output = &CreateSubscriberOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateSubscriber API operation for AWS Budgets. -// -// Create a new Subscriber for a notification -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation CreateSubscriber for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeCreationLimitExceededException "CreationLimitExceededException" -// The exception is thrown when customer tries to create a record (e.g. budget), -// but the number this record already exceeds the limitation. 
-// -// * ErrCodeDuplicateRecordException "DuplicateRecordException" -// The exception is thrown when customer tries to create a record (e.g. budget) -// that already exists. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. E.g., if a budget -// id doesn't exist for an account ID. -// -func (c *Budgets) CreateSubscriber(input *CreateSubscriberInput) (*CreateSubscriberOutput, error) { - req, out := c.CreateSubscriberRequest(input) - return out, req.Send() -} - -// CreateSubscriberWithContext is the same as CreateSubscriber with the addition of -// the ability to pass a context and additional request options. -// -// See CreateSubscriber for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) CreateSubscriberWithContext(ctx aws.Context, input *CreateSubscriberInput, opts ...request.Option) (*CreateSubscriberOutput, error) { - req, out := c.CreateSubscriberRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBudget = "DeleteBudget" - -// DeleteBudgetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBudget operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBudget for more information on using the DeleteBudget -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBudgetRequest method. -// req, resp := client.DeleteBudgetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) DeleteBudgetRequest(input *DeleteBudgetInput) (req *request.Request, output *DeleteBudgetOutput) { - op := &request.Operation{ - Name: opDeleteBudget, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteBudgetInput{} - } - - output = &DeleteBudgetOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteBudget API operation for AWS Budgets. -// -// Delete a budget and related notifications -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation DeleteBudget for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. E.g., if a budget -// id doesn't exist for an account ID. -// -func (c *Budgets) DeleteBudget(input *DeleteBudgetInput) (*DeleteBudgetOutput, error) { - req, out := c.DeleteBudgetRequest(input) - return out, req.Send() -} - -// DeleteBudgetWithContext is the same as DeleteBudget with the addition of -// the ability to pass a context and additional request options. 
-// -// See DeleteBudget for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) DeleteBudgetWithContext(ctx aws.Context, input *DeleteBudgetInput, opts ...request.Option) (*DeleteBudgetOutput, error) { - req, out := c.DeleteBudgetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteNotification = "DeleteNotification" - -// DeleteNotificationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteNotification operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteNotification for more information on using the DeleteNotification -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteNotificationRequest method. 
-// req, resp := client.DeleteNotificationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) DeleteNotificationRequest(input *DeleteNotificationInput) (req *request.Request, output *DeleteNotificationOutput) { - op := &request.Operation{ - Name: opDeleteNotification, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteNotificationInput{} - } - - output = &DeleteNotificationOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteNotification API operation for AWS Budgets. -// -// Delete a notification and related subscribers -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation DeleteNotification for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. E.g., if a budget -// id doesn't exist for an account ID. -// -func (c *Budgets) DeleteNotification(input *DeleteNotificationInput) (*DeleteNotificationOutput, error) { - req, out := c.DeleteNotificationRequest(input) - return out, req.Send() -} - -// DeleteNotificationWithContext is the same as DeleteNotification with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteNotification for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) DeleteNotificationWithContext(ctx aws.Context, input *DeleteNotificationInput, opts ...request.Option) (*DeleteNotificationOutput, error) { - req, out := c.DeleteNotificationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteSubscriber = "DeleteSubscriber" - -// DeleteSubscriberRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSubscriber operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteSubscriber for more information on using the DeleteSubscriber -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteSubscriberRequest method. -// req, resp := client.DeleteSubscriberRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) DeleteSubscriberRequest(input *DeleteSubscriberInput) (req *request.Request, output *DeleteSubscriberOutput) { - op := &request.Operation{ - Name: opDeleteSubscriber, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteSubscriberInput{} - } - - output = &DeleteSubscriberOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteSubscriber API operation for AWS Budgets. 
-// -// Delete a Subscriber for a notification -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation DeleteSubscriber for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. E.g., if a budget -// id doesn't exist for an account ID. -// -func (c *Budgets) DeleteSubscriber(input *DeleteSubscriberInput) (*DeleteSubscriberOutput, error) { - req, out := c.DeleteSubscriberRequest(input) - return out, req.Send() -} - -// DeleteSubscriberWithContext is the same as DeleteSubscriber with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteSubscriber for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) DeleteSubscriberWithContext(ctx aws.Context, input *DeleteSubscriberInput, opts ...request.Option) (*DeleteSubscriberOutput, error) { - req, out := c.DeleteSubscriberRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDescribeBudget = "DescribeBudget" - -// DescribeBudgetRequest generates a "aws/request.Request" representing the -// client's request for the DescribeBudget operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeBudget for more information on using the DescribeBudget -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeBudgetRequest method. -// req, resp := client.DescribeBudgetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) DescribeBudgetRequest(input *DescribeBudgetInput) (req *request.Request, output *DescribeBudgetOutput) { - op := &request.Operation{ - Name: opDescribeBudget, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeBudgetInput{} - } - - output = &DescribeBudgetOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeBudget API operation for AWS Budgets. -// -// Get a single budget -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation DescribeBudget for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. 
-// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. E.g., if a budget -// id doesn't exist for an account ID. -// -func (c *Budgets) DescribeBudget(input *DescribeBudgetInput) (*DescribeBudgetOutput, error) { - req, out := c.DescribeBudgetRequest(input) - return out, req.Send() -} - -// DescribeBudgetWithContext is the same as DescribeBudget with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeBudget for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) DescribeBudgetWithContext(ctx aws.Context, input *DescribeBudgetInput, opts ...request.Option) (*DescribeBudgetOutput, error) { - req, out := c.DescribeBudgetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeBudgets = "DescribeBudgets" - -// DescribeBudgetsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeBudgets operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeBudgets for more information on using the DescribeBudgets -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeBudgetsRequest method. -// req, resp := client.DescribeBudgetsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) DescribeBudgetsRequest(input *DescribeBudgetsInput) (req *request.Request, output *DescribeBudgetsOutput) { - op := &request.Operation{ - Name: opDescribeBudgets, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeBudgetsInput{} - } - - output = &DescribeBudgetsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeBudgets API operation for AWS Budgets. -// -// Get all budgets for an account -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation DescribeBudgets for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. E.g., if a budget -// id doesn't exist for an account ID. 
-// -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// This exception is thrown if paging token signature didn't match the token, -// or the paging token isn't for this request -// -// * ErrCodeExpiredNextTokenException "ExpiredNextTokenException" -// This exception is thrown if the paging token is expired - past its TTL -// -func (c *Budgets) DescribeBudgets(input *DescribeBudgetsInput) (*DescribeBudgetsOutput, error) { - req, out := c.DescribeBudgetsRequest(input) - return out, req.Send() -} - -// DescribeBudgetsWithContext is the same as DescribeBudgets with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeBudgets for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) DescribeBudgetsWithContext(ctx aws.Context, input *DescribeBudgetsInput, opts ...request.Option) (*DescribeBudgetsOutput, error) { - req, out := c.DescribeBudgetsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeNotificationsForBudget = "DescribeNotificationsForBudget" - -// DescribeNotificationsForBudgetRequest generates a "aws/request.Request" representing the -// client's request for the DescribeNotificationsForBudget operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeNotificationsForBudget for more information on using the DescribeNotificationsForBudget -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeNotificationsForBudgetRequest method. -// req, resp := client.DescribeNotificationsForBudgetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) DescribeNotificationsForBudgetRequest(input *DescribeNotificationsForBudgetInput) (req *request.Request, output *DescribeNotificationsForBudgetOutput) { - op := &request.Operation{ - Name: opDescribeNotificationsForBudget, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeNotificationsForBudgetInput{} - } - - output = &DescribeNotificationsForBudgetOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeNotificationsForBudget API operation for AWS Budgets. -// -// Get notifications of a budget -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation DescribeNotificationsForBudget for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. E.g., if a budget -// id doesn't exist for an account ID. 
-// -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// This exception is thrown if paging token signature didn't match the token, -// or the paging token isn't for this request -// -// * ErrCodeExpiredNextTokenException "ExpiredNextTokenException" -// This exception is thrown if the paging token is expired - past its TTL -// -func (c *Budgets) DescribeNotificationsForBudget(input *DescribeNotificationsForBudgetInput) (*DescribeNotificationsForBudgetOutput, error) { - req, out := c.DescribeNotificationsForBudgetRequest(input) - return out, req.Send() -} - -// DescribeNotificationsForBudgetWithContext is the same as DescribeNotificationsForBudget with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeNotificationsForBudget for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) DescribeNotificationsForBudgetWithContext(ctx aws.Context, input *DescribeNotificationsForBudgetInput, opts ...request.Option) (*DescribeNotificationsForBudgetOutput, error) { - req, out := c.DescribeNotificationsForBudgetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeSubscribersForNotification = "DescribeSubscribersForNotification" - -// DescribeSubscribersForNotificationRequest generates a "aws/request.Request" representing the -// client's request for the DescribeSubscribersForNotification operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DescribeSubscribersForNotification for more information on using the DescribeSubscribersForNotification -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeSubscribersForNotificationRequest method. -// req, resp := client.DescribeSubscribersForNotificationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) DescribeSubscribersForNotificationRequest(input *DescribeSubscribersForNotificationInput) (req *request.Request, output *DescribeSubscribersForNotificationOutput) { - op := &request.Operation{ - Name: opDescribeSubscribersForNotification, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeSubscribersForNotificationInput{} - } - - output = &DescribeSubscribersForNotificationOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeSubscribersForNotification API operation for AWS Budgets. -// -// Get subscribers of a notification -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation DescribeSubscribersForNotification for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. E.g., if a budget -// id doesn't exist for an account ID. 
-// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// This exception is thrown if paging token signature didn't match the token, -// or the paging token isn't for this request -// -// * ErrCodeExpiredNextTokenException "ExpiredNextTokenException" -// This exception is thrown if the paging token is expired - past its TTL -// -func (c *Budgets) DescribeSubscribersForNotification(input *DescribeSubscribersForNotificationInput) (*DescribeSubscribersForNotificationOutput, error) { - req, out := c.DescribeSubscribersForNotificationRequest(input) - return out, req.Send() -} - -// DescribeSubscribersForNotificationWithContext is the same as DescribeSubscribersForNotification with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeSubscribersForNotification for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) DescribeSubscribersForNotificationWithContext(ctx aws.Context, input *DescribeSubscribersForNotificationInput, opts ...request.Option) (*DescribeSubscribersForNotificationOutput, error) { - req, out := c.DescribeSubscribersForNotificationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateBudget = "UpdateBudget" - -// UpdateBudgetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateBudget operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateBudget for more information on using the UpdateBudget -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateBudgetRequest method. -// req, resp := client.UpdateBudgetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) UpdateBudgetRequest(input *UpdateBudgetInput) (req *request.Request, output *UpdateBudgetOutput) { - op := &request.Operation{ - Name: opUpdateBudget, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateBudgetInput{} - } - - output = &UpdateBudgetOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateBudget API operation for AWS Budgets. -// -// Update the information of a budget already created -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation UpdateBudget for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. E.g., if a budget -// id doesn't exist for an account ID. 
-// -func (c *Budgets) UpdateBudget(input *UpdateBudgetInput) (*UpdateBudgetOutput, error) { - req, out := c.UpdateBudgetRequest(input) - return out, req.Send() -} - -// UpdateBudgetWithContext is the same as UpdateBudget with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateBudget for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) UpdateBudgetWithContext(ctx aws.Context, input *UpdateBudgetInput, opts ...request.Option) (*UpdateBudgetOutput, error) { - req, out := c.UpdateBudgetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateNotification = "UpdateNotification" - -// UpdateNotificationRequest generates a "aws/request.Request" representing the -// client's request for the UpdateNotification operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateNotification for more information on using the UpdateNotification -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateNotificationRequest method. 
-// req, resp := client.UpdateNotificationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) UpdateNotificationRequest(input *UpdateNotificationInput) (req *request.Request, output *UpdateNotificationOutput) { - op := &request.Operation{ - Name: opUpdateNotification, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateNotificationInput{} - } - - output = &UpdateNotificationOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateNotification API operation for AWS Budgets. -// -// Update the information about a notification already created -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation UpdateNotification for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. E.g., if a budget -// id doesn't exist for an account ID. -// -// * ErrCodeDuplicateRecordException "DuplicateRecordException" -// The exception is thrown when customer tries to create a record (e.g. budget) -// that already exists. 
-// -func (c *Budgets) UpdateNotification(input *UpdateNotificationInput) (*UpdateNotificationOutput, error) { - req, out := c.UpdateNotificationRequest(input) - return out, req.Send() -} - -// UpdateNotificationWithContext is the same as UpdateNotification with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateNotification for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) UpdateNotificationWithContext(ctx aws.Context, input *UpdateNotificationInput, opts ...request.Option) (*UpdateNotificationOutput, error) { - req, out := c.UpdateNotificationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateSubscriber = "UpdateSubscriber" - -// UpdateSubscriberRequest generates a "aws/request.Request" representing the -// client's request for the UpdateSubscriber operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateSubscriber for more information on using the UpdateSubscriber -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateSubscriberRequest method. 
-// req, resp := client.UpdateSubscriberRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *Budgets) UpdateSubscriberRequest(input *UpdateSubscriberInput) (req *request.Request, output *UpdateSubscriberOutput) { - op := &request.Operation{ - Name: opUpdateSubscriber, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateSubscriberInput{} - } - - output = &UpdateSubscriberOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateSubscriber API operation for AWS Budgets. -// -// Update a subscriber -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Budgets's -// API operation UpdateSubscriber for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// This exception is thrown on an unknown internal failure. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// This exception is thrown if any request is given an invalid parameter. E.g., -// if a required Date field is null. -// -// * ErrCodeNotFoundException "NotFoundException" -// This exception is thrown if a requested entity is not found. E.g., if a budget -// id doesn't exist for an account ID. -// -// * ErrCodeDuplicateRecordException "DuplicateRecordException" -// The exception is thrown when customer tries to create a record (e.g. budget) -// that already exists. -// -func (c *Budgets) UpdateSubscriber(input *UpdateSubscriberInput) (*UpdateSubscriberOutput, error) { - req, out := c.UpdateSubscriberRequest(input) - return out, req.Send() -} - -// UpdateSubscriberWithContext is the same as UpdateSubscriber with the addition of -// the ability to pass a context and additional request options. 
-// -// See UpdateSubscriber for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Budgets) UpdateSubscriberWithContext(ctx aws.Context, input *UpdateSubscriberInput, opts ...request.Option) (*UpdateSubscriberOutput, error) { - req, out := c.UpdateSubscriberRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// AWS Budget model -type Budget struct { - _ struct{} `type:"structure"` - - // A structure that represents either a cost spend or usage spend. Contains - // an amount and a unit. - // - // BudgetLimit is a required field - BudgetLimit *Spend `type:"structure" required:"true"` - - // A string represents the budget name. No ":" and "\" character is allowed. - // - // BudgetName is a required field - BudgetName *string `type:"string" required:"true"` - - // The type of a budget. It should be COST, USAGE, or RI_UTILIZATION. - // - // BudgetType is a required field - BudgetType *string `type:"string" required:"true" enum:"BudgetType"` - - // A structure that holds the actual and forecasted spend for a budget. - CalculatedSpend *CalculatedSpend `type:"structure"` - - // A map that represents the cost filters applied to the budget. - CostFilters map[string][]*string `type:"map"` - - // This includes the options for getting the cost of a budget. - CostTypes *CostTypes `type:"structure"` - - // A time period indicating the start date and end date of a budget. - // - // TimePeriod is a required field - TimePeriod *TimePeriod `type:"structure" required:"true"` - - // The time unit of the budget. e.g. MONTHLY, QUARTERLY, etc. 
- // - // TimeUnit is a required field - TimeUnit *string `type:"string" required:"true" enum:"TimeUnit"` -} - -// String returns the string representation -func (s Budget) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Budget) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Budget) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Budget"} - if s.BudgetLimit == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetLimit")) - } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - if s.BudgetType == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetType")) - } - if s.TimePeriod == nil { - invalidParams.Add(request.NewErrParamRequired("TimePeriod")) - } - if s.TimeUnit == nil { - invalidParams.Add(request.NewErrParamRequired("TimeUnit")) - } - if s.BudgetLimit != nil { - if err := s.BudgetLimit.Validate(); err != nil { - invalidParams.AddNested("BudgetLimit", err.(request.ErrInvalidParams)) - } - } - if s.CalculatedSpend != nil { - if err := s.CalculatedSpend.Validate(); err != nil { - invalidParams.AddNested("CalculatedSpend", err.(request.ErrInvalidParams)) - } - } - if s.TimePeriod != nil { - if err := s.TimePeriod.Validate(); err != nil { - invalidParams.AddNested("TimePeriod", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBudgetLimit sets the BudgetLimit field's value. -func (s *Budget) SetBudgetLimit(v *Spend) *Budget { - s.BudgetLimit = v - return s -} - -// SetBudgetName sets the BudgetName field's value. -func (s *Budget) SetBudgetName(v string) *Budget { - s.BudgetName = &v - return s -} - -// SetBudgetType sets the BudgetType field's value. 
-func (s *Budget) SetBudgetType(v string) *Budget { - s.BudgetType = &v - return s -} - -// SetCalculatedSpend sets the CalculatedSpend field's value. -func (s *Budget) SetCalculatedSpend(v *CalculatedSpend) *Budget { - s.CalculatedSpend = v - return s -} - -// SetCostFilters sets the CostFilters field's value. -func (s *Budget) SetCostFilters(v map[string][]*string) *Budget { - s.CostFilters = v - return s -} - -// SetCostTypes sets the CostTypes field's value. -func (s *Budget) SetCostTypes(v *CostTypes) *Budget { - s.CostTypes = v - return s -} - -// SetTimePeriod sets the TimePeriod field's value. -func (s *Budget) SetTimePeriod(v *TimePeriod) *Budget { - s.TimePeriod = v - return s -} - -// SetTimeUnit sets the TimeUnit field's value. -func (s *Budget) SetTimeUnit(v string) *Budget { - s.TimeUnit = &v - return s -} - -// A structure that holds the actual and forecasted spend for a budget. -type CalculatedSpend struct { - _ struct{} `type:"structure"` - - // A structure that represents either a cost spend or usage spend. Contains - // an amount and a unit. - // - // ActualSpend is a required field - ActualSpend *Spend `type:"structure" required:"true"` - - // A structure that represents either a cost spend or usage spend. Contains - // an amount and a unit. - ForecastedSpend *Spend `type:"structure"` -} - -// String returns the string representation -func (s CalculatedSpend) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CalculatedSpend) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CalculatedSpend) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CalculatedSpend"} - if s.ActualSpend == nil { - invalidParams.Add(request.NewErrParamRequired("ActualSpend")) - } - if s.ActualSpend != nil { - if err := s.ActualSpend.Validate(); err != nil { - invalidParams.AddNested("ActualSpend", err.(request.ErrInvalidParams)) - } - } - if s.ForecastedSpend != nil { - if err := s.ForecastedSpend.Validate(); err != nil { - invalidParams.AddNested("ForecastedSpend", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetActualSpend sets the ActualSpend field's value. -func (s *CalculatedSpend) SetActualSpend(v *Spend) *CalculatedSpend { - s.ActualSpend = v - return s -} - -// SetForecastedSpend sets the ForecastedSpend field's value. -func (s *CalculatedSpend) SetForecastedSpend(v *Spend) *CalculatedSpend { - s.ForecastedSpend = v - return s -} - -// This includes the options for getting the cost of a budget. -type CostTypes struct { - _ struct{} `type:"structure"` - - // A boolean value whether to include credits in the cost budget. - IncludeCredit *bool `type:"boolean"` - - // A boolean value whether to include other subscription costs in the cost budget. - IncludeOtherSubscription *bool `type:"boolean"` - - // A boolean value whether to include recurring costs in the cost budget. - IncludeRecurring *bool `type:"boolean"` - - // A boolean value whether to include refunds in the cost budget. - IncludeRefund *bool `type:"boolean"` - - // A boolean value whether to include subscriptions in the cost budget. - IncludeSubscription *bool `type:"boolean"` - - // A boolean value whether to include support costs in the cost budget. - IncludeSupport *bool `type:"boolean"` - - // A boolean value whether to include tax in the cost budget. - IncludeTax *bool `type:"boolean"` - - // A boolean value whether to include upfront costs in the cost budget. 
- IncludeUpfront *bool `type:"boolean"` - - // A boolean value whether to use blended costs in the cost budget. - UseBlended *bool `type:"boolean"` -} - -// String returns the string representation -func (s CostTypes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CostTypes) GoString() string { - return s.String() -} - -// SetIncludeCredit sets the IncludeCredit field's value. -func (s *CostTypes) SetIncludeCredit(v bool) *CostTypes { - s.IncludeCredit = &v - return s -} - -// SetIncludeOtherSubscription sets the IncludeOtherSubscription field's value. -func (s *CostTypes) SetIncludeOtherSubscription(v bool) *CostTypes { - s.IncludeOtherSubscription = &v - return s -} - -// SetIncludeRecurring sets the IncludeRecurring field's value. -func (s *CostTypes) SetIncludeRecurring(v bool) *CostTypes { - s.IncludeRecurring = &v - return s -} - -// SetIncludeRefund sets the IncludeRefund field's value. -func (s *CostTypes) SetIncludeRefund(v bool) *CostTypes { - s.IncludeRefund = &v - return s -} - -// SetIncludeSubscription sets the IncludeSubscription field's value. -func (s *CostTypes) SetIncludeSubscription(v bool) *CostTypes { - s.IncludeSubscription = &v - return s -} - -// SetIncludeSupport sets the IncludeSupport field's value. -func (s *CostTypes) SetIncludeSupport(v bool) *CostTypes { - s.IncludeSupport = &v - return s -} - -// SetIncludeTax sets the IncludeTax field's value. -func (s *CostTypes) SetIncludeTax(v bool) *CostTypes { - s.IncludeTax = &v - return s -} - -// SetIncludeUpfront sets the IncludeUpfront field's value. -func (s *CostTypes) SetIncludeUpfront(v bool) *CostTypes { - s.IncludeUpfront = &v - return s -} - -// SetUseBlended sets the UseBlended field's value. -func (s *CostTypes) SetUseBlended(v bool) *CostTypes { - s.UseBlended = &v - return s -} - -// Request of CreateBudget -type CreateBudgetInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. 
It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // AWS Budget model - // - // Budget is a required field - Budget *Budget `type:"structure" required:"true"` - - // A list of Notifications, each with a list of subscribers. - NotificationsWithSubscribers []*NotificationWithSubscribers `type:"list"` -} - -// String returns the string representation -func (s CreateBudgetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBudgetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateBudgetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateBudgetInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.Budget == nil { - invalidParams.Add(request.NewErrParamRequired("Budget")) - } - if s.Budget != nil { - if err := s.Budget.Validate(); err != nil { - invalidParams.AddNested("Budget", err.(request.ErrInvalidParams)) - } - } - if s.NotificationsWithSubscribers != nil { - for i, v := range s.NotificationsWithSubscribers { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NotificationsWithSubscribers", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *CreateBudgetInput) SetAccountId(v string) *CreateBudgetInput { - s.AccountId = &v - return s -} - -// SetBudget sets the Budget field's value. 
-func (s *CreateBudgetInput) SetBudget(v *Budget) *CreateBudgetInput { - s.Budget = v - return s -} - -// SetNotificationsWithSubscribers sets the NotificationsWithSubscribers field's value. -func (s *CreateBudgetInput) SetNotificationsWithSubscribers(v []*NotificationWithSubscribers) *CreateBudgetInput { - s.NotificationsWithSubscribers = v - return s -} - -// Response of CreateBudget -type CreateBudgetOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateBudgetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBudgetOutput) GoString() string { - return s.String() -} - -// Request of CreateNotification -type CreateNotificationInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // A string represents the budget name. No ":" and "\" character is allowed. - // - // BudgetName is a required field - BudgetName *string `type:"string" required:"true"` - - // Notification model. Each budget may contain multiple notifications with different - // settings. - // - // Notification is a required field - Notification *Notification `type:"structure" required:"true"` - - // A list of subscribers. - // - // Subscribers is a required field - Subscribers []*Subscriber `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s CreateNotificationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateNotificationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateNotificationInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - if s.Notification == nil { - invalidParams.Add(request.NewErrParamRequired("Notification")) - } - if s.Subscribers == nil { - invalidParams.Add(request.NewErrParamRequired("Subscribers")) - } - if s.Subscribers != nil && len(s.Subscribers) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Subscribers", 1)) - } - if s.Notification != nil { - if err := s.Notification.Validate(); err != nil { - invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) - } - } - if s.Subscribers != nil { - for i, v := range s.Subscribers { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Subscribers", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *CreateNotificationInput) SetAccountId(v string) *CreateNotificationInput { - s.AccountId = &v - return s -} - -// SetBudgetName sets the BudgetName field's value. -func (s *CreateNotificationInput) SetBudgetName(v string) *CreateNotificationInput { - s.BudgetName = &v - return s -} - -// SetNotification sets the Notification field's value. -func (s *CreateNotificationInput) SetNotification(v *Notification) *CreateNotificationInput { - s.Notification = v - return s -} - -// SetSubscribers sets the Subscribers field's value. 
-func (s *CreateNotificationInput) SetSubscribers(v []*Subscriber) *CreateNotificationInput { - s.Subscribers = v - return s -} - -// Response of CreateNotification -type CreateNotificationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateNotificationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateNotificationOutput) GoString() string { - return s.String() -} - -// Request of CreateSubscriber -type CreateSubscriberInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // A string represents the budget name. No ":" and "\" character is allowed. - // - // BudgetName is a required field - BudgetName *string `type:"string" required:"true"` - - // Notification model. Each budget may contain multiple notifications with different - // settings. - // - // Notification is a required field - Notification *Notification `type:"structure" required:"true"` - - // Subscriber model. Each notification may contain multiple subscribers with - // different addresses. - // - // Subscriber is a required field - Subscriber *Subscriber `type:"structure" required:"true"` -} - -// String returns the string representation -func (s CreateSubscriberInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateSubscriberInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateSubscriberInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateSubscriberInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - if s.Notification == nil { - invalidParams.Add(request.NewErrParamRequired("Notification")) - } - if s.Subscriber == nil { - invalidParams.Add(request.NewErrParamRequired("Subscriber")) - } - if s.Notification != nil { - if err := s.Notification.Validate(); err != nil { - invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) - } - } - if s.Subscriber != nil { - if err := s.Subscriber.Validate(); err != nil { - invalidParams.AddNested("Subscriber", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *CreateSubscriberInput) SetAccountId(v string) *CreateSubscriberInput { - s.AccountId = &v - return s -} - -// SetBudgetName sets the BudgetName field's value. -func (s *CreateSubscriberInput) SetBudgetName(v string) *CreateSubscriberInput { - s.BudgetName = &v - return s -} - -// SetNotification sets the Notification field's value. -func (s *CreateSubscriberInput) SetNotification(v *Notification) *CreateSubscriberInput { - s.Notification = v - return s -} - -// SetSubscriber sets the Subscriber field's value. 
-func (s *CreateSubscriberInput) SetSubscriber(v *Subscriber) *CreateSubscriberInput { - s.Subscriber = v - return s -} - -// Response of CreateSubscriber -type CreateSubscriberOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateSubscriberOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateSubscriberOutput) GoString() string { - return s.String() -} - -// Request of DeleteBudget -type DeleteBudgetInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // A string represents the budget name. No ":" and "\" character is allowed. - // - // BudgetName is a required field - BudgetName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBudgetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBudgetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBudgetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBudgetInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *DeleteBudgetInput) SetAccountId(v string) *DeleteBudgetInput { - s.AccountId = &v - return s -} - -// SetBudgetName sets the BudgetName field's value. 
-func (s *DeleteBudgetInput) SetBudgetName(v string) *DeleteBudgetInput { - s.BudgetName = &v - return s -} - -// Response of DeleteBudget -type DeleteBudgetOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBudgetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBudgetOutput) GoString() string { - return s.String() -} - -// Request of DeleteNotification -type DeleteNotificationInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // A string represents the budget name. No ":" and "\" character is allowed. - // - // BudgetName is a required field - BudgetName *string `type:"string" required:"true"` - - // Notification model. Each budget may contain multiple notifications with different - // settings. - // - // Notification is a required field - Notification *Notification `type:"structure" required:"true"` -} - -// String returns the string representation -func (s DeleteNotificationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteNotificationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteNotificationInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - if s.Notification == nil { - invalidParams.Add(request.NewErrParamRequired("Notification")) - } - if s.Notification != nil { - if err := s.Notification.Validate(); err != nil { - invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *DeleteNotificationInput) SetAccountId(v string) *DeleteNotificationInput { - s.AccountId = &v - return s -} - -// SetBudgetName sets the BudgetName field's value. -func (s *DeleteNotificationInput) SetBudgetName(v string) *DeleteNotificationInput { - s.BudgetName = &v - return s -} - -// SetNotification sets the Notification field's value. -func (s *DeleteNotificationInput) SetNotification(v *Notification) *DeleteNotificationInput { - s.Notification = v - return s -} - -// Response of DeleteNotification -type DeleteNotificationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteNotificationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteNotificationOutput) GoString() string { - return s.String() -} - -// Request of DeleteSubscriber -type DeleteSubscriberInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // A string represents the budget name. 
No ":" and "\" character is allowed. - // - // BudgetName is a required field - BudgetName *string `type:"string" required:"true"` - - // Notification model. Each budget may contain multiple notifications with different - // settings. - // - // Notification is a required field - Notification *Notification `type:"structure" required:"true"` - - // Subscriber model. Each notification may contain multiple subscribers with - // different addresses. - // - // Subscriber is a required field - Subscriber *Subscriber `type:"structure" required:"true"` -} - -// String returns the string representation -func (s DeleteSubscriberInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSubscriberInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteSubscriberInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSubscriberInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - if s.Notification == nil { - invalidParams.Add(request.NewErrParamRequired("Notification")) - } - if s.Subscriber == nil { - invalidParams.Add(request.NewErrParamRequired("Subscriber")) - } - if s.Notification != nil { - if err := s.Notification.Validate(); err != nil { - invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) - } - } - if s.Subscriber != nil { - if err := s.Subscriber.Validate(); err != nil { - invalidParams.AddNested("Subscriber", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. 
-func (s *DeleteSubscriberInput) SetAccountId(v string) *DeleteSubscriberInput { - s.AccountId = &v - return s -} - -// SetBudgetName sets the BudgetName field's value. -func (s *DeleteSubscriberInput) SetBudgetName(v string) *DeleteSubscriberInput { - s.BudgetName = &v - return s -} - -// SetNotification sets the Notification field's value. -func (s *DeleteSubscriberInput) SetNotification(v *Notification) *DeleteSubscriberInput { - s.Notification = v - return s -} - -// SetSubscriber sets the Subscriber field's value. -func (s *DeleteSubscriberInput) SetSubscriber(v *Subscriber) *DeleteSubscriberInput { - s.Subscriber = v - return s -} - -// Response of DeleteSubscriber -type DeleteSubscriberOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteSubscriberOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSubscriberOutput) GoString() string { - return s.String() -} - -// Request of DescribeBudget -type DescribeBudgetInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // A string represents the budget name. No ":" and "\" character is allowed. - // - // BudgetName is a required field - BudgetName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeBudgetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeBudgetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeBudgetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeBudgetInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *DescribeBudgetInput) SetAccountId(v string) *DescribeBudgetInput { - s.AccountId = &v - return s -} - -// SetBudgetName sets the BudgetName field's value. -func (s *DescribeBudgetInput) SetBudgetName(v string) *DescribeBudgetInput { - s.BudgetName = &v - return s -} - -// Response of DescribeBudget -type DescribeBudgetOutput struct { - _ struct{} `type:"structure"` - - // AWS Budget model - Budget *Budget `type:"structure"` -} - -// String returns the string representation -func (s DescribeBudgetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeBudgetOutput) GoString() string { - return s.String() -} - -// SetBudget sets the Budget field's value. -func (s *DescribeBudgetOutput) SetBudget(v *Budget) *DescribeBudgetOutput { - s.Budget = v - return s -} - -// Request of DescribeBudgets -type DescribeBudgetsInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // An integer to represent how many entries a paginated response contains. Maximum - // is set to 100. - MaxResults *int64 `min:"1" type:"integer"` - - // A generic String. 
- NextToken *string `type:"string"` -} - -// String returns the string representation -func (s DescribeBudgetsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeBudgetsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeBudgetsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeBudgetsInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *DescribeBudgetsInput) SetAccountId(v string) *DescribeBudgetsInput { - s.AccountId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeBudgetsInput) SetMaxResults(v int64) *DescribeBudgetsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeBudgetsInput) SetNextToken(v string) *DescribeBudgetsInput { - s.NextToken = &v - return s -} - -// Response of DescribeBudgets -type DescribeBudgetsOutput struct { - _ struct{} `type:"structure"` - - // A list of budgets - Budgets []*Budget `type:"list"` - - // A generic String. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s DescribeBudgetsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeBudgetsOutput) GoString() string { - return s.String() -} - -// SetBudgets sets the Budgets field's value. 
-func (s *DescribeBudgetsOutput) SetBudgets(v []*Budget) *DescribeBudgetsOutput { - s.Budgets = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeBudgetsOutput) SetNextToken(v string) *DescribeBudgetsOutput { - s.NextToken = &v - return s -} - -// Request of DescribeNotificationsForBudget -type DescribeNotificationsForBudgetInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // A string represents the budget name. No ":" and "\" character is allowed. - // - // BudgetName is a required field - BudgetName *string `type:"string" required:"true"` - - // An integer to represent how many entries a paginated response contains. Maximum - // is set to 100. - MaxResults *int64 `min:"1" type:"integer"` - - // A generic String. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s DescribeNotificationsForBudgetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeNotificationsForBudgetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeNotificationsForBudgetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeNotificationsForBudgetInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *DescribeNotificationsForBudgetInput) SetAccountId(v string) *DescribeNotificationsForBudgetInput { - s.AccountId = &v - return s -} - -// SetBudgetName sets the BudgetName field's value. -func (s *DescribeNotificationsForBudgetInput) SetBudgetName(v string) *DescribeNotificationsForBudgetInput { - s.BudgetName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeNotificationsForBudgetInput) SetMaxResults(v int64) *DescribeNotificationsForBudgetInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeNotificationsForBudgetInput) SetNextToken(v string) *DescribeNotificationsForBudgetInput { - s.NextToken = &v - return s -} - -// Response of GetNotificationsForBudget -type DescribeNotificationsForBudgetOutput struct { - _ struct{} `type:"structure"` - - // A generic String. - NextToken *string `type:"string"` - - // A list of notifications. 
- Notifications []*Notification `type:"list"` -} - -// String returns the string representation -func (s DescribeNotificationsForBudgetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeNotificationsForBudgetOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeNotificationsForBudgetOutput) SetNextToken(v string) *DescribeNotificationsForBudgetOutput { - s.NextToken = &v - return s -} - -// SetNotifications sets the Notifications field's value. -func (s *DescribeNotificationsForBudgetOutput) SetNotifications(v []*Notification) *DescribeNotificationsForBudgetOutput { - s.Notifications = v - return s -} - -// Request of DescribeSubscribersForNotification -type DescribeSubscribersForNotificationInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // A string represents the budget name. No ":" and "\" character is allowed. - // - // BudgetName is a required field - BudgetName *string `type:"string" required:"true"` - - // An integer to represent how many entries a paginated response contains. Maximum - // is set to 100. - MaxResults *int64 `min:"1" type:"integer"` - - // A generic String. - NextToken *string `type:"string"` - - // Notification model. Each budget may contain multiple notifications with different - // settings. 
- // - // Notification is a required field - Notification *Notification `type:"structure" required:"true"` -} - -// String returns the string representation -func (s DescribeSubscribersForNotificationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeSubscribersForNotificationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeSubscribersForNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeSubscribersForNotificationInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.Notification == nil { - invalidParams.Add(request.NewErrParamRequired("Notification")) - } - if s.Notification != nil { - if err := s.Notification.Validate(); err != nil { - invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *DescribeSubscribersForNotificationInput) SetAccountId(v string) *DescribeSubscribersForNotificationInput { - s.AccountId = &v - return s -} - -// SetBudgetName sets the BudgetName field's value. -func (s *DescribeSubscribersForNotificationInput) SetBudgetName(v string) *DescribeSubscribersForNotificationInput { - s.BudgetName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. 
-func (s *DescribeSubscribersForNotificationInput) SetMaxResults(v int64) *DescribeSubscribersForNotificationInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeSubscribersForNotificationInput) SetNextToken(v string) *DescribeSubscribersForNotificationInput { - s.NextToken = &v - return s -} - -// SetNotification sets the Notification field's value. -func (s *DescribeSubscribersForNotificationInput) SetNotification(v *Notification) *DescribeSubscribersForNotificationInput { - s.Notification = v - return s -} - -// Response of DescribeSubscribersForNotification -type DescribeSubscribersForNotificationOutput struct { - _ struct{} `type:"structure"` - - // A generic String. - NextToken *string `type:"string"` - - // A list of subscribers. - Subscribers []*Subscriber `min:"1" type:"list"` -} - -// String returns the string representation -func (s DescribeSubscribersForNotificationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeSubscribersForNotificationOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeSubscribersForNotificationOutput) SetNextToken(v string) *DescribeSubscribersForNotificationOutput { - s.NextToken = &v - return s -} - -// SetSubscribers sets the Subscribers field's value. -func (s *DescribeSubscribersForNotificationOutput) SetSubscribers(v []*Subscriber) *DescribeSubscribersForNotificationOutput { - s.Subscribers = v - return s -} - -// Notification model. Each budget may contain multiple notifications with different -// settings. -type Notification struct { - _ struct{} `type:"structure"` - - // The comparison operator of a notification. Currently we support less than, - // equal to and greater than. 
- // - // ComparisonOperator is a required field - ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` - - // The type of a notification. It should be ACTUAL or FORECASTED. - // - // NotificationType is a required field - NotificationType *string `type:"string" required:"true" enum:"NotificationType"` - - // The threshold of a notification. It should be a number between 0 and 1,000,000,000. - // - // Threshold is a required field - Threshold *float64 `min:"0.1" type:"double" required:"true"` - - // The type of threshold for a notification. It can be PERCENTAGE or ABSOLUTE_VALUE. - ThresholdType *string `type:"string" enum:"ThresholdType"` -} - -// String returns the string representation -func (s Notification) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Notification) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Notification) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Notification"} - if s.ComparisonOperator == nil { - invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) - } - if s.NotificationType == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationType")) - } - if s.Threshold == nil { - invalidParams.Add(request.NewErrParamRequired("Threshold")) - } - if s.Threshold != nil && *s.Threshold < 0.1 { - invalidParams.Add(request.NewErrParamMinValue("Threshold", 0.1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetComparisonOperator sets the ComparisonOperator field's value. -func (s *Notification) SetComparisonOperator(v string) *Notification { - s.ComparisonOperator = &v - return s -} - -// SetNotificationType sets the NotificationType field's value. 
-func (s *Notification) SetNotificationType(v string) *Notification { - s.NotificationType = &v - return s -} - -// SetThreshold sets the Threshold field's value. -func (s *Notification) SetThreshold(v float64) *Notification { - s.Threshold = &v - return s -} - -// SetThresholdType sets the ThresholdType field's value. -func (s *Notification) SetThresholdType(v string) *Notification { - s.ThresholdType = &v - return s -} - -// A structure to relate notification and a list of subscribers who belong to -// the notification. -type NotificationWithSubscribers struct { - _ struct{} `type:"structure"` - - // Notification model. Each budget may contain multiple notifications with different - // settings. - // - // Notification is a required field - Notification *Notification `type:"structure" required:"true"` - - // A list of subscribers. - // - // Subscribers is a required field - Subscribers []*Subscriber `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s NotificationWithSubscribers) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationWithSubscribers) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *NotificationWithSubscribers) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "NotificationWithSubscribers"} - if s.Notification == nil { - invalidParams.Add(request.NewErrParamRequired("Notification")) - } - if s.Subscribers == nil { - invalidParams.Add(request.NewErrParamRequired("Subscribers")) - } - if s.Subscribers != nil && len(s.Subscribers) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Subscribers", 1)) - } - if s.Notification != nil { - if err := s.Notification.Validate(); err != nil { - invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) - } - } - if s.Subscribers != nil { - for i, v := range s.Subscribers { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Subscribers", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNotification sets the Notification field's value. -func (s *NotificationWithSubscribers) SetNotification(v *Notification) *NotificationWithSubscribers { - s.Notification = v - return s -} - -// SetSubscribers sets the Subscribers field's value. -func (s *NotificationWithSubscribers) SetSubscribers(v []*Subscriber) *NotificationWithSubscribers { - s.Subscribers = v - return s -} - -// A structure that represents either a cost spend or usage spend. Contains -// an amount and a unit. -type Spend struct { - _ struct{} `type:"structure"` - - // A string to represent NumericValue. - // - // Amount is a required field - Amount *string `type:"string" required:"true"` - - // A string to represent budget spend unit. It should be not null and not empty. 
- // - // Unit is a required field - Unit *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s Spend) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Spend) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Spend) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Spend"} - if s.Amount == nil { - invalidParams.Add(request.NewErrParamRequired("Amount")) - } - if s.Unit == nil { - invalidParams.Add(request.NewErrParamRequired("Unit")) - } - if s.Unit != nil && len(*s.Unit) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Unit", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAmount sets the Amount field's value. -func (s *Spend) SetAmount(v string) *Spend { - s.Amount = &v - return s -} - -// SetUnit sets the Unit field's value. -func (s *Spend) SetUnit(v string) *Spend { - s.Unit = &v - return s -} - -// Subscriber model. Each notification may contain multiple subscribers with -// different addresses. -type Subscriber struct { - _ struct{} `type:"structure"` - - // String containing email or sns topic for the subscriber address. - // - // Address is a required field - Address *string `min:"1" type:"string" required:"true"` - - // The subscription type of the subscriber. It can be SMS or EMAIL. - // - // SubscriptionType is a required field - SubscriptionType *string `type:"string" required:"true" enum:"SubscriptionType"` -} - -// String returns the string representation -func (s Subscriber) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Subscriber) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *Subscriber) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Subscriber"} - if s.Address == nil { - invalidParams.Add(request.NewErrParamRequired("Address")) - } - if s.Address != nil && len(*s.Address) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Address", 1)) - } - if s.SubscriptionType == nil { - invalidParams.Add(request.NewErrParamRequired("SubscriptionType")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAddress sets the Address field's value. -func (s *Subscriber) SetAddress(v string) *Subscriber { - s.Address = &v - return s -} - -// SetSubscriptionType sets the SubscriptionType field's value. -func (s *Subscriber) SetSubscriptionType(v string) *Subscriber { - s.SubscriptionType = &v - return s -} - -// A time period indicating the start date and end date of a budget. -type TimePeriod struct { - _ struct{} `type:"structure"` - - // A generic timestamp. In Java it is transformed to a Date object. - // - // End is a required field - End *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - - // A generic timestamp. In Java it is transformed to a Date object. - // - // Start is a required field - Start *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` -} - -// String returns the string representation -func (s TimePeriod) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TimePeriod) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TimePeriod) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TimePeriod"} - if s.End == nil { - invalidParams.Add(request.NewErrParamRequired("End")) - } - if s.Start == nil { - invalidParams.Add(request.NewErrParamRequired("Start")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEnd sets the End field's value. 
-func (s *TimePeriod) SetEnd(v time.Time) *TimePeriod { - s.End = &v - return s -} - -// SetStart sets the Start field's value. -func (s *TimePeriod) SetStart(v time.Time) *TimePeriod { - s.Start = &v - return s -} - -// Request of UpdateBudget -type UpdateBudgetInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // AWS Budget model - // - // NewBudget is a required field - NewBudget *Budget `type:"structure" required:"true"` -} - -// String returns the string representation -func (s UpdateBudgetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateBudgetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateBudgetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateBudgetInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.NewBudget == nil { - invalidParams.Add(request.NewErrParamRequired("NewBudget")) - } - if s.NewBudget != nil { - if err := s.NewBudget.Validate(); err != nil { - invalidParams.AddNested("NewBudget", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *UpdateBudgetInput) SetAccountId(v string) *UpdateBudgetInput { - s.AccountId = &v - return s -} - -// SetNewBudget sets the NewBudget field's value. 
-func (s *UpdateBudgetInput) SetNewBudget(v *Budget) *UpdateBudgetInput { - s.NewBudget = v - return s -} - -// Response of UpdateBudget -type UpdateBudgetOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateBudgetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateBudgetOutput) GoString() string { - return s.String() -} - -// Request of UpdateNotification -type UpdateNotificationInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // A string represents the budget name. No ":" and "\" character is allowed. - // - // BudgetName is a required field - BudgetName *string `type:"string" required:"true"` - - // Notification model. Each budget may contain multiple notifications with different - // settings. - // - // NewNotification is a required field - NewNotification *Notification `type:"structure" required:"true"` - - // Notification model. Each budget may contain multiple notifications with different - // settings. - // - // OldNotification is a required field - OldNotification *Notification `type:"structure" required:"true"` -} - -// String returns the string representation -func (s UpdateNotificationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateNotificationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateNotificationInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - if s.NewNotification == nil { - invalidParams.Add(request.NewErrParamRequired("NewNotification")) - } - if s.OldNotification == nil { - invalidParams.Add(request.NewErrParamRequired("OldNotification")) - } - if s.NewNotification != nil { - if err := s.NewNotification.Validate(); err != nil { - invalidParams.AddNested("NewNotification", err.(request.ErrInvalidParams)) - } - } - if s.OldNotification != nil { - if err := s.OldNotification.Validate(); err != nil { - invalidParams.AddNested("OldNotification", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *UpdateNotificationInput) SetAccountId(v string) *UpdateNotificationInput { - s.AccountId = &v - return s -} - -// SetBudgetName sets the BudgetName field's value. -func (s *UpdateNotificationInput) SetBudgetName(v string) *UpdateNotificationInput { - s.BudgetName = &v - return s -} - -// SetNewNotification sets the NewNotification field's value. -func (s *UpdateNotificationInput) SetNewNotification(v *Notification) *UpdateNotificationInput { - s.NewNotification = v - return s -} - -// SetOldNotification sets the OldNotification field's value. 
-func (s *UpdateNotificationInput) SetOldNotification(v *Notification) *UpdateNotificationInput { - s.OldNotification = v - return s -} - -// Response of UpdateNotification -type UpdateNotificationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateNotificationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateNotificationOutput) GoString() string { - return s.String() -} - -// Request of UpdateSubscriber -type UpdateSubscriberInput struct { - _ struct{} `type:"structure"` - - // Account Id of the customer. It should be a 12 digit number. - // - // AccountId is a required field - AccountId *string `min:"12" type:"string" required:"true"` - - // A string represents the budget name. No ":" and "\" character is allowed. - // - // BudgetName is a required field - BudgetName *string `type:"string" required:"true"` - - // Subscriber model. Each notification may contain multiple subscribers with - // different addresses. - // - // NewSubscriber is a required field - NewSubscriber *Subscriber `type:"structure" required:"true"` - - // Notification model. Each budget may contain multiple notifications with different - // settings. - // - // Notification is a required field - Notification *Notification `type:"structure" required:"true"` - - // Subscriber model. Each notification may contain multiple subscribers with - // different addresses. - // - // OldSubscriber is a required field - OldSubscriber *Subscriber `type:"structure" required:"true"` -} - -// String returns the string representation -func (s UpdateSubscriberInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateSubscriberInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateSubscriberInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateSubscriberInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) - } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - if s.NewSubscriber == nil { - invalidParams.Add(request.NewErrParamRequired("NewSubscriber")) - } - if s.Notification == nil { - invalidParams.Add(request.NewErrParamRequired("Notification")) - } - if s.OldSubscriber == nil { - invalidParams.Add(request.NewErrParamRequired("OldSubscriber")) - } - if s.NewSubscriber != nil { - if err := s.NewSubscriber.Validate(); err != nil { - invalidParams.AddNested("NewSubscriber", err.(request.ErrInvalidParams)) - } - } - if s.Notification != nil { - if err := s.Notification.Validate(); err != nil { - invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) - } - } - if s.OldSubscriber != nil { - if err := s.OldSubscriber.Validate(); err != nil { - invalidParams.AddNested("OldSubscriber", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *UpdateSubscriberInput) SetAccountId(v string) *UpdateSubscriberInput { - s.AccountId = &v - return s -} - -// SetBudgetName sets the BudgetName field's value. -func (s *UpdateSubscriberInput) SetBudgetName(v string) *UpdateSubscriberInput { - s.BudgetName = &v - return s -} - -// SetNewSubscriber sets the NewSubscriber field's value. -func (s *UpdateSubscriberInput) SetNewSubscriber(v *Subscriber) *UpdateSubscriberInput { - s.NewSubscriber = v - return s -} - -// SetNotification sets the Notification field's value. 
-func (s *UpdateSubscriberInput) SetNotification(v *Notification) *UpdateSubscriberInput { - s.Notification = v - return s -} - -// SetOldSubscriber sets the OldSubscriber field's value. -func (s *UpdateSubscriberInput) SetOldSubscriber(v *Subscriber) *UpdateSubscriberInput { - s.OldSubscriber = v - return s -} - -// Response of UpdateSubscriber -type UpdateSubscriberOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateSubscriberOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateSubscriberOutput) GoString() string { - return s.String() -} - -// The type of a budget. It should be COST, USAGE, or RI_UTILIZATION. -const ( - // BudgetTypeUsage is a BudgetType enum value - BudgetTypeUsage = "USAGE" - - // BudgetTypeCost is a BudgetType enum value - BudgetTypeCost = "COST" - - // BudgetTypeRiUtilization is a BudgetType enum value - BudgetTypeRiUtilization = "RI_UTILIZATION" -) - -// The comparison operator of a notification. Currently we support less than, -// equal to and greater than. -const ( - // ComparisonOperatorGreaterThan is a ComparisonOperator enum value - ComparisonOperatorGreaterThan = "GREATER_THAN" - - // ComparisonOperatorLessThan is a ComparisonOperator enum value - ComparisonOperatorLessThan = "LESS_THAN" - - // ComparisonOperatorEqualTo is a ComparisonOperator enum value - ComparisonOperatorEqualTo = "EQUAL_TO" -) - -// The type of a notification. It should be ACTUAL or FORECASTED. -const ( - // NotificationTypeActual is a NotificationType enum value - NotificationTypeActual = "ACTUAL" - - // NotificationTypeForecasted is a NotificationType enum value - NotificationTypeForecasted = "FORECASTED" -) - -// The subscription type of the subscriber. It can be SMS or EMAIL. 
-const ( - // SubscriptionTypeSns is a SubscriptionType enum value - SubscriptionTypeSns = "SNS" - - // SubscriptionTypeEmail is a SubscriptionType enum value - SubscriptionTypeEmail = "EMAIL" -) - -// The type of threshold for a notification. It can be PERCENTAGE or ABSOLUTE_VALUE. -const ( - // ThresholdTypePercentage is a ThresholdType enum value - ThresholdTypePercentage = "PERCENTAGE" - - // ThresholdTypeAbsoluteValue is a ThresholdType enum value - ThresholdTypeAbsoluteValue = "ABSOLUTE_VALUE" -) - -// The time unit of the budget. e.g. MONTHLY, QUARTERLY, etc. -const ( - // TimeUnitDaily is a TimeUnit enum value - TimeUnitDaily = "DAILY" - - // TimeUnitMonthly is a TimeUnit enum value - TimeUnitMonthly = "MONTHLY" - - // TimeUnitQuarterly is a TimeUnit enum value - TimeUnitQuarterly = "QUARTERLY" - - // TimeUnitAnnually is a TimeUnit enum value - TimeUnitAnnually = "ANNUALLY" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/doc.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/doc.go deleted file mode 100644 index 76973fb821f..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/budgets/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package budgets provides the client and types for making API -// requests to AWS Budgets. -// -// All public APIs for AWS Budgets -// -// See budgets package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/budgets/ -// -// Using the Client -// -// To contact AWS Budgets with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. 
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Budgets client Budgets for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/budgets/#New -package budgets diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go deleted file mode 100644 index 8a0366d6a2e..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go +++ /dev/null @@ -1,53 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package budgets - -const ( - - // ErrCodeCreationLimitExceededException for service response error code - // "CreationLimitExceededException". - // - // The exception is thrown when customer tries to create a record (e.g. budget), - // but the number this record already exceeds the limitation. - ErrCodeCreationLimitExceededException = "CreationLimitExceededException" - - // ErrCodeDuplicateRecordException for service response error code - // "DuplicateRecordException". - // - // The exception is thrown when customer tries to create a record (e.g. budget) - // that already exists. - ErrCodeDuplicateRecordException = "DuplicateRecordException" - - // ErrCodeExpiredNextTokenException for service response error code - // "ExpiredNextTokenException". - // - // This exception is thrown if the paging token is expired - past its TTL - ErrCodeExpiredNextTokenException = "ExpiredNextTokenException" - - // ErrCodeInternalErrorException for service response error code - // "InternalErrorException". - // - // This exception is thrown on an unknown internal failure. - ErrCodeInternalErrorException = "InternalErrorException" - - // ErrCodeInvalidNextTokenException for service response error code - // "InvalidNextTokenException". 
- // - // This exception is thrown if paging token signature didn't match the token, - // or the paging token isn't for this request - ErrCodeInvalidNextTokenException = "InvalidNextTokenException" - - // ErrCodeInvalidParameterException for service response error code - // "InvalidParameterException". - // - // This exception is thrown if any request is given an invalid parameter. E.g., - // if a required Date field is null. - ErrCodeInvalidParameterException = "InvalidParameterException" - - // ErrCodeNotFoundException for service response error code - // "NotFoundException". - // - // This exception is thrown if a requested entity is not found. E.g., if a budget - // id doesn't exist for an account ID. - ErrCodeNotFoundException = "NotFoundException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go deleted file mode 100644 index d74d9c96a55..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package budgets - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// Budgets provides the API operation methods for making requests to -// AWS Budgets. See this package's package overview docs -// for details on the service. -// -// Budgets methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. 
-type Budgets struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "budgets" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the Budgets client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a Budgets client from just a session. -// svc := budgets.New(mySession) -// -// // Create a Budgets client with additional configuration -// svc := budgets.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *Budgets { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Budgets { - svc := &Budgets{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2016-10-20", - JSONVersion: "1.1", - TargetPrefix: "AWSBudgetServiceGateway", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a Budgets operation and runs any -// custom request initialization. -func (c *Budgets) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/api.go b/vendor/github.com/aws/aws-sdk-go/service/dax/api.go deleted file mode 100644 index 31fdef87fd9..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dax/api.go +++ /dev/null @@ -1,4677 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package dax - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opCreateCluster = "CreateCluster" - -// CreateClusterRequest generates a "aws/request.Request" representing the -// client's request for the CreateCluster operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateCluster for more information on using the CreateCluster -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateClusterRequest method. -// req, resp := client.CreateClusterRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateCluster -func (c *DAX) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) { - op := &request.Operation{ - Name: opCreateCluster, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateClusterInput{} - } - - output = &CreateClusterOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateCluster API operation for Amazon DynamoDB Accelerator (DAX). -// -// Creates a DAX cluster. All nodes in the cluster run the same DAX caching -// software. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation CreateCluster for usage and error information. -// -// Returned Error Codes: -// * ErrCodeClusterAlreadyExistsFault "ClusterAlreadyExistsFault" -// You already have a DAX cluster with the given identifier. 
-// -// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" -// The requested DAX cluster is not in the available state. -// -// * ErrCodeInsufficientClusterCapacityFault "InsufficientClusterCapacityFault" -// There are not enough system resources to create the cluster you requested -// (or to resize an already-existing cluster). -// -// * ErrCodeSubnetGroupNotFoundFault "SubnetGroupNotFoundFault" -// The requested subnet group name does not refer to an existing subnet group. -// -// * ErrCodeInvalidParameterGroupStateFault "InvalidParameterGroupStateFault" -// One or more parameters in a parameter group are in an invalid state. -// -// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" -// The specified parameter group does not exist. -// -// * ErrCodeClusterQuotaForCustomerExceededFault "ClusterQuotaForCustomerExceededFault" -// You have attempted to exceed the maximum number of DAX clusters for your -// AWS account. -// -// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceededFault" -// You have attempted to exceed the maximum number of nodes for a DAX cluster. -// -// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceededFault" -// You have attempted to exceed the maximum number of nodes for your AWS account. -// -// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" -// The VPC network is in an invalid state. -// -// * ErrCodeTagQuotaPerResourceExceeded "TagQuotaPerResourceExceeded" -// You have exceeded the maximum number of tags for this DAX cluster. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateCluster -func (c *DAX) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, error) { - req, out := c.CreateClusterRequest(input) - return out, req.Send() -} - -// CreateClusterWithContext is the same as CreateCluster with the addition of -// the ability to pass a context and additional request options. -// -// See CreateCluster for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) CreateClusterWithContext(ctx aws.Context, input *CreateClusterInput, opts ...request.Option) (*CreateClusterOutput, error) { - req, out := c.CreateClusterRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateParameterGroup = "CreateParameterGroup" - -// CreateParameterGroupRequest generates a "aws/request.Request" representing the -// client's request for the CreateParameterGroup operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateParameterGroup for more information on using the CreateParameterGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateParameterGroupRequest method. 
-// req, resp := client.CreateParameterGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateParameterGroup -func (c *DAX) CreateParameterGroupRequest(input *CreateParameterGroupInput) (req *request.Request, output *CreateParameterGroupOutput) { - op := &request.Operation{ - Name: opCreateParameterGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateParameterGroupInput{} - } - - output = &CreateParameterGroupOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateParameterGroup API operation for Amazon DynamoDB Accelerator (DAX). -// -// Creates a new parameter group. A parameter group is a collection of parameters -// that you apply to all of the nodes in a DAX cluster. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation CreateParameterGroup for usage and error information. -// -// Returned Error Codes: -// * ErrCodeParameterGroupQuotaExceededFault "ParameterGroupQuotaExceededFault" -// You have attempted to exceed the maximum number of parameter groups. -// -// * ErrCodeParameterGroupAlreadyExistsFault "ParameterGroupAlreadyExistsFault" -// The specified parameter group already exists. -// -// * ErrCodeInvalidParameterGroupStateFault "InvalidParameterGroupStateFault" -// One or more parameters in a parameter group are in an invalid state. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateParameterGroup -func (c *DAX) CreateParameterGroup(input *CreateParameterGroupInput) (*CreateParameterGroupOutput, error) { - req, out := c.CreateParameterGroupRequest(input) - return out, req.Send() -} - -// CreateParameterGroupWithContext is the same as CreateParameterGroup with the addition of -// the ability to pass a context and additional request options. -// -// See CreateParameterGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) CreateParameterGroupWithContext(ctx aws.Context, input *CreateParameterGroupInput, opts ...request.Option) (*CreateParameterGroupOutput, error) { - req, out := c.CreateParameterGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateSubnetGroup = "CreateSubnetGroup" - -// CreateSubnetGroupRequest generates a "aws/request.Request" representing the -// client's request for the CreateSubnetGroup operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateSubnetGroup for more information on using the CreateSubnetGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateSubnetGroupRequest method. 
-// req, resp := client.CreateSubnetGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateSubnetGroup -func (c *DAX) CreateSubnetGroupRequest(input *CreateSubnetGroupInput) (req *request.Request, output *CreateSubnetGroupOutput) { - op := &request.Operation{ - Name: opCreateSubnetGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateSubnetGroupInput{} - } - - output = &CreateSubnetGroupOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateSubnetGroup API operation for Amazon DynamoDB Accelerator (DAX). -// -// Creates a new subnet group. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation CreateSubnetGroup for usage and error information. -// -// Returned Error Codes: -// * ErrCodeSubnetGroupAlreadyExistsFault "SubnetGroupAlreadyExistsFault" -// The specified subnet group already exists. -// -// * ErrCodeSubnetGroupQuotaExceededFault "SubnetGroupQuotaExceededFault" -// The request cannot be processed because it would exceed the allowed number -// of subnets in a subnet group. -// -// * ErrCodeSubnetQuotaExceededFault "SubnetQuotaExceededFault" -// The request cannot be processed because it would exceed the allowed number -// of subnets in a subnet group. -// -// * ErrCodeInvalidSubnet "InvalidSubnet" -// An invalid subnet identifier was specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateSubnetGroup -func (c *DAX) CreateSubnetGroup(input *CreateSubnetGroupInput) (*CreateSubnetGroupOutput, error) { - req, out := c.CreateSubnetGroupRequest(input) - return out, req.Send() -} - -// CreateSubnetGroupWithContext is the same as CreateSubnetGroup with the addition of -// the ability to pass a context and additional request options. -// -// See CreateSubnetGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) CreateSubnetGroupWithContext(ctx aws.Context, input *CreateSubnetGroupInput, opts ...request.Option) (*CreateSubnetGroupOutput, error) { - req, out := c.CreateSubnetGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDecreaseReplicationFactor = "DecreaseReplicationFactor" - -// DecreaseReplicationFactorRequest generates a "aws/request.Request" representing the -// client's request for the DecreaseReplicationFactor operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DecreaseReplicationFactor for more information on using the DecreaseReplicationFactor -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DecreaseReplicationFactorRequest method. 
-// req, resp := client.DecreaseReplicationFactorRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DecreaseReplicationFactor -func (c *DAX) DecreaseReplicationFactorRequest(input *DecreaseReplicationFactorInput) (req *request.Request, output *DecreaseReplicationFactorOutput) { - op := &request.Operation{ - Name: opDecreaseReplicationFactor, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DecreaseReplicationFactorInput{} - } - - output = &DecreaseReplicationFactorOutput{} - req = c.newRequest(op, input, output) - return -} - -// DecreaseReplicationFactor API operation for Amazon DynamoDB Accelerator (DAX). -// -// Removes one or more nodes from a DAX cluster. -// -// You cannot use DecreaseReplicationFactor to remove the last node in a DAX -// cluster. If you need to do this, use DeleteCluster instead. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation DecreaseReplicationFactor for usage and error information. -// -// Returned Error Codes: -// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" -// The requested cluster ID does not refer to an existing DAX cluster. -// -// * ErrCodeNodeNotFoundFault "NodeNotFoundFault" -// None of the nodes in the cluster have the given node ID. -// -// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" -// The requested DAX cluster is not in the available state. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DecreaseReplicationFactor -func (c *DAX) DecreaseReplicationFactor(input *DecreaseReplicationFactorInput) (*DecreaseReplicationFactorOutput, error) { - req, out := c.DecreaseReplicationFactorRequest(input) - return out, req.Send() -} - -// DecreaseReplicationFactorWithContext is the same as DecreaseReplicationFactor with the addition of -// the ability to pass a context and additional request options. -// -// See DecreaseReplicationFactor for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) DecreaseReplicationFactorWithContext(ctx aws.Context, input *DecreaseReplicationFactorInput, opts ...request.Option) (*DecreaseReplicationFactorOutput, error) { - req, out := c.DecreaseReplicationFactorRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteCluster = "DeleteCluster" - -// DeleteClusterRequest generates a "aws/request.Request" representing the -// client's request for the DeleteCluster operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteCluster for more information on using the DeleteCluster -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteClusterRequest method. 
-// req, resp := client.DeleteClusterRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteCluster -func (c *DAX) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Request, output *DeleteClusterOutput) { - op := &request.Operation{ - Name: opDeleteCluster, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteClusterInput{} - } - - output = &DeleteClusterOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteCluster API operation for Amazon DynamoDB Accelerator (DAX). -// -// Deletes a previously provisioned DAX cluster. DeleteCluster deletes all associated -// nodes, node endpoints and the DAX cluster itself. When you receive a successful -// response from this action, DAX immediately begins deleting the cluster; you -// cannot cancel or revert this action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation DeleteCluster for usage and error information. -// -// Returned Error Codes: -// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" -// The requested cluster ID does not refer to an existing DAX cluster. -// -// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" -// The requested DAX cluster is not in the available state. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteCluster -func (c *DAX) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutput, error) { - req, out := c.DeleteClusterRequest(input) - return out, req.Send() -} - -// DeleteClusterWithContext is the same as DeleteCluster with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteCluster for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) DeleteClusterWithContext(ctx aws.Context, input *DeleteClusterInput, opts ...request.Option) (*DeleteClusterOutput, error) { - req, out := c.DeleteClusterRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteParameterGroup = "DeleteParameterGroup" - -// DeleteParameterGroupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteParameterGroup operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteParameterGroup for more information on using the DeleteParameterGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteParameterGroupRequest method. 
-// req, resp := client.DeleteParameterGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteParameterGroup -func (c *DAX) DeleteParameterGroupRequest(input *DeleteParameterGroupInput) (req *request.Request, output *DeleteParameterGroupOutput) { - op := &request.Operation{ - Name: opDeleteParameterGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteParameterGroupInput{} - } - - output = &DeleteParameterGroupOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteParameterGroup API operation for Amazon DynamoDB Accelerator (DAX). -// -// Deletes the specified parameter group. You cannot delete a parameter group -// if it is associated with any DAX clusters. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation DeleteParameterGroup for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterGroupStateFault "InvalidParameterGroupStateFault" -// One or more parameters in a parameter group are in an invalid state. -// -// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" -// The specified parameter group does not exist. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteParameterGroup -func (c *DAX) DeleteParameterGroup(input *DeleteParameterGroupInput) (*DeleteParameterGroupOutput, error) { - req, out := c.DeleteParameterGroupRequest(input) - return out, req.Send() -} - -// DeleteParameterGroupWithContext is the same as DeleteParameterGroup with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteParameterGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) DeleteParameterGroupWithContext(ctx aws.Context, input *DeleteParameterGroupInput, opts ...request.Option) (*DeleteParameterGroupOutput, error) { - req, out := c.DeleteParameterGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteSubnetGroup = "DeleteSubnetGroup" - -// DeleteSubnetGroupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSubnetGroup operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteSubnetGroup for more information on using the DeleteSubnetGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteSubnetGroupRequest method. 
-// req, resp := client.DeleteSubnetGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteSubnetGroup -func (c *DAX) DeleteSubnetGroupRequest(input *DeleteSubnetGroupInput) (req *request.Request, output *DeleteSubnetGroupOutput) { - op := &request.Operation{ - Name: opDeleteSubnetGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteSubnetGroupInput{} - } - - output = &DeleteSubnetGroupOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteSubnetGroup API operation for Amazon DynamoDB Accelerator (DAX). -// -// Deletes a subnet group. -// -// You cannot delete a subnet group if it is associated with any DAX clusters. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation DeleteSubnetGroup for usage and error information. -// -// Returned Error Codes: -// * ErrCodeSubnetGroupInUseFault "SubnetGroupInUseFault" -// The specified subnet group is currently in use. -// -// * ErrCodeSubnetGroupNotFoundFault "SubnetGroupNotFoundFault" -// The requested subnet group name does not refer to an existing subnet group. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteSubnetGroup -func (c *DAX) DeleteSubnetGroup(input *DeleteSubnetGroupInput) (*DeleteSubnetGroupOutput, error) { - req, out := c.DeleteSubnetGroupRequest(input) - return out, req.Send() -} - -// DeleteSubnetGroupWithContext is the same as DeleteSubnetGroup with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteSubnetGroup for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) DeleteSubnetGroupWithContext(ctx aws.Context, input *DeleteSubnetGroupInput, opts ...request.Option) (*DeleteSubnetGroupOutput, error) { - req, out := c.DeleteSubnetGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeClusters = "DescribeClusters" - -// DescribeClustersRequest generates a "aws/request.Request" representing the -// client's request for the DescribeClusters operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeClusters for more information on using the DescribeClusters -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeClustersRequest method. 
-// req, resp := client.DescribeClustersRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeClusters -func (c *DAX) DescribeClustersRequest(input *DescribeClustersInput) (req *request.Request, output *DescribeClustersOutput) { - op := &request.Operation{ - Name: opDescribeClusters, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeClustersInput{} - } - - output = &DescribeClustersOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeClusters API operation for Amazon DynamoDB Accelerator (DAX). -// -// Returns information about all provisioned DAX clusters if no cluster identifier -// is specified, or about a specific DAX cluster if a cluster identifier is -// supplied. -// -// If the cluster is in the CREATING state, only cluster level information will -// be displayed until all of the nodes are successfully provisioned. -// -// If the cluster is in the DELETING state, only cluster level information will -// be displayed. -// -// If nodes are currently being added to the DAX cluster, node endpoint information -// and creation time for the additional nodes will not be displayed until they -// are completely provisioned. When the DAX cluster state is available, the -// cluster is ready for use. -// -// If nodes are currently being removed from the DAX cluster, no endpoint information -// for the removed nodes is displayed. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation DescribeClusters for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" -// The requested cluster ID does not refer to an existing DAX cluster. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeClusters -func (c *DAX) DescribeClusters(input *DescribeClustersInput) (*DescribeClustersOutput, error) { - req, out := c.DescribeClustersRequest(input) - return out, req.Send() -} - -// DescribeClustersWithContext is the same as DescribeClusters with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeClusters for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) DescribeClustersWithContext(ctx aws.Context, input *DescribeClustersInput, opts ...request.Option) (*DescribeClustersOutput, error) { - req, out := c.DescribeClustersRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeDefaultParameters = "DescribeDefaultParameters" - -// DescribeDefaultParametersRequest generates a "aws/request.Request" representing the -// client's request for the DescribeDefaultParameters operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See DescribeDefaultParameters for more information on using the DescribeDefaultParameters -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeDefaultParametersRequest method. -// req, resp := client.DescribeDefaultParametersRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeDefaultParameters -func (c *DAX) DescribeDefaultParametersRequest(input *DescribeDefaultParametersInput) (req *request.Request, output *DescribeDefaultParametersOutput) { - op := &request.Operation{ - Name: opDescribeDefaultParameters, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeDefaultParametersInput{} - } - - output = &DescribeDefaultParametersOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeDefaultParameters API operation for Amazon DynamoDB Accelerator (DAX). -// -// Returns the default system parameter information for the DAX caching software. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation DescribeDefaultParameters for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeDefaultParameters -func (c *DAX) DescribeDefaultParameters(input *DescribeDefaultParametersInput) (*DescribeDefaultParametersOutput, error) { - req, out := c.DescribeDefaultParametersRequest(input) - return out, req.Send() -} - -// DescribeDefaultParametersWithContext is the same as DescribeDefaultParameters with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeDefaultParameters for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) DescribeDefaultParametersWithContext(ctx aws.Context, input *DescribeDefaultParametersInput, opts ...request.Option) (*DescribeDefaultParametersOutput, error) { - req, out := c.DescribeDefaultParametersRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeEvents = "DescribeEvents" - -// DescribeEventsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeEvents operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeEvents for more information on using the DescribeEvents -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeEventsRequest method. 
-// req, resp := client.DescribeEventsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeEvents -func (c *DAX) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { - op := &request.Operation{ - Name: opDescribeEvents, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeEventsInput{} - } - - output = &DescribeEventsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeEvents API operation for Amazon DynamoDB Accelerator (DAX). -// -// Returns events related to DAX clusters and parameter groups. You can obtain -// events specific to a particular DAX cluster or parameter group by providing -// the name as a parameter. -// -// By default, only the events occurring within the last hour are returned; -// however, you can retrieve up to 14 days' worth of events if necessary. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation DescribeEvents for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeEvents -func (c *DAX) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) { - req, out := c.DescribeEventsRequest(input) - return out, req.Send() -} - -// DescribeEventsWithContext is the same as DescribeEvents with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeEvents for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) DescribeEventsWithContext(ctx aws.Context, input *DescribeEventsInput, opts ...request.Option) (*DescribeEventsOutput, error) { - req, out := c.DescribeEventsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeParameterGroups = "DescribeParameterGroups" - -// DescribeParameterGroupsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeParameterGroups operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeParameterGroups for more information on using the DescribeParameterGroups -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeParameterGroupsRequest method. 
-// req, resp := client.DescribeParameterGroupsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameterGroups -func (c *DAX) DescribeParameterGroupsRequest(input *DescribeParameterGroupsInput) (req *request.Request, output *DescribeParameterGroupsOutput) { - op := &request.Operation{ - Name: opDescribeParameterGroups, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeParameterGroupsInput{} - } - - output = &DescribeParameterGroupsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeParameterGroups API operation for Amazon DynamoDB Accelerator (DAX). -// -// Returns a list of parameter group descriptions. If a parameter group name -// is specified, the list will contain only the descriptions for that group. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation DescribeParameterGroups for usage and error information. -// -// Returned Error Codes: -// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" -// The specified parameter group does not exist. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameterGroups -func (c *DAX) DescribeParameterGroups(input *DescribeParameterGroupsInput) (*DescribeParameterGroupsOutput, error) { - req, out := c.DescribeParameterGroupsRequest(input) - return out, req.Send() -} - -// DescribeParameterGroupsWithContext is the same as DescribeParameterGroups with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeParameterGroups for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) DescribeParameterGroupsWithContext(ctx aws.Context, input *DescribeParameterGroupsInput, opts ...request.Option) (*DescribeParameterGroupsOutput, error) { - req, out := c.DescribeParameterGroupsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeParameters = "DescribeParameters" - -// DescribeParametersRequest generates a "aws/request.Request" representing the -// client's request for the DescribeParameters operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeParameters for more information on using the DescribeParameters -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeParametersRequest method. 
-// req, resp := client.DescribeParametersRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameters -func (c *DAX) DescribeParametersRequest(input *DescribeParametersInput) (req *request.Request, output *DescribeParametersOutput) { - op := &request.Operation{ - Name: opDescribeParameters, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeParametersInput{} - } - - output = &DescribeParametersOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeParameters API operation for Amazon DynamoDB Accelerator (DAX). -// -// Returns the detailed parameter list for a particular parameter group. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation DescribeParameters for usage and error information. -// -// Returned Error Codes: -// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" -// The specified parameter group does not exist. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameters -func (c *DAX) DescribeParameters(input *DescribeParametersInput) (*DescribeParametersOutput, error) { - req, out := c.DescribeParametersRequest(input) - return out, req.Send() -} - -// DescribeParametersWithContext is the same as DescribeParameters with the addition of -// the ability to pass a context and additional request options. 
-// -// See DescribeParameters for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) DescribeParametersWithContext(ctx aws.Context, input *DescribeParametersInput, opts ...request.Option) (*DescribeParametersOutput, error) { - req, out := c.DescribeParametersRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeSubnetGroups = "DescribeSubnetGroups" - -// DescribeSubnetGroupsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeSubnetGroups operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeSubnetGroups for more information on using the DescribeSubnetGroups -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeSubnetGroupsRequest method. 
-// req, resp := client.DescribeSubnetGroupsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeSubnetGroups -func (c *DAX) DescribeSubnetGroupsRequest(input *DescribeSubnetGroupsInput) (req *request.Request, output *DescribeSubnetGroupsOutput) { - op := &request.Operation{ - Name: opDescribeSubnetGroups, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeSubnetGroupsInput{} - } - - output = &DescribeSubnetGroupsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeSubnetGroups API operation for Amazon DynamoDB Accelerator (DAX). -// -// Returns a list of subnet group descriptions. If a subnet group name is specified, -// the list will contain only the description of that group. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation DescribeSubnetGroups for usage and error information. -// -// Returned Error Codes: -// * ErrCodeSubnetGroupNotFoundFault "SubnetGroupNotFoundFault" -// The requested subnet group name does not refer to an existing subnet group. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeSubnetGroups -func (c *DAX) DescribeSubnetGroups(input *DescribeSubnetGroupsInput) (*DescribeSubnetGroupsOutput, error) { - req, out := c.DescribeSubnetGroupsRequest(input) - return out, req.Send() -} - -// DescribeSubnetGroupsWithContext is the same as DescribeSubnetGroups with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeSubnetGroups for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) DescribeSubnetGroupsWithContext(ctx aws.Context, input *DescribeSubnetGroupsInput, opts ...request.Option) (*DescribeSubnetGroupsOutput, error) { - req, out := c.DescribeSubnetGroupsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opIncreaseReplicationFactor = "IncreaseReplicationFactor" - -// IncreaseReplicationFactorRequest generates a "aws/request.Request" representing the -// client's request for the IncreaseReplicationFactor operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See IncreaseReplicationFactor for more information on using the IncreaseReplicationFactor -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the IncreaseReplicationFactorRequest method. 
-// req, resp := client.IncreaseReplicationFactorRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/IncreaseReplicationFactor -func (c *DAX) IncreaseReplicationFactorRequest(input *IncreaseReplicationFactorInput) (req *request.Request, output *IncreaseReplicationFactorOutput) { - op := &request.Operation{ - Name: opIncreaseReplicationFactor, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &IncreaseReplicationFactorInput{} - } - - output = &IncreaseReplicationFactorOutput{} - req = c.newRequest(op, input, output) - return -} - -// IncreaseReplicationFactor API operation for Amazon DynamoDB Accelerator (DAX). -// -// Adds one or more nodes to a DAX cluster. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation IncreaseReplicationFactor for usage and error information. -// -// Returned Error Codes: -// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" -// The requested cluster ID does not refer to an existing DAX cluster. -// -// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" -// The requested DAX cluster is not in the available state. -// -// * ErrCodeInsufficientClusterCapacityFault "InsufficientClusterCapacityFault" -// There are not enough system resources to create the cluster you requested -// (or to resize an already-existing cluster). -// -// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" -// The VPC network is in an invalid state. -// -// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceededFault" -// You have attempted to exceed the maximum number of nodes for a DAX cluster. 
-// -// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceededFault" -// You have attempted to exceed the maximum number of nodes for your AWS account. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/IncreaseReplicationFactor -func (c *DAX) IncreaseReplicationFactor(input *IncreaseReplicationFactorInput) (*IncreaseReplicationFactorOutput, error) { - req, out := c.IncreaseReplicationFactorRequest(input) - return out, req.Send() -} - -// IncreaseReplicationFactorWithContext is the same as IncreaseReplicationFactor with the addition of -// the ability to pass a context and additional request options. -// -// See IncreaseReplicationFactor for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) IncreaseReplicationFactorWithContext(ctx aws.Context, input *IncreaseReplicationFactorInput, opts ...request.Option) (*IncreaseReplicationFactorOutput, error) { - req, out := c.IncreaseReplicationFactorRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListTags = "ListTags" - -// ListTagsRequest generates a "aws/request.Request" representing the -// client's request for the ListTags operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See ListTags for more information on using the ListTags -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListTagsRequest method. -// req, resp := client.ListTagsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ListTags -func (c *DAX) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { - op := &request.Operation{ - Name: opListTags, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListTagsInput{} - } - - output = &ListTagsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListTags API operation for Amazon DynamoDB Accelerator (DAX). -// -// List all of the tags for a DAX cluster. You can call ListTags up to 10 times -// per second, per account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation ListTags for usage and error information. -// -// Returned Error Codes: -// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" -// The requested cluster ID does not refer to an existing DAX cluster. -// -// * ErrCodeInvalidARNFault "InvalidARNFault" -// The Amazon Resource Name (ARN) supplied in the request is not valid. -// -// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" -// The requested DAX cluster is not in the available state. 
-// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ListTags -func (c *DAX) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { - req, out := c.ListTagsRequest(input) - return out, req.Send() -} - -// ListTagsWithContext is the same as ListTags with the addition of -// the ability to pass a context and additional request options. -// -// See ListTags for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, opts ...request.Option) (*ListTagsOutput, error) { - req, out := c.ListTagsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRebootNode = "RebootNode" - -// RebootNodeRequest generates a "aws/request.Request" representing the -// client's request for the RebootNode operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RebootNode for more information on using the RebootNode -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the RebootNodeRequest method. -// req, resp := client.RebootNodeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/RebootNode -func (c *DAX) RebootNodeRequest(input *RebootNodeInput) (req *request.Request, output *RebootNodeOutput) { - op := &request.Operation{ - Name: opRebootNode, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RebootNodeInput{} - } - - output = &RebootNodeOutput{} - req = c.newRequest(op, input, output) - return -} - -// RebootNode API operation for Amazon DynamoDB Accelerator (DAX). -// -// Reboots a single node of a DAX cluster. The reboot action takes place as -// soon as possible. During the reboot, the node status is set to REBOOTING. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation RebootNode for usage and error information. -// -// Returned Error Codes: -// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" -// The requested cluster ID does not refer to an existing DAX cluster. -// -// * ErrCodeNodeNotFoundFault "NodeNotFoundFault" -// None of the nodes in the cluster have the given node ID. -// -// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" -// The requested DAX cluster is not in the available state. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/RebootNode -func (c *DAX) RebootNode(input *RebootNodeInput) (*RebootNodeOutput, error) { - req, out := c.RebootNodeRequest(input) - return out, req.Send() -} - -// RebootNodeWithContext is the same as RebootNode with the addition of -// the ability to pass a context and additional request options. -// -// See RebootNode for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) RebootNodeWithContext(ctx aws.Context, input *RebootNodeInput, opts ...request.Option) (*RebootNodeOutput, error) { - req, out := c.RebootNodeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opTagResource = "TagResource" - -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See TagResource for more information on using the TagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the TagResourceRequest method. 
-// req, resp := client.TagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/TagResource -func (c *DAX) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { - op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TagResourceInput{} - } - - output = &TagResourceOutput{} - req = c.newRequest(op, input, output) - return -} - -// TagResource API operation for Amazon DynamoDB Accelerator (DAX). -// -// Associates a set of tags with a DAX resource. You can call TagResource up -// to 5 times per second, per account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation TagResource for usage and error information. -// -// Returned Error Codes: -// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" -// The requested cluster ID does not refer to an existing DAX cluster. -// -// * ErrCodeTagQuotaPerResourceExceeded "TagQuotaPerResourceExceeded" -// You have exceeded the maximum number of tags for this DAX cluster. -// -// * ErrCodeInvalidARNFault "InvalidARNFault" -// The Amazon Resource Name (ARN) supplied in the request is not valid. -// -// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" -// The requested DAX cluster is not in the available state. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/TagResource -func (c *DAX) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - return out, req.Send() -} - -// TagResourceWithContext is the same as TagResource with the addition of -// the ability to pass a context and additional request options. -// -// See TagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUntagResource = "UntagResource" - -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UntagResource for more information on using the UntagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UntagResourceRequest method. 
-// req, resp := client.UntagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UntagResource -func (c *DAX) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { - op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UntagResourceInput{} - } - - output = &UntagResourceOutput{} - req = c.newRequest(op, input, output) - return -} - -// UntagResource API operation for Amazon DynamoDB Accelerator (DAX). -// -// Removes the association of tags from a DAX resource. You can call UntagResource -// up to 5 times per second, per account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation UntagResource for usage and error information. -// -// Returned Error Codes: -// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" -// The requested cluster ID does not refer to an existing DAX cluster. -// -// * ErrCodeInvalidARNFault "InvalidARNFault" -// The Amazon Resource Name (ARN) supplied in the request is not valid. -// -// * ErrCodeTagNotFoundFault "TagNotFoundFault" -// The tag does not exist. -// -// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" -// The requested DAX cluster is not in the available state. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UntagResource -func (c *DAX) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - return out, req.Send() -} - -// UntagResourceWithContext is the same as UntagResource with the addition of -// the ability to pass a context and additional request options. -// -// See UntagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateCluster = "UpdateCluster" - -// UpdateClusterRequest generates a "aws/request.Request" representing the -// client's request for the UpdateCluster operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateCluster for more information on using the UpdateCluster -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateClusterRequest method. 
-// req, resp := client.UpdateClusterRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateCluster -func (c *DAX) UpdateClusterRequest(input *UpdateClusterInput) (req *request.Request, output *UpdateClusterOutput) { - op := &request.Operation{ - Name: opUpdateCluster, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateClusterInput{} - } - - output = &UpdateClusterOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateCluster API operation for Amazon DynamoDB Accelerator (DAX). -// -// Modifies the settings for a DAX cluster. You can use this action to change -// one or more cluster configuration parameters by specifying the parameters -// and the new values. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation UpdateCluster for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidClusterStateFault "InvalidClusterStateFault" -// The requested DAX cluster is not in the available state. -// -// * ErrCodeClusterNotFoundFault "ClusterNotFoundFault" -// The requested cluster ID does not refer to an existing DAX cluster. -// -// * ErrCodeInvalidParameterGroupStateFault "InvalidParameterGroupStateFault" -// One or more parameters in a parameter group are in an invalid state. -// -// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" -// The specified parameter group does not exist. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. 
-// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateCluster -func (c *DAX) UpdateCluster(input *UpdateClusterInput) (*UpdateClusterOutput, error) { - req, out := c.UpdateClusterRequest(input) - return out, req.Send() -} - -// UpdateClusterWithContext is the same as UpdateCluster with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateCluster for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) UpdateClusterWithContext(ctx aws.Context, input *UpdateClusterInput, opts ...request.Option) (*UpdateClusterOutput, error) { - req, out := c.UpdateClusterRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateParameterGroup = "UpdateParameterGroup" - -// UpdateParameterGroupRequest generates a "aws/request.Request" representing the -// client's request for the UpdateParameterGroup operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateParameterGroup for more information on using the UpdateParameterGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the UpdateParameterGroupRequest method. -// req, resp := client.UpdateParameterGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateParameterGroup -func (c *DAX) UpdateParameterGroupRequest(input *UpdateParameterGroupInput) (req *request.Request, output *UpdateParameterGroupOutput) { - op := &request.Operation{ - Name: opUpdateParameterGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateParameterGroupInput{} - } - - output = &UpdateParameterGroupOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateParameterGroup API operation for Amazon DynamoDB Accelerator (DAX). -// -// Modifies the parameters of a parameter group. You can modify up to 20 parameters -// in a single request by submitting a list parameter name and value pairs. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation UpdateParameterGroup for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterGroupStateFault "InvalidParameterGroupStateFault" -// One or more parameters in a parameter group are in an invalid state. -// -// * ErrCodeParameterGroupNotFoundFault "ParameterGroupNotFoundFault" -// The specified parameter group does not exist. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateParameterGroup -func (c *DAX) UpdateParameterGroup(input *UpdateParameterGroupInput) (*UpdateParameterGroupOutput, error) { - req, out := c.UpdateParameterGroupRequest(input) - return out, req.Send() -} - -// UpdateParameterGroupWithContext is the same as UpdateParameterGroup with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateParameterGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) UpdateParameterGroupWithContext(ctx aws.Context, input *UpdateParameterGroupInput, opts ...request.Option) (*UpdateParameterGroupOutput, error) { - req, out := c.UpdateParameterGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateSubnetGroup = "UpdateSubnetGroup" - -// UpdateSubnetGroupRequest generates a "aws/request.Request" representing the -// client's request for the UpdateSubnetGroup operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateSubnetGroup for more information on using the UpdateSubnetGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateSubnetGroupRequest method. 
-// req, resp := client.UpdateSubnetGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateSubnetGroup -func (c *DAX) UpdateSubnetGroupRequest(input *UpdateSubnetGroupInput) (req *request.Request, output *UpdateSubnetGroupOutput) { - op := &request.Operation{ - Name: opUpdateSubnetGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateSubnetGroupInput{} - } - - output = &UpdateSubnetGroupOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateSubnetGroup API operation for Amazon DynamoDB Accelerator (DAX). -// -// Modifies an existing subnet group. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB Accelerator (DAX)'s -// API operation UpdateSubnetGroup for usage and error information. -// -// Returned Error Codes: -// * ErrCodeSubnetGroupNotFoundFault "SubnetGroupNotFoundFault" -// The requested subnet group name does not refer to an existing subnet group. -// -// * ErrCodeSubnetQuotaExceededFault "SubnetQuotaExceededFault" -// The request cannot be processed because it would exceed the allowed number -// of subnets in a subnet group. -// -// * ErrCodeSubnetInUse "SubnetInUse" -// The requested subnet is being used by another subnet group. -// -// * ErrCodeInvalidSubnet "InvalidSubnet" -// An invalid subnet identifier was specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateSubnetGroup -func (c *DAX) UpdateSubnetGroup(input *UpdateSubnetGroupInput) (*UpdateSubnetGroupOutput, error) { - req, out := c.UpdateSubnetGroupRequest(input) - return out, req.Send() -} - -// UpdateSubnetGroupWithContext is the same as UpdateSubnetGroup with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateSubnetGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DAX) UpdateSubnetGroupWithContext(ctx aws.Context, input *UpdateSubnetGroupInput, opts ...request.Option) (*UpdateSubnetGroupOutput, error) { - req, out := c.UpdateSubnetGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Contains all of the attributes of a specific DAX cluster. -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Cluster -type Cluster struct { - _ struct{} `type:"structure"` - - // The number of nodes in the cluster that are active (i.e., capable of serving - // requests). - ActiveNodes *int64 `type:"integer"` - - // The Amazon Resource Name (ARN) that uniquely identifies the cluster. - ClusterArn *string `type:"string"` - - // The configuration endpoint for this DAX cluster, consisting of a DNS name - // and a port number. Client applications can specify this endpoint, rather - // than an individual node endpoint, and allow the DAX client software to intelligently - // route requests and responses to nodes in the DAX cluster. - ClusterDiscoveryEndpoint *Endpoint `type:"structure"` - - // The name of the DAX cluster. - ClusterName *string `type:"string"` - - // The description of the cluster. 
- Description *string `type:"string"` - - // A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, - // DAX will assume this role and use the role's permissions to access DynamoDB - // on your behalf. - IamRoleArn *string `type:"string"` - - // A list of nodes to be removed from the cluster. - NodeIdsToRemove []*string `type:"list"` - - // The node type for the nodes in the cluster. (All nodes in a DAX cluster are - // of the same type.) - NodeType *string `type:"string"` - - // A list of nodes that are currently in the cluster. - Nodes []*Node `type:"list"` - - // Describes a notification topic and its status. Notification topics are used - // for publishing DAX events to subscribers using Amazon Simple Notification - // Service (SNS). - NotificationConfiguration *NotificationConfiguration `type:"structure"` - - // The parameter group being used by nodes in the cluster. - ParameterGroup *ParameterGroupStatus `type:"structure"` - - // A range of time when maintenance of DAX cluster software will be performed. - // For example: sun:01:00-sun:09:00. Cluster maintenance normally takes less - // than 30 minutes, and is performed automatically within the maintenance window. - PreferredMaintenanceWindow *string `type:"string"` - - // A list of security groups, and the status of each, for the nodes in the cluster. - SecurityGroups []*SecurityGroupMembership `type:"list"` - - // The current status of the cluster. - Status *string `type:"string"` - - // The subnet group where the DAX cluster is running. - SubnetGroup *string `type:"string"` - - // The total number of nodes in the cluster. - TotalNodes *int64 `type:"integer"` -} - -// String returns the string representation -func (s Cluster) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Cluster) GoString() string { - return s.String() -} - -// SetActiveNodes sets the ActiveNodes field's value. 
-func (s *Cluster) SetActiveNodes(v int64) *Cluster { - s.ActiveNodes = &v - return s -} - -// SetClusterArn sets the ClusterArn field's value. -func (s *Cluster) SetClusterArn(v string) *Cluster { - s.ClusterArn = &v - return s -} - -// SetClusterDiscoveryEndpoint sets the ClusterDiscoveryEndpoint field's value. -func (s *Cluster) SetClusterDiscoveryEndpoint(v *Endpoint) *Cluster { - s.ClusterDiscoveryEndpoint = v - return s -} - -// SetClusterName sets the ClusterName field's value. -func (s *Cluster) SetClusterName(v string) *Cluster { - s.ClusterName = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *Cluster) SetDescription(v string) *Cluster { - s.Description = &v - return s -} - -// SetIamRoleArn sets the IamRoleArn field's value. -func (s *Cluster) SetIamRoleArn(v string) *Cluster { - s.IamRoleArn = &v - return s -} - -// SetNodeIdsToRemove sets the NodeIdsToRemove field's value. -func (s *Cluster) SetNodeIdsToRemove(v []*string) *Cluster { - s.NodeIdsToRemove = v - return s -} - -// SetNodeType sets the NodeType field's value. -func (s *Cluster) SetNodeType(v string) *Cluster { - s.NodeType = &v - return s -} - -// SetNodes sets the Nodes field's value. -func (s *Cluster) SetNodes(v []*Node) *Cluster { - s.Nodes = v - return s -} - -// SetNotificationConfiguration sets the NotificationConfiguration field's value. -func (s *Cluster) SetNotificationConfiguration(v *NotificationConfiguration) *Cluster { - s.NotificationConfiguration = v - return s -} - -// SetParameterGroup sets the ParameterGroup field's value. -func (s *Cluster) SetParameterGroup(v *ParameterGroupStatus) *Cluster { - s.ParameterGroup = v - return s -} - -// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. -func (s *Cluster) SetPreferredMaintenanceWindow(v string) *Cluster { - s.PreferredMaintenanceWindow = &v - return s -} - -// SetSecurityGroups sets the SecurityGroups field's value. 
-func (s *Cluster) SetSecurityGroups(v []*SecurityGroupMembership) *Cluster { - s.SecurityGroups = v - return s -} - -// SetStatus sets the Status field's value. -func (s *Cluster) SetStatus(v string) *Cluster { - s.Status = &v - return s -} - -// SetSubnetGroup sets the SubnetGroup field's value. -func (s *Cluster) SetSubnetGroup(v string) *Cluster { - s.SubnetGroup = &v - return s -} - -// SetTotalNodes sets the TotalNodes field's value. -func (s *Cluster) SetTotalNodes(v int64) *Cluster { - s.TotalNodes = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateClusterRequest -type CreateClusterInput struct { - _ struct{} `type:"structure"` - - // The Availability Zones (AZs) in which the cluster nodes will be created. - // All nodes belonging to the cluster are placed in these Availability Zones. - // Use this parameter if you want to distribute the nodes across multiple AZs. - AvailabilityZones []*string `type:"list"` - - // The cluster identifier. This parameter is stored as a lowercase string. - // - // Constraints: - // - // * A name must contain from 1 to 20 alphanumeric characters or hyphens. - // - // * The first character must be a letter. - // - // * A name cannot end with a hyphen or contain two consecutive hyphens. - // - // ClusterName is a required field - ClusterName *string `type:"string" required:"true"` - - // A description of the cluster. - Description *string `type:"string"` - - // A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, - // DAX will assume this role and use the role's permissions to access DynamoDB - // on your behalf. - // - // IamRoleArn is a required field - IamRoleArn *string `type:"string" required:"true"` - - // The compute and memory capacity of the nodes in the cluster. - // - // NodeType is a required field - NodeType *string `type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications - // will be sent. 
- // - // The Amazon SNS topic owner must be same as the DAX cluster owner. - NotificationTopicArn *string `type:"string"` - - // The parameter group to be associated with the DAX cluster. - ParameterGroupName *string `type:"string"` - - // Specifies the weekly time range during which maintenance on the DAX cluster - // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi - // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid - // values for ddd are: - // - // * sun - // - // * mon - // - // * tue - // - // * wed - // - // * thu - // - // * fri - // - // * sat - // - // Example: sun:05:00-sun:09:00 - // - // If you don't specify a preferred maintenance window when you create or modify - // a cache cluster, DAX assigns a 60-minute maintenance window on a randomly - // selected day of the week. - PreferredMaintenanceWindow *string `type:"string"` - - // The number of nodes in the DAX cluster. A replication factor of 1 will create - // a single-node cluster, without any read replicas. For additional fault tolerance, - // you can create a multiple node cluster with one or more read replicas. To - // do this, set ReplicationFactor to 2 or more. - // - // AWS recommends that you have at least two read replicas per cluster. - // - // ReplicationFactor is a required field - ReplicationFactor *int64 `type:"integer" required:"true"` - - // A list of security group IDs to be assigned to each node in the DAX cluster. - // (Each of the security group ID is system-generated.) - // - // If this parameter is not specified, DAX assigns the default VPC security - // group to each node. - SecurityGroupIds []*string `type:"list"` - - // The name of the subnet group to be used for the replication group. - // - // DAX clusters can only run in an Amazon VPC environment. All of the subnets - // that you specify in a subnet group must exist in the same VPC. 
- SubnetGroupName *string `type:"string"` - - // A set of tags to associate with the DAX cluster. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s CreateClusterInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateClusterInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateClusterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateClusterInput"} - if s.ClusterName == nil { - invalidParams.Add(request.NewErrParamRequired("ClusterName")) - } - if s.IamRoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("IamRoleArn")) - } - if s.NodeType == nil { - invalidParams.Add(request.NewErrParamRequired("NodeType")) - } - if s.ReplicationFactor == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationFactor")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAvailabilityZones sets the AvailabilityZones field's value. -func (s *CreateClusterInput) SetAvailabilityZones(v []*string) *CreateClusterInput { - s.AvailabilityZones = v - return s -} - -// SetClusterName sets the ClusterName field's value. -func (s *CreateClusterInput) SetClusterName(v string) *CreateClusterInput { - s.ClusterName = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateClusterInput) SetDescription(v string) *CreateClusterInput { - s.Description = &v - return s -} - -// SetIamRoleArn sets the IamRoleArn field's value. -func (s *CreateClusterInput) SetIamRoleArn(v string) *CreateClusterInput { - s.IamRoleArn = &v - return s -} - -// SetNodeType sets the NodeType field's value. -func (s *CreateClusterInput) SetNodeType(v string) *CreateClusterInput { - s.NodeType = &v - return s -} - -// SetNotificationTopicArn sets the NotificationTopicArn field's value. 
-func (s *CreateClusterInput) SetNotificationTopicArn(v string) *CreateClusterInput { - s.NotificationTopicArn = &v - return s -} - -// SetParameterGroupName sets the ParameterGroupName field's value. -func (s *CreateClusterInput) SetParameterGroupName(v string) *CreateClusterInput { - s.ParameterGroupName = &v - return s -} - -// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. -func (s *CreateClusterInput) SetPreferredMaintenanceWindow(v string) *CreateClusterInput { - s.PreferredMaintenanceWindow = &v - return s -} - -// SetReplicationFactor sets the ReplicationFactor field's value. -func (s *CreateClusterInput) SetReplicationFactor(v int64) *CreateClusterInput { - s.ReplicationFactor = &v - return s -} - -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *CreateClusterInput) SetSecurityGroupIds(v []*string) *CreateClusterInput { - s.SecurityGroupIds = v - return s -} - -// SetSubnetGroupName sets the SubnetGroupName field's value. -func (s *CreateClusterInput) SetSubnetGroupName(v string) *CreateClusterInput { - s.SubnetGroupName = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateClusterInput) SetTags(v []*Tag) *CreateClusterInput { - s.Tags = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateClusterResponse -type CreateClusterOutput struct { - _ struct{} `type:"structure"` - - // A description of the DAX cluster that you have created. - Cluster *Cluster `type:"structure"` -} - -// String returns the string representation -func (s CreateClusterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateClusterOutput) GoString() string { - return s.String() -} - -// SetCluster sets the Cluster field's value. 
-func (s *CreateClusterOutput) SetCluster(v *Cluster) *CreateClusterOutput { - s.Cluster = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateParameterGroupRequest -type CreateParameterGroupInput struct { - _ struct{} `type:"structure"` - - // A description of the parameter group. - Description *string `type:"string"` - - // The name of the parameter group to apply to all of the clusters in this replication - // group. - // - // ParameterGroupName is a required field - ParameterGroupName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateParameterGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateParameterGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateParameterGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateParameterGroupInput"} - if s.ParameterGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("ParameterGroupName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *CreateParameterGroupInput) SetDescription(v string) *CreateParameterGroupInput { - s.Description = &v - return s -} - -// SetParameterGroupName sets the ParameterGroupName field's value. -func (s *CreateParameterGroupInput) SetParameterGroupName(v string) *CreateParameterGroupInput { - s.ParameterGroupName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateParameterGroupResponse -type CreateParameterGroupOutput struct { - _ struct{} `type:"structure"` - - // Represents the output of a CreateParameterGroup action. 
- ParameterGroup *ParameterGroup `type:"structure"` -} - -// String returns the string representation -func (s CreateParameterGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateParameterGroupOutput) GoString() string { - return s.String() -} - -// SetParameterGroup sets the ParameterGroup field's value. -func (s *CreateParameterGroupOutput) SetParameterGroup(v *ParameterGroup) *CreateParameterGroupOutput { - s.ParameterGroup = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateSubnetGroupRequest -type CreateSubnetGroupInput struct { - _ struct{} `type:"structure"` - - // A description for the subnet group - Description *string `type:"string"` - - // A name for the subnet group. This value is stored as a lowercase string. - // - // SubnetGroupName is a required field - SubnetGroupName *string `type:"string" required:"true"` - - // A list of VPC subnet IDs for the subnet group. - // - // SubnetIds is a required field - SubnetIds []*string `type:"list" required:"true"` -} - -// String returns the string representation -func (s CreateSubnetGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateSubnetGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateSubnetGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateSubnetGroupInput"} - if s.SubnetGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("SubnetGroupName")) - } - if s.SubnetIds == nil { - invalidParams.Add(request.NewErrParamRequired("SubnetIds")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. 
-func (s *CreateSubnetGroupInput) SetDescription(v string) *CreateSubnetGroupInput { - s.Description = &v - return s -} - -// SetSubnetGroupName sets the SubnetGroupName field's value. -func (s *CreateSubnetGroupInput) SetSubnetGroupName(v string) *CreateSubnetGroupInput { - s.SubnetGroupName = &v - return s -} - -// SetSubnetIds sets the SubnetIds field's value. -func (s *CreateSubnetGroupInput) SetSubnetIds(v []*string) *CreateSubnetGroupInput { - s.SubnetIds = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateSubnetGroupResponse -type CreateSubnetGroupOutput struct { - _ struct{} `type:"structure"` - - // Represents the output of a CreateSubnetGroup operation. - SubnetGroup *SubnetGroup `type:"structure"` -} - -// String returns the string representation -func (s CreateSubnetGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateSubnetGroupOutput) GoString() string { - return s.String() -} - -// SetSubnetGroup sets the SubnetGroup field's value. -func (s *CreateSubnetGroupOutput) SetSubnetGroup(v *SubnetGroup) *CreateSubnetGroupOutput { - s.SubnetGroup = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DecreaseReplicationFactorRequest -type DecreaseReplicationFactorInput struct { - _ struct{} `type:"structure"` - - // The Availability Zone(s) from which to remove nodes. - AvailabilityZones []*string `type:"list"` - - // The name of the DAX cluster from which you want to remove nodes. - // - // ClusterName is a required field - ClusterName *string `type:"string" required:"true"` - - // The new number of nodes for the DAX cluster. - // - // NewReplicationFactor is a required field - NewReplicationFactor *int64 `type:"integer" required:"true"` - - // The unique identifiers of the nodes to be removed from the cluster. 
- NodeIdsToRemove []*string `type:"list"` -} - -// String returns the string representation -func (s DecreaseReplicationFactorInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecreaseReplicationFactorInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DecreaseReplicationFactorInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DecreaseReplicationFactorInput"} - if s.ClusterName == nil { - invalidParams.Add(request.NewErrParamRequired("ClusterName")) - } - if s.NewReplicationFactor == nil { - invalidParams.Add(request.NewErrParamRequired("NewReplicationFactor")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAvailabilityZones sets the AvailabilityZones field's value. -func (s *DecreaseReplicationFactorInput) SetAvailabilityZones(v []*string) *DecreaseReplicationFactorInput { - s.AvailabilityZones = v - return s -} - -// SetClusterName sets the ClusterName field's value. -func (s *DecreaseReplicationFactorInput) SetClusterName(v string) *DecreaseReplicationFactorInput { - s.ClusterName = &v - return s -} - -// SetNewReplicationFactor sets the NewReplicationFactor field's value. -func (s *DecreaseReplicationFactorInput) SetNewReplicationFactor(v int64) *DecreaseReplicationFactorInput { - s.NewReplicationFactor = &v - return s -} - -// SetNodeIdsToRemove sets the NodeIdsToRemove field's value. -func (s *DecreaseReplicationFactorInput) SetNodeIdsToRemove(v []*string) *DecreaseReplicationFactorInput { - s.NodeIdsToRemove = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DecreaseReplicationFactorResponse -type DecreaseReplicationFactorOutput struct { - _ struct{} `type:"structure"` - - // A description of the DAX cluster, after you have decreased its replication - // factor. 
- Cluster *Cluster `type:"structure"` -} - -// String returns the string representation -func (s DecreaseReplicationFactorOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecreaseReplicationFactorOutput) GoString() string { - return s.String() -} - -// SetCluster sets the Cluster field's value. -func (s *DecreaseReplicationFactorOutput) SetCluster(v *Cluster) *DecreaseReplicationFactorOutput { - s.Cluster = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteClusterRequest -type DeleteClusterInput struct { - _ struct{} `type:"structure"` - - // The name of the cluster to be deleted. - // - // ClusterName is a required field - ClusterName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteClusterInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteClusterInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteClusterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteClusterInput"} - if s.ClusterName == nil { - invalidParams.Add(request.NewErrParamRequired("ClusterName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClusterName sets the ClusterName field's value. -func (s *DeleteClusterInput) SetClusterName(v string) *DeleteClusterInput { - s.ClusterName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteClusterResponse -type DeleteClusterOutput struct { - _ struct{} `type:"structure"` - - // A description of the DAX cluster that is being deleted. 
- Cluster *Cluster `type:"structure"` -} - -// String returns the string representation -func (s DeleteClusterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteClusterOutput) GoString() string { - return s.String() -} - -// SetCluster sets the Cluster field's value. -func (s *DeleteClusterOutput) SetCluster(v *Cluster) *DeleteClusterOutput { - s.Cluster = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteParameterGroupRequest -type DeleteParameterGroupInput struct { - _ struct{} `type:"structure"` - - // The name of the parameter group to delete. - // - // ParameterGroupName is a required field - ParameterGroupName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteParameterGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteParameterGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteParameterGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteParameterGroupInput"} - if s.ParameterGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("ParameterGroupName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetParameterGroupName sets the ParameterGroupName field's value. -func (s *DeleteParameterGroupInput) SetParameterGroupName(v string) *DeleteParameterGroupInput { - s.ParameterGroupName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteParameterGroupResponse -type DeleteParameterGroupOutput struct { - _ struct{} `type:"structure"` - - // A user-specified message for this action (i.e., a reason for deleting the - // parameter group). 
- DeletionMessage *string `type:"string"` -} - -// String returns the string representation -func (s DeleteParameterGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteParameterGroupOutput) GoString() string { - return s.String() -} - -// SetDeletionMessage sets the DeletionMessage field's value. -func (s *DeleteParameterGroupOutput) SetDeletionMessage(v string) *DeleteParameterGroupOutput { - s.DeletionMessage = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteSubnetGroupRequest -type DeleteSubnetGroupInput struct { - _ struct{} `type:"structure"` - - // The name of the subnet group to delete. - // - // SubnetGroupName is a required field - SubnetGroupName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteSubnetGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSubnetGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteSubnetGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSubnetGroupInput"} - if s.SubnetGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("SubnetGroupName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSubnetGroupName sets the SubnetGroupName field's value. -func (s *DeleteSubnetGroupInput) SetSubnetGroupName(v string) *DeleteSubnetGroupInput { - s.SubnetGroupName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteSubnetGroupResponse -type DeleteSubnetGroupOutput struct { - _ struct{} `type:"structure"` - - // A user-specified message for this action (i.e., a reason for deleting the - // subnet group). 
- DeletionMessage *string `type:"string"` -} - -// String returns the string representation -func (s DeleteSubnetGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSubnetGroupOutput) GoString() string { - return s.String() -} - -// SetDeletionMessage sets the DeletionMessage field's value. -func (s *DeleteSubnetGroupOutput) SetDeletionMessage(v string) *DeleteSubnetGroupOutput { - s.DeletionMessage = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeClustersRequest -type DescribeClustersInput struct { - _ struct{} `type:"structure"` - - // The names of the DAX clusters being described. - ClusterNames []*string `type:"list"` - - // The maximum number of results to include in the response. If more results - // exist than the specified MaxResults value, a token is included in the response - // so that the remaining results can be retrieved. - // - // The value for MaxResults must be between 20 and 100. - MaxResults *int64 `type:"integer"` - - // An optional token returned from a prior request. Use this token for pagination - // of results from this action. If this parameter is specified, the response - // includes only results beyond the token, up to the value specified by MaxResults. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s DescribeClustersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeClustersInput) GoString() string { - return s.String() -} - -// SetClusterNames sets the ClusterNames field's value. -func (s *DescribeClustersInput) SetClusterNames(v []*string) *DescribeClustersInput { - s.ClusterNames = v - return s -} - -// SetMaxResults sets the MaxResults field's value. 
-func (s *DescribeClustersInput) SetMaxResults(v int64) *DescribeClustersInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeClustersInput) SetNextToken(v string) *DescribeClustersInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeClustersResponse -type DescribeClustersOutput struct { - _ struct{} `type:"structure"` - - // The descriptions of your DAX clusters, in response to a DescribeClusters - // request. - Clusters []*Cluster `type:"list"` - - // Provides an identifier to allow retrieval of paginated results. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s DescribeClustersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeClustersOutput) GoString() string { - return s.String() -} - -// SetClusters sets the Clusters field's value. -func (s *DescribeClustersOutput) SetClusters(v []*Cluster) *DescribeClustersOutput { - s.Clusters = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeClustersOutput) SetNextToken(v string) *DescribeClustersOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeDefaultParametersRequest -type DescribeDefaultParametersInput struct { - _ struct{} `type:"structure"` - - // The maximum number of results to include in the response. If more results - // exist than the specified MaxResults value, a token is included in the response - // so that the remaining results can be retrieved. - // - // The value for MaxResults must be between 20 and 100. - MaxResults *int64 `type:"integer"` - - // An optional token returned from a prior request. Use this token for pagination - // of results from this action. 
If this parameter is specified, the response - // includes only results beyond the token, up to the value specified by MaxResults. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s DescribeDefaultParametersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeDefaultParametersInput) GoString() string { - return s.String() -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeDefaultParametersInput) SetMaxResults(v int64) *DescribeDefaultParametersInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeDefaultParametersInput) SetNextToken(v string) *DescribeDefaultParametersInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeDefaultParametersResponse -type DescribeDefaultParametersOutput struct { - _ struct{} `type:"structure"` - - // Provides an identifier to allow retrieval of paginated results. - NextToken *string `type:"string"` - - // A list of parameters. Each element in the list represents one parameter. - Parameters []*Parameter `type:"list"` -} - -// String returns the string representation -func (s DescribeDefaultParametersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeDefaultParametersOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeDefaultParametersOutput) SetNextToken(v string) *DescribeDefaultParametersOutput { - s.NextToken = &v - return s -} - -// SetParameters sets the Parameters field's value. 
-func (s *DescribeDefaultParametersOutput) SetParameters(v []*Parameter) *DescribeDefaultParametersOutput { - s.Parameters = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeEventsRequest -type DescribeEventsInput struct { - _ struct{} `type:"structure"` - - // The number of minutes' worth of events to retrieve. - Duration *int64 `type:"integer"` - - // The end of the time interval for which to retrieve events, specified in ISO - // 8601 format. - EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The maximum number of results to include in the response. If more results - // exist than the specified MaxResults value, a token is included in the response - // so that the remaining results can be retrieved. - // - // The value for MaxResults must be between 20 and 100. - MaxResults *int64 `type:"integer"` - - // An optional token returned from a prior request. Use this token for pagination - // of results from this action. If this parameter is specified, the response - // includes only results beyond the token, up to the value specified by MaxResults. - NextToken *string `type:"string"` - - // The identifier of the event source for which events will be returned. If - // not specified, then all sources are included in the response. - SourceName *string `type:"string"` - - // The event source to retrieve events for. If no value is specified, all events - // are returned. - SourceType *string `type:"string" enum:"SourceType"` - - // The beginning of the time interval to retrieve events for, specified in ISO - // 8601 format. - StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` -} - -// String returns the string representation -func (s DescribeEventsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeEventsInput) GoString() string { - return s.String() -} - -// SetDuration sets the Duration field's value. 
-func (s *DescribeEventsInput) SetDuration(v int64) *DescribeEventsInput { - s.Duration = &v - return s -} - -// SetEndTime sets the EndTime field's value. -func (s *DescribeEventsInput) SetEndTime(v time.Time) *DescribeEventsInput { - s.EndTime = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeEventsInput) SetMaxResults(v int64) *DescribeEventsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeEventsInput) SetNextToken(v string) *DescribeEventsInput { - s.NextToken = &v - return s -} - -// SetSourceName sets the SourceName field's value. -func (s *DescribeEventsInput) SetSourceName(v string) *DescribeEventsInput { - s.SourceName = &v - return s -} - -// SetSourceType sets the SourceType field's value. -func (s *DescribeEventsInput) SetSourceType(v string) *DescribeEventsInput { - s.SourceType = &v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *DescribeEventsInput) SetStartTime(v time.Time) *DescribeEventsInput { - s.StartTime = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeEventsResponse -type DescribeEventsOutput struct { - _ struct{} `type:"structure"` - - // An array of events. Each element in the array represents one event. - Events []*Event `type:"list"` - - // Provides an identifier to allow retrieval of paginated results. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s DescribeEventsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeEventsOutput) GoString() string { - return s.String() -} - -// SetEvents sets the Events field's value. -func (s *DescribeEventsOutput) SetEvents(v []*Event) *DescribeEventsOutput { - s.Events = v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *DescribeEventsOutput) SetNextToken(v string) *DescribeEventsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameterGroupsRequest -type DescribeParameterGroupsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of results to include in the response. If more results - // exist than the specified MaxResults value, a token is included in the response - // so that the remaining results can be retrieved. - // - // The value for MaxResults must be between 20 and 100. - MaxResults *int64 `type:"integer"` - - // An optional token returned from a prior request. Use this token for pagination - // of results from this action. If this parameter is specified, the response - // includes only results beyond the token, up to the value specified by MaxResults. - NextToken *string `type:"string"` - - // The names of the parameter groups. - ParameterGroupNames []*string `type:"list"` -} - -// String returns the string representation -func (s DescribeParameterGroupsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeParameterGroupsInput) GoString() string { - return s.String() -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeParameterGroupsInput) SetMaxResults(v int64) *DescribeParameterGroupsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeParameterGroupsInput) SetNextToken(v string) *DescribeParameterGroupsInput { - s.NextToken = &v - return s -} - -// SetParameterGroupNames sets the ParameterGroupNames field's value. 
-func (s *DescribeParameterGroupsInput) SetParameterGroupNames(v []*string) *DescribeParameterGroupsInput { - s.ParameterGroupNames = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParameterGroupsResponse -type DescribeParameterGroupsOutput struct { - _ struct{} `type:"structure"` - - // Provides an identifier to allow retrieval of paginated results. - NextToken *string `type:"string"` - - // An array of parameter groups. Each element in the array represents one parameter - // group. - ParameterGroups []*ParameterGroup `type:"list"` -} - -// String returns the string representation -func (s DescribeParameterGroupsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeParameterGroupsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeParameterGroupsOutput) SetNextToken(v string) *DescribeParameterGroupsOutput { - s.NextToken = &v - return s -} - -// SetParameterGroups sets the ParameterGroups field's value. -func (s *DescribeParameterGroupsOutput) SetParameterGroups(v []*ParameterGroup) *DescribeParameterGroupsOutput { - s.ParameterGroups = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParametersRequest -type DescribeParametersInput struct { - _ struct{} `type:"structure"` - - // The maximum number of results to include in the response. If more results - // exist than the specified MaxResults value, a token is included in the response - // so that the remaining results can be retrieved. - // - // The value for MaxResults must be between 20 and 100. - MaxResults *int64 `type:"integer"` - - // An optional token returned from a prior request. Use this token for pagination - // of results from this action. If this parameter is specified, the response - // includes only results beyond the token, up to the value specified by MaxResults. 
- NextToken *string `type:"string"` - - // The name of the parameter group. - // - // ParameterGroupName is a required field - ParameterGroupName *string `type:"string" required:"true"` - - // How the parameter is defined. For example, system denotes a system-defined - // parameter. - Source *string `type:"string"` -} - -// String returns the string representation -func (s DescribeParametersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeParametersInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeParametersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeParametersInput"} - if s.ParameterGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("ParameterGroupName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeParametersInput) SetMaxResults(v int64) *DescribeParametersInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeParametersInput) SetNextToken(v string) *DescribeParametersInput { - s.NextToken = &v - return s -} - -// SetParameterGroupName sets the ParameterGroupName field's value. -func (s *DescribeParametersInput) SetParameterGroupName(v string) *DescribeParametersInput { - s.ParameterGroupName = &v - return s -} - -// SetSource sets the Source field's value. -func (s *DescribeParametersInput) SetSource(v string) *DescribeParametersInput { - s.Source = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeParametersResponse -type DescribeParametersOutput struct { - _ struct{} `type:"structure"` - - // Provides an identifier to allow retrieval of paginated results. 
- NextToken *string `type:"string"` - - // A list of parameters within a parameter group. Each element in the list represents - // one parameter. - Parameters []*Parameter `type:"list"` -} - -// String returns the string representation -func (s DescribeParametersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeParametersOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeParametersOutput) SetNextToken(v string) *DescribeParametersOutput { - s.NextToken = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *DescribeParametersOutput) SetParameters(v []*Parameter) *DescribeParametersOutput { - s.Parameters = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeSubnetGroupsRequest -type DescribeSubnetGroupsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of results to include in the response. If more results - // exist than the specified MaxResults value, a token is included in the response - // so that the remaining results can be retrieved. - // - // The value for MaxResults must be between 20 and 100. - MaxResults *int64 `type:"integer"` - - // An optional token returned from a prior request. Use this token for pagination - // of results from this action. If this parameter is specified, the response - // includes only results beyond the token, up to the value specified by MaxResults. - NextToken *string `type:"string"` - - // The name of the subnet group. - SubnetGroupNames []*string `type:"list"` -} - -// String returns the string representation -func (s DescribeSubnetGroupsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeSubnetGroupsInput) GoString() string { - return s.String() -} - -// SetMaxResults sets the MaxResults field's value. 
-func (s *DescribeSubnetGroupsInput) SetMaxResults(v int64) *DescribeSubnetGroupsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeSubnetGroupsInput) SetNextToken(v string) *DescribeSubnetGroupsInput { - s.NextToken = &v - return s -} - -// SetSubnetGroupNames sets the SubnetGroupNames field's value. -func (s *DescribeSubnetGroupsInput) SetSubnetGroupNames(v []*string) *DescribeSubnetGroupsInput { - s.SubnetGroupNames = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeSubnetGroupsResponse -type DescribeSubnetGroupsOutput struct { - _ struct{} `type:"structure"` - - // Provides an identifier to allow retrieval of paginated results. - NextToken *string `type:"string"` - - // An array of subnet groups. Each element in the array represents a single - // subnet group. - SubnetGroups []*SubnetGroup `type:"list"` -} - -// String returns the string representation -func (s DescribeSubnetGroupsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeSubnetGroupsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeSubnetGroupsOutput) SetNextToken(v string) *DescribeSubnetGroupsOutput { - s.NextToken = &v - return s -} - -// SetSubnetGroups sets the SubnetGroups field's value. -func (s *DescribeSubnetGroupsOutput) SetSubnetGroups(v []*SubnetGroup) *DescribeSubnetGroupsOutput { - s.SubnetGroups = v - return s -} - -// Represents the information required for client programs to connect to the -// configuration endpoint for a DAX cluster, or to an individual node within -// the cluster. -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Endpoint -type Endpoint struct { - _ struct{} `type:"structure"` - - // The DNS hostname of the endpoint. 
- Address *string `type:"string"` - - // The port number that applications should use to connect to the endpoint. - Port *int64 `type:"integer"` -} - -// String returns the string representation -func (s Endpoint) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Endpoint) GoString() string { - return s.String() -} - -// SetAddress sets the Address field's value. -func (s *Endpoint) SetAddress(v string) *Endpoint { - s.Address = &v - return s -} - -// SetPort sets the Port field's value. -func (s *Endpoint) SetPort(v int64) *Endpoint { - s.Port = &v - return s -} - -// Represents a single occurrence of something interesting within the system. -// Some examples of events are creating a DAX cluster, adding or removing a -// node, or rebooting a node. -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Event -type Event struct { - _ struct{} `type:"structure"` - - // The date and time when the event occurred. - Date *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A user-defined message associated with the event. - Message *string `type:"string"` - - // The source of the event. For example, if the event occurred at the node level, - // the source would be the node ID. - SourceName *string `type:"string"` - - // Specifies the origin of this event - a cluster, a parameter group, a node - // ID, etc. - SourceType *string `type:"string" enum:"SourceType"` -} - -// String returns the string representation -func (s Event) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Event) GoString() string { - return s.String() -} - -// SetDate sets the Date field's value. -func (s *Event) SetDate(v time.Time) *Event { - s.Date = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *Event) SetMessage(v string) *Event { - s.Message = &v - return s -} - -// SetSourceName sets the SourceName field's value. 
-func (s *Event) SetSourceName(v string) *Event { - s.SourceName = &v - return s -} - -// SetSourceType sets the SourceType field's value. -func (s *Event) SetSourceType(v string) *Event { - s.SourceType = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/IncreaseReplicationFactorRequest -type IncreaseReplicationFactorInput struct { - _ struct{} `type:"structure"` - - // The Availability Zones (AZs) in which the cluster nodes will be created. - // All nodes belonging to the cluster are placed in these Availability Zones. - // Use this parameter if you want to distribute the nodes across multiple AZs. - AvailabilityZones []*string `type:"list"` - - // The name of the DAX cluster that will receive additional nodes. - // - // ClusterName is a required field - ClusterName *string `type:"string" required:"true"` - - // The new number of nodes for the DAX cluster. - // - // NewReplicationFactor is a required field - NewReplicationFactor *int64 `type:"integer" required:"true"` -} - -// String returns the string representation -func (s IncreaseReplicationFactorInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s IncreaseReplicationFactorInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *IncreaseReplicationFactorInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IncreaseReplicationFactorInput"} - if s.ClusterName == nil { - invalidParams.Add(request.NewErrParamRequired("ClusterName")) - } - if s.NewReplicationFactor == nil { - invalidParams.Add(request.NewErrParamRequired("NewReplicationFactor")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAvailabilityZones sets the AvailabilityZones field's value. 
-func (s *IncreaseReplicationFactorInput) SetAvailabilityZones(v []*string) *IncreaseReplicationFactorInput { - s.AvailabilityZones = v - return s -} - -// SetClusterName sets the ClusterName field's value. -func (s *IncreaseReplicationFactorInput) SetClusterName(v string) *IncreaseReplicationFactorInput { - s.ClusterName = &v - return s -} - -// SetNewReplicationFactor sets the NewReplicationFactor field's value. -func (s *IncreaseReplicationFactorInput) SetNewReplicationFactor(v int64) *IncreaseReplicationFactorInput { - s.NewReplicationFactor = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/IncreaseReplicationFactorResponse -type IncreaseReplicationFactorOutput struct { - _ struct{} `type:"structure"` - - // A description of the DAX cluster. with its new replication factor. - Cluster *Cluster `type:"structure"` -} - -// String returns the string representation -func (s IncreaseReplicationFactorOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s IncreaseReplicationFactorOutput) GoString() string { - return s.String() -} - -// SetCluster sets the Cluster field's value. -func (s *IncreaseReplicationFactorOutput) SetCluster(v *Cluster) *IncreaseReplicationFactorOutput { - s.Cluster = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ListTagsRequest -type ListTagsInput struct { - _ struct{} `type:"structure"` - - // An optional token returned from a prior request. Use this token for pagination - // of results from this action. If this parameter is specified, the response - // includes only results beyond the token. - NextToken *string `type:"string"` - - // The name of the DAX resource to which the tags belong. 
- // - // ResourceName is a required field - ResourceName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ListTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTagsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTagsInput"} - if s.ResourceName == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTagsInput) SetNextToken(v string) *ListTagsInput { - s.NextToken = &v - return s -} - -// SetResourceName sets the ResourceName field's value. -func (s *ListTagsInput) SetResourceName(v string) *ListTagsInput { - s.ResourceName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ListTagsResponse -type ListTagsOutput struct { - _ struct{} `type:"structure"` - - // If this value is present, there are additional results to be displayed. To - // retrieve them, call ListTags again, with NextToken set to this value. - NextToken *string `type:"string"` - - // A list of tags currently associated with the DAX cluster. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s ListTagsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTagsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTagsOutput) SetNextToken(v string) *ListTagsOutput { - s.NextToken = &v - return s -} - -// SetTags sets the Tags field's value. 
-func (s *ListTagsOutput) SetTags(v []*Tag) *ListTagsOutput { - s.Tags = v - return s -} - -// Represents an individual node within a DAX cluster. -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Node -type Node struct { - _ struct{} `type:"structure"` - - // The Availability Zone (AZ) in which the node has been deployed. - AvailabilityZone *string `type:"string"` - - // The endpoint for the node, consisting of a DNS name and a port number. Client - // applications can connect directly to a node endpoint, if desired (as an alternative - // to allowing DAX client software to intelligently route requests and responses - // to nodes in the DAX cluster. - Endpoint *Endpoint `type:"structure"` - - // The date and time (in UNIX epoch format) when the node was launched. - NodeCreateTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A system-generated identifier for the node. - NodeId *string `type:"string"` - - // The current status of the node. For example: available. - NodeStatus *string `type:"string"` - - // The status of the parameter group associated with this node. For example, - // in-sync. - ParameterGroupStatus *string `type:"string"` -} - -// String returns the string representation -func (s Node) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Node) GoString() string { - return s.String() -} - -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *Node) SetAvailabilityZone(v string) *Node { - s.AvailabilityZone = &v - return s -} - -// SetEndpoint sets the Endpoint field's value. -func (s *Node) SetEndpoint(v *Endpoint) *Node { - s.Endpoint = v - return s -} - -// SetNodeCreateTime sets the NodeCreateTime field's value. -func (s *Node) SetNodeCreateTime(v time.Time) *Node { - s.NodeCreateTime = &v - return s -} - -// SetNodeId sets the NodeId field's value. 
-func (s *Node) SetNodeId(v string) *Node { - s.NodeId = &v - return s -} - -// SetNodeStatus sets the NodeStatus field's value. -func (s *Node) SetNodeStatus(v string) *Node { - s.NodeStatus = &v - return s -} - -// SetParameterGroupStatus sets the ParameterGroupStatus field's value. -func (s *Node) SetParameterGroupStatus(v string) *Node { - s.ParameterGroupStatus = &v - return s -} - -// Represents a parameter value that is applicable to a particular node type. -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/NodeTypeSpecificValue -type NodeTypeSpecificValue struct { - _ struct{} `type:"structure"` - - // A node type to which the parameter value applies. - NodeType *string `type:"string"` - - // The parameter value for this node type. - Value *string `type:"string"` -} - -// String returns the string representation -func (s NodeTypeSpecificValue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NodeTypeSpecificValue) GoString() string { - return s.String() -} - -// SetNodeType sets the NodeType field's value. -func (s *NodeTypeSpecificValue) SetNodeType(v string) *NodeTypeSpecificValue { - s.NodeType = &v - return s -} - -// SetValue sets the Value field's value. -func (s *NodeTypeSpecificValue) SetValue(v string) *NodeTypeSpecificValue { - s.Value = &v - return s -} - -// Describes a notification topic and its status. Notification topics are used -// for publishing DAX events to subscribers using Amazon Simple Notification -// Service (SNS). -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/NotificationConfiguration -type NotificationConfiguration struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) that identifies the topic. - TopicArn *string `type:"string"` - - // The current state of the topic. 
- TopicStatus *string `type:"string"` -} - -// String returns the string representation -func (s NotificationConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfiguration) GoString() string { - return s.String() -} - -// SetTopicArn sets the TopicArn field's value. -func (s *NotificationConfiguration) SetTopicArn(v string) *NotificationConfiguration { - s.TopicArn = &v - return s -} - -// SetTopicStatus sets the TopicStatus field's value. -func (s *NotificationConfiguration) SetTopicStatus(v string) *NotificationConfiguration { - s.TopicStatus = &v - return s -} - -// Describes an individual setting that controls some aspect of DAX behavior. -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Parameter -type Parameter struct { - _ struct{} `type:"structure"` - - // A range of values within which the parameter can be set. - AllowedValues *string `type:"string"` - - // The conditions under which changes to this parameter can be applied. For - // example, requires-reboot indicates that a new value for this parameter will - // only take effect if a node is rebooted. - ChangeType *string `type:"string" enum:"ChangeType"` - - // The data type of the parameter. For example, integer: - DataType *string `type:"string"` - - // A description of the parameter - Description *string `type:"string"` - - // Whether the customer is allowed to modify the parameter. - IsModifiable *string `type:"string" enum:"IsModifiable"` - - // A list of node types, and specific parameter values for each node. - NodeTypeSpecificValues []*NodeTypeSpecificValue `type:"list"` - - // The name of the parameter. - ParameterName *string `type:"string"` - - // Determines whether the parameter can be applied to any nodes, or only nodes - // of a particular type. - ParameterType *string `type:"string" enum:"ParameterType"` - - // The value for the parameter. 
- ParameterValue *string `type:"string"` - - // How the parameter is defined. For example, system denotes a system-defined - // parameter. - Source *string `type:"string"` -} - -// String returns the string representation -func (s Parameter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Parameter) GoString() string { - return s.String() -} - -// SetAllowedValues sets the AllowedValues field's value. -func (s *Parameter) SetAllowedValues(v string) *Parameter { - s.AllowedValues = &v - return s -} - -// SetChangeType sets the ChangeType field's value. -func (s *Parameter) SetChangeType(v string) *Parameter { - s.ChangeType = &v - return s -} - -// SetDataType sets the DataType field's value. -func (s *Parameter) SetDataType(v string) *Parameter { - s.DataType = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *Parameter) SetDescription(v string) *Parameter { - s.Description = &v - return s -} - -// SetIsModifiable sets the IsModifiable field's value. -func (s *Parameter) SetIsModifiable(v string) *Parameter { - s.IsModifiable = &v - return s -} - -// SetNodeTypeSpecificValues sets the NodeTypeSpecificValues field's value. -func (s *Parameter) SetNodeTypeSpecificValues(v []*NodeTypeSpecificValue) *Parameter { - s.NodeTypeSpecificValues = v - return s -} - -// SetParameterName sets the ParameterName field's value. -func (s *Parameter) SetParameterName(v string) *Parameter { - s.ParameterName = &v - return s -} - -// SetParameterType sets the ParameterType field's value. -func (s *Parameter) SetParameterType(v string) *Parameter { - s.ParameterType = &v - return s -} - -// SetParameterValue sets the ParameterValue field's value. -func (s *Parameter) SetParameterValue(v string) *Parameter { - s.ParameterValue = &v - return s -} - -// SetSource sets the Source field's value. 
-func (s *Parameter) SetSource(v string) *Parameter { - s.Source = &v - return s -} - -// A named set of parameters that are applied to all of the nodes in a DAX cluster. -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ParameterGroup -type ParameterGroup struct { - _ struct{} `type:"structure"` - - // A description of the parameter group. - Description *string `type:"string"` - - // The name of the parameter group. - ParameterGroupName *string `type:"string"` -} - -// String returns the string representation -func (s ParameterGroup) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ParameterGroup) GoString() string { - return s.String() -} - -// SetDescription sets the Description field's value. -func (s *ParameterGroup) SetDescription(v string) *ParameterGroup { - s.Description = &v - return s -} - -// SetParameterGroupName sets the ParameterGroupName field's value. -func (s *ParameterGroup) SetParameterGroupName(v string) *ParameterGroup { - s.ParameterGroupName = &v - return s -} - -// The status of a parameter group. -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ParameterGroupStatus -type ParameterGroupStatus struct { - _ struct{} `type:"structure"` - - // The node IDs of one or more nodes to be rebooted. - NodeIdsToReboot []*string `type:"list"` - - // The status of parameter updates. - ParameterApplyStatus *string `type:"string"` - - // The name of the parameter group. - ParameterGroupName *string `type:"string"` -} - -// String returns the string representation -func (s ParameterGroupStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ParameterGroupStatus) GoString() string { - return s.String() -} - -// SetNodeIdsToReboot sets the NodeIdsToReboot field's value. 
-func (s *ParameterGroupStatus) SetNodeIdsToReboot(v []*string) *ParameterGroupStatus { - s.NodeIdsToReboot = v - return s -} - -// SetParameterApplyStatus sets the ParameterApplyStatus field's value. -func (s *ParameterGroupStatus) SetParameterApplyStatus(v string) *ParameterGroupStatus { - s.ParameterApplyStatus = &v - return s -} - -// SetParameterGroupName sets the ParameterGroupName field's value. -func (s *ParameterGroupStatus) SetParameterGroupName(v string) *ParameterGroupStatus { - s.ParameterGroupName = &v - return s -} - -// An individual DAX parameter. -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/ParameterNameValue -type ParameterNameValue struct { - _ struct{} `type:"structure"` - - // The name of the parameter. - ParameterName *string `type:"string"` - - // The value of the parameter. - ParameterValue *string `type:"string"` -} - -// String returns the string representation -func (s ParameterNameValue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ParameterNameValue) GoString() string { - return s.String() -} - -// SetParameterName sets the ParameterName field's value. -func (s *ParameterNameValue) SetParameterName(v string) *ParameterNameValue { - s.ParameterName = &v - return s -} - -// SetParameterValue sets the ParameterValue field's value. -func (s *ParameterNameValue) SetParameterValue(v string) *ParameterNameValue { - s.ParameterValue = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/RebootNodeRequest -type RebootNodeInput struct { - _ struct{} `type:"structure"` - - // The name of the DAX cluster containing the node to be rebooted. - // - // ClusterName is a required field - ClusterName *string `type:"string" required:"true"` - - // The system-assigned ID of the node to be rebooted. 
- // - // NodeId is a required field - NodeId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s RebootNodeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RebootNodeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RebootNodeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RebootNodeInput"} - if s.ClusterName == nil { - invalidParams.Add(request.NewErrParamRequired("ClusterName")) - } - if s.NodeId == nil { - invalidParams.Add(request.NewErrParamRequired("NodeId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClusterName sets the ClusterName field's value. -func (s *RebootNodeInput) SetClusterName(v string) *RebootNodeInput { - s.ClusterName = &v - return s -} - -// SetNodeId sets the NodeId field's value. -func (s *RebootNodeInput) SetNodeId(v string) *RebootNodeInput { - s.NodeId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/RebootNodeResponse -type RebootNodeOutput struct { - _ struct{} `type:"structure"` - - // A description of the DAX cluster after a node has been rebooted. - Cluster *Cluster `type:"structure"` -} - -// String returns the string representation -func (s RebootNodeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RebootNodeOutput) GoString() string { - return s.String() -} - -// SetCluster sets the Cluster field's value. -func (s *RebootNodeOutput) SetCluster(v *Cluster) *RebootNodeOutput { - s.Cluster = v - return s -} - -// An individual VPC security group and its status. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/SecurityGroupMembership -type SecurityGroupMembership struct { - _ struct{} `type:"structure"` - - // The unique ID for this security group. - SecurityGroupIdentifier *string `type:"string"` - - // The status of this security group. - Status *string `type:"string"` -} - -// String returns the string representation -func (s SecurityGroupMembership) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SecurityGroupMembership) GoString() string { - return s.String() -} - -// SetSecurityGroupIdentifier sets the SecurityGroupIdentifier field's value. -func (s *SecurityGroupMembership) SetSecurityGroupIdentifier(v string) *SecurityGroupMembership { - s.SecurityGroupIdentifier = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *SecurityGroupMembership) SetStatus(v string) *SecurityGroupMembership { - s.Status = &v - return s -} - -// Represents the subnet associated with a DAX cluster. This parameter refers -// to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used -// with DAX. -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Subnet -type Subnet struct { - _ struct{} `type:"structure"` - - // The Availability Zone (AZ) for subnet subnet. - SubnetAvailabilityZone *string `type:"string"` - - // The system-assigned identifier for the subnet. - SubnetIdentifier *string `type:"string"` -} - -// String returns the string representation -func (s Subnet) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Subnet) GoString() string { - return s.String() -} - -// SetSubnetAvailabilityZone sets the SubnetAvailabilityZone field's value. -func (s *Subnet) SetSubnetAvailabilityZone(v string) *Subnet { - s.SubnetAvailabilityZone = &v - return s -} - -// SetSubnetIdentifier sets the SubnetIdentifier field's value. 
-func (s *Subnet) SetSubnetIdentifier(v string) *Subnet { - s.SubnetIdentifier = &v - return s -} - -// Represents the output of one of the following actions: -// -// * CreateSubnetGroup -// -// * ModifySubnetGroup -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/SubnetGroup -type SubnetGroup struct { - _ struct{} `type:"structure"` - - // The description of the subnet group. - Description *string `type:"string"` - - // The name of the subnet group. - SubnetGroupName *string `type:"string"` - - // A list of subnets associated with the subnet group. - Subnets []*Subnet `type:"list"` - - // The Amazon Virtual Private Cloud identifier (VPC ID) of the subnet group. - VpcId *string `type:"string"` -} - -// String returns the string representation -func (s SubnetGroup) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SubnetGroup) GoString() string { - return s.String() -} - -// SetDescription sets the Description field's value. -func (s *SubnetGroup) SetDescription(v string) *SubnetGroup { - s.Description = &v - return s -} - -// SetSubnetGroupName sets the SubnetGroupName field's value. -func (s *SubnetGroup) SetSubnetGroupName(v string) *SubnetGroup { - s.SubnetGroupName = &v - return s -} - -// SetSubnets sets the Subnets field's value. -func (s *SubnetGroup) SetSubnets(v []*Subnet) *SubnetGroup { - s.Subnets = v - return s -} - -// SetVpcId sets the VpcId field's value. -func (s *SubnetGroup) SetVpcId(v string) *SubnetGroup { - s.VpcId = &v - return s -} - -// A description of a tag. Every tag is a key-value pair. You can add up to -// 50 tags to a single DAX cluster. -// -// AWS-assigned tag names and values are automatically assigned the aws: prefix, -// which the user cannot assign. AWS-assigned tag names do not count towards -// the tag limit of 50. User-assigned tag names have the prefix user:. -// -// You cannot backdate the application of a tag. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/Tag -type Tag struct { - _ struct{} `type:"structure"` - - // The key for the tag. Tag keys are case sensitive. Every DAX cluster can only - // have one tag with the same key. If you try to add an existing tag (same key), - // the existing tag value will be updated to the new value. - Key *string `type:"string"` - - // The value of the tag. Tag values are case-sensitive and can be null. - Value *string `type:"string"` -} - -// String returns the string representation -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tag) GoString() string { - return s.String() -} - -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/TagResourceRequest -type TagResourceInput struct { - _ struct{} `type:"structure"` - - // The name of the DAX resource to which tags should be added. - // - // ResourceName is a required field - ResourceName *string `type:"string" required:"true"` - - // The tags to be assigned to the DAX resource. - // - // Tags is a required field - Tags []*Tag `type:"list" required:"true"` -} - -// String returns the string representation -func (s TagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *TagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} - if s.ResourceName == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceName")) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceName sets the ResourceName field's value. -func (s *TagResourceInput) SetResourceName(v string) *TagResourceInput { - s.ResourceName = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { - s.Tags = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/TagResourceResponse -type TagResourceOutput struct { - _ struct{} `type:"structure"` - - // The list of tags that are associated with the DAX resource. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s TagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TagResourceOutput) GoString() string { - return s.String() -} - -// SetTags sets the Tags field's value. -func (s *TagResourceOutput) SetTags(v []*Tag) *TagResourceOutput { - s.Tags = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UntagResourceRequest -type UntagResourceInput struct { - _ struct{} `type:"structure"` - - // The name of the DAX resource from which the tags should be removed. - // - // ResourceName is a required field - ResourceName *string `type:"string" required:"true"` - - // A list of tag keys. If the DAX cluster has any tags with these keys, then - // the tags are removed from the cluster. 
- // - // TagKeys is a required field - TagKeys []*string `type:"list" required:"true"` -} - -// String returns the string representation -func (s UntagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UntagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UntagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} - if s.ResourceName == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceName")) - } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceName sets the ResourceName field's value. -func (s *UntagResourceInput) SetResourceName(v string) *UntagResourceInput { - s.ResourceName = &v - return s -} - -// SetTagKeys sets the TagKeys field's value. -func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { - s.TagKeys = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UntagResourceResponse -type UntagResourceOutput struct { - _ struct{} `type:"structure"` - - // The tag keys that have been removed from the cluster. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s UntagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UntagResourceOutput) GoString() string { - return s.String() -} - -// SetTags sets the Tags field's value. -func (s *UntagResourceOutput) SetTags(v []*Tag) *UntagResourceOutput { - s.Tags = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateClusterRequest -type UpdateClusterInput struct { - _ struct{} `type:"structure"` - - // The name of the DAX cluster to be modified. 
- // - // ClusterName is a required field - ClusterName *string `type:"string" required:"true"` - - // A description of the changes being made to the cluster. - Description *string `type:"string"` - - // The Amazon Resource Name (ARN) that identifies the topic. - NotificationTopicArn *string `type:"string"` - - // The current state of the topic. - NotificationTopicStatus *string `type:"string"` - - // The name of a parameter group for this cluster. - ParameterGroupName *string `type:"string"` - - // A range of time when maintenance of DAX cluster software will be performed. - // For example: sun:01:00-sun:09:00. Cluster maintenance normally takes less - // than 30 minutes, and is performed automatically within the maintenance window. - PreferredMaintenanceWindow *string `type:"string"` - - // A list of user-specified security group IDs to be assigned to each node in - // the DAX cluster. If this parameter is not specified, DAX assigns the default - // VPC security group to each node. - SecurityGroupIds []*string `type:"list"` -} - -// String returns the string representation -func (s UpdateClusterInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateClusterInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateClusterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateClusterInput"} - if s.ClusterName == nil { - invalidParams.Add(request.NewErrParamRequired("ClusterName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClusterName sets the ClusterName field's value. -func (s *UpdateClusterInput) SetClusterName(v string) *UpdateClusterInput { - s.ClusterName = &v - return s -} - -// SetDescription sets the Description field's value. 
-func (s *UpdateClusterInput) SetDescription(v string) *UpdateClusterInput { - s.Description = &v - return s -} - -// SetNotificationTopicArn sets the NotificationTopicArn field's value. -func (s *UpdateClusterInput) SetNotificationTopicArn(v string) *UpdateClusterInput { - s.NotificationTopicArn = &v - return s -} - -// SetNotificationTopicStatus sets the NotificationTopicStatus field's value. -func (s *UpdateClusterInput) SetNotificationTopicStatus(v string) *UpdateClusterInput { - s.NotificationTopicStatus = &v - return s -} - -// SetParameterGroupName sets the ParameterGroupName field's value. -func (s *UpdateClusterInput) SetParameterGroupName(v string) *UpdateClusterInput { - s.ParameterGroupName = &v - return s -} - -// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. -func (s *UpdateClusterInput) SetPreferredMaintenanceWindow(v string) *UpdateClusterInput { - s.PreferredMaintenanceWindow = &v - return s -} - -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *UpdateClusterInput) SetSecurityGroupIds(v []*string) *UpdateClusterInput { - s.SecurityGroupIds = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateClusterResponse -type UpdateClusterOutput struct { - _ struct{} `type:"structure"` - - // A description of the DAX cluster, after it has been modified. - Cluster *Cluster `type:"structure"` -} - -// String returns the string representation -func (s UpdateClusterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateClusterOutput) GoString() string { - return s.String() -} - -// SetCluster sets the Cluster field's value. 
-func (s *UpdateClusterOutput) SetCluster(v *Cluster) *UpdateClusterOutput { - s.Cluster = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateParameterGroupRequest -type UpdateParameterGroupInput struct { - _ struct{} `type:"structure"` - - // The name of the parameter group. - // - // ParameterGroupName is a required field - ParameterGroupName *string `type:"string" required:"true"` - - // An array of name-value pairs for the parameters in the group. Each element - // in the array represents a single parameter. - // - // ParameterNameValues is a required field - ParameterNameValues []*ParameterNameValue `type:"list" required:"true"` -} - -// String returns the string representation -func (s UpdateParameterGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateParameterGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateParameterGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateParameterGroupInput"} - if s.ParameterGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("ParameterGroupName")) - } - if s.ParameterNameValues == nil { - invalidParams.Add(request.NewErrParamRequired("ParameterNameValues")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetParameterGroupName sets the ParameterGroupName field's value. -func (s *UpdateParameterGroupInput) SetParameterGroupName(v string) *UpdateParameterGroupInput { - s.ParameterGroupName = &v - return s -} - -// SetParameterNameValues sets the ParameterNameValues field's value. 
-func (s *UpdateParameterGroupInput) SetParameterNameValues(v []*ParameterNameValue) *UpdateParameterGroupInput { - s.ParameterNameValues = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateParameterGroupResponse -type UpdateParameterGroupOutput struct { - _ struct{} `type:"structure"` - - // The parameter group that has been modified. - ParameterGroup *ParameterGroup `type:"structure"` -} - -// String returns the string representation -func (s UpdateParameterGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateParameterGroupOutput) GoString() string { - return s.String() -} - -// SetParameterGroup sets the ParameterGroup field's value. -func (s *UpdateParameterGroupOutput) SetParameterGroup(v *ParameterGroup) *UpdateParameterGroupOutput { - s.ParameterGroup = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateSubnetGroupRequest -type UpdateSubnetGroupInput struct { - _ struct{} `type:"structure"` - - // A description of the subnet group. - Description *string `type:"string"` - - // The name of the subnet group. - // - // SubnetGroupName is a required field - SubnetGroupName *string `type:"string" required:"true"` - - // A list of subnet IDs in the subnet group. - SubnetIds []*string `type:"list"` -} - -// String returns the string representation -func (s UpdateSubnetGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateSubnetGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateSubnetGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateSubnetGroupInput"} - if s.SubnetGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("SubnetGroupName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *UpdateSubnetGroupInput) SetDescription(v string) *UpdateSubnetGroupInput { - s.Description = &v - return s -} - -// SetSubnetGroupName sets the SubnetGroupName field's value. -func (s *UpdateSubnetGroupInput) SetSubnetGroupName(v string) *UpdateSubnetGroupInput { - s.SubnetGroupName = &v - return s -} - -// SetSubnetIds sets the SubnetIds field's value. -func (s *UpdateSubnetGroupInput) SetSubnetIds(v []*string) *UpdateSubnetGroupInput { - s.SubnetIds = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateSubnetGroupResponse -type UpdateSubnetGroupOutput struct { - _ struct{} `type:"structure"` - - // The subnet group that has been modified. - SubnetGroup *SubnetGroup `type:"structure"` -} - -// String returns the string representation -func (s UpdateSubnetGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateSubnetGroupOutput) GoString() string { - return s.String() -} - -// SetSubnetGroup sets the SubnetGroup field's value. 
-func (s *UpdateSubnetGroupOutput) SetSubnetGroup(v *SubnetGroup) *UpdateSubnetGroupOutput { - s.SubnetGroup = v - return s -} - -const ( - // ChangeTypeImmediate is a ChangeType enum value - ChangeTypeImmediate = "IMMEDIATE" - - // ChangeTypeRequiresReboot is a ChangeType enum value - ChangeTypeRequiresReboot = "REQUIRES_REBOOT" -) - -const ( - // IsModifiableTrue is a IsModifiable enum value - IsModifiableTrue = "TRUE" - - // IsModifiableFalse is a IsModifiable enum value - IsModifiableFalse = "FALSE" - - // IsModifiableConditional is a IsModifiable enum value - IsModifiableConditional = "CONDITIONAL" -) - -const ( - // ParameterTypeDefault is a ParameterType enum value - ParameterTypeDefault = "DEFAULT" - - // ParameterTypeNodeTypeSpecific is a ParameterType enum value - ParameterTypeNodeTypeSpecific = "NODE_TYPE_SPECIFIC" -) - -const ( - // SourceTypeCluster is a SourceType enum value - SourceTypeCluster = "CLUSTER" - - // SourceTypeParameterGroup is a SourceType enum value - SourceTypeParameterGroup = "PARAMETER_GROUP" - - // SourceTypeSubnetGroup is a SourceType enum value - SourceTypeSubnetGroup = "SUBNET_GROUP" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dax/doc.go deleted file mode 100644 index a3b25ccbd5a..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dax/doc.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package dax provides the client and types for making API -// requests to Amazon DynamoDB Accelerator (DAX). -// -// DAX is a managed caching service engineered for Amazon DynamoDB. DAX dramatically -// speeds up database reads by caching frequently-accessed data from DynamoDB, -// so applications can access that data with sub-millisecond latency. You can -// create a DAX cluster easily, using the AWS Management Console. 
With a few -// simple modifications to your code, your application can begin taking advantage -// of the DAX cluster and realize significant improvements in read performance. -// -// See https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19 for more information on this service. -// -// See dax package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/dax/ -// -// Using the Client -// -// To contact Amazon DynamoDB Accelerator (DAX) with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon DynamoDB Accelerator (DAX) client DAX for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/dax/#New -package dax diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dax/errors.go deleted file mode 100644 index 24aaf1a2327..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dax/errors.go +++ /dev/null @@ -1,160 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package dax - -const ( - - // ErrCodeClusterAlreadyExistsFault for service response error code - // "ClusterAlreadyExistsFault". - // - // You already have a DAX cluster with the given identifier. - ErrCodeClusterAlreadyExistsFault = "ClusterAlreadyExistsFault" - - // ErrCodeClusterNotFoundFault for service response error code - // "ClusterNotFoundFault". - // - // The requested cluster ID does not refer to an existing DAX cluster. 
- ErrCodeClusterNotFoundFault = "ClusterNotFoundFault" - - // ErrCodeClusterQuotaForCustomerExceededFault for service response error code - // "ClusterQuotaForCustomerExceededFault". - // - // You have attempted to exceed the maximum number of DAX clusters for your - // AWS account. - ErrCodeClusterQuotaForCustomerExceededFault = "ClusterQuotaForCustomerExceededFault" - - // ErrCodeInsufficientClusterCapacityFault for service response error code - // "InsufficientClusterCapacityFault". - // - // There are not enough system resources to create the cluster you requested - // (or to resize an already-existing cluster). - ErrCodeInsufficientClusterCapacityFault = "InsufficientClusterCapacityFault" - - // ErrCodeInvalidARNFault for service response error code - // "InvalidARNFault". - // - // The Amazon Resource Name (ARN) supplied in the request is not valid. - ErrCodeInvalidARNFault = "InvalidARNFault" - - // ErrCodeInvalidClusterStateFault for service response error code - // "InvalidClusterStateFault". - // - // The requested DAX cluster is not in the available state. - ErrCodeInvalidClusterStateFault = "InvalidClusterStateFault" - - // ErrCodeInvalidParameterCombinationException for service response error code - // "InvalidParameterCombinationException". - // - // Two or more incompatible parameters were specified. - ErrCodeInvalidParameterCombinationException = "InvalidParameterCombinationException" - - // ErrCodeInvalidParameterGroupStateFault for service response error code - // "InvalidParameterGroupStateFault". - // - // One or more parameters in a parameter group are in an invalid state. - ErrCodeInvalidParameterGroupStateFault = "InvalidParameterGroupStateFault" - - // ErrCodeInvalidParameterValueException for service response error code - // "InvalidParameterValueException". - // - // The value for a parameter is invalid. 
- ErrCodeInvalidParameterValueException = "InvalidParameterValueException" - - // ErrCodeInvalidSubnet for service response error code - // "InvalidSubnet". - // - // An invalid subnet identifier was specified. - ErrCodeInvalidSubnet = "InvalidSubnet" - - // ErrCodeInvalidVPCNetworkStateFault for service response error code - // "InvalidVPCNetworkStateFault". - // - // The VPC network is in an invalid state. - ErrCodeInvalidVPCNetworkStateFault = "InvalidVPCNetworkStateFault" - - // ErrCodeNodeNotFoundFault for service response error code - // "NodeNotFoundFault". - // - // None of the nodes in the cluster have the given node ID. - ErrCodeNodeNotFoundFault = "NodeNotFoundFault" - - // ErrCodeNodeQuotaForClusterExceededFault for service response error code - // "NodeQuotaForClusterExceededFault". - // - // You have attempted to exceed the maximum number of nodes for a DAX cluster. - ErrCodeNodeQuotaForClusterExceededFault = "NodeQuotaForClusterExceededFault" - - // ErrCodeNodeQuotaForCustomerExceededFault for service response error code - // "NodeQuotaForCustomerExceededFault". - // - // You have attempted to exceed the maximum number of nodes for your AWS account. - ErrCodeNodeQuotaForCustomerExceededFault = "NodeQuotaForCustomerExceededFault" - - // ErrCodeParameterGroupAlreadyExistsFault for service response error code - // "ParameterGroupAlreadyExistsFault". - // - // The specified parameter group already exists. - ErrCodeParameterGroupAlreadyExistsFault = "ParameterGroupAlreadyExistsFault" - - // ErrCodeParameterGroupNotFoundFault for service response error code - // "ParameterGroupNotFoundFault". - // - // The specified parameter group does not exist. - ErrCodeParameterGroupNotFoundFault = "ParameterGroupNotFoundFault" - - // ErrCodeParameterGroupQuotaExceededFault for service response error code - // "ParameterGroupQuotaExceededFault". - // - // You have attempted to exceed the maximum number of parameter groups. 
- ErrCodeParameterGroupQuotaExceededFault = "ParameterGroupQuotaExceededFault" - - // ErrCodeSubnetGroupAlreadyExistsFault for service response error code - // "SubnetGroupAlreadyExistsFault". - // - // The specified subnet group already exists. - ErrCodeSubnetGroupAlreadyExistsFault = "SubnetGroupAlreadyExistsFault" - - // ErrCodeSubnetGroupInUseFault for service response error code - // "SubnetGroupInUseFault". - // - // The specified subnet group is currently in use. - ErrCodeSubnetGroupInUseFault = "SubnetGroupInUseFault" - - // ErrCodeSubnetGroupNotFoundFault for service response error code - // "SubnetGroupNotFoundFault". - // - // The requested subnet group name does not refer to an existing subnet group. - ErrCodeSubnetGroupNotFoundFault = "SubnetGroupNotFoundFault" - - // ErrCodeSubnetGroupQuotaExceededFault for service response error code - // "SubnetGroupQuotaExceededFault". - // - // The request cannot be processed because it would exceed the allowed number - // of subnets in a subnet group. - ErrCodeSubnetGroupQuotaExceededFault = "SubnetGroupQuotaExceededFault" - - // ErrCodeSubnetInUse for service response error code - // "SubnetInUse". - // - // The requested subnet is being used by another subnet group. - ErrCodeSubnetInUse = "SubnetInUse" - - // ErrCodeSubnetQuotaExceededFault for service response error code - // "SubnetQuotaExceededFault". - // - // The request cannot be processed because it would exceed the allowed number - // of subnets in a subnet group. - ErrCodeSubnetQuotaExceededFault = "SubnetQuotaExceededFault" - - // ErrCodeTagNotFoundFault for service response error code - // "TagNotFoundFault". - // - // The tag does not exist. - ErrCodeTagNotFoundFault = "TagNotFoundFault" - - // ErrCodeTagQuotaPerResourceExceeded for service response error code - // "TagQuotaPerResourceExceeded". - // - // You have exceeded the maximum number of tags for this DAX cluster. 
- ErrCodeTagQuotaPerResourceExceeded = "TagQuotaPerResourceExceeded" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/service.go b/vendor/github.com/aws/aws-sdk-go/service/dax/service.go deleted file mode 100644 index a80ed1441a0..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dax/service.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package dax - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// DAX provides the API operation methods for making requests to -// Amazon DynamoDB Accelerator (DAX). See this package's package overview docs -// for details on the service. -// -// DAX methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type DAX struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "dax" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the DAX client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a DAX client from just a session. -// svc := dax.New(mySession) -// -// // Create a DAX client with additional configuration -// svc := dax.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *DAX { - c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DAX { - svc := &DAX{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2017-04-19", - JSONVersion: "1.1", - TargetPrefix: "AmazonDAXV3", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a DAX operation and runs any -// custom request initialization. -func (c *DAX) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go deleted file mode 100644 index 7aa6023571f..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go +++ /dev/null @@ -1,19092 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package gamelift - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -const opAcceptMatch = "AcceptMatch" - -// AcceptMatchRequest generates a "aws/request.Request" representing the -// client's request for the AcceptMatch operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AcceptMatch for more information on using the AcceptMatch -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AcceptMatchRequest method. -// req, resp := client.AcceptMatchRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AcceptMatch -func (c *GameLift) AcceptMatchRequest(input *AcceptMatchInput) (req *request.Request, output *AcceptMatchOutput) { - op := &request.Operation{ - Name: opAcceptMatch, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AcceptMatchInput{} - } - - output = &AcceptMatchOutput{} - req = c.newRequest(op, input, output) - return -} - -// AcceptMatch API operation for Amazon GameLift. -// -// Registers a player's acceptance or rejection of a proposed FlexMatch match. 
-// A matchmaking configuration may require player acceptance; if so, then matches -// built with that configuration cannot be completed unless all players accept -// the proposed match within a specified time limit. -// -// When FlexMatch builds a match, all the matchmaking tickets involved in the -// proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger -// for your game to get acceptance from all players in the ticket. Acceptances -// are only valid for tickets when they are in this status; all other acceptances -// result in an error. -// -// To register acceptance, specify the ticket ID, a response, and one or more -// players. Once all players have registered acceptance, the matchmaking tickets -// advance to status PLACING, where a new game session is created for the match. -// -// If any player rejects the match, or if acceptances are not received before -// a specified timeout, the proposed match is dropped. The matchmaking tickets -// are then handled in one of two ways: For tickets where all players accepted -// the match, the ticket status is returned to SEARCHING to find a new match. -// For tickets where one or more players failed to accept the match, the ticket -// status is set to FAILED, and processing is terminated. A new matchmaking -// request for these players can be submitted as needed. -// -// Matchmaking-related operations include: -// -// * StartMatchmaking -// -// * DescribeMatchmaking -// -// * StopMatchmaking -// -// * AcceptMatch -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation AcceptMatch for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. 
Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" -// The requested operation is not supported in the region specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AcceptMatch -func (c *GameLift) AcceptMatch(input *AcceptMatchInput) (*AcceptMatchOutput, error) { - req, out := c.AcceptMatchRequest(input) - return out, req.Send() -} - -// AcceptMatchWithContext is the same as AcceptMatch with the addition of -// the ability to pass a context and additional request options. -// -// See AcceptMatch for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) AcceptMatchWithContext(ctx aws.Context, input *AcceptMatchInput, opts ...request.Option) (*AcceptMatchOutput, error) { - req, out := c.AcceptMatchRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateAlias = "CreateAlias" - -// CreateAliasRequest generates a "aws/request.Request" representing the -// client's request for the CreateAlias operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See CreateAlias for more information on using the CreateAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateAliasRequest method. -// req, resp := client.CreateAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateAlias -func (c *GameLift) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) { - op := &request.Operation{ - Name: opCreateAlias, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateAliasInput{} - } - - output = &CreateAliasOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateAlias API operation for Amazon GameLift. -// -// Creates an alias for a fleet. In most situations, you can use an alias ID -// in place of a fleet ID. By using a fleet alias instead of a specific fleet -// ID, you can switch gameplay and players to a new fleet without changing your -// game client or other game components. For example, for games in production, -// using an alias allows you to seamlessly redirect your player base to a new -// game server update. -// -// Amazon GameLift supports two types of routing strategies for aliases: simple -// and terminal. A simple alias points to an active fleet. A terminal alias -// is used to display messaging or link to a URL instead of routing players -// to an active fleet. For example, you might use a terminal alias when a game -// version is no longer supported and you want to direct players to an upgrade -// site. 
-// -// To create a fleet alias, specify an alias name, routing strategy, and optional -// description. Each simple alias can point to only one fleet, but a fleet can -// have multiple aliases. If successful, a new alias record is returned, including -// an alias ID, which you can reference when creating a game session. You can -// reassign an alias to another fleet by calling UpdateAlias. -// -// Alias-related operations include: -// -// * CreateAlias -// -// * ListAliases -// -// * DescribeAlias -// -// * UpdateAlias -// -// * DeleteAlias -// -// * ResolveAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation CreateAlias for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeConflictException "ConflictException" -// The requested operation would cause a conflict with the current state of -// a service resource associated with the request. Resolve the conflict before -// retrying this request. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The requested operation would cause the resource to exceed the allowed service -// limit. Resolve the issue before retrying. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateAlias -func (c *GameLift) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) { - req, out := c.CreateAliasRequest(input) - return out, req.Send() -} - -// CreateAliasWithContext is the same as CreateAlias with the addition of -// the ability to pass a context and additional request options. -// -// See CreateAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) CreateAliasWithContext(ctx aws.Context, input *CreateAliasInput, opts ...request.Option) (*CreateAliasOutput, error) { - req, out := c.CreateAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateBuild = "CreateBuild" - -// CreateBuildRequest generates a "aws/request.Request" representing the -// client's request for the CreateBuild operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateBuild for more information on using the CreateBuild -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateBuildRequest method. 
-// req, resp := client.CreateBuildRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateBuild -func (c *GameLift) CreateBuildRequest(input *CreateBuildInput) (req *request.Request, output *CreateBuildOutput) { - op := &request.Operation{ - Name: opCreateBuild, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateBuildInput{} - } - - output = &CreateBuildOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateBuild API operation for Amazon GameLift. -// -// Creates a new Amazon GameLift build from a set of game server binary files -// stored in an Amazon Simple Storage Service (Amazon S3) location. To use this -// API call, create a .zip file containing all of the files for the build and -// store it in an Amazon S3 bucket under your AWS account. For help on packaging -// your build files and creating a build, see Uploading Your Game to Amazon -// GameLift (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html). -// -// Use this API action ONLY if you are storing your game build files in an Amazon -// S3 bucket. To create a build using files stored locally, use the CLI command -// upload-build (http://docs.aws.amazon.com/cli/latest/reference/gamelift/upload-build.html), -// which uploads the build files from a file location you specify. -// -// To create a new build using CreateBuild, identify the storage location and -// operating system of your game build. You also have the option of specifying -// a build name and version. If successful, this action creates a new build -// record with an unique build ID and in INITIALIZED status. Use the API call -// DescribeBuild to check the status of your build. A build must be in READY -// status before it can be used to create fleets to host your game. 
-// -// Build-related operations include: -// -// * CreateBuild -// -// * ListBuilds -// -// * DescribeBuild -// -// * UpdateBuild -// -// * DeleteBuild -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation CreateBuild for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeConflictException "ConflictException" -// The requested operation would cause a conflict with the current state of -// a service resource associated with the request. Resolve the conflict before -// retrying this request. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateBuild -func (c *GameLift) CreateBuild(input *CreateBuildInput) (*CreateBuildOutput, error) { - req, out := c.CreateBuildRequest(input) - return out, req.Send() -} - -// CreateBuildWithContext is the same as CreateBuild with the addition of -// the ability to pass a context and additional request options. -// -// See CreateBuild for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) CreateBuildWithContext(ctx aws.Context, input *CreateBuildInput, opts ...request.Option) (*CreateBuildOutput, error) { - req, out := c.CreateBuildRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateFleet = "CreateFleet" - -// CreateFleetRequest generates a "aws/request.Request" representing the -// client's request for the CreateFleet operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateFleet for more information on using the CreateFleet -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateFleetRequest method. -// req, resp := client.CreateFleetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateFleet -func (c *GameLift) CreateFleetRequest(input *CreateFleetInput) (req *request.Request, output *CreateFleetOutput) { - op := &request.Operation{ - Name: opCreateFleet, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateFleetInput{} - } - - output = &CreateFleetOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateFleet API operation for Amazon GameLift. -// -// Creates a new fleet to run your game servers. A fleet is a set of Amazon -// Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple -// server processes to host game sessions. 
You configure a fleet to create instances -// with certain hardware specifications (see Amazon EC2 Instance Types (http://aws.amazon.com/ec2/instance-types/) -// for more information), and deploy a specified game build to each instance. -// A newly created fleet passes through several statuses; once it reaches the -// ACTIVE status, it can begin hosting game sessions. -// -// To create a new fleet, you must specify the following: (1) fleet name, (2) -// build ID of an uploaded game build, (3) an EC2 instance type, and (4) a run-time -// configuration that describes which server processes to run on each instance -// in the fleet. (Although the run-time configuration is not a required parameter, -// the fleet cannot be successfully activated without it.) -// -// You can also configure the new fleet with the following settings: -// -// * Fleet description -// -// * Access permissions for inbound traffic -// -// * Fleet-wide game session protection -// -// * Resource creation limit -// -// If you use Amazon CloudWatch for metrics, you can add the new fleet to a -// metric group. This allows you to view aggregated metrics for a set of fleets. -// Once you specify a metric group, the new fleet's metrics are included in -// the metric group's data. -// -// You have the option of creating a VPC peering connection with the new fleet. -// For more information, see VPC Peering with Amazon GameLift Fleets (http://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html). -// -// If the CreateFleet call is successful, Amazon GameLift performs the following -// tasks: -// -// * Creates a fleet record and sets the status to NEW (followed by other -// statuses as the fleet is activated). -// -// * Sets the fleet's target capacity to 1 (desired instances), which causes -// Amazon GameLift to start one new EC2 instance. -// -// * Starts launching server processes on the instance. 
If the fleet is configured -// to run multiple server processes per instance, Amazon GameLift staggers -// each launch by a few seconds. -// -// * Begins writing events to the fleet event log, which can be accessed -// in the Amazon GameLift console. -// -// * Sets the fleet's status to ACTIVE as soon as one server process in the -// fleet is ready to host a game session. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation CreateFleet for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. 
-// -// * ErrCodeConflictException "ConflictException" -// The requested operation would cause a conflict with the current state of -// a service resource associated with the request. Resolve the conflict before -// retrying this request. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The requested operation would cause the resource to exceed the allowed service -// limit. Resolve the issue before retrying. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateFleet -func (c *GameLift) CreateFleet(input *CreateFleetInput) (*CreateFleetOutput, error) { - req, out := c.CreateFleetRequest(input) - return out, req.Send() -} - -// CreateFleetWithContext is the same as CreateFleet with the addition of -// the ability to pass a context and additional request options. -// -// See CreateFleet for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) CreateFleetWithContext(ctx aws.Context, input *CreateFleetInput, opts ...request.Option) (*CreateFleetOutput, error) { - req, out := c.CreateFleetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateGameSession = "CreateGameSession" - -// CreateGameSessionRequest generates a "aws/request.Request" representing the -// client's request for the CreateGameSession operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateGameSession for more information on using the CreateGameSession -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateGameSessionRequest method. -// req, resp := client.CreateGameSessionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSession -func (c *GameLift) CreateGameSessionRequest(input *CreateGameSessionInput) (req *request.Request, output *CreateGameSessionOutput) { - op := &request.Operation{ - Name: opCreateGameSession, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateGameSessionInput{} - } - - output = &CreateGameSessionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateGameSession API operation for Amazon GameLift. -// -// Creates a multiplayer game session for players. This action creates a game -// session record and assigns an available server process in the specified fleet -// to host the game session. A fleet must have an ACTIVE status before a game -// session can be created in it. -// -// To create a game session, specify either fleet ID or alias ID and indicate -// a maximum number of players to allow in the game session. You can also provide -// a name and game-specific properties for this game session. If successful, -// a GameSession object is returned containing the game session properties and -// other settings you specified. 
-// -// Idempotency tokens. You can add a token that uniquely identifies game session -// requests. This is useful for ensuring that game session requests are idempotent. -// Multiple requests with the same idempotency token are processed only once; -// subsequent requests return the original result. All response values are the -// same with the exception of game session status, which may change. -// -// Resource creation limits. If you are creating a game session on a fleet with -// a resource creation limit policy in force, then you must specify a creator -// ID. Without this ID, Amazon GameLift has no way to evaluate the policy for -// this new game session request. -// -// Player acceptance policy. By default, newly created game sessions are open -// to new players. You can restrict new player access by using UpdateGameSession -// to change the game session's player session creation policy. -// -// Game session logs. Logs are retained for all active game sessions for 14 -// days. To access the logs, call GetGameSessionLogUrl to download the log files. -// -// Available in Amazon GameLift Local. -// -// Game-session-related operations include: -// -// * CreateGameSession -// -// * DescribeGameSessions -// -// * DescribeGameSessionDetails -// -// * SearchGameSessions -// -// * UpdateGameSession -// -// * GetGameSessionLogUrl -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation CreateGameSession for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeConflictException "ConflictException" -// The requested operation would cause a conflict with the current state of -// a service resource associated with the request. Resolve the conflict before -// retrying this request. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException" -// The requested operation would cause a conflict with the current state of -// a resource associated with the request and/or the fleet. Resolve the conflict -// before retrying. -// -// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" -// The service is unable to resolve the routing for a particular alias because -// it has a terminal RoutingStrategy associated with it. The message returned -// in this exception is the message defined in the routing strategy itself. -// Such requests should only be retried if the routing strategy for the specified -// alias is modified. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeFleetCapacityExceededException "FleetCapacityExceededException" -// The specified fleet has no available instances to fulfill a CreateGameSession -// request. Clients can retry such requests immediately or after a waiting period. 
-// -// * ErrCodeLimitExceededException "LimitExceededException" -// The requested operation would cause the resource to exceed the allowed service -// limit. Resolve the issue before retrying. -// -// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException" -// A game session with this custom ID string already exists in this fleet. Resolve -// this conflict before retrying this request. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSession -func (c *GameLift) CreateGameSession(input *CreateGameSessionInput) (*CreateGameSessionOutput, error) { - req, out := c.CreateGameSessionRequest(input) - return out, req.Send() -} - -// CreateGameSessionWithContext is the same as CreateGameSession with the addition of -// the ability to pass a context and additional request options. -// -// See CreateGameSession for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) CreateGameSessionWithContext(ctx aws.Context, input *CreateGameSessionInput, opts ...request.Option) (*CreateGameSessionOutput, error) { - req, out := c.CreateGameSessionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateGameSessionQueue = "CreateGameSessionQueue" - -// CreateGameSessionQueueRequest generates a "aws/request.Request" representing the -// client's request for the CreateGameSessionQueue operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See CreateGameSessionQueue for more information on using the CreateGameSessionQueue -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateGameSessionQueueRequest method. -// req, resp := client.CreateGameSessionQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionQueue -func (c *GameLift) CreateGameSessionQueueRequest(input *CreateGameSessionQueueInput) (req *request.Request, output *CreateGameSessionQueueOutput) { - op := &request.Operation{ - Name: opCreateGameSessionQueue, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateGameSessionQueueInput{} - } - - output = &CreateGameSessionQueueOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateGameSessionQueue API operation for Amazon GameLift. -// -// Establishes a new queue for processing requests to place new game sessions. -// A queue identifies where new game sessions can be hosted -- by specifying -// a list of destinations (fleets or aliases) -- and how long requests can wait -// in the queue before timing out. You can set up a queue to try to place game -// sessions on fleets in multiple regions. To add placement requests to a queue, -// call StartGameSessionPlacement and reference the queue name. -// -// Destination order. When processing a request for a game session, Amazon GameLift -// tries each destination in order until it finds one with available resources -// to host the new game session. A queue's default order is determined by how -// destinations are listed. The default order is overridden when a game session -// placement request provides player latency information. 
Player latency information -// enables Amazon GameLift to prioritize destinations where players report the -// lowest average latency, as a result placing the new game session where the -// majority of players will have the best possible gameplay experience. -// -// Player latency policies. For placement requests containing player latency -// information, use player latency policies to protect individual players from -// very high latencies. With a latency cap, even when a destination can deliver -// a low latency for most players, the game is not placed where any individual -// player is reporting latency higher than a policy's maximum. A queue can have -// multiple latency policies, which are enforced consecutively starting with -// the policy with the lowest latency cap. Use multiple policies to gradually -// relax latency controls; for example, you might set a policy with a low latency -// cap for the first 60 seconds, a second policy with a higher cap for the next -// 60 seconds, etc. -// -// To create a new queue, provide a name, timeout value, a list of destinations -// and, if desired, a set of latency policies. If successful, a new queue object -// is returned. -// -// Queue-related operations include: -// -// * CreateGameSessionQueue -// -// * DescribeGameSessionQueues -// -// * UpdateGameSessionQueue -// -// * DeleteGameSessionQueue -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation CreateGameSessionQueue for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. 
-// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The requested operation would cause the resource to exceed the allowed service -// limit. Resolve the issue before retrying. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionQueue -func (c *GameLift) CreateGameSessionQueue(input *CreateGameSessionQueueInput) (*CreateGameSessionQueueOutput, error) { - req, out := c.CreateGameSessionQueueRequest(input) - return out, req.Send() -} - -// CreateGameSessionQueueWithContext is the same as CreateGameSessionQueue with the addition of -// the ability to pass a context and additional request options. -// -// See CreateGameSessionQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) CreateGameSessionQueueWithContext(ctx aws.Context, input *CreateGameSessionQueueInput, opts ...request.Option) (*CreateGameSessionQueueOutput, error) { - req, out := c.CreateGameSessionQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateMatchmakingConfiguration = "CreateMatchmakingConfiguration" - -// CreateMatchmakingConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the CreateMatchmakingConfiguration operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateMatchmakingConfiguration for more information on using the CreateMatchmakingConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateMatchmakingConfigurationRequest method. -// req, resp := client.CreateMatchmakingConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingConfiguration -func (c *GameLift) CreateMatchmakingConfigurationRequest(input *CreateMatchmakingConfigurationInput) (req *request.Request, output *CreateMatchmakingConfigurationOutput) { - op := &request.Operation{ - Name: opCreateMatchmakingConfiguration, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateMatchmakingConfigurationInput{} - } - - output = &CreateMatchmakingConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateMatchmakingConfiguration API operation for Amazon GameLift. -// -// Defines a new matchmaking configuration for use with FlexMatch. A matchmaking -// configuration sets out guidelines for matching players and getting the matches -// into games. You can set up multiple matchmaking configurations to handle -// the scenarios needed for your game. Each matchmaking request (StartMatchmaking) -// specifies a configuration for the match and provides player attributes to -// support the configuration being used. 
-// -// To create a matchmaking configuration, at a minimum you must specify the -// following: configuration name; a rule set that governs how to evaluate players -// and find acceptable matches; a game session queue to use when placing a new -// game session for the match; and the maximum time allowed for a matchmaking -// attempt. -// -// Player acceptance -- In each configuration, you have the option to require -// that all players accept participation in a proposed match. To enable this -// feature, set AcceptanceRequired to true and specify a time limit for player -// acceptance. Players have the option to accept or reject a proposed match, -// and a match does not move ahead to game session placement unless all matched -// players accept. -// -// Matchmaking status notification -- There are two ways to track the progress -// of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; -// or (2) receiving notifications with Amazon Simple Notification Service (SNS). -// To use notifications, you first need to set up an SNS topic to receive the -// notifications, and provide the topic ARN in the matchmaking configuration -// (see Setting up Notifications for Matchmaking (http://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html)). -// Since notifications promise only "best effort" delivery, we recommend calling -// DescribeMatchmaking if no notifications are received within 30 seconds. -// -// Operations related to match configurations and rule sets include: -// -// * CreateMatchmakingConfiguration -// -// * DescribeMatchmakingConfigurations -// -// * UpdateMatchmakingConfiguration -// -// * DeleteMatchmakingConfiguration -// -// * CreateMatchmakingRuleSet -// -// * DescribeMatchmakingRuleSets -// -// * ValidateMatchmakingRuleSet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon GameLift's -// API operation CreateMatchmakingConfiguration for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The requested operation would cause the resource to exceed the allowed service -// limit. Resolve the issue before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" -// The requested operation is not supported in the region specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingConfiguration -func (c *GameLift) CreateMatchmakingConfiguration(input *CreateMatchmakingConfigurationInput) (*CreateMatchmakingConfigurationOutput, error) { - req, out := c.CreateMatchmakingConfigurationRequest(input) - return out, req.Send() -} - -// CreateMatchmakingConfigurationWithContext is the same as CreateMatchmakingConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See CreateMatchmakingConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *GameLift) CreateMatchmakingConfigurationWithContext(ctx aws.Context, input *CreateMatchmakingConfigurationInput, opts ...request.Option) (*CreateMatchmakingConfigurationOutput, error) { - req, out := c.CreateMatchmakingConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateMatchmakingRuleSet = "CreateMatchmakingRuleSet" - -// CreateMatchmakingRuleSetRequest generates a "aws/request.Request" representing the -// client's request for the CreateMatchmakingRuleSet operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateMatchmakingRuleSet for more information on using the CreateMatchmakingRuleSet -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateMatchmakingRuleSetRequest method. -// req, resp := client.CreateMatchmakingRuleSetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingRuleSet -func (c *GameLift) CreateMatchmakingRuleSetRequest(input *CreateMatchmakingRuleSetInput) (req *request.Request, output *CreateMatchmakingRuleSetOutput) { - op := &request.Operation{ - Name: opCreateMatchmakingRuleSet, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateMatchmakingRuleSetInput{} - } - - output = &CreateMatchmakingRuleSetOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateMatchmakingRuleSet API operation for Amazon GameLift. 
-// -// Creates a new rule set for FlexMatch matchmaking. A rule set describes the -// type of match to create, such as the number and size of teams, and sets the -// parameters for acceptable player matches, such as minimum skill level or -// character type. Rule sets are used in matchmaking configurations, which define -// how matchmaking requests are handled. Each MatchmakingConfiguration uses -// one rule set; you can set up multiple rule sets to handle the scenarios that -// suit your game (such as for different game modes), and create a separate -// matchmaking configuration for each rule set. See additional information on -// rule set content in the MatchmakingRuleSet structure. For help creating rule -// sets, including useful examples, see the topic Adding FlexMatch to Your -// Game (http://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html). -// -// Once created, matchmaking rule sets cannot be changed or deleted, so we recommend -// checking the rule set syntax using ValidateMatchmakingRuleSetbefore creating -// the rule set. -// -// To create a matchmaking rule set, provide the set of rules and a unique name. -// Rule sets must be defined in the same region as the matchmaking configuration -// they will be used with. Rule sets cannot be edited or deleted. If you need -// to change a rule set, create a new one with the necessary edits and then -// update matchmaking configurations to use the new rule set. -// -// Operations related to match configurations and rule sets include: -// -// * CreateMatchmakingConfiguration -// -// * DescribeMatchmakingConfigurations -// -// * UpdateMatchmakingConfiguration -// -// * DeleteMatchmakingConfiguration -// -// * CreateMatchmakingRuleSet -// -// * DescribeMatchmakingRuleSets -// -// * ValidateMatchmakingRuleSet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon GameLift's -// API operation CreateMatchmakingRuleSet for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" -// The requested operation is not supported in the region specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingRuleSet -func (c *GameLift) CreateMatchmakingRuleSet(input *CreateMatchmakingRuleSetInput) (*CreateMatchmakingRuleSetOutput, error) { - req, out := c.CreateMatchmakingRuleSetRequest(input) - return out, req.Send() -} - -// CreateMatchmakingRuleSetWithContext is the same as CreateMatchmakingRuleSet with the addition of -// the ability to pass a context and additional request options. -// -// See CreateMatchmakingRuleSet for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) CreateMatchmakingRuleSetWithContext(ctx aws.Context, input *CreateMatchmakingRuleSetInput, opts ...request.Option) (*CreateMatchmakingRuleSetOutput, error) { - req, out := c.CreateMatchmakingRuleSetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opCreatePlayerSession = "CreatePlayerSession" - -// CreatePlayerSessionRequest generates a "aws/request.Request" representing the -// client's request for the CreatePlayerSession operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreatePlayerSession for more information on using the CreatePlayerSession -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreatePlayerSessionRequest method. -// req, resp := client.CreatePlayerSessionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSession -func (c *GameLift) CreatePlayerSessionRequest(input *CreatePlayerSessionInput) (req *request.Request, output *CreatePlayerSessionOutput) { - op := &request.Operation{ - Name: opCreatePlayerSession, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreatePlayerSessionInput{} - } - - output = &CreatePlayerSessionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreatePlayerSession API operation for Amazon GameLift. -// -// Adds a player to a game session and creates a player session record. Before -// a player can be added, a game session must have an ACTIVE status, have a -// creation policy of ALLOW_ALL, and have an open player slot. To add a group -// of players to a game session, use CreatePlayerSessions. 
-// -// To create a player session, specify a game session ID, player ID, and optionally -// a string of player data. If successful, the player is added to the game session -// and a new PlayerSession object is returned. Player sessions cannot be updated. -// -// Available in Amazon GameLift Local. -// -// Player-session-related operations include: -// -// * CreatePlayerSession -// -// * CreatePlayerSessions -// -// * DescribePlayerSessions -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation CreatePlayerSession for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidGameSessionStatusException "InvalidGameSessionStatusException" -// The requested operation would cause a conflict with the current state of -// a resource associated with the request and/or the game instance. Resolve -// the conflict before retrying. -// -// * ErrCodeGameSessionFullException "GameSessionFullException" -// The game instance is currently full and cannot allow the requested player(s) -// to join. Clients can retry such requests immediately or after a waiting period. 
-// -// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" -// The service is unable to resolve the routing for a particular alias because -// it has a terminal RoutingStrategy associated with it. The message returned -// in this exception is the message defined in the routing strategy itself. -// Such requests should only be retried if the routing strategy for the specified -// alias is modified. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSession -func (c *GameLift) CreatePlayerSession(input *CreatePlayerSessionInput) (*CreatePlayerSessionOutput, error) { - req, out := c.CreatePlayerSessionRequest(input) - return out, req.Send() -} - -// CreatePlayerSessionWithContext is the same as CreatePlayerSession with the addition of -// the ability to pass a context and additional request options. -// -// See CreatePlayerSession for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) CreatePlayerSessionWithContext(ctx aws.Context, input *CreatePlayerSessionInput, opts ...request.Option) (*CreatePlayerSessionOutput, error) { - req, out := c.CreatePlayerSessionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opCreatePlayerSessions = "CreatePlayerSessions" - -// CreatePlayerSessionsRequest generates a "aws/request.Request" representing the -// client's request for the CreatePlayerSessions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreatePlayerSessions for more information on using the CreatePlayerSessions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreatePlayerSessionsRequest method. -// req, resp := client.CreatePlayerSessionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessions -func (c *GameLift) CreatePlayerSessionsRequest(input *CreatePlayerSessionsInput) (req *request.Request, output *CreatePlayerSessionsOutput) { - op := &request.Operation{ - Name: opCreatePlayerSessions, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreatePlayerSessionsInput{} - } - - output = &CreatePlayerSessionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreatePlayerSessions API operation for Amazon GameLift. -// -// Adds a group of players to a game session. This action is useful with a team -// matching feature. Before players can be added, a game session must have an -// ACTIVE status, have a creation policy of ALLOW_ALL, and have an open player -// slot. To add a single player to a game session, use CreatePlayerSession. 
-// -// To create player sessions, specify a game session ID, a list of player IDs, -// and optionally a set of player data strings. If successful, the players are -// added to the game session and a set of new PlayerSession objects is returned. -// Player sessions cannot be updated. -// -// Available in Amazon GameLift Local. -// -// Player-session-related operations include: -// -// * CreatePlayerSession -// -// * CreatePlayerSessions -// -// * DescribePlayerSessions -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation CreatePlayerSessions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidGameSessionStatusException "InvalidGameSessionStatusException" -// The requested operation would cause a conflict with the current state of -// a resource associated with the request and/or the game instance. Resolve -// the conflict before retrying. -// -// * ErrCodeGameSessionFullException "GameSessionFullException" -// The game instance is currently full and cannot allow the requested player(s) -// to join. Clients can retry such requests immediately or after a waiting period. 
-// -// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" -// The service is unable to resolve the routing for a particular alias because -// it has a terminal RoutingStrategy associated with it. The message returned -// in this exception is the message defined in the routing strategy itself. -// Such requests should only be retried if the routing strategy for the specified -// alias is modified. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessions -func (c *GameLift) CreatePlayerSessions(input *CreatePlayerSessionsInput) (*CreatePlayerSessionsOutput, error) { - req, out := c.CreatePlayerSessionsRequest(input) - return out, req.Send() -} - -// CreatePlayerSessionsWithContext is the same as CreatePlayerSessions with the addition of -// the ability to pass a context and additional request options. -// -// See CreatePlayerSessions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) CreatePlayerSessionsWithContext(ctx aws.Context, input *CreatePlayerSessionsInput, opts ...request.Option) (*CreatePlayerSessionsOutput, error) { - req, out := c.CreatePlayerSessionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opCreateVpcPeeringAuthorization = "CreateVpcPeeringAuthorization" - -// CreateVpcPeeringAuthorizationRequest generates a "aws/request.Request" representing the -// client's request for the CreateVpcPeeringAuthorization operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateVpcPeeringAuthorization for more information on using the CreateVpcPeeringAuthorization -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateVpcPeeringAuthorizationRequest method. -// req, resp := client.CreateVpcPeeringAuthorizationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringAuthorization -func (c *GameLift) CreateVpcPeeringAuthorizationRequest(input *CreateVpcPeeringAuthorizationInput) (req *request.Request, output *CreateVpcPeeringAuthorizationOutput) { - op := &request.Operation{ - Name: opCreateVpcPeeringAuthorization, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateVpcPeeringAuthorizationInput{} - } - - output = &CreateVpcPeeringAuthorizationOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateVpcPeeringAuthorization API operation for Amazon GameLift. -// -// Requests authorization to create or delete a peer connection between the -// VPC for your Amazon GameLift fleet and a virtual private cloud (VPC) in your -// AWS account. 
VPC peering enables the game servers on your fleet to communicate -// directly with other AWS resources. Once you've received authorization, call -// CreateVpcPeeringConnection to establish the peering connection. For more -// information, see VPC Peering with Amazon GameLift Fleets (http://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html). -// -// You can peer with VPCs that are owned by any AWS account you have access -// to, including the account that you use to manage your Amazon GameLift fleets. -// You cannot peer with VPCs that are in different regions. -// -// To request authorization to create a connection, call this operation from -// the AWS account with the VPC that you want to peer to your Amazon GameLift -// fleet. For example, to enable your game servers to retrieve data from a DynamoDB -// table, use the account that manages that DynamoDB resource. Identify the -// following values: (1) The ID of the VPC that you want to peer with, and (2) -// the ID of the AWS account that you use to manage Amazon GameLift. If successful, -// VPC peering is authorized for the specified VPC. -// -// To request authorization to delete a connection, call this operation from -// the AWS account with the VPC that is peered with your Amazon GameLift fleet. -// Identify the following values: (1) VPC ID that you want to delete the peering -// connection for, and (2) ID of the AWS account that you use to manage Amazon -// GameLift. -// -// The authorization remains valid for 24 hours unless it is canceled by a call -// to DeleteVpcPeeringAuthorization. You must create or delete the peering connection -// while the authorization is valid. 
-// -// VPC peering connection operations include: -// -// * CreateVpcPeeringAuthorization -// -// * DescribeVpcPeeringAuthorizations -// -// * DeleteVpcPeeringAuthorization -// -// * CreateVpcPeeringConnection -// -// * DescribeVpcPeeringConnections -// -// * DeleteVpcPeeringConnection -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation CreateVpcPeeringAuthorization for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringAuthorization -func (c *GameLift) CreateVpcPeeringAuthorization(input *CreateVpcPeeringAuthorizationInput) (*CreateVpcPeeringAuthorizationOutput, error) { - req, out := c.CreateVpcPeeringAuthorizationRequest(input) - return out, req.Send() -} - -// CreateVpcPeeringAuthorizationWithContext is the same as CreateVpcPeeringAuthorization with the addition of -// the ability to pass a context and additional request options. -// -// See CreateVpcPeeringAuthorization for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) CreateVpcPeeringAuthorizationWithContext(ctx aws.Context, input *CreateVpcPeeringAuthorizationInput, opts ...request.Option) (*CreateVpcPeeringAuthorizationOutput, error) { - req, out := c.CreateVpcPeeringAuthorizationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection" - -// CreateVpcPeeringConnectionRequest generates a "aws/request.Request" representing the -// client's request for the CreateVpcPeeringConnection operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateVpcPeeringConnection for more information on using the CreateVpcPeeringConnection -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateVpcPeeringConnectionRequest method. 
-// req, resp := client.CreateVpcPeeringConnectionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringConnection -func (c *GameLift) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectionInput) (req *request.Request, output *CreateVpcPeeringConnectionOutput) { - op := &request.Operation{ - Name: opCreateVpcPeeringConnection, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateVpcPeeringConnectionInput{} - } - - output = &CreateVpcPeeringConnectionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateVpcPeeringConnection API operation for Amazon GameLift. -// -// Establishes a VPC peering connection between a virtual private cloud (VPC) -// in an AWS account with the VPC for your Amazon GameLift fleet. VPC peering -// enables the game servers on your fleet to communicate directly with other -// AWS resources. You can peer with VPCs in any AWS account that you have access -// to, including the account that you use to manage your Amazon GameLift fleets. -// You cannot peer with VPCs that are in different regions. For more information, -// see VPC Peering with Amazon GameLift Fleets (http://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html). -// -// Before calling this operation to establish the peering connection, you first -// need to call CreateVpcPeeringAuthorization and identify the VPC you want -// to peer with. Once the authorization for the specified VPC is issued, you -// have 24 hours to establish the connection. These two operations handle all -// tasks necessary to peer the two VPCs, including acceptance, updating routing -// tables, etc. -// -// To establish the connection, call this operation from the AWS account that -// is used to manage the Amazon GameLift fleets. 
Identify the following values: -// (1) The ID of the fleet you want to be enable a VPC peering connection for; -// (2) The AWS account with the VPC that you want to peer with; and (3) The -// ID of the VPC you want to peer with. This operation is asynchronous. If successful, -// a VpcPeeringConnection request is created. You can use continuous polling -// to track the request's status using DescribeVpcPeeringConnections, or by -// monitoring fleet events for success or failure using DescribeFleetEvents. -// -// VPC peering connection operations include: -// -// * CreateVpcPeeringAuthorization -// -// * DescribeVpcPeeringAuthorizations -// -// * DeleteVpcPeeringAuthorization -// -// * CreateVpcPeeringConnection -// -// * DescribeVpcPeeringConnections -// -// * DeleteVpcPeeringConnection -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation CreateVpcPeeringConnection for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringConnection -func (c *GameLift) CreateVpcPeeringConnection(input *CreateVpcPeeringConnectionInput) (*CreateVpcPeeringConnectionOutput, error) { - req, out := c.CreateVpcPeeringConnectionRequest(input) - return out, req.Send() -} - -// CreateVpcPeeringConnectionWithContext is the same as CreateVpcPeeringConnection with the addition of -// the ability to pass a context and additional request options. -// -// See CreateVpcPeeringConnection for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) CreateVpcPeeringConnectionWithContext(ctx aws.Context, input *CreateVpcPeeringConnectionInput, opts ...request.Option) (*CreateVpcPeeringConnectionOutput, error) { - req, out := c.CreateVpcPeeringConnectionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteAlias = "DeleteAlias" - -// DeleteAliasRequest generates a "aws/request.Request" representing the -// client's request for the DeleteAlias operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteAlias for more information on using the DeleteAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteAliasRequest method. 
-// req, resp := client.DeleteAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteAlias -func (c *GameLift) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) { - op := &request.Operation{ - Name: opDeleteAlias, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteAliasInput{} - } - - output = &DeleteAliasOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteAlias API operation for Amazon GameLift. -// -// Deletes an alias. This action removes all record of the alias. Game clients -// attempting to access a server process using the deleted alias receive an -// error. To delete an alias, specify the alias ID to be deleted. -// -// Alias-related operations include: -// -// * CreateAlias -// -// * ListAliases -// -// * DescribeAlias -// -// * UpdateAlias -// -// * DeleteAlias -// -// * ResolveAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DeleteAlias for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. 
Correct the invalid -// parameter values before retrying. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteAlias -func (c *GameLift) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { - req, out := c.DeleteAliasRequest(input) - return out, req.Send() -} - -// DeleteAliasWithContext is the same as DeleteAlias with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DeleteAliasWithContext(ctx aws.Context, input *DeleteAliasInput, opts ...request.Option) (*DeleteAliasOutput, error) { - req, out := c.DeleteAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBuild = "DeleteBuild" - -// DeleteBuildRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBuild operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBuild for more information on using the DeleteBuild -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBuildRequest method. -// req, resp := client.DeleteBuildRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteBuild -func (c *GameLift) DeleteBuildRequest(input *DeleteBuildInput) (req *request.Request, output *DeleteBuildOutput) { - op := &request.Operation{ - Name: opDeleteBuild, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteBuildInput{} - } - - output = &DeleteBuildOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBuild API operation for Amazon GameLift. -// -// Deletes a build. This action permanently deletes the build record and any -// uploaded build files. -// -// To delete a build, specify its ID. Deleting a build does not affect the status -// of any active fleets using the build, but you can no longer create new fleets -// with the deleted build. -// -// Build-related operations include: -// -// * CreateBuild -// -// * ListBuilds -// -// * DescribeBuild -// -// * UpdateBuild -// -// * DeleteBuild -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DeleteBuild for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. 
Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteBuild -func (c *GameLift) DeleteBuild(input *DeleteBuildInput) (*DeleteBuildOutput, error) { - req, out := c.DeleteBuildRequest(input) - return out, req.Send() -} - -// DeleteBuildWithContext is the same as DeleteBuild with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBuild for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DeleteBuildWithContext(ctx aws.Context, input *DeleteBuildInput, opts ...request.Option) (*DeleteBuildOutput, error) { - req, out := c.DeleteBuildRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteFleet = "DeleteFleet" - -// DeleteFleetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteFleet operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See DeleteFleet for more information on using the DeleteFleet -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteFleetRequest method. -// req, resp := client.DeleteFleetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteFleet -func (c *GameLift) DeleteFleetRequest(input *DeleteFleetInput) (req *request.Request, output *DeleteFleetOutput) { - op := &request.Operation{ - Name: opDeleteFleet, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteFleetInput{} - } - - output = &DeleteFleetOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteFleet API operation for Amazon GameLift. -// -// Deletes everything related to a fleet. Before deleting a fleet, you must -// set the fleet's desired capacity to zero. See UpdateFleetCapacity. -// -// This action removes the fleet's resources and the fleet record. Once a fleet -// is deleted, you can no longer use that fleet. 
-// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DeleteFleet for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException" -// The requested operation would cause a conflict with the current state of -// a resource associated with the request and/or the fleet. Resolve the conflict -// before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. 
Correct the invalid -// parameter values before retrying. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteFleet -func (c *GameLift) DeleteFleet(input *DeleteFleetInput) (*DeleteFleetOutput, error) { - req, out := c.DeleteFleetRequest(input) - return out, req.Send() -} - -// DeleteFleetWithContext is the same as DeleteFleet with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteFleet for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DeleteFleetWithContext(ctx aws.Context, input *DeleteFleetInput, opts ...request.Option) (*DeleteFleetOutput, error) { - req, out := c.DeleteFleetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteGameSessionQueue = "DeleteGameSessionQueue" - -// DeleteGameSessionQueueRequest generates a "aws/request.Request" representing the -// client's request for the DeleteGameSessionQueue operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteGameSessionQueue for more information on using the DeleteGameSessionQueue -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteGameSessionQueueRequest method. 
-// req, resp := client.DeleteGameSessionQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueue -func (c *GameLift) DeleteGameSessionQueueRequest(input *DeleteGameSessionQueueInput) (req *request.Request, output *DeleteGameSessionQueueOutput) { - op := &request.Operation{ - Name: opDeleteGameSessionQueue, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteGameSessionQueueInput{} - } - - output = &DeleteGameSessionQueueOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteGameSessionQueue API operation for Amazon GameLift. -// -// Deletes a game session queue. This action means that any StartGameSessionPlacement -// requests that reference this queue will fail. To delete a queue, specify -// the queue name. -// -// Queue-related operations include: -// -// * CreateGameSessionQueue -// -// * DescribeGameSessionQueues -// -// * UpdateGameSessionQueue -// -// * DeleteGameSessionQueue -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DeleteGameSessionQueue for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. 
-// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueue -func (c *GameLift) DeleteGameSessionQueue(input *DeleteGameSessionQueueInput) (*DeleteGameSessionQueueOutput, error) { - req, out := c.DeleteGameSessionQueueRequest(input) - return out, req.Send() -} - -// DeleteGameSessionQueueWithContext is the same as DeleteGameSessionQueue with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteGameSessionQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DeleteGameSessionQueueWithContext(ctx aws.Context, input *DeleteGameSessionQueueInput, opts ...request.Option) (*DeleteGameSessionQueueOutput, error) { - req, out := c.DeleteGameSessionQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteMatchmakingConfiguration = "DeleteMatchmakingConfiguration" - -// DeleteMatchmakingConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteMatchmakingConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See DeleteMatchmakingConfiguration for more information on using the DeleteMatchmakingConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteMatchmakingConfigurationRequest method. -// req, resp := client.DeleteMatchmakingConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteMatchmakingConfiguration -func (c *GameLift) DeleteMatchmakingConfigurationRequest(input *DeleteMatchmakingConfigurationInput) (req *request.Request, output *DeleteMatchmakingConfigurationOutput) { - op := &request.Operation{ - Name: opDeleteMatchmakingConfiguration, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteMatchmakingConfigurationInput{} - } - - output = &DeleteMatchmakingConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteMatchmakingConfiguration API operation for Amazon GameLift. -// -// Permanently removes a FlexMatch matchmaking configuration. To delete, specify -// the configuration name. A matchmaking configuration cannot be deleted if -// it is being used in any active matchmaking tickets. -// -// Operations related to match configurations and rule sets include: -// -// * CreateMatchmakingConfiguration -// -// * DescribeMatchmakingConfigurations -// -// * UpdateMatchmakingConfiguration -// -// * DeleteMatchmakingConfiguration -// -// * CreateMatchmakingRuleSet -// -// * DescribeMatchmakingRuleSets -// -// * ValidateMatchmakingRuleSet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon GameLift's -// API operation DeleteMatchmakingConfiguration for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" -// The requested operation is not supported in the region specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteMatchmakingConfiguration -func (c *GameLift) DeleteMatchmakingConfiguration(input *DeleteMatchmakingConfigurationInput) (*DeleteMatchmakingConfigurationOutput, error) { - req, out := c.DeleteMatchmakingConfigurationRequest(input) - return out, req.Send() -} - -// DeleteMatchmakingConfigurationWithContext is the same as DeleteMatchmakingConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteMatchmakingConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *GameLift) DeleteMatchmakingConfigurationWithContext(ctx aws.Context, input *DeleteMatchmakingConfigurationInput, opts ...request.Option) (*DeleteMatchmakingConfigurationOutput, error) { - req, out := c.DeleteMatchmakingConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteScalingPolicy = "DeleteScalingPolicy" - -// DeleteScalingPolicyRequest generates a "aws/request.Request" representing the -// client's request for the DeleteScalingPolicy operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteScalingPolicy for more information on using the DeleteScalingPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteScalingPolicyRequest method. 
-// req, resp := client.DeleteScalingPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteScalingPolicy -func (c *GameLift) DeleteScalingPolicyRequest(input *DeleteScalingPolicyInput) (req *request.Request, output *DeleteScalingPolicyOutput) { - op := &request.Operation{ - Name: opDeleteScalingPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteScalingPolicyInput{} - } - - output = &DeleteScalingPolicyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteScalingPolicy API operation for Amazon GameLift. -// -// Deletes a fleet scaling policy. This action means that the policy is no longer -// in force and removes all record of it. To delete a scaling policy, specify -// both the scaling policy name and the fleet ID it is associated with. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon GameLift's -// API operation DeleteScalingPolicy for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteScalingPolicy -func (c *GameLift) DeleteScalingPolicy(input *DeleteScalingPolicyInput) (*DeleteScalingPolicyOutput, error) { - req, out := c.DeleteScalingPolicyRequest(input) - return out, req.Send() -} - -// DeleteScalingPolicyWithContext is the same as DeleteScalingPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteScalingPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DeleteScalingPolicyWithContext(ctx aws.Context, input *DeleteScalingPolicyInput, opts ...request.Option) (*DeleteScalingPolicyOutput, error) { - req, out := c.DeleteScalingPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteVpcPeeringAuthorization = "DeleteVpcPeeringAuthorization" - -// DeleteVpcPeeringAuthorizationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteVpcPeeringAuthorization operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteVpcPeeringAuthorization for more information on using the DeleteVpcPeeringAuthorization -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteVpcPeeringAuthorizationRequest method. -// req, resp := client.DeleteVpcPeeringAuthorizationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringAuthorization -func (c *GameLift) DeleteVpcPeeringAuthorizationRequest(input *DeleteVpcPeeringAuthorizationInput) (req *request.Request, output *DeleteVpcPeeringAuthorizationOutput) { - op := &request.Operation{ - Name: opDeleteVpcPeeringAuthorization, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteVpcPeeringAuthorizationInput{} - } - - output = &DeleteVpcPeeringAuthorizationOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteVpcPeeringAuthorization API operation for Amazon GameLift. -// -// Cancels a pending VPC peering authorization for the specified VPC. If the -// authorization has already been used to create a peering connection, call -// DeleteVpcPeeringConnection to remove the connection. 
-// -// VPC peering connection operations include: -// -// * CreateVpcPeeringAuthorization -// -// * DescribeVpcPeeringAuthorizations -// -// * DeleteVpcPeeringAuthorization -// -// * CreateVpcPeeringConnection -// -// * DescribeVpcPeeringConnections -// -// * DeleteVpcPeeringConnection -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DeleteVpcPeeringAuthorization for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringAuthorization -func (c *GameLift) DeleteVpcPeeringAuthorization(input *DeleteVpcPeeringAuthorizationInput) (*DeleteVpcPeeringAuthorizationOutput, error) { - req, out := c.DeleteVpcPeeringAuthorizationRequest(input) - return out, req.Send() -} - -// DeleteVpcPeeringAuthorizationWithContext is the same as DeleteVpcPeeringAuthorization with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteVpcPeeringAuthorization for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DeleteVpcPeeringAuthorizationWithContext(ctx aws.Context, input *DeleteVpcPeeringAuthorizationInput, opts ...request.Option) (*DeleteVpcPeeringAuthorizationOutput, error) { - req, out := c.DeleteVpcPeeringAuthorizationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection" - -// DeleteVpcPeeringConnectionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteVpcPeeringConnection operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteVpcPeeringConnection for more information on using the DeleteVpcPeeringConnection -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteVpcPeeringConnectionRequest method. 
-// req, resp := client.DeleteVpcPeeringConnectionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringConnection -func (c *GameLift) DeleteVpcPeeringConnectionRequest(input *DeleteVpcPeeringConnectionInput) (req *request.Request, output *DeleteVpcPeeringConnectionOutput) { - op := &request.Operation{ - Name: opDeleteVpcPeeringConnection, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteVpcPeeringConnectionInput{} - } - - output = &DeleteVpcPeeringConnectionOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteVpcPeeringConnection API operation for Amazon GameLift. -// -// Removes a VPC peering connection. To delete the connection, you must have -// a valid authorization for the VPC peering connection that you want to delete. -// You can check for an authorization by calling DescribeVpcPeeringAuthorizations -// or request a new one using CreateVpcPeeringAuthorization. -// -// Once a valid authorization exists, call this operation from the AWS account -// that is used to manage the Amazon GameLift fleets. Identify the connection -// to delete by the connection ID and fleet ID. If successful, the connection -// is removed. -// -// VPC peering connection operations include: -// -// * CreateVpcPeeringAuthorization -// -// * DescribeVpcPeeringAuthorizations -// -// * DeleteVpcPeeringAuthorization -// -// * CreateVpcPeeringConnection -// -// * DescribeVpcPeeringConnections -// -// * DeleteVpcPeeringConnection -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DeleteVpcPeeringConnection for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringConnection -func (c *GameLift) DeleteVpcPeeringConnection(input *DeleteVpcPeeringConnectionInput) (*DeleteVpcPeeringConnectionOutput, error) { - req, out := c.DeleteVpcPeeringConnectionRequest(input) - return out, req.Send() -} - -// DeleteVpcPeeringConnectionWithContext is the same as DeleteVpcPeeringConnection with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteVpcPeeringConnection for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DeleteVpcPeeringConnectionWithContext(ctx aws.Context, input *DeleteVpcPeeringConnectionInput, opts ...request.Option) (*DeleteVpcPeeringConnectionOutput, error) { - req, out := c.DeleteVpcPeeringConnectionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDescribeAlias = "DescribeAlias" - -// DescribeAliasRequest generates a "aws/request.Request" representing the -// client's request for the DescribeAlias operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeAlias for more information on using the DescribeAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeAliasRequest method. -// req, resp := client.DescribeAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeAlias -func (c *GameLift) DescribeAliasRequest(input *DescribeAliasInput) (req *request.Request, output *DescribeAliasOutput) { - op := &request.Operation{ - Name: opDescribeAlias, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeAliasInput{} - } - - output = &DescribeAliasOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeAlias API operation for Amazon GameLift. -// -// Retrieves properties for an alias. This operation returns all alias metadata -// and settings. To get an alias's target fleet ID only, use ResolveAlias. -// -// To get alias properties, specify the alias ID. If successful, the requested -// alias record is returned. 
-// -// Alias-related operations include: -// -// * CreateAlias -// -// * ListAliases -// -// * DescribeAlias -// -// * UpdateAlias -// -// * DeleteAlias -// -// * ResolveAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeAlias for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeAlias -func (c *GameLift) DescribeAlias(input *DescribeAliasInput) (*DescribeAliasOutput, error) { - req, out := c.DescribeAliasRequest(input) - return out, req.Send() -} - -// DescribeAliasWithContext is the same as DescribeAlias with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeAliasWithContext(ctx aws.Context, input *DescribeAliasInput, opts ...request.Option) (*DescribeAliasOutput, error) { - req, out := c.DescribeAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeBuild = "DescribeBuild" - -// DescribeBuildRequest generates a "aws/request.Request" representing the -// client's request for the DescribeBuild operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeBuild for more information on using the DescribeBuild -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeBuildRequest method. -// req, resp := client.DescribeBuildRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeBuild -func (c *GameLift) DescribeBuildRequest(input *DescribeBuildInput) (req *request.Request, output *DescribeBuildOutput) { - op := &request.Operation{ - Name: opDescribeBuild, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeBuildInput{} - } - - output = &DescribeBuildOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeBuild API operation for Amazon GameLift. -// -// Retrieves properties for a build. To get a build record, specify a build -// ID. If successful, an object containing the build properties is returned. 
-// -// Build-related operations include: -// -// * CreateBuild -// -// * ListBuilds -// -// * DescribeBuild -// -// * UpdateBuild -// -// * DeleteBuild -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeBuild for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeBuild -func (c *GameLift) DescribeBuild(input *DescribeBuildInput) (*DescribeBuildOutput, error) { - req, out := c.DescribeBuildRequest(input) - return out, req.Send() -} - -// DescribeBuildWithContext is the same as DescribeBuild with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeBuild for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *GameLift) DescribeBuildWithContext(ctx aws.Context, input *DescribeBuildInput, opts ...request.Option) (*DescribeBuildOutput, error) { - req, out := c.DescribeBuildRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeEC2InstanceLimits = "DescribeEC2InstanceLimits" - -// DescribeEC2InstanceLimitsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeEC2InstanceLimits operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeEC2InstanceLimits for more information on using the DescribeEC2InstanceLimits -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeEC2InstanceLimitsRequest method. -// req, resp := client.DescribeEC2InstanceLimitsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeEC2InstanceLimits -func (c *GameLift) DescribeEC2InstanceLimitsRequest(input *DescribeEC2InstanceLimitsInput) (req *request.Request, output *DescribeEC2InstanceLimitsOutput) { - op := &request.Operation{ - Name: opDescribeEC2InstanceLimits, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeEC2InstanceLimitsInput{} - } - - output = &DescribeEC2InstanceLimitsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeEC2InstanceLimits API operation for Amazon GameLift. 
-// -// Retrieves the following information for the specified EC2 instance type: -// -// * maximum number of instances allowed per AWS account (service limit) -// -// * current usage level for the AWS account -// -// Service limits vary depending on region. Available regions for Amazon GameLift -// can be found in the AWS Management Console for Amazon GameLift (see the drop-down -// list in the upper right corner). -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeEC2InstanceLimits for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. 
-// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeEC2InstanceLimits -func (c *GameLift) DescribeEC2InstanceLimits(input *DescribeEC2InstanceLimitsInput) (*DescribeEC2InstanceLimitsOutput, error) { - req, out := c.DescribeEC2InstanceLimitsRequest(input) - return out, req.Send() -} - -// DescribeEC2InstanceLimitsWithContext is the same as DescribeEC2InstanceLimits with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeEC2InstanceLimits for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeEC2InstanceLimitsWithContext(ctx aws.Context, input *DescribeEC2InstanceLimitsInput, opts ...request.Option) (*DescribeEC2InstanceLimitsOutput, error) { - req, out := c.DescribeEC2InstanceLimitsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeFleetAttributes = "DescribeFleetAttributes" - -// DescribeFleetAttributesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeFleetAttributes operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeFleetAttributes for more information on using the DescribeFleetAttributes -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeFleetAttributesRequest method. -// req, resp := client.DescribeFleetAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetAttributes -func (c *GameLift) DescribeFleetAttributesRequest(input *DescribeFleetAttributesInput) (req *request.Request, output *DescribeFleetAttributesOutput) { - op := &request.Operation{ - Name: opDescribeFleetAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeFleetAttributesInput{} - } - - output = &DescribeFleetAttributesOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeFleetAttributes API operation for Amazon GameLift. -// -// Retrieves fleet properties, including metadata, status, and configuration, -// for one or more fleets. You can request attributes for all fleets, or specify -// a list of one or more fleet IDs. When requesting multiple fleets, use the -// pagination parameters to retrieve results as a set of sequential pages. If -// successful, a FleetAttributes object is returned for each requested fleet -// ID. When specifying a list of fleet IDs, attribute objects are returned only -// for fleets that currently exist. -// -// Some API actions may limit the number of fleet IDs allowed in one request. -// If a request exceeds this limit, the request fails and the error message -// includes the maximum allowed. 
-// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeFleetAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetAttributes -func (c *GameLift) DescribeFleetAttributes(input *DescribeFleetAttributesInput) (*DescribeFleetAttributesOutput, error) { - req, out := c.DescribeFleetAttributesRequest(input) - return out, req.Send() -} - -// DescribeFleetAttributesWithContext is the same as DescribeFleetAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeFleetAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeFleetAttributesWithContext(ctx aws.Context, input *DescribeFleetAttributesInput, opts ...request.Option) (*DescribeFleetAttributesOutput, error) { - req, out := c.DescribeFleetAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeFleetCapacity = "DescribeFleetCapacity" - -// DescribeFleetCapacityRequest generates a "aws/request.Request" representing the -// client's request for the DescribeFleetCapacity operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeFleetCapacity for more information on using the DescribeFleetCapacity -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the DescribeFleetCapacityRequest method. -// req, resp := client.DescribeFleetCapacityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetCapacity -func (c *GameLift) DescribeFleetCapacityRequest(input *DescribeFleetCapacityInput) (req *request.Request, output *DescribeFleetCapacityOutput) { - op := &request.Operation{ - Name: opDescribeFleetCapacity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeFleetCapacityInput{} - } - - output = &DescribeFleetCapacityOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeFleetCapacity API operation for Amazon GameLift. -// -// Retrieves the current status of fleet capacity for one or more fleets. This -// information includes the number of instances that have been requested for -// the fleet and the number currently active. You can request capacity for all -// fleets, or specify a list of one or more fleet IDs. When requesting multiple -// fleets, use the pagination parameters to retrieve results as a set of sequential -// pages. If successful, a FleetCapacity object is returned for each requested -// fleet ID. When specifying a list of fleet IDs, attribute objects are returned -// only for fleets that currently exist. -// -// Some API actions may limit the number of fleet IDs allowed in one request. -// If a request exceeds this limit, the request fails and the error message -// includes the maximum allowed. 
-// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeFleetCapacity for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetCapacity -func (c *GameLift) DescribeFleetCapacity(input *DescribeFleetCapacityInput) (*DescribeFleetCapacityOutput, error) { - req, out := c.DescribeFleetCapacityRequest(input) - return out, req.Send() -} - -// DescribeFleetCapacityWithContext is the same as DescribeFleetCapacity with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeFleetCapacity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeFleetCapacityWithContext(ctx aws.Context, input *DescribeFleetCapacityInput, opts ...request.Option) (*DescribeFleetCapacityOutput, error) { - req, out := c.DescribeFleetCapacityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeFleetEvents = "DescribeFleetEvents" - -// DescribeFleetEventsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeFleetEvents operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeFleetEvents for more information on using the DescribeFleetEvents -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeFleetEventsRequest method. 
-// req, resp := client.DescribeFleetEventsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetEvents -func (c *GameLift) DescribeFleetEventsRequest(input *DescribeFleetEventsInput) (req *request.Request, output *DescribeFleetEventsOutput) { - op := &request.Operation{ - Name: opDescribeFleetEvents, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeFleetEventsInput{} - } - - output = &DescribeFleetEventsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeFleetEvents API operation for Amazon GameLift. -// -// Retrieves entries from the specified fleet's event log. You can specify a -// time range to limit the result set. Use the pagination parameters to retrieve -// results as a set of sequential pages. If successful, a collection of event -// log entries matching the request are returned. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeFleetEvents for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetEvents -func (c *GameLift) DescribeFleetEvents(input *DescribeFleetEventsInput) (*DescribeFleetEventsOutput, error) { - req, out := c.DescribeFleetEventsRequest(input) - return out, req.Send() -} - -// DescribeFleetEventsWithContext is the same as DescribeFleetEvents with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeFleetEvents for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeFleetEventsWithContext(ctx aws.Context, input *DescribeFleetEventsInput, opts ...request.Option) (*DescribeFleetEventsOutput, error) { - req, out := c.DescribeFleetEventsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDescribeFleetPortSettings = "DescribeFleetPortSettings" - -// DescribeFleetPortSettingsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeFleetPortSettings operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeFleetPortSettings for more information on using the DescribeFleetPortSettings -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeFleetPortSettingsRequest method. -// req, resp := client.DescribeFleetPortSettingsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetPortSettings -func (c *GameLift) DescribeFleetPortSettingsRequest(input *DescribeFleetPortSettingsInput) (req *request.Request, output *DescribeFleetPortSettingsOutput) { - op := &request.Operation{ - Name: opDescribeFleetPortSettings, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeFleetPortSettingsInput{} - } - - output = &DescribeFleetPortSettingsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeFleetPortSettings API operation for Amazon GameLift. -// -// Retrieves the inbound connection permissions for a fleet. Connection permissions -// include a range of IP addresses and port settings that incoming traffic can -// use to access server processes in the fleet. To get a fleet's inbound connection -// permissions, specify a fleet ID. 
If successful, a collection of IpPermission -// objects is returned for the requested fleet ID. If the requested fleet has -// been deleted, the result set is empty. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeFleetPortSettings for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetPortSettings -func (c *GameLift) DescribeFleetPortSettings(input *DescribeFleetPortSettingsInput) (*DescribeFleetPortSettingsOutput, error) { - req, out := c.DescribeFleetPortSettingsRequest(input) - return out, req.Send() -} - -// DescribeFleetPortSettingsWithContext is the same as DescribeFleetPortSettings with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeFleetPortSettings for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeFleetPortSettingsWithContext(ctx aws.Context, input *DescribeFleetPortSettingsInput, opts ...request.Option) (*DescribeFleetPortSettingsOutput, error) { - req, out := c.DescribeFleetPortSettingsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeFleetUtilization = "DescribeFleetUtilization" - -// DescribeFleetUtilizationRequest generates a "aws/request.Request" representing the -// client's request for the DescribeFleetUtilization operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeFleetUtilization for more information on using the DescribeFleetUtilization -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the DescribeFleetUtilizationRequest method. -// req, resp := client.DescribeFleetUtilizationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetUtilization -func (c *GameLift) DescribeFleetUtilizationRequest(input *DescribeFleetUtilizationInput) (req *request.Request, output *DescribeFleetUtilizationOutput) { - op := &request.Operation{ - Name: opDescribeFleetUtilization, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeFleetUtilizationInput{} - } - - output = &DescribeFleetUtilizationOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeFleetUtilization API operation for Amazon GameLift. -// -// Retrieves utilization statistics for one or more fleets. You can request -// utilization data for all fleets, or specify a list of one or more fleet IDs. -// When requesting multiple fleets, use the pagination parameters to retrieve -// results as a set of sequential pages. If successful, a FleetUtilization object -// is returned for each requested fleet ID. When specifying a list of fleet -// IDs, utilization objects are returned only for fleets that currently exist. -// -// Some API actions may limit the number of fleet IDs allowed in one request. -// If a request exceeds this limit, the request fails and the error message -// includes the maximum allowed. 
-// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeFleetUtilization for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetUtilization -func (c *GameLift) DescribeFleetUtilization(input *DescribeFleetUtilizationInput) (*DescribeFleetUtilizationOutput, error) { - req, out := c.DescribeFleetUtilizationRequest(input) - return out, req.Send() -} - -// DescribeFleetUtilizationWithContext is the same as DescribeFleetUtilization with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeFleetUtilization for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeFleetUtilizationWithContext(ctx aws.Context, input *DescribeFleetUtilizationInput, opts ...request.Option) (*DescribeFleetUtilizationOutput, error) { - req, out := c.DescribeFleetUtilizationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeGameSessionDetails = "DescribeGameSessionDetails" - -// DescribeGameSessionDetailsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeGameSessionDetails operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeGameSessionDetails for more information on using the DescribeGameSessionDetails -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the DescribeGameSessionDetailsRequest method. -// req, resp := client.DescribeGameSessionDetailsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetails -func (c *GameLift) DescribeGameSessionDetailsRequest(input *DescribeGameSessionDetailsInput) (req *request.Request, output *DescribeGameSessionDetailsOutput) { - op := &request.Operation{ - Name: opDescribeGameSessionDetails, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeGameSessionDetailsInput{} - } - - output = &DescribeGameSessionDetailsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeGameSessionDetails API operation for Amazon GameLift. -// -// Retrieves properties, including the protection policy in force, for one or -// more game sessions. This action can be used in several ways: (1) provide -// a GameSessionId or GameSessionArn to request details for a specific game -// session; (2) provide either a FleetId or an AliasId to request properties -// for all game sessions running on a fleet. -// -// To get game session record(s), specify just one of the following: game session -// ID, fleet ID, or alias ID. You can filter this request by game session status. -// Use the pagination parameters to retrieve results as a set of sequential -// pages. If successful, a GameSessionDetail object is returned for each session -// matching the request. 
-// -// Game-session-related operations include: -// -// * CreateGameSession -// -// * DescribeGameSessions -// -// * DescribeGameSessionDetails -// -// * SearchGameSessions -// -// * UpdateGameSession -// -// * GetGameSessionLogUrl -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeGameSessionDetails for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" -// The service is unable to resolve the routing for a particular alias because -// it has a terminal RoutingStrategy associated with it. The message returned -// in this exception is the message defined in the routing strategy itself. -// Such requests should only be retried if the routing strategy for the specified -// alias is modified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetails -func (c *GameLift) DescribeGameSessionDetails(input *DescribeGameSessionDetailsInput) (*DescribeGameSessionDetailsOutput, error) { - req, out := c.DescribeGameSessionDetailsRequest(input) - return out, req.Send() -} - -// DescribeGameSessionDetailsWithContext is the same as DescribeGameSessionDetails with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeGameSessionDetails for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeGameSessionDetailsWithContext(ctx aws.Context, input *DescribeGameSessionDetailsInput, opts ...request.Option) (*DescribeGameSessionDetailsOutput, error) { - req, out := c.DescribeGameSessionDetailsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeGameSessionPlacement = "DescribeGameSessionPlacement" - -// DescribeGameSessionPlacementRequest generates a "aws/request.Request" representing the -// client's request for the DescribeGameSessionPlacement operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeGameSessionPlacement for more information on using the DescribeGameSessionPlacement -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the DescribeGameSessionPlacementRequest method. -// req, resp := client.DescribeGameSessionPlacementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacement -func (c *GameLift) DescribeGameSessionPlacementRequest(input *DescribeGameSessionPlacementInput) (req *request.Request, output *DescribeGameSessionPlacementOutput) { - op := &request.Operation{ - Name: opDescribeGameSessionPlacement, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeGameSessionPlacementInput{} - } - - output = &DescribeGameSessionPlacementOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeGameSessionPlacement API operation for Amazon GameLift. -// -// Retrieves properties and current status of a game session placement request. -// To get game session placement details, specify the placement ID. If successful, -// a GameSessionPlacement object is returned. -// -// Game-session-related operations include: -// -// * CreateGameSession -// -// * DescribeGameSessions -// -// * DescribeGameSessionDetails -// -// * SearchGameSessions -// -// * UpdateGameSession -// -// * GetGameSessionLogUrl -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeGameSessionPlacement for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. 
Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacement -func (c *GameLift) DescribeGameSessionPlacement(input *DescribeGameSessionPlacementInput) (*DescribeGameSessionPlacementOutput, error) { - req, out := c.DescribeGameSessionPlacementRequest(input) - return out, req.Send() -} - -// DescribeGameSessionPlacementWithContext is the same as DescribeGameSessionPlacement with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeGameSessionPlacement for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeGameSessionPlacementWithContext(ctx aws.Context, input *DescribeGameSessionPlacementInput, opts ...request.Option) (*DescribeGameSessionPlacementOutput, error) { - req, out := c.DescribeGameSessionPlacementRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeGameSessionQueues = "DescribeGameSessionQueues" - -// DescribeGameSessionQueuesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeGameSessionQueues operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeGameSessionQueues for more information on using the DescribeGameSessionQueues -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeGameSessionQueuesRequest method. -// req, resp := client.DescribeGameSessionQueuesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueues -func (c *GameLift) DescribeGameSessionQueuesRequest(input *DescribeGameSessionQueuesInput) (req *request.Request, output *DescribeGameSessionQueuesOutput) { - op := &request.Operation{ - Name: opDescribeGameSessionQueues, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeGameSessionQueuesInput{} - } - - output = &DescribeGameSessionQueuesOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeGameSessionQueues API operation for Amazon GameLift. -// -// Retrieves the properties for one or more game session queues. When requesting -// multiple queues, use the pagination parameters to retrieve results as a set -// of sequential pages. If successful, a GameSessionQueue object is returned -// for each requested queue. When specifying a list of queues, objects are returned -// only for queues that currently exist in the region. 
-// -// Queue-related operations include: -// -// * CreateGameSessionQueue -// -// * DescribeGameSessionQueues -// -// * UpdateGameSessionQueue -// -// * DeleteGameSessionQueue -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeGameSessionQueues for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueues -func (c *GameLift) DescribeGameSessionQueues(input *DescribeGameSessionQueuesInput) (*DescribeGameSessionQueuesOutput, error) { - req, out := c.DescribeGameSessionQueuesRequest(input) - return out, req.Send() -} - -// DescribeGameSessionQueuesWithContext is the same as DescribeGameSessionQueues with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeGameSessionQueues for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeGameSessionQueuesWithContext(ctx aws.Context, input *DescribeGameSessionQueuesInput, opts ...request.Option) (*DescribeGameSessionQueuesOutput, error) { - req, out := c.DescribeGameSessionQueuesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeGameSessions = "DescribeGameSessions" - -// DescribeGameSessionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeGameSessions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeGameSessions for more information on using the DescribeGameSessions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeGameSessionsRequest method. 
-// req, resp := client.DescribeGameSessionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessions -func (c *GameLift) DescribeGameSessionsRequest(input *DescribeGameSessionsInput) (req *request.Request, output *DescribeGameSessionsOutput) { - op := &request.Operation{ - Name: opDescribeGameSessions, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeGameSessionsInput{} - } - - output = &DescribeGameSessionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeGameSessions API operation for Amazon GameLift. -// -// Retrieves a set of one or more game sessions. Request a specific game session -// or request all game sessions on a fleet. Alternatively, use SearchGameSessions -// to request a set of active game sessions that are filtered by certain criteria. -// To retrieve protection policy settings for game sessions, use DescribeGameSessionDetails. -// -// To get game sessions, specify one of the following: game session ID, fleet -// ID, or alias ID. You can filter this request by game session status. Use -// the pagination parameters to retrieve results as a set of sequential pages. -// If successful, a GameSession object is returned for each game session matching -// the request. -// -// Available in Amazon GameLift Local. -// -// Game-session-related operations include: -// -// * CreateGameSession -// -// * DescribeGameSessions -// -// * DescribeGameSessionDetails -// -// * SearchGameSessions -// -// * UpdateGameSession -// -// * GetGameSessionLogUrl -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeGameSessions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" -// The service is unable to resolve the routing for a particular alias because -// it has a terminal RoutingStrategy associated with it. The message returned -// in this exception is the message defined in the routing strategy itself. -// Such requests should only be retried if the routing strategy for the specified -// alias is modified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessions -func (c *GameLift) DescribeGameSessions(input *DescribeGameSessionsInput) (*DescribeGameSessionsOutput, error) { - req, out := c.DescribeGameSessionsRequest(input) - return out, req.Send() -} - -// DescribeGameSessionsWithContext is the same as DescribeGameSessions with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeGameSessions for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeGameSessionsWithContext(ctx aws.Context, input *DescribeGameSessionsInput, opts ...request.Option) (*DescribeGameSessionsOutput, error) { - req, out := c.DescribeGameSessionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeInstances = "DescribeInstances" - -// DescribeInstancesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeInstances operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeInstances for more information on using the DescribeInstances -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeInstancesRequest method. 
-// req, resp := client.DescribeInstancesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstances -func (c *GameLift) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { - op := &request.Operation{ - Name: opDescribeInstances, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeInstancesInput{} - } - - output = &DescribeInstancesOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeInstances API operation for Amazon GameLift. -// -// Retrieves information about a fleet's instances, including instance IDs. -// Use this action to get details on all instances in the fleet or get details -// on one specific instance. -// -// To get a specific instance, specify fleet ID and instance ID. To get all -// instances in a fleet, specify a fleet ID only. Use the pagination parameters -// to retrieve results as a set of sequential pages. If successful, an Instance -// object is returned for each result. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeInstances for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. 
Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstances -func (c *GameLift) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { - req, out := c.DescribeInstancesRequest(input) - return out, req.Send() -} - -// DescribeInstancesWithContext is the same as DescribeInstances with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeInstances for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeInstancesWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.Option) (*DescribeInstancesOutput, error) { - req, out := c.DescribeInstancesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeMatchmaking = "DescribeMatchmaking" - -// DescribeMatchmakingRequest generates a "aws/request.Request" representing the -// client's request for the DescribeMatchmaking operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeMatchmaking for more information on using the DescribeMatchmaking -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeMatchmakingRequest method. -// req, resp := client.DescribeMatchmakingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmaking -func (c *GameLift) DescribeMatchmakingRequest(input *DescribeMatchmakingInput) (req *request.Request, output *DescribeMatchmakingOutput) { - op := &request.Operation{ - Name: opDescribeMatchmaking, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeMatchmakingInput{} - } - - output = &DescribeMatchmakingOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeMatchmaking API operation for Amazon GameLift. -// -// Retrieves a set of one or more matchmaking tickets. Use this operation to -// retrieve ticket information, including status and--once a successful match -// is made--acquire connection information for the resulting new game session. -// -// You can use this operation to track the progress of matchmaking requests -// (through polling) as an alternative to using event notifications. See more -// details on tracking matchmaking requests through polling or notifications -// in StartMatchmaking. -// -// You can request data for a one or a list of ticket IDs. If the request is -// successful, a ticket object is returned for each requested ID. When specifying -// a list of ticket IDs, objects are returned only for tickets that currently -// exist. -// -// Matchmaking-related operations include: -// -// * StartMatchmaking -// -// * DescribeMatchmaking -// -// * StopMatchmaking -// -// * AcceptMatch -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeMatchmaking for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" -// The requested operation is not supported in the region specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmaking -func (c *GameLift) DescribeMatchmaking(input *DescribeMatchmakingInput) (*DescribeMatchmakingOutput, error) { - req, out := c.DescribeMatchmakingRequest(input) - return out, req.Send() -} - -// DescribeMatchmakingWithContext is the same as DescribeMatchmaking with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeMatchmaking for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeMatchmakingWithContext(ctx aws.Context, input *DescribeMatchmakingInput, opts ...request.Option) (*DescribeMatchmakingOutput, error) { - req, out := c.DescribeMatchmakingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDescribeMatchmakingConfigurations = "DescribeMatchmakingConfigurations" - -// DescribeMatchmakingConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeMatchmakingConfigurations operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeMatchmakingConfigurations for more information on using the DescribeMatchmakingConfigurations -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeMatchmakingConfigurationsRequest method. -// req, resp := client.DescribeMatchmakingConfigurationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingConfigurations -func (c *GameLift) DescribeMatchmakingConfigurationsRequest(input *DescribeMatchmakingConfigurationsInput) (req *request.Request, output *DescribeMatchmakingConfigurationsOutput) { - op := &request.Operation{ - Name: opDescribeMatchmakingConfigurations, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeMatchmakingConfigurationsInput{} - } - - output = &DescribeMatchmakingConfigurationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeMatchmakingConfigurations API operation for Amazon GameLift. -// -// Retrieves the details of FlexMatch matchmaking configurations. 
with this -// operation, you have the following options: (1) retrieve all existing configurations, -// (2) provide the names of one or more configurations to retrieve, or (3) retrieve -// all configurations that use a specified rule set name. When requesting multiple -// items, use the pagination parameters to retrieve results as a set of sequential -// pages. If successful, a configuration is returned for each requested name. -// When specifying a list of names, only configurations that currently exist -// are returned. -// -// Operations related to match configurations and rule sets include: -// -// * CreateMatchmakingConfiguration -// -// * DescribeMatchmakingConfigurations -// -// * UpdateMatchmakingConfiguration -// -// * DeleteMatchmakingConfiguration -// -// * CreateMatchmakingRuleSet -// -// * DescribeMatchmakingRuleSets -// -// * ValidateMatchmakingRuleSet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeMatchmakingConfigurations for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" -// The requested operation is not supported in the region specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingConfigurations -func (c *GameLift) DescribeMatchmakingConfigurations(input *DescribeMatchmakingConfigurationsInput) (*DescribeMatchmakingConfigurationsOutput, error) { - req, out := c.DescribeMatchmakingConfigurationsRequest(input) - return out, req.Send() -} - -// DescribeMatchmakingConfigurationsWithContext is the same as DescribeMatchmakingConfigurations with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeMatchmakingConfigurations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeMatchmakingConfigurationsWithContext(ctx aws.Context, input *DescribeMatchmakingConfigurationsInput, opts ...request.Option) (*DescribeMatchmakingConfigurationsOutput, error) { - req, out := c.DescribeMatchmakingConfigurationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeMatchmakingRuleSets = "DescribeMatchmakingRuleSets" - -// DescribeMatchmakingRuleSetsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeMatchmakingRuleSets operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeMatchmakingRuleSets for more information on using the DescribeMatchmakingRuleSets -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeMatchmakingRuleSetsRequest method. -// req, resp := client.DescribeMatchmakingRuleSetsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingRuleSets -func (c *GameLift) DescribeMatchmakingRuleSetsRequest(input *DescribeMatchmakingRuleSetsInput) (req *request.Request, output *DescribeMatchmakingRuleSetsOutput) { - op := &request.Operation{ - Name: opDescribeMatchmakingRuleSets, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeMatchmakingRuleSetsInput{} - } - - output = &DescribeMatchmakingRuleSetsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeMatchmakingRuleSets API operation for Amazon GameLift. -// -// Retrieves the details for FlexMatch matchmaking rule sets. You can request -// all existing rule sets for the region, or provide a list of one or more rule -// set names. When requesting multiple items, use the pagination parameters -// to retrieve results as a set of sequential pages. If successful, a rule set -// is returned for each requested name. -// -// Operations related to match configurations and rule sets include: -// -// * CreateMatchmakingConfiguration -// -// * DescribeMatchmakingConfigurations -// -// * UpdateMatchmakingConfiguration -// -// * DeleteMatchmakingConfiguration -// -// * CreateMatchmakingRuleSet -// -// * DescribeMatchmakingRuleSets -// -// * ValidateMatchmakingRuleSet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeMatchmakingRuleSets for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" -// The requested operation is not supported in the region specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingRuleSets -func (c *GameLift) DescribeMatchmakingRuleSets(input *DescribeMatchmakingRuleSetsInput) (*DescribeMatchmakingRuleSetsOutput, error) { - req, out := c.DescribeMatchmakingRuleSetsRequest(input) - return out, req.Send() -} - -// DescribeMatchmakingRuleSetsWithContext is the same as DescribeMatchmakingRuleSets with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeMatchmakingRuleSets for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *GameLift) DescribeMatchmakingRuleSetsWithContext(ctx aws.Context, input *DescribeMatchmakingRuleSetsInput, opts ...request.Option) (*DescribeMatchmakingRuleSetsOutput, error) { - req, out := c.DescribeMatchmakingRuleSetsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribePlayerSessions = "DescribePlayerSessions" - -// DescribePlayerSessionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribePlayerSessions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribePlayerSessions for more information on using the DescribePlayerSessions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribePlayerSessionsRequest method. -// req, resp := client.DescribePlayerSessionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribePlayerSessions -func (c *GameLift) DescribePlayerSessionsRequest(input *DescribePlayerSessionsInput) (req *request.Request, output *DescribePlayerSessionsOutput) { - op := &request.Operation{ - Name: opDescribePlayerSessions, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribePlayerSessionsInput{} - } - - output = &DescribePlayerSessionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribePlayerSessions API operation for Amazon GameLift. 
-// -// Retrieves properties for one or more player sessions. This action can be -// used in several ways: (1) provide a PlayerSessionId to request properties -// for a specific player session; (2) provide a GameSessionId to request properties -// for all player sessions in the specified game session; (3) provide a PlayerId -// to request properties for all player sessions of a specified player. -// -// To get game session record(s), specify only one of the following: a player -// session ID, a game session ID, or a player ID. You can filter this request -// by player session status. Use the pagination parameters to retrieve results -// as a set of sequential pages. If successful, a PlayerSession object is returned -// for each session matching the request. -// -// Available in Amazon GameLift Local. -// -// Player-session-related operations include: -// -// * CreatePlayerSession -// -// * CreatePlayerSessions -// -// * DescribePlayerSessions -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribePlayerSessions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. 
Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribePlayerSessions -func (c *GameLift) DescribePlayerSessions(input *DescribePlayerSessionsInput) (*DescribePlayerSessionsOutput, error) { - req, out := c.DescribePlayerSessionsRequest(input) - return out, req.Send() -} - -// DescribePlayerSessionsWithContext is the same as DescribePlayerSessions with the addition of -// the ability to pass a context and additional request options. -// -// See DescribePlayerSessions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribePlayerSessionsWithContext(ctx aws.Context, input *DescribePlayerSessionsInput, opts ...request.Option) (*DescribePlayerSessionsOutput, error) { - req, out := c.DescribePlayerSessionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeRuntimeConfiguration = "DescribeRuntimeConfiguration" - -// DescribeRuntimeConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DescribeRuntimeConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeRuntimeConfiguration for more information on using the DescribeRuntimeConfiguration -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeRuntimeConfigurationRequest method. -// req, resp := client.DescribeRuntimeConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeRuntimeConfiguration -func (c *GameLift) DescribeRuntimeConfigurationRequest(input *DescribeRuntimeConfigurationInput) (req *request.Request, output *DescribeRuntimeConfigurationOutput) { - op := &request.Operation{ - Name: opDescribeRuntimeConfiguration, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeRuntimeConfigurationInput{} - } - - output = &DescribeRuntimeConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeRuntimeConfiguration API operation for Amazon GameLift. -// -// Retrieves the current run-time configuration for the specified fleet. The -// run-time configuration tells Amazon GameLift how to launch server processes -// on instances in the fleet. 
-// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeRuntimeConfiguration for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeRuntimeConfiguration -func (c *GameLift) DescribeRuntimeConfiguration(input *DescribeRuntimeConfigurationInput) (*DescribeRuntimeConfigurationOutput, error) { - req, out := c.DescribeRuntimeConfigurationRequest(input) - return out, req.Send() -} - -// DescribeRuntimeConfigurationWithContext is the same as DescribeRuntimeConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeRuntimeConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeRuntimeConfigurationWithContext(ctx aws.Context, input *DescribeRuntimeConfigurationInput, opts ...request.Option) (*DescribeRuntimeConfigurationOutput, error) { - req, out := c.DescribeRuntimeConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeScalingPolicies = "DescribeScalingPolicies" - -// DescribeScalingPoliciesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeScalingPolicies operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeScalingPolicies for more information on using the DescribeScalingPolicies -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the DescribeScalingPoliciesRequest method. -// req, resp := client.DescribeScalingPoliciesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeScalingPolicies -func (c *GameLift) DescribeScalingPoliciesRequest(input *DescribeScalingPoliciesInput) (req *request.Request, output *DescribeScalingPoliciesOutput) { - op := &request.Operation{ - Name: opDescribeScalingPolicies, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeScalingPoliciesInput{} - } - - output = &DescribeScalingPoliciesOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeScalingPolicies API operation for Amazon GameLift. -// -// Retrieves all scaling policies applied to a fleet. -// -// To get a fleet's scaling policies, specify the fleet ID. You can filter this -// request by policy status, such as to retrieve only active scaling policies. -// Use the pagination parameters to retrieve results as a set of sequential -// pages. If successful, set of ScalingPolicy objects is returned for the fleet. 
-// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeScalingPolicies for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeScalingPolicies -func (c *GameLift) DescribeScalingPolicies(input *DescribeScalingPoliciesInput) (*DescribeScalingPoliciesOutput, error) { - req, out := c.DescribeScalingPoliciesRequest(input) - return out, req.Send() -} - -// DescribeScalingPoliciesWithContext is the same as DescribeScalingPolicies with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeScalingPolicies for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeScalingPoliciesWithContext(ctx aws.Context, input *DescribeScalingPoliciesInput, opts ...request.Option) (*DescribeScalingPoliciesOutput, error) { - req, out := c.DescribeScalingPoliciesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeVpcPeeringAuthorizations = "DescribeVpcPeeringAuthorizations" - -// DescribeVpcPeeringAuthorizationsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeVpcPeeringAuthorizations operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeVpcPeeringAuthorizations for more information on using the DescribeVpcPeeringAuthorizations -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the DescribeVpcPeeringAuthorizationsRequest method. -// req, resp := client.DescribeVpcPeeringAuthorizationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringAuthorizations -func (c *GameLift) DescribeVpcPeeringAuthorizationsRequest(input *DescribeVpcPeeringAuthorizationsInput) (req *request.Request, output *DescribeVpcPeeringAuthorizationsOutput) { - op := &request.Operation{ - Name: opDescribeVpcPeeringAuthorizations, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeVpcPeeringAuthorizationsInput{} - } - - output = &DescribeVpcPeeringAuthorizationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeVpcPeeringAuthorizations API operation for Amazon GameLift. -// -// Retrieves valid VPC peering authorizations that are pending for the AWS account. -// This operation returns all VPC peering authorizations and requests for peering. -// This includes those initiated and received by this account. -// -// VPC peering connection operations include: -// -// * CreateVpcPeeringAuthorization -// -// * DescribeVpcPeeringAuthorizations -// -// * DeleteVpcPeeringAuthorization -// -// * CreateVpcPeeringConnection -// -// * DescribeVpcPeeringConnections -// -// * DeleteVpcPeeringConnection -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeVpcPeeringAuthorizations for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. 
-// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringAuthorizations -func (c *GameLift) DescribeVpcPeeringAuthorizations(input *DescribeVpcPeeringAuthorizationsInput) (*DescribeVpcPeeringAuthorizationsOutput, error) { - req, out := c.DescribeVpcPeeringAuthorizationsRequest(input) - return out, req.Send() -} - -// DescribeVpcPeeringAuthorizationsWithContext is the same as DescribeVpcPeeringAuthorizations with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeVpcPeeringAuthorizations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeVpcPeeringAuthorizationsWithContext(ctx aws.Context, input *DescribeVpcPeeringAuthorizationsInput, opts ...request.Option) (*DescribeVpcPeeringAuthorizationsOutput, error) { - req, out := c.DescribeVpcPeeringAuthorizationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections" - -// DescribeVpcPeeringConnectionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeVpcPeeringConnections operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeVpcPeeringConnections for more information on using the DescribeVpcPeeringConnections -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeVpcPeeringConnectionsRequest method. -// req, resp := client.DescribeVpcPeeringConnectionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringConnections -func (c *GameLift) DescribeVpcPeeringConnectionsRequest(input *DescribeVpcPeeringConnectionsInput) (req *request.Request, output *DescribeVpcPeeringConnectionsOutput) { - op := &request.Operation{ - Name: opDescribeVpcPeeringConnections, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeVpcPeeringConnectionsInput{} - } - - output = &DescribeVpcPeeringConnectionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeVpcPeeringConnections API operation for Amazon GameLift. -// -// Retrieves information on VPC peering connections. Use this operation to get -// peering information for all fleets or for one specific fleet ID. -// -// To retrieve connection information, call this operation from the AWS account -// that is used to manage the Amazon GameLift fleets. Specify a fleet ID or -// leave the parameter empty to retrieve all connection records. If successful, -// the retrieved information includes both active and pending connections. 
Active -// connections identify the IpV4 CIDR block that the VPC uses to connect. -// -// VPC peering connection operations include: -// -// * CreateVpcPeeringAuthorization -// -// * DescribeVpcPeeringAuthorizations -// -// * DeleteVpcPeeringAuthorization -// -// * CreateVpcPeeringConnection -// -// * DescribeVpcPeeringConnections -// -// * DeleteVpcPeeringConnection -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation DescribeVpcPeeringConnections for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringConnections -func (c *GameLift) DescribeVpcPeeringConnections(input *DescribeVpcPeeringConnectionsInput) (*DescribeVpcPeeringConnectionsOutput, error) { - req, out := c.DescribeVpcPeeringConnectionsRequest(input) - return out, req.Send() -} - -// DescribeVpcPeeringConnectionsWithContext is the same as DescribeVpcPeeringConnections with the addition of -// the ability to pass a context and additional request options. 
-// -// See DescribeVpcPeeringConnections for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) DescribeVpcPeeringConnectionsWithContext(ctx aws.Context, input *DescribeVpcPeeringConnectionsInput, opts ...request.Option) (*DescribeVpcPeeringConnectionsOutput, error) { - req, out := c.DescribeVpcPeeringConnectionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetGameSessionLogUrl = "GetGameSessionLogUrl" - -// GetGameSessionLogUrlRequest generates a "aws/request.Request" representing the -// client's request for the GetGameSessionLogUrl operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetGameSessionLogUrl for more information on using the GetGameSessionLogUrl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetGameSessionLogUrlRequest method. 
-// req, resp := client.GetGameSessionLogUrlRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrl -func (c *GameLift) GetGameSessionLogUrlRequest(input *GetGameSessionLogUrlInput) (req *request.Request, output *GetGameSessionLogUrlOutput) { - op := &request.Operation{ - Name: opGetGameSessionLogUrl, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetGameSessionLogUrlInput{} - } - - output = &GetGameSessionLogUrlOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetGameSessionLogUrl API operation for Amazon GameLift. -// -// Retrieves the location of stored game session logs for a specified game session. -// When a game session is terminated, Amazon GameLift automatically stores the -// logs in Amazon S3 and retains them for 14 days. Use this URL to download -// the logs. -// -// See the AWS Service Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift) -// page for maximum log file sizes. Log files that exceed this limit are not -// saved. -// -// Game-session-related operations include: -// -// * CreateGameSession -// -// * DescribeGameSessions -// -// * DescribeGameSessionDetails -// -// * SearchGameSessions -// -// * UpdateGameSession -// -// * GetGameSessionLogUrl -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation GetGameSessionLogUrl for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrl -func (c *GameLift) GetGameSessionLogUrl(input *GetGameSessionLogUrlInput) (*GetGameSessionLogUrlOutput, error) { - req, out := c.GetGameSessionLogUrlRequest(input) - return out, req.Send() -} - -// GetGameSessionLogUrlWithContext is the same as GetGameSessionLogUrl with the addition of -// the ability to pass a context and additional request options. -// -// See GetGameSessionLogUrl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) GetGameSessionLogUrlWithContext(ctx aws.Context, input *GetGameSessionLogUrlInput, opts ...request.Option) (*GetGameSessionLogUrlOutput, error) { - req, out := c.GetGameSessionLogUrlRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opGetInstanceAccess = "GetInstanceAccess" - -// GetInstanceAccessRequest generates a "aws/request.Request" representing the -// client's request for the GetInstanceAccess operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetInstanceAccess for more information on using the GetInstanceAccess -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetInstanceAccessRequest method. -// req, resp := client.GetInstanceAccessRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccess -func (c *GameLift) GetInstanceAccessRequest(input *GetInstanceAccessInput) (req *request.Request, output *GetInstanceAccessOutput) { - op := &request.Operation{ - Name: opGetInstanceAccess, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetInstanceAccessInput{} - } - - output = &GetInstanceAccessOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetInstanceAccess API operation for Amazon GameLift. -// -// Requests remote access to a fleet instance. Remote access is useful for debugging, -// gathering benchmarking data, or watching activity in real time. -// -// Access requires credentials that match the operating system of the instance. -// For a Windows instance, Amazon GameLift returns a user name and password -// as strings for use with a Windows Remote Desktop client. 
For a Linux instance, -// Amazon GameLift returns a user name and RSA private key, also as strings, -// for use with an SSH client. The private key must be saved in the proper format -// to a .pem file before using. If you're making this request using the AWS -// CLI, saving the secret can be handled as part of the GetInstanceAccess request. -// (See the example later in this topic). For more information on remote access, -// see Remotely Accessing an Instance (http://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-remote-access.html). -// -// To request access to a specific instance, specify the IDs of the instance -// and the fleet it belongs to. If successful, an InstanceAccess object is returned -// containing the instance's IP address and a set of credentials. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation GetInstanceAccess for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccess -func (c *GameLift) GetInstanceAccess(input *GetInstanceAccessInput) (*GetInstanceAccessOutput, error) { - req, out := c.GetInstanceAccessRequest(input) - return out, req.Send() -} - -// GetInstanceAccessWithContext is the same as GetInstanceAccess with the addition of -// the ability to pass a context and additional request options. -// -// See GetInstanceAccess for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) GetInstanceAccessWithContext(ctx aws.Context, input *GetInstanceAccessInput, opts ...request.Option) (*GetInstanceAccessOutput, error) { - req, out := c.GetInstanceAccessRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListAliases = "ListAliases" - -// ListAliasesRequest generates a "aws/request.Request" representing the -// client's request for the ListAliases operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListAliases for more information on using the ListAliases -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListAliasesRequest method. 
-// req, resp := client.ListAliasesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliases -func (c *GameLift) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) { - op := &request.Operation{ - Name: opListAliases, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListAliasesInput{} - } - - output = &ListAliasesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListAliases API operation for Amazon GameLift. -// -// Retrieves all aliases for this AWS account. You can filter the result set -// by alias name and/or routing strategy type. Use the pagination parameters -// to retrieve results in sequential pages. -// -// Returned aliases are not listed in any particular order. -// -// Alias-related operations include: -// -// * CreateAlias -// -// * ListAliases -// -// * DescribeAlias -// -// * UpdateAlias -// -// * DeleteAlias -// -// * ResolveAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation ListAliases for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliases -func (c *GameLift) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { - req, out := c.ListAliasesRequest(input) - return out, req.Send() -} - -// ListAliasesWithContext is the same as ListAliases with the addition of -// the ability to pass a context and additional request options. -// -// See ListAliases for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) ListAliasesWithContext(ctx aws.Context, input *ListAliasesInput, opts ...request.Option) (*ListAliasesOutput, error) { - req, out := c.ListAliasesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBuilds = "ListBuilds" - -// ListBuildsRequest generates a "aws/request.Request" representing the -// client's request for the ListBuilds operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListBuilds for more information on using the ListBuilds -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListBuildsRequest method. 
-// req, resp := client.ListBuildsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuilds -func (c *GameLift) ListBuildsRequest(input *ListBuildsInput) (req *request.Request, output *ListBuildsOutput) { - op := &request.Operation{ - Name: opListBuilds, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListBuildsInput{} - } - - output = &ListBuildsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBuilds API operation for Amazon GameLift. -// -// Retrieves build records for all builds associated with the AWS account in -// use. You can limit results to builds that are in a specific status by using -// the Status parameter. Use the pagination parameters to retrieve results in -// a set of sequential pages. -// -// Build records are not listed in any particular order. -// -// Build-related operations include: -// -// * CreateBuild -// -// * ListBuilds -// -// * DescribeBuild -// -// * UpdateBuild -// -// * DeleteBuild -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation ListBuilds for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. 
Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuilds -func (c *GameLift) ListBuilds(input *ListBuildsInput) (*ListBuildsOutput, error) { - req, out := c.ListBuildsRequest(input) - return out, req.Send() -} - -// ListBuildsWithContext is the same as ListBuilds with the addition of -// the ability to pass a context and additional request options. -// -// See ListBuilds for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) ListBuildsWithContext(ctx aws.Context, input *ListBuildsInput, opts ...request.Option) (*ListBuildsOutput, error) { - req, out := c.ListBuildsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListFleets = "ListFleets" - -// ListFleetsRequest generates a "aws/request.Request" representing the -// client's request for the ListFleets operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListFleets for more information on using the ListFleets -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListFleetsRequest method. 
-// req, resp := client.ListFleetsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleets -func (c *GameLift) ListFleetsRequest(input *ListFleetsInput) (req *request.Request, output *ListFleetsOutput) { - op := &request.Operation{ - Name: opListFleets, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListFleetsInput{} - } - - output = &ListFleetsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListFleets API operation for Amazon GameLift. -// -// Retrieves a collection of fleet records for this AWS account. You can filter -// the result set by build ID. Use the pagination parameters to retrieve results -// in sequential pages. -// -// Fleet records are not listed in any particular order. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation ListFleets for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleets -func (c *GameLift) ListFleets(input *ListFleetsInput) (*ListFleetsOutput, error) { - req, out := c.ListFleetsRequest(input) - return out, req.Send() -} - -// ListFleetsWithContext is the same as ListFleets with the addition of -// the ability to pass a context and additional request options. -// -// See ListFleets for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) ListFleetsWithContext(ctx aws.Context, input *ListFleetsInput, opts ...request.Option) (*ListFleetsOutput, error) { - req, out := c.ListFleetsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutScalingPolicy = "PutScalingPolicy" - -// PutScalingPolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutScalingPolicy operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutScalingPolicy for more information on using the PutScalingPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutScalingPolicyRequest method. -// req, resp := client.PutScalingPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PutScalingPolicy -func (c *GameLift) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *request.Request, output *PutScalingPolicyOutput) { - op := &request.Operation{ - Name: opPutScalingPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutScalingPolicyInput{} - } - - output = &PutScalingPolicyOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutScalingPolicy API operation for Amazon GameLift. -// -// Creates or updates a scaling policy for a fleet. An active scaling policy -// prompts Amazon GameLift to track a certain metric for a fleet and automatically -// change the fleet's capacity in specific circumstances. Each scaling policy -// contains one rule statement. Fleets can have multiple scaling policies in -// force simultaneously. -// -// A scaling policy rule statement has the following structure: -// -// If [MetricName] is [ComparisonOperator][Threshold] for [EvaluationPeriods] -// minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment]. 
-// -// For example, this policy: "If the number of idle instances exceeds 20 for -// more than 15 minutes, then reduce the fleet capacity by 10 instances" could -// be implemented as the following rule statement: -// -// If [IdleInstances] is [GreaterThanOrEqualToThreshold] [20] for [15] minutes, -// then [ChangeInCapacity] by [-10]. -// -// To create or update a scaling policy, specify a unique combination of name -// and fleet ID, and set the rule values. All parameters for this action are -// required. If successful, the policy name is returned. Scaling policies cannot -// be suspended or made inactive. To stop enforcing a scaling policy, call DeleteScalingPolicy. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation PutScalingPolicy for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. 
-// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PutScalingPolicy -func (c *GameLift) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalingPolicyOutput, error) { - req, out := c.PutScalingPolicyRequest(input) - return out, req.Send() -} - -// PutScalingPolicyWithContext is the same as PutScalingPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See PutScalingPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) PutScalingPolicyWithContext(ctx aws.Context, input *PutScalingPolicyInput, opts ...request.Option) (*PutScalingPolicyOutput, error) { - req, out := c.PutScalingPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRequestUploadCredentials = "RequestUploadCredentials" - -// RequestUploadCredentialsRequest generates a "aws/request.Request" representing the -// client's request for the RequestUploadCredentials operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See RequestUploadCredentials for more information on using the RequestUploadCredentials -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RequestUploadCredentialsRequest method. -// req, resp := client.RequestUploadCredentialsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RequestUploadCredentials -func (c *GameLift) RequestUploadCredentialsRequest(input *RequestUploadCredentialsInput) (req *request.Request, output *RequestUploadCredentialsOutput) { - op := &request.Operation{ - Name: opRequestUploadCredentials, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RequestUploadCredentialsInput{} - } - - output = &RequestUploadCredentialsOutput{} - req = c.newRequest(op, input, output) - return -} - -// RequestUploadCredentials API operation for Amazon GameLift. -// -// This API call is not currently in use. Retrieves a fresh set of upload credentials -// and the assigned Amazon S3 storage location for a specific build. Valid credentials -// are required to upload your game build files to Amazon S3. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation RequestUploadCredentials for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. 
-// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RequestUploadCredentials -func (c *GameLift) RequestUploadCredentials(input *RequestUploadCredentialsInput) (*RequestUploadCredentialsOutput, error) { - req, out := c.RequestUploadCredentialsRequest(input) - return out, req.Send() -} - -// RequestUploadCredentialsWithContext is the same as RequestUploadCredentials with the addition of -// the ability to pass a context and additional request options. -// -// See RequestUploadCredentials for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) RequestUploadCredentialsWithContext(ctx aws.Context, input *RequestUploadCredentialsInput, opts ...request.Option) (*RequestUploadCredentialsOutput, error) { - req, out := c.RequestUploadCredentialsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opResolveAlias = "ResolveAlias" - -// ResolveAliasRequest generates a "aws/request.Request" representing the -// client's request for the ResolveAlias operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ResolveAlias for more information on using the ResolveAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ResolveAliasRequest method. -// req, resp := client.ResolveAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResolveAlias -func (c *GameLift) ResolveAliasRequest(input *ResolveAliasInput) (req *request.Request, output *ResolveAliasOutput) { - op := &request.Operation{ - Name: opResolveAlias, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ResolveAliasInput{} - } - - output = &ResolveAliasOutput{} - req = c.newRequest(op, input, output) - return -} - -// ResolveAlias API operation for Amazon GameLift. -// -// Retrieves the fleet ID that a specified alias is currently pointing to. -// -// Alias-related operations include: -// -// * CreateAlias -// -// * ListAliases -// -// * DescribeAlias -// -// * UpdateAlias -// -// * DeleteAlias -// -// * ResolveAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation ResolveAlias for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. 
Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" -// The service is unable to resolve the routing for a particular alias because -// it has a terminal RoutingStrategy associated with it. The message returned -// in this exception is the message defined in the routing strategy itself. -// Such requests should only be retried if the routing strategy for the specified -// alias is modified. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResolveAlias -func (c *GameLift) ResolveAlias(input *ResolveAliasInput) (*ResolveAliasOutput, error) { - req, out := c.ResolveAliasRequest(input) - return out, req.Send() -} - -// ResolveAliasWithContext is the same as ResolveAlias with the addition of -// the ability to pass a context and additional request options. -// -// See ResolveAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *GameLift) ResolveAliasWithContext(ctx aws.Context, input *ResolveAliasInput, opts ...request.Option) (*ResolveAliasOutput, error) { - req, out := c.ResolveAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opSearchGameSessions = "SearchGameSessions" - -// SearchGameSessionsRequest generates a "aws/request.Request" representing the -// client's request for the SearchGameSessions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See SearchGameSessions for more information on using the SearchGameSessions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SearchGameSessionsRequest method. -// req, resp := client.SearchGameSessionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/SearchGameSessions -func (c *GameLift) SearchGameSessionsRequest(input *SearchGameSessionsInput) (req *request.Request, output *SearchGameSessionsOutput) { - op := &request.Operation{ - Name: opSearchGameSessions, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SearchGameSessionsInput{} - } - - output = &SearchGameSessionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// SearchGameSessions API operation for Amazon GameLift. -// -// Retrieves a set of game sessions that match a set of search criteria and -// sorts them in a specified order. A game session search is limited to a single -// fleet. 
Search results include only game sessions that are in ACTIVE status. -// If you need to retrieve game sessions with a status other than active, use -// DescribeGameSessions. If you need to retrieve the protection policy for each -// game session, use DescribeGameSessionDetails. -// -// You can search or sort by the following game session attributes: -// -// * gameSessionId -- Unique identifier for the game session. You can use -// either a GameSessionId or GameSessionArn value. -// -// * gameSessionName -- Name assigned to a game session. This value is set -// when requesting a new game session with CreateGameSession or updating -// with UpdateGameSession. Game session names do not need to be unique to -// a game session. -// -// * creationTimeMillis -- Value indicating when a game session was created. -// It is expressed in Unix time as milliseconds. -// -// * playerSessionCount -- Number of players currently connected to a game -// session. This value changes rapidly as players join the session or drop -// out. -// -// * maximumSessions -- Maximum number of player sessions allowed for a game -// session. This value is set when requesting a new game session with CreateGameSession -// or updating with UpdateGameSession. -// -// * hasAvailablePlayerSessions -- Boolean value indicating whether a game -// session has reached its maximum number of players. When searching with -// this attribute, the search value must be true or false. It is highly recommended -// that all search requests include this filter attribute to optimize search -// performance and return only sessions that players can join. -// -// To search or sort, specify either a fleet ID or an alias ID, and provide -// a search filter expression, a sort expression, or both. Use the pagination -// parameters to retrieve results as a set of sequential pages. If successful, -// a collection of GameSession objects matching the request is returned. 
-// -// Returned values for playerSessionCount and hasAvailablePlayerSessions change -// quickly as players join sessions and others drop out. Results should be considered -// a snapshot in time. Be sure to refresh search results often, and handle sessions -// that fill up before a player can join. -// -// Game-session-related operations include: -// -// * CreateGameSession -// -// * DescribeGameSessions -// -// * DescribeGameSessionDetails -// -// * SearchGameSessions -// -// * UpdateGameSession -// -// * GetGameSessionLogUrl -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation SearchGameSessions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeTerminalRoutingStrategyException "TerminalRoutingStrategyException" -// The service is unable to resolve the routing for a particular alias because -// it has a terminal RoutingStrategy associated with it. 
The message returned -// in this exception is the message defined in the routing strategy itself. -// Such requests should only be retried if the routing strategy for the specified -// alias is modified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/SearchGameSessions -func (c *GameLift) SearchGameSessions(input *SearchGameSessionsInput) (*SearchGameSessionsOutput, error) { - req, out := c.SearchGameSessionsRequest(input) - return out, req.Send() -} - -// SearchGameSessionsWithContext is the same as SearchGameSessions with the addition of -// the ability to pass a context and additional request options. -// -// See SearchGameSessions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) SearchGameSessionsWithContext(ctx aws.Context, input *SearchGameSessionsInput, opts ...request.Option) (*SearchGameSessionsOutput, error) { - req, out := c.SearchGameSessionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStartGameSessionPlacement = "StartGameSessionPlacement" - -// StartGameSessionPlacementRequest generates a "aws/request.Request" representing the -// client's request for the StartGameSessionPlacement operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartGameSessionPlacement for more information on using the StartGameSessionPlacement -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StartGameSessionPlacementRequest method. -// req, resp := client.StartGameSessionPlacementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartGameSessionPlacement -func (c *GameLift) StartGameSessionPlacementRequest(input *StartGameSessionPlacementInput) (req *request.Request, output *StartGameSessionPlacementOutput) { - op := &request.Operation{ - Name: opStartGameSessionPlacement, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartGameSessionPlacementInput{} - } - - output = &StartGameSessionPlacementOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartGameSessionPlacement API operation for Amazon GameLift. -// -// Places a request for a new game session in a queue (see CreateGameSessionQueue). -// When processing a placement request, Amazon GameLift searches for available -// resources on the queue's destinations, scanning each until it finds resources -// or the placement request times out. -// -// A game session placement request can also request player sessions. When a -// new game session is successfully created, Amazon GameLift creates a player -// session for each player included in the request. -// -// When placing a game session, by default Amazon GameLift tries each fleet -// in the order they are listed in the queue configuration. Ideally, a queue's -// destinations are listed in preference order. -// -// Alternatively, when requesting a game session with players, you can also -// provide latency data for each player in relevant regions. Latency data indicates -// the performance lag a player experiences when connected to a fleet in the -// region. 
Amazon GameLift uses latency data to reorder the list of destinations -// to place the game session in a region with minimal lag. If latency data is -// provided for multiple players, Amazon GameLift calculates each region's average -// lag for all players and reorders to get the best game play across all players. -// -// To place a new game session request, specify the following: -// -// * The queue name and a set of game session properties and settings -// -// * A unique ID (such as a UUID) for the placement. You use this ID to track -// the status of the placement request -// -// * (Optional) A set of IDs and player data for each player you want to -// join to the new game session -// -// * Latency data for all players (if you want to optimize game play for -// the players) -// -// If successful, a new game session placement is created. -// -// To track the status of a placement request, call DescribeGameSessionPlacement -// and check the request's status. If the status is FULFILLED, a new game session -// has been created and a game session ARN and region are referenced. If the -// placement request times out, you can resubmit the request or retry it with -// a different queue. -// -// Game-session-related operations include: -// -// * CreateGameSession -// -// * DescribeGameSessions -// -// * DescribeGameSessionDetails -// -// * SearchGameSessions -// -// * UpdateGameSession -// -// * GetGameSessionLogUrl -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation StartGameSessionPlacement for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartGameSessionPlacement -func (c *GameLift) StartGameSessionPlacement(input *StartGameSessionPlacementInput) (*StartGameSessionPlacementOutput, error) { - req, out := c.StartGameSessionPlacementRequest(input) - return out, req.Send() -} - -// StartGameSessionPlacementWithContext is the same as StartGameSessionPlacement with the addition of -// the ability to pass a context and additional request options. -// -// See StartGameSessionPlacement for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) StartGameSessionPlacementWithContext(ctx aws.Context, input *StartGameSessionPlacementInput, opts ...request.Option) (*StartGameSessionPlacementOutput, error) { - req, out := c.StartGameSessionPlacementRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opStartMatchmaking = "StartMatchmaking" - -// StartMatchmakingRequest generates a "aws/request.Request" representing the -// client's request for the StartMatchmaking operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartMatchmaking for more information on using the StartMatchmaking -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StartMatchmakingRequest method. -// req, resp := client.StartMatchmakingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartMatchmaking -func (c *GameLift) StartMatchmakingRequest(input *StartMatchmakingInput) (req *request.Request, output *StartMatchmakingOutput) { - op := &request.Operation{ - Name: opStartMatchmaking, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartMatchmakingInput{} - } - - output = &StartMatchmakingOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartMatchmaking API operation for Amazon GameLift. -// -// Uses FlexMatch to create a game match for a group of players based on custom -// matchmaking rules, and starts a new game for the matched players. Each matchmaking -// request specifies the type of match to build (team configuration, rules for -// an acceptable match, etc.). The request also specifies the players to find -// a match for and where to host the new game session for optimal performance. 
-// A matchmaking request might start with a single player or a group of players -// who want to play together. FlexMatch finds additional players as needed to -// fill the match. Match type, rules, and the queue used to place a new game -// session are defined in a MatchmakingConfiguration. For complete information -// on setting up and using FlexMatch, see the topic Adding FlexMatch to Your -// Game (http://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html). -// -// To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, -// and include the players to be matched. You must also include a set of player -// attributes relevant for the matchmaking configuration. If successful, a matchmaking -// ticket is returned with status set to QUEUED. Track the status of the ticket -// to respond as needed and acquire game session connection information for -// successfully completed matches. -// -// Tracking ticket status -- A couple of options are available for tracking -// the status of matchmaking requests: -// -// * Polling -- Call DescribeMatchmaking. This operation returns the full -// ticket object, including current status and (for completed tickets) game -// session connection info. We recommend polling no more than once every -// 10 seconds. -// -// * Notifications -- Get event notifications for changes in ticket status -// using Amazon Simple Notification Service (SNS). Notifications are easy -// to set up (see CreateMatchmakingConfiguration) and typically deliver match -// status changes faster and more efficiently than polling. We recommend -// that you use polling to back up to notifications (since delivery is not -// guaranteed) and call DescribeMatchmaking only when notifications are not -// received within 30 seconds. 
-// -// Processing a matchmaking request -- FlexMatch handles a matchmaking request -// as follows: -// -// Your client code submits a StartMatchmaking request for one or more players -// and tracks the status of the request ticket. -// -// FlexMatch uses this ticket and others in process to build an acceptable match. -// When a potential match is identified, all tickets in the proposed match are -// advanced to the next status. -// -// If the match requires player acceptance (set in the matchmaking configuration), -// the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your -// client code to solicit acceptance from all players in every ticket involved -// in the match, and then call AcceptMatch for each player. If any player rejects -// or fails to accept the match before a specified timeout, the proposed match -// is dropped (see AcceptMatch for more details). -// -// Once a match is proposed and accepted, the matchmaking tickets move into -// status PLACING. FlexMatch locates resources for a new game session using -// the game session queue (set in the matchmaking configuration) and creates -// the game session based on the match data. -// -// When the match is successfully placed, the matchmaking tickets move into -// COMPLETED status. Connection information (including game session endpoint -// and player session) is added to the matchmaking tickets. Matched players -// can use the connection information to join the game. -// -// Matchmaking-related operations include: -// -// * StartMatchmaking -// -// * DescribeMatchmaking -// -// * StopMatchmaking -// -// * AcceptMatch -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation StartMatchmaking for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" -// The requested operation is not supported in the region specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartMatchmaking -func (c *GameLift) StartMatchmaking(input *StartMatchmakingInput) (*StartMatchmakingOutput, error) { - req, out := c.StartMatchmakingRequest(input) - return out, req.Send() -} - -// StartMatchmakingWithContext is the same as StartMatchmaking with the addition of -// the ability to pass a context and additional request options. -// -// See StartMatchmaking for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) StartMatchmakingWithContext(ctx aws.Context, input *StartMatchmakingInput, opts ...request.Option) (*StartMatchmakingOutput, error) { - req, out := c.StartMatchmakingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opStopGameSessionPlacement = "StopGameSessionPlacement" - -// StopGameSessionPlacementRequest generates a "aws/request.Request" representing the -// client's request for the StopGameSessionPlacement operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StopGameSessionPlacement for more information on using the StopGameSessionPlacement -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StopGameSessionPlacementRequest method. -// req, resp := client.StopGameSessionPlacementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopGameSessionPlacement -func (c *GameLift) StopGameSessionPlacementRequest(input *StopGameSessionPlacementInput) (req *request.Request, output *StopGameSessionPlacementOutput) { - op := &request.Operation{ - Name: opStopGameSessionPlacement, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StopGameSessionPlacementInput{} - } - - output = &StopGameSessionPlacementOutput{} - req = c.newRequest(op, input, output) - return -} - -// StopGameSessionPlacement API operation for Amazon GameLift. -// -// Cancels a game session placement that is in PENDING status. To stop a placement, -// provide the placement ID values. If successful, the placement is moved to -// CANCELLED status. 
-// -// Game-session-related operations include: -// -// * CreateGameSession -// -// * DescribeGameSessions -// -// * DescribeGameSessionDetails -// -// * SearchGameSessions -// -// * UpdateGameSession -// -// * GetGameSessionLogUrl -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation StopGameSessionPlacement for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopGameSessionPlacement -func (c *GameLift) StopGameSessionPlacement(input *StopGameSessionPlacementInput) (*StopGameSessionPlacementOutput, error) { - req, out := c.StopGameSessionPlacementRequest(input) - return out, req.Send() -} - -// StopGameSessionPlacementWithContext is the same as StopGameSessionPlacement with the addition of -// the ability to pass a context and additional request options. 
-// -// See StopGameSessionPlacement for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) StopGameSessionPlacementWithContext(ctx aws.Context, input *StopGameSessionPlacementInput, opts ...request.Option) (*StopGameSessionPlacementOutput, error) { - req, out := c.StopGameSessionPlacementRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStopMatchmaking = "StopMatchmaking" - -// StopMatchmakingRequest generates a "aws/request.Request" representing the -// client's request for the StopMatchmaking operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StopMatchmaking for more information on using the StopMatchmaking -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StopMatchmakingRequest method. 
-// req, resp := client.StopMatchmakingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmaking -func (c *GameLift) StopMatchmakingRequest(input *StopMatchmakingInput) (req *request.Request, output *StopMatchmakingOutput) { - op := &request.Operation{ - Name: opStopMatchmaking, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StopMatchmakingInput{} - } - - output = &StopMatchmakingOutput{} - req = c.newRequest(op, input, output) - return -} - -// StopMatchmaking API operation for Amazon GameLift. -// -// Cancels a matchmaking ticket that is currently being processed. To stop the -// matchmaking operation, specify the ticket ID. If successful, work on the -// ticket is stopped, and the ticket status is changed to CANCELLED. -// -// Matchmaking-related operations include: -// -// * StartMatchmaking -// -// * DescribeMatchmaking -// -// * StopMatchmaking -// -// * AcceptMatch -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation StopMatchmaking for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. 
-// -// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" -// The requested operation is not supported in the region specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmaking -func (c *GameLift) StopMatchmaking(input *StopMatchmakingInput) (*StopMatchmakingOutput, error) { - req, out := c.StopMatchmakingRequest(input) - return out, req.Send() -} - -// StopMatchmakingWithContext is the same as StopMatchmaking with the addition of -// the ability to pass a context and additional request options. -// -// See StopMatchmaking for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) StopMatchmakingWithContext(ctx aws.Context, input *StopMatchmakingInput, opts ...request.Option) (*StopMatchmakingOutput, error) { - req, out := c.StopMatchmakingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateAlias = "UpdateAlias" - -// UpdateAliasRequest generates a "aws/request.Request" representing the -// client's request for the UpdateAlias operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateAlias for more information on using the UpdateAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateAliasRequest method. 
-// req, resp := client.UpdateAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateAlias -func (c *GameLift) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *UpdateAliasOutput) { - op := &request.Operation{ - Name: opUpdateAlias, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateAliasInput{} - } - - output = &UpdateAliasOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateAlias API operation for Amazon GameLift. -// -// Updates properties for an alias. To update properties, specify the alias -// ID to be updated and provide the information to be changed. To reassign an -// alias to another fleet, provide an updated routing strategy. If successful, -// the updated alias record is returned. -// -// Alias-related operations include: -// -// * CreateAlias -// -// * ListAliases -// -// * DescribeAlias -// -// * UpdateAlias -// -// * DeleteAlias -// -// * ResolveAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation UpdateAlias for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. 
-// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateAlias -func (c *GameLift) UpdateAlias(input *UpdateAliasInput) (*UpdateAliasOutput, error) { - req, out := c.UpdateAliasRequest(input) - return out, req.Send() -} - -// UpdateAliasWithContext is the same as UpdateAlias with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) UpdateAliasWithContext(ctx aws.Context, input *UpdateAliasInput, opts ...request.Option) (*UpdateAliasOutput, error) { - req, out := c.UpdateAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateBuild = "UpdateBuild" - -// UpdateBuildRequest generates a "aws/request.Request" representing the -// client's request for the UpdateBuild operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateBuild for more information on using the UpdateBuild -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the UpdateBuildRequest method. -// req, resp := client.UpdateBuildRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateBuild -func (c *GameLift) UpdateBuildRequest(input *UpdateBuildInput) (req *request.Request, output *UpdateBuildOutput) { - op := &request.Operation{ - Name: opUpdateBuild, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateBuildInput{} - } - - output = &UpdateBuildOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateBuild API operation for Amazon GameLift. -// -// Updates metadata in a build record, including the build name and version. -// To update the metadata, specify the build ID to update and provide the new -// values. If successful, a build object containing the updated metadata is -// returned. -// -// Build-related operations include: -// -// * CreateBuild -// -// * ListBuilds -// -// * DescribeBuild -// -// * UpdateBuild -// -// * DeleteBuild -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation UpdateBuild for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. 
-// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateBuild -func (c *GameLift) UpdateBuild(input *UpdateBuildInput) (*UpdateBuildOutput, error) { - req, out := c.UpdateBuildRequest(input) - return out, req.Send() -} - -// UpdateBuildWithContext is the same as UpdateBuild with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateBuild for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) UpdateBuildWithContext(ctx aws.Context, input *UpdateBuildInput, opts ...request.Option) (*UpdateBuildOutput, error) { - req, out := c.UpdateBuildRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateFleetAttributes = "UpdateFleetAttributes" - -// UpdateFleetAttributesRequest generates a "aws/request.Request" representing the -// client's request for the UpdateFleetAttributes operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateFleetAttributes for more information on using the UpdateFleetAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateFleetAttributesRequest method. -// req, resp := client.UpdateFleetAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetAttributes -func (c *GameLift) UpdateFleetAttributesRequest(input *UpdateFleetAttributesInput) (req *request.Request, output *UpdateFleetAttributesOutput) { - op := &request.Operation{ - Name: opUpdateFleetAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateFleetAttributesInput{} - } - - output = &UpdateFleetAttributesOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateFleetAttributes API operation for Amazon GameLift. -// -// Updates fleet properties, including name and description, for a fleet. To -// update metadata, specify the fleet ID and the property values that you want -// to change. If successful, the fleet ID for the updated fleet is returned. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation UpdateFleetAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeConflictException "ConflictException" -// The requested operation would cause a conflict with the current state of -// a service resource associated with the request. Resolve the conflict before -// retrying this request. -// -// * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException" -// The requested operation would cause a conflict with the current state of -// a resource associated with the request and/or the fleet. Resolve the conflict -// before retrying. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The requested operation would cause the resource to exceed the allowed service -// limit. Resolve the issue before retrying. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetAttributes -func (c *GameLift) UpdateFleetAttributes(input *UpdateFleetAttributesInput) (*UpdateFleetAttributesOutput, error) { - req, out := c.UpdateFleetAttributesRequest(input) - return out, req.Send() -} - -// UpdateFleetAttributesWithContext is the same as UpdateFleetAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateFleetAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) UpdateFleetAttributesWithContext(ctx aws.Context, input *UpdateFleetAttributesInput, opts ...request.Option) (*UpdateFleetAttributesOutput, error) { - req, out := c.UpdateFleetAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateFleetCapacity = "UpdateFleetCapacity" - -// UpdateFleetCapacityRequest generates a "aws/request.Request" representing the -// client's request for the UpdateFleetCapacity operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateFleetCapacity for more information on using the UpdateFleetCapacity -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateFleetCapacityRequest method. 
-// req, resp := client.UpdateFleetCapacityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetCapacity -func (c *GameLift) UpdateFleetCapacityRequest(input *UpdateFleetCapacityInput) (req *request.Request, output *UpdateFleetCapacityOutput) { - op := &request.Operation{ - Name: opUpdateFleetCapacity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateFleetCapacityInput{} - } - - output = &UpdateFleetCapacityOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateFleetCapacity API operation for Amazon GameLift. -// -// Updates capacity settings for a fleet. Use this action to specify the number -// of EC2 instances (hosts) that you want this fleet to contain. Before calling -// this action, you may want to call DescribeEC2InstanceLimits to get the maximum -// capacity based on the fleet's EC2 instance type. -// -// If you're using autoscaling (see PutScalingPolicy), you may want to specify -// a minimum and/or maximum capacity. If you don't provide these, autoscaling -// can set capacity anywhere between zero and the service limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift). -// -// To update fleet capacity, specify the fleet ID and the number of instances -// you want the fleet to host. If successful, Amazon GameLift starts or terminates -// instances so that the fleet's active instance count matches the desired instance -// count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. -// If the desired instance count is higher than the instance type's limit, the -// "Limit Exceeded" exception occurs. 
-// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation UpdateFleetCapacity for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeConflictException "ConflictException" -// The requested operation would cause a conflict with the current state of -// a service resource associated with the request. Resolve the conflict before -// retrying this request. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The requested operation would cause the resource to exceed the allowed service -// limit. Resolve the issue before retrying. -// -// * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException" -// The requested operation would cause a conflict with the current state of -// a resource associated with the request and/or the fleet. Resolve the conflict -// before retrying. 
-// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetCapacity -func (c *GameLift) UpdateFleetCapacity(input *UpdateFleetCapacityInput) (*UpdateFleetCapacityOutput, error) { - req, out := c.UpdateFleetCapacityRequest(input) - return out, req.Send() -} - -// UpdateFleetCapacityWithContext is the same as UpdateFleetCapacity with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateFleetCapacity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) UpdateFleetCapacityWithContext(ctx aws.Context, input *UpdateFleetCapacityInput, opts ...request.Option) (*UpdateFleetCapacityOutput, error) { - req, out := c.UpdateFleetCapacityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateFleetPortSettings = "UpdateFleetPortSettings" - -// UpdateFleetPortSettingsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateFleetPortSettings operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateFleetPortSettings for more information on using the UpdateFleetPortSettings -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateFleetPortSettingsRequest method. -// req, resp := client.UpdateFleetPortSettingsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetPortSettings -func (c *GameLift) UpdateFleetPortSettingsRequest(input *UpdateFleetPortSettingsInput) (req *request.Request, output *UpdateFleetPortSettingsOutput) { - op := &request.Operation{ - Name: opUpdateFleetPortSettings, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateFleetPortSettingsInput{} - } - - output = &UpdateFleetPortSettingsOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateFleetPortSettings API operation for Amazon GameLift. -// -// Updates port settings for a fleet. To update settings, specify the fleet -// ID to be updated and list the permissions you want to update. List the permissions -// you want to add in InboundPermissionAuthorizations, and permissions you want -// to remove in InboundPermissionRevocations. Permissions to be removed must -// match existing fleet permissions. If successful, the fleet ID for the updated -// fleet is returned. 
-// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation UpdateFleetPortSettings for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeConflictException "ConflictException" -// The requested operation would cause a conflict with the current state of -// a service resource associated with the request. Resolve the conflict before -// retrying this request. -// -// * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException" -// The requested operation would cause a conflict with the current state of -// a resource associated with the request and/or the fleet. Resolve the conflict -// before retrying. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The requested operation would cause the resource to exceed the allowed service -// limit. Resolve the issue before retrying. 
-// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetPortSettings -func (c *GameLift) UpdateFleetPortSettings(input *UpdateFleetPortSettingsInput) (*UpdateFleetPortSettingsOutput, error) { - req, out := c.UpdateFleetPortSettingsRequest(input) - return out, req.Send() -} - -// UpdateFleetPortSettingsWithContext is the same as UpdateFleetPortSettings with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateFleetPortSettings for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) UpdateFleetPortSettingsWithContext(ctx aws.Context, input *UpdateFleetPortSettingsInput, opts ...request.Option) (*UpdateFleetPortSettingsOutput, error) { - req, out := c.UpdateFleetPortSettingsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateGameSession = "UpdateGameSession" - -// UpdateGameSessionRequest generates a "aws/request.Request" representing the -// client's request for the UpdateGameSession operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateGameSession for more information on using the UpdateGameSession -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateGameSessionRequest method. -// req, resp := client.UpdateGameSessionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSession -func (c *GameLift) UpdateGameSessionRequest(input *UpdateGameSessionInput) (req *request.Request, output *UpdateGameSessionOutput) { - op := &request.Operation{ - Name: opUpdateGameSession, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateGameSessionInput{} - } - - output = &UpdateGameSessionOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateGameSession API operation for Amazon GameLift. -// -// Updates game session properties. This includes the session name, maximum -// player count, protection policy, which controls whether or not an active -// game session can be terminated during a scale-down event, and the player -// session creation policy, which controls whether or not new players can join -// the session. To update a game session, specify the game session ID and the -// values you want to change. If successful, an updated GameSession object is -// returned. 
-// -// Game-session-related operations include: -// -// * CreateGameSession -// -// * DescribeGameSessions -// -// * DescribeGameSessionDetails -// -// * SearchGameSessions -// -// * UpdateGameSession -// -// * GetGameSessionLogUrl -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation UpdateGameSession for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeConflictException "ConflictException" -// The requested operation would cause a conflict with the current state of -// a service resource associated with the request. Resolve the conflict before -// retrying this request. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeInvalidGameSessionStatusException "InvalidGameSessionStatusException" -// The requested operation would cause a conflict with the current state of -// a resource associated with the request and/or the game instance. Resolve -// the conflict before retrying. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSession -func (c *GameLift) UpdateGameSession(input *UpdateGameSessionInput) (*UpdateGameSessionOutput, error) { - req, out := c.UpdateGameSessionRequest(input) - return out, req.Send() -} - -// UpdateGameSessionWithContext is the same as UpdateGameSession with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateGameSession for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) UpdateGameSessionWithContext(ctx aws.Context, input *UpdateGameSessionInput, opts ...request.Option) (*UpdateGameSessionOutput, error) { - req, out := c.UpdateGameSessionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateGameSessionQueue = "UpdateGameSessionQueue" - -// UpdateGameSessionQueueRequest generates a "aws/request.Request" representing the -// client's request for the UpdateGameSessionQueue operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateGameSessionQueue for more information on using the UpdateGameSessionQueue -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateGameSessionQueueRequest method. 
-// req, resp := client.UpdateGameSessionQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionQueue -func (c *GameLift) UpdateGameSessionQueueRequest(input *UpdateGameSessionQueueInput) (req *request.Request, output *UpdateGameSessionQueueOutput) { - op := &request.Operation{ - Name: opUpdateGameSessionQueue, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateGameSessionQueueInput{} - } - - output = &UpdateGameSessionQueueOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateGameSessionQueue API operation for Amazon GameLift. -// -// Updates settings for a game session queue, which determines how new game -// session requests in the queue are processed. To update settings, specify -// the queue name to be updated and provide the new settings. When updating -// destinations, provide a complete list of destinations. -// -// Queue-related operations include: -// -// * CreateGameSessionQueue -// -// * DescribeGameSessionQueues -// -// * UpdateGameSessionQueue -// -// * DeleteGameSessionQueue -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation UpdateGameSessionQueue for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. 
-// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionQueue -func (c *GameLift) UpdateGameSessionQueue(input *UpdateGameSessionQueueInput) (*UpdateGameSessionQueueOutput, error) { - req, out := c.UpdateGameSessionQueueRequest(input) - return out, req.Send() -} - -// UpdateGameSessionQueueWithContext is the same as UpdateGameSessionQueue with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateGameSessionQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) UpdateGameSessionQueueWithContext(ctx aws.Context, input *UpdateGameSessionQueueInput, opts ...request.Option) (*UpdateGameSessionQueueOutput, error) { - req, out := c.UpdateGameSessionQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateMatchmakingConfiguration = "UpdateMatchmakingConfiguration" - -// UpdateMatchmakingConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the UpdateMatchmakingConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See UpdateMatchmakingConfiguration for more information on using the UpdateMatchmakingConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateMatchmakingConfigurationRequest method. -// req, resp := client.UpdateMatchmakingConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateMatchmakingConfiguration -func (c *GameLift) UpdateMatchmakingConfigurationRequest(input *UpdateMatchmakingConfigurationInput) (req *request.Request, output *UpdateMatchmakingConfigurationOutput) { - op := &request.Operation{ - Name: opUpdateMatchmakingConfiguration, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateMatchmakingConfigurationInput{} - } - - output = &UpdateMatchmakingConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateMatchmakingConfiguration API operation for Amazon GameLift. -// -// Updates settings for a FlexMatch matchmaking configuration. To update settings, -// specify the configuration name to be updated and provide the new settings. -// -// Operations related to match configurations and rule sets include: -// -// * CreateMatchmakingConfiguration -// -// * DescribeMatchmakingConfigurations -// -// * UpdateMatchmakingConfiguration -// -// * DeleteMatchmakingConfiguration -// -// * CreateMatchmakingRuleSet -// -// * DescribeMatchmakingRuleSets -// -// * ValidateMatchmakingRuleSet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon GameLift's -// API operation UpdateMatchmakingConfiguration for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" -// The requested operation is not supported in the region specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateMatchmakingConfiguration -func (c *GameLift) UpdateMatchmakingConfiguration(input *UpdateMatchmakingConfigurationInput) (*UpdateMatchmakingConfigurationOutput, error) { - req, out := c.UpdateMatchmakingConfigurationRequest(input) - return out, req.Send() -} - -// UpdateMatchmakingConfigurationWithContext is the same as UpdateMatchmakingConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateMatchmakingConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *GameLift) UpdateMatchmakingConfigurationWithContext(ctx aws.Context, input *UpdateMatchmakingConfigurationInput, opts ...request.Option) (*UpdateMatchmakingConfigurationOutput, error) { - req, out := c.UpdateMatchmakingConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateRuntimeConfiguration = "UpdateRuntimeConfiguration" - -// UpdateRuntimeConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the UpdateRuntimeConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateRuntimeConfiguration for more information on using the UpdateRuntimeConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateRuntimeConfigurationRequest method. 
-// req, resp := client.UpdateRuntimeConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateRuntimeConfiguration -func (c *GameLift) UpdateRuntimeConfigurationRequest(input *UpdateRuntimeConfigurationInput) (req *request.Request, output *UpdateRuntimeConfigurationOutput) { - op := &request.Operation{ - Name: opUpdateRuntimeConfiguration, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateRuntimeConfigurationInput{} - } - - output = &UpdateRuntimeConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateRuntimeConfiguration API operation for Amazon GameLift. -// -// Updates the current run-time configuration for the specified fleet, which -// tells Amazon GameLift how to launch server processes on instances in the -// fleet. You can update a fleet's run-time configuration at any time after -// the fleet is created; it does not need to be in an ACTIVE status. -// -// To update run-time configuration, specify the fleet ID and provide a RuntimeConfiguration -// object with the updated collection of server process configurations. -// -// Each instance in a Amazon GameLift fleet checks regularly for an updated -// run-time configuration and changes how it launches server processes to comply -// with the latest version. Existing server processes are not affected by the -// update; they continue to run until they end, while Amazon GameLift simply -// adds new server processes to fit the current run-time configuration. As a -// result, the run-time configuration changes are applied gradually as existing -// processes shut down and new processes are launched in Amazon GameLift's normal -// process recycling activity. 
-// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon GameLift's -// API operation UpdateRuntimeConfiguration for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnauthorizedException "UnauthorizedException" -// The client failed authentication. Clients should not retry such requests. -// -// * ErrCodeNotFoundException "NotFoundException" -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. 
-// -// * ErrCodeInvalidFleetStatusException "InvalidFleetStatusException" -// The requested operation would cause a conflict with the current state of -// a resource associated with the request and/or the fleet. Resolve the conflict -// before retrying. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateRuntimeConfiguration -func (c *GameLift) UpdateRuntimeConfiguration(input *UpdateRuntimeConfigurationInput) (*UpdateRuntimeConfigurationOutput, error) { - req, out := c.UpdateRuntimeConfigurationRequest(input) - return out, req.Send() -} - -// UpdateRuntimeConfigurationWithContext is the same as UpdateRuntimeConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateRuntimeConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) UpdateRuntimeConfigurationWithContext(ctx aws.Context, input *UpdateRuntimeConfigurationInput, opts ...request.Option) (*UpdateRuntimeConfigurationOutput, error) { - req, out := c.UpdateRuntimeConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opValidateMatchmakingRuleSet = "ValidateMatchmakingRuleSet" - -// ValidateMatchmakingRuleSetRequest generates a "aws/request.Request" representing the -// client's request for the ValidateMatchmakingRuleSet operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See ValidateMatchmakingRuleSet for more information on using the ValidateMatchmakingRuleSet -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ValidateMatchmakingRuleSetRequest method. -// req, resp := client.ValidateMatchmakingRuleSetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ValidateMatchmakingRuleSet -func (c *GameLift) ValidateMatchmakingRuleSetRequest(input *ValidateMatchmakingRuleSetInput) (req *request.Request, output *ValidateMatchmakingRuleSetOutput) { - op := &request.Operation{ - Name: opValidateMatchmakingRuleSet, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ValidateMatchmakingRuleSetInput{} - } - - output = &ValidateMatchmakingRuleSetOutput{} - req = c.newRequest(op, input, output) - return -} - -// ValidateMatchmakingRuleSet API operation for Amazon GameLift. -// -// Validates the syntax of a matchmaking rule or rule set. This operation checks -// that the rule set uses syntactically correct JSON and that it conforms to -// allowed property expressions. To validate syntax, provide a rule set string. -// -// Operations related to match configurations and rule sets include: -// -// * CreateMatchmakingConfiguration -// -// * DescribeMatchmakingConfigurations -// -// * UpdateMatchmakingConfiguration -// -// * DeleteMatchmakingConfiguration -// -// * CreateMatchmakingRuleSet -// -// * DescribeMatchmakingRuleSets -// -// * ValidateMatchmakingRuleSet -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon GameLift's -// API operation ValidateMatchmakingRuleSet for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * ErrCodeUnsupportedRegionException "UnsupportedRegionException" -// The requested operation is not supported in the region specified. -// -// * ErrCodeInvalidRequestException "InvalidRequestException" -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ValidateMatchmakingRuleSet -func (c *GameLift) ValidateMatchmakingRuleSet(input *ValidateMatchmakingRuleSetInput) (*ValidateMatchmakingRuleSetOutput, error) { - req, out := c.ValidateMatchmakingRuleSetRequest(input) - return out, req.Send() -} - -// ValidateMatchmakingRuleSetWithContext is the same as ValidateMatchmakingRuleSet with the addition of -// the ability to pass a context and additional request options. -// -// See ValidateMatchmakingRuleSet for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *GameLift) ValidateMatchmakingRuleSetWithContext(ctx aws.Context, input *ValidateMatchmakingRuleSetInput, opts ...request.Option) (*ValidateMatchmakingRuleSetOutput, error) { - req, out := c.ValidateMatchmakingRuleSetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AcceptMatchInput -type AcceptMatchInput struct { - _ struct{} `type:"structure"` - - // Player response to the proposed match. - // - // AcceptanceType is a required field - AcceptanceType *string `type:"string" required:"true" enum:"AcceptanceType"` - - // Unique identifier for a player delivering the response. This parameter can - // include one or multiple player IDs. - // - // PlayerIds is a required field - PlayerIds []*string `type:"list" required:"true"` - - // Unique identifier for a matchmaking ticket. The ticket must be in status - // REQUIRES_ACCEPTANCE; otherwise this request will fail. - // - // TicketId is a required field - TicketId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s AcceptMatchInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AcceptMatchInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AcceptMatchInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AcceptMatchInput"} - if s.AcceptanceType == nil { - invalidParams.Add(request.NewErrParamRequired("AcceptanceType")) - } - if s.PlayerIds == nil { - invalidParams.Add(request.NewErrParamRequired("PlayerIds")) - } - if s.TicketId == nil { - invalidParams.Add(request.NewErrParamRequired("TicketId")) - } - if s.TicketId != nil && len(*s.TicketId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TicketId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAcceptanceType sets the AcceptanceType field's value. -func (s *AcceptMatchInput) SetAcceptanceType(v string) *AcceptMatchInput { - s.AcceptanceType = &v - return s -} - -// SetPlayerIds sets the PlayerIds field's value. 
-func (s *AcceptMatchInput) SetPlayerIds(v []*string) *AcceptMatchInput { - s.PlayerIds = v - return s -} - -// SetTicketId sets the TicketId field's value. -func (s *AcceptMatchInput) SetTicketId(v string) *AcceptMatchInput { - s.TicketId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AcceptMatchOutput -type AcceptMatchOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s AcceptMatchOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AcceptMatchOutput) GoString() string { - return s.String() -} - -// Properties describing a fleet alias. -// -// Alias-related operations include: -// -// * CreateAlias -// -// * ListAliases -// -// * DescribeAlias -// -// * UpdateAlias -// -// * DeleteAlias -// -// * ResolveAlias -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/Alias -type Alias struct { - _ struct{} `type:"structure"` - - // Unique identifier for an alias; alias ARNs are unique across all regions. - AliasArn *string `min:"1" type:"string"` - - // Unique identifier for an alias; alias IDs are unique within a region. - AliasId *string `type:"string"` - - // Time stamp indicating when this data object was created. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Human-readable description of an alias. - Description *string `type:"string"` - - // Time stamp indicating when this data object was last modified. Format is - // a number expressed in Unix time as milliseconds (for example "1469498468.057"). - LastUpdatedTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Descriptive label that is associated with an alias. Alias names do not need - // to be unique. 
- Name *string `min:"1" type:"string"` - - // Alias configuration for the alias, including routing type and settings. - RoutingStrategy *RoutingStrategy `type:"structure"` -} - -// String returns the string representation -func (s Alias) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Alias) GoString() string { - return s.String() -} - -// SetAliasArn sets the AliasArn field's value. -func (s *Alias) SetAliasArn(v string) *Alias { - s.AliasArn = &v - return s -} - -// SetAliasId sets the AliasId field's value. -func (s *Alias) SetAliasId(v string) *Alias { - s.AliasId = &v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *Alias) SetCreationTime(v time.Time) *Alias { - s.CreationTime = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *Alias) SetDescription(v string) *Alias { - s.Description = &v - return s -} - -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *Alias) SetLastUpdatedTime(v time.Time) *Alias { - s.LastUpdatedTime = &v - return s -} - -// SetName sets the Name field's value. -func (s *Alias) SetName(v string) *Alias { - s.Name = &v - return s -} - -// SetRoutingStrategy sets the RoutingStrategy field's value. -func (s *Alias) SetRoutingStrategy(v *RoutingStrategy) *Alias { - s.RoutingStrategy = v - return s -} - -// Values for use in Player attribute type:value pairs. This object lets you -// specify an attribute value using any of the valid data types: string, number, -// string array or data map. Each AttributeValue object can use only one of -// the available properties. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AttributeValue -type AttributeValue struct { - _ struct{} `type:"structure"` - - // For number values, expressed as double. - N *float64 `type:"double"` - - // For single string values. Maximum string length is 100 characters. 
- S *string `min:"1" type:"string"` - - // For a map of up to 10 type:value pairs. Maximum length for each string value - // is 100 characters. - SDM map[string]*float64 `type:"map"` - - // For a list of up to 10 strings. Maximum length for each string is 100 characters. - // Duplicate values are not recognized; all occurrences of the repeated value - // after the first of a repeated value are ignored. - SL []*string `type:"list"` -} - -// String returns the string representation -func (s AttributeValue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AttributeValue) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AttributeValue) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AttributeValue"} - if s.S != nil && len(*s.S) < 1 { - invalidParams.Add(request.NewErrParamMinLen("S", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetN sets the N field's value. -func (s *AttributeValue) SetN(v float64) *AttributeValue { - s.N = &v - return s -} - -// SetS sets the S field's value. -func (s *AttributeValue) SetS(v string) *AttributeValue { - s.S = &v - return s -} - -// SetSDM sets the SDM field's value. -func (s *AttributeValue) SetSDM(v map[string]*float64) *AttributeValue { - s.SDM = v - return s -} - -// SetSL sets the SL field's value. -func (s *AttributeValue) SetSL(v []*string) *AttributeValue { - s.SL = v - return s -} - -// Temporary access credentials used for uploading game build files to Amazon -// GameLift. They are valid for a limited time. If they expire before you upload -// your game build, get a new set by calling RequestUploadCredentials. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/AwsCredentials -type AwsCredentials struct { - _ struct{} `type:"structure"` - - // Temporary key allowing access to the Amazon GameLift S3 account. - AccessKeyId *string `min:"1" type:"string"` - - // Temporary secret key allowing access to the Amazon GameLift S3 account. - SecretAccessKey *string `min:"1" type:"string"` - - // Token used to associate a specific build ID with the files uploaded using - // these credentials. - SessionToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s AwsCredentials) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AwsCredentials) GoString() string { - return s.String() -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *AwsCredentials) SetAccessKeyId(v string) *AwsCredentials { - s.AccessKeyId = &v - return s -} - -// SetSecretAccessKey sets the SecretAccessKey field's value. -func (s *AwsCredentials) SetSecretAccessKey(v string) *AwsCredentials { - s.SecretAccessKey = &v - return s -} - -// SetSessionToken sets the SessionToken field's value. -func (s *AwsCredentials) SetSessionToken(v string) *AwsCredentials { - s.SessionToken = &v - return s -} - -// Properties describing a game build. -// -// Build-related operations include: -// -// * CreateBuild -// -// * ListBuilds -// -// * DescribeBuild -// -// * UpdateBuild -// -// * DeleteBuild -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/Build -type Build struct { - _ struct{} `type:"structure"` - - // Unique identifier for a build. - BuildId *string `type:"string"` - - // Time stamp indicating when this data object was created. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Descriptive label that is associated with a build. 
Build names do not need - // to be unique. It can be set using CreateBuild or UpdateBuild. - Name *string `type:"string"` - - // Operating system that the game server binaries are built to run on. This - // value determines the type of fleet resources that you can use for this build. - OperatingSystem *string `type:"string" enum:"OperatingSystem"` - - // File size of the uploaded game build, expressed in bytes. When the build - // status is INITIALIZED, this value is 0. - SizeOnDisk *int64 `min:"1" type:"long"` - - // Current status of the build. - // - // Possible build statuses include the following: - // - // * INITIALIZED -- A new build has been defined, but no files have been - // uploaded. You cannot create fleets for builds that are in this status. - // When a build is successfully created, the build status is set to this - // value. - // - // * READY -- The game build has been successfully uploaded. You can now - // create new fleets for this build. - // - // * FAILED -- The game build upload failed. You cannot create new fleets - // for this build. - Status *string `type:"string" enum:"BuildStatus"` - - // Version that is associated with this build. Version strings do not need to - // be unique. This value can be set using CreateBuild or UpdateBuild. - Version *string `type:"string"` -} - -// String returns the string representation -func (s Build) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Build) GoString() string { - return s.String() -} - -// SetBuildId sets the BuildId field's value. -func (s *Build) SetBuildId(v string) *Build { - s.BuildId = &v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *Build) SetCreationTime(v time.Time) *Build { - s.CreationTime = &v - return s -} - -// SetName sets the Name field's value. -func (s *Build) SetName(v string) *Build { - s.Name = &v - return s -} - -// SetOperatingSystem sets the OperatingSystem field's value. 
-func (s *Build) SetOperatingSystem(v string) *Build { - s.OperatingSystem = &v - return s -} - -// SetSizeOnDisk sets the SizeOnDisk field's value. -func (s *Build) SetSizeOnDisk(v int64) *Build { - s.SizeOnDisk = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *Build) SetStatus(v string) *Build { - s.Status = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *Build) SetVersion(v string) *Build { - s.Version = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateAliasInput -type CreateAliasInput struct { - _ struct{} `type:"structure"` - - // Human-readable description of an alias. - Description *string `min:"1" type:"string"` - - // Descriptive label that is associated with an alias. Alias names do not need - // to be unique. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Object that specifies the fleet and routing type to use for the alias. - // - // RoutingStrategy is a required field - RoutingStrategy *RoutingStrategy `type:"structure" required:"true"` -} - -// String returns the string representation -func (s CreateAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateAliasInput"} - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.RoutingStrategy == nil { - invalidParams.Add(request.NewErrParamRequired("RoutingStrategy")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *CreateAliasInput) SetDescription(v string) *CreateAliasInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateAliasInput) SetName(v string) *CreateAliasInput { - s.Name = &v - return s -} - -// SetRoutingStrategy sets the RoutingStrategy field's value. -func (s *CreateAliasInput) SetRoutingStrategy(v *RoutingStrategy) *CreateAliasInput { - s.RoutingStrategy = v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateAliasOutput -type CreateAliasOutput struct { - _ struct{} `type:"structure"` - - // Object that describes the newly created alias record. - Alias *Alias `type:"structure"` -} - -// String returns the string representation -func (s CreateAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateAliasOutput) GoString() string { - return s.String() -} - -// SetAlias sets the Alias field's value. -func (s *CreateAliasOutput) SetAlias(v *Alias) *CreateAliasOutput { - s.Alias = v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateBuildInput -type CreateBuildInput struct { - _ struct{} `type:"structure"` - - // Descriptive label that is associated with a build. Build names do not need - // to be unique. You can use UpdateBuild to change this value later. - Name *string `min:"1" type:"string"` - - // Operating system that the game server binaries are built to run on. This - // value determines the type of fleet resources that you can use for this build. - // If your game build contains multiple executables, they all must run on the - // same operating system. - OperatingSystem *string `type:"string" enum:"OperatingSystem"` - - // Amazon S3 location of the game build files to be uploaded. The S3 bucket - // must be owned by the same AWS account that you're using to manage Amazon - // GameLift. It also must in the same region that you want to create a new build - // in. Before calling CreateBuild with this location, you must allow Amazon - // GameLift to access your Amazon S3 bucket (see Create a Build with Files in - // Amazon S3 (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build)). - StorageLocation *S3Location `type:"structure"` - - // Version that is associated with this build. Version strings do not need to - // be unique. You can use UpdateBuild to change this value later. - Version *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s CreateBuildInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBuildInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateBuildInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateBuildInput"} - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - if s.StorageLocation != nil { - if err := s.StorageLocation.Validate(); err != nil { - invalidParams.AddNested("StorageLocation", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *CreateBuildInput) SetName(v string) *CreateBuildInput { - s.Name = &v - return s -} - -// SetOperatingSystem sets the OperatingSystem field's value. -func (s *CreateBuildInput) SetOperatingSystem(v string) *CreateBuildInput { - s.OperatingSystem = &v - return s -} - -// SetStorageLocation sets the StorageLocation field's value. -func (s *CreateBuildInput) SetStorageLocation(v *S3Location) *CreateBuildInput { - s.StorageLocation = v - return s -} - -// SetVersion sets the Version field's value. -func (s *CreateBuildInput) SetVersion(v string) *CreateBuildInput { - s.Version = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateBuildOutput -type CreateBuildOutput struct { - _ struct{} `type:"structure"` - - // The newly created build record, including a unique build ID and status. - Build *Build `type:"structure"` - - // Amazon S3 location specified in the request. - StorageLocation *S3Location `type:"structure"` - - // This element is not currently in use. 
- UploadCredentials *AwsCredentials `type:"structure"` -} - -// String returns the string representation -func (s CreateBuildOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBuildOutput) GoString() string { - return s.String() -} - -// SetBuild sets the Build field's value. -func (s *CreateBuildOutput) SetBuild(v *Build) *CreateBuildOutput { - s.Build = v - return s -} - -// SetStorageLocation sets the StorageLocation field's value. -func (s *CreateBuildOutput) SetStorageLocation(v *S3Location) *CreateBuildOutput { - s.StorageLocation = v - return s -} - -// SetUploadCredentials sets the UploadCredentials field's value. -func (s *CreateBuildOutput) SetUploadCredentials(v *AwsCredentials) *CreateBuildOutput { - s.UploadCredentials = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateFleetInput -type CreateFleetInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a build to be deployed on the new fleet. The build - // must have been successfully uploaded to Amazon GameLift and be in a READY - // status. This fleet setting cannot be changed once the fleet is created. - // - // BuildId is a required field - BuildId *string `type:"string" required:"true"` - - // Human-readable description of a fleet. - Description *string `min:"1" type:"string"` - - // Range of IP addresses and port settings that permit inbound traffic to access - // server processes running on the fleet. If no inbound permissions are set, - // including both IP address range and port range, the server processes in the - // fleet cannot accept connections. You can specify one or more sets of permissions - // for a fleet. - EC2InboundPermissions []*IpPermission `type:"list"` - - // Name of an EC2 instance type that is supported in Amazon GameLift. 
A fleet - // instance type determines the computing resources of each instance in the - // fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift - // supports the following EC2 instance types. See Amazon EC2 Instance Types - // (http://aws.amazon.com/ec2/instance-types/) for detailed descriptions. - // - // EC2InstanceType is a required field - EC2InstanceType *string `type:"string" required:"true" enum:"EC2InstanceType"` - - // This parameter is no longer used. Instead, to specify where Amazon GameLift - // should store log files once a server process shuts down, use the Amazon GameLift - // server API ProcessReady() and specify one or more directory paths in logParameters. - // See more information in the Server API Reference (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api-ref.html#gamelift-sdk-server-api-ref-dataypes-process). - LogPaths []*string `type:"list"` - - // Names of metric groups to add this fleet to. Use an existing metric group - // name to add this fleet to the group. Or use a new name to create a new metric - // group. A fleet can only be included in one metric group at a time. - MetricGroups []*string `type:"list"` - - // Descriptive label that is associated with a fleet. Fleet names do not need - // to be unique. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Game session protection policy to apply to all instances in this fleet. If - // this parameter is not set, instances in this fleet default to no protection. - // You can change a fleet's protection policy using UpdateFleetAttributes, but - // this change will only affect sessions created after the policy change. You - // can also set protection for individual instances using UpdateGameSession. - // - // * NoProtection -- The game session can be terminated during a scale-down - // event. 
- // - // * FullProtection -- If the game session is in an ACTIVE status, it cannot - // be terminated during a scale-down event. - NewGameSessionProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` - - // Unique identifier for the AWS account with the VPC that you want to peer - // your Amazon GameLift fleet with. You can find your Account ID in the AWS - // Management Console under account settings. - PeerVpcAwsAccountId *string `min:"1" type:"string"` - - // Unique identifier for a VPC with resources to be accessed by your Amazon - // GameLift fleet. The VPC must be in the same region where your fleet is deployed. - // To get VPC information, including IDs, use the Virtual Private Cloud service - // tools, including the VPC Dashboard in the AWS Management Console. - PeerVpcId *string `min:"1" type:"string"` - - // Policy that limits the number of game sessions an individual player can create - // over a span of time for this fleet. - ResourceCreationLimitPolicy *ResourceCreationLimitPolicy `type:"structure"` - - // Instructions for launching server processes on each instance in the fleet. - // The run-time configuration for a fleet has a collection of server process - // configurations, one for each type of server process to run on an instance. - // A server process configuration specifies the location of the server executable, - // launch parameters, and the number of concurrent processes with that configuration - // to maintain on each instance. A CreateFleet request must include a run-time - // configuration with at least one server process configuration; otherwise the - // request fails with an invalid request exception. (This parameter replaces - // the parameters ServerLaunchPath and ServerLaunchParameters; requests that - // contain values for these parameters instead of a run-time configuration will - // continue to work.) - RuntimeConfiguration *RuntimeConfiguration `type:"structure"` - - // This parameter is no longer used. 
Instead, specify server launch parameters - // in the RuntimeConfiguration parameter. (Requests that specify a server launch - // path and launch parameters instead of a run-time configuration will continue - // to work.) - ServerLaunchParameters *string `min:"1" type:"string"` - - // This parameter is no longer used. Instead, specify a server launch path using - // the RuntimeConfiguration parameter. (Requests that specify a server launch - // path and launch parameters instead of a run-time configuration will continue - // to work.) - ServerLaunchPath *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s CreateFleetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateFleetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateFleetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateFleetInput"} - if s.BuildId == nil { - invalidParams.Add(request.NewErrParamRequired("BuildId")) - } - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.EC2InstanceType == nil { - invalidParams.Add(request.NewErrParamRequired("EC2InstanceType")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.PeerVpcAwsAccountId != nil && len(*s.PeerVpcAwsAccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PeerVpcAwsAccountId", 1)) - } - if s.PeerVpcId != nil && len(*s.PeerVpcId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PeerVpcId", 1)) - } - if s.ServerLaunchParameters != nil && len(*s.ServerLaunchParameters) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ServerLaunchParameters", 1)) - } - if s.ServerLaunchPath != nil && 
len(*s.ServerLaunchPath) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ServerLaunchPath", 1)) - } - if s.EC2InboundPermissions != nil { - for i, v := range s.EC2InboundPermissions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EC2InboundPermissions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.RuntimeConfiguration != nil { - if err := s.RuntimeConfiguration.Validate(); err != nil { - invalidParams.AddNested("RuntimeConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBuildId sets the BuildId field's value. -func (s *CreateFleetInput) SetBuildId(v string) *CreateFleetInput { - s.BuildId = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateFleetInput) SetDescription(v string) *CreateFleetInput { - s.Description = &v - return s -} - -// SetEC2InboundPermissions sets the EC2InboundPermissions field's value. -func (s *CreateFleetInput) SetEC2InboundPermissions(v []*IpPermission) *CreateFleetInput { - s.EC2InboundPermissions = v - return s -} - -// SetEC2InstanceType sets the EC2InstanceType field's value. -func (s *CreateFleetInput) SetEC2InstanceType(v string) *CreateFleetInput { - s.EC2InstanceType = &v - return s -} - -// SetLogPaths sets the LogPaths field's value. -func (s *CreateFleetInput) SetLogPaths(v []*string) *CreateFleetInput { - s.LogPaths = v - return s -} - -// SetMetricGroups sets the MetricGroups field's value. -func (s *CreateFleetInput) SetMetricGroups(v []*string) *CreateFleetInput { - s.MetricGroups = v - return s -} - -// SetName sets the Name field's value. -func (s *CreateFleetInput) SetName(v string) *CreateFleetInput { - s.Name = &v - return s -} - -// SetNewGameSessionProtectionPolicy sets the NewGameSessionProtectionPolicy field's value. 
-func (s *CreateFleetInput) SetNewGameSessionProtectionPolicy(v string) *CreateFleetInput { - s.NewGameSessionProtectionPolicy = &v - return s -} - -// SetPeerVpcAwsAccountId sets the PeerVpcAwsAccountId field's value. -func (s *CreateFleetInput) SetPeerVpcAwsAccountId(v string) *CreateFleetInput { - s.PeerVpcAwsAccountId = &v - return s -} - -// SetPeerVpcId sets the PeerVpcId field's value. -func (s *CreateFleetInput) SetPeerVpcId(v string) *CreateFleetInput { - s.PeerVpcId = &v - return s -} - -// SetResourceCreationLimitPolicy sets the ResourceCreationLimitPolicy field's value. -func (s *CreateFleetInput) SetResourceCreationLimitPolicy(v *ResourceCreationLimitPolicy) *CreateFleetInput { - s.ResourceCreationLimitPolicy = v - return s -} - -// SetRuntimeConfiguration sets the RuntimeConfiguration field's value. -func (s *CreateFleetInput) SetRuntimeConfiguration(v *RuntimeConfiguration) *CreateFleetInput { - s.RuntimeConfiguration = v - return s -} - -// SetServerLaunchParameters sets the ServerLaunchParameters field's value. -func (s *CreateFleetInput) SetServerLaunchParameters(v string) *CreateFleetInput { - s.ServerLaunchParameters = &v - return s -} - -// SetServerLaunchPath sets the ServerLaunchPath field's value. -func (s *CreateFleetInput) SetServerLaunchPath(v string) *CreateFleetInput { - s.ServerLaunchPath = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateFleetOutput -type CreateFleetOutput struct { - _ struct{} `type:"structure"` - - // Properties for the newly created fleet. - FleetAttributes *FleetAttributes `type:"structure"` -} - -// String returns the string representation -func (s CreateFleetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateFleetOutput) GoString() string { - return s.String() -} - -// SetFleetAttributes sets the FleetAttributes field's value. 
-func (s *CreateFleetOutput) SetFleetAttributes(v *FleetAttributes) *CreateFleetOutput { - s.FleetAttributes = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionInput -type CreateGameSessionInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for an alias associated with the fleet to create a game - // session in. Each request must reference either a fleet ID or alias ID, but - // not both. - AliasId *string `type:"string"` - - // Unique identifier for a player or entity creating the game session. This - // ID is used to enforce a resource protection policy (if one exists) that limits - // the number of concurrent active game sessions one player can have. - CreatorId *string `min:"1" type:"string"` - - // Unique identifier for a fleet to create a game session in. Each request must - // reference either a fleet ID or alias ID, but not both. - FleetId *string `type:"string"` - - // Set of developer-defined properties for a game session, formatted as a set - // of type:value pairs. These properties are included in the GameSession object, - // which is passed to the game server with a request to start a new game session - // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - GameProperties []*GameProperty `type:"list"` - - // Set of developer-defined game session properties, formatted as a single string - // value. This data is included in the GameSession object, which is passed to - // the game server with a request to start a new game session (see Start a Game - // Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - GameSessionData *string `min:"1" type:"string"` - - // This parameter is no longer preferred. Please use IdempotencyToken instead. 
- // Custom string that uniquely identifies a request for a new game session. - // Maximum token length is 48 characters. If provided, this string is included - // in the new game session's ID. (A game session ARN has the following format: - // arn:aws:gamelift:::gamesession//.) - GameSessionId *string `min:"1" type:"string"` - - // Custom string that uniquely identifies a request for a new game session. - // Maximum token length is 48 characters. If provided, this string is included - // in the new game session's ID. (A game session ARN has the following format: - // arn:aws:gamelift:::gamesession//.) Idempotency tokens remain in use for 30 days after a game session - // has ended; game session objects are retained for this time period and then - // deleted. - IdempotencyToken *string `min:"1" type:"string"` - - // Maximum number of players that can be connected simultaneously to the game - // session. - // - // MaximumPlayerSessionCount is a required field - MaximumPlayerSessionCount *int64 `type:"integer" required:"true"` - - // Descriptive label that is associated with a game session. Session names do - // not need to be unique. - Name *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s CreateGameSessionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateGameSessionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateGameSessionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateGameSessionInput"} - if s.CreatorId != nil && len(*s.CreatorId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CreatorId", 1)) - } - if s.GameSessionData != nil && len(*s.GameSessionData) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionData", 1)) - } - if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) - } - if s.IdempotencyToken != nil && len(*s.IdempotencyToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("IdempotencyToken", 1)) - } - if s.MaximumPlayerSessionCount == nil { - invalidParams.Add(request.NewErrParamRequired("MaximumPlayerSessionCount")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.GameProperties != nil { - for i, v := range s.GameProperties { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GameProperties", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasId sets the AliasId field's value. -func (s *CreateGameSessionInput) SetAliasId(v string) *CreateGameSessionInput { - s.AliasId = &v - return s -} - -// SetCreatorId sets the CreatorId field's value. -func (s *CreateGameSessionInput) SetCreatorId(v string) *CreateGameSessionInput { - s.CreatorId = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *CreateGameSessionInput) SetFleetId(v string) *CreateGameSessionInput { - s.FleetId = &v - return s -} - -// SetGameProperties sets the GameProperties field's value. -func (s *CreateGameSessionInput) SetGameProperties(v []*GameProperty) *CreateGameSessionInput { - s.GameProperties = v - return s -} - -// SetGameSessionData sets the GameSessionData field's value. 
-func (s *CreateGameSessionInput) SetGameSessionData(v string) *CreateGameSessionInput { - s.GameSessionData = &v - return s -} - -// SetGameSessionId sets the GameSessionId field's value. -func (s *CreateGameSessionInput) SetGameSessionId(v string) *CreateGameSessionInput { - s.GameSessionId = &v - return s -} - -// SetIdempotencyToken sets the IdempotencyToken field's value. -func (s *CreateGameSessionInput) SetIdempotencyToken(v string) *CreateGameSessionInput { - s.IdempotencyToken = &v - return s -} - -// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. -func (s *CreateGameSessionInput) SetMaximumPlayerSessionCount(v int64) *CreateGameSessionInput { - s.MaximumPlayerSessionCount = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateGameSessionInput) SetName(v string) *CreateGameSessionInput { - s.Name = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionOutput -type CreateGameSessionOutput struct { - _ struct{} `type:"structure"` - - // Object that describes the newly created game session record. - GameSession *GameSession `type:"structure"` -} - -// String returns the string representation -func (s CreateGameSessionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateGameSessionOutput) GoString() string { - return s.String() -} - -// SetGameSession sets the GameSession field's value. -func (s *CreateGameSessionOutput) SetGameSession(v *GameSession) *CreateGameSessionOutput { - s.GameSession = v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionQueueInput -type CreateGameSessionQueueInput struct { - _ struct{} `type:"structure"` - - // List of fleets that can be used to fulfill game session placement requests - // in the queue. Fleets are identified by either a fleet ARN or a fleet alias - // ARN. Destinations are listed in default preference order. - Destinations []*GameSessionQueueDestination `type:"list"` - - // Descriptive label that is associated with game session queue. Queue names - // must be unique within each region. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Collection of latency policies to apply when processing game sessions placement - // requests with player latency information. Multiple policies are evaluated - // in order of the maximum latency value, starting with the lowest latency values. - // With just one policy, it is enforced at the start of the game session placement - // for the duration period. With multiple policies, each policy is enforced - // consecutively for its duration period. For example, a queue might enforce - // a 60-second policy followed by a 120-second policy, and then no policy for - // the remainder of the placement. A player latency policy must set a value - // for MaximumIndividualPlayerLatencyMilliseconds; if none is set, this API - // requests will fail. - PlayerLatencyPolicies []*PlayerLatencyPolicy `type:"list"` - - // Maximum time, in seconds, that a new game session placement request remains - // in the queue. When a request exceeds this time, the game session placement - // changes to a TIMED_OUT status. 
- TimeoutInSeconds *int64 `type:"integer"` -} - -// String returns the string representation -func (s CreateGameSessionQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateGameSessionQueueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateGameSessionQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateGameSessionQueueInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Destinations != nil { - for i, v := range s.Destinations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Destinations", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDestinations sets the Destinations field's value. -func (s *CreateGameSessionQueueInput) SetDestinations(v []*GameSessionQueueDestination) *CreateGameSessionQueueInput { - s.Destinations = v - return s -} - -// SetName sets the Name field's value. -func (s *CreateGameSessionQueueInput) SetName(v string) *CreateGameSessionQueueInput { - s.Name = &v - return s -} - -// SetPlayerLatencyPolicies sets the PlayerLatencyPolicies field's value. -func (s *CreateGameSessionQueueInput) SetPlayerLatencyPolicies(v []*PlayerLatencyPolicy) *CreateGameSessionQueueInput { - s.PlayerLatencyPolicies = v - return s -} - -// SetTimeoutInSeconds sets the TimeoutInSeconds field's value. -func (s *CreateGameSessionQueueInput) SetTimeoutInSeconds(v int64) *CreateGameSessionQueueInput { - s.TimeoutInSeconds = &v - return s -} - -// Represents the returned data in response to a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameSessionQueueOutput -type CreateGameSessionQueueOutput struct { - _ struct{} `type:"structure"` - - // Object that describes the newly created game session queue. - GameSessionQueue *GameSessionQueue `type:"structure"` -} - -// String returns the string representation -func (s CreateGameSessionQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateGameSessionQueueOutput) GoString() string { - return s.String() -} - -// SetGameSessionQueue sets the GameSessionQueue field's value. -func (s *CreateGameSessionQueueOutput) SetGameSessionQueue(v *GameSessionQueue) *CreateGameSessionQueueOutput { - s.GameSessionQueue = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingConfigurationInput -type CreateMatchmakingConfigurationInput struct { - _ struct{} `type:"structure"` - - // Flag that determines whether or not a match that was created with this configuration - // must be accepted by the matched players. To require acceptance, set to TRUE. - // - // AcceptanceRequired is a required field - AcceptanceRequired *bool `type:"boolean" required:"true"` - - // Length of time (in seconds) to wait for players to accept a proposed match. - // If any player rejects the match or fails to accept before the timeout, the - // ticket continues to look for an acceptable match. - AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"` - - // Number of player slots in a match to keep open for future players. For example, - // if the configuration's rule set specifies a match for a single 12-person - // team, and the additional player count is set to 2, only 10 players are selected - // for the match. - AdditionalPlayerCount *int64 `type:"integer"` - - // Information to attached to all events related to the matchmaking configuration. 
- CustomEventData *string `type:"string"` - - // Meaningful description of the matchmaking configuration. - Description *string `min:"1" type:"string"` - - // Set of developer-defined properties for a game session, formatted as a set - // of type:value pairs. These properties are included in the GameSession object, - // which is passed to the game server with a request to start a new game session - // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - // This information is added to the new GameSession object that is created for - // a successful match. - GameProperties []*GameProperty `type:"list"` - - // Set of developer-defined game session properties, formatted as a single string - // value. This data is included in the GameSession object, which is passed to - // the game server with a request to start a new game session (see Start a Game - // Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - // This information is added to the new GameSession object that is created for - // a successful match. - GameSessionData *string `min:"1" type:"string"` - - // Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) - // that is assigned to a game session queue and uniquely identifies it. Format - // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. - // These queues are used when placing game sessions for matches that are created - // with this matchmaking configuration. Queues can be located in any region. - // - // GameSessionQueueArns is a required field - GameSessionQueueArns []*string `type:"list" required:"true"` - - // Unique identifier for a matchmaking configuration. This name is used to identify - // the configuration associated with a matchmaking request or ticket. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // SNS topic ARN that is set up to receive matchmaking notifications. - NotificationTarget *string `type:"string"` - - // Maximum duration, in seconds, that a matchmaking ticket can remain in process - // before timing out. Requests that time out can be resubmitted as needed. - // - // RequestTimeoutSeconds is a required field - RequestTimeoutSeconds *int64 `min:"1" type:"integer" required:"true"` - - // Unique identifier for a matchmaking rule set to use with this configuration. - // A matchmaking configuration can only use rule sets that are defined in the - // same region. - // - // RuleSetName is a required field - RuleSetName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateMatchmakingConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateMatchmakingConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateMatchmakingConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateMatchmakingConfigurationInput"} - if s.AcceptanceRequired == nil { - invalidParams.Add(request.NewErrParamRequired("AcceptanceRequired")) - } - if s.AcceptanceTimeoutSeconds != nil && *s.AcceptanceTimeoutSeconds < 1 { - invalidParams.Add(request.NewErrParamMinValue("AcceptanceTimeoutSeconds", 1)) - } - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.GameSessionData != nil && len(*s.GameSessionData) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionData", 1)) - } - if s.GameSessionQueueArns == nil { - invalidParams.Add(request.NewErrParamRequired("GameSessionQueueArns")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.RequestTimeoutSeconds == nil { - invalidParams.Add(request.NewErrParamRequired("RequestTimeoutSeconds")) - } - if s.RequestTimeoutSeconds != nil && *s.RequestTimeoutSeconds < 1 { - invalidParams.Add(request.NewErrParamMinValue("RequestTimeoutSeconds", 1)) - } - if s.RuleSetName == nil { - invalidParams.Add(request.NewErrParamRequired("RuleSetName")) - } - if s.RuleSetName != nil && len(*s.RuleSetName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleSetName", 1)) - } - if s.GameProperties != nil { - for i, v := range s.GameProperties { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GameProperties", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAcceptanceRequired sets the AcceptanceRequired field's value. 
-func (s *CreateMatchmakingConfigurationInput) SetAcceptanceRequired(v bool) *CreateMatchmakingConfigurationInput { - s.AcceptanceRequired = &v - return s -} - -// SetAcceptanceTimeoutSeconds sets the AcceptanceTimeoutSeconds field's value. -func (s *CreateMatchmakingConfigurationInput) SetAcceptanceTimeoutSeconds(v int64) *CreateMatchmakingConfigurationInput { - s.AcceptanceTimeoutSeconds = &v - return s -} - -// SetAdditionalPlayerCount sets the AdditionalPlayerCount field's value. -func (s *CreateMatchmakingConfigurationInput) SetAdditionalPlayerCount(v int64) *CreateMatchmakingConfigurationInput { - s.AdditionalPlayerCount = &v - return s -} - -// SetCustomEventData sets the CustomEventData field's value. -func (s *CreateMatchmakingConfigurationInput) SetCustomEventData(v string) *CreateMatchmakingConfigurationInput { - s.CustomEventData = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateMatchmakingConfigurationInput) SetDescription(v string) *CreateMatchmakingConfigurationInput { - s.Description = &v - return s -} - -// SetGameProperties sets the GameProperties field's value. -func (s *CreateMatchmakingConfigurationInput) SetGameProperties(v []*GameProperty) *CreateMatchmakingConfigurationInput { - s.GameProperties = v - return s -} - -// SetGameSessionData sets the GameSessionData field's value. -func (s *CreateMatchmakingConfigurationInput) SetGameSessionData(v string) *CreateMatchmakingConfigurationInput { - s.GameSessionData = &v - return s -} - -// SetGameSessionQueueArns sets the GameSessionQueueArns field's value. -func (s *CreateMatchmakingConfigurationInput) SetGameSessionQueueArns(v []*string) *CreateMatchmakingConfigurationInput { - s.GameSessionQueueArns = v - return s -} - -// SetName sets the Name field's value. 
-func (s *CreateMatchmakingConfigurationInput) SetName(v string) *CreateMatchmakingConfigurationInput { - s.Name = &v - return s -} - -// SetNotificationTarget sets the NotificationTarget field's value. -func (s *CreateMatchmakingConfigurationInput) SetNotificationTarget(v string) *CreateMatchmakingConfigurationInput { - s.NotificationTarget = &v - return s -} - -// SetRequestTimeoutSeconds sets the RequestTimeoutSeconds field's value. -func (s *CreateMatchmakingConfigurationInput) SetRequestTimeoutSeconds(v int64) *CreateMatchmakingConfigurationInput { - s.RequestTimeoutSeconds = &v - return s -} - -// SetRuleSetName sets the RuleSetName field's value. -func (s *CreateMatchmakingConfigurationInput) SetRuleSetName(v string) *CreateMatchmakingConfigurationInput { - s.RuleSetName = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingConfigurationOutput -type CreateMatchmakingConfigurationOutput struct { - _ struct{} `type:"structure"` - - // Object that describes the newly created matchmaking configuration. - Configuration *MatchmakingConfiguration `type:"structure"` -} - -// String returns the string representation -func (s CreateMatchmakingConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateMatchmakingConfigurationOutput) GoString() string { - return s.String() -} - -// SetConfiguration sets the Configuration field's value. -func (s *CreateMatchmakingConfigurationOutput) SetConfiguration(v *MatchmakingConfiguration) *CreateMatchmakingConfigurationOutput { - s.Configuration = v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingRuleSetInput -type CreateMatchmakingRuleSetInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a matchmaking rule set. This name is used to identify - // the rule set associated with a matchmaking configuration. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Collection of matchmaking rules, formatted as a JSON string. (Note that comments - // are not allowed in JSON, but most elements support a description field.) - // - // RuleSetBody is a required field - RuleSetBody *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateMatchmakingRuleSetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateMatchmakingRuleSetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateMatchmakingRuleSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateMatchmakingRuleSetInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.RuleSetBody == nil { - invalidParams.Add(request.NewErrParamRequired("RuleSetBody")) - } - if s.RuleSetBody != nil && len(*s.RuleSetBody) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleSetBody", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *CreateMatchmakingRuleSetInput) SetName(v string) *CreateMatchmakingRuleSetInput { - s.Name = &v - return s -} - -// SetRuleSetBody sets the RuleSetBody field's value. 
-func (s *CreateMatchmakingRuleSetInput) SetRuleSetBody(v string) *CreateMatchmakingRuleSetInput { - s.RuleSetBody = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateMatchmakingRuleSetOutput -type CreateMatchmakingRuleSetOutput struct { - _ struct{} `type:"structure"` - - // Object that describes the newly created matchmaking rule set. - // - // RuleSet is a required field - RuleSet *MatchmakingRuleSet `type:"structure" required:"true"` -} - -// String returns the string representation -func (s CreateMatchmakingRuleSetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateMatchmakingRuleSetOutput) GoString() string { - return s.String() -} - -// SetRuleSet sets the RuleSet field's value. -func (s *CreateMatchmakingRuleSetOutput) SetRuleSet(v *MatchmakingRuleSet) *CreateMatchmakingRuleSetOutput { - s.RuleSet = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessionInput -type CreatePlayerSessionInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for the game session to add a player to. - // - // GameSessionId is a required field - GameSessionId *string `min:"1" type:"string" required:"true"` - - // Developer-defined information related to a player. Amazon GameLift does not - // use this data, so it can be formatted as needed for use in the game. - PlayerData *string `min:"1" type:"string"` - - // Unique identifier for a player. Player IDs are developer-defined. 
- // - // PlayerId is a required field - PlayerId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreatePlayerSessionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreatePlayerSessionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreatePlayerSessionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreatePlayerSessionInput"} - if s.GameSessionId == nil { - invalidParams.Add(request.NewErrParamRequired("GameSessionId")) - } - if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) - } - if s.PlayerData != nil && len(*s.PlayerData) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlayerData", 1)) - } - if s.PlayerId == nil { - invalidParams.Add(request.NewErrParamRequired("PlayerId")) - } - if s.PlayerId != nil && len(*s.PlayerId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGameSessionId sets the GameSessionId field's value. -func (s *CreatePlayerSessionInput) SetGameSessionId(v string) *CreatePlayerSessionInput { - s.GameSessionId = &v - return s -} - -// SetPlayerData sets the PlayerData field's value. -func (s *CreatePlayerSessionInput) SetPlayerData(v string) *CreatePlayerSessionInput { - s.PlayerData = &v - return s -} - -// SetPlayerId sets the PlayerId field's value. -func (s *CreatePlayerSessionInput) SetPlayerId(v string) *CreatePlayerSessionInput { - s.PlayerId = &v - return s -} - -// Represents the returned data in response to a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessionOutput -type CreatePlayerSessionOutput struct { - _ struct{} `type:"structure"` - - // Object that describes the newly created player session record. - PlayerSession *PlayerSession `type:"structure"` -} - -// String returns the string representation -func (s CreatePlayerSessionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreatePlayerSessionOutput) GoString() string { - return s.String() -} - -// SetPlayerSession sets the PlayerSession field's value. -func (s *CreatePlayerSessionOutput) SetPlayerSession(v *PlayerSession) *CreatePlayerSessionOutput { - s.PlayerSession = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessionsInput -type CreatePlayerSessionsInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for the game session to add players to. - // - // GameSessionId is a required field - GameSessionId *string `min:"1" type:"string" required:"true"` - - // Map of string pairs, each specifying a player ID and a set of developer-defined - // information related to the player. Amazon GameLift does not use this data, - // so it can be formatted as needed for use in the game. Player data strings - // for player IDs not included in the PlayerIds parameter are ignored. - PlayerDataMap map[string]*string `type:"map"` - - // List of unique identifiers for the players to be added. 
- // - // PlayerIds is a required field - PlayerIds []*string `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s CreatePlayerSessionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreatePlayerSessionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreatePlayerSessionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreatePlayerSessionsInput"} - if s.GameSessionId == nil { - invalidParams.Add(request.NewErrParamRequired("GameSessionId")) - } - if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) - } - if s.PlayerIds == nil { - invalidParams.Add(request.NewErrParamRequired("PlayerIds")) - } - if s.PlayerIds != nil && len(s.PlayerIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlayerIds", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGameSessionId sets the GameSessionId field's value. -func (s *CreatePlayerSessionsInput) SetGameSessionId(v string) *CreatePlayerSessionsInput { - s.GameSessionId = &v - return s -} - -// SetPlayerDataMap sets the PlayerDataMap field's value. -func (s *CreatePlayerSessionsInput) SetPlayerDataMap(v map[string]*string) *CreatePlayerSessionsInput { - s.PlayerDataMap = v - return s -} - -// SetPlayerIds sets the PlayerIds field's value. -func (s *CreatePlayerSessionsInput) SetPlayerIds(v []*string) *CreatePlayerSessionsInput { - s.PlayerIds = v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreatePlayerSessionsOutput -type CreatePlayerSessionsOutput struct { - _ struct{} `type:"structure"` - - // Collection of player session objects created for the added players. 
- PlayerSessions []*PlayerSession `type:"list"` -} - -// String returns the string representation -func (s CreatePlayerSessionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreatePlayerSessionsOutput) GoString() string { - return s.String() -} - -// SetPlayerSessions sets the PlayerSessions field's value. -func (s *CreatePlayerSessionsOutput) SetPlayerSessions(v []*PlayerSession) *CreatePlayerSessionsOutput { - s.PlayerSessions = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringAuthorizationInput -type CreateVpcPeeringAuthorizationInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for the AWS account that you use to manage your Amazon - // GameLift fleet. You can find your Account ID in the AWS Management Console - // under account settings. - // - // GameLiftAwsAccountId is a required field - GameLiftAwsAccountId *string `min:"1" type:"string" required:"true"` - - // Unique identifier for a VPC with resources to be accessed by your Amazon - // GameLift fleet. The VPC must be in the same region where your fleet is deployed. - // To get VPC information, including IDs, use the Virtual Private Cloud service - // tools, including the VPC Dashboard in the AWS Management Console. - // - // PeerVpcId is a required field - PeerVpcId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateVpcPeeringAuthorizationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateVpcPeeringAuthorizationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateVpcPeeringAuthorizationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateVpcPeeringAuthorizationInput"} - if s.GameLiftAwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("GameLiftAwsAccountId")) - } - if s.GameLiftAwsAccountId != nil && len(*s.GameLiftAwsAccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameLiftAwsAccountId", 1)) - } - if s.PeerVpcId == nil { - invalidParams.Add(request.NewErrParamRequired("PeerVpcId")) - } - if s.PeerVpcId != nil && len(*s.PeerVpcId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PeerVpcId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGameLiftAwsAccountId sets the GameLiftAwsAccountId field's value. -func (s *CreateVpcPeeringAuthorizationInput) SetGameLiftAwsAccountId(v string) *CreateVpcPeeringAuthorizationInput { - s.GameLiftAwsAccountId = &v - return s -} - -// SetPeerVpcId sets the PeerVpcId field's value. -func (s *CreateVpcPeeringAuthorizationInput) SetPeerVpcId(v string) *CreateVpcPeeringAuthorizationInput { - s.PeerVpcId = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringAuthorizationOutput -type CreateVpcPeeringAuthorizationOutput struct { - _ struct{} `type:"structure"` - - // Details on the requested VPC peering authorization, including expiration. - VpcPeeringAuthorization *VpcPeeringAuthorization `type:"structure"` -} - -// String returns the string representation -func (s CreateVpcPeeringAuthorizationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateVpcPeeringAuthorizationOutput) GoString() string { - return s.String() -} - -// SetVpcPeeringAuthorization sets the VpcPeeringAuthorization field's value. 
-func (s *CreateVpcPeeringAuthorizationOutput) SetVpcPeeringAuthorization(v *VpcPeeringAuthorization) *CreateVpcPeeringAuthorizationOutput { - s.VpcPeeringAuthorization = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringConnectionInput -type CreateVpcPeeringConnectionInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet. This tells Amazon GameLift which GameLift - // VPC to peer with. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Unique identifier for the AWS account with the VPC that you want to peer - // your Amazon GameLift fleet with. You can find your Account ID in the AWS - // Management Console under account settings. - // - // PeerVpcAwsAccountId is a required field - PeerVpcAwsAccountId *string `min:"1" type:"string" required:"true"` - - // Unique identifier for a VPC with resources to be accessed by your Amazon - // GameLift fleet. The VPC must be in the same region where your fleet is deployed. - // To get VPC information, including IDs, use the Virtual Private Cloud service - // tools, including the VPC Dashboard in the AWS Management Console. - // - // PeerVpcId is a required field - PeerVpcId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateVpcPeeringConnectionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateVpcPeeringConnectionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateVpcPeeringConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateVpcPeeringConnectionInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - if s.PeerVpcAwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("PeerVpcAwsAccountId")) - } - if s.PeerVpcAwsAccountId != nil && len(*s.PeerVpcAwsAccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PeerVpcAwsAccountId", 1)) - } - if s.PeerVpcId == nil { - invalidParams.Add(request.NewErrParamRequired("PeerVpcId")) - } - if s.PeerVpcId != nil && len(*s.PeerVpcId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PeerVpcId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetId sets the FleetId field's value. -func (s *CreateVpcPeeringConnectionInput) SetFleetId(v string) *CreateVpcPeeringConnectionInput { - s.FleetId = &v - return s -} - -// SetPeerVpcAwsAccountId sets the PeerVpcAwsAccountId field's value. -func (s *CreateVpcPeeringConnectionInput) SetPeerVpcAwsAccountId(v string) *CreateVpcPeeringConnectionInput { - s.PeerVpcAwsAccountId = &v - return s -} - -// SetPeerVpcId sets the PeerVpcId field's value. -func (s *CreateVpcPeeringConnectionInput) SetPeerVpcId(v string) *CreateVpcPeeringConnectionInput { - s.PeerVpcId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateVpcPeeringConnectionOutput -type CreateVpcPeeringConnectionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateVpcPeeringConnectionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateVpcPeeringConnectionOutput) GoString() string { - return s.String() -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteAliasInput -type DeleteAliasInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet alias. Specify the alias you want to delete. - // - // AliasId is a required field - AliasId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteAliasInput"} - if s.AliasId == nil { - invalidParams.Add(request.NewErrParamRequired("AliasId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasId sets the AliasId field's value. -func (s *DeleteAliasInput) SetAliasId(v string) *DeleteAliasInput { - s.AliasId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteAliasOutput -type DeleteAliasOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteAliasOutput) GoString() string { - return s.String() -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteBuildInput -type DeleteBuildInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a build to delete. 
- // - // BuildId is a required field - BuildId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBuildInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBuildInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBuildInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBuildInput"} - if s.BuildId == nil { - invalidParams.Add(request.NewErrParamRequired("BuildId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBuildId sets the BuildId field's value. -func (s *DeleteBuildInput) SetBuildId(v string) *DeleteBuildInput { - s.BuildId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteBuildOutput -type DeleteBuildOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBuildOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBuildOutput) GoString() string { - return s.String() -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteFleetInput -type DeleteFleetInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet to be deleted. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteFleetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteFleetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteFleetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteFleetInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetId sets the FleetId field's value. -func (s *DeleteFleetInput) SetFleetId(v string) *DeleteFleetInput { - s.FleetId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteFleetOutput -type DeleteFleetOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteFleetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteFleetOutput) GoString() string { - return s.String() -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueueInput -type DeleteGameSessionQueueInput struct { - _ struct{} `type:"structure"` - - // Descriptive label that is associated with game session queue. Queue names - // must be unique within each region. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteGameSessionQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteGameSessionQueueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteGameSessionQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteGameSessionQueueInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteGameSessionQueueInput) SetName(v string) *DeleteGameSessionQueueInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueueOutput -type DeleteGameSessionQueueOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteGameSessionQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteGameSessionQueueOutput) GoString() string { - return s.String() -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteMatchmakingConfigurationInput -type DeleteMatchmakingConfigurationInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a matchmaking configuration - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteMatchmakingConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMatchmakingConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteMatchmakingConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteMatchmakingConfigurationInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteMatchmakingConfigurationInput) SetName(v string) *DeleteMatchmakingConfigurationInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteMatchmakingConfigurationOutput -type DeleteMatchmakingConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteMatchmakingConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMatchmakingConfigurationOutput) GoString() string { - return s.String() -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteScalingPolicyInput -type DeleteScalingPolicyInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet to be deleted. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Descriptive label that is associated with a scaling policy. Policy names - // do not need to be unique. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteScalingPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteScalingPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteScalingPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteScalingPolicyInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetId sets the FleetId field's value. -func (s *DeleteScalingPolicyInput) SetFleetId(v string) *DeleteScalingPolicyInput { - s.FleetId = &v - return s -} - -// SetName sets the Name field's value. -func (s *DeleteScalingPolicyInput) SetName(v string) *DeleteScalingPolicyInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteScalingPolicyOutput -type DeleteScalingPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteScalingPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteScalingPolicyOutput) GoString() string { - return s.String() -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringAuthorizationInput -type DeleteVpcPeeringAuthorizationInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for the AWS account that you use to manage your Amazon - // GameLift fleet. You can find your Account ID in the AWS Management Console - // under account settings. - // - // GameLiftAwsAccountId is a required field - GameLiftAwsAccountId *string `min:"1" type:"string" required:"true"` - - // Unique identifier for a VPC with resources to be accessed by your Amazon - // GameLift fleet. The VPC must be in the same region where your fleet is deployed. 
- // To get VPC information, including IDs, use the Virtual Private Cloud service - // tools, including the VPC Dashboard in the AWS Management Console. - // - // PeerVpcId is a required field - PeerVpcId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteVpcPeeringAuthorizationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteVpcPeeringAuthorizationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteVpcPeeringAuthorizationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteVpcPeeringAuthorizationInput"} - if s.GameLiftAwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("GameLiftAwsAccountId")) - } - if s.GameLiftAwsAccountId != nil && len(*s.GameLiftAwsAccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameLiftAwsAccountId", 1)) - } - if s.PeerVpcId == nil { - invalidParams.Add(request.NewErrParamRequired("PeerVpcId")) - } - if s.PeerVpcId != nil && len(*s.PeerVpcId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PeerVpcId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGameLiftAwsAccountId sets the GameLiftAwsAccountId field's value. -func (s *DeleteVpcPeeringAuthorizationInput) SetGameLiftAwsAccountId(v string) *DeleteVpcPeeringAuthorizationInput { - s.GameLiftAwsAccountId = &v - return s -} - -// SetPeerVpcId sets the PeerVpcId field's value. 
-func (s *DeleteVpcPeeringAuthorizationInput) SetPeerVpcId(v string) *DeleteVpcPeeringAuthorizationInput { - s.PeerVpcId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringAuthorizationOutput -type DeleteVpcPeeringAuthorizationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteVpcPeeringAuthorizationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteVpcPeeringAuthorizationOutput) GoString() string { - return s.String() -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringConnectionInput -type DeleteVpcPeeringConnectionInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet. This value must match the fleet ID referenced - // in the VPC peering connection record. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Unique identifier for a VPC peering connection. This value is included in - // the VpcPeeringConnection object, which can be retrieved by calling DescribeVpcPeeringConnections. - // - // VpcPeeringConnectionId is a required field - VpcPeeringConnectionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteVpcPeeringConnectionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteVpcPeeringConnectionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteVpcPeeringConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteVpcPeeringConnectionInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - if s.VpcPeeringConnectionId == nil { - invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId")) - } - if s.VpcPeeringConnectionId != nil && len(*s.VpcPeeringConnectionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VpcPeeringConnectionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetId sets the FleetId field's value. -func (s *DeleteVpcPeeringConnectionInput) SetFleetId(v string) *DeleteVpcPeeringConnectionInput { - s.FleetId = &v - return s -} - -// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. -func (s *DeleteVpcPeeringConnectionInput) SetVpcPeeringConnectionId(v string) *DeleteVpcPeeringConnectionInput { - s.VpcPeeringConnectionId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteVpcPeeringConnectionOutput -type DeleteVpcPeeringConnectionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteVpcPeeringConnectionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteVpcPeeringConnectionOutput) GoString() string { - return s.String() -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeAliasInput -type DescribeAliasInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet alias. Specify the alias you want to retrieve. 
- // - // AliasId is a required field - AliasId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeAliasInput"} - if s.AliasId == nil { - invalidParams.Add(request.NewErrParamRequired("AliasId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasId sets the AliasId field's value. -func (s *DescribeAliasInput) SetAliasId(v string) *DescribeAliasInput { - s.AliasId = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeAliasOutput -type DescribeAliasOutput struct { - _ struct{} `type:"structure"` - - // Object that contains the requested alias. - Alias *Alias `type:"structure"` -} - -// String returns the string representation -func (s DescribeAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeAliasOutput) GoString() string { - return s.String() -} - -// SetAlias sets the Alias field's value. -func (s *DescribeAliasOutput) SetAlias(v *Alias) *DescribeAliasOutput { - s.Alias = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeBuildInput -type DescribeBuildInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a build to retrieve properties for. 
- // - // BuildId is a required field - BuildId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeBuildInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeBuildInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeBuildInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeBuildInput"} - if s.BuildId == nil { - invalidParams.Add(request.NewErrParamRequired("BuildId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBuildId sets the BuildId field's value. -func (s *DescribeBuildInput) SetBuildId(v string) *DescribeBuildInput { - s.BuildId = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeBuildOutput -type DescribeBuildOutput struct { - _ struct{} `type:"structure"` - - // Set of properties describing the requested build. - Build *Build `type:"structure"` -} - -// String returns the string representation -func (s DescribeBuildOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeBuildOutput) GoString() string { - return s.String() -} - -// SetBuild sets the Build field's value. -func (s *DescribeBuildOutput) SetBuild(v *Build) *DescribeBuildOutput { - s.Build = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeEC2InstanceLimitsInput -type DescribeEC2InstanceLimitsInput struct { - _ struct{} `type:"structure"` - - // Name of an EC2 instance type that is supported in Amazon GameLift. 
A fleet - // instance type determines the computing resources of each instance in the - // fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift - // supports the following EC2 instance types. See Amazon EC2 Instance Types - // (http://aws.amazon.com/ec2/instance-types/) for detailed descriptions. Leave - // this parameter blank to retrieve limits for all types. - EC2InstanceType *string `type:"string" enum:"EC2InstanceType"` -} - -// String returns the string representation -func (s DescribeEC2InstanceLimitsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeEC2InstanceLimitsInput) GoString() string { - return s.String() -} - -// SetEC2InstanceType sets the EC2InstanceType field's value. -func (s *DescribeEC2InstanceLimitsInput) SetEC2InstanceType(v string) *DescribeEC2InstanceLimitsInput { - s.EC2InstanceType = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeEC2InstanceLimitsOutput -type DescribeEC2InstanceLimitsOutput struct { - _ struct{} `type:"structure"` - - // Object that contains the maximum number of instances for the specified instance - // type. - EC2InstanceLimits []*EC2InstanceLimit `type:"list"` -} - -// String returns the string representation -func (s DescribeEC2InstanceLimitsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeEC2InstanceLimitsOutput) GoString() string { - return s.String() -} - -// SetEC2InstanceLimits sets the EC2InstanceLimits field's value. -func (s *DescribeEC2InstanceLimitsOutput) SetEC2InstanceLimits(v []*EC2InstanceLimit) *DescribeEC2InstanceLimitsOutput { - s.EC2InstanceLimits = v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetAttributesInput -type DescribeFleetAttributesInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet(s) to retrieve attributes for. To request attributes - // for all fleets, leave this parameter empty. - FleetIds []*string `min:"1" type:"list"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. This parameter is ignored when - // the request specifies one or a list of fleet IDs. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. This parameter - // is ignored when the request specifies one or a list of fleet IDs. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeFleetAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeFleetAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeFleetAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeFleetAttributesInput"} - if s.FleetIds != nil && len(s.FleetIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FleetIds", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetIds sets the FleetIds field's value. 
-func (s *DescribeFleetAttributesInput) SetFleetIds(v []*string) *DescribeFleetAttributesInput { - s.FleetIds = v - return s -} - -// SetLimit sets the Limit field's value. -func (s *DescribeFleetAttributesInput) SetLimit(v int64) *DescribeFleetAttributesInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeFleetAttributesInput) SetNextToken(v string) *DescribeFleetAttributesInput { - s.NextToken = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetAttributesOutput -type DescribeFleetAttributesOutput struct { - _ struct{} `type:"structure"` - - // Collection of objects containing attribute metadata for each requested fleet - // ID. - FleetAttributes []*FleetAttributes `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeFleetAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeFleetAttributesOutput) GoString() string { - return s.String() -} - -// SetFleetAttributes sets the FleetAttributes field's value. -func (s *DescribeFleetAttributesOutput) SetFleetAttributes(v []*FleetAttributes) *DescribeFleetAttributesOutput { - s.FleetAttributes = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeFleetAttributesOutput) SetNextToken(v string) *DescribeFleetAttributesOutput { - s.NextToken = &v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetCapacityInput -type DescribeFleetCapacityInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet(s) to retrieve capacity information for. To - // request capacity information for all fleets, leave this parameter empty. - FleetIds []*string `min:"1" type:"list"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. This parameter is ignored when - // the request specifies one or a list of fleet IDs. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. This parameter - // is ignored when the request specifies one or a list of fleet IDs. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeFleetCapacityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeFleetCapacityInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeFleetCapacityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeFleetCapacityInput"} - if s.FleetIds != nil && len(s.FleetIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FleetIds", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetIds sets the FleetIds field's value. 
-func (s *DescribeFleetCapacityInput) SetFleetIds(v []*string) *DescribeFleetCapacityInput { - s.FleetIds = v - return s -} - -// SetLimit sets the Limit field's value. -func (s *DescribeFleetCapacityInput) SetLimit(v int64) *DescribeFleetCapacityInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeFleetCapacityInput) SetNextToken(v string) *DescribeFleetCapacityInput { - s.NextToken = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetCapacityOutput -type DescribeFleetCapacityOutput struct { - _ struct{} `type:"structure"` - - // Collection of objects containing capacity information for each requested - // fleet ID. Leave this parameter empty to retrieve capacity information for - // all fleets. - FleetCapacity []*FleetCapacity `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeFleetCapacityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeFleetCapacityOutput) GoString() string { - return s.String() -} - -// SetFleetCapacity sets the FleetCapacity field's value. -func (s *DescribeFleetCapacityOutput) SetFleetCapacity(v []*FleetCapacity) *DescribeFleetCapacityOutput { - s.FleetCapacity = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeFleetCapacityOutput) SetNextToken(v string) *DescribeFleetCapacityOutput { - s.NextToken = &v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetEventsInput -type DescribeFleetEventsInput struct { - _ struct{} `type:"structure"` - - // Most recent date to retrieve event logs for. If no end time is specified, - // this call returns entries from the specified start time up to the present. - // Format is a number expressed in Unix time as milliseconds (ex: "1469498468.057"). - EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Unique identifier for a fleet to get event logs for. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. - NextToken *string `min:"1" type:"string"` - - // Earliest date to retrieve event logs for. If no start time is specified, - // this call returns entries starting from when the fleet was created to the - // specified end time. Format is a number expressed in Unix time as milliseconds - // (ex: "1469498468.057"). - StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` -} - -// String returns the string representation -func (s DescribeFleetEventsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeFleetEventsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeFleetEventsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeFleetEventsInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndTime sets the EndTime field's value. -func (s *DescribeFleetEventsInput) SetEndTime(v time.Time) *DescribeFleetEventsInput { - s.EndTime = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *DescribeFleetEventsInput) SetFleetId(v string) *DescribeFleetEventsInput { - s.FleetId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *DescribeFleetEventsInput) SetLimit(v int64) *DescribeFleetEventsInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeFleetEventsInput) SetNextToken(v string) *DescribeFleetEventsInput { - s.NextToken = &v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *DescribeFleetEventsInput) SetStartTime(v time.Time) *DescribeFleetEventsInput { - s.StartTime = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetEventsOutput -type DescribeFleetEventsOutput struct { - _ struct{} `type:"structure"` - - // Collection of objects containing event log entries for the specified fleet. - Events []*Event `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. 
- NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeFleetEventsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeFleetEventsOutput) GoString() string { - return s.String() -} - -// SetEvents sets the Events field's value. -func (s *DescribeFleetEventsOutput) SetEvents(v []*Event) *DescribeFleetEventsOutput { - s.Events = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeFleetEventsOutput) SetNextToken(v string) *DescribeFleetEventsOutput { - s.NextToken = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetPortSettingsInput -type DescribeFleetPortSettingsInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet to retrieve port settings for. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeFleetPortSettingsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeFleetPortSettingsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeFleetPortSettingsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeFleetPortSettingsInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetId sets the FleetId field's value. -func (s *DescribeFleetPortSettingsInput) SetFleetId(v string) *DescribeFleetPortSettingsInput { - s.FleetId = &v - return s -} - -// Represents the returned data in response to a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetPortSettingsOutput -type DescribeFleetPortSettingsOutput struct { - _ struct{} `type:"structure"` - - // Object that contains port settings for the requested fleet ID. - InboundPermissions []*IpPermission `type:"list"` -} - -// String returns the string representation -func (s DescribeFleetPortSettingsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeFleetPortSettingsOutput) GoString() string { - return s.String() -} - -// SetInboundPermissions sets the InboundPermissions field's value. -func (s *DescribeFleetPortSettingsOutput) SetInboundPermissions(v []*IpPermission) *DescribeFleetPortSettingsOutput { - s.InboundPermissions = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetUtilizationInput -type DescribeFleetUtilizationInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet(s) to retrieve utilization data for. To request - // utilization data for all fleets, leave this parameter empty. - FleetIds []*string `min:"1" type:"list"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. This parameter is ignored when - // the request specifies one or a list of fleet IDs. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. This parameter - // is ignored when the request specifies one or a list of fleet IDs. 
- NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeFleetUtilizationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeFleetUtilizationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeFleetUtilizationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeFleetUtilizationInput"} - if s.FleetIds != nil && len(s.FleetIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FleetIds", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetIds sets the FleetIds field's value. -func (s *DescribeFleetUtilizationInput) SetFleetIds(v []*string) *DescribeFleetUtilizationInput { - s.FleetIds = v - return s -} - -// SetLimit sets the Limit field's value. -func (s *DescribeFleetUtilizationInput) SetLimit(v int64) *DescribeFleetUtilizationInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeFleetUtilizationInput) SetNextToken(v string) *DescribeFleetUtilizationInput { - s.NextToken = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeFleetUtilizationOutput -type DescribeFleetUtilizationOutput struct { - _ struct{} `type:"structure"` - - // Collection of objects containing utilization information for each requested - // fleet ID. - FleetUtilization []*FleetUtilization `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. 
If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeFleetUtilizationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeFleetUtilizationOutput) GoString() string { - return s.String() -} - -// SetFleetUtilization sets the FleetUtilization field's value. -func (s *DescribeFleetUtilizationOutput) SetFleetUtilization(v []*FleetUtilization) *DescribeFleetUtilizationOutput { - s.FleetUtilization = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeFleetUtilizationOutput) SetNextToken(v string) *DescribeFleetUtilizationOutput { - s.NextToken = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetailsInput -type DescribeGameSessionDetailsInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for an alias associated with the fleet to retrieve all - // game sessions for. - AliasId *string `type:"string"` - - // Unique identifier for a fleet to retrieve all game sessions active on the - // fleet. - FleetId *string `type:"string"` - - // Unique identifier for the game session to retrieve. - GameSessionId *string `min:"1" type:"string"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. - NextToken *string `min:"1" type:"string"` - - // Game session status to filter results on. 
Possible game session statuses - // include ACTIVE, TERMINATED, ACTIVATING and TERMINATING (the last two are - // transitory). - StatusFilter *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeGameSessionDetailsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeGameSessionDetailsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeGameSessionDetailsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeGameSessionDetailsInput"} - if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.StatusFilter != nil && len(*s.StatusFilter) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StatusFilter", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasId sets the AliasId field's value. -func (s *DescribeGameSessionDetailsInput) SetAliasId(v string) *DescribeGameSessionDetailsInput { - s.AliasId = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *DescribeGameSessionDetailsInput) SetFleetId(v string) *DescribeGameSessionDetailsInput { - s.FleetId = &v - return s -} - -// SetGameSessionId sets the GameSessionId field's value. -func (s *DescribeGameSessionDetailsInput) SetGameSessionId(v string) *DescribeGameSessionDetailsInput { - s.GameSessionId = &v - return s -} - -// SetLimit sets the Limit field's value. 
-func (s *DescribeGameSessionDetailsInput) SetLimit(v int64) *DescribeGameSessionDetailsInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeGameSessionDetailsInput) SetNextToken(v string) *DescribeGameSessionDetailsInput { - s.NextToken = &v - return s -} - -// SetStatusFilter sets the StatusFilter field's value. -func (s *DescribeGameSessionDetailsInput) SetStatusFilter(v string) *DescribeGameSessionDetailsInput { - s.StatusFilter = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetailsOutput -type DescribeGameSessionDetailsOutput struct { - _ struct{} `type:"structure"` - - // Collection of objects containing game session properties and the protection - // policy currently in force for each session matching the request. - GameSessionDetails []*GameSessionDetail `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeGameSessionDetailsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeGameSessionDetailsOutput) GoString() string { - return s.String() -} - -// SetGameSessionDetails sets the GameSessionDetails field's value. -func (s *DescribeGameSessionDetailsOutput) SetGameSessionDetails(v []*GameSessionDetail) *DescribeGameSessionDetailsOutput { - s.GameSessionDetails = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeGameSessionDetailsOutput) SetNextToken(v string) *DescribeGameSessionDetailsOutput { - s.NextToken = &v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacementInput -type DescribeGameSessionPlacementInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a game session placement to retrieve. - // - // PlacementId is a required field - PlacementId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeGameSessionPlacementInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeGameSessionPlacementInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeGameSessionPlacementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeGameSessionPlacementInput"} - if s.PlacementId == nil { - invalidParams.Add(request.NewErrParamRequired("PlacementId")) - } - if s.PlacementId != nil && len(*s.PlacementId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlacementId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPlacementId sets the PlacementId field's value. -func (s *DescribeGameSessionPlacementInput) SetPlacementId(v string) *DescribeGameSessionPlacementInput { - s.PlacementId = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacementOutput -type DescribeGameSessionPlacementOutput struct { - _ struct{} `type:"structure"` - - // Object that describes the requested game session placement. 
- GameSessionPlacement *GameSessionPlacement `type:"structure"` -} - -// String returns the string representation -func (s DescribeGameSessionPlacementOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeGameSessionPlacementOutput) GoString() string { - return s.String() -} - -// SetGameSessionPlacement sets the GameSessionPlacement field's value. -func (s *DescribeGameSessionPlacementOutput) SetGameSessionPlacement(v *GameSessionPlacement) *DescribeGameSessionPlacementOutput { - s.GameSessionPlacement = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueuesInput -type DescribeGameSessionQueuesInput struct { - _ struct{} `type:"structure"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. - Limit *int64 `min:"1" type:"integer"` - - // List of queue names to retrieve information for. To request settings for - // all queues, leave this parameter empty. - Names []*string `type:"list"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeGameSessionQueuesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeGameSessionQueuesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeGameSessionQueuesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeGameSessionQueuesInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLimit sets the Limit field's value. -func (s *DescribeGameSessionQueuesInput) SetLimit(v int64) *DescribeGameSessionQueuesInput { - s.Limit = &v - return s -} - -// SetNames sets the Names field's value. -func (s *DescribeGameSessionQueuesInput) SetNames(v []*string) *DescribeGameSessionQueuesInput { - s.Names = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeGameSessionQueuesInput) SetNextToken(v string) *DescribeGameSessionQueuesInput { - s.NextToken = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueuesOutput -type DescribeGameSessionQueuesOutput struct { - _ struct{} `type:"structure"` - - // Collection of objects that describes the requested game session queues. - GameSessionQueues []*GameSessionQueue `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeGameSessionQueuesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeGameSessionQueuesOutput) GoString() string { - return s.String() -} - -// SetGameSessionQueues sets the GameSessionQueues field's value. 
-func (s *DescribeGameSessionQueuesOutput) SetGameSessionQueues(v []*GameSessionQueue) *DescribeGameSessionQueuesOutput { - s.GameSessionQueues = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeGameSessionQueuesOutput) SetNextToken(v string) *DescribeGameSessionQueuesOutput { - s.NextToken = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionsInput -type DescribeGameSessionsInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for an alias associated with the fleet to retrieve all - // game sessions for. - AliasId *string `type:"string"` - - // Unique identifier for a fleet to retrieve all game sessions for. - FleetId *string `type:"string"` - - // Unique identifier for the game session to retrieve. You can use either a - // GameSessionId or GameSessionArn value. - GameSessionId *string `min:"1" type:"string"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. - NextToken *string `min:"1" type:"string"` - - // Game session status to filter results on. Possible game session statuses - // include ACTIVE, TERMINATED, ACTIVATING, and TERMINATING (the last two are - // transitory). - StatusFilter *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeGameSessionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeGameSessionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeGameSessionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeGameSessionsInput"} - if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.StatusFilter != nil && len(*s.StatusFilter) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StatusFilter", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasId sets the AliasId field's value. -func (s *DescribeGameSessionsInput) SetAliasId(v string) *DescribeGameSessionsInput { - s.AliasId = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *DescribeGameSessionsInput) SetFleetId(v string) *DescribeGameSessionsInput { - s.FleetId = &v - return s -} - -// SetGameSessionId sets the GameSessionId field's value. -func (s *DescribeGameSessionsInput) SetGameSessionId(v string) *DescribeGameSessionsInput { - s.GameSessionId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *DescribeGameSessionsInput) SetLimit(v int64) *DescribeGameSessionsInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeGameSessionsInput) SetNextToken(v string) *DescribeGameSessionsInput { - s.NextToken = &v - return s -} - -// SetStatusFilter sets the StatusFilter field's value. -func (s *DescribeGameSessionsInput) SetStatusFilter(v string) *DescribeGameSessionsInput { - s.StatusFilter = &v - return s -} - -// Represents the returned data in response to a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionsOutput -type DescribeGameSessionsOutput struct { - _ struct{} `type:"structure"` - - // Collection of objects containing game session properties for each session - // matching the request. - GameSessions []*GameSession `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeGameSessionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeGameSessionsOutput) GoString() string { - return s.String() -} - -// SetGameSessions sets the GameSessions field's value. -func (s *DescribeGameSessionsOutput) SetGameSessions(v []*GameSession) *DescribeGameSessionsOutput { - s.GameSessions = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeGameSessionsOutput) SetNextToken(v string) *DescribeGameSessionsOutput { - s.NextToken = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstancesInput -type DescribeInstancesInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet to retrieve instance information for. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Unique identifier for an instance to retrieve. Specify an instance ID or - // leave blank to retrieve all instances in the fleet. - InstanceId *string `type:"string"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. 
Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeInstancesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeInstancesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeInstancesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeInstancesInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetId sets the FleetId field's value. -func (s *DescribeInstancesInput) SetFleetId(v string) *DescribeInstancesInput { - s.FleetId = &v - return s -} - -// SetInstanceId sets the InstanceId field's value. -func (s *DescribeInstancesInput) SetInstanceId(v string) *DescribeInstancesInput { - s.InstanceId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *DescribeInstancesInput) SetLimit(v int64) *DescribeInstancesInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeInstancesInput) SetNextToken(v string) *DescribeInstancesInput { - s.NextToken = &v - return s -} - -// Represents the returned data in response to a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstancesOutput -type DescribeInstancesOutput struct { - _ struct{} `type:"structure"` - - // Collection of objects containing properties for each instance returned. - Instances []*Instance `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeInstancesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeInstancesOutput) GoString() string { - return s.String() -} - -// SetInstances sets the Instances field's value. -func (s *DescribeInstancesOutput) SetInstances(v []*Instance) *DescribeInstancesOutput { - s.Instances = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeInstancesOutput) SetNextToken(v string) *DescribeInstancesOutput { - s.NextToken = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingConfigurationsInput -type DescribeMatchmakingConfigurationsInput struct { - _ struct{} `type:"structure"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. This parameter is limited to 10. - Limit *int64 `min:"1" type:"integer"` - - // Unique identifier for a matchmaking configuration(s) to retrieve. To request - // all existing configurations, leave this parameter empty. - Names []*string `type:"list"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. 
- NextToken *string `min:"1" type:"string"` - - // Unique identifier for a matchmaking rule set. Use this parameter to retrieve - // all matchmaking configurations that use this rule set. - RuleSetName *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeMatchmakingConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeMatchmakingConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeMatchmakingConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeMatchmakingConfigurationsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.RuleSetName != nil && len(*s.RuleSetName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleSetName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLimit sets the Limit field's value. -func (s *DescribeMatchmakingConfigurationsInput) SetLimit(v int64) *DescribeMatchmakingConfigurationsInput { - s.Limit = &v - return s -} - -// SetNames sets the Names field's value. -func (s *DescribeMatchmakingConfigurationsInput) SetNames(v []*string) *DescribeMatchmakingConfigurationsInput { - s.Names = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeMatchmakingConfigurationsInput) SetNextToken(v string) *DescribeMatchmakingConfigurationsInput { - s.NextToken = &v - return s -} - -// SetRuleSetName sets the RuleSetName field's value. 
-func (s *DescribeMatchmakingConfigurationsInput) SetRuleSetName(v string) *DescribeMatchmakingConfigurationsInput { - s.RuleSetName = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingConfigurationsOutput -type DescribeMatchmakingConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // Collection of requested matchmaking configuration objects. - Configurations []*MatchmakingConfiguration `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeMatchmakingConfigurationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeMatchmakingConfigurationsOutput) GoString() string { - return s.String() -} - -// SetConfigurations sets the Configurations field's value. -func (s *DescribeMatchmakingConfigurationsOutput) SetConfigurations(v []*MatchmakingConfiguration) *DescribeMatchmakingConfigurationsOutput { - s.Configurations = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeMatchmakingConfigurationsOutput) SetNextToken(v string) *DescribeMatchmakingConfigurationsOutput { - s.NextToken = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingInput -type DescribeMatchmakingInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a matchmaking ticket. To request all existing tickets, - // leave this parameter empty. 
- // - // TicketIds is a required field - TicketIds []*string `type:"list" required:"true"` -} - -// String returns the string representation -func (s DescribeMatchmakingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeMatchmakingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeMatchmakingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeMatchmakingInput"} - if s.TicketIds == nil { - invalidParams.Add(request.NewErrParamRequired("TicketIds")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTicketIds sets the TicketIds field's value. -func (s *DescribeMatchmakingInput) SetTicketIds(v []*string) *DescribeMatchmakingInput { - s.TicketIds = v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingOutput -type DescribeMatchmakingOutput struct { - _ struct{} `type:"structure"` - - // Collection of existing matchmaking ticket objects matching the request. - TicketList []*MatchmakingTicket `type:"list"` -} - -// String returns the string representation -func (s DescribeMatchmakingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeMatchmakingOutput) GoString() string { - return s.String() -} - -// SetTicketList sets the TicketList field's value. -func (s *DescribeMatchmakingOutput) SetTicketList(v []*MatchmakingTicket) *DescribeMatchmakingOutput { - s.TicketList = v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingRuleSetsInput -type DescribeMatchmakingRuleSetsInput struct { - _ struct{} `type:"structure"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. - Limit *int64 `min:"1" type:"integer"` - - // Unique identifier for a matchmaking rule set. This name is used to identify - // the rule set associated with a matchmaking configuration. - Names []*string `min:"1" type:"list"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeMatchmakingRuleSetsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeMatchmakingRuleSetsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeMatchmakingRuleSetsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeMatchmakingRuleSetsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Names != nil && len(s.Names) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Names", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLimit sets the Limit field's value. -func (s *DescribeMatchmakingRuleSetsInput) SetLimit(v int64) *DescribeMatchmakingRuleSetsInput { - s.Limit = &v - return s -} - -// SetNames sets the Names field's value. 
-func (s *DescribeMatchmakingRuleSetsInput) SetNames(v []*string) *DescribeMatchmakingRuleSetsInput { - s.Names = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeMatchmakingRuleSetsInput) SetNextToken(v string) *DescribeMatchmakingRuleSetsInput { - s.NextToken = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingRuleSetsOutput -type DescribeMatchmakingRuleSetsOutput struct { - _ struct{} `type:"structure"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` - - // Collection of requested matchmaking rule set objects. - // - // RuleSets is a required field - RuleSets []*MatchmakingRuleSet `type:"list" required:"true"` -} - -// String returns the string representation -func (s DescribeMatchmakingRuleSetsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeMatchmakingRuleSetsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeMatchmakingRuleSetsOutput) SetNextToken(v string) *DescribeMatchmakingRuleSetsOutput { - s.NextToken = &v - return s -} - -// SetRuleSets sets the RuleSets field's value. -func (s *DescribeMatchmakingRuleSetsOutput) SetRuleSets(v []*MatchmakingRuleSet) *DescribeMatchmakingRuleSetsOutput { - s.RuleSets = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribePlayerSessionsInput -type DescribePlayerSessionsInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for the game session to retrieve player sessions for. 
- GameSessionId *string `min:"1" type:"string"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. If a player session ID is specified, - // this parameter is ignored. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. If a player session - // ID is specified, this parameter is ignored. - NextToken *string `min:"1" type:"string"` - - // Unique identifier for a player to retrieve player sessions for. - PlayerId *string `min:"1" type:"string"` - - // Unique identifier for a player session to retrieve. - PlayerSessionId *string `type:"string"` - - // Player session status to filter results on. - // - // Possible player session statuses include the following: - // - // * RESERVED -- The player session request has been received, but the player - // has not yet connected to the server process and/or been validated. - // - // * ACTIVE -- The player has been validated by the server process and is - // currently connected. - // - // * COMPLETED -- The player connection has been dropped. - // - // * TIMEDOUT -- A player session request was received, but the player did - // not connect and/or was not validated within the timeout limit (60 seconds). - PlayerSessionStatusFilter *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribePlayerSessionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribePlayerSessionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribePlayerSessionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribePlayerSessionsInput"} - if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.PlayerId != nil && len(*s.PlayerId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1)) - } - if s.PlayerSessionStatusFilter != nil && len(*s.PlayerSessionStatusFilter) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlayerSessionStatusFilter", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGameSessionId sets the GameSessionId field's value. -func (s *DescribePlayerSessionsInput) SetGameSessionId(v string) *DescribePlayerSessionsInput { - s.GameSessionId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *DescribePlayerSessionsInput) SetLimit(v int64) *DescribePlayerSessionsInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribePlayerSessionsInput) SetNextToken(v string) *DescribePlayerSessionsInput { - s.NextToken = &v - return s -} - -// SetPlayerId sets the PlayerId field's value. -func (s *DescribePlayerSessionsInput) SetPlayerId(v string) *DescribePlayerSessionsInput { - s.PlayerId = &v - return s -} - -// SetPlayerSessionId sets the PlayerSessionId field's value. -func (s *DescribePlayerSessionsInput) SetPlayerSessionId(v string) *DescribePlayerSessionsInput { - s.PlayerSessionId = &v - return s -} - -// SetPlayerSessionStatusFilter sets the PlayerSessionStatusFilter field's value. 
-func (s *DescribePlayerSessionsInput) SetPlayerSessionStatusFilter(v string) *DescribePlayerSessionsInput { - s.PlayerSessionStatusFilter = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribePlayerSessionsOutput -type DescribePlayerSessionsOutput struct { - _ struct{} `type:"structure"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` - - // Collection of objects containing properties for each player session that - // matches the request. - PlayerSessions []*PlayerSession `type:"list"` -} - -// String returns the string representation -func (s DescribePlayerSessionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribePlayerSessionsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribePlayerSessionsOutput) SetNextToken(v string) *DescribePlayerSessionsOutput { - s.NextToken = &v - return s -} - -// SetPlayerSessions sets the PlayerSessions field's value. -func (s *DescribePlayerSessionsOutput) SetPlayerSessions(v []*PlayerSession) *DescribePlayerSessionsOutput { - s.PlayerSessions = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeRuntimeConfigurationInput -type DescribeRuntimeConfigurationInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet to get the run-time configuration for. 
- // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeRuntimeConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeRuntimeConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeRuntimeConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeRuntimeConfigurationInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetId sets the FleetId field's value. -func (s *DescribeRuntimeConfigurationInput) SetFleetId(v string) *DescribeRuntimeConfigurationInput { - s.FleetId = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeRuntimeConfigurationOutput -type DescribeRuntimeConfigurationOutput struct { - _ struct{} `type:"structure"` - - // Instructions describing how server processes should be launched and maintained - // on each instance in the fleet. - RuntimeConfiguration *RuntimeConfiguration `type:"structure"` -} - -// String returns the string representation -func (s DescribeRuntimeConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeRuntimeConfigurationOutput) GoString() string { - return s.String() -} - -// SetRuntimeConfiguration sets the RuntimeConfiguration field's value. -func (s *DescribeRuntimeConfigurationOutput) SetRuntimeConfiguration(v *RuntimeConfiguration) *DescribeRuntimeConfigurationOutput { - s.RuntimeConfiguration = v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeScalingPoliciesInput -type DescribeScalingPoliciesInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet to retrieve scaling policies for. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. - NextToken *string `min:"1" type:"string"` - - // Scaling policy status to filter results on. A scaling policy is only in force - // when in an ACTIVE status. - // - // * ACTIVE -- The scaling policy is currently in force. - // - // * UPDATEREQUESTED -- A request to update the scaling policy has been received. - // - // * UPDATING -- A change is being made to the scaling policy. - // - // * DELETEREQUESTED -- A request to delete the scaling policy has been received. - // - // * DELETING -- The scaling policy is being deleted. - // - // * DELETED -- The scaling policy has been deleted. - // - // * ERROR -- An error occurred in creating the policy. It should be removed - // and recreated. - StatusFilter *string `type:"string" enum:"ScalingStatusType"` -} - -// String returns the string representation -func (s DescribeScalingPoliciesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeScalingPoliciesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeScalingPoliciesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeScalingPoliciesInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetId sets the FleetId field's value. -func (s *DescribeScalingPoliciesInput) SetFleetId(v string) *DescribeScalingPoliciesInput { - s.FleetId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *DescribeScalingPoliciesInput) SetLimit(v int64) *DescribeScalingPoliciesInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeScalingPoliciesInput) SetNextToken(v string) *DescribeScalingPoliciesInput { - s.NextToken = &v - return s -} - -// SetStatusFilter sets the StatusFilter field's value. -func (s *DescribeScalingPoliciesInput) SetStatusFilter(v string) *DescribeScalingPoliciesInput { - s.StatusFilter = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeScalingPoliciesOutput -type DescribeScalingPoliciesOutput struct { - _ struct{} `type:"structure"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` - - // Collection of objects containing the scaling policies matching the request. 
- ScalingPolicies []*ScalingPolicy `type:"list"` -} - -// String returns the string representation -func (s DescribeScalingPoliciesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeScalingPoliciesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeScalingPoliciesOutput) SetNextToken(v string) *DescribeScalingPoliciesOutput { - s.NextToken = &v - return s -} - -// SetScalingPolicies sets the ScalingPolicies field's value. -func (s *DescribeScalingPoliciesOutput) SetScalingPolicies(v []*ScalingPolicy) *DescribeScalingPoliciesOutput { - s.ScalingPolicies = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringAuthorizationsInput -type DescribeVpcPeeringAuthorizationsInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DescribeVpcPeeringAuthorizationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeVpcPeeringAuthorizationsInput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringAuthorizationsOutput -type DescribeVpcPeeringAuthorizationsOutput struct { - _ struct{} `type:"structure"` - - // Collection of objects that describe all valid VPC peering operations for - // the current AWS account. - VpcPeeringAuthorizations []*VpcPeeringAuthorization `type:"list"` -} - -// String returns the string representation -func (s DescribeVpcPeeringAuthorizationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeVpcPeeringAuthorizationsOutput) GoString() string { - return s.String() -} - -// SetVpcPeeringAuthorizations sets the VpcPeeringAuthorizations field's value. 
-func (s *DescribeVpcPeeringAuthorizationsOutput) SetVpcPeeringAuthorizations(v []*VpcPeeringAuthorization) *DescribeVpcPeeringAuthorizationsOutput { - s.VpcPeeringAuthorizations = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringConnectionsInput -type DescribeVpcPeeringConnectionsInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet. - FleetId *string `type:"string"` -} - -// String returns the string representation -func (s DescribeVpcPeeringConnectionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeVpcPeeringConnectionsInput) GoString() string { - return s.String() -} - -// SetFleetId sets the FleetId field's value. -func (s *DescribeVpcPeeringConnectionsInput) SetFleetId(v string) *DescribeVpcPeeringConnectionsInput { - s.FleetId = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeVpcPeeringConnectionsOutput -type DescribeVpcPeeringConnectionsOutput struct { - _ struct{} `type:"structure"` - - // Collection of VPC peering connection records that match the request. - VpcPeeringConnections []*VpcPeeringConnection `type:"list"` -} - -// String returns the string representation -func (s DescribeVpcPeeringConnectionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeVpcPeeringConnectionsOutput) GoString() string { - return s.String() -} - -// SetVpcPeeringConnections sets the VpcPeeringConnections field's value. 
-func (s *DescribeVpcPeeringConnectionsOutput) SetVpcPeeringConnections(v []*VpcPeeringConnection) *DescribeVpcPeeringConnectionsOutput { - s.VpcPeeringConnections = v - return s -} - -// Player information for use when creating player sessions using a game session -// placement request with StartGameSessionPlacement. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DesiredPlayerSession -type DesiredPlayerSession struct { - _ struct{} `type:"structure"` - - // Developer-defined information related to a player. Amazon GameLift does not - // use this data, so it can be formatted as needed for use in the game. - PlayerData *string `min:"1" type:"string"` - - // Unique identifier for a player to associate with the player session. - PlayerId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DesiredPlayerSession) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DesiredPlayerSession) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DesiredPlayerSession) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DesiredPlayerSession"} - if s.PlayerData != nil && len(*s.PlayerData) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlayerData", 1)) - } - if s.PlayerId != nil && len(*s.PlayerId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPlayerData sets the PlayerData field's value. -func (s *DesiredPlayerSession) SetPlayerData(v string) *DesiredPlayerSession { - s.PlayerData = &v - return s -} - -// SetPlayerId sets the PlayerId field's value. -func (s *DesiredPlayerSession) SetPlayerId(v string) *DesiredPlayerSession { - s.PlayerId = &v - return s -} - -// Current status of fleet capacity. 
The number of active instances should match -// or be in the process of matching the number of desired instances. Pending -// and terminating counts are non-zero only if fleet capacity is adjusting to -// an UpdateFleetCapacity request, or if access to resources is temporarily -// affected. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/EC2InstanceCounts -type EC2InstanceCounts struct { - _ struct{} `type:"structure"` - - // Actual number of active instances in the fleet. - ACTIVE *int64 `type:"integer"` - - // Ideal number of active instances in the fleet. - DESIRED *int64 `type:"integer"` - - // Number of active instances in the fleet that are not currently hosting a - // game session. - IDLE *int64 `type:"integer"` - - // Maximum value allowed for the fleet's instance count. - MAXIMUM *int64 `type:"integer"` - - // Minimum value allowed for the fleet's instance count. - MINIMUM *int64 `type:"integer"` - - // Number of instances in the fleet that are starting but not yet active. - PENDING *int64 `type:"integer"` - - // Number of instances in the fleet that are no longer active but haven't yet - // been terminated. 
- TERMINATING *int64 `type:"integer"` -} - -// String returns the string representation -func (s EC2InstanceCounts) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EC2InstanceCounts) GoString() string { - return s.String() -} - -// SetACTIVE sets the ACTIVE field's value. -func (s *EC2InstanceCounts) SetACTIVE(v int64) *EC2InstanceCounts { - s.ACTIVE = &v - return s -} - -// SetDESIRED sets the DESIRED field's value. -func (s *EC2InstanceCounts) SetDESIRED(v int64) *EC2InstanceCounts { - s.DESIRED = &v - return s -} - -// SetIDLE sets the IDLE field's value. -func (s *EC2InstanceCounts) SetIDLE(v int64) *EC2InstanceCounts { - s.IDLE = &v - return s -} - -// SetMAXIMUM sets the MAXIMUM field's value. -func (s *EC2InstanceCounts) SetMAXIMUM(v int64) *EC2InstanceCounts { - s.MAXIMUM = &v - return s -} - -// SetMINIMUM sets the MINIMUM field's value. -func (s *EC2InstanceCounts) SetMINIMUM(v int64) *EC2InstanceCounts { - s.MINIMUM = &v - return s -} - -// SetPENDING sets the PENDING field's value. -func (s *EC2InstanceCounts) SetPENDING(v int64) *EC2InstanceCounts { - s.PENDING = &v - return s -} - -// SetTERMINATING sets the TERMINATING field's value. -func (s *EC2InstanceCounts) SetTERMINATING(v int64) *EC2InstanceCounts { - s.TERMINATING = &v - return s -} - -// Maximum number of instances allowed based on the Amazon Elastic Compute Cloud -// (Amazon EC2) instance type. Instance limits can be retrieved by calling DescribeEC2InstanceLimits. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/EC2InstanceLimit -type EC2InstanceLimit struct { - _ struct{} `type:"structure"` - - // Number of instances of the specified type that are currently in use by this - // AWS account. - CurrentInstances *int64 `type:"integer"` - - // Name of an EC2 instance type that is supported in Amazon GameLift. 
A fleet - // instance type determines the computing resources of each instance in the - // fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift - // supports the following EC2 instance types. See Amazon EC2 Instance Types - // (http://aws.amazon.com/ec2/instance-types/) for detailed descriptions. - EC2InstanceType *string `type:"string" enum:"EC2InstanceType"` - - // Number of instances allowed. - InstanceLimit *int64 `type:"integer"` -} - -// String returns the string representation -func (s EC2InstanceLimit) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EC2InstanceLimit) GoString() string { - return s.String() -} - -// SetCurrentInstances sets the CurrentInstances field's value. -func (s *EC2InstanceLimit) SetCurrentInstances(v int64) *EC2InstanceLimit { - s.CurrentInstances = &v - return s -} - -// SetEC2InstanceType sets the EC2InstanceType field's value. -func (s *EC2InstanceLimit) SetEC2InstanceType(v string) *EC2InstanceLimit { - s.EC2InstanceType = &v - return s -} - -// SetInstanceLimit sets the InstanceLimit field's value. -func (s *EC2InstanceLimit) SetInstanceLimit(v int64) *EC2InstanceLimit { - s.InstanceLimit = &v - return s -} - -// Log entry describing an event that involves Amazon GameLift resources (such -// as a fleet). In addition to tracking activity, event codes and messages can -// provide additional information for troubleshooting and debugging problems. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/Event -type Event struct { - _ struct{} `type:"structure"` - - // Type of event being logged. The following events are currently in use: - // - // General events: - // - // * GENERIC_EVENT -- An unspecified event has occurred. - // - // Fleet creation events: - // - // * FLEET_CREATED -- A fleet record was successfully created with a status - // of NEW. Event messaging includes the fleet ID. 
- // - // * FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. - // The compressed build has started downloading to a fleet instance for installation. - // - // * FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the - // fleet instance. - // - // * FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully - // downloaded to an instance, and the build files are now being extracted - // from the uploaded build and saved to an instance. Failure at this stage - // prevents a fleet from moving to ACTIVE status. Logs for this stage display - // a list of the files that are extracted and saved on the instance. Access - // the logs by using the URL in PreSignedLogUrl. - // - // * FLEET_CREATION_RUNNING_INSTALLER – The game server build files were - // successfully extracted, and the Amazon GameLift is now running the build's - // install script (if one is included). Failure in this stage prevents a - // fleet from moving to ACTIVE status. Logs for this stage list the installation - // steps and whether or not the install completed successfully. Access the - // logs by using the URL in PreSignedLogUrl. - // - // * FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, - // and the Amazon GameLift is now verifying that the game server launch paths, - // which are specified in the fleet's run-time configuration, exist. If any - // listed launch path exists, Amazon GameLift tries to launch a game server - // process and waits for the process to report ready. Failures in this stage - // prevent a fleet from moving to ACTIVE status. Logs for this stage list - // the launch paths in the run-time configuration and indicate whether each - // is found. Access the logs by using the URL in PreSignedLogUrl. - // - // * FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. 
- // - // * FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the run-time - // configuration failed because the executable specified in a launch path - // does not exist on the instance. - // - // * FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING. - // - // * FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the run-time - // configuration failed because the executable specified in a launch path - // failed to run on the fleet instance. - // - // * FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. - // - // - // * FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete - // one of the steps in the fleet activation process. This event code indicates - // that the game build was successfully downloaded to a fleet instance, built, - // and validated, but was not able to start a server process. A possible - // reason for failure is that the game server is not reporting "process ready" - // to the Amazon GameLift service. - // - // * FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to - // ACTIVE. The fleet is now ready to host game sessions. - // - // VPC peering events: - // - // * FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established - // between the VPC for an Amazon GameLift fleet and a VPC in your AWS account. - // - // * FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. - // Event details and status information (see DescribeVpcPeeringConnections) - // provide additional detail. A common reason for peering failure is that - // the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve - // this, change the CIDR block for the VPC in your AWS account. 
For more - // information on VPC peering failures, see http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html - // (http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html) - // - // * FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully - // deleted. - // - // Other fleet events: - // - // * FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings - // (desired instances, minimum/maximum scaling limits). Event messaging includes - // the new capacity settings. - // - // * FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made - // to the fleet's game session protection policy setting. Event messaging - // includes both the old and new policy setting. - // - // * FLEET_DELETED -- A request to delete a fleet was initiated. - EventCode *string `type:"string" enum:"EventCode"` - - // Unique identifier for a fleet event. - EventId *string `min:"1" type:"string"` - - // Time stamp indicating when this event occurred. Format is a number expressed - // in Unix time as milliseconds (for example "1469498468.057"). - EventTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Additional information related to the event. - Message *string `min:"1" type:"string"` - - // Location of stored logs with additional detail that is related to the event. - // This is useful for debugging issues. The URL is valid for 15 minutes. You - // can also access fleet creation logs through the Amazon GameLift console. - PreSignedLogUrl *string `min:"1" type:"string"` - - // Unique identifier for an event resource, such as a fleet ID. - ResourceId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s Event) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Event) GoString() string { - return s.String() -} - -// SetEventCode sets the EventCode field's value. 
-func (s *Event) SetEventCode(v string) *Event { - s.EventCode = &v - return s -} - -// SetEventId sets the EventId field's value. -func (s *Event) SetEventId(v string) *Event { - s.EventId = &v - return s -} - -// SetEventTime sets the EventTime field's value. -func (s *Event) SetEventTime(v time.Time) *Event { - s.EventTime = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *Event) SetMessage(v string) *Event { - s.Message = &v - return s -} - -// SetPreSignedLogUrl sets the PreSignedLogUrl field's value. -func (s *Event) SetPreSignedLogUrl(v string) *Event { - s.PreSignedLogUrl = &v - return s -} - -// SetResourceId sets the ResourceId field's value. -func (s *Event) SetResourceId(v string) *Event { - s.ResourceId = &v - return s -} - -// General properties describing a fleet. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/FleetAttributes -type FleetAttributes struct { - _ struct{} `type:"structure"` - - // Unique identifier for a build. - BuildId *string `type:"string"` - - // Time stamp indicating when this data object was created. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). 
- CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Human-readable description of the fleet. - Description *string `min:"1" type:"string"` - - // Identifier for a fleet that is unique across all regions. - FleetArn *string `min:"1" type:"string"` - - // Unique identifier for a fleet. - FleetId *string `type:"string"` - - // Location of default log files. When a server process is shut down, Amazon - // GameLift captures and stores any log files in this location. These logs are - // in addition to game session logs; see more on game session logs in the Amazon - // GameLift Developer Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-api-server-code). - // If no default log path for a fleet is specified, Amazon GameLift automatically - // uploads logs that are stored on each instance at C:\game\logs (for Windows) - // or /local/game/logs (for Linux). Use the Amazon GameLift console to access - // stored logs. - LogPaths []*string `type:"list"` - - // Names of metric groups that this fleet is included in. In Amazon CloudWatch, - // you can view metrics for an individual fleet or aggregated metrics for fleets - // that are in a fleet metric group. A fleet can be included in only one metric - // group at a time. - MetricGroups []*string `type:"list"` - - // Descriptive label that is associated with a fleet. Fleet names do not need - // to be unique. - Name *string `min:"1" type:"string"` - - // Type of game session protection to set for all new instances started in the - // fleet. - // - // * NoProtection -- The game session can be terminated during a scale-down - // event. - // - // * FullProtection -- If the game session is in an ACTIVE status, it cannot - // be terminated during a scale-down event. - NewGameSessionProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` - - // Operating system of the fleet's computing resources. 
A fleet's operating - // system depends on the OS specified for the build that is deployed on this - // fleet. - OperatingSystem *string `type:"string" enum:"OperatingSystem"` - - // Fleet policy to limit the number of game sessions an individual player can - // create over a span of time. - ResourceCreationLimitPolicy *ResourceCreationLimitPolicy `type:"structure"` - - // Game server launch parameters specified for fleets created before 2016-08-04 - // (or AWS SDK v. 0.12.16). Server launch parameters for fleets created after - // this date are specified in the fleet's RuntimeConfiguration. - ServerLaunchParameters *string `min:"1" type:"string"` - - // Path to a game server executable in the fleet's build, specified for fleets - // created before 2016-08-04 (or AWS SDK v. 0.12.16). Server launch paths for - // fleets created after this date are specified in the fleet's RuntimeConfiguration. - ServerLaunchPath *string `min:"1" type:"string"` - - // Current status of the fleet. - // - // Possible fleet statuses include the following: - // - // * NEW -- A new fleet has been defined and desired instances is set to - // 1. - // - // * DOWNLOADING/VALIDATING/BUILDING/ACTIVATING -- Amazon GameLift is setting - // up the new fleet, creating new instances with the game build and starting - // server processes. - // - // * ACTIVE -- Hosts can now accept game sessions. - // - // * ERROR -- An error occurred when downloading, validating, building, or - // activating the fleet. - // - // * DELETING -- Hosts are responding to a delete fleet request. - // - // * TERMINATED -- The fleet no longer exists. - Status *string `type:"string" enum:"FleetStatus"` - - // Time stamp indicating when this data object was terminated. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). 
- TerminationTime *time.Time `type:"timestamp" timestampFormat:"unix"` -} - -// String returns the string representation -func (s FleetAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FleetAttributes) GoString() string { - return s.String() -} - -// SetBuildId sets the BuildId field's value. -func (s *FleetAttributes) SetBuildId(v string) *FleetAttributes { - s.BuildId = &v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *FleetAttributes) SetCreationTime(v time.Time) *FleetAttributes { - s.CreationTime = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *FleetAttributes) SetDescription(v string) *FleetAttributes { - s.Description = &v - return s -} - -// SetFleetArn sets the FleetArn field's value. -func (s *FleetAttributes) SetFleetArn(v string) *FleetAttributes { - s.FleetArn = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *FleetAttributes) SetFleetId(v string) *FleetAttributes { - s.FleetId = &v - return s -} - -// SetLogPaths sets the LogPaths field's value. -func (s *FleetAttributes) SetLogPaths(v []*string) *FleetAttributes { - s.LogPaths = v - return s -} - -// SetMetricGroups sets the MetricGroups field's value. -func (s *FleetAttributes) SetMetricGroups(v []*string) *FleetAttributes { - s.MetricGroups = v - return s -} - -// SetName sets the Name field's value. -func (s *FleetAttributes) SetName(v string) *FleetAttributes { - s.Name = &v - return s -} - -// SetNewGameSessionProtectionPolicy sets the NewGameSessionProtectionPolicy field's value. -func (s *FleetAttributes) SetNewGameSessionProtectionPolicy(v string) *FleetAttributes { - s.NewGameSessionProtectionPolicy = &v - return s -} - -// SetOperatingSystem sets the OperatingSystem field's value. 
-func (s *FleetAttributes) SetOperatingSystem(v string) *FleetAttributes { - s.OperatingSystem = &v - return s -} - -// SetResourceCreationLimitPolicy sets the ResourceCreationLimitPolicy field's value. -func (s *FleetAttributes) SetResourceCreationLimitPolicy(v *ResourceCreationLimitPolicy) *FleetAttributes { - s.ResourceCreationLimitPolicy = v - return s -} - -// SetServerLaunchParameters sets the ServerLaunchParameters field's value. -func (s *FleetAttributes) SetServerLaunchParameters(v string) *FleetAttributes { - s.ServerLaunchParameters = &v - return s -} - -// SetServerLaunchPath sets the ServerLaunchPath field's value. -func (s *FleetAttributes) SetServerLaunchPath(v string) *FleetAttributes { - s.ServerLaunchPath = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *FleetAttributes) SetStatus(v string) *FleetAttributes { - s.Status = &v - return s -} - -// SetTerminationTime sets the TerminationTime field's value. -func (s *FleetAttributes) SetTerminationTime(v time.Time) *FleetAttributes { - s.TerminationTime = &v - return s -} - -// Information about the fleet's capacity. Fleet capacity is measured in EC2 -// instances. By default, new fleets have a capacity of one instance, but can -// be updated as needed. The maximum number of instances for a fleet is determined -// by the fleet's instance type. 
-// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/FleetCapacity -type FleetCapacity struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet. - FleetId *string `type:"string"` - - // Current status of fleet capacity. - InstanceCounts *EC2InstanceCounts `type:"structure"` - - // Name of an EC2 instance type that is supported in Amazon GameLift. A fleet - // instance type determines the computing resources of each instance in the - // fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift - // supports the following EC2 instance types. See Amazon EC2 Instance Types - // (http://aws.amazon.com/ec2/instance-types/) for detailed descriptions. - InstanceType *string `type:"string" enum:"EC2InstanceType"` -} - -// String returns the string representation -func (s FleetCapacity) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FleetCapacity) GoString() string { - return s.String() -} - -// SetFleetId sets the FleetId field's value. -func (s *FleetCapacity) SetFleetId(v string) *FleetCapacity { - s.FleetId = &v - return s -} - -// SetInstanceCounts sets the InstanceCounts field's value. 
-func (s *FleetCapacity) SetInstanceCounts(v *EC2InstanceCounts) *FleetCapacity { - s.InstanceCounts = v - return s -} - -// SetInstanceType sets the InstanceType field's value. -func (s *FleetCapacity) SetInstanceType(v string) *FleetCapacity { - s.InstanceType = &v - return s -} - -// Current status of fleet utilization, including the number of game and player -// sessions being hosted. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/FleetUtilization -type FleetUtilization struct { - _ struct{} `type:"structure"` - - // Number of active game sessions currently being hosted on all instances in - // the fleet. - ActiveGameSessionCount *int64 `type:"integer"` - - // Number of server processes in an ACTIVE status currently running across all - // instances in the fleet - ActiveServerProcessCount *int64 `type:"integer"` - - // Number of active player sessions currently being hosted on all instances - // in the fleet. - CurrentPlayerSessionCount *int64 `type:"integer"` - - // Unique identifier for a fleet. - FleetId *string `type:"string"` - - // Maximum players allowed across all game sessions currently being hosted on - // all instances in the fleet. 
- MaximumPlayerSessionCount *int64 `type:"integer"` -} - -// String returns the string representation -func (s FleetUtilization) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FleetUtilization) GoString() string { - return s.String() -} - -// SetActiveGameSessionCount sets the ActiveGameSessionCount field's value. -func (s *FleetUtilization) SetActiveGameSessionCount(v int64) *FleetUtilization { - s.ActiveGameSessionCount = &v - return s -} - -// SetActiveServerProcessCount sets the ActiveServerProcessCount field's value. -func (s *FleetUtilization) SetActiveServerProcessCount(v int64) *FleetUtilization { - s.ActiveServerProcessCount = &v - return s -} - -// SetCurrentPlayerSessionCount sets the CurrentPlayerSessionCount field's value. -func (s *FleetUtilization) SetCurrentPlayerSessionCount(v int64) *FleetUtilization { - s.CurrentPlayerSessionCount = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *FleetUtilization) SetFleetId(v string) *FleetUtilization { - s.FleetId = &v - return s -} - -// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. -func (s *FleetUtilization) SetMaximumPlayerSessionCount(v int64) *FleetUtilization { - s.MaximumPlayerSessionCount = &v - return s -} - -// Set of key-value pairs that contain information about a game session. When -// included in a game session request, these properties communicate details -// to be used when setting up the new game session, such as to specify a game -// mode, level, or map. Game properties are passed to the game server process -// when initiating a new game session; the server process uses the properties -// as appropriate. For more information, see the Amazon GameLift Developer -// Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-client-api.html#gamelift-sdk-client-api-create). 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameProperty -type GameProperty struct { - _ struct{} `type:"structure"` - - // Game property identifier. - // - // Key is a required field - Key *string `type:"string" required:"true"` - - // Game property value. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s GameProperty) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GameProperty) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GameProperty) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GameProperty"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *GameProperty) SetKey(v string) *GameProperty { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *GameProperty) SetValue(v string) *GameProperty { - s.Value = &v - return s -} - -// Properties describing a game session. -// -// A game session in ACTIVE status can host players. When a game session ends, -// its status is set to TERMINATED. -// -// Once the session ends, the game session object is retained for 30 days. This -// means you can reuse idempotency token values after this time. Game session -// logs are retained for 14 days. 
-// -// Game-session-related operations include: -// -// * CreateGameSession -// -// * DescribeGameSessions -// -// * DescribeGameSessionDetails -// -// * SearchGameSessions -// -// * UpdateGameSession -// -// * GetGameSessionLogUrl -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSession -type GameSession struct { - _ struct{} `type:"structure"` - - // Time stamp indicating when this data object was created. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Unique identifier for a player. This ID is used to enforce a resource protection - // policy (if one exists), that limits the number of game sessions a player - // can create. - CreatorId *string `min:"1" type:"string"` - - // Number of players currently in the game session. - CurrentPlayerSessionCount *int64 `type:"integer"` - - // Unique identifier for a fleet that the game session is running on. - FleetId *string `type:"string"` - - // Set of developer-defined properties for a game session, formatted as a set - // of type:value pairs. These properties are included in the GameSession object, - // which is passed to the game server with a request to start a new game session - // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - GameProperties []*GameProperty `type:"list"` - - // Set of developer-defined game session properties, formatted as a single string - // value. 
This data is included in the GameSession object, which is passed to - // the game server with a request to start a new game session (see Start a Game - // Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - GameSessionData *string `min:"1" type:"string"` - - // Unique identifier for the game session. A game session ARN has the following - // format: arn:aws:gamelift:::gamesession//. - GameSessionId *string `min:"1" type:"string"` - - // IP address of the game session. To connect to a Amazon GameLift game server, - // an app needs both the IP address and port number. - IpAddress *string `type:"string"` - - // Maximum number of players that can be connected simultaneously to the game - // session. - MaximumPlayerSessionCount *int64 `type:"integer"` - - // Descriptive label that is associated with a game session. Session names do - // not need to be unique. - Name *string `min:"1" type:"string"` - - // Indicates whether or not the game session is accepting new players. - PlayerSessionCreationPolicy *string `type:"string" enum:"PlayerSessionCreationPolicy"` - - // Port number for the game session. To connect to a Amazon GameLift game server, - // an app needs both the IP address and port number. - Port *int64 `min:"1" type:"integer"` - - // Current status of the game session. A game session must have an ACTIVE status - // to have player sessions. - Status *string `type:"string" enum:"GameSessionStatus"` - - // Time stamp indicating when this data object was terminated. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). 
- TerminationTime *time.Time `type:"timestamp" timestampFormat:"unix"` -} - -// String returns the string representation -func (s GameSession) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GameSession) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *GameSession) SetCreationTime(v time.Time) *GameSession { - s.CreationTime = &v - return s -} - -// SetCreatorId sets the CreatorId field's value. -func (s *GameSession) SetCreatorId(v string) *GameSession { - s.CreatorId = &v - return s -} - -// SetCurrentPlayerSessionCount sets the CurrentPlayerSessionCount field's value. -func (s *GameSession) SetCurrentPlayerSessionCount(v int64) *GameSession { - s.CurrentPlayerSessionCount = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *GameSession) SetFleetId(v string) *GameSession { - s.FleetId = &v - return s -} - -// SetGameProperties sets the GameProperties field's value. -func (s *GameSession) SetGameProperties(v []*GameProperty) *GameSession { - s.GameProperties = v - return s -} - -// SetGameSessionData sets the GameSessionData field's value. -func (s *GameSession) SetGameSessionData(v string) *GameSession { - s.GameSessionData = &v - return s -} - -// SetGameSessionId sets the GameSessionId field's value. -func (s *GameSession) SetGameSessionId(v string) *GameSession { - s.GameSessionId = &v - return s -} - -// SetIpAddress sets the IpAddress field's value. -func (s *GameSession) SetIpAddress(v string) *GameSession { - s.IpAddress = &v - return s -} - -// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. -func (s *GameSession) SetMaximumPlayerSessionCount(v int64) *GameSession { - s.MaximumPlayerSessionCount = &v - return s -} - -// SetName sets the Name field's value. 
-func (s *GameSession) SetName(v string) *GameSession { - s.Name = &v - return s -} - -// SetPlayerSessionCreationPolicy sets the PlayerSessionCreationPolicy field's value. -func (s *GameSession) SetPlayerSessionCreationPolicy(v string) *GameSession { - s.PlayerSessionCreationPolicy = &v - return s -} - -// SetPort sets the Port field's value. -func (s *GameSession) SetPort(v int64) *GameSession { - s.Port = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *GameSession) SetStatus(v string) *GameSession { - s.Status = &v - return s -} - -// SetTerminationTime sets the TerminationTime field's value. -func (s *GameSession) SetTerminationTime(v time.Time) *GameSession { - s.TerminationTime = &v - return s -} - -// Connection information for the new game session that is created with matchmaking. -// (with StartMatchmaking). Once a match is set, the FlexMatch engine places -// the match and creates a new game session for it. This information, including -// the game session endpoint and player sessions for each player in the original -// matchmaking request, is added to the MatchmakingTicket, which can be retrieved -// by calling DescribeMatchmaking. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSessionConnectionInfo -type GameSessionConnectionInfo struct { - _ struct{} `type:"structure"` - - // Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) - // that is assigned to a game session and uniquely identifies it. - GameSessionArn *string `min:"1" type:"string"` - - // IP address of the game session. To connect to a Amazon GameLift game server, - // an app needs both the IP address and port number. - IpAddress *string `type:"string"` - - // Collection of player session IDs, one for each player ID that was included - // in the original matchmaking request. - MatchedPlayerSessions []*MatchedPlayerSession `type:"list"` - - // Port number for the game session. 
To connect to a Amazon GameLift game server, - // an app needs both the IP address and port number. - Port *int64 `min:"1" type:"integer"` -} - -// String returns the string representation -func (s GameSessionConnectionInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GameSessionConnectionInfo) GoString() string { - return s.String() -} - -// SetGameSessionArn sets the GameSessionArn field's value. -func (s *GameSessionConnectionInfo) SetGameSessionArn(v string) *GameSessionConnectionInfo { - s.GameSessionArn = &v - return s -} - -// SetIpAddress sets the IpAddress field's value. -func (s *GameSessionConnectionInfo) SetIpAddress(v string) *GameSessionConnectionInfo { - s.IpAddress = &v - return s -} - -// SetMatchedPlayerSessions sets the MatchedPlayerSessions field's value. -func (s *GameSessionConnectionInfo) SetMatchedPlayerSessions(v []*MatchedPlayerSession) *GameSessionConnectionInfo { - s.MatchedPlayerSessions = v - return s -} - -// SetPort sets the Port field's value. -func (s *GameSessionConnectionInfo) SetPort(v int64) *GameSessionConnectionInfo { - s.Port = &v - return s -} - -// A game session's properties plus the protection policy currently in force. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSessionDetail -type GameSessionDetail struct { - _ struct{} `type:"structure"` - - // Object that describes a game session. - GameSession *GameSession `type:"structure"` - - // Current status of protection for the game session. - // - // * NoProtection -- The game session can be terminated during a scale-down - // event. - // - // * FullProtection -- If the game session is in an ACTIVE status, it cannot - // be terminated during a scale-down event. 
- ProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` -} - -// String returns the string representation -func (s GameSessionDetail) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GameSessionDetail) GoString() string { - return s.String() -} - -// SetGameSession sets the GameSession field's value. -func (s *GameSessionDetail) SetGameSession(v *GameSession) *GameSessionDetail { - s.GameSession = v - return s -} - -// SetProtectionPolicy sets the ProtectionPolicy field's value. -func (s *GameSessionDetail) SetProtectionPolicy(v string) *GameSessionDetail { - s.ProtectionPolicy = &v - return s -} - -// Object that describes a StartGameSessionPlacement request. This object includes -// the full details of the original request plus the current status and start/end -// time stamps. -// -// Game session placement-related operations include: -// -// * StartGameSessionPlacement -// -// * DescribeGameSessionPlacement -// -// * StopGameSessionPlacement -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSessionPlacement -type GameSessionPlacement struct { - _ struct{} `type:"structure"` - - // Time stamp indicating when this request was completed, canceled, or timed - // out. - EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Set of developer-defined properties for a game session, formatted as a set - // of type:value pairs. These properties are included in the GameSession object, - // which is passed to the game server with a request to start a new game session - // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - GameProperties []*GameProperty `type:"list"` - - // Identifier for the game session created by this placement request. This value - // is set once the new game session is placed (placement status is FULFILLED). 
- // This identifier is unique across all regions. You can use this value as a - // GameSessionId value as needed. - GameSessionArn *string `min:"1" type:"string"` - - // Set of developer-defined game session properties, formatted as a single string - // value. This data is included in the GameSession object, which is passed to - // the game server with a request to start a new game session (see Start a Game - // Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - GameSessionData *string `min:"1" type:"string"` - - // Unique identifier for the game session. This value is set once the new game - // session is placed (placement status is FULFILLED). - GameSessionId *string `min:"1" type:"string"` - - // Descriptive label that is associated with a game session. Session names do - // not need to be unique. - GameSessionName *string `min:"1" type:"string"` - - // Descriptive label that is associated with game session queue. Queue names - // must be unique within each region. - GameSessionQueueName *string `min:"1" type:"string"` - - // Name of the region where the game session created by this placement request - // is running. This value is set once the new game session is placed (placement - // status is FULFILLED). - GameSessionRegion *string `min:"1" type:"string"` - - // IP address of the game session. To connect to a Amazon GameLift game server, - // an app needs both the IP address and port number. This value is set once - // the new game session is placed (placement status is FULFILLED). - IpAddress *string `type:"string"` - - // Maximum number of players that can be connected simultaneously to the game - // session. - MaximumPlayerSessionCount *int64 `type:"integer"` - - // Collection of information on player sessions created in response to the game - // session placement request. 
These player sessions are created only once a - // new game session is successfully placed (placement status is FULFILLED). - // This information includes the player ID (as provided in the placement request) - // and the corresponding player session ID. Retrieve full player sessions by - // calling DescribePlayerSessions with the player session ID. - PlacedPlayerSessions []*PlacedPlayerSession `type:"list"` - - // Unique identifier for a game session placement. - PlacementId *string `min:"1" type:"string"` - - // Set of values, expressed in milliseconds, indicating the amount of latency - // that a player experiences when connected to AWS regions. - PlayerLatencies []*PlayerLatency `type:"list"` - - // Port number for the game session. To connect to a Amazon GameLift game server, - // an app needs both the IP address and port number. This value is set once - // the new game session is placed (placement status is FULFILLED). - Port *int64 `min:"1" type:"integer"` - - // Time stamp indicating when this request was placed in the queue. Format is - // a number expressed in Unix time as milliseconds (for example "1469498468.057"). - StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Current status of the game session placement request. - // - // * PENDING -- The placement request is currently in the queue waiting to - // be processed. - // - // * FULFILLED -- A new game session and player sessions (if requested) have - // been successfully created. Values for GameSessionArn and GameSessionRegion - // are available. - // - // * CANCELLED -- The placement request was canceled with a call to StopGameSessionPlacement. - // - // * TIMED_OUT -- A new game session was not successfully created before - // the time limit expired. You can resubmit the placement request as needed. 
- Status *string `type:"string" enum:"GameSessionPlacementState"` -} - -// String returns the string representation -func (s GameSessionPlacement) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GameSessionPlacement) GoString() string { - return s.String() -} - -// SetEndTime sets the EndTime field's value. -func (s *GameSessionPlacement) SetEndTime(v time.Time) *GameSessionPlacement { - s.EndTime = &v - return s -} - -// SetGameProperties sets the GameProperties field's value. -func (s *GameSessionPlacement) SetGameProperties(v []*GameProperty) *GameSessionPlacement { - s.GameProperties = v - return s -} - -// SetGameSessionArn sets the GameSessionArn field's value. -func (s *GameSessionPlacement) SetGameSessionArn(v string) *GameSessionPlacement { - s.GameSessionArn = &v - return s -} - -// SetGameSessionData sets the GameSessionData field's value. -func (s *GameSessionPlacement) SetGameSessionData(v string) *GameSessionPlacement { - s.GameSessionData = &v - return s -} - -// SetGameSessionId sets the GameSessionId field's value. -func (s *GameSessionPlacement) SetGameSessionId(v string) *GameSessionPlacement { - s.GameSessionId = &v - return s -} - -// SetGameSessionName sets the GameSessionName field's value. -func (s *GameSessionPlacement) SetGameSessionName(v string) *GameSessionPlacement { - s.GameSessionName = &v - return s -} - -// SetGameSessionQueueName sets the GameSessionQueueName field's value. -func (s *GameSessionPlacement) SetGameSessionQueueName(v string) *GameSessionPlacement { - s.GameSessionQueueName = &v - return s -} - -// SetGameSessionRegion sets the GameSessionRegion field's value. -func (s *GameSessionPlacement) SetGameSessionRegion(v string) *GameSessionPlacement { - s.GameSessionRegion = &v - return s -} - -// SetIpAddress sets the IpAddress field's value. 
-func (s *GameSessionPlacement) SetIpAddress(v string) *GameSessionPlacement { - s.IpAddress = &v - return s -} - -// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. -func (s *GameSessionPlacement) SetMaximumPlayerSessionCount(v int64) *GameSessionPlacement { - s.MaximumPlayerSessionCount = &v - return s -} - -// SetPlacedPlayerSessions sets the PlacedPlayerSessions field's value. -func (s *GameSessionPlacement) SetPlacedPlayerSessions(v []*PlacedPlayerSession) *GameSessionPlacement { - s.PlacedPlayerSessions = v - return s -} - -// SetPlacementId sets the PlacementId field's value. -func (s *GameSessionPlacement) SetPlacementId(v string) *GameSessionPlacement { - s.PlacementId = &v - return s -} - -// SetPlayerLatencies sets the PlayerLatencies field's value. -func (s *GameSessionPlacement) SetPlayerLatencies(v []*PlayerLatency) *GameSessionPlacement { - s.PlayerLatencies = v - return s -} - -// SetPort sets the Port field's value. -func (s *GameSessionPlacement) SetPort(v int64) *GameSessionPlacement { - s.Port = &v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *GameSessionPlacement) SetStartTime(v time.Time) *GameSessionPlacement { - s.StartTime = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *GameSessionPlacement) SetStatus(v string) *GameSessionPlacement { - s.Status = &v - return s -} - -// Configuration of a queue that is used to process game session placement requests. -// The queue configuration identifies several game features: -// -// * The destinations where a new game session can potentially be hosted. -// Amazon GameLift tries these destinations in an order based on either the -// queue's default order or player latency information, if provided in a -// placement request. With latency information, Amazon GameLift can place -// game sessions where the majority of players are reporting the lowest possible -// latency. 
-// -// * The length of time that placement requests can wait in the queue before -// timing out. -// -// * A set of optional latency policies that protect individual players from -// high latencies, preventing game sessions from being placed where any individual -// player is reporting latency higher than a policy's maximum. -// -// Queue-related operations include: -// -// * CreateGameSessionQueue -// -// * DescribeGameSessionQueues -// -// * UpdateGameSessionQueue -// -// * DeleteGameSessionQueue -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSessionQueue -type GameSessionQueue struct { - _ struct{} `type:"structure"` - - // List of fleets that can be used to fulfill game session placement requests - // in the queue. Fleets are identified by either a fleet ARN or a fleet alias - // ARN. Destinations are listed in default preference order. - Destinations []*GameSessionQueueDestination `type:"list"` - - // Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) - // that is assigned to a game session queue and uniquely identifies it. Format - // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. - GameSessionQueueArn *string `min:"1" type:"string"` - - // Descriptive label that is associated with game session queue. Queue names - // must be unique within each region. - Name *string `min:"1" type:"string"` - - // Collection of latency policies to apply when processing game sessions placement - // requests with player latency information. Multiple policies are evaluated - // in order of the maximum latency value, starting with the lowest latency values. - // With just one policy, it is enforced at the start of the game session placement - // for the duration period. With multiple policies, each policy is enforced - // consecutively for its duration period. 
For example, a queue might enforce - // a 60-second policy followed by a 120-second policy, and then no policy for - // the remainder of the placement. - PlayerLatencyPolicies []*PlayerLatencyPolicy `type:"list"` - - // Maximum time, in seconds, that a new game session placement request remains - // in the queue. When a request exceeds this time, the game session placement - // changes to a TIMED_OUT status. - TimeoutInSeconds *int64 `type:"integer"` -} - -// String returns the string representation -func (s GameSessionQueue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GameSessionQueue) GoString() string { - return s.String() -} - -// SetDestinations sets the Destinations field's value. -func (s *GameSessionQueue) SetDestinations(v []*GameSessionQueueDestination) *GameSessionQueue { - s.Destinations = v - return s -} - -// SetGameSessionQueueArn sets the GameSessionQueueArn field's value. -func (s *GameSessionQueue) SetGameSessionQueueArn(v string) *GameSessionQueue { - s.GameSessionQueueArn = &v - return s -} - -// SetName sets the Name field's value. -func (s *GameSessionQueue) SetName(v string) *GameSessionQueue { - s.Name = &v - return s -} - -// SetPlayerLatencyPolicies sets the PlayerLatencyPolicies field's value. -func (s *GameSessionQueue) SetPlayerLatencyPolicies(v []*PlayerLatencyPolicy) *GameSessionQueue { - s.PlayerLatencyPolicies = v - return s -} - -// SetTimeoutInSeconds sets the TimeoutInSeconds field's value. -func (s *GameSessionQueue) SetTimeoutInSeconds(v int64) *GameSessionQueue { - s.TimeoutInSeconds = &v - return s -} - -// Fleet designated in a game session queue. Requests for new game sessions -// in the queue are fulfilled by starting a new game session on any destination -// configured for a queue. 
-// -// Queue-related operations include: -// -// * CreateGameSessionQueue -// -// * DescribeGameSessionQueues -// -// * UpdateGameSessionQueue -// -// * DeleteGameSessionQueue -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GameSessionQueueDestination -type GameSessionQueueDestination struct { - _ struct{} `type:"structure"` - - // Amazon Resource Name (ARN) assigned to fleet or fleet alias. ARNs, which - // include a fleet ID or alias ID and a region name, provide a unique identifier - // across all regions. - DestinationArn *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s GameSessionQueueDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GameSessionQueueDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GameSessionQueueDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GameSessionQueueDestination"} - if s.DestinationArn != nil && len(*s.DestinationArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DestinationArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDestinationArn sets the DestinationArn field's value. -func (s *GameSessionQueueDestination) SetDestinationArn(v string) *GameSessionQueueDestination { - s.DestinationArn = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrlInput -type GetGameSessionLogUrlInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for the game session to get logs for. 
- // - // GameSessionId is a required field - GameSessionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetGameSessionLogUrlInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetGameSessionLogUrlInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetGameSessionLogUrlInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetGameSessionLogUrlInput"} - if s.GameSessionId == nil { - invalidParams.Add(request.NewErrParamRequired("GameSessionId")) - } - if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGameSessionId sets the GameSessionId field's value. -func (s *GetGameSessionLogUrlInput) SetGameSessionId(v string) *GetGameSessionLogUrlInput { - s.GameSessionId = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrlOutput -type GetGameSessionLogUrlOutput struct { - _ struct{} `type:"structure"` - - // Location of the requested game session logs, available for download. - PreSignedUrl *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s GetGameSessionLogUrlOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetGameSessionLogUrlOutput) GoString() string { - return s.String() -} - -// SetPreSignedUrl sets the PreSignedUrl field's value. -func (s *GetGameSessionLogUrlOutput) SetPreSignedUrl(v string) *GetGameSessionLogUrlOutput { - s.PreSignedUrl = &v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccessInput -type GetInstanceAccessInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet that contains the instance you want access - // to. The fleet can be in any of the following statuses: ACTIVATING, ACTIVE, - // or ERROR. Fleets with an ERROR status may be accessible for a short time - // before they are deleted. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Unique identifier for an instance you want to get access to. You can access - // an instance in any status. - // - // InstanceId is a required field - InstanceId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s GetInstanceAccessInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetInstanceAccessInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetInstanceAccessInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetInstanceAccessInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - if s.InstanceId == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetId sets the FleetId field's value. -func (s *GetInstanceAccessInput) SetFleetId(v string) *GetInstanceAccessInput { - s.FleetId = &v - return s -} - -// SetInstanceId sets the InstanceId field's value. -func (s *GetInstanceAccessInput) SetInstanceId(v string) *GetInstanceAccessInput { - s.InstanceId = &v - return s -} - -// Represents the returned data in response to a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccessOutput -type GetInstanceAccessOutput struct { - _ struct{} `type:"structure"` - - // Object that contains connection information for a fleet instance, including - // IP address and access credentials. - InstanceAccess *InstanceAccess `type:"structure"` -} - -// String returns the string representation -func (s GetInstanceAccessOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetInstanceAccessOutput) GoString() string { - return s.String() -} - -// SetInstanceAccess sets the InstanceAccess field's value. -func (s *GetInstanceAccessOutput) SetInstanceAccess(v *InstanceAccess) *GetInstanceAccessOutput { - s.InstanceAccess = v - return s -} - -// Properties that describe an instance of a virtual computing resource that -// hosts one or more game servers. A fleet may contain zero or more instances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/Instance -type Instance struct { - _ struct{} `type:"structure"` - - // Time stamp indicating when this data object was created. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Unique identifier for a fleet that the instance is in. - FleetId *string `type:"string"` - - // Unique identifier for an instance. - InstanceId *string `type:"string"` - - // IP address assigned to the instance. - IpAddress *string `type:"string"` - - // Operating system that is running on this instance. - OperatingSystem *string `type:"string" enum:"OperatingSystem"` - - // Current status of the instance. Possible statuses include the following: - // - // * PENDING -- The instance is in the process of being created and launching - // server processes as defined in the fleet's run-time configuration. 
- // - // * ACTIVE -- The instance has been successfully created and at least one - // server process has successfully launched and reported back to Amazon GameLift - // that it is ready to host a game session. The instance is now considered - // ready to host game sessions. - // - // * TERMINATING -- The instance is in the process of shutting down. This - // may happen to reduce capacity during a scaling down event or to recycle - // resources in the event of a problem. - Status *string `type:"string" enum:"InstanceStatus"` - - // EC2 instance type that defines the computing resources of this instance. - Type *string `type:"string" enum:"EC2InstanceType"` -} - -// String returns the string representation -func (s Instance) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Instance) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *Instance) SetCreationTime(v time.Time) *Instance { - s.CreationTime = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *Instance) SetFleetId(v string) *Instance { - s.FleetId = &v - return s -} - -// SetInstanceId sets the InstanceId field's value. -func (s *Instance) SetInstanceId(v string) *Instance { - s.InstanceId = &v - return s -} - -// SetIpAddress sets the IpAddress field's value. -func (s *Instance) SetIpAddress(v string) *Instance { - s.IpAddress = &v - return s -} - -// SetOperatingSystem sets the OperatingSystem field's value. -func (s *Instance) SetOperatingSystem(v string) *Instance { - s.OperatingSystem = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *Instance) SetStatus(v string) *Instance { - s.Status = &v - return s -} - -// SetType sets the Type field's value. -func (s *Instance) SetType(v string) *Instance { - s.Type = &v - return s -} - -// Information required to remotely connect to a fleet instance. 
Access is requested -// by calling GetInstanceAccess. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/InstanceAccess -type InstanceAccess struct { - _ struct{} `type:"structure"` - - // Credentials required to access the instance. - Credentials *InstanceCredentials `type:"structure"` - - // Unique identifier for a fleet containing the instance being accessed. - FleetId *string `type:"string"` - - // Unique identifier for an instance being accessed. - InstanceId *string `type:"string"` - - // IP address assigned to the instance. - IpAddress *string `type:"string"` - - // Operating system that is running on the instance. - OperatingSystem *string `type:"string" enum:"OperatingSystem"` -} - -// String returns the string representation -func (s InstanceAccess) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InstanceAccess) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. -func (s *InstanceAccess) SetCredentials(v *InstanceCredentials) *InstanceAccess { - s.Credentials = v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *InstanceAccess) SetFleetId(v string) *InstanceAccess { - s.FleetId = &v - return s -} - -// SetInstanceId sets the InstanceId field's value. -func (s *InstanceAccess) SetInstanceId(v string) *InstanceAccess { - s.InstanceId = &v - return s -} - -// SetIpAddress sets the IpAddress field's value. -func (s *InstanceAccess) SetIpAddress(v string) *InstanceAccess { - s.IpAddress = &v - return s -} - -// SetOperatingSystem sets the OperatingSystem field's value. -func (s *InstanceAccess) SetOperatingSystem(v string) *InstanceAccess { - s.OperatingSystem = &v - return s -} - -// Set of credentials required to remotely access a fleet instance. Access credentials -// are requested by calling GetInstanceAccess and returned in an InstanceAccess -// object. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/InstanceCredentials -type InstanceCredentials struct { - _ struct{} `type:"structure"` - - // Secret string. For Windows instances, the secret is a password for use with - // Windows Remote Desktop. For Linux instances, it is a private key (which must - // be saved as a .pem file) for use with SSH. - Secret *string `min:"1" type:"string"` - - // User login string. - UserName *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s InstanceCredentials) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InstanceCredentials) GoString() string { - return s.String() -} - -// SetSecret sets the Secret field's value. -func (s *InstanceCredentials) SetSecret(v string) *InstanceCredentials { - s.Secret = &v - return s -} - -// SetUserName sets the UserName field's value. -func (s *InstanceCredentials) SetUserName(v string) *InstanceCredentials { - s.UserName = &v - return s -} - -// A range of IP addresses and port settings that allow inbound traffic to connect -// to server processes on Amazon GameLift. Each game session hosted on a fleet -// is assigned a unique combination of IP address and port number, which must -// fall into the fleet's allowed ranges. This combination is included in the -// GameSession object. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/IpPermission -type IpPermission struct { - _ struct{} `type:"structure"` - - // Starting value for a range of allowed port numbers. - // - // FromPort is a required field - FromPort *int64 `min:"1" type:"integer" required:"true"` - - // Range of allowed IP addresses. This value must be expressed in CIDR notation. - // Example: "000.000.000.000/[subnet mask]" or optionally the shortened version - // "0.0.0.0/[subnet mask]". 
- // - // IpRange is a required field - IpRange *string `type:"string" required:"true"` - - // Network communication protocol used by the fleet. - // - // Protocol is a required field - Protocol *string `type:"string" required:"true" enum:"IpProtocol"` - - // Ending value for a range of allowed port numbers. Port numbers are end-inclusive. - // This value must be higher than FromPort. - // - // ToPort is a required field - ToPort *int64 `min:"1" type:"integer" required:"true"` -} - -// String returns the string representation -func (s IpPermission) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s IpPermission) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *IpPermission) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IpPermission"} - if s.FromPort == nil { - invalidParams.Add(request.NewErrParamRequired("FromPort")) - } - if s.FromPort != nil && *s.FromPort < 1 { - invalidParams.Add(request.NewErrParamMinValue("FromPort", 1)) - } - if s.IpRange == nil { - invalidParams.Add(request.NewErrParamRequired("IpRange")) - } - if s.Protocol == nil { - invalidParams.Add(request.NewErrParamRequired("Protocol")) - } - if s.ToPort == nil { - invalidParams.Add(request.NewErrParamRequired("ToPort")) - } - if s.ToPort != nil && *s.ToPort < 1 { - invalidParams.Add(request.NewErrParamMinValue("ToPort", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFromPort sets the FromPort field's value. -func (s *IpPermission) SetFromPort(v int64) *IpPermission { - s.FromPort = &v - return s -} - -// SetIpRange sets the IpRange field's value. -func (s *IpPermission) SetIpRange(v string) *IpPermission { - s.IpRange = &v - return s -} - -// SetProtocol sets the Protocol field's value. 
-func (s *IpPermission) SetProtocol(v string) *IpPermission { - s.Protocol = &v - return s -} - -// SetToPort sets the ToPort field's value. -func (s *IpPermission) SetToPort(v int64) *IpPermission { - s.ToPort = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliasesInput -type ListAliasesInput struct { - _ struct{} `type:"structure"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. - Limit *int64 `min:"1" type:"integer"` - - // Descriptive label that is associated with an alias. Alias names do not need - // to be unique. - Name *string `min:"1" type:"string"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. - NextToken *string `min:"1" type:"string"` - - // Type of routing to filter results on. Use this parameter to retrieve only - // aliases of a certain type. To retrieve all aliases, leave this parameter - // empty. - // - // Possible routing types include the following: - // - // * SIMPLE -- The alias resolves to one specific fleet. Use this type when - // routing to active fleets. - // - // * TERMINAL -- The alias does not resolve to a fleet but instead can be - // used to display a message to the user. A terminal alias throws a TerminalRoutingStrategyException - // with the RoutingStrategy message embedded. - RoutingStrategyType *string `type:"string" enum:"RoutingStrategyType"` -} - -// String returns the string representation -func (s ListAliasesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListAliasesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListAliasesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListAliasesInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLimit sets the Limit field's value. -func (s *ListAliasesInput) SetLimit(v int64) *ListAliasesInput { - s.Limit = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListAliasesInput) SetName(v string) *ListAliasesInput { - s.Name = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListAliasesInput) SetNextToken(v string) *ListAliasesInput { - s.NextToken = &v - return s -} - -// SetRoutingStrategyType sets the RoutingStrategyType field's value. -func (s *ListAliasesInput) SetRoutingStrategyType(v string) *ListAliasesInput { - s.RoutingStrategyType = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliasesOutput -type ListAliasesOutput struct { - _ struct{} `type:"structure"` - - // Collection of alias records that match the list request. - Aliases []*Alias `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s ListAliasesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListAliasesOutput) GoString() string { - return s.String() -} - -// SetAliases sets the Aliases field's value. 
-func (s *ListAliasesOutput) SetAliases(v []*Alias) *ListAliasesOutput { - s.Aliases = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListAliasesOutput) SetNextToken(v string) *ListAliasesOutput { - s.NextToken = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuildsInput -type ListBuildsInput struct { - _ struct{} `type:"structure"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. - NextToken *string `min:"1" type:"string"` - - // Build status to filter results by. To retrieve all builds, leave this parameter - // empty. - // - // Possible build statuses include the following: - // - // * INITIALIZED -- A new build has been defined, but no files have been - // uploaded. You cannot create fleets for builds that are in this status. - // When a build is successfully created, the build status is set to this - // value. - // - // * READY -- The game build has been successfully uploaded. You can now - // create new fleets for this build. - // - // * FAILED -- The game build upload failed. You cannot create new fleets - // for this build. - Status *string `type:"string" enum:"BuildStatus"` -} - -// String returns the string representation -func (s ListBuildsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBuildsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListBuildsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBuildsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLimit sets the Limit field's value. -func (s *ListBuildsInput) SetLimit(v int64) *ListBuildsInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListBuildsInput) SetNextToken(v string) *ListBuildsInput { - s.NextToken = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *ListBuildsInput) SetStatus(v string) *ListBuildsInput { - s.Status = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuildsOutput -type ListBuildsOutput struct { - _ struct{} `type:"structure"` - - // Collection of build records that match the request. - Builds []*Build `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s ListBuildsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBuildsOutput) GoString() string { - return s.String() -} - -// SetBuilds sets the Builds field's value. -func (s *ListBuildsOutput) SetBuilds(v []*Build) *ListBuildsOutput { - s.Builds = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListBuildsOutput) SetNextToken(v string) *ListBuildsOutput { - s.NextToken = &v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleetsInput -type ListFleetsInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a build to return fleets for. Use this parameter to - // return only fleets using the specified build. To retrieve all fleets, leave - // this parameter empty. - BuildId *string `type:"string"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s ListFleetsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListFleetsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListFleetsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListFleetsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBuildId sets the BuildId field's value. -func (s *ListFleetsInput) SetBuildId(v string) *ListFleetsInput { - s.BuildId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *ListFleetsInput) SetLimit(v int64) *ListFleetsInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListFleetsInput) SetNextToken(v string) *ListFleetsInput { - s.NextToken = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleetsOutput -type ListFleetsOutput struct { - _ struct{} `type:"structure"` - - // Set of fleet IDs matching the list request. You can retrieve additional information - // about all returned fleets by passing this result set to a call to DescribeFleetAttributes, - // DescribeFleetCapacity, or DescribeFleetUtilization. - FleetIds []*string `min:"1" type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s ListFleetsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListFleetsOutput) GoString() string { - return s.String() -} - -// SetFleetIds sets the FleetIds field's value. -func (s *ListFleetsOutput) SetFleetIds(v []*string) *ListFleetsOutput { - s.FleetIds = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListFleetsOutput) SetNextToken(v string) *ListFleetsOutput { - s.NextToken = &v - return s -} - -// Represents a new player session that is created as a result of a successful -// FlexMatch match. A successful match automatically creates new player sessions -// for every player ID in the original matchmaking request. -// -// When players connect to the match's game session, they must include both -// player ID and player session ID in order to claim their assigned player slot. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/MatchedPlayerSession -type MatchedPlayerSession struct { - _ struct{} `type:"structure"` - - // Unique identifier for a player - PlayerId *string `min:"1" type:"string"` - - // Unique identifier for a player session - PlayerSessionId *string `type:"string"` -} - -// String returns the string representation -func (s MatchedPlayerSession) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MatchedPlayerSession) GoString() string { - return s.String() -} - -// SetPlayerId sets the PlayerId field's value. -func (s *MatchedPlayerSession) SetPlayerId(v string) *MatchedPlayerSession { - s.PlayerId = &v - return s -} - -// SetPlayerSessionId sets the PlayerSessionId field's value. -func (s *MatchedPlayerSession) SetPlayerSessionId(v string) *MatchedPlayerSession { - s.PlayerSessionId = &v - return s -} - -// Guidelines for use with FlexMatch to match players into games. All matchmaking -// requests must specify a matchmaking configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/MatchmakingConfiguration -type MatchmakingConfiguration struct { - _ struct{} `type:"structure"` - - // Flag that determines whether or not a match that was created with this configuration - // must be accepted by the matched players. To require acceptance, set to TRUE. - AcceptanceRequired *bool `type:"boolean"` - - // Length of time (in seconds) to wait for players to accept a proposed match. - // If any player rejects the match or fails to accept before the timeout, the - // ticket continues to look for an acceptable match. - AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"` - - // Number of player slots in a match to keep open for future players. 
For example, - // if the configuration's rule set specifies a match for a single 12-person - // team, and the additional player count is set to 2, only 10 players are selected - // for the match. - AdditionalPlayerCount *int64 `type:"integer"` - - // Time stamp indicating when this data object was created. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Information to attached to all events related to the matchmaking configuration. - CustomEventData *string `type:"string"` - - // Descriptive label that is associated with matchmaking configuration. - Description *string `min:"1" type:"string"` - - // Set of developer-defined properties for a game session, formatted as a set - // of type:value pairs. These properties are included in the GameSession object, - // which is passed to the game server with a request to start a new game session - // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - // This information is added to the new GameSession object that is created for - // a successful match. - GameProperties []*GameProperty `type:"list"` - - // Set of developer-defined game session properties, formatted as a single string - // value. This data is included in the GameSession object, which is passed to - // the game server with a request to start a new game session (see Start a Game - // Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - // This information is added to the new GameSession object that is created for - // a successful match. - GameSessionData *string `min:"1" type:"string"` - - // Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) - // that is assigned to a game session queue and uniquely identifies it. 
Format - // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. - // These queues are used when placing game sessions for matches that are created - // with this matchmaking configuration. Queues can be located in any region. - GameSessionQueueArns []*string `type:"list"` - - // Unique identifier for a matchmaking configuration. This name is used to identify - // the configuration associated with a matchmaking request or ticket. - Name *string `min:"1" type:"string"` - - // SNS topic ARN that is set up to receive matchmaking notifications. - NotificationTarget *string `type:"string"` - - // Maximum duration, in seconds, that a matchmaking ticket can remain in process - // before timing out. Requests that time out can be resubmitted as needed. - RequestTimeoutSeconds *int64 `min:"1" type:"integer"` - - // Unique identifier for a matchmaking rule set to use with this configuration. - // A matchmaking configuration can only use rule sets that are defined in the - // same region. - RuleSetName *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s MatchmakingConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MatchmakingConfiguration) GoString() string { - return s.String() -} - -// SetAcceptanceRequired sets the AcceptanceRequired field's value. -func (s *MatchmakingConfiguration) SetAcceptanceRequired(v bool) *MatchmakingConfiguration { - s.AcceptanceRequired = &v - return s -} - -// SetAcceptanceTimeoutSeconds sets the AcceptanceTimeoutSeconds field's value. -func (s *MatchmakingConfiguration) SetAcceptanceTimeoutSeconds(v int64) *MatchmakingConfiguration { - s.AcceptanceTimeoutSeconds = &v - return s -} - -// SetAdditionalPlayerCount sets the AdditionalPlayerCount field's value. 
-func (s *MatchmakingConfiguration) SetAdditionalPlayerCount(v int64) *MatchmakingConfiguration { - s.AdditionalPlayerCount = &v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *MatchmakingConfiguration) SetCreationTime(v time.Time) *MatchmakingConfiguration { - s.CreationTime = &v - return s -} - -// SetCustomEventData sets the CustomEventData field's value. -func (s *MatchmakingConfiguration) SetCustomEventData(v string) *MatchmakingConfiguration { - s.CustomEventData = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *MatchmakingConfiguration) SetDescription(v string) *MatchmakingConfiguration { - s.Description = &v - return s -} - -// SetGameProperties sets the GameProperties field's value. -func (s *MatchmakingConfiguration) SetGameProperties(v []*GameProperty) *MatchmakingConfiguration { - s.GameProperties = v - return s -} - -// SetGameSessionData sets the GameSessionData field's value. -func (s *MatchmakingConfiguration) SetGameSessionData(v string) *MatchmakingConfiguration { - s.GameSessionData = &v - return s -} - -// SetGameSessionQueueArns sets the GameSessionQueueArns field's value. -func (s *MatchmakingConfiguration) SetGameSessionQueueArns(v []*string) *MatchmakingConfiguration { - s.GameSessionQueueArns = v - return s -} - -// SetName sets the Name field's value. -func (s *MatchmakingConfiguration) SetName(v string) *MatchmakingConfiguration { - s.Name = &v - return s -} - -// SetNotificationTarget sets the NotificationTarget field's value. -func (s *MatchmakingConfiguration) SetNotificationTarget(v string) *MatchmakingConfiguration { - s.NotificationTarget = &v - return s -} - -// SetRequestTimeoutSeconds sets the RequestTimeoutSeconds field's value. -func (s *MatchmakingConfiguration) SetRequestTimeoutSeconds(v int64) *MatchmakingConfiguration { - s.RequestTimeoutSeconds = &v - return s -} - -// SetRuleSetName sets the RuleSetName field's value. 
-func (s *MatchmakingConfiguration) SetRuleSetName(v string) *MatchmakingConfiguration { - s.RuleSetName = &v - return s -} - -// Set of rule statements, used with FlexMatch, that determine how to build -// a certain kind of player match. Each rule set describes a type of group to -// be created and defines the parameters for acceptable player matches. Rule -// sets are used in MatchmakingConfiguration objects. -// -// A rule set may define the following elements for a match. For detailed information -// and examples showing how to construct a rule set, see Create Matchmaking -// Rules for Your Game (http://docs.aws.amazon.com/gamelift/latest/developerguide/match-rules.html). -// -// * Teams -- Required. A rule set must define one or multiple teams for -// the match and set minimum and maximum team sizes. For example, a rule -// set might describe a 4x4 match that requires all eight slots to be filled. -// -// -// * Player attributes -- Optional. These attributes specify a set of player -// characteristics to evaluate when looking for a match. Matchmaking requests -// that use a rule set with player attributes must provide the corresponding -// attribute values. For example, an attribute might specify a player's skill -// or level. -// -// * Rules -- Optional. Rules define how to evaluate potential players for -// a match based on player attributes. A rule might specify minimum requirements -// for individual players--such as each player must meet a certain skill -// level, or may describe an entire group--such as all teams must be evenly -// matched or have at least one player in a certain role. -// -// * Expansions -- Optional. Expansions allow you to relax the rules after -// a period of time if no acceptable matches are found. This feature lets -// you balance getting players into games in a reasonable amount of time -// instead of making them wait indefinitely for the best possible match. 
-// For example, you might use an expansion to increase the maximum skill
-// variance between players after 30 seconds.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/MatchmakingRuleSet
-type MatchmakingRuleSet struct {
-	_ struct{} `type:"structure"`
-
-	// Time stamp indicating when this data object was created. Format is a number
-	// expressed in Unix time as milliseconds (for example "1469498468.057").
-	CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
-
-	// Collection of matchmaking rules, formatted as a JSON string. (Note that comments
-	// are not allowed in JSON, but most elements support a description field.)
-	//
-	// RuleSetBody is a required field
-	RuleSetBody *string `min:"1" type:"string" required:"true"`
-
-	// Unique identifier for a matchmaking rule set
-	RuleSetName *string `min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s MatchmakingRuleSet) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s MatchmakingRuleSet) GoString() string {
-	return s.String()
-}
-
-// SetCreationTime sets the CreationTime field's value.
-func (s *MatchmakingRuleSet) SetCreationTime(v time.Time) *MatchmakingRuleSet {
-	s.CreationTime = &v
-	return s
-}
-
-// SetRuleSetBody sets the RuleSetBody field's value.
-func (s *MatchmakingRuleSet) SetRuleSetBody(v string) *MatchmakingRuleSet {
-	s.RuleSetBody = &v
-	return s
-}
-
-// SetRuleSetName sets the RuleSetName field's value.
-func (s *MatchmakingRuleSet) SetRuleSetName(v string) *MatchmakingRuleSet {
-	s.RuleSetName = &v
-	return s
-}
-
-// Ticket generated to track the progress of a matchmaking request. Each ticket
-// is uniquely identified by a ticket ID, supplied by the requester, when creating
-// a matchmaking request with StartMatchmaking. Tickets can be retrieved by
-// calling DescribeMatchmaking with the ticket ID.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/MatchmakingTicket -type MatchmakingTicket struct { - _ struct{} `type:"structure"` - - // Name of the MatchmakingConfiguration that is used with this ticket. Matchmaking - // configurations determine how players are grouped into a match and how a new - // game session is created for the match. - ConfigurationName *string `min:"1" type:"string"` - - // Time stamp indicating when the matchmaking request stopped being processed - // due to successful completion, timeout, or cancellation. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). - EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Average amount of time (in seconds) that players are currently waiting for - // a match. If there is not enough recent data, this property may be empty. - EstimatedWaitTime *int64 `type:"integer"` - - // Identifier and connection information of the game session created for the - // match. This information is added to the ticket only after the matchmaking - // request has been successfully completed. - GameSessionConnectionInfo *GameSessionConnectionInfo `type:"structure"` - - // A set of Player objects, each representing a player to find matches for. - // Players are identified by a unique player ID and may include latency data - // for use during matchmaking. If the ticket is in status COMPLETED, the Player - // objects include the team the players were assigned to in the resulting match. - Players []*Player `type:"list"` - - // Time stamp indicating when this matchmaking request was received. Format - // is a number expressed in Unix time as milliseconds (for example "1469498468.057"). - StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Current status of the matchmaking request. - // - // * QUEUED -- The matchmaking request has been received and is currently - // waiting to be processed. 
- // - // * SEARCHING -- The matchmaking request is currently being processed. - // - // * REQUIRES_ACCEPTANCE -- A match has been proposed and the players must - // accept the match (see AcceptMatch). This status is used only with requests - // that use a matchmaking configuration with a player acceptance requirement. - // - // * PLACING -- The FlexMatch engine has matched players and is in the process - // of placing a new game session for the match. - // - // * COMPLETED -- Players have been matched and a game session is ready to - // host the players. A ticket in this state contains the necessary connection - // information for players. - // - // * FAILED -- The matchmaking request was not completed. Tickets with players - // who fail to accept a proposed match are placed in FAILED status; new matchmaking - // requests can be submitted for these players. - // - // * CANCELLED -- The matchmaking request was canceled with a call to StopMatchmaking. - // - // * TIMED_OUT -- The matchmaking request was not completed within the duration - // specified in the matchmaking configuration. Matchmaking requests that - // time out can be resubmitted. - Status *string `type:"string" enum:"MatchmakingConfigurationStatus"` - - // Additional information about the current status. - StatusMessage *string `type:"string"` - - // Code to explain the current status. For example, a status reason may indicate - // when a ticket has returned to SEARCHING status after a proposed match fails - // to receive player acceptances. - StatusReason *string `type:"string"` - - // Unique identifier for a matchmaking ticket. - TicketId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s MatchmakingTicket) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MatchmakingTicket) GoString() string { - return s.String() -} - -// SetConfigurationName sets the ConfigurationName field's value. 
-func (s *MatchmakingTicket) SetConfigurationName(v string) *MatchmakingTicket { - s.ConfigurationName = &v - return s -} - -// SetEndTime sets the EndTime field's value. -func (s *MatchmakingTicket) SetEndTime(v time.Time) *MatchmakingTicket { - s.EndTime = &v - return s -} - -// SetEstimatedWaitTime sets the EstimatedWaitTime field's value. -func (s *MatchmakingTicket) SetEstimatedWaitTime(v int64) *MatchmakingTicket { - s.EstimatedWaitTime = &v - return s -} - -// SetGameSessionConnectionInfo sets the GameSessionConnectionInfo field's value. -func (s *MatchmakingTicket) SetGameSessionConnectionInfo(v *GameSessionConnectionInfo) *MatchmakingTicket { - s.GameSessionConnectionInfo = v - return s -} - -// SetPlayers sets the Players field's value. -func (s *MatchmakingTicket) SetPlayers(v []*Player) *MatchmakingTicket { - s.Players = v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *MatchmakingTicket) SetStartTime(v time.Time) *MatchmakingTicket { - s.StartTime = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *MatchmakingTicket) SetStatus(v string) *MatchmakingTicket { - s.Status = &v - return s -} - -// SetStatusMessage sets the StatusMessage field's value. -func (s *MatchmakingTicket) SetStatusMessage(v string) *MatchmakingTicket { - s.StatusMessage = &v - return s -} - -// SetStatusReason sets the StatusReason field's value. -func (s *MatchmakingTicket) SetStatusReason(v string) *MatchmakingTicket { - s.StatusReason = &v - return s -} - -// SetTicketId sets the TicketId field's value. -func (s *MatchmakingTicket) SetTicketId(v string) *MatchmakingTicket { - s.TicketId = &v - return s -} - -// Information about a player session that was created as part of a StartGameSessionPlacement -// request. This object contains only the player ID and player session ID. To -// retrieve full details on a player session, call DescribePlayerSessions with -// the player session ID. 
-// -// Player-session-related operations include: -// -// * CreatePlayerSession -// -// * CreatePlayerSessions -// -// * DescribePlayerSessions -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PlacedPlayerSession -type PlacedPlayerSession struct { - _ struct{} `type:"structure"` - - // Unique identifier for a player that is associated with this player session. - PlayerId *string `min:"1" type:"string"` - - // Unique identifier for a player session. - PlayerSessionId *string `type:"string"` -} - -// String returns the string representation -func (s PlacedPlayerSession) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PlacedPlayerSession) GoString() string { - return s.String() -} - -// SetPlayerId sets the PlayerId field's value. -func (s *PlacedPlayerSession) SetPlayerId(v string) *PlacedPlayerSession { - s.PlayerId = &v - return s -} - -// SetPlayerSessionId sets the PlayerSessionId field's value. -func (s *PlacedPlayerSession) SetPlayerSessionId(v string) *PlacedPlayerSession { - s.PlayerSessionId = &v - return s -} - -// Represents a player in matchmaking. When starting a matchmaking request, -// a player has a player ID, attributes, and may have latency data. Team information -// is added after a match has been successfully completed. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/Player -type Player struct { - _ struct{} `type:"structure"` - - // Set of values, expressed in milliseconds, indicating the amount of latency - // that a player experiences when connected to AWS regions. If this property - // is present, FlexMatch considers placing the match only in regions for which - // latency is reported. 
- // - // If a matchmaker has a rule that evaluates player latency, players must report - // latency in order to be matched. If no latency is reported in this scenario, - // FlexMatch assumes that no regions are available to the player and the ticket - // is not matchable. - LatencyInMs map[string]*int64 `type:"map"` - - // Collection of name:value pairs containing player information for use in matchmaking. - // Player attribute names need to match playerAttributes names in the rule set - // being used. Example: "PlayerAttributes": {"skill": {"N": "23"}, "gameMode": - // {"S": "deathmatch"}}. - PlayerAttributes map[string]*AttributeValue `type:"map"` - - // Unique identifier for a player - PlayerId *string `min:"1" type:"string"` - - // Name of the team that the player is assigned to in a match. Team names are - // defined in a matchmaking rule set. - Team *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s Player) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Player) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Player) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Player"} - if s.PlayerId != nil && len(*s.PlayerId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1)) - } - if s.Team != nil && len(*s.Team) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Team", 1)) - } - if s.PlayerAttributes != nil { - for i, v := range s.PlayerAttributes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PlayerAttributes", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLatencyInMs sets the LatencyInMs field's value. 
-func (s *Player) SetLatencyInMs(v map[string]*int64) *Player { - s.LatencyInMs = v - return s -} - -// SetPlayerAttributes sets the PlayerAttributes field's value. -func (s *Player) SetPlayerAttributes(v map[string]*AttributeValue) *Player { - s.PlayerAttributes = v - return s -} - -// SetPlayerId sets the PlayerId field's value. -func (s *Player) SetPlayerId(v string) *Player { - s.PlayerId = &v - return s -} - -// SetTeam sets the Team field's value. -func (s *Player) SetTeam(v string) *Player { - s.Team = &v - return s -} - -// Regional latency information for a player, used when requesting a new game -// session with StartGameSessionPlacement. This value indicates the amount of -// time lag that exists when the player is connected to a fleet in the specified -// region. The relative difference between a player's latency values for multiple -// regions are used to determine which fleets are best suited to place a new -// game session for the player. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PlayerLatency -type PlayerLatency struct { - _ struct{} `type:"structure"` - - // Amount of time that represents the time lag experienced by the player when - // connected to the specified region. - LatencyInMilliseconds *float64 `type:"float"` - - // Unique identifier for a player associated with the latency data. - PlayerId *string `min:"1" type:"string"` - - // Name of the region that is associated with the latency value. - RegionIdentifier *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s PlayerLatency) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PlayerLatency) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PlayerLatency) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PlayerLatency"} - if s.PlayerId != nil && len(*s.PlayerId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1)) - } - if s.RegionIdentifier != nil && len(*s.RegionIdentifier) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RegionIdentifier", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLatencyInMilliseconds sets the LatencyInMilliseconds field's value. -func (s *PlayerLatency) SetLatencyInMilliseconds(v float64) *PlayerLatency { - s.LatencyInMilliseconds = &v - return s -} - -// SetPlayerId sets the PlayerId field's value. -func (s *PlayerLatency) SetPlayerId(v string) *PlayerLatency { - s.PlayerId = &v - return s -} - -// SetRegionIdentifier sets the RegionIdentifier field's value. -func (s *PlayerLatency) SetRegionIdentifier(v string) *PlayerLatency { - s.RegionIdentifier = &v - return s -} - -// Queue setting that determines the highest latency allowed for individual -// players when placing a game session. When a latency policy is in force, a -// game session cannot be placed at any destination in a region where a player -// is reporting latency higher than the cap. Latency policies are only enforced -// when the placement request contains player latency information. -// -// Queue-related operations include: -// -// * CreateGameSessionQueue -// -// * DescribeGameSessionQueues -// -// * UpdateGameSessionQueue -// -// * DeleteGameSessionQueue -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PlayerLatencyPolicy -type PlayerLatencyPolicy struct { - _ struct{} `type:"structure"` - - // The maximum latency value that is allowed for any player, in milliseconds. - // All policies must have a value set for this property. 
- MaximumIndividualPlayerLatencyMilliseconds *int64 `type:"integer"` - - // The length of time, in seconds, that the policy is enforced while placing - // a new game session. A null value for this property means that the policy - // is enforced until the queue times out. - PolicyDurationSeconds *int64 `type:"integer"` -} - -// String returns the string representation -func (s PlayerLatencyPolicy) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PlayerLatencyPolicy) GoString() string { - return s.String() -} - -// SetMaximumIndividualPlayerLatencyMilliseconds sets the MaximumIndividualPlayerLatencyMilliseconds field's value. -func (s *PlayerLatencyPolicy) SetMaximumIndividualPlayerLatencyMilliseconds(v int64) *PlayerLatencyPolicy { - s.MaximumIndividualPlayerLatencyMilliseconds = &v - return s -} - -// SetPolicyDurationSeconds sets the PolicyDurationSeconds field's value. -func (s *PlayerLatencyPolicy) SetPolicyDurationSeconds(v int64) *PlayerLatencyPolicy { - s.PolicyDurationSeconds = &v - return s -} - -// Properties describing a player session. Player session objects are created -// either by creating a player session for a specific game session, or as part -// of a game session placement. A player session represents either a player -// reservation for a game session (status RESERVED) or actual player activity -// in a game session (status ACTIVE). A player session object (including player -// data) is automatically passed to a game session when the player connects -// to the game session and is validated. -// -// When a player disconnects, the player session status changes to COMPLETED. -// Once the session ends, the player session object is retained for 30 days -// and then removed. 
-// -// Player-session-related operations include: -// -// * CreatePlayerSession -// -// * CreatePlayerSessions -// -// * DescribePlayerSessions -// -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PlayerSession -type PlayerSession struct { - _ struct{} `type:"structure"` - - // Time stamp indicating when this data object was created. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Unique identifier for a fleet that the player's game session is running on. - FleetId *string `type:"string"` - - // Unique identifier for the game session that the player session is connected - // to. - GameSessionId *string `min:"1" type:"string"` - - // IP address of the game session. To connect to a Amazon GameLift game server, - // an app needs both the IP address and port number. - IpAddress *string `type:"string"` - - // Developer-defined information related to a player. Amazon GameLift does not - // use this data, so it can be formatted as needed for use in the game. - PlayerData *string `min:"1" type:"string"` - - // Unique identifier for a player that is associated with this player session. - PlayerId *string `min:"1" type:"string"` - - // Unique identifier for a player session. - PlayerSessionId *string `type:"string"` - - // Port number for the game session. To connect to a Amazon GameLift server - // process, an app needs both the IP address and port number. - Port *int64 `min:"1" type:"integer"` - - // Current status of the player session. - // - // Possible player session statuses include the following: - // - // * RESERVED -- The player session request has been received, but the player - // has not yet connected to the server process and/or been validated. 
- // - // * ACTIVE -- The player has been validated by the server process and is - // currently connected. - // - // * COMPLETED -- The player connection has been dropped. - // - // * TIMEDOUT -- A player session request was received, but the player did - // not connect and/or was not validated within the timeout limit (60 seconds). - Status *string `type:"string" enum:"PlayerSessionStatus"` - - // Time stamp indicating when this data object was terminated. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). - TerminationTime *time.Time `type:"timestamp" timestampFormat:"unix"` -} - -// String returns the string representation -func (s PlayerSession) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PlayerSession) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *PlayerSession) SetCreationTime(v time.Time) *PlayerSession { - s.CreationTime = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *PlayerSession) SetFleetId(v string) *PlayerSession { - s.FleetId = &v - return s -} - -// SetGameSessionId sets the GameSessionId field's value. -func (s *PlayerSession) SetGameSessionId(v string) *PlayerSession { - s.GameSessionId = &v - return s -} - -// SetIpAddress sets the IpAddress field's value. -func (s *PlayerSession) SetIpAddress(v string) *PlayerSession { - s.IpAddress = &v - return s -} - -// SetPlayerData sets the PlayerData field's value. -func (s *PlayerSession) SetPlayerData(v string) *PlayerSession { - s.PlayerData = &v - return s -} - -// SetPlayerId sets the PlayerId field's value. -func (s *PlayerSession) SetPlayerId(v string) *PlayerSession { - s.PlayerId = &v - return s -} - -// SetPlayerSessionId sets the PlayerSessionId field's value. 
-func (s *PlayerSession) SetPlayerSessionId(v string) *PlayerSession { - s.PlayerSessionId = &v - return s -} - -// SetPort sets the Port field's value. -func (s *PlayerSession) SetPort(v int64) *PlayerSession { - s.Port = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *PlayerSession) SetStatus(v string) *PlayerSession { - s.Status = &v - return s -} - -// SetTerminationTime sets the TerminationTime field's value. -func (s *PlayerSession) SetTerminationTime(v time.Time) *PlayerSession { - s.TerminationTime = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PutScalingPolicyInput -type PutScalingPolicyInput struct { - _ struct{} `type:"structure"` - - // Comparison operator to use when measuring the metric against the threshold - // value. - // - // ComparisonOperator is a required field - ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperatorType"` - - // Length of time (in minutes) the metric must be at or beyond the threshold - // before a scaling event is triggered. - // - // EvaluationPeriods is a required field - EvaluationPeriods *int64 `min:"1" type:"integer" required:"true"` - - // Unique identifier for a fleet to apply this policy to. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Name of the Amazon GameLift-defined metric that is used to trigger an adjustment. - // - // * ActivatingGameSessions -- number of game sessions in the process of - // being created (game session status = ACTIVATING). - // - // * ActiveGameSessions -- number of game sessions currently running (game - // session status = ACTIVE). - // - // * CurrentPlayerSessions -- number of active or reserved player sessions - // (player session status = ACTIVE or RESERVED). 
- // - // * AvailablePlayerSessions -- number of player session slots currently - // available in active game sessions across the fleet, calculated by subtracting - // a game session's current player session count from its maximum player - // session count. This number includes game sessions that are not currently - // accepting players (game session PlayerSessionCreationPolicy = DENY_ALL). - // - // * ActiveInstances -- number of instances currently running a game session. - // - // * IdleInstances -- number of instances not currently running a game session. - // - // MetricName is a required field - MetricName *string `type:"string" required:"true" enum:"MetricName"` - - // Descriptive label that is associated with a scaling policy. Policy names - // do not need to be unique. A fleet can have only one scaling policy with the - // same name. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Amount of adjustment to make, based on the scaling adjustment type. - // - // ScalingAdjustment is a required field - ScalingAdjustment *int64 `type:"integer" required:"true"` - - // Type of adjustment to make to a fleet's instance count (see FleetCapacity): - // - // * ChangeInCapacity -- add (or subtract) the scaling adjustment value from - // the current instance count. Positive values scale up while negative values - // scale down. - // - // * ExactCapacity -- set the instance count to the scaling adjustment value. - // - // * PercentChangeInCapacity -- increase or reduce the current instance count - // by the scaling adjustment, read as a percentage. Positive values scale - // up while negative values scale down; for example, a value of "-10" scales - // the fleet down by 10%. - // - // ScalingAdjustmentType is a required field - ScalingAdjustmentType *string `type:"string" required:"true" enum:"ScalingAdjustmentType"` - - // Metric value used to trigger a scaling event. 
- // - // Threshold is a required field - Threshold *float64 `type:"double" required:"true"` -} - -// String returns the string representation -func (s PutScalingPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutScalingPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutScalingPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutScalingPolicyInput"} - if s.ComparisonOperator == nil { - invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) - } - if s.EvaluationPeriods == nil { - invalidParams.Add(request.NewErrParamRequired("EvaluationPeriods")) - } - if s.EvaluationPeriods != nil && *s.EvaluationPeriods < 1 { - invalidParams.Add(request.NewErrParamMinValue("EvaluationPeriods", 1)) - } - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - if s.MetricName == nil { - invalidParams.Add(request.NewErrParamRequired("MetricName")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.ScalingAdjustment == nil { - invalidParams.Add(request.NewErrParamRequired("ScalingAdjustment")) - } - if s.ScalingAdjustmentType == nil { - invalidParams.Add(request.NewErrParamRequired("ScalingAdjustmentType")) - } - if s.Threshold == nil { - invalidParams.Add(request.NewErrParamRequired("Threshold")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetComparisonOperator sets the ComparisonOperator field's value. -func (s *PutScalingPolicyInput) SetComparisonOperator(v string) *PutScalingPolicyInput { - s.ComparisonOperator = &v - return s -} - -// SetEvaluationPeriods sets the EvaluationPeriods field's value. 
-func (s *PutScalingPolicyInput) SetEvaluationPeriods(v int64) *PutScalingPolicyInput { - s.EvaluationPeriods = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *PutScalingPolicyInput) SetFleetId(v string) *PutScalingPolicyInput { - s.FleetId = &v - return s -} - -// SetMetricName sets the MetricName field's value. -func (s *PutScalingPolicyInput) SetMetricName(v string) *PutScalingPolicyInput { - s.MetricName = &v - return s -} - -// SetName sets the Name field's value. -func (s *PutScalingPolicyInput) SetName(v string) *PutScalingPolicyInput { - s.Name = &v - return s -} - -// SetScalingAdjustment sets the ScalingAdjustment field's value. -func (s *PutScalingPolicyInput) SetScalingAdjustment(v int64) *PutScalingPolicyInput { - s.ScalingAdjustment = &v - return s -} - -// SetScalingAdjustmentType sets the ScalingAdjustmentType field's value. -func (s *PutScalingPolicyInput) SetScalingAdjustmentType(v string) *PutScalingPolicyInput { - s.ScalingAdjustmentType = &v - return s -} - -// SetThreshold sets the Threshold field's value. -func (s *PutScalingPolicyInput) SetThreshold(v float64) *PutScalingPolicyInput { - s.Threshold = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PutScalingPolicyOutput -type PutScalingPolicyOutput struct { - _ struct{} `type:"structure"` - - // Descriptive label that is associated with a scaling policy. Policy names - // do not need to be unique. - Name *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s PutScalingPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutScalingPolicyOutput) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. 
-func (s *PutScalingPolicyOutput) SetName(v string) *PutScalingPolicyOutput { - s.Name = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RequestUploadCredentialsInput -type RequestUploadCredentialsInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a build to get credentials for. - // - // BuildId is a required field - BuildId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s RequestUploadCredentialsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestUploadCredentialsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RequestUploadCredentialsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RequestUploadCredentialsInput"} - if s.BuildId == nil { - invalidParams.Add(request.NewErrParamRequired("BuildId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBuildId sets the BuildId field's value. -func (s *RequestUploadCredentialsInput) SetBuildId(v string) *RequestUploadCredentialsInput { - s.BuildId = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RequestUploadCredentialsOutput -type RequestUploadCredentialsOutput struct { - _ struct{} `type:"structure"` - - // Amazon S3 path and key, identifying where the game build files are stored. - StorageLocation *S3Location `type:"structure"` - - // AWS credentials required when uploading a game build to the storage location. - // These credentials have a limited lifespan and are valid only for the build - // they were issued for. 
- UploadCredentials *AwsCredentials `type:"structure"` -} - -// String returns the string representation -func (s RequestUploadCredentialsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestUploadCredentialsOutput) GoString() string { - return s.String() -} - -// SetStorageLocation sets the StorageLocation field's value. -func (s *RequestUploadCredentialsOutput) SetStorageLocation(v *S3Location) *RequestUploadCredentialsOutput { - s.StorageLocation = v - return s -} - -// SetUploadCredentials sets the UploadCredentials field's value. -func (s *RequestUploadCredentialsOutput) SetUploadCredentials(v *AwsCredentials) *RequestUploadCredentialsOutput { - s.UploadCredentials = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResolveAliasInput -type ResolveAliasInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for the alias you want to resolve. - // - // AliasId is a required field - AliasId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ResolveAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ResolveAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ResolveAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResolveAliasInput"} - if s.AliasId == nil { - invalidParams.Add(request.NewErrParamRequired("AliasId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasId sets the AliasId field's value. -func (s *ResolveAliasInput) SetAliasId(v string) *ResolveAliasInput { - s.AliasId = &v - return s -} - -// Represents the returned data in response to a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResolveAliasOutput -type ResolveAliasOutput struct { - _ struct{} `type:"structure"` - - // Fleet identifier that is associated with the requested alias. - FleetId *string `type:"string"` -} - -// String returns the string representation -func (s ResolveAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ResolveAliasOutput) GoString() string { - return s.String() -} - -// SetFleetId sets the FleetId field's value. -func (s *ResolveAliasOutput) SetFleetId(v string) *ResolveAliasOutput { - s.FleetId = &v - return s -} - -// Policy that limits the number of game sessions a player can create on the -// same fleet. This optional policy gives game owners control over how players -// can consume available game server resources. A resource creation policy makes -// the following statement: "An individual player can create a maximum number -// of new game sessions within a specified time period". -// -// The policy is evaluated when a player tries to create a new game session. -// For example, with a policy of 10 new game sessions and a time period of 60 -// minutes, on receiving a CreateGameSession request, Amazon GameLift checks -// that the player (identified by CreatorId) has created fewer than 10 game -// sessions in the past 60 minutes. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResourceCreationLimitPolicy -type ResourceCreationLimitPolicy struct { - _ struct{} `type:"structure"` - - // Maximum number of game sessions that an individual can create during the - // policy period. - NewGameSessionsPerCreator *int64 `type:"integer"` - - // Time span used in evaluating the resource creation limit policy. 
- PolicyPeriodInMinutes *int64 `type:"integer"` -} - -// String returns the string representation -func (s ResourceCreationLimitPolicy) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ResourceCreationLimitPolicy) GoString() string { - return s.String() -} - -// SetNewGameSessionsPerCreator sets the NewGameSessionsPerCreator field's value. -func (s *ResourceCreationLimitPolicy) SetNewGameSessionsPerCreator(v int64) *ResourceCreationLimitPolicy { - s.NewGameSessionsPerCreator = &v - return s -} - -// SetPolicyPeriodInMinutes sets the PolicyPeriodInMinutes field's value. -func (s *ResourceCreationLimitPolicy) SetPolicyPeriodInMinutes(v int64) *ResourceCreationLimitPolicy { - s.PolicyPeriodInMinutes = &v - return s -} - -// Routing configuration for a fleet alias. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RoutingStrategy -type RoutingStrategy struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet that the alias points to. - FleetId *string `type:"string"` - - // Message text to be used with a terminal routing strategy. - Message *string `type:"string"` - - // Type of routing strategy. 
- // - // Possible routing types include the following: - // - // * SIMPLE -- The alias resolves to one specific fleet. Use this type when - // routing to active fleets. - // - // * TERMINAL -- The alias does not resolve to a fleet but instead can be - // used to display a message to the user. A terminal alias throws a TerminalRoutingStrategyException - // with the RoutingStrategy message embedded. - Type *string `type:"string" enum:"RoutingStrategyType"` -} - -// String returns the string representation -func (s RoutingStrategy) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RoutingStrategy) GoString() string { - return s.String() -} - -// SetFleetId sets the FleetId field's value. -func (s *RoutingStrategy) SetFleetId(v string) *RoutingStrategy { - s.FleetId = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *RoutingStrategy) SetMessage(v string) *RoutingStrategy { - s.Message = &v - return s -} - -// SetType sets the Type field's value. -func (s *RoutingStrategy) SetType(v string) *RoutingStrategy { - s.Type = &v - return s -} - -// A collection of server process configurations that describe what processes -// to run on each instance in a fleet. All fleets must have a run-time configuration. -// Each instance in the fleet launches the server processes specified in the -// run-time configuration and launches new ones as existing processes end. Each -// instance regularly checks for an updated run-time configuration and follows -// the new instructions. -// -// The run-time configuration enables the instances in a fleet to run multiple -// processes simultaneously. Potential scenarios are as follows: (1) Run multiple -// processes of a single game server executable to maximize usage of your hosting -// resources. 
(2) Run one or more processes of different build executables, -// such as your game server executable and a related program, or two or more -// different versions of a game server. (3) Run multiple processes of a single -// game server but with different launch parameters, for example to run one -// process on each instance in debug mode. -// -// A Amazon GameLift instance is limited to 50 processes running simultaneously. -// A run-time configuration must specify fewer than this limit. To calculate -// the total number of processes specified in a run-time configuration, add -// the values of the ConcurrentExecutions parameter for each ServerProcess object -// in the run-time configuration. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RuntimeConfiguration -type RuntimeConfiguration struct { - _ struct{} `type:"structure"` - - // Maximum amount of time (in seconds) that a game session can remain in status - // ACTIVATING. If the game session is not active before the timeout, activation - // is terminated and the game session status is changed to TERMINATED. - GameSessionActivationTimeoutSeconds *int64 `min:"1" type:"integer"` - - // Maximum number of game sessions with status ACTIVATING to allow on an instance - // simultaneously. 
This setting limits the amount of instance resources that - // can be used for new game activations at any one time. - MaxConcurrentGameSessionActivations *int64 `min:"1" type:"integer"` - - // Collection of server process configurations that describe which server processes - // to run on each instance in a fleet. - ServerProcesses []*ServerProcess `min:"1" type:"list"` -} - -// String returns the string representation -func (s RuntimeConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RuntimeConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RuntimeConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RuntimeConfiguration"} - if s.GameSessionActivationTimeoutSeconds != nil && *s.GameSessionActivationTimeoutSeconds < 1 { - invalidParams.Add(request.NewErrParamMinValue("GameSessionActivationTimeoutSeconds", 1)) - } - if s.MaxConcurrentGameSessionActivations != nil && *s.MaxConcurrentGameSessionActivations < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxConcurrentGameSessionActivations", 1)) - } - if s.ServerProcesses != nil && len(s.ServerProcesses) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ServerProcesses", 1)) - } - if s.ServerProcesses != nil { - for i, v := range s.ServerProcesses { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ServerProcesses", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGameSessionActivationTimeoutSeconds sets the GameSessionActivationTimeoutSeconds field's value. 
-func (s *RuntimeConfiguration) SetGameSessionActivationTimeoutSeconds(v int64) *RuntimeConfiguration { - s.GameSessionActivationTimeoutSeconds = &v - return s -} - -// SetMaxConcurrentGameSessionActivations sets the MaxConcurrentGameSessionActivations field's value. -func (s *RuntimeConfiguration) SetMaxConcurrentGameSessionActivations(v int64) *RuntimeConfiguration { - s.MaxConcurrentGameSessionActivations = &v - return s -} - -// SetServerProcesses sets the ServerProcesses field's value. -func (s *RuntimeConfiguration) SetServerProcesses(v []*ServerProcess) *RuntimeConfiguration { - s.ServerProcesses = v - return s -} - -// Location in Amazon Simple Storage Service (Amazon S3) where build files can -// be stored for access by Amazon GameLift. This location is specified in a -// CreateBuild request. For more details, see the Create a Build with Files -// in Amazon S3 (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build). -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/S3Location -type S3Location struct { - _ struct{} `type:"structure"` - - // Amazon S3 bucket identifier. This is the name of your S3 bucket. - Bucket *string `min:"1" type:"string"` - - // Name of the zip file containing your build files. - Key *string `min:"1" type:"string"` - - // Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) - // for the access role that allows Amazon GameLift to access your S3 bucket. - RoleArn *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s S3Location) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s S3Location) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *S3Location) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "S3Location"} - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.RoleArn != nil && len(*s.RoleArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *S3Location) SetBucket(v string) *S3Location { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *S3Location) SetKey(v string) *S3Location { - s.Key = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *S3Location) SetRoleArn(v string) *S3Location { - s.RoleArn = &v - return s -} - -// Rule that controls how a fleet is scaled. Scaling policies are uniquely identified -// by the combination of name and fleet ID. -// -// Fleet-related operations include: -// -// * CreateFleet -// -// * ListFleets -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet capacity: -// -// DescribeFleetCapacity -// -// UpdateFleetCapacity -// -// PutScalingPolicy (automatic scaling) -// -// DescribeScalingPolicies (automatic scaling) -// -// DeleteScalingPolicy (automatic scaling) -// -// DescribeEC2InstanceLimits -// -// * DeleteFleet -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ScalingPolicy -type ScalingPolicy struct { - _ struct{} `type:"structure"` - - // Comparison operator to use when measuring a metric against the threshold - // value. 
- ComparisonOperator *string `type:"string" enum:"ComparisonOperatorType"` - - // Length of time (in minutes) the metric must be at or beyond the threshold - // before a scaling event is triggered. - EvaluationPeriods *int64 `min:"1" type:"integer"` - - // Unique identifier for a fleet that is associated with this scaling policy. - FleetId *string `type:"string"` - - // Name of the Amazon GameLift-defined metric that is used to trigger an adjustment. - // - // * ActivatingGameSessions -- number of game sessions in the process of - // being created (game session status = ACTIVATING). - // - // * ActiveGameSessions -- number of game sessions currently running (game - // session status = ACTIVE). - // - // * CurrentPlayerSessions -- number of active or reserved player sessions - // (player session status = ACTIVE or RESERVED). - // - // * AvailablePlayerSessions -- number of player session slots currently - // available in active game sessions across the fleet, calculated by subtracting - // a game session's current player session count from its maximum player - // session count. This number does include game sessions that are not currently - // accepting players (game session PlayerSessionCreationPolicy = DENY_ALL). - // - // * ActiveInstances -- number of instances currently running a game session. - // - // * IdleInstances -- number of instances not currently running a game session. - MetricName *string `type:"string" enum:"MetricName"` - - // Descriptive label that is associated with a scaling policy. Policy names - // do not need to be unique. - Name *string `min:"1" type:"string"` - - // Amount of adjustment to make, based on the scaling adjustment type. - ScalingAdjustment *int64 `type:"integer"` - - // Type of adjustment to make to a fleet's instance count (see FleetCapacity): - // - // * ChangeInCapacity -- add (or subtract) the scaling adjustment value from - // the current instance count. Positive values scale up while negative values - // scale down. 
- // - // * ExactCapacity -- set the instance count to the scaling adjustment value. - // - // * PercentChangeInCapacity -- increase or reduce the current instance count - // by the scaling adjustment, read as a percentage. Positive values scale - // up while negative values scale down. - ScalingAdjustmentType *string `type:"string" enum:"ScalingAdjustmentType"` - - // Current status of the scaling policy. The scaling policy is only in force - // when in an ACTIVE status. - // - // * ACTIVE -- The scaling policy is currently in force. - // - // * UPDATE_REQUESTED -- A request to update the scaling policy has been - // received. - // - // * UPDATING -- A change is being made to the scaling policy. - // - // * DELETE_REQUESTED -- A request to delete the scaling policy has been - // received. - // - // * DELETING -- The scaling policy is being deleted. - // - // * DELETED -- The scaling policy has been deleted. - // - // * ERROR -- An error occurred in creating the policy. It should be removed - // and recreated. - Status *string `type:"string" enum:"ScalingStatusType"` - - // Metric value used to trigger a scaling event. - Threshold *float64 `type:"double"` -} - -// String returns the string representation -func (s ScalingPolicy) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ScalingPolicy) GoString() string { - return s.String() -} - -// SetComparisonOperator sets the ComparisonOperator field's value. -func (s *ScalingPolicy) SetComparisonOperator(v string) *ScalingPolicy { - s.ComparisonOperator = &v - return s -} - -// SetEvaluationPeriods sets the EvaluationPeriods field's value. -func (s *ScalingPolicy) SetEvaluationPeriods(v int64) *ScalingPolicy { - s.EvaluationPeriods = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *ScalingPolicy) SetFleetId(v string) *ScalingPolicy { - s.FleetId = &v - return s -} - -// SetMetricName sets the MetricName field's value. 
-func (s *ScalingPolicy) SetMetricName(v string) *ScalingPolicy { - s.MetricName = &v - return s -} - -// SetName sets the Name field's value. -func (s *ScalingPolicy) SetName(v string) *ScalingPolicy { - s.Name = &v - return s -} - -// SetScalingAdjustment sets the ScalingAdjustment field's value. -func (s *ScalingPolicy) SetScalingAdjustment(v int64) *ScalingPolicy { - s.ScalingAdjustment = &v - return s -} - -// SetScalingAdjustmentType sets the ScalingAdjustmentType field's value. -func (s *ScalingPolicy) SetScalingAdjustmentType(v string) *ScalingPolicy { - s.ScalingAdjustmentType = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *ScalingPolicy) SetStatus(v string) *ScalingPolicy { - s.Status = &v - return s -} - -// SetThreshold sets the Threshold field's value. -func (s *ScalingPolicy) SetThreshold(v float64) *ScalingPolicy { - s.Threshold = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/SearchGameSessionsInput -type SearchGameSessionsInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for an alias associated with the fleet to search for active - // game sessions. Each request must reference either a fleet ID or alias ID, - // but not both. - AliasId *string `type:"string"` - - // String containing the search criteria for the session search. If no filter - // expression is included, the request returns results for all game sessions - // in the fleet that are in ACTIVE status. - // - // A filter expression can contain one or multiple conditions. Each condition - // consists of the following: - // - // * Operand -- Name of a game session attribute. Valid values are gameSessionName, - // gameSessionId, creationTimeMillis, playerSessionCount, maximumSessions, - // hasAvailablePlayerSessions. - // - // * Comparator -- Valid comparators are: =, <>, <, >, <=, >=. - // - // * Value -- Value to be searched for. 
Values can be numbers, boolean values - // (true/false) or strings. String values are case sensitive, enclosed in - // single quotes. Special characters must be escaped. Boolean and string - // values can only be used with the comparators = and <>. For example, the - // following filter expression searches on gameSessionName: "FilterExpression": - // "gameSessionName = 'Matt\\'s Awesome Game 1'". - // - // To chain multiple conditions in a single expression, use the logical keywords - // AND, OR, and NOT and parentheses as needed. For example: x AND y AND NOT - // z, NOT (x OR y). - // - // Session search evaluates conditions from left to right using the following - // precedence rules: - // - // =, <>, <, >, <=, >= - // - // Parentheses - // - // NOT - // - // AND - // - // OR - // - // For example, this filter expression retrieves game sessions hosting at least - // ten players that have an open player slot: "maximumSessions>=10 AND hasAvailablePlayerSessions=true". - FilterExpression *string `min:"1" type:"string"` - - // Unique identifier for a fleet to search for active game sessions. Each request - // must reference either a fleet ID or alias ID, but not both. - FleetId *string `type:"string"` - - // Maximum number of results to return. Use this parameter with NextToken to - // get results as a set of sequential pages. The maximum number of results returned - // is 20, even if this value is not set or is set higher than 20. - Limit *int64 `min:"1" type:"integer"` - - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. - NextToken *string `min:"1" type:"string"` - - // Instructions on how to sort the search results. If no sort expression is - // included, the request returns results in random order. 
A sort expression - // consists of the following elements: - // - // * Operand -- Name of a game session attribute. Valid values are gameSessionName, - // gameSessionId, creationTimeMillis, playerSessionCount, maximumSessions, - // hasAvailablePlayerSessions. - // - // * Order -- Valid sort orders are ASC (ascending) and DESC (descending). - // - // For example, this sort expression returns the oldest active sessions first: - // "SortExpression": "creationTimeMillis ASC". Results with a null value for - // the sort operand are returned at the end of the list. - SortExpression *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s SearchGameSessionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SearchGameSessionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SearchGameSessionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SearchGameSessionsInput"} - if s.FilterExpression != nil && len(*s.FilterExpression) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FilterExpression", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.SortExpression != nil && len(*s.SortExpression) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SortExpression", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasId sets the AliasId field's value. -func (s *SearchGameSessionsInput) SetAliasId(v string) *SearchGameSessionsInput { - s.AliasId = &v - return s -} - -// SetFilterExpression sets the FilterExpression field's value. 
-func (s *SearchGameSessionsInput) SetFilterExpression(v string) *SearchGameSessionsInput { - s.FilterExpression = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *SearchGameSessionsInput) SetFleetId(v string) *SearchGameSessionsInput { - s.FleetId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *SearchGameSessionsInput) SetLimit(v int64) *SearchGameSessionsInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *SearchGameSessionsInput) SetNextToken(v string) *SearchGameSessionsInput { - s.NextToken = &v - return s -} - -// SetSortExpression sets the SortExpression field's value. -func (s *SearchGameSessionsInput) SetSortExpression(v string) *SearchGameSessionsInput { - s.SortExpression = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/SearchGameSessionsOutput -type SearchGameSessionsOutput struct { - _ struct{} `type:"structure"` - - // Collection of objects containing game session properties for each session - // matching the request. - GameSessions []*GameSession `type:"list"` - - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end - // of the list. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s SearchGameSessionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SearchGameSessionsOutput) GoString() string { - return s.String() -} - -// SetGameSessions sets the GameSessions field's value. -func (s *SearchGameSessionsOutput) SetGameSessions(v []*GameSession) *SearchGameSessionsOutput { - s.GameSessions = v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *SearchGameSessionsOutput) SetNextToken(v string) *SearchGameSessionsOutput { - s.NextToken = &v - return s -} - -// A set of instructions for launching server processes on each instance in -// a fleet. Each instruction set identifies the location of the server executable, -// optional launch parameters, and the number of server processes with this -// configuration to maintain concurrently on the instance. Server process configurations -// make up a fleet's RuntimeConfiguration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ServerProcess -type ServerProcess struct { - _ struct{} `type:"structure"` - - // Number of server processes using this configuration to run concurrently on - // an instance. - // - // ConcurrentExecutions is a required field - ConcurrentExecutions *int64 `min:"1" type:"integer" required:"true"` - - // Location of the server executable in a game build. All game builds are installed - // on instances at the root : for Windows instances C:\game, and for Linux instances - // /local/game. A Windows game build with an executable file located at MyGame\latest\server.exe - // must have a launch path of "C:\game\MyGame\latest\server.exe". A Linux game - // build with an executable file located at MyGame/latest/server.exe must have - // a launch path of "/local/game/MyGame/latest/server.exe". - // - // LaunchPath is a required field - LaunchPath *string `min:"1" type:"string" required:"true"` - - // Optional list of parameters to pass to the server executable on launch. - Parameters *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s ServerProcess) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ServerProcess) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ServerProcess) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ServerProcess"} - if s.ConcurrentExecutions == nil { - invalidParams.Add(request.NewErrParamRequired("ConcurrentExecutions")) - } - if s.ConcurrentExecutions != nil && *s.ConcurrentExecutions < 1 { - invalidParams.Add(request.NewErrParamMinValue("ConcurrentExecutions", 1)) - } - if s.LaunchPath == nil { - invalidParams.Add(request.NewErrParamRequired("LaunchPath")) - } - if s.LaunchPath != nil && len(*s.LaunchPath) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LaunchPath", 1)) - } - if s.Parameters != nil && len(*s.Parameters) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConcurrentExecutions sets the ConcurrentExecutions field's value. -func (s *ServerProcess) SetConcurrentExecutions(v int64) *ServerProcess { - s.ConcurrentExecutions = &v - return s -} - -// SetLaunchPath sets the LaunchPath field's value. -func (s *ServerProcess) SetLaunchPath(v string) *ServerProcess { - s.LaunchPath = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *ServerProcess) SetParameters(v string) *ServerProcess { - s.Parameters = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartGameSessionPlacementInput -type StartGameSessionPlacementInput struct { - _ struct{} `type:"structure"` - - // Set of information on each player to create a player session for. - DesiredPlayerSessions []*DesiredPlayerSession `type:"list"` - - // Set of developer-defined properties for a game session, formatted as a set - // of type:value pairs. 
These properties are included in the GameSession object, - // which is passed to the game server with a request to start a new game session - // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - GameProperties []*GameProperty `type:"list"` - - // Set of developer-defined game session properties, formatted as a single string - // value. This data is included in the GameSession object, which is passed to - // the game server with a request to start a new game session (see Start a Game - // Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - GameSessionData *string `min:"1" type:"string"` - - // Descriptive label that is associated with a game session. Session names do - // not need to be unique. - GameSessionName *string `min:"1" type:"string"` - - // Name of the queue to use to place the new game session. - // - // GameSessionQueueName is a required field - GameSessionQueueName *string `min:"1" type:"string" required:"true"` - - // Maximum number of players that can be connected simultaneously to the game - // session. - // - // MaximumPlayerSessionCount is a required field - MaximumPlayerSessionCount *int64 `type:"integer" required:"true"` - - // Unique identifier to assign to the new game session placement. This value - // is developer-defined. The value must be unique across all regions and cannot - // be reused unless you are resubmitting a canceled or timed-out placement request. - // - // PlacementId is a required field - PlacementId *string `min:"1" type:"string" required:"true"` - - // Set of values, expressed in milliseconds, indicating the amount of latency - // that a player experiences when connected to AWS regions. This information - // is used to try to place the new game session where it can offer the best - // possible gameplay experience for the players. 
- PlayerLatencies []*PlayerLatency `type:"list"` -} - -// String returns the string representation -func (s StartGameSessionPlacementInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartGameSessionPlacementInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StartGameSessionPlacementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartGameSessionPlacementInput"} - if s.GameSessionData != nil && len(*s.GameSessionData) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionData", 1)) - } - if s.GameSessionName != nil && len(*s.GameSessionName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionName", 1)) - } - if s.GameSessionQueueName == nil { - invalidParams.Add(request.NewErrParamRequired("GameSessionQueueName")) - } - if s.GameSessionQueueName != nil && len(*s.GameSessionQueueName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionQueueName", 1)) - } - if s.MaximumPlayerSessionCount == nil { - invalidParams.Add(request.NewErrParamRequired("MaximumPlayerSessionCount")) - } - if s.PlacementId == nil { - invalidParams.Add(request.NewErrParamRequired("PlacementId")) - } - if s.PlacementId != nil && len(*s.PlacementId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlacementId", 1)) - } - if s.DesiredPlayerSessions != nil { - for i, v := range s.DesiredPlayerSessions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DesiredPlayerSessions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.GameProperties != nil { - for i, v := range s.GameProperties { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GameProperties", i), err.(request.ErrInvalidParams)) - } - } - } - if s.PlayerLatencies != nil { - for 
i, v := range s.PlayerLatencies { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PlayerLatencies", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDesiredPlayerSessions sets the DesiredPlayerSessions field's value. -func (s *StartGameSessionPlacementInput) SetDesiredPlayerSessions(v []*DesiredPlayerSession) *StartGameSessionPlacementInput { - s.DesiredPlayerSessions = v - return s -} - -// SetGameProperties sets the GameProperties field's value. -func (s *StartGameSessionPlacementInput) SetGameProperties(v []*GameProperty) *StartGameSessionPlacementInput { - s.GameProperties = v - return s -} - -// SetGameSessionData sets the GameSessionData field's value. -func (s *StartGameSessionPlacementInput) SetGameSessionData(v string) *StartGameSessionPlacementInput { - s.GameSessionData = &v - return s -} - -// SetGameSessionName sets the GameSessionName field's value. -func (s *StartGameSessionPlacementInput) SetGameSessionName(v string) *StartGameSessionPlacementInput { - s.GameSessionName = &v - return s -} - -// SetGameSessionQueueName sets the GameSessionQueueName field's value. -func (s *StartGameSessionPlacementInput) SetGameSessionQueueName(v string) *StartGameSessionPlacementInput { - s.GameSessionQueueName = &v - return s -} - -// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. -func (s *StartGameSessionPlacementInput) SetMaximumPlayerSessionCount(v int64) *StartGameSessionPlacementInput { - s.MaximumPlayerSessionCount = &v - return s -} - -// SetPlacementId sets the PlacementId field's value. -func (s *StartGameSessionPlacementInput) SetPlacementId(v string) *StartGameSessionPlacementInput { - s.PlacementId = &v - return s -} - -// SetPlayerLatencies sets the PlayerLatencies field's value. 
-func (s *StartGameSessionPlacementInput) SetPlayerLatencies(v []*PlayerLatency) *StartGameSessionPlacementInput { - s.PlayerLatencies = v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartGameSessionPlacementOutput -type StartGameSessionPlacementOutput struct { - _ struct{} `type:"structure"` - - // Object that describes the newly created game session placement. This object - // includes all the information provided in the request, as well as start/end - // time stamps and placement status. - GameSessionPlacement *GameSessionPlacement `type:"structure"` -} - -// String returns the string representation -func (s StartGameSessionPlacementOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartGameSessionPlacementOutput) GoString() string { - return s.String() -} - -// SetGameSessionPlacement sets the GameSessionPlacement field's value. -func (s *StartGameSessionPlacementOutput) SetGameSessionPlacement(v *GameSessionPlacement) *StartGameSessionPlacementOutput { - s.GameSessionPlacement = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartMatchmakingInput -type StartMatchmakingInput struct { - _ struct{} `type:"structure"` - - // Name of the matchmaking configuration to use for this request. Matchmaking - // configurations must exist in the same region as this request. - // - // ConfigurationName is a required field - ConfigurationName *string `min:"1" type:"string" required:"true"` - - // Information on each player to be matched. This information must include a - // player ID, and may contain player attributes and latency data to be used - // in the matchmaking process. After a successful match, Player objects contain - // the name of the team the player is assigned to. 
- // - // Players is a required field - Players []*Player `type:"list" required:"true"` - - // Unique identifier for a matchmaking ticket. Use this identifier to track - // the matchmaking ticket status and retrieve match results. - TicketId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s StartMatchmakingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartMatchmakingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StartMatchmakingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartMatchmakingInput"} - if s.ConfigurationName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigurationName")) - } - if s.ConfigurationName != nil && len(*s.ConfigurationName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationName", 1)) - } - if s.Players == nil { - invalidParams.Add(request.NewErrParamRequired("Players")) - } - if s.TicketId != nil && len(*s.TicketId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TicketId", 1)) - } - if s.Players != nil { - for i, v := range s.Players { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Players", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConfigurationName sets the ConfigurationName field's value. -func (s *StartMatchmakingInput) SetConfigurationName(v string) *StartMatchmakingInput { - s.ConfigurationName = &v - return s -} - -// SetPlayers sets the Players field's value. -func (s *StartMatchmakingInput) SetPlayers(v []*Player) *StartMatchmakingInput { - s.Players = v - return s -} - -// SetTicketId sets the TicketId field's value. 
-func (s *StartMatchmakingInput) SetTicketId(v string) *StartMatchmakingInput { - s.TicketId = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StartMatchmakingOutput -type StartMatchmakingOutput struct { - _ struct{} `type:"structure"` - - // Ticket representing the matchmaking request. This object include the information - // included in the request, ticket status, and match results as generated during - // the matchmaking process. - MatchmakingTicket *MatchmakingTicket `type:"structure"` -} - -// String returns the string representation -func (s StartMatchmakingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartMatchmakingOutput) GoString() string { - return s.String() -} - -// SetMatchmakingTicket sets the MatchmakingTicket field's value. -func (s *StartMatchmakingOutput) SetMatchmakingTicket(v *MatchmakingTicket) *StartMatchmakingOutput { - s.MatchmakingTicket = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopGameSessionPlacementInput -type StopGameSessionPlacementInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a game session placement to cancel. - // - // PlacementId is a required field - PlacementId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s StopGameSessionPlacementInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopGameSessionPlacementInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *StopGameSessionPlacementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopGameSessionPlacementInput"} - if s.PlacementId == nil { - invalidParams.Add(request.NewErrParamRequired("PlacementId")) - } - if s.PlacementId != nil && len(*s.PlacementId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PlacementId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPlacementId sets the PlacementId field's value. -func (s *StopGameSessionPlacementInput) SetPlacementId(v string) *StopGameSessionPlacementInput { - s.PlacementId = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopGameSessionPlacementOutput -type StopGameSessionPlacementOutput struct { - _ struct{} `type:"structure"` - - // Object that describes the canceled game session placement, with CANCELLED - // status and an end time stamp. - GameSessionPlacement *GameSessionPlacement `type:"structure"` -} - -// String returns the string representation -func (s StopGameSessionPlacementOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopGameSessionPlacementOutput) GoString() string { - return s.String() -} - -// SetGameSessionPlacement sets the GameSessionPlacement field's value. -func (s *StopGameSessionPlacementOutput) SetGameSessionPlacement(v *GameSessionPlacement) *StopGameSessionPlacementOutput { - s.GameSessionPlacement = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmakingInput -type StopMatchmakingInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a matchmaking ticket. 
- // - // TicketId is a required field - TicketId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s StopMatchmakingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopMatchmakingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopMatchmakingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopMatchmakingInput"} - if s.TicketId == nil { - invalidParams.Add(request.NewErrParamRequired("TicketId")) - } - if s.TicketId != nil && len(*s.TicketId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TicketId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTicketId sets the TicketId field's value. -func (s *StopMatchmakingInput) SetTicketId(v string) *StopMatchmakingInput { - s.TicketId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmakingOutput -type StopMatchmakingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s StopMatchmakingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopMatchmakingOutput) GoString() string { - return s.String() -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateAliasInput -type UpdateAliasInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet alias. Specify the alias you want to update. - // - // AliasId is a required field - AliasId *string `type:"string" required:"true"` - - // Human-readable description of an alias. - Description *string `min:"1" type:"string"` - - // Descriptive label that is associated with an alias. Alias names do not need - // to be unique. 
- Name *string `min:"1" type:"string"` - - // Object that specifies the fleet and routing type to use for the alias. - RoutingStrategy *RoutingStrategy `type:"structure"` -} - -// String returns the string representation -func (s UpdateAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateAliasInput"} - if s.AliasId == nil { - invalidParams.Add(request.NewErrParamRequired("AliasId")) - } - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasId sets the AliasId field's value. -func (s *UpdateAliasInput) SetAliasId(v string) *UpdateAliasInput { - s.AliasId = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *UpdateAliasInput) SetDescription(v string) *UpdateAliasInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateAliasInput) SetName(v string) *UpdateAliasInput { - s.Name = &v - return s -} - -// SetRoutingStrategy sets the RoutingStrategy field's value. -func (s *UpdateAliasInput) SetRoutingStrategy(v *RoutingStrategy) *UpdateAliasInput { - s.RoutingStrategy = v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateAliasOutput -type UpdateAliasOutput struct { - _ struct{} `type:"structure"` - - // Object that contains the updated alias configuration. 
- Alias *Alias `type:"structure"` -} - -// String returns the string representation -func (s UpdateAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateAliasOutput) GoString() string { - return s.String() -} - -// SetAlias sets the Alias field's value. -func (s *UpdateAliasOutput) SetAlias(v *Alias) *UpdateAliasOutput { - s.Alias = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateBuildInput -type UpdateBuildInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a build to update. - // - // BuildId is a required field - BuildId *string `type:"string" required:"true"` - - // Descriptive label that is associated with a build. Build names do not need - // to be unique. - Name *string `min:"1" type:"string"` - - // Version that is associated with this build. Version strings do not need to - // be unique. - Version *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s UpdateBuildInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateBuildInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateBuildInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateBuildInput"} - if s.BuildId == nil { - invalidParams.Add(request.NewErrParamRequired("BuildId")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBuildId sets the BuildId field's value. 
-func (s *UpdateBuildInput) SetBuildId(v string) *UpdateBuildInput { - s.BuildId = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateBuildInput) SetName(v string) *UpdateBuildInput { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *UpdateBuildInput) SetVersion(v string) *UpdateBuildInput { - s.Version = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateBuildOutput -type UpdateBuildOutput struct { - _ struct{} `type:"structure"` - - // Object that contains the updated build record. - Build *Build `type:"structure"` -} - -// String returns the string representation -func (s UpdateBuildOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateBuildOutput) GoString() string { - return s.String() -} - -// SetBuild sets the Build field's value. -func (s *UpdateBuildOutput) SetBuild(v *Build) *UpdateBuildOutput { - s.Build = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetAttributesInput -type UpdateFleetAttributesInput struct { - _ struct{} `type:"structure"` - - // Human-readable description of a fleet. - Description *string `min:"1" type:"string"` - - // Unique identifier for a fleet to update attribute metadata for. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Names of metric groups to include this fleet in. Amazon CloudWatch uses a - // fleet metric group is to aggregate metrics from multiple fleets. Use an existing - // metric group name to add this fleet to the group. Or use a new name to create - // a new metric group. A fleet can only be included in one metric group at a - // time. 
- MetricGroups []*string `type:"list"` - - // Descriptive label that is associated with a fleet. Fleet names do not need - // to be unique. - Name *string `min:"1" type:"string"` - - // Game session protection policy to apply to all new instances created in this - // fleet. Instances that already exist are not affected. You can set protection - // for individual instances using UpdateGameSession. - // - // * NoProtection -- The game session can be terminated during a scale-down - // event. - // - // * FullProtection -- If the game session is in an ACTIVE status, it cannot - // be terminated during a scale-down event. - NewGameSessionProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` - - // Policy that limits the number of game sessions an individual player can create - // over a span of time. - ResourceCreationLimitPolicy *ResourceCreationLimitPolicy `type:"structure"` -} - -// String returns the string representation -func (s UpdateFleetAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateFleetAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateFleetAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateFleetAttributesInput"} - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. 
-func (s *UpdateFleetAttributesInput) SetDescription(v string) *UpdateFleetAttributesInput { - s.Description = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *UpdateFleetAttributesInput) SetFleetId(v string) *UpdateFleetAttributesInput { - s.FleetId = &v - return s -} - -// SetMetricGroups sets the MetricGroups field's value. -func (s *UpdateFleetAttributesInput) SetMetricGroups(v []*string) *UpdateFleetAttributesInput { - s.MetricGroups = v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateFleetAttributesInput) SetName(v string) *UpdateFleetAttributesInput { - s.Name = &v - return s -} - -// SetNewGameSessionProtectionPolicy sets the NewGameSessionProtectionPolicy field's value. -func (s *UpdateFleetAttributesInput) SetNewGameSessionProtectionPolicy(v string) *UpdateFleetAttributesInput { - s.NewGameSessionProtectionPolicy = &v - return s -} - -// SetResourceCreationLimitPolicy sets the ResourceCreationLimitPolicy field's value. -func (s *UpdateFleetAttributesInput) SetResourceCreationLimitPolicy(v *ResourceCreationLimitPolicy) *UpdateFleetAttributesInput { - s.ResourceCreationLimitPolicy = v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetAttributesOutput -type UpdateFleetAttributesOutput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet that was updated. - FleetId *string `type:"string"` -} - -// String returns the string representation -func (s UpdateFleetAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateFleetAttributesOutput) GoString() string { - return s.String() -} - -// SetFleetId sets the FleetId field's value. -func (s *UpdateFleetAttributesOutput) SetFleetId(v string) *UpdateFleetAttributesOutput { - s.FleetId = &v - return s -} - -// Represents the input for a request action. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetCapacityInput -type UpdateFleetCapacityInput struct { - _ struct{} `type:"structure"` - - // Number of EC2 instances you want this fleet to host. - DesiredInstances *int64 `type:"integer"` - - // Unique identifier for a fleet to update capacity for. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Maximum value allowed for the fleet's instance count. Default if not set - // is 1. - MaxSize *int64 `type:"integer"` - - // Minimum value allowed for the fleet's instance count. Default if not set - // is 0. - MinSize *int64 `type:"integer"` -} - -// String returns the string representation -func (s UpdateFleetCapacityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateFleetCapacityInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateFleetCapacityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateFleetCapacityInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDesiredInstances sets the DesiredInstances field's value. -func (s *UpdateFleetCapacityInput) SetDesiredInstances(v int64) *UpdateFleetCapacityInput { - s.DesiredInstances = &v - return s -} - -// SetFleetId sets the FleetId field's value. -func (s *UpdateFleetCapacityInput) SetFleetId(v string) *UpdateFleetCapacityInput { - s.FleetId = &v - return s -} - -// SetMaxSize sets the MaxSize field's value. -func (s *UpdateFleetCapacityInput) SetMaxSize(v int64) *UpdateFleetCapacityInput { - s.MaxSize = &v - return s -} - -// SetMinSize sets the MinSize field's value. 
-func (s *UpdateFleetCapacityInput) SetMinSize(v int64) *UpdateFleetCapacityInput { - s.MinSize = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetCapacityOutput -type UpdateFleetCapacityOutput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet that was updated. - FleetId *string `type:"string"` -} - -// String returns the string representation -func (s UpdateFleetCapacityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateFleetCapacityOutput) GoString() string { - return s.String() -} - -// SetFleetId sets the FleetId field's value. -func (s *UpdateFleetCapacityOutput) SetFleetId(v string) *UpdateFleetCapacityOutput { - s.FleetId = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetPortSettingsInput -type UpdateFleetPortSettingsInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet to update port settings for. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Collection of port settings to be added to the fleet record. - InboundPermissionAuthorizations []*IpPermission `type:"list"` - - // Collection of port settings to be removed from the fleet record. - InboundPermissionRevocations []*IpPermission `type:"list"` -} - -// String returns the string representation -func (s UpdateFleetPortSettingsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateFleetPortSettingsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateFleetPortSettingsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateFleetPortSettingsInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - if s.InboundPermissionAuthorizations != nil { - for i, v := range s.InboundPermissionAuthorizations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InboundPermissionAuthorizations", i), err.(request.ErrInvalidParams)) - } - } - } - if s.InboundPermissionRevocations != nil { - for i, v := range s.InboundPermissionRevocations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InboundPermissionRevocations", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetId sets the FleetId field's value. -func (s *UpdateFleetPortSettingsInput) SetFleetId(v string) *UpdateFleetPortSettingsInput { - s.FleetId = &v - return s -} - -// SetInboundPermissionAuthorizations sets the InboundPermissionAuthorizations field's value. -func (s *UpdateFleetPortSettingsInput) SetInboundPermissionAuthorizations(v []*IpPermission) *UpdateFleetPortSettingsInput { - s.InboundPermissionAuthorizations = v - return s -} - -// SetInboundPermissionRevocations sets the InboundPermissionRevocations field's value. -func (s *UpdateFleetPortSettingsInput) SetInboundPermissionRevocations(v []*IpPermission) *UpdateFleetPortSettingsInput { - s.InboundPermissionRevocations = v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetPortSettingsOutput -type UpdateFleetPortSettingsOutput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet that was updated. 
- FleetId *string `type:"string"` -} - -// String returns the string representation -func (s UpdateFleetPortSettingsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateFleetPortSettingsOutput) GoString() string { - return s.String() -} - -// SetFleetId sets the FleetId field's value. -func (s *UpdateFleetPortSettingsOutput) SetFleetId(v string) *UpdateFleetPortSettingsOutput { - s.FleetId = &v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionInput -type UpdateGameSessionInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for the game session to update. - // - // GameSessionId is a required field - GameSessionId *string `min:"1" type:"string" required:"true"` - - // Maximum number of players that can be connected simultaneously to the game - // session. - MaximumPlayerSessionCount *int64 `type:"integer"` - - // Descriptive label that is associated with a game session. Session names do - // not need to be unique. - Name *string `min:"1" type:"string"` - - // Policy determining whether or not the game session accepts new players. - PlayerSessionCreationPolicy *string `type:"string" enum:"PlayerSessionCreationPolicy"` - - // Game session protection policy to apply to this game session only. - // - // * NoProtection -- The game session can be terminated during a scale-down - // event. - // - // * FullProtection -- If the game session is in an ACTIVE status, it cannot - // be terminated during a scale-down event. 
- ProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` -} - -// String returns the string representation -func (s UpdateGameSessionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateGameSessionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateGameSessionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateGameSessionInput"} - if s.GameSessionId == nil { - invalidParams.Add(request.NewErrParamRequired("GameSessionId")) - } - if s.GameSessionId != nil && len(*s.GameSessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionId", 1)) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGameSessionId sets the GameSessionId field's value. -func (s *UpdateGameSessionInput) SetGameSessionId(v string) *UpdateGameSessionInput { - s.GameSessionId = &v - return s -} - -// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. -func (s *UpdateGameSessionInput) SetMaximumPlayerSessionCount(v int64) *UpdateGameSessionInput { - s.MaximumPlayerSessionCount = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateGameSessionInput) SetName(v string) *UpdateGameSessionInput { - s.Name = &v - return s -} - -// SetPlayerSessionCreationPolicy sets the PlayerSessionCreationPolicy field's value. -func (s *UpdateGameSessionInput) SetPlayerSessionCreationPolicy(v string) *UpdateGameSessionInput { - s.PlayerSessionCreationPolicy = &v - return s -} - -// SetProtectionPolicy sets the ProtectionPolicy field's value. 
-func (s *UpdateGameSessionInput) SetProtectionPolicy(v string) *UpdateGameSessionInput { - s.ProtectionPolicy = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionOutput -type UpdateGameSessionOutput struct { - _ struct{} `type:"structure"` - - // Object that contains the updated game session metadata. - GameSession *GameSession `type:"structure"` -} - -// String returns the string representation -func (s UpdateGameSessionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateGameSessionOutput) GoString() string { - return s.String() -} - -// SetGameSession sets the GameSession field's value. -func (s *UpdateGameSessionOutput) SetGameSession(v *GameSession) *UpdateGameSessionOutput { - s.GameSession = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionQueueInput -type UpdateGameSessionQueueInput struct { - _ struct{} `type:"structure"` - - // List of fleets that can be used to fulfill game session placement requests - // in the queue. Fleets are identified by either a fleet ARN or a fleet alias - // ARN. Destinations are listed in default preference order. When updating this - // list, provide a complete list of destinations. - Destinations []*GameSessionQueueDestination `type:"list"` - - // Descriptive label that is associated with game session queue. Queue names - // must be unique within each region. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Collection of latency policies to apply when processing game sessions placement - // requests with player latency information. Multiple policies are evaluated - // in order of the maximum latency value, starting with the lowest latency values. 
- // With just one policy, it is enforced at the start of the game session placement - // for the duration period. With multiple policies, each policy is enforced - // consecutively for its duration period. For example, a queue might enforce - // a 60-second policy followed by a 120-second policy, and then no policy for - // the remainder of the placement. When updating policies, provide a complete - // collection of policies. - PlayerLatencyPolicies []*PlayerLatencyPolicy `type:"list"` - - // Maximum time, in seconds, that a new game session placement request remains - // in the queue. When a request exceeds this time, the game session placement - // changes to a TIMED_OUT status. - TimeoutInSeconds *int64 `type:"integer"` -} - -// String returns the string representation -func (s UpdateGameSessionQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateGameSessionQueueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateGameSessionQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateGameSessionQueueInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Destinations != nil { - for i, v := range s.Destinations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Destinations", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDestinations sets the Destinations field's value. -func (s *UpdateGameSessionQueueInput) SetDestinations(v []*GameSessionQueueDestination) *UpdateGameSessionQueueInput { - s.Destinations = v - return s -} - -// SetName sets the Name field's value. 
-func (s *UpdateGameSessionQueueInput) SetName(v string) *UpdateGameSessionQueueInput { - s.Name = &v - return s -} - -// SetPlayerLatencyPolicies sets the PlayerLatencyPolicies field's value. -func (s *UpdateGameSessionQueueInput) SetPlayerLatencyPolicies(v []*PlayerLatencyPolicy) *UpdateGameSessionQueueInput { - s.PlayerLatencyPolicies = v - return s -} - -// SetTimeoutInSeconds sets the TimeoutInSeconds field's value. -func (s *UpdateGameSessionQueueInput) SetTimeoutInSeconds(v int64) *UpdateGameSessionQueueInput { - s.TimeoutInSeconds = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameSessionQueueOutput -type UpdateGameSessionQueueOutput struct { - _ struct{} `type:"structure"` - - // Object that describes the newly updated game session queue. - GameSessionQueue *GameSessionQueue `type:"structure"` -} - -// String returns the string representation -func (s UpdateGameSessionQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateGameSessionQueueOutput) GoString() string { - return s.String() -} - -// SetGameSessionQueue sets the GameSessionQueue field's value. -func (s *UpdateGameSessionQueueOutput) SetGameSessionQueue(v *GameSessionQueue) *UpdateGameSessionQueueOutput { - s.GameSessionQueue = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateMatchmakingConfigurationInput -type UpdateMatchmakingConfigurationInput struct { - _ struct{} `type:"structure"` - - // Flag that determines whether or not a match that was created with this configuration - // must be accepted by the matched players. To require acceptance, set to TRUE. - AcceptanceRequired *bool `type:"boolean"` - - // Length of time (in seconds) to wait for players to accept a proposed match. 
- // If any player rejects the match or fails to accept before the timeout, the - // ticket continues to look for an acceptable match. - AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"` - - // Number of player slots in a match to keep open for future players. For example, - // if the configuration's rule set specifies a match for a single 12-person - // team, and the additional player count is set to 2, only 10 players are selected - // for the match. - AdditionalPlayerCount *int64 `type:"integer"` - - // Information to attached to all events related to the matchmaking configuration. - CustomEventData *string `type:"string"` - - // Descriptive label that is associated with matchmaking configuration. - Description *string `min:"1" type:"string"` - - // Set of developer-defined properties for a game session, formatted as a set - // of type:value pairs. These properties are included in the GameSession object, - // which is passed to the game server with a request to start a new game session - // (see Start a Game Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - // This information is added to the new GameSession object that is created for - // a successful match. - GameProperties []*GameProperty `type:"list"` - - // Set of developer-defined game session properties, formatted as a single string - // value. This data is included in the GameSession object, which is passed to - // the game server with a request to start a new game session (see Start a Game - // Session (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). - // This information is added to the new GameSession object that is created for - // a successful match. 
- GameSessionData *string `min:"1" type:"string"` - - // Amazon Resource Name (ARN (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) - // that is assigned to a game session queue and uniquely identifies it. Format - // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. - // These queues are used when placing game sessions for matches that are created - // with this matchmaking configuration. Queues can be located in any region. - GameSessionQueueArns []*string `type:"list"` - - // Unique identifier for a matchmaking configuration to update. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // SNS topic ARN that is set up to receive matchmaking notifications. See Setting - // up Notifications for Matchmaking (http://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) - // for more information. - NotificationTarget *string `type:"string"` - - // Maximum duration, in seconds, that a matchmaking ticket can remain in process - // before timing out. Requests that time out can be resubmitted as needed. - RequestTimeoutSeconds *int64 `min:"1" type:"integer"` - - // Unique identifier for a matchmaking rule set to use with this configuration. - // A matchmaking configuration can only use rule sets that are defined in the - // same region. - RuleSetName *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s UpdateMatchmakingConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateMatchmakingConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateMatchmakingConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateMatchmakingConfigurationInput"} - if s.AcceptanceTimeoutSeconds != nil && *s.AcceptanceTimeoutSeconds < 1 { - invalidParams.Add(request.NewErrParamMinValue("AcceptanceTimeoutSeconds", 1)) - } - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.GameSessionData != nil && len(*s.GameSessionData) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GameSessionData", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.RequestTimeoutSeconds != nil && *s.RequestTimeoutSeconds < 1 { - invalidParams.Add(request.NewErrParamMinValue("RequestTimeoutSeconds", 1)) - } - if s.RuleSetName != nil && len(*s.RuleSetName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleSetName", 1)) - } - if s.GameProperties != nil { - for i, v := range s.GameProperties { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GameProperties", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAcceptanceRequired sets the AcceptanceRequired field's value. -func (s *UpdateMatchmakingConfigurationInput) SetAcceptanceRequired(v bool) *UpdateMatchmakingConfigurationInput { - s.AcceptanceRequired = &v - return s -} - -// SetAcceptanceTimeoutSeconds sets the AcceptanceTimeoutSeconds field's value. -func (s *UpdateMatchmakingConfigurationInput) SetAcceptanceTimeoutSeconds(v int64) *UpdateMatchmakingConfigurationInput { - s.AcceptanceTimeoutSeconds = &v - return s -} - -// SetAdditionalPlayerCount sets the AdditionalPlayerCount field's value. 
-func (s *UpdateMatchmakingConfigurationInput) SetAdditionalPlayerCount(v int64) *UpdateMatchmakingConfigurationInput { - s.AdditionalPlayerCount = &v - return s -} - -// SetCustomEventData sets the CustomEventData field's value. -func (s *UpdateMatchmakingConfigurationInput) SetCustomEventData(v string) *UpdateMatchmakingConfigurationInput { - s.CustomEventData = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *UpdateMatchmakingConfigurationInput) SetDescription(v string) *UpdateMatchmakingConfigurationInput { - s.Description = &v - return s -} - -// SetGameProperties sets the GameProperties field's value. -func (s *UpdateMatchmakingConfigurationInput) SetGameProperties(v []*GameProperty) *UpdateMatchmakingConfigurationInput { - s.GameProperties = v - return s -} - -// SetGameSessionData sets the GameSessionData field's value. -func (s *UpdateMatchmakingConfigurationInput) SetGameSessionData(v string) *UpdateMatchmakingConfigurationInput { - s.GameSessionData = &v - return s -} - -// SetGameSessionQueueArns sets the GameSessionQueueArns field's value. -func (s *UpdateMatchmakingConfigurationInput) SetGameSessionQueueArns(v []*string) *UpdateMatchmakingConfigurationInput { - s.GameSessionQueueArns = v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateMatchmakingConfigurationInput) SetName(v string) *UpdateMatchmakingConfigurationInput { - s.Name = &v - return s -} - -// SetNotificationTarget sets the NotificationTarget field's value. -func (s *UpdateMatchmakingConfigurationInput) SetNotificationTarget(v string) *UpdateMatchmakingConfigurationInput { - s.NotificationTarget = &v - return s -} - -// SetRequestTimeoutSeconds sets the RequestTimeoutSeconds field's value. -func (s *UpdateMatchmakingConfigurationInput) SetRequestTimeoutSeconds(v int64) *UpdateMatchmakingConfigurationInput { - s.RequestTimeoutSeconds = &v - return s -} - -// SetRuleSetName sets the RuleSetName field's value. 
-func (s *UpdateMatchmakingConfigurationInput) SetRuleSetName(v string) *UpdateMatchmakingConfigurationInput { - s.RuleSetName = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateMatchmakingConfigurationOutput -type UpdateMatchmakingConfigurationOutput struct { - _ struct{} `type:"structure"` - - // Object that describes the updated matchmaking configuration. - Configuration *MatchmakingConfiguration `type:"structure"` -} - -// String returns the string representation -func (s UpdateMatchmakingConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateMatchmakingConfigurationOutput) GoString() string { - return s.String() -} - -// SetConfiguration sets the Configuration field's value. -func (s *UpdateMatchmakingConfigurationOutput) SetConfiguration(v *MatchmakingConfiguration) *UpdateMatchmakingConfigurationOutput { - s.Configuration = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateRuntimeConfigurationInput -type UpdateRuntimeConfigurationInput struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet to update run-time configuration for. - // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` - - // Instructions for launching server processes on each instance in the fleet. - // The run-time configuration for a fleet has a collection of server process - // configurations, one for each type of server process to run on an instance. - // A server process configuration specifies the location of the server executable, - // launch parameters, and the number of concurrent processes with that configuration - // to maintain on each instance. 
- // - // RuntimeConfiguration is a required field - RuntimeConfiguration *RuntimeConfiguration `type:"structure" required:"true"` -} - -// String returns the string representation -func (s UpdateRuntimeConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateRuntimeConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateRuntimeConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateRuntimeConfigurationInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) - } - if s.RuntimeConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("RuntimeConfiguration")) - } - if s.RuntimeConfiguration != nil { - if err := s.RuntimeConfiguration.Validate(); err != nil { - invalidParams.AddNested("RuntimeConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFleetId sets the FleetId field's value. -func (s *UpdateRuntimeConfigurationInput) SetFleetId(v string) *UpdateRuntimeConfigurationInput { - s.FleetId = &v - return s -} - -// SetRuntimeConfiguration sets the RuntimeConfiguration field's value. -func (s *UpdateRuntimeConfigurationInput) SetRuntimeConfiguration(v *RuntimeConfiguration) *UpdateRuntimeConfigurationInput { - s.RuntimeConfiguration = v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateRuntimeConfigurationOutput -type UpdateRuntimeConfigurationOutput struct { - _ struct{} `type:"structure"` - - // The run-time configuration currently in force. If the update was successful, - // this object matches the one in the request. 
- RuntimeConfiguration *RuntimeConfiguration `type:"structure"` -} - -// String returns the string representation -func (s UpdateRuntimeConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateRuntimeConfigurationOutput) GoString() string { - return s.String() -} - -// SetRuntimeConfiguration sets the RuntimeConfiguration field's value. -func (s *UpdateRuntimeConfigurationOutput) SetRuntimeConfiguration(v *RuntimeConfiguration) *UpdateRuntimeConfigurationOutput { - s.RuntimeConfiguration = v - return s -} - -// Represents the input for a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ValidateMatchmakingRuleSetInput -type ValidateMatchmakingRuleSetInput struct { - _ struct{} `type:"structure"` - - // Collection of matchmaking rules to validate, formatted as a JSON string. - // - // RuleSetBody is a required field - RuleSetBody *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s ValidateMatchmakingRuleSetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ValidateMatchmakingRuleSetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ValidateMatchmakingRuleSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ValidateMatchmakingRuleSetInput"} - if s.RuleSetBody == nil { - invalidParams.Add(request.NewErrParamRequired("RuleSetBody")) - } - if s.RuleSetBody != nil && len(*s.RuleSetBody) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleSetBody", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRuleSetBody sets the RuleSetBody field's value. 
-func (s *ValidateMatchmakingRuleSetInput) SetRuleSetBody(v string) *ValidateMatchmakingRuleSetInput { - s.RuleSetBody = &v - return s -} - -// Represents the returned data in response to a request action. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ValidateMatchmakingRuleSetOutput -type ValidateMatchmakingRuleSetOutput struct { - _ struct{} `type:"structure"` - - // Response indicating whether or not the rule set is valid. - Valid *bool `type:"boolean"` -} - -// String returns the string representation -func (s ValidateMatchmakingRuleSetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ValidateMatchmakingRuleSetOutput) GoString() string { - return s.String() -} - -// SetValid sets the Valid field's value. -func (s *ValidateMatchmakingRuleSetOutput) SetValid(v bool) *ValidateMatchmakingRuleSetOutput { - s.Valid = &v - return s -} - -// Represents an authorization for a VPC peering connection between the VPC -// for an Amazon GameLift fleet and another VPC on an account you have access -// to. This authorization must exist and be valid for the peering connection -// to be established. Authorizations are valid for 24 hours after they are issued. -// -// VPC peering connection operations include: -// -// * CreateVpcPeeringAuthorization -// -// * DescribeVpcPeeringAuthorizations -// -// * DeleteVpcPeeringAuthorization -// -// * CreateVpcPeeringConnection -// -// * DescribeVpcPeeringConnections -// -// * DeleteVpcPeeringConnection -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/VpcPeeringAuthorization -type VpcPeeringAuthorization struct { - _ struct{} `type:"structure"` - - // Time stamp indicating when this authorization was issued. Format is a number - // expressed in Unix time as milliseconds (for example "1469498468.057"). 
- CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Time stamp indicating when this authorization expires (24 hours after issuance). - // Format is a number expressed in Unix time as milliseconds (for example "1469498468.057"). - ExpirationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Unique identifier for the AWS account that you use to manage your Amazon - // GameLift fleet. You can find your Account ID in the AWS Management Console - // under account settings. - GameLiftAwsAccountId *string `min:"1" type:"string"` - - PeerVpcAwsAccountId *string `min:"1" type:"string"` - - // Unique identifier for a VPC with resources to be accessed by your Amazon - // GameLift fleet. The VPC must be in the same region where your fleet is deployed. - // To get VPC information, including IDs, use the Virtual Private Cloud service - // tools, including the VPC Dashboard in the AWS Management Console. - PeerVpcId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s VpcPeeringAuthorization) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VpcPeeringAuthorization) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *VpcPeeringAuthorization) SetCreationTime(v time.Time) *VpcPeeringAuthorization { - s.CreationTime = &v - return s -} - -// SetExpirationTime sets the ExpirationTime field's value. -func (s *VpcPeeringAuthorization) SetExpirationTime(v time.Time) *VpcPeeringAuthorization { - s.ExpirationTime = &v - return s -} - -// SetGameLiftAwsAccountId sets the GameLiftAwsAccountId field's value. -func (s *VpcPeeringAuthorization) SetGameLiftAwsAccountId(v string) *VpcPeeringAuthorization { - s.GameLiftAwsAccountId = &v - return s -} - -// SetPeerVpcAwsAccountId sets the PeerVpcAwsAccountId field's value. 
-func (s *VpcPeeringAuthorization) SetPeerVpcAwsAccountId(v string) *VpcPeeringAuthorization { - s.PeerVpcAwsAccountId = &v - return s -} - -// SetPeerVpcId sets the PeerVpcId field's value. -func (s *VpcPeeringAuthorization) SetPeerVpcId(v string) *VpcPeeringAuthorization { - s.PeerVpcId = &v - return s -} - -// Represents a peering connection between a VPC on one of your AWS accounts -// and the VPC for your Amazon GameLift fleets. This record may be for an active -// peering connection or a pending connection that has not yet been established. -// -// VPC peering connection operations include: -// -// * CreateVpcPeeringAuthorization -// -// * DescribeVpcPeeringAuthorizations -// -// * DeleteVpcPeeringAuthorization -// -// * CreateVpcPeeringConnection -// -// * DescribeVpcPeeringConnections -// -// * DeleteVpcPeeringConnection -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/VpcPeeringConnection -type VpcPeeringConnection struct { - _ struct{} `type:"structure"` - - // Unique identifier for a fleet. This ID determines the ID of the Amazon GameLift - // VPC for your fleet. - FleetId *string `type:"string"` - - // Unique identifier for the VPC that contains the Amazon GameLift fleet for - // this connection. This VPC is managed by Amazon GameLift and does not appear - // in your AWS account. - GameLiftVpcId *string `min:"1" type:"string"` - - // CIDR block of IPv4 addresses assigned to the VPC peering connection for the - // GameLift VPC. The peered VPC also has an IPv4 CIDR block associated with - // it; these blocks cannot overlap or the peering connection cannot be created. - IpV4CidrBlock *string `min:"1" type:"string"` - - // Unique identifier for a VPC with resources to be accessed by your Amazon - // GameLift fleet. The VPC must be in the same region where your fleet is deployed. - // To get VPC information, including IDs, use the Virtual Private Cloud service - // tools, including the VPC Dashboard in the AWS Management Console. 
- PeerVpcId *string `min:"1" type:"string"` - - // Object that contains status information about the connection. Status indicates - // if a connection is pending, successful, or failed. - Status *VpcPeeringConnectionStatus `type:"structure"` - - // Unique identifier that is automatically assigned to the connection record. - // This ID is referenced in VPC peering connection events, and is used when - // deleting a connection with DeleteVpcPeeringConnection. - VpcPeeringConnectionId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s VpcPeeringConnection) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VpcPeeringConnection) GoString() string { - return s.String() -} - -// SetFleetId sets the FleetId field's value. -func (s *VpcPeeringConnection) SetFleetId(v string) *VpcPeeringConnection { - s.FleetId = &v - return s -} - -// SetGameLiftVpcId sets the GameLiftVpcId field's value. -func (s *VpcPeeringConnection) SetGameLiftVpcId(v string) *VpcPeeringConnection { - s.GameLiftVpcId = &v - return s -} - -// SetIpV4CidrBlock sets the IpV4CidrBlock field's value. -func (s *VpcPeeringConnection) SetIpV4CidrBlock(v string) *VpcPeeringConnection { - s.IpV4CidrBlock = &v - return s -} - -// SetPeerVpcId sets the PeerVpcId field's value. -func (s *VpcPeeringConnection) SetPeerVpcId(v string) *VpcPeeringConnection { - s.PeerVpcId = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *VpcPeeringConnection) SetStatus(v *VpcPeeringConnectionStatus) *VpcPeeringConnection { - s.Status = v - return s -} - -// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. -func (s *VpcPeeringConnection) SetVpcPeeringConnectionId(v string) *VpcPeeringConnection { - s.VpcPeeringConnectionId = &v - return s -} - -// Represents status information for a VPC peering connection. Status is associated -// with a VpcPeeringConnection object. 
Status codes and messages are provided -// from EC2 (). (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VpcPeeringConnectionStateReason.html) -// Connection status information is also communicated as a fleet Event. -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/VpcPeeringConnectionStatus -type VpcPeeringConnectionStatus struct { - _ struct{} `type:"structure"` - - // Code indicating the status of a VPC peering connection. - Code *string `min:"1" type:"string"` - - // Additional messaging associated with the connection status. - Message *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s VpcPeeringConnectionStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VpcPeeringConnectionStatus) GoString() string { - return s.String() -} - -// SetCode sets the Code field's value. -func (s *VpcPeeringConnectionStatus) SetCode(v string) *VpcPeeringConnectionStatus { - s.Code = &v - return s -} - -// SetMessage sets the Message field's value. 
-func (s *VpcPeeringConnectionStatus) SetMessage(v string) *VpcPeeringConnectionStatus { - s.Message = &v - return s -} - -const ( - // AcceptanceTypeAccept is a AcceptanceType enum value - AcceptanceTypeAccept = "ACCEPT" - - // AcceptanceTypeReject is a AcceptanceType enum value - AcceptanceTypeReject = "REJECT" -) - -const ( - // BuildStatusInitialized is a BuildStatus enum value - BuildStatusInitialized = "INITIALIZED" - - // BuildStatusReady is a BuildStatus enum value - BuildStatusReady = "READY" - - // BuildStatusFailed is a BuildStatus enum value - BuildStatusFailed = "FAILED" -) - -const ( - // ComparisonOperatorTypeGreaterThanOrEqualToThreshold is a ComparisonOperatorType enum value - ComparisonOperatorTypeGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" - - // ComparisonOperatorTypeGreaterThanThreshold is a ComparisonOperatorType enum value - ComparisonOperatorTypeGreaterThanThreshold = "GreaterThanThreshold" - - // ComparisonOperatorTypeLessThanThreshold is a ComparisonOperatorType enum value - ComparisonOperatorTypeLessThanThreshold = "LessThanThreshold" - - // ComparisonOperatorTypeLessThanOrEqualToThreshold is a ComparisonOperatorType enum value - ComparisonOperatorTypeLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" -) - -const ( - // EC2InstanceTypeT2Micro is a EC2InstanceType enum value - EC2InstanceTypeT2Micro = "t2.micro" - - // EC2InstanceTypeT2Small is a EC2InstanceType enum value - EC2InstanceTypeT2Small = "t2.small" - - // EC2InstanceTypeT2Medium is a EC2InstanceType enum value - EC2InstanceTypeT2Medium = "t2.medium" - - // EC2InstanceTypeT2Large is a EC2InstanceType enum value - EC2InstanceTypeT2Large = "t2.large" - - // EC2InstanceTypeC3Large is a EC2InstanceType enum value - EC2InstanceTypeC3Large = "c3.large" - - // EC2InstanceTypeC3Xlarge is a EC2InstanceType enum value - EC2InstanceTypeC3Xlarge = "c3.xlarge" - - // EC2InstanceTypeC32xlarge is a EC2InstanceType enum value - EC2InstanceTypeC32xlarge = "c3.2xlarge" 
- - // EC2InstanceTypeC34xlarge is a EC2InstanceType enum value - EC2InstanceTypeC34xlarge = "c3.4xlarge" - - // EC2InstanceTypeC38xlarge is a EC2InstanceType enum value - EC2InstanceTypeC38xlarge = "c3.8xlarge" - - // EC2InstanceTypeC4Large is a EC2InstanceType enum value - EC2InstanceTypeC4Large = "c4.large" - - // EC2InstanceTypeC4Xlarge is a EC2InstanceType enum value - EC2InstanceTypeC4Xlarge = "c4.xlarge" - - // EC2InstanceTypeC42xlarge is a EC2InstanceType enum value - EC2InstanceTypeC42xlarge = "c4.2xlarge" - - // EC2InstanceTypeC44xlarge is a EC2InstanceType enum value - EC2InstanceTypeC44xlarge = "c4.4xlarge" - - // EC2InstanceTypeC48xlarge is a EC2InstanceType enum value - EC2InstanceTypeC48xlarge = "c4.8xlarge" - - // EC2InstanceTypeR3Large is a EC2InstanceType enum value - EC2InstanceTypeR3Large = "r3.large" - - // EC2InstanceTypeR3Xlarge is a EC2InstanceType enum value - EC2InstanceTypeR3Xlarge = "r3.xlarge" - - // EC2InstanceTypeR32xlarge is a EC2InstanceType enum value - EC2InstanceTypeR32xlarge = "r3.2xlarge" - - // EC2InstanceTypeR34xlarge is a EC2InstanceType enum value - EC2InstanceTypeR34xlarge = "r3.4xlarge" - - // EC2InstanceTypeR38xlarge is a EC2InstanceType enum value - EC2InstanceTypeR38xlarge = "r3.8xlarge" - - // EC2InstanceTypeR4Large is a EC2InstanceType enum value - EC2InstanceTypeR4Large = "r4.large" - - // EC2InstanceTypeR4Xlarge is a EC2InstanceType enum value - EC2InstanceTypeR4Xlarge = "r4.xlarge" - - // EC2InstanceTypeR42xlarge is a EC2InstanceType enum value - EC2InstanceTypeR42xlarge = "r4.2xlarge" - - // EC2InstanceTypeR44xlarge is a EC2InstanceType enum value - EC2InstanceTypeR44xlarge = "r4.4xlarge" - - // EC2InstanceTypeR48xlarge is a EC2InstanceType enum value - EC2InstanceTypeR48xlarge = "r4.8xlarge" - - // EC2InstanceTypeR416xlarge is a EC2InstanceType enum value - EC2InstanceTypeR416xlarge = "r4.16xlarge" - - // EC2InstanceTypeM3Medium is a EC2InstanceType enum value - EC2InstanceTypeM3Medium = "m3.medium" - - // 
EC2InstanceTypeM3Large is a EC2InstanceType enum value - EC2InstanceTypeM3Large = "m3.large" - - // EC2InstanceTypeM3Xlarge is a EC2InstanceType enum value - EC2InstanceTypeM3Xlarge = "m3.xlarge" - - // EC2InstanceTypeM32xlarge is a EC2InstanceType enum value - EC2InstanceTypeM32xlarge = "m3.2xlarge" - - // EC2InstanceTypeM4Large is a EC2InstanceType enum value - EC2InstanceTypeM4Large = "m4.large" - - // EC2InstanceTypeM4Xlarge is a EC2InstanceType enum value - EC2InstanceTypeM4Xlarge = "m4.xlarge" - - // EC2InstanceTypeM42xlarge is a EC2InstanceType enum value - EC2InstanceTypeM42xlarge = "m4.2xlarge" - - // EC2InstanceTypeM44xlarge is a EC2InstanceType enum value - EC2InstanceTypeM44xlarge = "m4.4xlarge" - - // EC2InstanceTypeM410xlarge is a EC2InstanceType enum value - EC2InstanceTypeM410xlarge = "m4.10xlarge" -) - -const ( - // EventCodeGenericEvent is a EventCode enum value - EventCodeGenericEvent = "GENERIC_EVENT" - - // EventCodeFleetCreated is a EventCode enum value - EventCodeFleetCreated = "FLEET_CREATED" - - // EventCodeFleetDeleted is a EventCode enum value - EventCodeFleetDeleted = "FLEET_DELETED" - - // EventCodeFleetScalingEvent is a EventCode enum value - EventCodeFleetScalingEvent = "FLEET_SCALING_EVENT" - - // EventCodeFleetStateDownloading is a EventCode enum value - EventCodeFleetStateDownloading = "FLEET_STATE_DOWNLOADING" - - // EventCodeFleetStateValidating is a EventCode enum value - EventCodeFleetStateValidating = "FLEET_STATE_VALIDATING" - - // EventCodeFleetStateBuilding is a EventCode enum value - EventCodeFleetStateBuilding = "FLEET_STATE_BUILDING" - - // EventCodeFleetStateActivating is a EventCode enum value - EventCodeFleetStateActivating = "FLEET_STATE_ACTIVATING" - - // EventCodeFleetStateActive is a EventCode enum value - EventCodeFleetStateActive = "FLEET_STATE_ACTIVE" - - // EventCodeFleetStateError is a EventCode enum value - EventCodeFleetStateError = "FLEET_STATE_ERROR" - - // EventCodeFleetInitializationFailed is a 
EventCode enum value - EventCodeFleetInitializationFailed = "FLEET_INITIALIZATION_FAILED" - - // EventCodeFleetBinaryDownloadFailed is a EventCode enum value - EventCodeFleetBinaryDownloadFailed = "FLEET_BINARY_DOWNLOAD_FAILED" - - // EventCodeFleetValidationLaunchPathNotFound is a EventCode enum value - EventCodeFleetValidationLaunchPathNotFound = "FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND" - - // EventCodeFleetValidationExecutableRuntimeFailure is a EventCode enum value - EventCodeFleetValidationExecutableRuntimeFailure = "FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE" - - // EventCodeFleetValidationTimedOut is a EventCode enum value - EventCodeFleetValidationTimedOut = "FLEET_VALIDATION_TIMED_OUT" - - // EventCodeFleetActivationFailed is a EventCode enum value - EventCodeFleetActivationFailed = "FLEET_ACTIVATION_FAILED" - - // EventCodeFleetActivationFailedNoInstances is a EventCode enum value - EventCodeFleetActivationFailedNoInstances = "FLEET_ACTIVATION_FAILED_NO_INSTANCES" - - // EventCodeFleetNewGameSessionProtectionPolicyUpdated is a EventCode enum value - EventCodeFleetNewGameSessionProtectionPolicyUpdated = "FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED" - - // EventCodeServerProcessInvalidPath is a EventCode enum value - EventCodeServerProcessInvalidPath = "SERVER_PROCESS_INVALID_PATH" - - // EventCodeServerProcessSdkInitializationTimeout is a EventCode enum value - EventCodeServerProcessSdkInitializationTimeout = "SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT" - - // EventCodeServerProcessProcessReadyTimeout is a EventCode enum value - EventCodeServerProcessProcessReadyTimeout = "SERVER_PROCESS_PROCESS_READY_TIMEOUT" - - // EventCodeServerProcessCrashed is a EventCode enum value - EventCodeServerProcessCrashed = "SERVER_PROCESS_CRASHED" - - // EventCodeServerProcessTerminatedUnhealthy is a EventCode enum value - EventCodeServerProcessTerminatedUnhealthy = "SERVER_PROCESS_TERMINATED_UNHEALTHY" - - // EventCodeServerProcessForceTerminated is a EventCode enum 
value - EventCodeServerProcessForceTerminated = "SERVER_PROCESS_FORCE_TERMINATED" - - // EventCodeServerProcessProcessExitTimeout is a EventCode enum value - EventCodeServerProcessProcessExitTimeout = "SERVER_PROCESS_PROCESS_EXIT_TIMEOUT" - - // EventCodeGameSessionActivationTimeout is a EventCode enum value - EventCodeGameSessionActivationTimeout = "GAME_SESSION_ACTIVATION_TIMEOUT" - - // EventCodeFleetCreationExtractingBuild is a EventCode enum value - EventCodeFleetCreationExtractingBuild = "FLEET_CREATION_EXTRACTING_BUILD" - - // EventCodeFleetCreationRunningInstaller is a EventCode enum value - EventCodeFleetCreationRunningInstaller = "FLEET_CREATION_RUNNING_INSTALLER" - - // EventCodeFleetCreationValidatingRuntimeConfig is a EventCode enum value - EventCodeFleetCreationValidatingRuntimeConfig = "FLEET_CREATION_VALIDATING_RUNTIME_CONFIG" - - // EventCodeFleetVpcPeeringSucceeded is a EventCode enum value - EventCodeFleetVpcPeeringSucceeded = "FLEET_VPC_PEERING_SUCCEEDED" - - // EventCodeFleetVpcPeeringFailed is a EventCode enum value - EventCodeFleetVpcPeeringFailed = "FLEET_VPC_PEERING_FAILED" - - // EventCodeFleetVpcPeeringDeleted is a EventCode enum value - EventCodeFleetVpcPeeringDeleted = "FLEET_VPC_PEERING_DELETED" -) - -const ( - // FleetStatusNew is a FleetStatus enum value - FleetStatusNew = "NEW" - - // FleetStatusDownloading is a FleetStatus enum value - FleetStatusDownloading = "DOWNLOADING" - - // FleetStatusValidating is a FleetStatus enum value - FleetStatusValidating = "VALIDATING" - - // FleetStatusBuilding is a FleetStatus enum value - FleetStatusBuilding = "BUILDING" - - // FleetStatusActivating is a FleetStatus enum value - FleetStatusActivating = "ACTIVATING" - - // FleetStatusActive is a FleetStatus enum value - FleetStatusActive = "ACTIVE" - - // FleetStatusDeleting is a FleetStatus enum value - FleetStatusDeleting = "DELETING" - - // FleetStatusError is a FleetStatus enum value - FleetStatusError = "ERROR" - - // FleetStatusTerminated is 
a FleetStatus enum value - FleetStatusTerminated = "TERMINATED" -) - -const ( - // GameSessionPlacementStatePending is a GameSessionPlacementState enum value - GameSessionPlacementStatePending = "PENDING" - - // GameSessionPlacementStateFulfilled is a GameSessionPlacementState enum value - GameSessionPlacementStateFulfilled = "FULFILLED" - - // GameSessionPlacementStateCancelled is a GameSessionPlacementState enum value - GameSessionPlacementStateCancelled = "CANCELLED" - - // GameSessionPlacementStateTimedOut is a GameSessionPlacementState enum value - GameSessionPlacementStateTimedOut = "TIMED_OUT" -) - -const ( - // GameSessionStatusActive is a GameSessionStatus enum value - GameSessionStatusActive = "ACTIVE" - - // GameSessionStatusActivating is a GameSessionStatus enum value - GameSessionStatusActivating = "ACTIVATING" - - // GameSessionStatusTerminated is a GameSessionStatus enum value - GameSessionStatusTerminated = "TERMINATED" - - // GameSessionStatusTerminating is a GameSessionStatus enum value - GameSessionStatusTerminating = "TERMINATING" - - // GameSessionStatusError is a GameSessionStatus enum value - GameSessionStatusError = "ERROR" -) - -const ( - // InstanceStatusPending is a InstanceStatus enum value - InstanceStatusPending = "PENDING" - - // InstanceStatusActive is a InstanceStatus enum value - InstanceStatusActive = "ACTIVE" - - // InstanceStatusTerminating is a InstanceStatus enum value - InstanceStatusTerminating = "TERMINATING" -) - -const ( - // IpProtocolTcp is a IpProtocol enum value - IpProtocolTcp = "TCP" - - // IpProtocolUdp is a IpProtocol enum value - IpProtocolUdp = "UDP" -) - -const ( - // MatchmakingConfigurationStatusCancelled is a MatchmakingConfigurationStatus enum value - MatchmakingConfigurationStatusCancelled = "CANCELLED" - - // MatchmakingConfigurationStatusCompleted is a MatchmakingConfigurationStatus enum value - MatchmakingConfigurationStatusCompleted = "COMPLETED" - - // MatchmakingConfigurationStatusFailed is a 
MatchmakingConfigurationStatus enum value - MatchmakingConfigurationStatusFailed = "FAILED" - - // MatchmakingConfigurationStatusPlacing is a MatchmakingConfigurationStatus enum value - MatchmakingConfigurationStatusPlacing = "PLACING" - - // MatchmakingConfigurationStatusQueued is a MatchmakingConfigurationStatus enum value - MatchmakingConfigurationStatusQueued = "QUEUED" - - // MatchmakingConfigurationStatusRequiresAcceptance is a MatchmakingConfigurationStatus enum value - MatchmakingConfigurationStatusRequiresAcceptance = "REQUIRES_ACCEPTANCE" - - // MatchmakingConfigurationStatusSearching is a MatchmakingConfigurationStatus enum value - MatchmakingConfigurationStatusSearching = "SEARCHING" - - // MatchmakingConfigurationStatusTimedOut is a MatchmakingConfigurationStatus enum value - MatchmakingConfigurationStatusTimedOut = "TIMED_OUT" -) - -const ( - // MetricNameActivatingGameSessions is a MetricName enum value - MetricNameActivatingGameSessions = "ActivatingGameSessions" - - // MetricNameActiveGameSessions is a MetricName enum value - MetricNameActiveGameSessions = "ActiveGameSessions" - - // MetricNameActiveInstances is a MetricName enum value - MetricNameActiveInstances = "ActiveInstances" - - // MetricNameAvailableGameSessions is a MetricName enum value - MetricNameAvailableGameSessions = "AvailableGameSessions" - - // MetricNameAvailablePlayerSessions is a MetricName enum value - MetricNameAvailablePlayerSessions = "AvailablePlayerSessions" - - // MetricNameCurrentPlayerSessions is a MetricName enum value - MetricNameCurrentPlayerSessions = "CurrentPlayerSessions" - - // MetricNameIdleInstances is a MetricName enum value - MetricNameIdleInstances = "IdleInstances" - - // MetricNamePercentAvailableGameSessions is a MetricName enum value - MetricNamePercentAvailableGameSessions = "PercentAvailableGameSessions" - - // MetricNamePercentIdleInstances is a MetricName enum value - MetricNamePercentIdleInstances = "PercentIdleInstances" - - // 
MetricNameQueueDepth is a MetricName enum value - MetricNameQueueDepth = "QueueDepth" - - // MetricNameWaitTime is a MetricName enum value - MetricNameWaitTime = "WaitTime" -) - -const ( - // OperatingSystemWindows2012 is a OperatingSystem enum value - OperatingSystemWindows2012 = "WINDOWS_2012" - - // OperatingSystemAmazonLinux is a OperatingSystem enum value - OperatingSystemAmazonLinux = "AMAZON_LINUX" -) - -const ( - // PlayerSessionCreationPolicyAcceptAll is a PlayerSessionCreationPolicy enum value - PlayerSessionCreationPolicyAcceptAll = "ACCEPT_ALL" - - // PlayerSessionCreationPolicyDenyAll is a PlayerSessionCreationPolicy enum value - PlayerSessionCreationPolicyDenyAll = "DENY_ALL" -) - -const ( - // PlayerSessionStatusReserved is a PlayerSessionStatus enum value - PlayerSessionStatusReserved = "RESERVED" - - // PlayerSessionStatusActive is a PlayerSessionStatus enum value - PlayerSessionStatusActive = "ACTIVE" - - // PlayerSessionStatusCompleted is a PlayerSessionStatus enum value - PlayerSessionStatusCompleted = "COMPLETED" - - // PlayerSessionStatusTimedout is a PlayerSessionStatus enum value - PlayerSessionStatusTimedout = "TIMEDOUT" -) - -const ( - // ProtectionPolicyNoProtection is a ProtectionPolicy enum value - ProtectionPolicyNoProtection = "NoProtection" - - // ProtectionPolicyFullProtection is a ProtectionPolicy enum value - ProtectionPolicyFullProtection = "FullProtection" -) - -const ( - // RoutingStrategyTypeSimple is a RoutingStrategyType enum value - RoutingStrategyTypeSimple = "SIMPLE" - - // RoutingStrategyTypeTerminal is a RoutingStrategyType enum value - RoutingStrategyTypeTerminal = "TERMINAL" -) - -const ( - // ScalingAdjustmentTypeChangeInCapacity is a ScalingAdjustmentType enum value - ScalingAdjustmentTypeChangeInCapacity = "ChangeInCapacity" - - // ScalingAdjustmentTypeExactCapacity is a ScalingAdjustmentType enum value - ScalingAdjustmentTypeExactCapacity = "ExactCapacity" - - // ScalingAdjustmentTypePercentChangeInCapacity is a 
ScalingAdjustmentType enum value - ScalingAdjustmentTypePercentChangeInCapacity = "PercentChangeInCapacity" -) - -const ( - // ScalingStatusTypeActive is a ScalingStatusType enum value - ScalingStatusTypeActive = "ACTIVE" - - // ScalingStatusTypeUpdateRequested is a ScalingStatusType enum value - ScalingStatusTypeUpdateRequested = "UPDATE_REQUESTED" - - // ScalingStatusTypeUpdating is a ScalingStatusType enum value - ScalingStatusTypeUpdating = "UPDATING" - - // ScalingStatusTypeDeleteRequested is a ScalingStatusType enum value - ScalingStatusTypeDeleteRequested = "DELETE_REQUESTED" - - // ScalingStatusTypeDeleting is a ScalingStatusType enum value - ScalingStatusTypeDeleting = "DELETING" - - // ScalingStatusTypeDeleted is a ScalingStatusType enum value - ScalingStatusTypeDeleted = "DELETED" - - // ScalingStatusTypeError is a ScalingStatusType enum value - ScalingStatusTypeError = "ERROR" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go deleted file mode 100644 index d73c39d65ac..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go +++ /dev/null @@ -1,304 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package gamelift provides the client and types for making API -// requests to Amazon GameLift. -// -// Amazon GameLift is a managed service for developers who need a scalable, -// dedicated server solution for their multiplayer games. Amazon GameLift provides -// tools for the following tasks: (1) acquire computing resources and deploy -// game servers, (2) scale game server capacity to meet player demand, (3) host -// game sessions and manage player access, and (4) track in-depth metrics on -// player usage and server performance. 
-// -// The Amazon GameLift service API includes two important function sets: -// -// * Manage game sessions and player access -- Retrieve information on available -// game sessions; create new game sessions; send player requests to join -// a game session. -// -// * Configure and manage game server resources -- Manage builds, fleets, -// queues, and aliases; set autoscaling policies; retrieve logs and metrics. -// -// This reference guide describes the low-level service API for Amazon GameLift. -// You can use the API functionality with these tools: -// -// * The Amazon Web Services software development kit (AWS SDK (http://aws.amazon.com/tools/#sdk)) -// is available in multiple languages (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-supported.html#gamelift-supported-clients) -// including C++ and C#. Use the SDK to access the API programmatically from -// an application, such as a game client. -// -// * The AWS command-line interface (http://aws.amazon.com/cli/) (CLI) tool -// is primarily useful for handling administrative actions, such as setting -// up and managing Amazon GameLift settings and resources. You can use the -// AWS CLI to manage all of your AWS services. -// -// * The AWS Management Console (https://console.aws.amazon.com/gamelift/home) -// for Amazon GameLift provides a web interface to manage your Amazon GameLift -// settings and resources. The console includes a dashboard for tracking -// key resources, including builds and fleets, and displays usage and performance -// metrics for your games as customizable graphs. -// -// * Amazon GameLift Local is a tool for testing your game's integration -// with Amazon GameLift before deploying it on the service. This tools supports -// a subset of key API actions, which can be called from either the AWS CLI -// or programmatically. See Testing an Integration (http://docs.aws.amazon.com/gamelift/latest/developerguide/integration-testing-local.html). 
-// -// MORE RESOURCES -// -// * Amazon GameLift Developer Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/) -// -- Learn more about Amazon GameLift features and how to use them. -// -// * Lumberyard and Amazon GameLift Tutorials (https://gamedev.amazon.com/forums/tutorials) -// -- Get started fast with walkthroughs and sample projects. -// -// * GameDev Blog (http://aws.amazon.com/blogs/gamedev/) -- Stay up to date -// with new features and techniques. -// -// * GameDev Forums (https://gamedev.amazon.com/forums/spaces/123/gamelift-discussion.html) -// -- Connect with the GameDev community. -// -// * Amazon GameLift Document History (http://docs.aws.amazon.com/gamelift/latest/developerguide/doc-history.html) -// -- See changes to the Amazon GameLift service, SDKs, and documentation, -// as well as links to release notes. -// -// API SUMMARY -// -// This list offers a functional overview of the Amazon GameLift service API. -// -// Managing Games and Players -// -// Use these actions to start new game sessions, find existing game sessions, -// track game session status and other information, and enable player access -// to game sessions. -// -// * Discover existing game sessions -// -// SearchGameSessions -- Retrieve all available game sessions or search for -// game sessions that match a set of criteria. -// -// * Start new game sessions -// -// Start new games with Queues to find the best available hosting resources -// across multiple regions, minimize player latency, and balance game session -// activity for efficiency and cost effectiveness. -// -// StartGameSessionPlacement -- Request a new game session placement and add -// one or more players to it. -// -// DescribeGameSessionPlacement -- Get details on a placement request, including -// status. -// -// StopGameSessionPlacement -- Cancel a placement request. -// -// CreateGameSession -- Start a new game session on a specific fleet. Available -// in Amazon GameLift Local. 
-// -// * Start new game sessions with FlexMatch matchmaking -// -// StartMatchmaking -- Request matchmaking for one players or a group who want -// to play together. -// -// DescribeMatchmaking -- Get details on a matchmaking request, including status. -// -// AcceptMatch -- Register that a player accepts a proposed match, for matches -// that require player acceptance. -// -// StopMatchmaking -- Cancel a matchmaking request. -// -// * Manage game session data -// -// DescribeGameSessions -- Retrieve metadata for one or more game sessions, -// including length of time active and current player count. Available in -// Amazon GameLift Local. -// -// DescribeGameSessionDetails -- Retrieve metadata and the game session protection -// setting for one or more game sessions. -// -// UpdateGameSession -- Change game session settings, such as maximum player -// count and join policy. -// -// GetGameSessionLogUrl -- Get the location of saved logs for a game session. -// -// * Manage player sessions -// -// CreatePlayerSession -- Send a request for a player to join a game session. -// Available in Amazon GameLift Local. -// -// CreatePlayerSessions -- Send a request for multiple players to join a game -// session. Available in Amazon GameLift Local. -// -// DescribePlayerSessions -- Get details on player activity, including status, -// playing time, and player data. Available in Amazon GameLift Local. -// -// Setting Up and Managing Game Servers -// -// When setting up Amazon GameLift resources for your game, you first create -// a game build (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) -// and upload it to Amazon GameLift. You can then use these actions to configure -// and manage a fleet of resources to run your game servers, scale capacity -// to meet player demand, access performance and utilization metrics, and more. -// -// * Manage game builds -// -// CreateBuild -- Create a new build using files stored in an Amazon S3 bucket. 
-// (Update uploading permissions with RequestUploadCredentials.) To create -// a build and upload files from a local path, use the AWS CLI command upload-build. -// -// ListBuilds -- Get a list of all builds uploaded to a Amazon GameLift region. -// -// DescribeBuild -- Retrieve information associated with a build. -// -// UpdateBuild -- Change build metadata, including build name and version. -// -// DeleteBuild -- Remove a build from Amazon GameLift. -// -// * Manage fleets -// -// CreateFleet -- Configure and activate a new fleet to run a build's game servers. -// -// ListFleets -- Get a list of all fleet IDs in a Amazon GameLift region (all -// statuses). -// -// DeleteFleet -- Terminate a fleet that is no longer running game servers or -// hosting players. -// -// View / update fleet configurations. -// -// DescribeFleetAttributes / UpdateFleetAttributes -- View or change a fleet's -// metadata and settings for game session protection and resource creation -// limits. -// -// DescribeFleetPortSettings / UpdateFleetPortSettings -- View or change the -// inbound permissions (IP address and port setting ranges) allowed for a -// fleet. -// -// DescribeRuntimeConfiguration / UpdateRuntimeConfiguration -- View or change -// what server processes (and how many) to run on each instance in a fleet. -// -// * Control fleet capacity -// -// DescribeEC2InstanceLimits -- Retrieve maximum number of instances allowed -// for the current AWS account and the current usage level. -// -// DescribeFleetCapacity / UpdateFleetCapacity -- Retrieve the capacity settings -// and the current number of instances in a fleet; adjust fleet capacity -// settings to scale up or down. -// -// Autoscale -- Manage autoscaling rules and apply them to a fleet. -// -// PutScalingPolicy -- Create a new autoscaling policy, or update an existing -// one. -// -// DescribeScalingPolicies -- Retrieve an existing autoscaling policy. 
-// -// DeleteScalingPolicy -- Delete an autoscaling policy and stop it from affecting -// a fleet's capacity. -// -// * Manage VPC peering connections for fleets -// -// CreateVpcPeeringAuthorization -- Authorize a peering connection to one of -// your VPCs. -// -// DescribeVpcPeeringAuthorizations -- Retrieve valid peering connection authorizations. -// -// -// DeleteVpcPeeringAuthorization -- Delete a peering connection authorization. -// -// CreateVpcPeeringConnection -- Establish a peering connection between the -// VPC for a Amazon GameLift fleet and one of your VPCs. -// -// DescribeVpcPeeringConnections -- Retrieve information on active or pending -// VPC peering connections with a Amazon GameLift fleet. -// -// DeleteVpcPeeringConnection -- Delete a VPC peering connection with a Amazon -// GameLift fleet. -// -// * Access fleet activity statistics -// -// DescribeFleetUtilization -- Get current data on the number of server processes, -// game sessions, and players currently active on a fleet. -// -// DescribeFleetEvents -- Get a fleet's logged events for a specified time span. -// -// DescribeGameSessions -- Retrieve metadata associated with one or more game -// sessions, including length of time active and current player count. -// -// * Remotely access an instance -// -// DescribeInstances -- Get information on each instance in a fleet, including -// instance ID, IP address, and status. -// -// GetInstanceAccess -- Request access credentials needed to remotely connect -// to a specified instance in a fleet. -// -// * Manage fleet aliases -// -// CreateAlias -- Define a new alias and optionally assign it to a fleet. -// -// ListAliases -- Get all fleet aliases defined in a Amazon GameLift region. -// -// DescribeAlias -- Retrieve information on an existing alias. -// -// UpdateAlias -- Change settings for a alias, such as redirecting it from one -// fleet to another. -// -// DeleteAlias -- Remove an alias from the region. 
-// -// ResolveAlias -- Get the fleet ID that a specified alias points to. -// -// * Manage game session queues -// -// CreateGameSessionQueue -- Create a queue for processing requests for new -// game sessions. -// -// DescribeGameSessionQueues -- Retrieve game session queues defined in a Amazon -// GameLift region. -// -// UpdateGameSessionQueue -- Change the configuration of a game session queue. -// -// DeleteGameSessionQueue -- Remove a game session queue from the region. -// -// * Manage FlexMatch resources -// -// CreateMatchmakingConfiguration -- Create a matchmaking configuration with -// instructions for building a player group and placing in a new game session. -// -// -// DescribeMatchmakingConfigurations -- Retrieve matchmaking configurations -// defined a Amazon GameLift region. -// -// UpdateMatchmakingConfiguration -- Change settings for matchmaking configuration. -// queue. -// -// DeleteMatchmakingConfiguration -- Remove a matchmaking configuration from -// the region. -// -// CreateMatchmakingRuleSet -- Create a set of rules to use when searching for -// player matches. -// -// DescribeMatchmakingRuleSets -- Retrieve matchmaking rule sets defined in -// a Amazon GameLift region. -// -// ValidateMatchmakingRuleSet -- Verify syntax for a set of matchmaking rules. -// -// See https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01 for more information on this service. -// -// See gamelift package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/gamelift/ -// -// Using the Client -// -// To contact Amazon GameLift with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. 
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon GameLift client GameLift for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/gamelift/#New -package gamelift diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go deleted file mode 100644 index d04e78d0d2c..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go +++ /dev/null @@ -1,102 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package gamelift - -const ( - - // ErrCodeConflictException for service response error code - // "ConflictException". - // - // The requested operation would cause a conflict with the current state of - // a service resource associated with the request. Resolve the conflict before - // retrying this request. - ErrCodeConflictException = "ConflictException" - - // ErrCodeFleetCapacityExceededException for service response error code - // "FleetCapacityExceededException". - // - // The specified fleet has no available instances to fulfill a CreateGameSession - // request. Clients can retry such requests immediately or after a waiting period. - ErrCodeFleetCapacityExceededException = "FleetCapacityExceededException" - - // ErrCodeGameSessionFullException for service response error code - // "GameSessionFullException". - // - // The game instance is currently full and cannot allow the requested player(s) - // to join. Clients can retry such requests immediately or after a waiting period. - ErrCodeGameSessionFullException = "GameSessionFullException" - - // ErrCodeIdempotentParameterMismatchException for service response error code - // "IdempotentParameterMismatchException". - // - // A game session with this custom ID string already exists in this fleet. Resolve - // this conflict before retrying this request. 
- ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException" - - // ErrCodeInternalServiceException for service response error code - // "InternalServiceException". - // - // The service encountered an unrecoverable internal failure while processing - // the request. Clients can retry such requests immediately or after a waiting - // period. - ErrCodeInternalServiceException = "InternalServiceException" - - // ErrCodeInvalidFleetStatusException for service response error code - // "InvalidFleetStatusException". - // - // The requested operation would cause a conflict with the current state of - // a resource associated with the request and/or the fleet. Resolve the conflict - // before retrying. - ErrCodeInvalidFleetStatusException = "InvalidFleetStatusException" - - // ErrCodeInvalidGameSessionStatusException for service response error code - // "InvalidGameSessionStatusException". - // - // The requested operation would cause a conflict with the current state of - // a resource associated with the request and/or the game instance. Resolve - // the conflict before retrying. - ErrCodeInvalidGameSessionStatusException = "InvalidGameSessionStatusException" - - // ErrCodeInvalidRequestException for service response error code - // "InvalidRequestException". - // - // One or more parameter values in the request are invalid. Correct the invalid - // parameter values before retrying. - ErrCodeInvalidRequestException = "InvalidRequestException" - - // ErrCodeLimitExceededException for service response error code - // "LimitExceededException". - // - // The requested operation would cause the resource to exceed the allowed service - // limit. Resolve the issue before retrying. - ErrCodeLimitExceededException = "LimitExceededException" - - // ErrCodeNotFoundException for service response error code - // "NotFoundException". - // - // A service resource associated with the request could not be found. Clients - // should not retry such requests. 
- ErrCodeNotFoundException = "NotFoundException" - - // ErrCodeTerminalRoutingStrategyException for service response error code - // "TerminalRoutingStrategyException". - // - // The service is unable to resolve the routing for a particular alias because - // it has a terminal RoutingStrategy associated with it. The message returned - // in this exception is the message defined in the routing strategy itself. - // Such requests should only be retried if the routing strategy for the specified - // alias is modified. - ErrCodeTerminalRoutingStrategyException = "TerminalRoutingStrategyException" - - // ErrCodeUnauthorizedException for service response error code - // "UnauthorizedException". - // - // The client failed authentication. Clients should not retry such requests. - ErrCodeUnauthorizedException = "UnauthorizedException" - - // ErrCodeUnsupportedRegionException for service response error code - // "UnsupportedRegionException". - // - // The requested operation is not supported in the region specified. - ErrCodeUnsupportedRegionException = "UnsupportedRegionException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go deleted file mode 100644 index b79ac20478d..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package gamelift - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// GameLift provides the API operation methods for making requests to -// Amazon GameLift. See this package's package overview docs -// for details on the service. -// -// GameLift methods are safe to use concurrently. 
It is not safe to -// modify mutate any of the struct's properties though. -type GameLift struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "gamelift" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the GameLift client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a GameLift client from just a session. -// svc := gamelift.New(mySession) -// -// // Create a GameLift client with additional configuration -// svc := gamelift.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *GameLift { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GameLift { - svc := &GameLift{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2015-10-01", - JSONVersion: "1.1", - TargetPrefix: "GameLift", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a GameLift operation and runs any -// custom request initialization. -func (c *GameLift) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glue/api.go b/vendor/github.com/aws/aws-sdk-go/service/glue/api.go deleted file mode 100644 index 8caea4d59e3..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/glue/api.go +++ /dev/null @@ -1,18679 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package glue - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opBatchCreatePartition = "BatchCreatePartition" - -// BatchCreatePartitionRequest generates a "aws/request.Request" representing the -// client's request for the BatchCreatePartition operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See BatchCreatePartition for more information on using the BatchCreatePartition -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the BatchCreatePartitionRequest method. -// req, resp := client.BatchCreatePartitionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchCreatePartition -func (c *Glue) BatchCreatePartitionRequest(input *BatchCreatePartitionInput) (req *request.Request, output *BatchCreatePartitionOutput) { - op := &request.Operation{ - Name: opBatchCreatePartition, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &BatchCreatePartitionInput{} - } - - output = &BatchCreatePartitionOutput{} - req = c.newRequest(op, input, output) - return -} - -// BatchCreatePartition API operation for AWS Glue. -// -// Creates one or more partitions in a batch operation. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation BatchCreatePartition for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeAlreadyExistsException "AlreadyExistsException" -// A resource to be created or added already exists. 
-// -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchCreatePartition -func (c *Glue) BatchCreatePartition(input *BatchCreatePartitionInput) (*BatchCreatePartitionOutput, error) { - req, out := c.BatchCreatePartitionRequest(input) - return out, req.Send() -} - -// BatchCreatePartitionWithContext is the same as BatchCreatePartition with the addition of -// the ability to pass a context and additional request options. -// -// See BatchCreatePartition for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) BatchCreatePartitionWithContext(ctx aws.Context, input *BatchCreatePartitionInput, opts ...request.Option) (*BatchCreatePartitionOutput, error) { - req, out := c.BatchCreatePartitionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opBatchDeleteConnection = "BatchDeleteConnection" - -// BatchDeleteConnectionRequest generates a "aws/request.Request" representing the -// client's request for the BatchDeleteConnection operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See BatchDeleteConnection for more information on using the BatchDeleteConnection -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the BatchDeleteConnectionRequest method. -// req, resp := client.BatchDeleteConnectionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeleteConnection -func (c *Glue) BatchDeleteConnectionRequest(input *BatchDeleteConnectionInput) (req *request.Request, output *BatchDeleteConnectionOutput) { - op := &request.Operation{ - Name: opBatchDeleteConnection, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &BatchDeleteConnectionInput{} - } - - output = &BatchDeleteConnectionOutput{} - req = c.newRequest(op, input, output) - return -} - -// BatchDeleteConnection API operation for AWS Glue. -// -// Deletes a list of connection definitions from the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation BatchDeleteConnection for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeleteConnection -func (c *Glue) BatchDeleteConnection(input *BatchDeleteConnectionInput) (*BatchDeleteConnectionOutput, error) { - req, out := c.BatchDeleteConnectionRequest(input) - return out, req.Send() -} - -// BatchDeleteConnectionWithContext is the same as BatchDeleteConnection with the addition of -// the ability to pass a context and additional request options. -// -// See BatchDeleteConnection for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) BatchDeleteConnectionWithContext(ctx aws.Context, input *BatchDeleteConnectionInput, opts ...request.Option) (*BatchDeleteConnectionOutput, error) { - req, out := c.BatchDeleteConnectionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opBatchDeletePartition = "BatchDeletePartition" - -// BatchDeletePartitionRequest generates a "aws/request.Request" representing the -// client's request for the BatchDeletePartition operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See BatchDeletePartition for more information on using the BatchDeletePartition -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the BatchDeletePartitionRequest method. 
-// req, resp := client.BatchDeletePartitionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeletePartition -func (c *Glue) BatchDeletePartitionRequest(input *BatchDeletePartitionInput) (req *request.Request, output *BatchDeletePartitionOutput) { - op := &request.Operation{ - Name: opBatchDeletePartition, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &BatchDeletePartitionInput{} - } - - output = &BatchDeletePartitionOutput{} - req = c.newRequest(op, input, output) - return -} - -// BatchDeletePartition API operation for AWS Glue. -// -// Deletes one or more partitions in a batch operation. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation BatchDeletePartition for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeletePartition -func (c *Glue) BatchDeletePartition(input *BatchDeletePartitionInput) (*BatchDeletePartitionOutput, error) { - req, out := c.BatchDeletePartitionRequest(input) - return out, req.Send() -} - -// BatchDeletePartitionWithContext is the same as BatchDeletePartition with the addition of -// the ability to pass a context and additional request options. 
-// -// See BatchDeletePartition for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) BatchDeletePartitionWithContext(ctx aws.Context, input *BatchDeletePartitionInput, opts ...request.Option) (*BatchDeletePartitionOutput, error) { - req, out := c.BatchDeletePartitionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opBatchDeleteTable = "BatchDeleteTable" - -// BatchDeleteTableRequest generates a "aws/request.Request" representing the -// client's request for the BatchDeleteTable operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See BatchDeleteTable for more information on using the BatchDeleteTable -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the BatchDeleteTableRequest method. 
-// req, resp := client.BatchDeleteTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeleteTable -func (c *Glue) BatchDeleteTableRequest(input *BatchDeleteTableInput) (req *request.Request, output *BatchDeleteTableOutput) { - op := &request.Operation{ - Name: opBatchDeleteTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &BatchDeleteTableInput{} - } - - output = &BatchDeleteTableOutput{} - req = c.newRequest(op, input, output) - return -} - -// BatchDeleteTable API operation for AWS Glue. -// -// Deletes multiple tables at once. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation BatchDeleteTable for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeleteTable -func (c *Glue) BatchDeleteTable(input *BatchDeleteTableInput) (*BatchDeleteTableOutput, error) { - req, out := c.BatchDeleteTableRequest(input) - return out, req.Send() -} - -// BatchDeleteTableWithContext is the same as BatchDeleteTable with the addition of -// the ability to pass a context and additional request options. -// -// See BatchDeleteTable for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) BatchDeleteTableWithContext(ctx aws.Context, input *BatchDeleteTableInput, opts ...request.Option) (*BatchDeleteTableOutput, error) { - req, out := c.BatchDeleteTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opBatchGetPartition = "BatchGetPartition" - -// BatchGetPartitionRequest generates a "aws/request.Request" representing the -// client's request for the BatchGetPartition operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See BatchGetPartition for more information on using the BatchGetPartition -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the BatchGetPartitionRequest method. 
-// req, resp := client.BatchGetPartitionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchGetPartition -func (c *Glue) BatchGetPartitionRequest(input *BatchGetPartitionInput) (req *request.Request, output *BatchGetPartitionOutput) { - op := &request.Operation{ - Name: opBatchGetPartition, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &BatchGetPartitionInput{} - } - - output = &BatchGetPartitionOutput{} - req = c.newRequest(op, input, output) - return -} - -// BatchGetPartition API operation for AWS Glue. -// -// Retrieves partitions in a batch request. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation BatchGetPartition for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchGetPartition -func (c *Glue) BatchGetPartition(input *BatchGetPartitionInput) (*BatchGetPartitionOutput, error) { - req, out := c.BatchGetPartitionRequest(input) - return out, req.Send() -} - -// BatchGetPartitionWithContext is the same as BatchGetPartition with the addition of -// the ability to pass a context and additional request options. -// -// See BatchGetPartition for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) BatchGetPartitionWithContext(ctx aws.Context, input *BatchGetPartitionInput, opts ...request.Option) (*BatchGetPartitionOutput, error) { - req, out := c.BatchGetPartitionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opBatchStopJobRun = "BatchStopJobRun" - -// BatchStopJobRunRequest generates a "aws/request.Request" representing the -// client's request for the BatchStopJobRun operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See BatchStopJobRun for more information on using the BatchStopJobRun -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the BatchStopJobRunRequest method. 
-// req, resp := client.BatchStopJobRunRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchStopJobRun -func (c *Glue) BatchStopJobRunRequest(input *BatchStopJobRunInput) (req *request.Request, output *BatchStopJobRunOutput) { - op := &request.Operation{ - Name: opBatchStopJobRun, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &BatchStopJobRunInput{} - } - - output = &BatchStopJobRunOutput{} - req = c.newRequest(op, input, output) - return -} - -// BatchStopJobRun API operation for AWS Glue. -// -// Stops a batch of job runs for a given job. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation BatchStopJobRun for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchStopJobRun -func (c *Glue) BatchStopJobRun(input *BatchStopJobRunInput) (*BatchStopJobRunOutput, error) { - req, out := c.BatchStopJobRunRequest(input) - return out, req.Send() -} - -// BatchStopJobRunWithContext is the same as BatchStopJobRun with the addition of -// the ability to pass a context and additional request options. -// -// See BatchStopJobRun for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) BatchStopJobRunWithContext(ctx aws.Context, input *BatchStopJobRunInput, opts ...request.Option) (*BatchStopJobRunOutput, error) { - req, out := c.BatchStopJobRunRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateClassifier = "CreateClassifier" - -// CreateClassifierRequest generates a "aws/request.Request" representing the -// client's request for the CreateClassifier operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateClassifier for more information on using the CreateClassifier -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateClassifierRequest method. -// req, resp := client.CreateClassifierRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateClassifier -func (c *Glue) CreateClassifierRequest(input *CreateClassifierInput) (req *request.Request, output *CreateClassifierOutput) { - op := &request.Operation{ - Name: opCreateClassifier, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateClassifierInput{} - } - - output = &CreateClassifierOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateClassifier API operation for AWS Glue. -// -// Creates a classifier in the user's account. 
This may be either a GrokClassifier -// or an XMLClassifier. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation CreateClassifier for usage and error information. -// -// Returned Error Codes: -// * ErrCodeAlreadyExistsException "AlreadyExistsException" -// A resource to be created or added already exists. -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateClassifier -func (c *Glue) CreateClassifier(input *CreateClassifierInput) (*CreateClassifierOutput, error) { - req, out := c.CreateClassifierRequest(input) - return out, req.Send() -} - -// CreateClassifierWithContext is the same as CreateClassifier with the addition of -// the ability to pass a context and additional request options. -// -// See CreateClassifier for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) CreateClassifierWithContext(ctx aws.Context, input *CreateClassifierInput, opts ...request.Option) (*CreateClassifierOutput, error) { - req, out := c.CreateClassifierRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateConnection = "CreateConnection" - -// CreateConnectionRequest generates a "aws/request.Request" representing the -// client's request for the CreateConnection operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateConnection for more information on using the CreateConnection -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateConnectionRequest method. -// req, resp := client.CreateConnectionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateConnection -func (c *Glue) CreateConnectionRequest(input *CreateConnectionInput) (req *request.Request, output *CreateConnectionOutput) { - op := &request.Operation{ - Name: opCreateConnection, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateConnectionInput{} - } - - output = &CreateConnectionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateConnection API operation for AWS Glue. -// -// Creates a connection definition in the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation CreateConnection for usage and error information. -// -// Returned Error Codes: -// * ErrCodeAlreadyExistsException "AlreadyExistsException" -// A resource to be created or added already exists. -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. 
-// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateConnection -func (c *Glue) CreateConnection(input *CreateConnectionInput) (*CreateConnectionOutput, error) { - req, out := c.CreateConnectionRequest(input) - return out, req.Send() -} - -// CreateConnectionWithContext is the same as CreateConnection with the addition of -// the ability to pass a context and additional request options. -// -// See CreateConnection for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) CreateConnectionWithContext(ctx aws.Context, input *CreateConnectionInput, opts ...request.Option) (*CreateConnectionOutput, error) { - req, out := c.CreateConnectionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateCrawler = "CreateCrawler" - -// CreateCrawlerRequest generates a "aws/request.Request" representing the -// client's request for the CreateCrawler operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateCrawler for more information on using the CreateCrawler -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateCrawlerRequest method. 
-// req, resp := client.CreateCrawlerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateCrawler -func (c *Glue) CreateCrawlerRequest(input *CreateCrawlerInput) (req *request.Request, output *CreateCrawlerOutput) { - op := &request.Operation{ - Name: opCreateCrawler, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateCrawlerInput{} - } - - output = &CreateCrawlerOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateCrawler API operation for AWS Glue. -// -// Creates a new crawler with specified targets, role, configuration, and optional -// schedule. At least one crawl target must be specified, in either the s3Targets -// or the jdbcTargets field. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation CreateCrawler for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeAlreadyExistsException "AlreadyExistsException" -// A resource to be created or added already exists. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateCrawler -func (c *Glue) CreateCrawler(input *CreateCrawlerInput) (*CreateCrawlerOutput, error) { - req, out := c.CreateCrawlerRequest(input) - return out, req.Send() -} - -// CreateCrawlerWithContext is the same as CreateCrawler with the addition of -// the ability to pass a context and additional request options. -// -// See CreateCrawler for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) CreateCrawlerWithContext(ctx aws.Context, input *CreateCrawlerInput, opts ...request.Option) (*CreateCrawlerOutput, error) { - req, out := c.CreateCrawlerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateDatabase = "CreateDatabase" - -// CreateDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the CreateDatabase operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateDatabase for more information on using the CreateDatabase -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateDatabaseRequest method. 
-// req, resp := client.CreateDatabaseRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateDatabase -func (c *Glue) CreateDatabaseRequest(input *CreateDatabaseInput) (req *request.Request, output *CreateDatabaseOutput) { - op := &request.Operation{ - Name: opCreateDatabase, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateDatabaseInput{} - } - - output = &CreateDatabaseOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateDatabase API operation for AWS Glue. -// -// Creates a new database in a Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation CreateDatabase for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeAlreadyExistsException "AlreadyExistsException" -// A resource to be created or added already exists. -// -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateDatabase -func (c *Glue) CreateDatabase(input *CreateDatabaseInput) (*CreateDatabaseOutput, error) { - req, out := c.CreateDatabaseRequest(input) - return out, req.Send() -} - -// CreateDatabaseWithContext is the same as CreateDatabase with the addition of -// the ability to pass a context and additional request options. -// -// See CreateDatabase for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) CreateDatabaseWithContext(ctx aws.Context, input *CreateDatabaseInput, opts ...request.Option) (*CreateDatabaseOutput, error) { - req, out := c.CreateDatabaseRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateDevEndpoint = "CreateDevEndpoint" - -// CreateDevEndpointRequest generates a "aws/request.Request" representing the -// client's request for the CreateDevEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateDevEndpoint for more information on using the CreateDevEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateDevEndpointRequest method. 
-// req, resp := client.CreateDevEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateDevEndpoint -func (c *Glue) CreateDevEndpointRequest(input *CreateDevEndpointInput) (req *request.Request, output *CreateDevEndpointOutput) { - op := &request.Operation{ - Name: opCreateDevEndpoint, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateDevEndpointInput{} - } - - output = &CreateDevEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateDevEndpoint API operation for AWS Glue. -// -// Creates a new DevEndpoint. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation CreateDevEndpoint for usage and error information. -// -// Returned Error Codes: -// * ErrCodeAccessDeniedException "AccessDeniedException" -// Access to a resource was denied. -// -// * ErrCodeAlreadyExistsException "AlreadyExistsException" -// A resource to be created or added already exists. -// -// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException" -// The same unique identifier was associated with two different records. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeValidationException "ValidationException" -// A value could not be validated. -// -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateDevEndpoint -func (c *Glue) CreateDevEndpoint(input *CreateDevEndpointInput) (*CreateDevEndpointOutput, error) { - req, out := c.CreateDevEndpointRequest(input) - return out, req.Send() -} - -// CreateDevEndpointWithContext is the same as CreateDevEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See CreateDevEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) CreateDevEndpointWithContext(ctx aws.Context, input *CreateDevEndpointInput, opts ...request.Option) (*CreateDevEndpointOutput, error) { - req, out := c.CreateDevEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateJob = "CreateJob" - -// CreateJobRequest generates a "aws/request.Request" representing the -// client's request for the CreateJob operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateJob for more information on using the CreateJob -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateJobRequest method. 
-// req, resp := client.CreateJobRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateJob -func (c *Glue) CreateJobRequest(input *CreateJobInput) (req *request.Request, output *CreateJobOutput) { - op := &request.Operation{ - Name: opCreateJob, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateJobInput{} - } - - output = &CreateJobOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateJob API operation for AWS Glue. -// -// Creates a new job. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation CreateJob for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException" -// The same unique identifier was associated with two different records. -// -// * ErrCodeAlreadyExistsException "AlreadyExistsException" -// A resource to be created or added already exists. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateJob -func (c *Glue) CreateJob(input *CreateJobInput) (*CreateJobOutput, error) { - req, out := c.CreateJobRequest(input) - return out, req.Send() -} - -// CreateJobWithContext is the same as CreateJob with the addition of -// the ability to pass a context and additional request options. -// -// See CreateJob for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) CreateJobWithContext(ctx aws.Context, input *CreateJobInput, opts ...request.Option) (*CreateJobOutput, error) { - req, out := c.CreateJobRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreatePartition = "CreatePartition" - -// CreatePartitionRequest generates a "aws/request.Request" representing the -// client's request for the CreatePartition operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreatePartition for more information on using the CreatePartition -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreatePartitionRequest method. 
-// req, resp := client.CreatePartitionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreatePartition -func (c *Glue) CreatePartitionRequest(input *CreatePartitionInput) (req *request.Request, output *CreatePartitionOutput) { - op := &request.Operation{ - Name: opCreatePartition, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreatePartitionInput{} - } - - output = &CreatePartitionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreatePartition API operation for AWS Glue. -// -// Creates a new partition. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation CreatePartition for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeAlreadyExistsException "AlreadyExistsException" -// A resource to be created or added already exists. -// -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreatePartition -func (c *Glue) CreatePartition(input *CreatePartitionInput) (*CreatePartitionOutput, error) { - req, out := c.CreatePartitionRequest(input) - return out, req.Send() -} - -// CreatePartitionWithContext is the same as CreatePartition with the addition of -// the ability to pass a context and additional request options. -// -// See CreatePartition for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) CreatePartitionWithContext(ctx aws.Context, input *CreatePartitionInput, opts ...request.Option) (*CreatePartitionOutput, error) { - req, out := c.CreatePartitionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateScript = "CreateScript" - -// CreateScriptRequest generates a "aws/request.Request" representing the -// client's request for the CreateScript operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateScript for more information on using the CreateScript -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateScriptRequest method. 
-// req, resp := client.CreateScriptRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateScript -func (c *Glue) CreateScriptRequest(input *CreateScriptInput) (req *request.Request, output *CreateScriptOutput) { - op := &request.Operation{ - Name: opCreateScript, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateScriptInput{} - } - - output = &CreateScriptOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateScript API operation for AWS Glue. -// -// Transforms a directed acyclic graph (DAG) into a Python script. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation CreateScript for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateScript -func (c *Glue) CreateScript(input *CreateScriptInput) (*CreateScriptOutput, error) { - req, out := c.CreateScriptRequest(input) - return out, req.Send() -} - -// CreateScriptWithContext is the same as CreateScript with the addition of -// the ability to pass a context and additional request options. -// -// See CreateScript for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) CreateScriptWithContext(ctx aws.Context, input *CreateScriptInput, opts ...request.Option) (*CreateScriptOutput, error) { - req, out := c.CreateScriptRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateTable = "CreateTable" - -// CreateTableRequest generates a "aws/request.Request" representing the -// client's request for the CreateTable operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateTable for more information on using the CreateTable -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateTableRequest method. -// req, resp := client.CreateTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateTable -func (c *Glue) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) { - op := &request.Operation{ - Name: opCreateTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateTableInput{} - } - - output = &CreateTableOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateTable API operation for AWS Glue. -// -// Creates a new table definition in the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation CreateTable for usage and error information. -// -// Returned Error Codes: -// * ErrCodeAlreadyExistsException "AlreadyExistsException" -// A resource to be created or added already exists. -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateTable -func (c *Glue) CreateTable(input *CreateTableInput) (*CreateTableOutput, error) { - req, out := c.CreateTableRequest(input) - return out, req.Send() -} - -// CreateTableWithContext is the same as CreateTable with the addition of -// the ability to pass a context and additional request options. -// -// See CreateTable for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) CreateTableWithContext(ctx aws.Context, input *CreateTableInput, opts ...request.Option) (*CreateTableOutput, error) { - req, out := c.CreateTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opCreateTrigger = "CreateTrigger" - -// CreateTriggerRequest generates a "aws/request.Request" representing the -// client's request for the CreateTrigger operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateTrigger for more information on using the CreateTrigger -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateTriggerRequest method. -// req, resp := client.CreateTriggerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateTrigger -func (c *Glue) CreateTriggerRequest(input *CreateTriggerInput) (req *request.Request, output *CreateTriggerOutput) { - op := &request.Operation{ - Name: opCreateTrigger, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateTriggerInput{} - } - - output = &CreateTriggerOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateTrigger API operation for AWS Glue. -// -// Creates a new trigger. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation CreateTrigger for usage and error information. -// -// Returned Error Codes: -// * ErrCodeAlreadyExistsException "AlreadyExistsException" -// A resource to be created or added already exists. 
-// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateTrigger -func (c *Glue) CreateTrigger(input *CreateTriggerInput) (*CreateTriggerOutput, error) { - req, out := c.CreateTriggerRequest(input) - return out, req.Send() -} - -// CreateTriggerWithContext is the same as CreateTrigger with the addition of -// the ability to pass a context and additional request options. -// -// See CreateTrigger for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) CreateTriggerWithContext(ctx aws.Context, input *CreateTriggerInput, opts ...request.Option) (*CreateTriggerOutput, error) { - req, out := c.CreateTriggerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateUserDefinedFunction = "CreateUserDefinedFunction" - -// CreateUserDefinedFunctionRequest generates a "aws/request.Request" representing the -// client's request for the CreateUserDefinedFunction operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See CreateUserDefinedFunction for more information on using the CreateUserDefinedFunction -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateUserDefinedFunctionRequest method. -// req, resp := client.CreateUserDefinedFunctionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateUserDefinedFunction -func (c *Glue) CreateUserDefinedFunctionRequest(input *CreateUserDefinedFunctionInput) (req *request.Request, output *CreateUserDefinedFunctionOutput) { - op := &request.Operation{ - Name: opCreateUserDefinedFunction, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateUserDefinedFunctionInput{} - } - - output = &CreateUserDefinedFunctionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateUserDefinedFunction API operation for AWS Glue. -// -// Creates a new function definition in the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation CreateUserDefinedFunction for usage and error information. -// -// Returned Error Codes: -// * ErrCodeAlreadyExistsException "AlreadyExistsException" -// A resource to be created or added already exists. -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. 
-// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateUserDefinedFunction -func (c *Glue) CreateUserDefinedFunction(input *CreateUserDefinedFunctionInput) (*CreateUserDefinedFunctionOutput, error) { - req, out := c.CreateUserDefinedFunctionRequest(input) - return out, req.Send() -} - -// CreateUserDefinedFunctionWithContext is the same as CreateUserDefinedFunction with the addition of -// the ability to pass a context and additional request options. -// -// See CreateUserDefinedFunction for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) CreateUserDefinedFunctionWithContext(ctx aws.Context, input *CreateUserDefinedFunctionInput, opts ...request.Option) (*CreateUserDefinedFunctionOutput, error) { - req, out := c.CreateUserDefinedFunctionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteClassifier = "DeleteClassifier" - -// DeleteClassifierRequest generates a "aws/request.Request" representing the -// client's request for the DeleteClassifier operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteClassifier for more information on using the DeleteClassifier -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteClassifierRequest method. -// req, resp := client.DeleteClassifierRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteClassifier -func (c *Glue) DeleteClassifierRequest(input *DeleteClassifierInput) (req *request.Request, output *DeleteClassifierOutput) { - op := &request.Operation{ - Name: opDeleteClassifier, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteClassifierInput{} - } - - output = &DeleteClassifierOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteClassifier API operation for AWS Glue. -// -// Removes a classifier from the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation DeleteClassifier for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteClassifier -func (c *Glue) DeleteClassifier(input *DeleteClassifierInput) (*DeleteClassifierOutput, error) { - req, out := c.DeleteClassifierRequest(input) - return out, req.Send() -} - -// DeleteClassifierWithContext is the same as DeleteClassifier with the addition of -// the ability to pass a context and additional request options. 
-// -// See DeleteClassifier for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) DeleteClassifierWithContext(ctx aws.Context, input *DeleteClassifierInput, opts ...request.Option) (*DeleteClassifierOutput, error) { - req, out := c.DeleteClassifierRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteConnection = "DeleteConnection" - -// DeleteConnectionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteConnection operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteConnection for more information on using the DeleteConnection -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteConnectionRequest method. 
-// req, resp := client.DeleteConnectionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteConnection -func (c *Glue) DeleteConnectionRequest(input *DeleteConnectionInput) (req *request.Request, output *DeleteConnectionOutput) { - op := &request.Operation{ - Name: opDeleteConnection, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteConnectionInput{} - } - - output = &DeleteConnectionOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteConnection API operation for AWS Glue. -// -// Deletes a connection from the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation DeleteConnection for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteConnection -func (c *Glue) DeleteConnection(input *DeleteConnectionInput) (*DeleteConnectionOutput, error) { - req, out := c.DeleteConnectionRequest(input) - return out, req.Send() -} - -// DeleteConnectionWithContext is the same as DeleteConnection with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteConnection for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) DeleteConnectionWithContext(ctx aws.Context, input *DeleteConnectionInput, opts ...request.Option) (*DeleteConnectionOutput, error) { - req, out := c.DeleteConnectionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteCrawler = "DeleteCrawler" - -// DeleteCrawlerRequest generates a "aws/request.Request" representing the -// client's request for the DeleteCrawler operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteCrawler for more information on using the DeleteCrawler -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteCrawlerRequest method. -// req, resp := client.DeleteCrawlerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteCrawler -func (c *Glue) DeleteCrawlerRequest(input *DeleteCrawlerInput) (req *request.Request, output *DeleteCrawlerOutput) { - op := &request.Operation{ - Name: opDeleteCrawler, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteCrawlerInput{} - } - - output = &DeleteCrawlerOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteCrawler API operation for AWS Glue. -// -// Removes a specified crawler from the Data Catalog, unless the crawler state -// is RUNNING. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation DeleteCrawler for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeCrawlerRunningException "CrawlerRunningException" -// The operation cannot be performed because the crawler is already running. -// -// * ErrCodeSchedulerTransitioningException "SchedulerTransitioningException" -// The specified scheduler is transitioning. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteCrawler -func (c *Glue) DeleteCrawler(input *DeleteCrawlerInput) (*DeleteCrawlerOutput, error) { - req, out := c.DeleteCrawlerRequest(input) - return out, req.Send() -} - -// DeleteCrawlerWithContext is the same as DeleteCrawler with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteCrawler for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) DeleteCrawlerWithContext(ctx aws.Context, input *DeleteCrawlerInput, opts ...request.Option) (*DeleteCrawlerOutput, error) { - req, out := c.DeleteCrawlerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteDatabase = "DeleteDatabase" - -// DeleteDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the DeleteDatabase operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteDatabase for more information on using the DeleteDatabase -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteDatabaseRequest method. -// req, resp := client.DeleteDatabaseRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteDatabase -func (c *Glue) DeleteDatabaseRequest(input *DeleteDatabaseInput) (req *request.Request, output *DeleteDatabaseOutput) { - op := &request.Operation{ - Name: opDeleteDatabase, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteDatabaseInput{} - } - - output = &DeleteDatabaseOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteDatabase API operation for AWS Glue. -// -// Removes a specified Database from a Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation DeleteDatabase for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. 
-// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteDatabase -func (c *Glue) DeleteDatabase(input *DeleteDatabaseInput) (*DeleteDatabaseOutput, error) { - req, out := c.DeleteDatabaseRequest(input) - return out, req.Send() -} - -// DeleteDatabaseWithContext is the same as DeleteDatabase with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteDatabase for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) DeleteDatabaseWithContext(ctx aws.Context, input *DeleteDatabaseInput, opts ...request.Option) (*DeleteDatabaseOutput, error) { - req, out := c.DeleteDatabaseRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteDevEndpoint = "DeleteDevEndpoint" - -// DeleteDevEndpointRequest generates a "aws/request.Request" representing the -// client's request for the DeleteDevEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteDevEndpoint for more information on using the DeleteDevEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the DeleteDevEndpointRequest method. -// req, resp := client.DeleteDevEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteDevEndpoint -func (c *Glue) DeleteDevEndpointRequest(input *DeleteDevEndpointInput) (req *request.Request, output *DeleteDevEndpointOutput) { - op := &request.Operation{ - Name: opDeleteDevEndpoint, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteDevEndpointInput{} - } - - output = &DeleteDevEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteDevEndpoint API operation for AWS Glue. -// -// Deletes a specified DevEndpoint. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation DeleteDevEndpoint for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteDevEndpoint -func (c *Glue) DeleteDevEndpoint(input *DeleteDevEndpointInput) (*DeleteDevEndpointOutput, error) { - req, out := c.DeleteDevEndpointRequest(input) - return out, req.Send() -} - -// DeleteDevEndpointWithContext is the same as DeleteDevEndpoint with the addition of -// the ability to pass a context and additional request options. 
-// -// See DeleteDevEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) DeleteDevEndpointWithContext(ctx aws.Context, input *DeleteDevEndpointInput, opts ...request.Option) (*DeleteDevEndpointOutput, error) { - req, out := c.DeleteDevEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteJob = "DeleteJob" - -// DeleteJobRequest generates a "aws/request.Request" representing the -// client's request for the DeleteJob operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteJob for more information on using the DeleteJob -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteJobRequest method. 
-// req, resp := client.DeleteJobRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteJob -func (c *Glue) DeleteJobRequest(input *DeleteJobInput) (req *request.Request, output *DeleteJobOutput) { - op := &request.Operation{ - Name: opDeleteJob, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteJobInput{} - } - - output = &DeleteJobOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteJob API operation for AWS Glue. -// -// Deletes a specified job. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation DeleteJob for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteJob -func (c *Glue) DeleteJob(input *DeleteJobInput) (*DeleteJobOutput, error) { - req, out := c.DeleteJobRequest(input) - return out, req.Send() -} - -// DeleteJobWithContext is the same as DeleteJob with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteJob for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *Glue) DeleteJobWithContext(ctx aws.Context, input *DeleteJobInput, opts ...request.Option) (*DeleteJobOutput, error) { - req, out := c.DeleteJobRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeletePartition = "DeletePartition" - -// DeletePartitionRequest generates a "aws/request.Request" representing the -// client's request for the DeletePartition operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeletePartition for more information on using the DeletePartition -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeletePartitionRequest method. -// req, resp := client.DeletePartitionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeletePartition -func (c *Glue) DeletePartitionRequest(input *DeletePartitionInput) (req *request.Request, output *DeletePartitionOutput) { - op := &request.Operation{ - Name: opDeletePartition, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeletePartitionInput{} - } - - output = &DeletePartitionOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeletePartition API operation for AWS Glue. -// -// Deletes a specified partition. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for AWS Glue's -// API operation DeletePartition for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeletePartition -func (c *Glue) DeletePartition(input *DeletePartitionInput) (*DeletePartitionOutput, error) { - req, out := c.DeletePartitionRequest(input) - return out, req.Send() -} - -// DeletePartitionWithContext is the same as DeletePartition with the addition of -// the ability to pass a context and additional request options. -// -// See DeletePartition for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) DeletePartitionWithContext(ctx aws.Context, input *DeletePartitionInput, opts ...request.Option) (*DeletePartitionOutput, error) { - req, out := c.DeletePartitionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteTable = "DeleteTable" - -// DeleteTableRequest generates a "aws/request.Request" representing the -// client's request for the DeleteTable operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DeleteTable for more information on using the DeleteTable -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteTableRequest method. -// req, resp := client.DeleteTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteTable -func (c *Glue) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) { - op := &request.Operation{ - Name: opDeleteTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteTableInput{} - } - - output = &DeleteTableOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteTable API operation for AWS Glue. -// -// Removes a table definition from the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation DeleteTable for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteTable -func (c *Glue) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, error) { - req, out := c.DeleteTableRequest(input) - return out, req.Send() -} - -// DeleteTableWithContext is the same as DeleteTable with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteTable for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) DeleteTableWithContext(ctx aws.Context, input *DeleteTableInput, opts ...request.Option) (*DeleteTableOutput, error) { - req, out := c.DeleteTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteTrigger = "DeleteTrigger" - -// DeleteTriggerRequest generates a "aws/request.Request" representing the -// client's request for the DeleteTrigger operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteTrigger for more information on using the DeleteTrigger -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteTriggerRequest method. 
-// req, resp := client.DeleteTriggerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteTrigger -func (c *Glue) DeleteTriggerRequest(input *DeleteTriggerInput) (req *request.Request, output *DeleteTriggerOutput) { - op := &request.Operation{ - Name: opDeleteTrigger, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteTriggerInput{} - } - - output = &DeleteTriggerOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteTrigger API operation for AWS Glue. -// -// Deletes a specified trigger. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation DeleteTrigger for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteTrigger -func (c *Glue) DeleteTrigger(input *DeleteTriggerInput) (*DeleteTriggerOutput, error) { - req, out := c.DeleteTriggerRequest(input) - return out, req.Send() -} - -// DeleteTriggerWithContext is the same as DeleteTrigger with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteTrigger for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) DeleteTriggerWithContext(ctx aws.Context, input *DeleteTriggerInput, opts ...request.Option) (*DeleteTriggerOutput, error) { - req, out := c.DeleteTriggerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteUserDefinedFunction = "DeleteUserDefinedFunction" - -// DeleteUserDefinedFunctionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteUserDefinedFunction operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteUserDefinedFunction for more information on using the DeleteUserDefinedFunction -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteUserDefinedFunctionRequest method. -// req, resp := client.DeleteUserDefinedFunctionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteUserDefinedFunction -func (c *Glue) DeleteUserDefinedFunctionRequest(input *DeleteUserDefinedFunctionInput) (req *request.Request, output *DeleteUserDefinedFunctionOutput) { - op := &request.Operation{ - Name: opDeleteUserDefinedFunction, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteUserDefinedFunctionInput{} - } - - output = &DeleteUserDefinedFunctionOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteUserDefinedFunction API operation for AWS Glue. 
-// -// Deletes an existing function definition from the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation DeleteUserDefinedFunction for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteUserDefinedFunction -func (c *Glue) DeleteUserDefinedFunction(input *DeleteUserDefinedFunctionInput) (*DeleteUserDefinedFunctionOutput, error) { - req, out := c.DeleteUserDefinedFunctionRequest(input) - return out, req.Send() -} - -// DeleteUserDefinedFunctionWithContext is the same as DeleteUserDefinedFunction with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteUserDefinedFunction for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) DeleteUserDefinedFunctionWithContext(ctx aws.Context, input *DeleteUserDefinedFunctionInput, opts ...request.Option) (*DeleteUserDefinedFunctionOutput, error) { - req, out := c.DeleteUserDefinedFunctionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opGetCatalogImportStatus = "GetCatalogImportStatus" - -// GetCatalogImportStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetCatalogImportStatus operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCatalogImportStatus for more information on using the GetCatalogImportStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetCatalogImportStatusRequest method. -// req, resp := client.GetCatalogImportStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCatalogImportStatus -func (c *Glue) GetCatalogImportStatusRequest(input *GetCatalogImportStatusInput) (req *request.Request, output *GetCatalogImportStatusOutput) { - op := &request.Operation{ - Name: opGetCatalogImportStatus, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCatalogImportStatusInput{} - } - - output = &GetCatalogImportStatusOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCatalogImportStatus API operation for AWS Glue. -// -// Retrieves the status of a migration operation. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for AWS Glue's -// API operation GetCatalogImportStatus for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCatalogImportStatus -func (c *Glue) GetCatalogImportStatus(input *GetCatalogImportStatusInput) (*GetCatalogImportStatusOutput, error) { - req, out := c.GetCatalogImportStatusRequest(input) - return out, req.Send() -} - -// GetCatalogImportStatusWithContext is the same as GetCatalogImportStatus with the addition of -// the ability to pass a context and additional request options. -// -// See GetCatalogImportStatus for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetCatalogImportStatusWithContext(ctx aws.Context, input *GetCatalogImportStatusInput, opts ...request.Option) (*GetCatalogImportStatusOutput, error) { - req, out := c.GetCatalogImportStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetClassifier = "GetClassifier" - -// GetClassifierRequest generates a "aws/request.Request" representing the -// client's request for the GetClassifier operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See GetClassifier for more information on using the GetClassifier -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetClassifierRequest method. -// req, resp := client.GetClassifierRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetClassifier -func (c *Glue) GetClassifierRequest(input *GetClassifierInput) (req *request.Request, output *GetClassifierOutput) { - op := &request.Operation{ - Name: opGetClassifier, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetClassifierInput{} - } - - output = &GetClassifierOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetClassifier API operation for AWS Glue. -// -// Retrieve a classifier by name. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetClassifier for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetClassifier -func (c *Glue) GetClassifier(input *GetClassifierInput) (*GetClassifierOutput, error) { - req, out := c.GetClassifierRequest(input) - return out, req.Send() -} - -// GetClassifierWithContext is the same as GetClassifier with the addition of -// the ability to pass a context and additional request options. 
-// -// See GetClassifier for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetClassifierWithContext(ctx aws.Context, input *GetClassifierInput, opts ...request.Option) (*GetClassifierOutput, error) { - req, out := c.GetClassifierRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetClassifiers = "GetClassifiers" - -// GetClassifiersRequest generates a "aws/request.Request" representing the -// client's request for the GetClassifiers operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetClassifiers for more information on using the GetClassifiers -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetClassifiersRequest method. 
-// req, resp := client.GetClassifiersRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetClassifiers -func (c *Glue) GetClassifiersRequest(input *GetClassifiersInput) (req *request.Request, output *GetClassifiersOutput) { - op := &request.Operation{ - Name: opGetClassifiers, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetClassifiersInput{} - } - - output = &GetClassifiersOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetClassifiers API operation for AWS Glue. -// -// Lists all classifier objects in the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetClassifiers for usage and error information. -// -// Returned Error Codes: -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetClassifiers -func (c *Glue) GetClassifiers(input *GetClassifiersInput) (*GetClassifiersOutput, error) { - req, out := c.GetClassifiersRequest(input) - return out, req.Send() -} - -// GetClassifiersWithContext is the same as GetClassifiers with the addition of -// the ability to pass a context and additional request options. -// -// See GetClassifiers for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetClassifiersWithContext(ctx aws.Context, input *GetClassifiersInput, opts ...request.Option) (*GetClassifiersOutput, error) { - req, out := c.GetClassifiersRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetClassifiersPages iterates over the pages of a GetClassifiers operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetClassifiers method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetClassifiers operation. -// pageNum := 0 -// err := client.GetClassifiersPages(params, -// func(page *GetClassifiersOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetClassifiersPages(input *GetClassifiersInput, fn func(*GetClassifiersOutput, bool) bool) error { - return c.GetClassifiersPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetClassifiersPagesWithContext same as GetClassifiersPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *Glue) GetClassifiersPagesWithContext(ctx aws.Context, input *GetClassifiersInput, fn func(*GetClassifiersOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetClassifiersInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetClassifiersRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetClassifiersOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetConnection = "GetConnection" - -// GetConnectionRequest generates a "aws/request.Request" representing the -// client's request for the GetConnection operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetConnection for more information on using the GetConnection -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetConnectionRequest method. 
-// req, resp := client.GetConnectionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetConnection -func (c *Glue) GetConnectionRequest(input *GetConnectionInput) (req *request.Request, output *GetConnectionOutput) { - op := &request.Operation{ - Name: opGetConnection, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetConnectionInput{} - } - - output = &GetConnectionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetConnection API operation for AWS Glue. -// -// Retrieves a connection definition from the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetConnection for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetConnection -func (c *Glue) GetConnection(input *GetConnectionInput) (*GetConnectionOutput, error) { - req, out := c.GetConnectionRequest(input) - return out, req.Send() -} - -// GetConnectionWithContext is the same as GetConnection with the addition of -// the ability to pass a context and additional request options. -// -// See GetConnection for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *Glue) GetConnectionWithContext(ctx aws.Context, input *GetConnectionInput, opts ...request.Option) (*GetConnectionOutput, error) { - req, out := c.GetConnectionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetConnections = "GetConnections" - -// GetConnectionsRequest generates a "aws/request.Request" representing the -// client's request for the GetConnections operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetConnections for more information on using the GetConnections -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetConnectionsRequest method. -// req, resp := client.GetConnectionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetConnections -func (c *Glue) GetConnectionsRequest(input *GetConnectionsInput) (req *request.Request, output *GetConnectionsOutput) { - op := &request.Operation{ - Name: opGetConnections, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetConnectionsInput{} - } - - output = &GetConnectionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetConnections API operation for AWS Glue. -// -// Retrieves a list of connection definitions from the Data Catalog. 
-// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetConnections for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetConnections -func (c *Glue) GetConnections(input *GetConnectionsInput) (*GetConnectionsOutput, error) { - req, out := c.GetConnectionsRequest(input) - return out, req.Send() -} - -// GetConnectionsWithContext is the same as GetConnections with the addition of -// the ability to pass a context and additional request options. -// -// See GetConnections for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetConnectionsWithContext(ctx aws.Context, input *GetConnectionsInput, opts ...request.Option) (*GetConnectionsOutput, error) { - req, out := c.GetConnectionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetConnectionsPages iterates over the pages of a GetConnections operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetConnections method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. 
-// -// // Example iterating over at most 3 pages of a GetConnections operation. -// pageNum := 0 -// err := client.GetConnectionsPages(params, -// func(page *GetConnectionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetConnectionsPages(input *GetConnectionsInput, fn func(*GetConnectionsOutput, bool) bool) error { - return c.GetConnectionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetConnectionsPagesWithContext same as GetConnectionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetConnectionsPagesWithContext(ctx aws.Context, input *GetConnectionsInput, fn func(*GetConnectionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetConnectionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetConnectionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetConnectionsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetCrawler = "GetCrawler" - -// GetCrawlerRequest generates a "aws/request.Request" representing the -// client's request for the GetCrawler operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See GetCrawler for more information on using the GetCrawler -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetCrawlerRequest method. -// req, resp := client.GetCrawlerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawler -func (c *Glue) GetCrawlerRequest(input *GetCrawlerInput) (req *request.Request, output *GetCrawlerOutput) { - op := &request.Operation{ - Name: opGetCrawler, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCrawlerInput{} - } - - output = &GetCrawlerOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCrawler API operation for AWS Glue. -// -// Retrieves metadata for a specified crawler. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetCrawler for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawler -func (c *Glue) GetCrawler(input *GetCrawlerInput) (*GetCrawlerOutput, error) { - req, out := c.GetCrawlerRequest(input) - return out, req.Send() -} - -// GetCrawlerWithContext is the same as GetCrawler with the addition of -// the ability to pass a context and additional request options. -// -// See GetCrawler for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetCrawlerWithContext(ctx aws.Context, input *GetCrawlerInput, opts ...request.Option) (*GetCrawlerOutput, error) { - req, out := c.GetCrawlerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetCrawlerMetrics = "GetCrawlerMetrics" - -// GetCrawlerMetricsRequest generates a "aws/request.Request" representing the -// client's request for the GetCrawlerMetrics operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCrawlerMetrics for more information on using the GetCrawlerMetrics -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetCrawlerMetricsRequest method. 
-// req, resp := client.GetCrawlerMetricsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawlerMetrics -func (c *Glue) GetCrawlerMetricsRequest(input *GetCrawlerMetricsInput) (req *request.Request, output *GetCrawlerMetricsOutput) { - op := &request.Operation{ - Name: opGetCrawlerMetrics, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetCrawlerMetricsInput{} - } - - output = &GetCrawlerMetricsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCrawlerMetrics API operation for AWS Glue. -// -// Retrieves metrics about specified crawlers. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetCrawlerMetrics for usage and error information. -// -// Returned Error Codes: -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawlerMetrics -func (c *Glue) GetCrawlerMetrics(input *GetCrawlerMetricsInput) (*GetCrawlerMetricsOutput, error) { - req, out := c.GetCrawlerMetricsRequest(input) - return out, req.Send() -} - -// GetCrawlerMetricsWithContext is the same as GetCrawlerMetrics with the addition of -// the ability to pass a context and additional request options. -// -// See GetCrawlerMetrics for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetCrawlerMetricsWithContext(ctx aws.Context, input *GetCrawlerMetricsInput, opts ...request.Option) (*GetCrawlerMetricsOutput, error) { - req, out := c.GetCrawlerMetricsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetCrawlerMetricsPages iterates over the pages of a GetCrawlerMetrics operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetCrawlerMetrics method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetCrawlerMetrics operation. -// pageNum := 0 -// err := client.GetCrawlerMetricsPages(params, -// func(page *GetCrawlerMetricsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetCrawlerMetricsPages(input *GetCrawlerMetricsInput, fn func(*GetCrawlerMetricsOutput, bool) bool) error { - return c.GetCrawlerMetricsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetCrawlerMetricsPagesWithContext same as GetCrawlerMetricsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *Glue) GetCrawlerMetricsPagesWithContext(ctx aws.Context, input *GetCrawlerMetricsInput, fn func(*GetCrawlerMetricsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetCrawlerMetricsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetCrawlerMetricsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetCrawlerMetricsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetCrawlers = "GetCrawlers" - -// GetCrawlersRequest generates a "aws/request.Request" representing the -// client's request for the GetCrawlers operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCrawlers for more information on using the GetCrawlers -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetCrawlersRequest method. 
-// req, resp := client.GetCrawlersRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawlers -func (c *Glue) GetCrawlersRequest(input *GetCrawlersInput) (req *request.Request, output *GetCrawlersOutput) { - op := &request.Operation{ - Name: opGetCrawlers, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetCrawlersInput{} - } - - output = &GetCrawlersOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCrawlers API operation for AWS Glue. -// -// Retrieves metadata for all crawlers defined in the customer account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetCrawlers for usage and error information. -// -// Returned Error Codes: -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawlers -func (c *Glue) GetCrawlers(input *GetCrawlersInput) (*GetCrawlersOutput, error) { - req, out := c.GetCrawlersRequest(input) - return out, req.Send() -} - -// GetCrawlersWithContext is the same as GetCrawlers with the addition of -// the ability to pass a context and additional request options. -// -// See GetCrawlers for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetCrawlersWithContext(ctx aws.Context, input *GetCrawlersInput, opts ...request.Option) (*GetCrawlersOutput, error) { - req, out := c.GetCrawlersRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetCrawlersPages iterates over the pages of a GetCrawlers operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetCrawlers method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetCrawlers operation. -// pageNum := 0 -// err := client.GetCrawlersPages(params, -// func(page *GetCrawlersOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetCrawlersPages(input *GetCrawlersInput, fn func(*GetCrawlersOutput, bool) bool) error { - return c.GetCrawlersPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetCrawlersPagesWithContext same as GetCrawlersPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetCrawlersPagesWithContext(ctx aws.Context, input *GetCrawlersInput, fn func(*GetCrawlersOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetCrawlersInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetCrawlersRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetCrawlersOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetDatabase = "GetDatabase" - -// GetDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the GetDatabase operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetDatabase for more information on using the GetDatabase -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetDatabaseRequest method. -// req, resp := client.GetDatabaseRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDatabase -func (c *Glue) GetDatabaseRequest(input *GetDatabaseInput) (req *request.Request, output *GetDatabaseOutput) { - op := &request.Operation{ - Name: opGetDatabase, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetDatabaseInput{} - } - - output = &GetDatabaseOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetDatabase API operation for AWS Glue. -// -// Retrieves the definition of a specified database. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetDatabase for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDatabase -func (c *Glue) GetDatabase(input *GetDatabaseInput) (*GetDatabaseOutput, error) { - req, out := c.GetDatabaseRequest(input) - return out, req.Send() -} - -// GetDatabaseWithContext is the same as GetDatabase with the addition of -// the ability to pass a context and additional request options. -// -// See GetDatabase for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetDatabaseWithContext(ctx aws.Context, input *GetDatabaseInput, opts ...request.Option) (*GetDatabaseOutput, error) { - req, out := c.GetDatabaseRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetDatabases = "GetDatabases" - -// GetDatabasesRequest generates a "aws/request.Request" representing the -// client's request for the GetDatabases operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetDatabases for more information on using the GetDatabases -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetDatabasesRequest method. -// req, resp := client.GetDatabasesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDatabases -func (c *Glue) GetDatabasesRequest(input *GetDatabasesInput) (req *request.Request, output *GetDatabasesOutput) { - op := &request.Operation{ - Name: opGetDatabases, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetDatabasesInput{} - } - - output = &GetDatabasesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetDatabases API operation for AWS Glue. -// -// Retrieves all Databases defined in a given Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetDatabases for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDatabases -func (c *Glue) GetDatabases(input *GetDatabasesInput) (*GetDatabasesOutput, error) { - req, out := c.GetDatabasesRequest(input) - return out, req.Send() -} - -// GetDatabasesWithContext is the same as GetDatabases with the addition of -// the ability to pass a context and additional request options. -// -// See GetDatabases for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetDatabasesWithContext(ctx aws.Context, input *GetDatabasesInput, opts ...request.Option) (*GetDatabasesOutput, error) { - req, out := c.GetDatabasesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetDatabasesPages iterates over the pages of a GetDatabases operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetDatabases method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetDatabases operation. -// pageNum := 0 -// err := client.GetDatabasesPages(params, -// func(page *GetDatabasesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetDatabasesPages(input *GetDatabasesInput, fn func(*GetDatabasesOutput, bool) bool) error { - return c.GetDatabasesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetDatabasesPagesWithContext same as GetDatabasesPages except -// it takes a Context and allows setting request options on the pages. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetDatabasesPagesWithContext(ctx aws.Context, input *GetDatabasesInput, fn func(*GetDatabasesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetDatabasesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetDatabasesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetDatabasesOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetDataflowGraph = "GetDataflowGraph" - -// GetDataflowGraphRequest generates a "aws/request.Request" representing the -// client's request for the GetDataflowGraph operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetDataflowGraph for more information on using the GetDataflowGraph -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetDataflowGraphRequest method. 
-// req, resp := client.GetDataflowGraphRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDataflowGraph -func (c *Glue) GetDataflowGraphRequest(input *GetDataflowGraphInput) (req *request.Request, output *GetDataflowGraphOutput) { - op := &request.Operation{ - Name: opGetDataflowGraph, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetDataflowGraphInput{} - } - - output = &GetDataflowGraphOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetDataflowGraph API operation for AWS Glue. -// -// Transforms a Python script into a directed acyclic graph (DAG). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetDataflowGraph for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDataflowGraph -func (c *Glue) GetDataflowGraph(input *GetDataflowGraphInput) (*GetDataflowGraphOutput, error) { - req, out := c.GetDataflowGraphRequest(input) - return out, req.Send() -} - -// GetDataflowGraphWithContext is the same as GetDataflowGraph with the addition of -// the ability to pass a context and additional request options. -// -// See GetDataflowGraph for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetDataflowGraphWithContext(ctx aws.Context, input *GetDataflowGraphInput, opts ...request.Option) (*GetDataflowGraphOutput, error) { - req, out := c.GetDataflowGraphRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetDevEndpoint = "GetDevEndpoint" - -// GetDevEndpointRequest generates a "aws/request.Request" representing the -// client's request for the GetDevEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetDevEndpoint for more information on using the GetDevEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetDevEndpointRequest method. -// req, resp := client.GetDevEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDevEndpoint -func (c *Glue) GetDevEndpointRequest(input *GetDevEndpointInput) (req *request.Request, output *GetDevEndpointOutput) { - op := &request.Operation{ - Name: opGetDevEndpoint, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetDevEndpointInput{} - } - - output = &GetDevEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetDevEndpoint API operation for AWS Glue. 
-// -// Retrieves information about a specified DevEndpoint. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetDevEndpoint for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDevEndpoint -func (c *Glue) GetDevEndpoint(input *GetDevEndpointInput) (*GetDevEndpointOutput, error) { - req, out := c.GetDevEndpointRequest(input) - return out, req.Send() -} - -// GetDevEndpointWithContext is the same as GetDevEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See GetDevEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetDevEndpointWithContext(ctx aws.Context, input *GetDevEndpointInput, opts ...request.Option) (*GetDevEndpointOutput, error) { - req, out := c.GetDevEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opGetDevEndpoints = "GetDevEndpoints" - -// GetDevEndpointsRequest generates a "aws/request.Request" representing the -// client's request for the GetDevEndpoints operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetDevEndpoints for more information on using the GetDevEndpoints -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetDevEndpointsRequest method. -// req, resp := client.GetDevEndpointsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDevEndpoints -func (c *Glue) GetDevEndpointsRequest(input *GetDevEndpointsInput) (req *request.Request, output *GetDevEndpointsOutput) { - op := &request.Operation{ - Name: opGetDevEndpoints, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetDevEndpointsInput{} - } - - output = &GetDevEndpointsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetDevEndpoints API operation for AWS Glue. -// -// Retrieves all the DevEndpoints in this AWS account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for AWS Glue's -// API operation GetDevEndpoints for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDevEndpoints -func (c *Glue) GetDevEndpoints(input *GetDevEndpointsInput) (*GetDevEndpointsOutput, error) { - req, out := c.GetDevEndpointsRequest(input) - return out, req.Send() -} - -// GetDevEndpointsWithContext is the same as GetDevEndpoints with the addition of -// the ability to pass a context and additional request options. -// -// See GetDevEndpoints for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetDevEndpointsWithContext(ctx aws.Context, input *GetDevEndpointsInput, opts ...request.Option) (*GetDevEndpointsOutput, error) { - req, out := c.GetDevEndpointsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetDevEndpointsPages iterates over the pages of a GetDevEndpoints operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetDevEndpoints method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. 
-// -// // Example iterating over at most 3 pages of a GetDevEndpoints operation. -// pageNum := 0 -// err := client.GetDevEndpointsPages(params, -// func(page *GetDevEndpointsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetDevEndpointsPages(input *GetDevEndpointsInput, fn func(*GetDevEndpointsOutput, bool) bool) error { - return c.GetDevEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetDevEndpointsPagesWithContext same as GetDevEndpointsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetDevEndpointsPagesWithContext(ctx aws.Context, input *GetDevEndpointsInput, fn func(*GetDevEndpointsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetDevEndpointsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetDevEndpointsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetDevEndpointsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetJob = "GetJob" - -// GetJobRequest generates a "aws/request.Request" representing the -// client's request for the GetJob operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See GetJob for more information on using the GetJob -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetJobRequest method. -// req, resp := client.GetJobRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJob -func (c *Glue) GetJobRequest(input *GetJobInput) (req *request.Request, output *GetJobOutput) { - op := &request.Operation{ - Name: opGetJob, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetJobInput{} - } - - output = &GetJobOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetJob API operation for AWS Glue. -// -// Retrieves an existing job definition. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetJob for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJob -func (c *Glue) GetJob(input *GetJobInput) (*GetJobOutput, error) { - req, out := c.GetJobRequest(input) - return out, req.Send() -} - -// GetJobWithContext is the same as GetJob with the addition of -// the ability to pass a context and additional request options. -// -// See GetJob for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetJobWithContext(ctx aws.Context, input *GetJobInput, opts ...request.Option) (*GetJobOutput, error) { - req, out := c.GetJobRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetJobRun = "GetJobRun" - -// GetJobRunRequest generates a "aws/request.Request" representing the -// client's request for the GetJobRun operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetJobRun for more information on using the GetJobRun -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetJobRunRequest method. 
-// req, resp := client.GetJobRunRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRun -func (c *Glue) GetJobRunRequest(input *GetJobRunInput) (req *request.Request, output *GetJobRunOutput) { - op := &request.Operation{ - Name: opGetJobRun, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetJobRunInput{} - } - - output = &GetJobRunOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetJobRun API operation for AWS Glue. -// -// Retrieves the metadata for a given job run. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetJobRun for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRun -func (c *Glue) GetJobRun(input *GetJobRunInput) (*GetJobRunOutput, error) { - req, out := c.GetJobRunRequest(input) - return out, req.Send() -} - -// GetJobRunWithContext is the same as GetJobRun with the addition of -// the ability to pass a context and additional request options. -// -// See GetJobRun for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetJobRunWithContext(ctx aws.Context, input *GetJobRunInput, opts ...request.Option) (*GetJobRunOutput, error) { - req, out := c.GetJobRunRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetJobRuns = "GetJobRuns" - -// GetJobRunsRequest generates a "aws/request.Request" representing the -// client's request for the GetJobRuns operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetJobRuns for more information on using the GetJobRuns -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetJobRunsRequest method. -// req, resp := client.GetJobRunsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRuns -func (c *Glue) GetJobRunsRequest(input *GetJobRunsInput) (req *request.Request, output *GetJobRunsOutput) { - op := &request.Operation{ - Name: opGetJobRuns, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetJobRunsInput{} - } - - output = &GetJobRunsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetJobRuns API operation for AWS Glue. 
-// -// Retrieves metadata for all runs of a given job. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetJobRuns for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRuns -func (c *Glue) GetJobRuns(input *GetJobRunsInput) (*GetJobRunsOutput, error) { - req, out := c.GetJobRunsRequest(input) - return out, req.Send() -} - -// GetJobRunsWithContext is the same as GetJobRuns with the addition of -// the ability to pass a context and additional request options. -// -// See GetJobRuns for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetJobRunsWithContext(ctx aws.Context, input *GetJobRunsInput, opts ...request.Option) (*GetJobRunsOutput, error) { - req, out := c.GetJobRunsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetJobRunsPages iterates over the pages of a GetJobRuns operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. 
-// -// See GetJobRuns method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetJobRuns operation. -// pageNum := 0 -// err := client.GetJobRunsPages(params, -// func(page *GetJobRunsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetJobRunsPages(input *GetJobRunsInput, fn func(*GetJobRunsOutput, bool) bool) error { - return c.GetJobRunsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetJobRunsPagesWithContext same as GetJobRunsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetJobRunsPagesWithContext(ctx aws.Context, input *GetJobRunsInput, fn func(*GetJobRunsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetJobRunsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetJobRunsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetJobRunsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetJobs = "GetJobs" - -// GetJobsRequest generates a "aws/request.Request" representing the -// client's request for the GetJobs operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See GetJobs for more information on using the GetJobs -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetJobsRequest method. -// req, resp := client.GetJobsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobs -func (c *Glue) GetJobsRequest(input *GetJobsInput) (req *request.Request, output *GetJobsOutput) { - op := &request.Operation{ - Name: opGetJobs, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetJobsInput{} - } - - output = &GetJobsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetJobs API operation for AWS Glue. -// -// Retrieves all current jobs. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetJobs for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobs -func (c *Glue) GetJobs(input *GetJobsInput) (*GetJobsOutput, error) { - req, out := c.GetJobsRequest(input) - return out, req.Send() -} - -// GetJobsWithContext is the same as GetJobs with the addition of -// the ability to pass a context and additional request options. -// -// See GetJobs for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetJobsWithContext(ctx aws.Context, input *GetJobsInput, opts ...request.Option) (*GetJobsOutput, error) { - req, out := c.GetJobsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetJobsPages iterates over the pages of a GetJobs operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetJobs method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetJobs operation. -// pageNum := 0 -// err := client.GetJobsPages(params, -// func(page *GetJobsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetJobsPages(input *GetJobsInput, fn func(*GetJobsOutput, bool) bool) error { - return c.GetJobsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetJobsPagesWithContext same as GetJobsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetJobsPagesWithContext(ctx aws.Context, input *GetJobsInput, fn func(*GetJobsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetJobsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetJobsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetJobsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetMapping = "GetMapping" - -// GetMappingRequest generates a "aws/request.Request" representing the -// client's request for the GetMapping operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetMapping for more information on using the GetMapping -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetMappingRequest method. 
-// req, resp := client.GetMappingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMapping -func (c *Glue) GetMappingRequest(input *GetMappingInput) (req *request.Request, output *GetMappingOutput) { - op := &request.Operation{ - Name: opGetMapping, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetMappingInput{} - } - - output = &GetMappingOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetMapping API operation for AWS Glue. -// -// Creates mappings. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetMapping for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMapping -func (c *Glue) GetMapping(input *GetMappingInput) (*GetMappingOutput, error) { - req, out := c.GetMappingRequest(input) - return out, req.Send() -} - -// GetMappingWithContext is the same as GetMapping with the addition of -// the ability to pass a context and additional request options. -// -// See GetMapping for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetMappingWithContext(ctx aws.Context, input *GetMappingInput, opts ...request.Option) (*GetMappingOutput, error) { - req, out := c.GetMappingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetPartition = "GetPartition" - -// GetPartitionRequest generates a "aws/request.Request" representing the -// client's request for the GetPartition operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetPartition for more information on using the GetPartition -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetPartitionRequest method. -// req, resp := client.GetPartitionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartition -func (c *Glue) GetPartitionRequest(input *GetPartitionInput) (req *request.Request, output *GetPartitionOutput) { - op := &request.Operation{ - Name: opGetPartition, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetPartitionInput{} - } - - output = &GetPartitionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetPartition API operation for AWS Glue. -// -// Retrieves information about a specified partition. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetPartition for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartition -func (c *Glue) GetPartition(input *GetPartitionInput) (*GetPartitionOutput, error) { - req, out := c.GetPartitionRequest(input) - return out, req.Send() -} - -// GetPartitionWithContext is the same as GetPartition with the addition of -// the ability to pass a context and additional request options. -// -// See GetPartition for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetPartitionWithContext(ctx aws.Context, input *GetPartitionInput, opts ...request.Option) (*GetPartitionOutput, error) { - req, out := c.GetPartitionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetPartitions = "GetPartitions" - -// GetPartitionsRequest generates a "aws/request.Request" representing the -// client's request for the GetPartitions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetPartitions for more information on using the GetPartitions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetPartitionsRequest method. -// req, resp := client.GetPartitionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitions -func (c *Glue) GetPartitionsRequest(input *GetPartitionsInput) (req *request.Request, output *GetPartitionsOutput) { - op := &request.Operation{ - Name: opGetPartitions, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetPartitionsInput{} - } - - output = &GetPartitionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetPartitions API operation for AWS Glue. -// -// Retrieves information about the partitions in a table. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetPartitions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. 
-// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitions -func (c *Glue) GetPartitions(input *GetPartitionsInput) (*GetPartitionsOutput, error) { - req, out := c.GetPartitionsRequest(input) - return out, req.Send() -} - -// GetPartitionsWithContext is the same as GetPartitions with the addition of -// the ability to pass a context and additional request options. -// -// See GetPartitions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetPartitionsWithContext(ctx aws.Context, input *GetPartitionsInput, opts ...request.Option) (*GetPartitionsOutput, error) { - req, out := c.GetPartitionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetPartitionsPages iterates over the pages of a GetPartitions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetPartitions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetPartitions operation. 
-// pageNum := 0 -// err := client.GetPartitionsPages(params, -// func(page *GetPartitionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetPartitionsPages(input *GetPartitionsInput, fn func(*GetPartitionsOutput, bool) bool) error { - return c.GetPartitionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetPartitionsPagesWithContext same as GetPartitionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetPartitionsPagesWithContext(ctx aws.Context, input *GetPartitionsInput, fn func(*GetPartitionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetPartitionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetPartitionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetPartitionsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetPlan = "GetPlan" - -// GetPlanRequest generates a "aws/request.Request" representing the -// client's request for the GetPlan operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetPlan for more information on using the GetPlan -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetPlanRequest method. -// req, resp := client.GetPlanRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPlan -func (c *Glue) GetPlanRequest(input *GetPlanInput) (req *request.Request, output *GetPlanOutput) { - op := &request.Operation{ - Name: opGetPlan, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetPlanInput{} - } - - output = &GetPlanOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetPlan API operation for AWS Glue. -// -// Gets a Python script to perform a specified mapping. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetPlan for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPlan -func (c *Glue) GetPlan(input *GetPlanInput) (*GetPlanOutput, error) { - req, out := c.GetPlanRequest(input) - return out, req.Send() -} - -// GetPlanWithContext is the same as GetPlan with the addition of -// the ability to pass a context and additional request options. -// -// See GetPlan for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetPlanWithContext(ctx aws.Context, input *GetPlanInput, opts ...request.Option) (*GetPlanOutput, error) { - req, out := c.GetPlanRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetTable = "GetTable" - -// GetTableRequest generates a "aws/request.Request" representing the -// client's request for the GetTable operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetTable for more information on using the GetTable -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetTableRequest method. -// req, resp := client.GetTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTable -func (c *Glue) GetTableRequest(input *GetTableInput) (req *request.Request, output *GetTableOutput) { - op := &request.Operation{ - Name: opGetTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetTableInput{} - } - - output = &GetTableOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetTable API operation for AWS Glue. -// -// Retrieves the Table definition in a Data Catalog for a specified table. 
-// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetTable for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTable -func (c *Glue) GetTable(input *GetTableInput) (*GetTableOutput, error) { - req, out := c.GetTableRequest(input) - return out, req.Send() -} - -// GetTableWithContext is the same as GetTable with the addition of -// the ability to pass a context and additional request options. -// -// See GetTable for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetTableWithContext(ctx aws.Context, input *GetTableInput, opts ...request.Option) (*GetTableOutput, error) { - req, out := c.GetTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetTableVersions = "GetTableVersions" - -// GetTableVersionsRequest generates a "aws/request.Request" representing the -// client's request for the GetTableVersions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetTableVersions for more information on using the GetTableVersions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetTableVersionsRequest method. -// req, resp := client.GetTableVersionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersions -func (c *Glue) GetTableVersionsRequest(input *GetTableVersionsInput) (req *request.Request, output *GetTableVersionsOutput) { - op := &request.Operation{ - Name: opGetTableVersions, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetTableVersionsInput{} - } - - output = &GetTableVersionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetTableVersions API operation for AWS Glue. -// -// Retrieves a list of strings that identify available versions of a specified -// table. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetTableVersions for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersions -func (c *Glue) GetTableVersions(input *GetTableVersionsInput) (*GetTableVersionsOutput, error) { - req, out := c.GetTableVersionsRequest(input) - return out, req.Send() -} - -// GetTableVersionsWithContext is the same as GetTableVersions with the addition of -// the ability to pass a context and additional request options. -// -// See GetTableVersions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetTableVersionsWithContext(ctx aws.Context, input *GetTableVersionsInput, opts ...request.Option) (*GetTableVersionsOutput, error) { - req, out := c.GetTableVersionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetTableVersionsPages iterates over the pages of a GetTableVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetTableVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetTableVersions operation. 
-// pageNum := 0 -// err := client.GetTableVersionsPages(params, -// func(page *GetTableVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetTableVersionsPages(input *GetTableVersionsInput, fn func(*GetTableVersionsOutput, bool) bool) error { - return c.GetTableVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetTableVersionsPagesWithContext same as GetTableVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetTableVersionsPagesWithContext(ctx aws.Context, input *GetTableVersionsInput, fn func(*GetTableVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetTableVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetTableVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTableVersionsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetTables = "GetTables" - -// GetTablesRequest generates a "aws/request.Request" representing the -// client's request for the GetTables operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetTables for more information on using the GetTables -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetTablesRequest method. -// req, resp := client.GetTablesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTables -func (c *Glue) GetTablesRequest(input *GetTablesInput) (req *request.Request, output *GetTablesOutput) { - op := &request.Operation{ - Name: opGetTables, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetTablesInput{} - } - - output = &GetTablesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetTables API operation for AWS Glue. -// -// Retrieves the definitions of some or all of the tables in a given Database. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetTables for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTables -func (c *Glue) GetTables(input *GetTablesInput) (*GetTablesOutput, error) { - req, out := c.GetTablesRequest(input) - return out, req.Send() -} - -// GetTablesWithContext is the same as GetTables with the addition of -// the ability to pass a context and additional request options. -// -// See GetTables for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetTablesWithContext(ctx aws.Context, input *GetTablesInput, opts ...request.Option) (*GetTablesOutput, error) { - req, out := c.GetTablesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetTablesPages iterates over the pages of a GetTables operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetTables method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetTables operation. -// pageNum := 0 -// err := client.GetTablesPages(params, -// func(page *GetTablesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetTablesPages(input *GetTablesInput, fn func(*GetTablesOutput, bool) bool) error { - return c.GetTablesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetTablesPagesWithContext same as GetTablesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetTablesPagesWithContext(ctx aws.Context, input *GetTablesInput, fn func(*GetTablesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetTablesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetTablesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTablesOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetTrigger = "GetTrigger" - -// GetTriggerRequest generates a "aws/request.Request" representing the -// client's request for the GetTrigger operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetTrigger for more information on using the GetTrigger -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetTriggerRequest method. 
-// req, resp := client.GetTriggerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTrigger -func (c *Glue) GetTriggerRequest(input *GetTriggerInput) (req *request.Request, output *GetTriggerOutput) { - op := &request.Operation{ - Name: opGetTrigger, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetTriggerInput{} - } - - output = &GetTriggerOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetTrigger API operation for AWS Glue. -// -// Retrieves the definition of a trigger. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetTrigger for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTrigger -func (c *Glue) GetTrigger(input *GetTriggerInput) (*GetTriggerOutput, error) { - req, out := c.GetTriggerRequest(input) - return out, req.Send() -} - -// GetTriggerWithContext is the same as GetTrigger with the addition of -// the ability to pass a context and additional request options. -// -// See GetTrigger for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetTriggerWithContext(ctx aws.Context, input *GetTriggerInput, opts ...request.Option) (*GetTriggerOutput, error) { - req, out := c.GetTriggerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetTriggers = "GetTriggers" - -// GetTriggersRequest generates a "aws/request.Request" representing the -// client's request for the GetTriggers operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetTriggers for more information on using the GetTriggers -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetTriggersRequest method. -// req, resp := client.GetTriggersRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTriggers -func (c *Glue) GetTriggersRequest(input *GetTriggersInput) (req *request.Request, output *GetTriggersOutput) { - op := &request.Operation{ - Name: opGetTriggers, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetTriggersInput{} - } - - output = &GetTriggersOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetTriggers API operation for AWS Glue. 
-// -// Gets all the triggers associated with a job. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetTriggers for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTriggers -func (c *Glue) GetTriggers(input *GetTriggersInput) (*GetTriggersOutput, error) { - req, out := c.GetTriggersRequest(input) - return out, req.Send() -} - -// GetTriggersWithContext is the same as GetTriggers with the addition of -// the ability to pass a context and additional request options. -// -// See GetTriggers for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetTriggersWithContext(ctx aws.Context, input *GetTriggersInput, opts ...request.Option) (*GetTriggersOutput, error) { - req, out := c.GetTriggersRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetTriggersPages iterates over the pages of a GetTriggers operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. 
-// -// See GetTriggers method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetTriggers operation. -// pageNum := 0 -// err := client.GetTriggersPages(params, -// func(page *GetTriggersOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetTriggersPages(input *GetTriggersInput, fn func(*GetTriggersOutput, bool) bool) error { - return c.GetTriggersPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetTriggersPagesWithContext same as GetTriggersPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetTriggersPagesWithContext(ctx aws.Context, input *GetTriggersInput, fn func(*GetTriggersOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetTriggersInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetTriggersRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTriggersOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetUserDefinedFunction = "GetUserDefinedFunction" - -// GetUserDefinedFunctionRequest generates a "aws/request.Request" representing the -// client's request for the GetUserDefinedFunction operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See GetUserDefinedFunction for more information on using the GetUserDefinedFunction -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetUserDefinedFunctionRequest method. -// req, resp := client.GetUserDefinedFunctionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunction -func (c *Glue) GetUserDefinedFunctionRequest(input *GetUserDefinedFunctionInput) (req *request.Request, output *GetUserDefinedFunctionOutput) { - op := &request.Operation{ - Name: opGetUserDefinedFunction, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetUserDefinedFunctionInput{} - } - - output = &GetUserDefinedFunctionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetUserDefinedFunction API operation for AWS Glue. -// -// Retrieves a specified function definition from the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetUserDefinedFunction for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. 
-// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunction -func (c *Glue) GetUserDefinedFunction(input *GetUserDefinedFunctionInput) (*GetUserDefinedFunctionOutput, error) { - req, out := c.GetUserDefinedFunctionRequest(input) - return out, req.Send() -} - -// GetUserDefinedFunctionWithContext is the same as GetUserDefinedFunction with the addition of -// the ability to pass a context and additional request options. -// -// See GetUserDefinedFunction for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetUserDefinedFunctionWithContext(ctx aws.Context, input *GetUserDefinedFunctionInput, opts ...request.Option) (*GetUserDefinedFunctionOutput, error) { - req, out := c.GetUserDefinedFunctionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetUserDefinedFunctions = "GetUserDefinedFunctions" - -// GetUserDefinedFunctionsRequest generates a "aws/request.Request" representing the -// client's request for the GetUserDefinedFunctions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetUserDefinedFunctions for more information on using the GetUserDefinedFunctions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetUserDefinedFunctionsRequest method. -// req, resp := client.GetUserDefinedFunctionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunctions -func (c *Glue) GetUserDefinedFunctionsRequest(input *GetUserDefinedFunctionsInput) (req *request.Request, output *GetUserDefinedFunctionsOutput) { - op := &request.Operation{ - Name: opGetUserDefinedFunctions, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetUserDefinedFunctionsInput{} - } - - output = &GetUserDefinedFunctionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetUserDefinedFunctions API operation for AWS Glue. -// -// Retrieves a multiple function definitions from the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetUserDefinedFunctions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunctions -func (c *Glue) GetUserDefinedFunctions(input *GetUserDefinedFunctionsInput) (*GetUserDefinedFunctionsOutput, error) { - req, out := c.GetUserDefinedFunctionsRequest(input) - return out, req.Send() -} - -// GetUserDefinedFunctionsWithContext is the same as GetUserDefinedFunctions with the addition of -// the ability to pass a context and additional request options. -// -// See GetUserDefinedFunctions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetUserDefinedFunctionsWithContext(ctx aws.Context, input *GetUserDefinedFunctionsInput, opts ...request.Option) (*GetUserDefinedFunctionsOutput, error) { - req, out := c.GetUserDefinedFunctionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetUserDefinedFunctionsPages iterates over the pages of a GetUserDefinedFunctions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetUserDefinedFunctions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetUserDefinedFunctions operation. 
-// pageNum := 0 -// err := client.GetUserDefinedFunctionsPages(params, -// func(page *GetUserDefinedFunctionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetUserDefinedFunctionsPages(input *GetUserDefinedFunctionsInput, fn func(*GetUserDefinedFunctionsOutput, bool) bool) error { - return c.GetUserDefinedFunctionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetUserDefinedFunctionsPagesWithContext same as GetUserDefinedFunctionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetUserDefinedFunctionsPagesWithContext(ctx aws.Context, input *GetUserDefinedFunctionsInput, fn func(*GetUserDefinedFunctionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetUserDefinedFunctionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetUserDefinedFunctionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetUserDefinedFunctionsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opImportCatalogToGlue = "ImportCatalogToGlue" - -// ImportCatalogToGlueRequest generates a "aws/request.Request" representing the -// client's request for the ImportCatalogToGlue operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See ImportCatalogToGlue for more information on using the ImportCatalogToGlue -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ImportCatalogToGlueRequest method. -// req, resp := client.ImportCatalogToGlueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlue -func (c *Glue) ImportCatalogToGlueRequest(input *ImportCatalogToGlueInput) (req *request.Request, output *ImportCatalogToGlueOutput) { - op := &request.Operation{ - Name: opImportCatalogToGlue, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ImportCatalogToGlueInput{} - } - - output = &ImportCatalogToGlueOutput{} - req = c.newRequest(op, input, output) - return -} - -// ImportCatalogToGlue API operation for AWS Glue. -// -// Imports an existing Athena Data Catalog to AWS Glue -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation ImportCatalogToGlue for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlue -func (c *Glue) ImportCatalogToGlue(input *ImportCatalogToGlueInput) (*ImportCatalogToGlueOutput, error) { - req, out := c.ImportCatalogToGlueRequest(input) - return out, req.Send() -} - -// ImportCatalogToGlueWithContext is the same as ImportCatalogToGlue with the addition of -// the ability to pass a context and additional request options. -// -// See ImportCatalogToGlue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) ImportCatalogToGlueWithContext(ctx aws.Context, input *ImportCatalogToGlueInput, opts ...request.Option) (*ImportCatalogToGlueOutput, error) { - req, out := c.ImportCatalogToGlueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opResetJobBookmark = "ResetJobBookmark" - -// ResetJobBookmarkRequest generates a "aws/request.Request" representing the -// client's request for the ResetJobBookmark operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ResetJobBookmark for more information on using the ResetJobBookmark -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ResetJobBookmarkRequest method. 
-// req, resp := client.ResetJobBookmarkRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmark -func (c *Glue) ResetJobBookmarkRequest(input *ResetJobBookmarkInput) (req *request.Request, output *ResetJobBookmarkOutput) { - op := &request.Operation{ - Name: opResetJobBookmark, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ResetJobBookmarkInput{} - } - - output = &ResetJobBookmarkOutput{} - req = c.newRequest(op, input, output) - return -} - -// ResetJobBookmark API operation for AWS Glue. -// -// Resets a bookmark entry. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation ResetJobBookmark for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmark -func (c *Glue) ResetJobBookmark(input *ResetJobBookmarkInput) (*ResetJobBookmarkOutput, error) { - req, out := c.ResetJobBookmarkRequest(input) - return out, req.Send() -} - -// ResetJobBookmarkWithContext is the same as ResetJobBookmark with the addition of -// the ability to pass a context and additional request options. -// -// See ResetJobBookmark for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) ResetJobBookmarkWithContext(ctx aws.Context, input *ResetJobBookmarkInput, opts ...request.Option) (*ResetJobBookmarkOutput, error) { - req, out := c.ResetJobBookmarkRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStartCrawler = "StartCrawler" - -// StartCrawlerRequest generates a "aws/request.Request" representing the -// client's request for the StartCrawler operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartCrawler for more information on using the StartCrawler -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StartCrawlerRequest method. -// req, resp := client.StartCrawlerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawler -func (c *Glue) StartCrawlerRequest(input *StartCrawlerInput) (req *request.Request, output *StartCrawlerOutput) { - op := &request.Operation{ - Name: opStartCrawler, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartCrawlerInput{} - } - - output = &StartCrawlerOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartCrawler API operation for AWS Glue. 
-// -// Starts a crawl using the specified crawler, regardless of what is scheduled. -// If the crawler is already running, does nothing. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation StartCrawler for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeCrawlerRunningException "CrawlerRunningException" -// The operation cannot be performed because the crawler is already running. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawler -func (c *Glue) StartCrawler(input *StartCrawlerInput) (*StartCrawlerOutput, error) { - req, out := c.StartCrawlerRequest(input) - return out, req.Send() -} - -// StartCrawlerWithContext is the same as StartCrawler with the addition of -// the ability to pass a context and additional request options. -// -// See StartCrawler for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) StartCrawlerWithContext(ctx aws.Context, input *StartCrawlerInput, opts ...request.Option) (*StartCrawlerOutput, error) { - req, out := c.StartCrawlerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opStartCrawlerSchedule = "StartCrawlerSchedule" - -// StartCrawlerScheduleRequest generates a "aws/request.Request" representing the -// client's request for the StartCrawlerSchedule operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartCrawlerSchedule for more information on using the StartCrawlerSchedule -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StartCrawlerScheduleRequest method. -// req, resp := client.StartCrawlerScheduleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawlerSchedule -func (c *Glue) StartCrawlerScheduleRequest(input *StartCrawlerScheduleInput) (req *request.Request, output *StartCrawlerScheduleOutput) { - op := &request.Operation{ - Name: opStartCrawlerSchedule, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartCrawlerScheduleInput{} - } - - output = &StartCrawlerScheduleOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartCrawlerSchedule API operation for AWS Glue. -// -// Changes the schedule state of the specified crawler to SCHEDULED, unless -// the crawler is already running or the schedule state is already SCHEDULED. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for AWS Glue's -// API operation StartCrawlerSchedule for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeSchedulerRunningException "SchedulerRunningException" -// The specified scheduler is already running. -// -// * ErrCodeSchedulerTransitioningException "SchedulerTransitioningException" -// The specified scheduler is transitioning. -// -// * ErrCodeNoScheduleException "NoScheduleException" -// There is no applicable schedule. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawlerSchedule -func (c *Glue) StartCrawlerSchedule(input *StartCrawlerScheduleInput) (*StartCrawlerScheduleOutput, error) { - req, out := c.StartCrawlerScheduleRequest(input) - return out, req.Send() -} - -// StartCrawlerScheduleWithContext is the same as StartCrawlerSchedule with the addition of -// the ability to pass a context and additional request options. -// -// See StartCrawlerSchedule for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) StartCrawlerScheduleWithContext(ctx aws.Context, input *StartCrawlerScheduleInput, opts ...request.Option) (*StartCrawlerScheduleOutput, error) { - req, out := c.StartCrawlerScheduleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStartJobRun = "StartJobRun" - -// StartJobRunRequest generates a "aws/request.Request" representing the -// client's request for the StartJobRun operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartJobRun for more information on using the StartJobRun -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StartJobRunRequest method. -// req, resp := client.StartJobRunRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartJobRun -func (c *Glue) StartJobRunRequest(input *StartJobRunInput) (req *request.Request, output *StartJobRunOutput) { - op := &request.Operation{ - Name: opStartJobRun, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartJobRunInput{} - } - - output = &StartJobRunOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartJobRun API operation for AWS Glue. -// -// Runs a job. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation StartJobRun for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. 
-// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. -// -// * ErrCodeConcurrentRunsExceededException "ConcurrentRunsExceededException" -// Too many jobs are being run concurrently. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartJobRun -func (c *Glue) StartJobRun(input *StartJobRunInput) (*StartJobRunOutput, error) { - req, out := c.StartJobRunRequest(input) - return out, req.Send() -} - -// StartJobRunWithContext is the same as StartJobRun with the addition of -// the ability to pass a context and additional request options. -// -// See StartJobRun for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) StartJobRunWithContext(ctx aws.Context, input *StartJobRunInput, opts ...request.Option) (*StartJobRunOutput, error) { - req, out := c.StartJobRunRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStartTrigger = "StartTrigger" - -// StartTriggerRequest generates a "aws/request.Request" representing the -// client's request for the StartTrigger operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartTrigger for more information on using the StartTrigger -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StartTriggerRequest method. -// req, resp := client.StartTriggerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartTrigger -func (c *Glue) StartTriggerRequest(input *StartTriggerInput) (req *request.Request, output *StartTriggerOutput) { - op := &request.Operation{ - Name: opStartTrigger, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartTriggerInput{} - } - - output = &StartTriggerOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartTrigger API operation for AWS Glue. -// -// Starts an existing trigger. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation StartTrigger for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. -// -// * ErrCodeConcurrentRunsExceededException "ConcurrentRunsExceededException" -// Too many jobs are being run concurrently. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartTrigger -func (c *Glue) StartTrigger(input *StartTriggerInput) (*StartTriggerOutput, error) { - req, out := c.StartTriggerRequest(input) - return out, req.Send() -} - -// StartTriggerWithContext is the same as StartTrigger with the addition of -// the ability to pass a context and additional request options. -// -// See StartTrigger for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) StartTriggerWithContext(ctx aws.Context, input *StartTriggerInput, opts ...request.Option) (*StartTriggerOutput, error) { - req, out := c.StartTriggerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStopCrawler = "StopCrawler" - -// StopCrawlerRequest generates a "aws/request.Request" representing the -// client's request for the StopCrawler operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StopCrawler for more information on using the StopCrawler -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StopCrawlerRequest method. 
-// req, resp := client.StopCrawlerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawler -func (c *Glue) StopCrawlerRequest(input *StopCrawlerInput) (req *request.Request, output *StopCrawlerOutput) { - op := &request.Operation{ - Name: opStopCrawler, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StopCrawlerInput{} - } - - output = &StopCrawlerOutput{} - req = c.newRequest(op, input, output) - return -} - -// StopCrawler API operation for AWS Glue. -// -// If the specified crawler is running, stops the crawl. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation StopCrawler for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeCrawlerNotRunningException "CrawlerNotRunningException" -// The specified crawler is not running. -// -// * ErrCodeCrawlerStoppingException "CrawlerStoppingException" -// The specified crawler is stopping. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawler -func (c *Glue) StopCrawler(input *StopCrawlerInput) (*StopCrawlerOutput, error) { - req, out := c.StopCrawlerRequest(input) - return out, req.Send() -} - -// StopCrawlerWithContext is the same as StopCrawler with the addition of -// the ability to pass a context and additional request options. -// -// See StopCrawler for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) StopCrawlerWithContext(ctx aws.Context, input *StopCrawlerInput, opts ...request.Option) (*StopCrawlerOutput, error) { - req, out := c.StopCrawlerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStopCrawlerSchedule = "StopCrawlerSchedule" - -// StopCrawlerScheduleRequest generates a "aws/request.Request" representing the -// client's request for the StopCrawlerSchedule operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StopCrawlerSchedule for more information on using the StopCrawlerSchedule -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StopCrawlerScheduleRequest method. 
-// req, resp := client.StopCrawlerScheduleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawlerSchedule -func (c *Glue) StopCrawlerScheduleRequest(input *StopCrawlerScheduleInput) (req *request.Request, output *StopCrawlerScheduleOutput) { - op := &request.Operation{ - Name: opStopCrawlerSchedule, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StopCrawlerScheduleInput{} - } - - output = &StopCrawlerScheduleOutput{} - req = c.newRequest(op, input, output) - return -} - -// StopCrawlerSchedule API operation for AWS Glue. -// -// Sets the schedule state of the specified crawler to NOT_SCHEDULED, but does -// not stop the crawler if it is already running. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation StopCrawlerSchedule for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeSchedulerNotRunningException "SchedulerNotRunningException" -// The specified scheduler is not running. -// -// * ErrCodeSchedulerTransitioningException "SchedulerTransitioningException" -// The specified scheduler is transitioning. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawlerSchedule -func (c *Glue) StopCrawlerSchedule(input *StopCrawlerScheduleInput) (*StopCrawlerScheduleOutput, error) { - req, out := c.StopCrawlerScheduleRequest(input) - return out, req.Send() -} - -// StopCrawlerScheduleWithContext is the same as StopCrawlerSchedule with the addition of -// the ability to pass a context and additional request options. -// -// See StopCrawlerSchedule for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) StopCrawlerScheduleWithContext(ctx aws.Context, input *StopCrawlerScheduleInput, opts ...request.Option) (*StopCrawlerScheduleOutput, error) { - req, out := c.StopCrawlerScheduleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStopTrigger = "StopTrigger" - -// StopTriggerRequest generates a "aws/request.Request" representing the -// client's request for the StopTrigger operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StopTrigger for more information on using the StopTrigger -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StopTriggerRequest method. 
-// req, resp := client.StopTriggerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopTrigger -func (c *Glue) StopTriggerRequest(input *StopTriggerInput) (req *request.Request, output *StopTriggerOutput) { - op := &request.Operation{ - Name: opStopTrigger, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StopTriggerInput{} - } - - output = &StopTriggerOutput{} - req = c.newRequest(op, input, output) - return -} - -// StopTrigger API operation for AWS Glue. -// -// Stops a specified trigger. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation StopTrigger for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopTrigger -func (c *Glue) StopTrigger(input *StopTriggerInput) (*StopTriggerOutput, error) { - req, out := c.StopTriggerRequest(input) - return out, req.Send() -} - -// StopTriggerWithContext is the same as StopTrigger with the addition of -// the ability to pass a context and additional request options. -// -// See StopTrigger for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) StopTriggerWithContext(ctx aws.Context, input *StopTriggerInput, opts ...request.Option) (*StopTriggerOutput, error) { - req, out := c.StopTriggerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateClassifier = "UpdateClassifier" - -// UpdateClassifierRequest generates a "aws/request.Request" representing the -// client's request for the UpdateClassifier operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateClassifier for more information on using the UpdateClassifier -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateClassifierRequest method. -// req, resp := client.UpdateClassifierRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateClassifier -func (c *Glue) UpdateClassifierRequest(input *UpdateClassifierInput) (req *request.Request, output *UpdateClassifierOutput) { - op := &request.Operation{ - Name: opUpdateClassifier, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateClassifierInput{} - } - - output = &UpdateClassifierOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateClassifier API operation for AWS Glue. -// -// Modifies an existing classifier (either a GrokClassifier or an XMLClassifier). 
-// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation UpdateClassifier for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeVersionMismatchException "VersionMismatchException" -// There was a version conflict. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateClassifier -func (c *Glue) UpdateClassifier(input *UpdateClassifierInput) (*UpdateClassifierOutput, error) { - req, out := c.UpdateClassifierRequest(input) - return out, req.Send() -} - -// UpdateClassifierWithContext is the same as UpdateClassifier with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateClassifier for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) UpdateClassifierWithContext(ctx aws.Context, input *UpdateClassifierInput, opts ...request.Option) (*UpdateClassifierOutput, error) { - req, out := c.UpdateClassifierRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateConnection = "UpdateConnection" - -// UpdateConnectionRequest generates a "aws/request.Request" representing the -// client's request for the UpdateConnection operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateConnection for more information on using the UpdateConnection -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateConnectionRequest method. -// req, resp := client.UpdateConnectionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateConnection -func (c *Glue) UpdateConnectionRequest(input *UpdateConnectionInput) (req *request.Request, output *UpdateConnectionOutput) { - op := &request.Operation{ - Name: opUpdateConnection, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateConnectionInput{} - } - - output = &UpdateConnectionOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateConnection API operation for AWS Glue. -// -// Updates a connection definition in the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation UpdateConnection for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. 
-// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateConnection -func (c *Glue) UpdateConnection(input *UpdateConnectionInput) (*UpdateConnectionOutput, error) { - req, out := c.UpdateConnectionRequest(input) - return out, req.Send() -} - -// UpdateConnectionWithContext is the same as UpdateConnection with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateConnection for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) UpdateConnectionWithContext(ctx aws.Context, input *UpdateConnectionInput, opts ...request.Option) (*UpdateConnectionOutput, error) { - req, out := c.UpdateConnectionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateCrawler = "UpdateCrawler" - -// UpdateCrawlerRequest generates a "aws/request.Request" representing the -// client's request for the UpdateCrawler operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateCrawler for more information on using the UpdateCrawler -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the UpdateCrawlerRequest method. -// req, resp := client.UpdateCrawlerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawler -func (c *Glue) UpdateCrawlerRequest(input *UpdateCrawlerInput) (req *request.Request, output *UpdateCrawlerOutput) { - op := &request.Operation{ - Name: opUpdateCrawler, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateCrawlerInput{} - } - - output = &UpdateCrawlerOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateCrawler API operation for AWS Glue. -// -// Updates a crawler. If a crawler is running, you must stop it using StopCrawler -// before updating it. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation UpdateCrawler for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeVersionMismatchException "VersionMismatchException" -// There was a version conflict. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeCrawlerRunningException "CrawlerRunningException" -// The operation cannot be performed because the crawler is already running. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawler -func (c *Glue) UpdateCrawler(input *UpdateCrawlerInput) (*UpdateCrawlerOutput, error) { - req, out := c.UpdateCrawlerRequest(input) - return out, req.Send() -} - -// UpdateCrawlerWithContext is the same as UpdateCrawler with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateCrawler for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) UpdateCrawlerWithContext(ctx aws.Context, input *UpdateCrawlerInput, opts ...request.Option) (*UpdateCrawlerOutput, error) { - req, out := c.UpdateCrawlerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateCrawlerSchedule = "UpdateCrawlerSchedule" - -// UpdateCrawlerScheduleRequest generates a "aws/request.Request" representing the -// client's request for the UpdateCrawlerSchedule operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateCrawlerSchedule for more information on using the UpdateCrawlerSchedule -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateCrawlerScheduleRequest method. 
-// req, resp := client.UpdateCrawlerScheduleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawlerSchedule -func (c *Glue) UpdateCrawlerScheduleRequest(input *UpdateCrawlerScheduleInput) (req *request.Request, output *UpdateCrawlerScheduleOutput) { - op := &request.Operation{ - Name: opUpdateCrawlerSchedule, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateCrawlerScheduleInput{} - } - - output = &UpdateCrawlerScheduleOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateCrawlerSchedule API operation for AWS Glue. -// -// Updates the schedule of a crawler using a cron expression. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation UpdateCrawlerSchedule for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeVersionMismatchException "VersionMismatchException" -// There was a version conflict. -// -// * ErrCodeSchedulerTransitioningException "SchedulerTransitioningException" -// The specified scheduler is transitioning. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawlerSchedule -func (c *Glue) UpdateCrawlerSchedule(input *UpdateCrawlerScheduleInput) (*UpdateCrawlerScheduleOutput, error) { - req, out := c.UpdateCrawlerScheduleRequest(input) - return out, req.Send() -} - -// UpdateCrawlerScheduleWithContext is the same as UpdateCrawlerSchedule with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateCrawlerSchedule for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) UpdateCrawlerScheduleWithContext(ctx aws.Context, input *UpdateCrawlerScheduleInput, opts ...request.Option) (*UpdateCrawlerScheduleOutput, error) { - req, out := c.UpdateCrawlerScheduleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateDatabase = "UpdateDatabase" - -// UpdateDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDatabase operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateDatabase for more information on using the UpdateDatabase -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateDatabaseRequest method. 
-// req, resp := client.UpdateDatabaseRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDatabase -func (c *Glue) UpdateDatabaseRequest(input *UpdateDatabaseInput) (req *request.Request, output *UpdateDatabaseOutput) { - op := &request.Operation{ - Name: opUpdateDatabase, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateDatabaseInput{} - } - - output = &UpdateDatabaseOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateDatabase API operation for AWS Glue. -// -// Updates an existing database definition in a Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation UpdateDatabase for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDatabase -func (c *Glue) UpdateDatabase(input *UpdateDatabaseInput) (*UpdateDatabaseOutput, error) { - req, out := c.UpdateDatabaseRequest(input) - return out, req.Send() -} - -// UpdateDatabaseWithContext is the same as UpdateDatabase with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateDatabase for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) UpdateDatabaseWithContext(ctx aws.Context, input *UpdateDatabaseInput, opts ...request.Option) (*UpdateDatabaseOutput, error) { - req, out := c.UpdateDatabaseRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateDevEndpoint = "UpdateDevEndpoint" - -// UpdateDevEndpointRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDevEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateDevEndpoint for more information on using the UpdateDevEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateDevEndpointRequest method. 
-// req, resp := client.UpdateDevEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDevEndpoint -func (c *Glue) UpdateDevEndpointRequest(input *UpdateDevEndpointInput) (req *request.Request, output *UpdateDevEndpointOutput) { - op := &request.Operation{ - Name: opUpdateDevEndpoint, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateDevEndpointInput{} - } - - output = &UpdateDevEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateDevEndpoint API operation for AWS Glue. -// -// Updates a specified DevEndpoint. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation UpdateDevEndpoint for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeValidationException "ValidationException" -// A value could not be validated. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDevEndpoint -func (c *Glue) UpdateDevEndpoint(input *UpdateDevEndpointInput) (*UpdateDevEndpointOutput, error) { - req, out := c.UpdateDevEndpointRequest(input) - return out, req.Send() -} - -// UpdateDevEndpointWithContext is the same as UpdateDevEndpoint with the addition of -// the ability to pass a context and additional request options. 
-// -// See UpdateDevEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) UpdateDevEndpointWithContext(ctx aws.Context, input *UpdateDevEndpointInput, opts ...request.Option) (*UpdateDevEndpointOutput, error) { - req, out := c.UpdateDevEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateJob = "UpdateJob" - -// UpdateJobRequest generates a "aws/request.Request" representing the -// client's request for the UpdateJob operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateJob for more information on using the UpdateJob -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateJobRequest method. 
-// req, resp := client.UpdateJobRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateJob -func (c *Glue) UpdateJobRequest(input *UpdateJobInput) (req *request.Request, output *UpdateJobOutput) { - op := &request.Operation{ - Name: opUpdateJob, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateJobInput{} - } - - output = &UpdateJobOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateJob API operation for AWS Glue. -// -// Updates an existing job definition. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation UpdateJob for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateJob -func (c *Glue) UpdateJob(input *UpdateJobInput) (*UpdateJobOutput, error) { - req, out := c.UpdateJobRequest(input) - return out, req.Send() -} - -// UpdateJobWithContext is the same as UpdateJob with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateJob for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) UpdateJobWithContext(ctx aws.Context, input *UpdateJobInput, opts ...request.Option) (*UpdateJobOutput, error) { - req, out := c.UpdateJobRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdatePartition = "UpdatePartition" - -// UpdatePartitionRequest generates a "aws/request.Request" representing the -// client's request for the UpdatePartition operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdatePartition for more information on using the UpdatePartition -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdatePartitionRequest method. -// req, resp := client.UpdatePartitionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdatePartition -func (c *Glue) UpdatePartitionRequest(input *UpdatePartitionInput) (req *request.Request, output *UpdatePartitionOutput) { - op := &request.Operation{ - Name: opUpdatePartition, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdatePartitionInput{} - } - - output = &UpdatePartitionOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdatePartition API operation for AWS Glue. -// -// Updates a partition. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation UpdatePartition for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdatePartition -func (c *Glue) UpdatePartition(input *UpdatePartitionInput) (*UpdatePartitionOutput, error) { - req, out := c.UpdatePartitionRequest(input) - return out, req.Send() -} - -// UpdatePartitionWithContext is the same as UpdatePartition with the addition of -// the ability to pass a context and additional request options. -// -// See UpdatePartition for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) UpdatePartitionWithContext(ctx aws.Context, input *UpdatePartitionInput, opts ...request.Option) (*UpdatePartitionOutput, error) { - req, out := c.UpdatePartitionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateTable = "UpdateTable" - -// UpdateTableRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTable operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateTable for more information on using the UpdateTable -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateTableRequest method. -// req, resp := client.UpdateTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTable -func (c *Glue) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { - op := &request.Operation{ - Name: opUpdateTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateTableInput{} - } - - output = &UpdateTableOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateTable API operation for AWS Glue. -// -// Updates a metadata table in the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation UpdateTable for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. 
-// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Two processes are trying to modify a resource simultaneously. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTable -func (c *Glue) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) { - req, out := c.UpdateTableRequest(input) - return out, req.Send() -} - -// UpdateTableWithContext is the same as UpdateTable with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateTable for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) UpdateTableWithContext(ctx aws.Context, input *UpdateTableInput, opts ...request.Option) (*UpdateTableOutput, error) { - req, out := c.UpdateTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateTrigger = "UpdateTrigger" - -// UpdateTriggerRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTrigger operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateTrigger for more information on using the UpdateTrigger -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the UpdateTriggerRequest method. -// req, resp := client.UpdateTriggerRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTrigger -func (c *Glue) UpdateTriggerRequest(input *UpdateTriggerInput) (req *request.Request, output *UpdateTriggerOutput) { - op := &request.Operation{ - Name: opUpdateTrigger, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateTriggerInput{} - } - - output = &UpdateTriggerOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateTrigger API operation for AWS Glue. -// -// Updates a trigger definition. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation UpdateTrigger for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTrigger -func (c *Glue) UpdateTrigger(input *UpdateTriggerInput) (*UpdateTriggerOutput, error) { - req, out := c.UpdateTriggerRequest(input) - return out, req.Send() -} - -// UpdateTriggerWithContext is the same as UpdateTrigger with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateTrigger for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) UpdateTriggerWithContext(ctx aws.Context, input *UpdateTriggerInput, opts ...request.Option) (*UpdateTriggerOutput, error) { - req, out := c.UpdateTriggerRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateUserDefinedFunction = "UpdateUserDefinedFunction" - -// UpdateUserDefinedFunctionRequest generates a "aws/request.Request" representing the -// client's request for the UpdateUserDefinedFunction operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateUserDefinedFunction for more information on using the UpdateUserDefinedFunction -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateUserDefinedFunctionRequest method. 
-// req, resp := client.UpdateUserDefinedFunctionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateUserDefinedFunction -func (c *Glue) UpdateUserDefinedFunctionRequest(input *UpdateUserDefinedFunctionInput) (req *request.Request, output *UpdateUserDefinedFunctionOutput) { - op := &request.Operation{ - Name: opUpdateUserDefinedFunction, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateUserDefinedFunctionInput{} - } - - output = &UpdateUserDefinedFunctionOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateUserDefinedFunction API operation for AWS Glue. -// -// Updates an existing function definition in the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation UpdateUserDefinedFunction for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateUserDefinedFunction -func (c *Glue) UpdateUserDefinedFunction(input *UpdateUserDefinedFunctionInput) (*UpdateUserDefinedFunctionOutput, error) { - req, out := c.UpdateUserDefinedFunctionRequest(input) - return out, req.Send() -} - -// UpdateUserDefinedFunctionWithContext is the same as UpdateUserDefinedFunction with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateUserDefinedFunction for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) UpdateUserDefinedFunctionWithContext(ctx aws.Context, input *UpdateUserDefinedFunctionInput, opts ...request.Option) (*UpdateUserDefinedFunctionOutput, error) { - req, out := c.UpdateUserDefinedFunctionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Defines an action to be initiated by a trigger. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Action -type Action struct { - _ struct{} `type:"structure"` - - // Arguments to be passed to the job. - Arguments map[string]*string `type:"map"` - - // The name of a job to be executed. - JobName *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s Action) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Action) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *Action) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Action"} - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetArguments sets the Arguments field's value. -func (s *Action) SetArguments(v map[string]*string) *Action { - s.Arguments = v - return s -} - -// SetJobName sets the JobName field's value. -func (s *Action) SetJobName(v string) *Action { - s.JobName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchCreatePartitionRequest -type BatchCreatePartitionInput struct { - _ struct{} `type:"structure"` - - // The ID of the catalog in which the partion is to be created. Currently, this - // should be the AWS account ID. - CatalogId *string `min:"1" type:"string"` - - // The name of the metadata database in which the partition is to be created. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A list of PartitionInput structures that define the partitions to be created. - // - // PartitionInputList is a required field - PartitionInputList []*PartitionInput `type:"list" required:"true"` - - // The name of the metadata table in which the partition is to be created. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s BatchCreatePartitionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchCreatePartitionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *BatchCreatePartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchCreatePartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionInputList == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionInputList")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.PartitionInputList != nil { - for i, v := range s.PartitionInputList { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionInputList", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *BatchCreatePartitionInput) SetCatalogId(v string) *BatchCreatePartitionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *BatchCreatePartitionInput) SetDatabaseName(v string) *BatchCreatePartitionInput { - s.DatabaseName = &v - return s -} - -// SetPartitionInputList sets the PartitionInputList field's value. -func (s *BatchCreatePartitionInput) SetPartitionInputList(v []*PartitionInput) *BatchCreatePartitionInput { - s.PartitionInputList = v - return s -} - -// SetTableName sets the TableName field's value. 
-func (s *BatchCreatePartitionInput) SetTableName(v string) *BatchCreatePartitionInput { - s.TableName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchCreatePartitionResponse -type BatchCreatePartitionOutput struct { - _ struct{} `type:"structure"` - - // Errors encountered when trying to create the requested partitions. - Errors []*PartitionError `type:"list"` -} - -// String returns the string representation -func (s BatchCreatePartitionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchCreatePartitionOutput) GoString() string { - return s.String() -} - -// SetErrors sets the Errors field's value. -func (s *BatchCreatePartitionOutput) SetErrors(v []*PartitionError) *BatchCreatePartitionOutput { - s.Errors = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeleteConnectionRequest -type BatchDeleteConnectionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which the connections reside. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // A list of names of the connections to delete. - // - // ConnectionNameList is a required field - ConnectionNameList []*string `type:"list" required:"true"` -} - -// String returns the string representation -func (s BatchDeleteConnectionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchDeleteConnectionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *BatchDeleteConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchDeleteConnectionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.ConnectionNameList == nil { - invalidParams.Add(request.NewErrParamRequired("ConnectionNameList")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *BatchDeleteConnectionInput) SetCatalogId(v string) *BatchDeleteConnectionInput { - s.CatalogId = &v - return s -} - -// SetConnectionNameList sets the ConnectionNameList field's value. -func (s *BatchDeleteConnectionInput) SetConnectionNameList(v []*string) *BatchDeleteConnectionInput { - s.ConnectionNameList = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeleteConnectionResponse -type BatchDeleteConnectionOutput struct { - _ struct{} `type:"structure"` - - // A map of the names of connections that were not successfully deleted to error - // details. - Errors map[string]*ErrorDetail `type:"map"` - - // A list of names of the connection definitions that were successfully deleted. - Succeeded []*string `type:"list"` -} - -// String returns the string representation -func (s BatchDeleteConnectionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchDeleteConnectionOutput) GoString() string { - return s.String() -} - -// SetErrors sets the Errors field's value. -func (s *BatchDeleteConnectionOutput) SetErrors(v map[string]*ErrorDetail) *BatchDeleteConnectionOutput { - s.Errors = v - return s -} - -// SetSucceeded sets the Succeeded field's value. 
-func (s *BatchDeleteConnectionOutput) SetSucceeded(v []*string) *BatchDeleteConnectionOutput { - s.Succeeded = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeletePartitionRequest -type BatchDeletePartitionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the partition to be deleted resides. If - // none is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database in which the table in question resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A list of PartitionInput structures that define the partitions to be deleted. - // - // PartitionsToDelete is a required field - PartitionsToDelete []*PartitionValueList `type:"list" required:"true"` - - // The name of the table where the partitions to be deleted is located. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s BatchDeletePartitionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchDeletePartitionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *BatchDeletePartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchDeletePartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionsToDelete == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionsToDelete")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.PartitionsToDelete != nil { - for i, v := range s.PartitionsToDelete { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionsToDelete", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *BatchDeletePartitionInput) SetCatalogId(v string) *BatchDeletePartitionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *BatchDeletePartitionInput) SetDatabaseName(v string) *BatchDeletePartitionInput { - s.DatabaseName = &v - return s -} - -// SetPartitionsToDelete sets the PartitionsToDelete field's value. -func (s *BatchDeletePartitionInput) SetPartitionsToDelete(v []*PartitionValueList) *BatchDeletePartitionInput { - s.PartitionsToDelete = v - return s -} - -// SetTableName sets the TableName field's value. 
-func (s *BatchDeletePartitionInput) SetTableName(v string) *BatchDeletePartitionInput { - s.TableName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeletePartitionResponse -type BatchDeletePartitionOutput struct { - _ struct{} `type:"structure"` - - // Errors encountered when trying to delete the requested partitions. - Errors []*PartitionError `type:"list"` -} - -// String returns the string representation -func (s BatchDeletePartitionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchDeletePartitionOutput) GoString() string { - return s.String() -} - -// SetErrors sets the Errors field's value. -func (s *BatchDeletePartitionOutput) SetErrors(v []*PartitionError) *BatchDeletePartitionOutput { - s.Errors = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeleteTableRequest -type BatchDeleteTableInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the table resides. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database where the tables to delete reside. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A list of the table to delete. - // - // TablesToDelete is a required field - TablesToDelete []*string `type:"list" required:"true"` -} - -// String returns the string representation -func (s BatchDeleteTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchDeleteTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *BatchDeleteTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchDeleteTableInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.TablesToDelete == nil { - invalidParams.Add(request.NewErrParamRequired("TablesToDelete")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *BatchDeleteTableInput) SetCatalogId(v string) *BatchDeleteTableInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *BatchDeleteTableInput) SetDatabaseName(v string) *BatchDeleteTableInput { - s.DatabaseName = &v - return s -} - -// SetTablesToDelete sets the TablesToDelete field's value. -func (s *BatchDeleteTableInput) SetTablesToDelete(v []*string) *BatchDeleteTableInput { - s.TablesToDelete = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchDeleteTableResponse -type BatchDeleteTableOutput struct { - _ struct{} `type:"structure"` - - // A list of errors encountered in attempting to delete the specified tables. - Errors []*TableError `type:"list"` -} - -// String returns the string representation -func (s BatchDeleteTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchDeleteTableOutput) GoString() string { - return s.String() -} - -// SetErrors sets the Errors field's value. 
-func (s *BatchDeleteTableOutput) SetErrors(v []*TableError) *BatchDeleteTableOutput { - s.Errors = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchGetPartitionRequest -type BatchGetPartitionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the partitions in question reside. If none - // is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database where the partitions reside. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A list of partition values identifying the partitions to retrieve. - // - // PartitionsToGet is a required field - PartitionsToGet []*PartitionValueList `type:"list" required:"true"` - - // The name of the partitions' table. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s BatchGetPartitionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchGetPartitionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *BatchGetPartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetPartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionsToGet == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionsToGet")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.PartitionsToGet != nil { - for i, v := range s.PartitionsToGet { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionsToGet", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *BatchGetPartitionInput) SetCatalogId(v string) *BatchGetPartitionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *BatchGetPartitionInput) SetDatabaseName(v string) *BatchGetPartitionInput { - s.DatabaseName = &v - return s -} - -// SetPartitionsToGet sets the PartitionsToGet field's value. -func (s *BatchGetPartitionInput) SetPartitionsToGet(v []*PartitionValueList) *BatchGetPartitionInput { - s.PartitionsToGet = v - return s -} - -// SetTableName sets the TableName field's value. 
-func (s *BatchGetPartitionInput) SetTableName(v string) *BatchGetPartitionInput { - s.TableName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchGetPartitionResponse -type BatchGetPartitionOutput struct { - _ struct{} `type:"structure"` - - // A list of the requested partitions. - Partitions []*Partition `type:"list"` - - // A list of the partition values in the request for which partions were not - // returned. - UnprocessedKeys []*PartitionValueList `type:"list"` -} - -// String returns the string representation -func (s BatchGetPartitionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchGetPartitionOutput) GoString() string { - return s.String() -} - -// SetPartitions sets the Partitions field's value. -func (s *BatchGetPartitionOutput) SetPartitions(v []*Partition) *BatchGetPartitionOutput { - s.Partitions = v - return s -} - -// SetUnprocessedKeys sets the UnprocessedKeys field's value. -func (s *BatchGetPartitionOutput) SetUnprocessedKeys(v []*PartitionValueList) *BatchGetPartitionOutput { - s.UnprocessedKeys = v - return s -} - -// Details about the job run and the error that occurred while trying to submit -// it for stopping. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchStopJobRunError -type BatchStopJobRunError struct { - _ struct{} `type:"structure"` - - // The details of the error that occurred. - ErrorDetail *ErrorDetail `type:"structure"` - - // The name of the job. - JobName *string `min:"1" type:"string"` - - // The job run Id. - JobRunId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s BatchStopJobRunError) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchStopJobRunError) GoString() string { - return s.String() -} - -// SetErrorDetail sets the ErrorDetail field's value. 
-func (s *BatchStopJobRunError) SetErrorDetail(v *ErrorDetail) *BatchStopJobRunError { - s.ErrorDetail = v - return s -} - -// SetJobName sets the JobName field's value. -func (s *BatchStopJobRunError) SetJobName(v string) *BatchStopJobRunError { - s.JobName = &v - return s -} - -// SetJobRunId sets the JobRunId field's value. -func (s *BatchStopJobRunError) SetJobRunId(v string) *BatchStopJobRunError { - s.JobRunId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchStopJobRunRequest -type BatchStopJobRunInput struct { - _ struct{} `type:"structure"` - - // The name of the job whose job runs are to be stopped. - // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` - - // A list of job run Ids of the given job to be stopped. - // - // JobRunIds is a required field - JobRunIds []*string `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s BatchStopJobRunInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchStopJobRunInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchStopJobRunInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchStopJobRunInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) - } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) - } - if s.JobRunIds == nil { - invalidParams.Add(request.NewErrParamRequired("JobRunIds")) - } - if s.JobRunIds != nil && len(s.JobRunIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobRunIds", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetJobName sets the JobName field's value. 
-func (s *BatchStopJobRunInput) SetJobName(v string) *BatchStopJobRunInput { - s.JobName = &v - return s -} - -// SetJobRunIds sets the JobRunIds field's value. -func (s *BatchStopJobRunInput) SetJobRunIds(v []*string) *BatchStopJobRunInput { - s.JobRunIds = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchStopJobRunResponse -type BatchStopJobRunOutput struct { - _ struct{} `type:"structure"` - - // A list containing the job run Ids and details of the error that occurred - // for each job run while submitting to stop. - Errors []*BatchStopJobRunError `type:"list"` - - // A list of job runs which are successfully submitted for stopping. - SuccessfulSubmissions []*BatchStopJobRunSuccessfulSubmission `type:"list"` -} - -// String returns the string representation -func (s BatchStopJobRunOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchStopJobRunOutput) GoString() string { - return s.String() -} - -// SetErrors sets the Errors field's value. -func (s *BatchStopJobRunOutput) SetErrors(v []*BatchStopJobRunError) *BatchStopJobRunOutput { - s.Errors = v - return s -} - -// SetSuccessfulSubmissions sets the SuccessfulSubmissions field's value. -func (s *BatchStopJobRunOutput) SetSuccessfulSubmissions(v []*BatchStopJobRunSuccessfulSubmission) *BatchStopJobRunOutput { - s.SuccessfulSubmissions = v - return s -} - -// Details about the job run which is submitted successfully for stopping. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchStopJobRunSuccessfulSubmission -type BatchStopJobRunSuccessfulSubmission struct { - _ struct{} `type:"structure"` - - // The name of the job. - JobName *string `min:"1" type:"string"` - - // The job run Id. 
- JobRunId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s BatchStopJobRunSuccessfulSubmission) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchStopJobRunSuccessfulSubmission) GoString() string { - return s.String() -} - -// SetJobName sets the JobName field's value. -func (s *BatchStopJobRunSuccessfulSubmission) SetJobName(v string) *BatchStopJobRunSuccessfulSubmission { - s.JobName = &v - return s -} - -// SetJobRunId sets the JobRunId field's value. -func (s *BatchStopJobRunSuccessfulSubmission) SetJobRunId(v string) *BatchStopJobRunSuccessfulSubmission { - s.JobRunId = &v - return s -} - -// Specifies a table definition in the Data Catalog. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CatalogEntry -type CatalogEntry struct { - _ struct{} `type:"structure"` - - // The database in which the table metadata resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The name of the table in question. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CatalogEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CatalogEntry) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CatalogEntry) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CatalogEntry"} - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *CatalogEntry) SetDatabaseName(v string) *CatalogEntry { - s.DatabaseName = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *CatalogEntry) SetTableName(v string) *CatalogEntry { - s.TableName = &v - return s -} - -// A structure containing migration status information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CatalogImportStatus -type CatalogImportStatus struct { - _ struct{} `type:"structure"` - - // True if the migration has completed, or False otherwise. - ImportCompleted *bool `type:"boolean"` - - // The time that the migration was started. - ImportTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The name of the person who initiated the migration. - ImportedBy *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s CatalogImportStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CatalogImportStatus) GoString() string { - return s.String() -} - -// SetImportCompleted sets the ImportCompleted field's value. -func (s *CatalogImportStatus) SetImportCompleted(v bool) *CatalogImportStatus { - s.ImportCompleted = &v - return s -} - -// SetImportTime sets the ImportTime field's value. 
-func (s *CatalogImportStatus) SetImportTime(v time.Time) *CatalogImportStatus { - s.ImportTime = &v - return s -} - -// SetImportedBy sets the ImportedBy field's value. -func (s *CatalogImportStatus) SetImportedBy(v string) *CatalogImportStatus { - s.ImportedBy = &v - return s -} - -// Classifiers are written in Python and triggered during a crawl task. You -// can write your own classifiers to best categorize your data sources and specify -// the appropriate schemas to use for them. A classifier checks whether a given -// file is in a format it can handle, and if it is, the classifier creates a -// schema in the form of a StructType object that matches that data format. -// -// A classifier can be either a grok classifier or an XML classifier, specified -// in one or the other field of the Classifier object. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Classifier -type Classifier struct { - _ struct{} `type:"structure"` - - // A GrokClassifier object. - GrokClassifier *GrokClassifier `type:"structure"` - - // An XMLClassifier object. - XMLClassifier *XMLClassifier `type:"structure"` -} - -// String returns the string representation -func (s Classifier) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Classifier) GoString() string { - return s.String() -} - -// SetGrokClassifier sets the GrokClassifier field's value. -func (s *Classifier) SetGrokClassifier(v *GrokClassifier) *Classifier { - s.GrokClassifier = v - return s -} - -// SetXMLClassifier sets the XMLClassifier field's value. -func (s *Classifier) SetXMLClassifier(v *XMLClassifier) *Classifier { - s.XMLClassifier = v - return s -} - -// Represents a directional edge in a directed acyclic graph (DAG). -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CodeGenEdge -type CodeGenEdge struct { - _ struct{} `type:"structure"` - - // The ID of the node at which the edge starts. 
- // - // Source is a required field - Source *string `min:"1" type:"string" required:"true"` - - // The ID of the node at which the edge ends. - // - // Target is a required field - Target *string `min:"1" type:"string" required:"true"` - - // The target of the edge. - TargetParameter *string `type:"string"` -} - -// String returns the string representation -func (s CodeGenEdge) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CodeGenEdge) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CodeGenEdge) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CodeGenEdge"} - if s.Source == nil { - invalidParams.Add(request.NewErrParamRequired("Source")) - } - if s.Source != nil && len(*s.Source) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Source", 1)) - } - if s.Target == nil { - invalidParams.Add(request.NewErrParamRequired("Target")) - } - if s.Target != nil && len(*s.Target) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Target", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSource sets the Source field's value. -func (s *CodeGenEdge) SetSource(v string) *CodeGenEdge { - s.Source = &v - return s -} - -// SetTarget sets the Target field's value. -func (s *CodeGenEdge) SetTarget(v string) *CodeGenEdge { - s.Target = &v - return s -} - -// SetTargetParameter sets the TargetParameter field's value. -func (s *CodeGenEdge) SetTargetParameter(v string) *CodeGenEdge { - s.TargetParameter = &v - return s -} - -// Represents a node in a directed acyclic graph (DAG) -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CodeGenNode -type CodeGenNode struct { - _ struct{} `type:"structure"` - - // Properties of the node, in the form of name-value pairs. 
- // - // Args is a required field - Args []*CodeGenNodeArg `type:"list" required:"true"` - - // A node identifier that is unique within the node's graph. - // - // Id is a required field - Id *string `min:"1" type:"string" required:"true"` - - // The line number of the node. - LineNumber *int64 `type:"integer"` - - // The type of node this is. - // - // NodeType is a required field - NodeType *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s CodeGenNode) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CodeGenNode) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CodeGenNode) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CodeGenNode"} - if s.Args == nil { - invalidParams.Add(request.NewErrParamRequired("Args")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } - if s.NodeType == nil { - invalidParams.Add(request.NewErrParamRequired("NodeType")) - } - if s.Args != nil { - for i, v := range s.Args { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Args", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetArgs sets the Args field's value. -func (s *CodeGenNode) SetArgs(v []*CodeGenNodeArg) *CodeGenNode { - s.Args = v - return s -} - -// SetId sets the Id field's value. -func (s *CodeGenNode) SetId(v string) *CodeGenNode { - s.Id = &v - return s -} - -// SetLineNumber sets the LineNumber field's value. -func (s *CodeGenNode) SetLineNumber(v int64) *CodeGenNode { - s.LineNumber = &v - return s -} - -// SetNodeType sets the NodeType field's value. 
-func (s *CodeGenNode) SetNodeType(v string) *CodeGenNode { - s.NodeType = &v - return s -} - -// An argument or property of a node. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CodeGenNodeArg -type CodeGenNodeArg struct { - _ struct{} `type:"structure"` - - // The name of the argument or property. - // - // Name is a required field - Name *string `type:"string" required:"true"` - - // True if the value is used as a parameter. - Param *bool `type:"boolean"` - - // The value of the argument or property. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s CodeGenNodeArg) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CodeGenNodeArg) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CodeGenNodeArg) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CodeGenNodeArg"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *CodeGenNodeArg) SetName(v string) *CodeGenNodeArg { - s.Name = &v - return s -} - -// SetParam sets the Param field's value. -func (s *CodeGenNodeArg) SetParam(v bool) *CodeGenNodeArg { - s.Param = &v - return s -} - -// SetValue sets the Value field's value. -func (s *CodeGenNodeArg) SetValue(v string) *CodeGenNodeArg { - s.Value = &v - return s -} - -// A column in a Table. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Column -type Column struct { - _ struct{} `type:"structure"` - - // Free-form text comment. - Comment *string `type:"string"` - - // The name of the Column. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The datatype of data in the Column. - Type *string `type:"string"` -} - -// String returns the string representation -func (s Column) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Column) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Column) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Column"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetComment sets the Comment field's value. -func (s *Column) SetComment(v string) *Column { - s.Comment = &v - return s -} - -// SetName sets the Name field's value. -func (s *Column) SetName(v string) *Column { - s.Name = &v - return s -} - -// SetType sets the Type field's value. -func (s *Column) SetType(v string) *Column { - s.Type = &v - return s -} - -// Defines a condition under which a trigger fires. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Condition -type Condition struct { - _ struct{} `type:"structure"` - - // The name of the job in question. - JobName *string `min:"1" type:"string"` - - // A logical operator. - LogicalOperator *string `type:"string" enum:"LogicalOperator"` - - // The condition state. - State *string `type:"string" enum:"JobRunState"` -} - -// String returns the string representation -func (s Condition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Condition) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *Condition) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Condition"} - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetJobName sets the JobName field's value. -func (s *Condition) SetJobName(v string) *Condition { - s.JobName = &v - return s -} - -// SetLogicalOperator sets the LogicalOperator field's value. -func (s *Condition) SetLogicalOperator(v string) *Condition { - s.LogicalOperator = &v - return s -} - -// SetState sets the State field's value. -func (s *Condition) SetState(v string) *Condition { - s.State = &v - return s -} - -// Defines a connection to a data source. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Connection -type Connection struct { - _ struct{} `type:"structure"` - - // A list of key-value pairs used as parameters for this connection. - ConnectionProperties map[string]*string `type:"map"` - - // The type of the connection. Currently, only JDBC is supported; SFTP is not - // supported. - ConnectionType *string `type:"string" enum:"ConnectionType"` - - // The time this connection definition was created. - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Description of the connection. - Description *string `type:"string"` - - // The user, group or role that last updated this connection definition. - LastUpdatedBy *string `min:"1" type:"string"` - - // The last time this connection definition was updated. - LastUpdatedTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A list of criteria that can be used in selecting this connection. - MatchCriteria []*string `type:"list"` - - // The name of the connection definition. - Name *string `min:"1" type:"string"` - - // A map of physical connection requirements, such as VPC and SecurityGroup, - // needed for making this connection successfully. 
- PhysicalConnectionRequirements *PhysicalConnectionRequirements `type:"structure"` -} - -// String returns the string representation -func (s Connection) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Connection) GoString() string { - return s.String() -} - -// SetConnectionProperties sets the ConnectionProperties field's value. -func (s *Connection) SetConnectionProperties(v map[string]*string) *Connection { - s.ConnectionProperties = v - return s -} - -// SetConnectionType sets the ConnectionType field's value. -func (s *Connection) SetConnectionType(v string) *Connection { - s.ConnectionType = &v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *Connection) SetCreationTime(v time.Time) *Connection { - s.CreationTime = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *Connection) SetDescription(v string) *Connection { - s.Description = &v - return s -} - -// SetLastUpdatedBy sets the LastUpdatedBy field's value. -func (s *Connection) SetLastUpdatedBy(v string) *Connection { - s.LastUpdatedBy = &v - return s -} - -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *Connection) SetLastUpdatedTime(v time.Time) *Connection { - s.LastUpdatedTime = &v - return s -} - -// SetMatchCriteria sets the MatchCriteria field's value. -func (s *Connection) SetMatchCriteria(v []*string) *Connection { - s.MatchCriteria = v - return s -} - -// SetName sets the Name field's value. -func (s *Connection) SetName(v string) *Connection { - s.Name = &v - return s -} - -// SetPhysicalConnectionRequirements sets the PhysicalConnectionRequirements field's value. -func (s *Connection) SetPhysicalConnectionRequirements(v *PhysicalConnectionRequirements) *Connection { - s.PhysicalConnectionRequirements = v - return s -} - -// A structure used to specify a connection to create or update. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ConnectionInput -type ConnectionInput struct { - _ struct{} `type:"structure"` - - // A list of key-value pairs used as parameters for this connection. - ConnectionProperties map[string]*string `type:"map"` - - // The type of the connection. Currently, only JDBC is supported; SFTP is not - // supported. - ConnectionType *string `type:"string" enum:"ConnectionType"` - - // Description of the connection. - Description *string `type:"string"` - - // A list of criteria that can be used in selecting this connection. - MatchCriteria []*string `type:"list"` - - // The name of the connection. - Name *string `min:"1" type:"string"` - - // A map of physical connection requirements, such as VPC and SecurityGroup, - // needed for making this connection successfully. - PhysicalConnectionRequirements *PhysicalConnectionRequirements `type:"structure"` -} - -// String returns the string representation -func (s ConnectionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ConnectionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConnectionInput"} - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.PhysicalConnectionRequirements != nil { - if err := s.PhysicalConnectionRequirements.Validate(); err != nil { - invalidParams.AddNested("PhysicalConnectionRequirements", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConnectionProperties sets the ConnectionProperties field's value. 
-func (s *ConnectionInput) SetConnectionProperties(v map[string]*string) *ConnectionInput { - s.ConnectionProperties = v - return s -} - -// SetConnectionType sets the ConnectionType field's value. -func (s *ConnectionInput) SetConnectionType(v string) *ConnectionInput { - s.ConnectionType = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *ConnectionInput) SetDescription(v string) *ConnectionInput { - s.Description = &v - return s -} - -// SetMatchCriteria sets the MatchCriteria field's value. -func (s *ConnectionInput) SetMatchCriteria(v []*string) *ConnectionInput { - s.MatchCriteria = v - return s -} - -// SetName sets the Name field's value. -func (s *ConnectionInput) SetName(v string) *ConnectionInput { - s.Name = &v - return s -} - -// SetPhysicalConnectionRequirements sets the PhysicalConnectionRequirements field's value. -func (s *ConnectionInput) SetPhysicalConnectionRequirements(v *PhysicalConnectionRequirements) *ConnectionInput { - s.PhysicalConnectionRequirements = v - return s -} - -// Specifies the connections used by a job. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ConnectionsList -type ConnectionsList struct { - _ struct{} `type:"structure"` - - // A list of connections used by the job. - Connections []*string `type:"list"` -} - -// String returns the string representation -func (s ConnectionsList) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ConnectionsList) GoString() string { - return s.String() -} - -// SetConnections sets the Connections field's value. -func (s *ConnectionsList) SetConnections(v []*string) *ConnectionsList { - s.Connections = v - return s -} - -// Specifies a crawler program that examines a data source and uses classifiers -// to try to determine its schema. If successful, the crawler records metadata -// concerning the data source in the AWS Glue Data Catalog. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Crawler -type Crawler struct { - _ struct{} `type:"structure"` - - // A list of custom classifiers associated with the crawler. - Classifiers []*string `type:"list"` - - // Crawler configuration information. This versioned JSON string allows users - // to specify aspects of a Crawler's behavior. - // - // You can use this field to force partitions to inherit metadata such as classification, - // input format, output format, serde information, and schema from their parent - // table, rather than detect this information separately for each partition. - // Use the following JSON string to specify that behavior: - Configuration *string `type:"string"` - - // If the crawler is running, contains the total time elapsed since the last - // crawl began. - CrawlElapsedTime *int64 `type:"long"` - - // The time when the crawler was created. - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The database where metadata is written by this crawler. - DatabaseName *string `type:"string"` - - // A description of the crawler. - Description *string `type:"string"` - - // The status of the last crawl, and potentially error information if an error - // occurred. - LastCrawl *LastCrawlInfo `type:"structure"` - - // The time the crawler was last updated. - LastUpdated *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The crawler name. - Name *string `min:"1" type:"string"` - - // The IAM role (or ARN of an IAM role) used to access customer resources, such - // as data in Amazon S3. - Role *string `type:"string"` - - // For scheduled crawlers, the schedule when the crawler runs. - Schedule *Schedule `type:"structure"` - - // Sets the behavior when the crawler finds a changed or deleted object. - SchemaChangePolicy *SchemaChangePolicy `type:"structure"` - - // Indicates whether the crawler is running, or whether a run is pending. 
- State *string `type:"string" enum:"CrawlerState"` - - // The prefix added to the names of tables that are created. - TablePrefix *string `type:"string"` - - // A collection of targets to crawl. - Targets *CrawlerTargets `type:"structure"` - - // The version of the crawler. - Version *int64 `type:"long"` -} - -// String returns the string representation -func (s Crawler) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Crawler) GoString() string { - return s.String() -} - -// SetClassifiers sets the Classifiers field's value. -func (s *Crawler) SetClassifiers(v []*string) *Crawler { - s.Classifiers = v - return s -} - -// SetConfiguration sets the Configuration field's value. -func (s *Crawler) SetConfiguration(v string) *Crawler { - s.Configuration = &v - return s -} - -// SetCrawlElapsedTime sets the CrawlElapsedTime field's value. -func (s *Crawler) SetCrawlElapsedTime(v int64) *Crawler { - s.CrawlElapsedTime = &v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *Crawler) SetCreationTime(v time.Time) *Crawler { - s.CreationTime = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *Crawler) SetDatabaseName(v string) *Crawler { - s.DatabaseName = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *Crawler) SetDescription(v string) *Crawler { - s.Description = &v - return s -} - -// SetLastCrawl sets the LastCrawl field's value. -func (s *Crawler) SetLastCrawl(v *LastCrawlInfo) *Crawler { - s.LastCrawl = v - return s -} - -// SetLastUpdated sets the LastUpdated field's value. -func (s *Crawler) SetLastUpdated(v time.Time) *Crawler { - s.LastUpdated = &v - return s -} - -// SetName sets the Name field's value. -func (s *Crawler) SetName(v string) *Crawler { - s.Name = &v - return s -} - -// SetRole sets the Role field's value. 
-func (s *Crawler) SetRole(v string) *Crawler { - s.Role = &v - return s -} - -// SetSchedule sets the Schedule field's value. -func (s *Crawler) SetSchedule(v *Schedule) *Crawler { - s.Schedule = v - return s -} - -// SetSchemaChangePolicy sets the SchemaChangePolicy field's value. -func (s *Crawler) SetSchemaChangePolicy(v *SchemaChangePolicy) *Crawler { - s.SchemaChangePolicy = v - return s -} - -// SetState sets the State field's value. -func (s *Crawler) SetState(v string) *Crawler { - s.State = &v - return s -} - -// SetTablePrefix sets the TablePrefix field's value. -func (s *Crawler) SetTablePrefix(v string) *Crawler { - s.TablePrefix = &v - return s -} - -// SetTargets sets the Targets field's value. -func (s *Crawler) SetTargets(v *CrawlerTargets) *Crawler { - s.Targets = v - return s -} - -// SetVersion sets the Version field's value. -func (s *Crawler) SetVersion(v int64) *Crawler { - s.Version = &v - return s -} - -// Metrics for a specified crawler. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CrawlerMetrics -type CrawlerMetrics struct { - _ struct{} `type:"structure"` - - // The name of the crawler. - CrawlerName *string `min:"1" type:"string"` - - // The duration of the crawler's most recent run, in seconds. - LastRuntimeSeconds *float64 `type:"double"` - - // The median duration of this crawler's runs, in seconds. - MedianRuntimeSeconds *float64 `type:"double"` - - // True if the crawler is still estimating how long it will take to complete - // this run. - StillEstimating *bool `type:"boolean"` - - // The number of tables created by this crawler. - TablesCreated *int64 `type:"integer"` - - // The number of tables deleted by this crawler. - TablesDeleted *int64 `type:"integer"` - - // The number of tables updated by this crawler. - TablesUpdated *int64 `type:"integer"` - - // The estimated time left to complete a running crawl. 
- TimeLeftSeconds *float64 `type:"double"` -} - -// String returns the string representation -func (s CrawlerMetrics) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CrawlerMetrics) GoString() string { - return s.String() -} - -// SetCrawlerName sets the CrawlerName field's value. -func (s *CrawlerMetrics) SetCrawlerName(v string) *CrawlerMetrics { - s.CrawlerName = &v - return s -} - -// SetLastRuntimeSeconds sets the LastRuntimeSeconds field's value. -func (s *CrawlerMetrics) SetLastRuntimeSeconds(v float64) *CrawlerMetrics { - s.LastRuntimeSeconds = &v - return s -} - -// SetMedianRuntimeSeconds sets the MedianRuntimeSeconds field's value. -func (s *CrawlerMetrics) SetMedianRuntimeSeconds(v float64) *CrawlerMetrics { - s.MedianRuntimeSeconds = &v - return s -} - -// SetStillEstimating sets the StillEstimating field's value. -func (s *CrawlerMetrics) SetStillEstimating(v bool) *CrawlerMetrics { - s.StillEstimating = &v - return s -} - -// SetTablesCreated sets the TablesCreated field's value. -func (s *CrawlerMetrics) SetTablesCreated(v int64) *CrawlerMetrics { - s.TablesCreated = &v - return s -} - -// SetTablesDeleted sets the TablesDeleted field's value. -func (s *CrawlerMetrics) SetTablesDeleted(v int64) *CrawlerMetrics { - s.TablesDeleted = &v - return s -} - -// SetTablesUpdated sets the TablesUpdated field's value. -func (s *CrawlerMetrics) SetTablesUpdated(v int64) *CrawlerMetrics { - s.TablesUpdated = &v - return s -} - -// SetTimeLeftSeconds sets the TimeLeftSeconds field's value. -func (s *CrawlerMetrics) SetTimeLeftSeconds(v float64) *CrawlerMetrics { - s.TimeLeftSeconds = &v - return s -} - -// Specifies data stores to crawl. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CrawlerTargets -type CrawlerTargets struct { - _ struct{} `type:"structure"` - - // Specifies JDBC targets. - JdbcTargets []*JdbcTarget `type:"list"` - - // Specifies Amazon S3 targets. 
- S3Targets []*S3Target `type:"list"` -} - -// String returns the string representation -func (s CrawlerTargets) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CrawlerTargets) GoString() string { - return s.String() -} - -// SetJdbcTargets sets the JdbcTargets field's value. -func (s *CrawlerTargets) SetJdbcTargets(v []*JdbcTarget) *CrawlerTargets { - s.JdbcTargets = v - return s -} - -// SetS3Targets sets the S3Targets field's value. -func (s *CrawlerTargets) SetS3Targets(v []*S3Target) *CrawlerTargets { - s.S3Targets = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateClassifierRequest -type CreateClassifierInput struct { - _ struct{} `type:"structure"` - - // A GrokClassifier object specifying the classifier to create. - GrokClassifier *CreateGrokClassifierRequest `type:"structure"` - - // An XMLClassifier object specifying the classifier to create. - XMLClassifier *CreateXMLClassifierRequest `type:"structure"` -} - -// String returns the string representation -func (s CreateClassifierInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateClassifierInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateClassifierInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateClassifierInput"} - if s.GrokClassifier != nil { - if err := s.GrokClassifier.Validate(); err != nil { - invalidParams.AddNested("GrokClassifier", err.(request.ErrInvalidParams)) - } - } - if s.XMLClassifier != nil { - if err := s.XMLClassifier.Validate(); err != nil { - invalidParams.AddNested("XMLClassifier", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrokClassifier sets the GrokClassifier field's value. 
-func (s *CreateClassifierInput) SetGrokClassifier(v *CreateGrokClassifierRequest) *CreateClassifierInput { - s.GrokClassifier = v - return s -} - -// SetXMLClassifier sets the XMLClassifier field's value. -func (s *CreateClassifierInput) SetXMLClassifier(v *CreateXMLClassifierRequest) *CreateClassifierInput { - s.XMLClassifier = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateClassifierResponse -type CreateClassifierOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateClassifierOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateClassifierOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateConnectionRequest -type CreateConnectionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which to create the connection. If none is - // supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // A ConnectionInput object defining the connection to create. - // - // ConnectionInput is a required field - ConnectionInput *ConnectionInput `type:"structure" required:"true"` -} - -// String returns the string representation -func (s CreateConnectionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateConnectionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateConnectionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.ConnectionInput == nil { - invalidParams.Add(request.NewErrParamRequired("ConnectionInput")) - } - if s.ConnectionInput != nil { - if err := s.ConnectionInput.Validate(); err != nil { - invalidParams.AddNested("ConnectionInput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *CreateConnectionInput) SetCatalogId(v string) *CreateConnectionInput { - s.CatalogId = &v - return s -} - -// SetConnectionInput sets the ConnectionInput field's value. -func (s *CreateConnectionInput) SetConnectionInput(v *ConnectionInput) *CreateConnectionInput { - s.ConnectionInput = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateConnectionResponse -type CreateConnectionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateConnectionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateConnectionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateCrawlerRequest -type CreateCrawlerInput struct { - _ struct{} `type:"structure"` - - // A list of custom classifiers that the user has registered. By default, all - // AWS classifiers are included in a crawl, but these custom classifiers always - // override the default classifiers for a given classification. - Classifiers []*string `type:"list"` - - // Crawler configuration information. This versioned JSON string allows users - // to specify aspects of a Crawler's behavior. 
- // - // You can use this field to force partitions to inherit metadata such as classification, - // input format, output format, serde information, and schema from their parent - // table, rather than detect this information separately for each partition. - Configuration *string `type:"string"` - - // The AWS Glue database where results are written, such as: arn:aws:daylight:us-east-1::database/sometable/*. - // - // DatabaseName is a required field - DatabaseName *string `type:"string" required:"true"` - - // A description of the new crawler. - Description *string `type:"string"` - - // Name of the new crawler. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The IAM role (or ARN of an IAM role) used by the new crawler to access customer - // resources. - // - // Role is a required field - Role *string `type:"string" required:"true"` - - // A cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). - Schedule *string `type:"string"` - - // Policy for the crawler's update and deletion behavior. - SchemaChangePolicy *SchemaChangePolicy `type:"structure"` - - // The table prefix used for catalog tables that are created. - TablePrefix *string `type:"string"` - - // A list of collection of targets to crawl. - // - // Targets is a required field - Targets *CrawlerTargets `type:"structure" required:"true"` -} - -// String returns the string representation -func (s CreateCrawlerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateCrawlerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateCrawlerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCrawlerInput"} - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Role == nil { - invalidParams.Add(request.NewErrParamRequired("Role")) - } - if s.Targets == nil { - invalidParams.Add(request.NewErrParamRequired("Targets")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClassifiers sets the Classifiers field's value. -func (s *CreateCrawlerInput) SetClassifiers(v []*string) *CreateCrawlerInput { - s.Classifiers = v - return s -} - -// SetConfiguration sets the Configuration field's value. -func (s *CreateCrawlerInput) SetConfiguration(v string) *CreateCrawlerInput { - s.Configuration = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *CreateCrawlerInput) SetDatabaseName(v string) *CreateCrawlerInput { - s.DatabaseName = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateCrawlerInput) SetDescription(v string) *CreateCrawlerInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateCrawlerInput) SetName(v string) *CreateCrawlerInput { - s.Name = &v - return s -} - -// SetRole sets the Role field's value. -func (s *CreateCrawlerInput) SetRole(v string) *CreateCrawlerInput { - s.Role = &v - return s -} - -// SetSchedule sets the Schedule field's value. -func (s *CreateCrawlerInput) SetSchedule(v string) *CreateCrawlerInput { - s.Schedule = &v - return s -} - -// SetSchemaChangePolicy sets the SchemaChangePolicy field's value. 
-func (s *CreateCrawlerInput) SetSchemaChangePolicy(v *SchemaChangePolicy) *CreateCrawlerInput { - s.SchemaChangePolicy = v - return s -} - -// SetTablePrefix sets the TablePrefix field's value. -func (s *CreateCrawlerInput) SetTablePrefix(v string) *CreateCrawlerInput { - s.TablePrefix = &v - return s -} - -// SetTargets sets the Targets field's value. -func (s *CreateCrawlerInput) SetTargets(v *CrawlerTargets) *CreateCrawlerInput { - s.Targets = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateCrawlerResponse -type CreateCrawlerOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateCrawlerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateCrawlerOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateDatabaseRequest -type CreateDatabaseInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which to create the database. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // A DatabaseInput object defining the metadata database to create in the catalog. - // - // DatabaseInput is a required field - DatabaseInput *DatabaseInput `type:"structure" required:"true"` -} - -// String returns the string representation -func (s CreateDatabaseInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateDatabaseInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateDatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDatabaseInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseInput == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseInput")) - } - if s.DatabaseInput != nil { - if err := s.DatabaseInput.Validate(); err != nil { - invalidParams.AddNested("DatabaseInput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *CreateDatabaseInput) SetCatalogId(v string) *CreateDatabaseInput { - s.CatalogId = &v - return s -} - -// SetDatabaseInput sets the DatabaseInput field's value. -func (s *CreateDatabaseInput) SetDatabaseInput(v *DatabaseInput) *CreateDatabaseInput { - s.DatabaseInput = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateDatabaseResponse -type CreateDatabaseOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateDatabaseOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateDatabaseOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateDevEndpointRequest -type CreateDevEndpointInput struct { - _ struct{} `type:"structure"` - - // The name to be assigned to the new DevEndpoint. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` - - // Path to one or more Java Jars in an S3 bucket that should be loaded in your - // DevEndpoint. - ExtraJarsS3Path *string `type:"string"` - - // Path(s) to one or more Python libraries in an S3 bucket that should be loaded - // in your DevEndpoint. Multiple values must be complete paths separated by - // a comma. 
- // - // Please note that only pure Python libraries can currently be used on a DevEndpoint. - // Libraries that rely on C extensions, such as the pandas (http://pandas.pydata.org/) - // Python data analysis library, are not yet supported. - ExtraPythonLibsS3Path *string `type:"string"` - - // The number of AWS Glue Data Processing Units (DPUs) to allocate to this DevEndpoint. - NumberOfNodes *int64 `type:"integer"` - - // The public key to use for authentication. - // - // PublicKey is a required field - PublicKey *string `type:"string" required:"true"` - - // The IAM role for the DevEndpoint. - // - // RoleArn is a required field - RoleArn *string `type:"string" required:"true"` - - // Security group IDs for the security groups to be used by the new DevEndpoint. - SecurityGroupIds []*string `type:"list"` - - // The subnet ID for the new DevEndpoint to use. - SubnetId *string `type:"string"` -} - -// String returns the string representation -func (s CreateDevEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateDevEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateDevEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDevEndpointInput"} - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) - } - if s.PublicKey == nil { - invalidParams.Add(request.NewErrParamRequired("PublicKey")) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointName sets the EndpointName field's value. -func (s *CreateDevEndpointInput) SetEndpointName(v string) *CreateDevEndpointInput { - s.EndpointName = &v - return s -} - -// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. 
-func (s *CreateDevEndpointInput) SetExtraJarsS3Path(v string) *CreateDevEndpointInput { - s.ExtraJarsS3Path = &v - return s -} - -// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. -func (s *CreateDevEndpointInput) SetExtraPythonLibsS3Path(v string) *CreateDevEndpointInput { - s.ExtraPythonLibsS3Path = &v - return s -} - -// SetNumberOfNodes sets the NumberOfNodes field's value. -func (s *CreateDevEndpointInput) SetNumberOfNodes(v int64) *CreateDevEndpointInput { - s.NumberOfNodes = &v - return s -} - -// SetPublicKey sets the PublicKey field's value. -func (s *CreateDevEndpointInput) SetPublicKey(v string) *CreateDevEndpointInput { - s.PublicKey = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *CreateDevEndpointInput) SetRoleArn(v string) *CreateDevEndpointInput { - s.RoleArn = &v - return s -} - -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *CreateDevEndpointInput) SetSecurityGroupIds(v []*string) *CreateDevEndpointInput { - s.SecurityGroupIds = v - return s -} - -// SetSubnetId sets the SubnetId field's value. -func (s *CreateDevEndpointInput) SetSubnetId(v string) *CreateDevEndpointInput { - s.SubnetId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateDevEndpointResponse -type CreateDevEndpointOutput struct { - _ struct{} `type:"structure"` - - // The AWS availability zone where this DevEndpoint is located. - AvailabilityZone *string `type:"string"` - - // The point in time at which this DevEndpoint was created. - CreatedTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The name assigned to the new DevEndpoint. - EndpointName *string `type:"string"` - - // Path to one or more Java Jars in an S3 bucket that will be loaded in your - // DevEndpoint. - ExtraJarsS3Path *string `type:"string"` - - // Path(s) to one or more Python libraries in an S3 bucket that will be loaded - // in your DevEndpoint. 
- ExtraPythonLibsS3Path *string `type:"string"` - - // The reason for a current failure in this DevEndpoint. - FailureReason *string `type:"string"` - - // The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint. - NumberOfNodes *int64 `type:"integer"` - - // The AWS ARN of the role assigned to the new DevEndpoint. - RoleArn *string `type:"string"` - - // The security groups assigned to the new DevEndpoint. - SecurityGroupIds []*string `type:"list"` - - // The current status of the new DevEndpoint. - Status *string `type:"string"` - - // The subnet ID assigned to the new DevEndpoint. - SubnetId *string `type:"string"` - - // The ID of the VPC used by this DevEndpoint. - VpcId *string `type:"string"` - - // The address of the YARN endpoint used by this DevEndpoint. - YarnEndpointAddress *string `type:"string"` - - // The Apache Zeppelin port for the remote Apache Spark interpreter. - ZeppelinRemoteSparkInterpreterPort *int64 `type:"integer"` -} - -// String returns the string representation -func (s CreateDevEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateDevEndpointOutput) GoString() string { - return s.String() -} - -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *CreateDevEndpointOutput) SetAvailabilityZone(v string) *CreateDevEndpointOutput { - s.AvailabilityZone = &v - return s -} - -// SetCreatedTimestamp sets the CreatedTimestamp field's value. -func (s *CreateDevEndpointOutput) SetCreatedTimestamp(v time.Time) *CreateDevEndpointOutput { - s.CreatedTimestamp = &v - return s -} - -// SetEndpointName sets the EndpointName field's value. -func (s *CreateDevEndpointOutput) SetEndpointName(v string) *CreateDevEndpointOutput { - s.EndpointName = &v - return s -} - -// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. 
-func (s *CreateDevEndpointOutput) SetExtraJarsS3Path(v string) *CreateDevEndpointOutput { - s.ExtraJarsS3Path = &v - return s -} - -// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. -func (s *CreateDevEndpointOutput) SetExtraPythonLibsS3Path(v string) *CreateDevEndpointOutput { - s.ExtraPythonLibsS3Path = &v - return s -} - -// SetFailureReason sets the FailureReason field's value. -func (s *CreateDevEndpointOutput) SetFailureReason(v string) *CreateDevEndpointOutput { - s.FailureReason = &v - return s -} - -// SetNumberOfNodes sets the NumberOfNodes field's value. -func (s *CreateDevEndpointOutput) SetNumberOfNodes(v int64) *CreateDevEndpointOutput { - s.NumberOfNodes = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *CreateDevEndpointOutput) SetRoleArn(v string) *CreateDevEndpointOutput { - s.RoleArn = &v - return s -} - -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *CreateDevEndpointOutput) SetSecurityGroupIds(v []*string) *CreateDevEndpointOutput { - s.SecurityGroupIds = v - return s -} - -// SetStatus sets the Status field's value. -func (s *CreateDevEndpointOutput) SetStatus(v string) *CreateDevEndpointOutput { - s.Status = &v - return s -} - -// SetSubnetId sets the SubnetId field's value. -func (s *CreateDevEndpointOutput) SetSubnetId(v string) *CreateDevEndpointOutput { - s.SubnetId = &v - return s -} - -// SetVpcId sets the VpcId field's value. -func (s *CreateDevEndpointOutput) SetVpcId(v string) *CreateDevEndpointOutput { - s.VpcId = &v - return s -} - -// SetYarnEndpointAddress sets the YarnEndpointAddress field's value. -func (s *CreateDevEndpointOutput) SetYarnEndpointAddress(v string) *CreateDevEndpointOutput { - s.YarnEndpointAddress = &v - return s -} - -// SetZeppelinRemoteSparkInterpreterPort sets the ZeppelinRemoteSparkInterpreterPort field's value. 
-func (s *CreateDevEndpointOutput) SetZeppelinRemoteSparkInterpreterPort(v int64) *CreateDevEndpointOutput { - s.ZeppelinRemoteSparkInterpreterPort = &v - return s -} - -// Specifies a grok classifier for CreateClassifier to create. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateGrokClassifierRequest -type CreateGrokClassifierRequest struct { - _ struct{} `type:"structure"` - - // An identifier of the data format that the classifier matches, such as Twitter, - // JSON, Omniture logs, Amazon CloudWatch Logs, and so on. - // - // Classification is a required field - Classification *string `type:"string" required:"true"` - - // Optional custom grok patterns used by this classifier. - CustomPatterns *string `type:"string"` - - // The grok pattern used by this classifier. - // - // GrokPattern is a required field - GrokPattern *string `min:"1" type:"string" required:"true"` - - // The name of the new classifier. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateGrokClassifierRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateGrokClassifierRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateGrokClassifierRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateGrokClassifierRequest"} - if s.Classification == nil { - invalidParams.Add(request.NewErrParamRequired("Classification")) - } - if s.GrokPattern == nil { - invalidParams.Add(request.NewErrParamRequired("GrokPattern")) - } - if s.GrokPattern != nil && len(*s.GrokPattern) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrokPattern", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClassification sets the Classification field's value. -func (s *CreateGrokClassifierRequest) SetClassification(v string) *CreateGrokClassifierRequest { - s.Classification = &v - return s -} - -// SetCustomPatterns sets the CustomPatterns field's value. -func (s *CreateGrokClassifierRequest) SetCustomPatterns(v string) *CreateGrokClassifierRequest { - s.CustomPatterns = &v - return s -} - -// SetGrokPattern sets the GrokPattern field's value. -func (s *CreateGrokClassifierRequest) SetGrokPattern(v string) *CreateGrokClassifierRequest { - s.GrokPattern = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateGrokClassifierRequest) SetName(v string) *CreateGrokClassifierRequest { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateJobRequest -type CreateJobInput struct { - _ struct{} `type:"structure"` - - // The number of capacity units allocated to this job. - AllocatedCapacity *int64 `type:"integer"` - - // The JobCommand that executes this job. - // - // Command is a required field - Command *JobCommand `type:"structure" required:"true"` - - // The connections used for this job. - Connections *ConnectionsList `type:"structure"` - - // The default parameters for this job. 
- DefaultArguments map[string]*string `type:"map"` - - // Description of the job. - Description *string `type:"string"` - - // An ExecutionProperty specifying the maximum number of concurrent runs allowed - // for this job. - ExecutionProperty *ExecutionProperty `type:"structure"` - - // This field is reserved for future use. - LogUri *string `type:"string"` - - // The maximum number of times to retry this job if it fails. - MaxRetries *int64 `type:"integer"` - - // The name you assign to this job. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The role associated with this job. - // - // Role is a required field - Role *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateJobInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateJobInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateJobInput"} - if s.Command == nil { - invalidParams.Add(request.NewErrParamRequired("Command")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Role == nil { - invalidParams.Add(request.NewErrParamRequired("Role")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAllocatedCapacity sets the AllocatedCapacity field's value. -func (s *CreateJobInput) SetAllocatedCapacity(v int64) *CreateJobInput { - s.AllocatedCapacity = &v - return s -} - -// SetCommand sets the Command field's value. -func (s *CreateJobInput) SetCommand(v *JobCommand) *CreateJobInput { - s.Command = v - return s -} - -// SetConnections sets the Connections field's value. 
-func (s *CreateJobInput) SetConnections(v *ConnectionsList) *CreateJobInput { - s.Connections = v - return s -} - -// SetDefaultArguments sets the DefaultArguments field's value. -func (s *CreateJobInput) SetDefaultArguments(v map[string]*string) *CreateJobInput { - s.DefaultArguments = v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateJobInput) SetDescription(v string) *CreateJobInput { - s.Description = &v - return s -} - -// SetExecutionProperty sets the ExecutionProperty field's value. -func (s *CreateJobInput) SetExecutionProperty(v *ExecutionProperty) *CreateJobInput { - s.ExecutionProperty = v - return s -} - -// SetLogUri sets the LogUri field's value. -func (s *CreateJobInput) SetLogUri(v string) *CreateJobInput { - s.LogUri = &v - return s -} - -// SetMaxRetries sets the MaxRetries field's value. -func (s *CreateJobInput) SetMaxRetries(v int64) *CreateJobInput { - s.MaxRetries = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateJobInput) SetName(v string) *CreateJobInput { - s.Name = &v - return s -} - -// SetRole sets the Role field's value. -func (s *CreateJobInput) SetRole(v string) *CreateJobInput { - s.Role = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateJobResponse -type CreateJobOutput struct { - _ struct{} `type:"structure"` - - // The unique name of the new job that has been created. - Name *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s CreateJobOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateJobOutput) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. 
-func (s *CreateJobOutput) SetName(v string) *CreateJobOutput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreatePartitionRequest -type CreatePartitionInput struct { - _ struct{} `type:"structure"` - - // The ID of the catalog in which the partion is to be created. Currently, this - // should be the AWS account ID. - CatalogId *string `min:"1" type:"string"` - - // The name of the metadata database in which the partition is to be created. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A PartitionInput structure defining the partition to be created. - // - // PartitionInput is a required field - PartitionInput *PartitionInput `type:"structure" required:"true"` - - // The name of the metadata table in which the partition is to be created. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreatePartitionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreatePartitionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreatePartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreatePartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionInput == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionInput")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.PartitionInput != nil { - if err := s.PartitionInput.Validate(); err != nil { - invalidParams.AddNested("PartitionInput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *CreatePartitionInput) SetCatalogId(v string) *CreatePartitionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *CreatePartitionInput) SetDatabaseName(v string) *CreatePartitionInput { - s.DatabaseName = &v - return s -} - -// SetPartitionInput sets the PartitionInput field's value. -func (s *CreatePartitionInput) SetPartitionInput(v *PartitionInput) *CreatePartitionInput { - s.PartitionInput = v - return s -} - -// SetTableName sets the TableName field's value. 
-func (s *CreatePartitionInput) SetTableName(v string) *CreatePartitionInput { - s.TableName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreatePartitionResponse -type CreatePartitionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreatePartitionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreatePartitionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateScriptRequest -type CreateScriptInput struct { - _ struct{} `type:"structure"` - - // A list of the edges in the DAG. - DagEdges []*CodeGenEdge `type:"list"` - - // A list of the nodes in the DAG. - DagNodes []*CodeGenNode `type:"list"` -} - -// String returns the string representation -func (s CreateScriptInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateScriptInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateScriptInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateScriptInput"} - if s.DagEdges != nil { - for i, v := range s.DagEdges { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DagEdges", i), err.(request.ErrInvalidParams)) - } - } - } - if s.DagNodes != nil { - for i, v := range s.DagNodes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DagNodes", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDagEdges sets the DagEdges field's value. 
-func (s *CreateScriptInput) SetDagEdges(v []*CodeGenEdge) *CreateScriptInput { - s.DagEdges = v - return s -} - -// SetDagNodes sets the DagNodes field's value. -func (s *CreateScriptInput) SetDagNodes(v []*CodeGenNode) *CreateScriptInput { - s.DagNodes = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateScriptResponse -type CreateScriptOutput struct { - _ struct{} `type:"structure"` - - // The Python script generated from the DAG. - PythonScript *string `type:"string"` -} - -// String returns the string representation -func (s CreateScriptOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateScriptOutput) GoString() string { - return s.String() -} - -// SetPythonScript sets the PythonScript field's value. -func (s *CreateScriptOutput) SetPythonScript(v string) *CreateScriptOutput { - s.PythonScript = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateTableRequest -type CreateTableInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which to create the Table. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The catalog database in which to create the new table. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The TableInput object that defines the metadata table to create in the catalog. - // - // TableInput is a required field - TableInput *TableInput `type:"structure" required:"true"` -} - -// String returns the string representation -func (s CreateTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateTableInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.TableInput == nil { - invalidParams.Add(request.NewErrParamRequired("TableInput")) - } - if s.TableInput != nil { - if err := s.TableInput.Validate(); err != nil { - invalidParams.AddNested("TableInput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *CreateTableInput) SetCatalogId(v string) *CreateTableInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *CreateTableInput) SetDatabaseName(v string) *CreateTableInput { - s.DatabaseName = &v - return s -} - -// SetTableInput sets the TableInput field's value. -func (s *CreateTableInput) SetTableInput(v *TableInput) *CreateTableInput { - s.TableInput = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateTableResponse -type CreateTableOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateTableOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateTriggerRequest -type CreateTriggerInput struct { - _ struct{} `type:"structure"` - - // The actions initiated by this trigger when it fires. 
- // - // Actions is a required field - Actions []*Action `type:"list" required:"true"` - - // A description of the new trigger. - Description *string `type:"string"` - - // The name to assign to the new trigger. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // A predicate to specify when the new trigger should fire. - Predicate *Predicate `type:"structure"` - - // A cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). - Schedule *string `type:"string"` - - // The type of the new trigger. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"TriggerType"` -} - -// String returns the string representation -func (s CreateTriggerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateTriggerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateTriggerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateTriggerInput"} - if s.Actions == nil { - invalidParams.Add(request.NewErrParamRequired("Actions")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - if s.Actions != nil { - for i, v := range s.Actions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Predicate != nil { - if err := s.Predicate.Validate(); err != nil { - invalidParams.AddNested("Predicate", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetActions sets the Actions field's value. -func (s *CreateTriggerInput) SetActions(v []*Action) *CreateTriggerInput { - s.Actions = v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateTriggerInput) SetDescription(v string) *CreateTriggerInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateTriggerInput) SetName(v string) *CreateTriggerInput { - s.Name = &v - return s -} - -// SetPredicate sets the Predicate field's value. -func (s *CreateTriggerInput) SetPredicate(v *Predicate) *CreateTriggerInput { - s.Predicate = v - return s -} - -// SetSchedule sets the Schedule field's value. -func (s *CreateTriggerInput) SetSchedule(v string) *CreateTriggerInput { - s.Schedule = &v - return s -} - -// SetType sets the Type field's value. 
-func (s *CreateTriggerInput) SetType(v string) *CreateTriggerInput { - s.Type = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateTriggerResponse -type CreateTriggerOutput struct { - _ struct{} `type:"structure"` - - // The name assigned to the new trigger. - Name *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s CreateTriggerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateTriggerOutput) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *CreateTriggerOutput) SetName(v string) *CreateTriggerOutput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateUserDefinedFunctionRequest -type CreateUserDefinedFunctionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which to create the function. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database in which to create the function. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A FunctionInput object that defines the function to create in the Data Catalog. - // - // FunctionInput is a required field - FunctionInput *UserDefinedFunctionInput `type:"structure" required:"true"` -} - -// String returns the string representation -func (s CreateUserDefinedFunctionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateUserDefinedFunctionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateUserDefinedFunctionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateUserDefinedFunctionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.FunctionInput == nil { - invalidParams.Add(request.NewErrParamRequired("FunctionInput")) - } - if s.FunctionInput != nil { - if err := s.FunctionInput.Validate(); err != nil { - invalidParams.AddNested("FunctionInput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *CreateUserDefinedFunctionInput) SetCatalogId(v string) *CreateUserDefinedFunctionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *CreateUserDefinedFunctionInput) SetDatabaseName(v string) *CreateUserDefinedFunctionInput { - s.DatabaseName = &v - return s -} - -// SetFunctionInput sets the FunctionInput field's value. -func (s *CreateUserDefinedFunctionInput) SetFunctionInput(v *UserDefinedFunctionInput) *CreateUserDefinedFunctionInput { - s.FunctionInput = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateUserDefinedFunctionResponse -type CreateUserDefinedFunctionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateUserDefinedFunctionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateUserDefinedFunctionOutput) GoString() string { - return s.String() -} - -// Specifies an XML classifier for CreateClassifier to create. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateXMLClassifierRequest -type CreateXMLClassifierRequest struct { - _ struct{} `type:"structure"` - - // An identifier of the data format that the classifier matches. - // - // Classification is a required field - Classification *string `type:"string" required:"true"` - - // The name of the classifier. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The XML tag designating the element that contains each record in an XML document - // being parsed. Note that this cannot be an empty element. It must contain - // child elements representing fields in the record. - RowTag *string `type:"string"` -} - -// String returns the string representation -func (s CreateXMLClassifierRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateXMLClassifierRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateXMLClassifierRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateXMLClassifierRequest"} - if s.Classification == nil { - invalidParams.Add(request.NewErrParamRequired("Classification")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClassification sets the Classification field's value. -func (s *CreateXMLClassifierRequest) SetClassification(v string) *CreateXMLClassifierRequest { - s.Classification = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateXMLClassifierRequest) SetName(v string) *CreateXMLClassifierRequest { - s.Name = &v - return s -} - -// SetRowTag sets the RowTag field's value. 
-func (s *CreateXMLClassifierRequest) SetRowTag(v string) *CreateXMLClassifierRequest { - s.RowTag = &v - return s -} - -// The Database object represents a logical grouping of tables that may reside -// in a Hive metastore or an RDBMS. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Database -type Database struct { - _ struct{} `type:"structure"` - - // The time at which the metadata database was created in the catalog. - CreateTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Description of the database. - Description *string `type:"string"` - - // The location of the database (for example, an HDFS path). - LocationUri *string `min:"1" type:"string"` - - // Name of the database. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // A list of key-value pairs that define parameters and properties of the database. - Parameters map[string]*string `type:"map"` -} - -// String returns the string representation -func (s Database) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Database) GoString() string { - return s.String() -} - -// SetCreateTime sets the CreateTime field's value. -func (s *Database) SetCreateTime(v time.Time) *Database { - s.CreateTime = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *Database) SetDescription(v string) *Database { - s.Description = &v - return s -} - -// SetLocationUri sets the LocationUri field's value. -func (s *Database) SetLocationUri(v string) *Database { - s.LocationUri = &v - return s -} - -// SetName sets the Name field's value. -func (s *Database) SetName(v string) *Database { - s.Name = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *Database) SetParameters(v map[string]*string) *Database { - s.Parameters = v - return s -} - -// The structure used to create or updata a database. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DatabaseInput -type DatabaseInput struct { - _ struct{} `type:"structure"` - - // Description of the database - Description *string `type:"string"` - - // The location of the database (for example, an HDFS path). - LocationUri *string `min:"1" type:"string"` - - // Name of the database. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // A list of key-value pairs that define parameters and properties of the database. - Parameters map[string]*string `type:"map"` -} - -// String returns the string representation -func (s DatabaseInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DatabaseInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DatabaseInput"} - if s.LocationUri != nil && len(*s.LocationUri) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LocationUri", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *DatabaseInput) SetDescription(v string) *DatabaseInput { - s.Description = &v - return s -} - -// SetLocationUri sets the LocationUri field's value. -func (s *DatabaseInput) SetLocationUri(v string) *DatabaseInput { - s.LocationUri = &v - return s -} - -// SetName sets the Name field's value. -func (s *DatabaseInput) SetName(v string) *DatabaseInput { - s.Name = &v - return s -} - -// SetParameters sets the Parameters field's value. 
-func (s *DatabaseInput) SetParameters(v map[string]*string) *DatabaseInput { - s.Parameters = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteClassifierRequest -type DeleteClassifierInput struct { - _ struct{} `type:"structure"` - - // Name of the classifier to remove. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteClassifierInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteClassifierInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteClassifierInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteClassifierInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteClassifierInput) SetName(v string) *DeleteClassifierInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteClassifierResponse -type DeleteClassifierOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteClassifierOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteClassifierOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteConnectionRequest -type DeleteConnectionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which the connection resides. 
If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the connection to delete. - // - // ConnectionName is a required field - ConnectionName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteConnectionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteConnectionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteConnectionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.ConnectionName == nil { - invalidParams.Add(request.NewErrParamRequired("ConnectionName")) - } - if s.ConnectionName != nil && len(*s.ConnectionName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConnectionName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *DeleteConnectionInput) SetCatalogId(v string) *DeleteConnectionInput { - s.CatalogId = &v - return s -} - -// SetConnectionName sets the ConnectionName field's value. 
-func (s *DeleteConnectionInput) SetConnectionName(v string) *DeleteConnectionInput { - s.ConnectionName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteConnectionResponse -type DeleteConnectionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteConnectionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteConnectionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteCrawlerRequest -type DeleteCrawlerInput struct { - _ struct{} `type:"structure"` - - // Name of the crawler to remove. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteCrawlerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteCrawlerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteCrawlerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCrawlerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. 
-func (s *DeleteCrawlerInput) SetName(v string) *DeleteCrawlerInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteCrawlerResponse -type DeleteCrawlerOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteCrawlerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteCrawlerOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteDatabaseRequest -type DeleteDatabaseInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which the database resides. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the Database to delete. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteDatabaseInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteDatabaseInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDatabaseInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. 
-func (s *DeleteDatabaseInput) SetCatalogId(v string) *DeleteDatabaseInput { - s.CatalogId = &v - return s -} - -// SetName sets the Name field's value. -func (s *DeleteDatabaseInput) SetName(v string) *DeleteDatabaseInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteDatabaseResponse -type DeleteDatabaseOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteDatabaseOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteDatabaseOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteDevEndpointRequest -type DeleteDevEndpointInput struct { - _ struct{} `type:"structure"` - - // The name of the DevEndpoint. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteDevEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteDevEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDevEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDevEndpointInput"} - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointName sets the EndpointName field's value. 
-func (s *DeleteDevEndpointInput) SetEndpointName(v string) *DeleteDevEndpointInput { - s.EndpointName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteDevEndpointResponse -type DeleteDevEndpointOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteDevEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteDevEndpointOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteJobRequest -type DeleteJobInput struct { - _ struct{} `type:"structure"` - - // The name of the job to delete. - // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteJobInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteJobInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteJobInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) - } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetJobName sets the JobName field's value. -func (s *DeleteJobInput) SetJobName(v string) *DeleteJobInput { - s.JobName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteJobResponse -type DeleteJobOutput struct { - _ struct{} `type:"structure"` - - // The name of the job that was deleted. 
- JobName *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DeleteJobOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteJobOutput) GoString() string { - return s.String() -} - -// SetJobName sets the JobName field's value. -func (s *DeleteJobOutput) SetJobName(v string) *DeleteJobOutput { - s.JobName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeletePartitionRequest -type DeletePartitionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the partition to be deleted resides. If - // none is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database in which the table in question resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The values that define the partition. - // - // PartitionValues is a required field - PartitionValues []*string `type:"list" required:"true"` - - // The name of the table where the partition to be deleted is located. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeletePartitionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeletePartitionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeletePartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionValues == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionValues")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *DeletePartitionInput) SetCatalogId(v string) *DeletePartitionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *DeletePartitionInput) SetDatabaseName(v string) *DeletePartitionInput { - s.DatabaseName = &v - return s -} - -// SetPartitionValues sets the PartitionValues field's value. -func (s *DeletePartitionInput) SetPartitionValues(v []*string) *DeletePartitionInput { - s.PartitionValues = v - return s -} - -// SetTableName sets the TableName field's value. 
-func (s *DeletePartitionInput) SetTableName(v string) *DeletePartitionInput { - s.TableName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeletePartitionResponse -type DeletePartitionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeletePartitionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeletePartitionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteTableRequest -type DeleteTableInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the table resides. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database in which the table resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The name of the table to be deleted. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *DeleteTableInput) SetCatalogId(v string) *DeleteTableInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *DeleteTableInput) SetDatabaseName(v string) *DeleteTableInput { - s.DatabaseName = &v - return s -} - -// SetName sets the Name field's value. -func (s *DeleteTableInput) SetName(v string) *DeleteTableInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteTableResponse -type DeleteTableOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteTableOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteTriggerRequest -type DeleteTriggerInput struct { - _ struct{} `type:"structure"` - - // The name of the trigger to delete. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteTriggerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteTriggerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteTriggerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTriggerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteTriggerInput) SetName(v string) *DeleteTriggerInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteTriggerResponse -type DeleteTriggerOutput struct { - _ struct{} `type:"structure"` - - // The name of the trigger that was deleted. - Name *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DeleteTriggerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteTriggerOutput) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *DeleteTriggerOutput) SetName(v string) *DeleteTriggerOutput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteUserDefinedFunctionRequest -type DeleteUserDefinedFunctionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the function to be deleted is located. If - // none is supplied, the AWS account ID is used by default. 
- CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database where the function is located. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The name of the function definition to be deleted. - // - // FunctionName is a required field - FunctionName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteUserDefinedFunctionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteUserDefinedFunctionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteUserDefinedFunctionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteUserDefinedFunctionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.FunctionName == nil { - invalidParams.Add(request.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *DeleteUserDefinedFunctionInput) SetCatalogId(v string) *DeleteUserDefinedFunctionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *DeleteUserDefinedFunctionInput) SetDatabaseName(v string) *DeleteUserDefinedFunctionInput { - s.DatabaseName = &v - return s -} - -// SetFunctionName sets the FunctionName field's value. 
-func (s *DeleteUserDefinedFunctionInput) SetFunctionName(v string) *DeleteUserDefinedFunctionInput { - s.FunctionName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteUserDefinedFunctionResponse -type DeleteUserDefinedFunctionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteUserDefinedFunctionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteUserDefinedFunctionOutput) GoString() string { - return s.String() -} - -// A development endpoint where a developer can remotely debug ETL scripts. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DevEndpoint -type DevEndpoint struct { - _ struct{} `type:"structure"` - - // The AWS availability zone where this DevEndpoint is located. - AvailabilityZone *string `type:"string"` - - // The point in time at which this DevEndpoint was created. - CreatedTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The name of the DevEndpoint. - EndpointName *string `type:"string"` - - // Path to one or more Java Jars in an S3 bucket that should be loaded in your - // DevEndpoint. - // - // Please note that only pure Java/Scala libraries can currently be used on - // a DevEndpoint. - ExtraJarsS3Path *string `type:"string"` - - // Path(s) to one or more Python libraries in an S3 bucket that should be loaded - // in your DevEndpoint. Multiple values must be complete paths separated by - // a comma. - // - // Please note that only pure Python libraries can currently be used on a DevEndpoint. - // Libraries that rely on C extensions, such as the pandas (http://pandas.pydata.org/) - // Python data analysis library, are not yet supported. - ExtraPythonLibsS3Path *string `type:"string"` - - // The reason for a current failure in this DevEndpoint. 
- FailureReason *string `type:"string"` - - // The point in time at which this DevEndpoint was last modified. - LastModifiedTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The status of the last update. - LastUpdateStatus *string `type:"string"` - - // The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint. - NumberOfNodes *int64 `type:"integer"` - - // The public address used by this DevEndpoint. - PublicAddress *string `type:"string"` - - // The public key to be used by this DevEndpoint for authentication. - PublicKey *string `type:"string"` - - // The AWS ARN of the IAM role used in this DevEndpoint. - RoleArn *string `type:"string"` - - // A list of security group identifiers used in this DevEndpoint. - SecurityGroupIds []*string `type:"list"` - - // The current status of this DevEndpoint. - Status *string `type:"string"` - - // The subnet ID for this DevEndpoint. - SubnetId *string `type:"string"` - - // The ID of the virtual private cloud (VPC) used by this DevEndpoint. - VpcId *string `type:"string"` - - // The YARN endpoint address used by this DevEndpoint. - YarnEndpointAddress *string `type:"string"` - - // The Apache Zeppelin port for the remote Apache Spark interpreter. - ZeppelinRemoteSparkInterpreterPort *int64 `type:"integer"` -} - -// String returns the string representation -func (s DevEndpoint) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DevEndpoint) GoString() string { - return s.String() -} - -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *DevEndpoint) SetAvailabilityZone(v string) *DevEndpoint { - s.AvailabilityZone = &v - return s -} - -// SetCreatedTimestamp sets the CreatedTimestamp field's value. -func (s *DevEndpoint) SetCreatedTimestamp(v time.Time) *DevEndpoint { - s.CreatedTimestamp = &v - return s -} - -// SetEndpointName sets the EndpointName field's value. 
-func (s *DevEndpoint) SetEndpointName(v string) *DevEndpoint { - s.EndpointName = &v - return s -} - -// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. -func (s *DevEndpoint) SetExtraJarsS3Path(v string) *DevEndpoint { - s.ExtraJarsS3Path = &v - return s -} - -// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. -func (s *DevEndpoint) SetExtraPythonLibsS3Path(v string) *DevEndpoint { - s.ExtraPythonLibsS3Path = &v - return s -} - -// SetFailureReason sets the FailureReason field's value. -func (s *DevEndpoint) SetFailureReason(v string) *DevEndpoint { - s.FailureReason = &v - return s -} - -// SetLastModifiedTimestamp sets the LastModifiedTimestamp field's value. -func (s *DevEndpoint) SetLastModifiedTimestamp(v time.Time) *DevEndpoint { - s.LastModifiedTimestamp = &v - return s -} - -// SetLastUpdateStatus sets the LastUpdateStatus field's value. -func (s *DevEndpoint) SetLastUpdateStatus(v string) *DevEndpoint { - s.LastUpdateStatus = &v - return s -} - -// SetNumberOfNodes sets the NumberOfNodes field's value. -func (s *DevEndpoint) SetNumberOfNodes(v int64) *DevEndpoint { - s.NumberOfNodes = &v - return s -} - -// SetPublicAddress sets the PublicAddress field's value. -func (s *DevEndpoint) SetPublicAddress(v string) *DevEndpoint { - s.PublicAddress = &v - return s -} - -// SetPublicKey sets the PublicKey field's value. -func (s *DevEndpoint) SetPublicKey(v string) *DevEndpoint { - s.PublicKey = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *DevEndpoint) SetRoleArn(v string) *DevEndpoint { - s.RoleArn = &v - return s -} - -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *DevEndpoint) SetSecurityGroupIds(v []*string) *DevEndpoint { - s.SecurityGroupIds = v - return s -} - -// SetStatus sets the Status field's value. -func (s *DevEndpoint) SetStatus(v string) *DevEndpoint { - s.Status = &v - return s -} - -// SetSubnetId sets the SubnetId field's value. 
-func (s *DevEndpoint) SetSubnetId(v string) *DevEndpoint { - s.SubnetId = &v - return s -} - -// SetVpcId sets the VpcId field's value. -func (s *DevEndpoint) SetVpcId(v string) *DevEndpoint { - s.VpcId = &v - return s -} - -// SetYarnEndpointAddress sets the YarnEndpointAddress field's value. -func (s *DevEndpoint) SetYarnEndpointAddress(v string) *DevEndpoint { - s.YarnEndpointAddress = &v - return s -} - -// SetZeppelinRemoteSparkInterpreterPort sets the ZeppelinRemoteSparkInterpreterPort field's value. -func (s *DevEndpoint) SetZeppelinRemoteSparkInterpreterPort(v int64) *DevEndpoint { - s.ZeppelinRemoteSparkInterpreterPort = &v - return s -} - -// Custom libraries to be loaded into a DevEndpoint. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DevEndpointCustomLibraries -type DevEndpointCustomLibraries struct { - _ struct{} `type:"structure"` - - // Path to one or more Java Jars in an S3 bucket that should be loaded in your - // DevEndpoint. - // - // Please note that only pure Java/Scala libraries can currently be used on - // a DevEndpoint. - ExtraJarsS3Path *string `type:"string"` - - // Path(s) to one or more Python libraries in an S3 bucket that should be loaded - // in your DevEndpoint. Multiple values must be complete paths separated by - // a comma. - // - // Please note that only pure Python libraries can currently be used on a DevEndpoint. - // Libraries that rely on C extensions, such as the pandas (http://pandas.pydata.org/) - // Python data analysis library, are not yet supported. - ExtraPythonLibsS3Path *string `type:"string"` -} - -// String returns the string representation -func (s DevEndpointCustomLibraries) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DevEndpointCustomLibraries) GoString() string { - return s.String() -} - -// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. 
-func (s *DevEndpointCustomLibraries) SetExtraJarsS3Path(v string) *DevEndpointCustomLibraries { - s.ExtraJarsS3Path = &v - return s -} - -// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. -func (s *DevEndpointCustomLibraries) SetExtraPythonLibsS3Path(v string) *DevEndpointCustomLibraries { - s.ExtraPythonLibsS3Path = &v - return s -} - -// Contains details about an error. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ErrorDetail -type ErrorDetail struct { - _ struct{} `type:"structure"` - - // The code associated with this error. - ErrorCode *string `min:"1" type:"string"` - - // A message describing the error. - ErrorMessage *string `type:"string"` -} - -// String returns the string representation -func (s ErrorDetail) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ErrorDetail) GoString() string { - return s.String() -} - -// SetErrorCode sets the ErrorCode field's value. -func (s *ErrorDetail) SetErrorCode(v string) *ErrorDetail { - s.ErrorCode = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *ErrorDetail) SetErrorMessage(v string) *ErrorDetail { - s.ErrorMessage = &v - return s -} - -// An execution property of a job. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ExecutionProperty -type ExecutionProperty struct { - _ struct{} `type:"structure"` - - // The maximum number of concurrent runs allowed for a job. - MaxConcurrentRuns *int64 `type:"integer"` -} - -// String returns the string representation -func (s ExecutionProperty) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ExecutionProperty) GoString() string { - return s.String() -} - -// SetMaxConcurrentRuns sets the MaxConcurrentRuns field's value. 
-func (s *ExecutionProperty) SetMaxConcurrentRuns(v int64) *ExecutionProperty { - s.MaxConcurrentRuns = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCatalogImportStatusRequest -type GetCatalogImportStatusInput struct { - _ struct{} `type:"structure"` - - // The ID of the catalog to migrate. Currently, this should be the AWS account - // ID. - CatalogId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s GetCatalogImportStatusInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCatalogImportStatusInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCatalogImportStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCatalogImportStatusInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetCatalogImportStatusInput) SetCatalogId(v string) *GetCatalogImportStatusInput { - s.CatalogId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCatalogImportStatusResponse -type GetCatalogImportStatusOutput struct { - _ struct{} `type:"structure"` - - // The status of the specified catalog migration. - ImportStatus *CatalogImportStatus `type:"structure"` -} - -// String returns the string representation -func (s GetCatalogImportStatusOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCatalogImportStatusOutput) GoString() string { - return s.String() -} - -// SetImportStatus sets the ImportStatus field's value. 
-func (s *GetCatalogImportStatusOutput) SetImportStatus(v *CatalogImportStatus) *GetCatalogImportStatusOutput { - s.ImportStatus = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetClassifierRequest -type GetClassifierInput struct { - _ struct{} `type:"structure"` - - // Name of the classifier to retrieve. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetClassifierInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetClassifierInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetClassifierInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetClassifierInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetClassifierInput) SetName(v string) *GetClassifierInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetClassifierResponse -type GetClassifierOutput struct { - _ struct{} `type:"structure"` - - // The requested classifier. - Classifier *Classifier `type:"structure"` -} - -// String returns the string representation -func (s GetClassifierOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetClassifierOutput) GoString() string { - return s.String() -} - -// SetClassifier sets the Classifier field's value. 
-func (s *GetClassifierOutput) SetClassifier(v *Classifier) *GetClassifierOutput { - s.Classifier = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetClassifiersRequest -type GetClassifiersInput struct { - _ struct{} `type:"structure"` - - // Size of the list to return (optional). - MaxResults *int64 `min:"1" type:"integer"` - - // An optional continuation token. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetClassifiersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetClassifiersInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetClassifiersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetClassifiersInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetClassifiersInput) SetMaxResults(v int64) *GetClassifiersInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetClassifiersInput) SetNextToken(v string) *GetClassifiersInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetClassifiersResponse -type GetClassifiersOutput struct { - _ struct{} `type:"structure"` - - // The requested list of classifier objects. - Classifiers []*Classifier `type:"list"` - - // A continuation token. 
- NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetClassifiersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetClassifiersOutput) GoString() string { - return s.String() -} - -// SetClassifiers sets the Classifiers field's value. -func (s *GetClassifiersOutput) SetClassifiers(v []*Classifier) *GetClassifiersOutput { - s.Classifiers = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetClassifiersOutput) SetNextToken(v string) *GetClassifiersOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetConnectionRequest -type GetConnectionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which the connection resides. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the connection definition to retrieve. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetConnectionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetConnectionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetConnectionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetConnectionInput) SetCatalogId(v string) *GetConnectionInput { - s.CatalogId = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetConnectionInput) SetName(v string) *GetConnectionInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetConnectionResponse -type GetConnectionOutput struct { - _ struct{} `type:"structure"` - - // The requested connection definition. - Connection *Connection `type:"structure"` -} - -// String returns the string representation -func (s GetConnectionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetConnectionOutput) GoString() string { - return s.String() -} - -// SetConnection sets the Connection field's value. -func (s *GetConnectionOutput) SetConnection(v *Connection) *GetConnectionOutput { - s.Connection = v - return s -} - -// Filters the connection definitions returned by the GetConnections API. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetConnectionsFilter -type GetConnectionsFilter struct { - _ struct{} `type:"structure"` - - // The type of connections to return. Currently, only JDBC is supported; SFTP - // is not supported. 
- ConnectionType *string `type:"string" enum:"ConnectionType"` - - // A criteria string that must match the criteria recorded in the connection - // definition for that connection definition to be returned. - MatchCriteria []*string `type:"list"` -} - -// String returns the string representation -func (s GetConnectionsFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetConnectionsFilter) GoString() string { - return s.String() -} - -// SetConnectionType sets the ConnectionType field's value. -func (s *GetConnectionsFilter) SetConnectionType(v string) *GetConnectionsFilter { - s.ConnectionType = &v - return s -} - -// SetMatchCriteria sets the MatchCriteria field's value. -func (s *GetConnectionsFilter) SetMatchCriteria(v []*string) *GetConnectionsFilter { - s.MatchCriteria = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetConnectionsRequest -type GetConnectionsInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which the connections reside. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // A filter that controls which connections will be returned. - Filter *GetConnectionsFilter `type:"structure"` - - // The maximum number of connections to return in one response. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation call. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetConnectionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetConnectionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetConnectionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetConnectionsInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetConnectionsInput) SetCatalogId(v string) *GetConnectionsInput { - s.CatalogId = &v - return s -} - -// SetFilter sets the Filter field's value. -func (s *GetConnectionsInput) SetFilter(v *GetConnectionsFilter) *GetConnectionsInput { - s.Filter = v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetConnectionsInput) SetMaxResults(v int64) *GetConnectionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetConnectionsInput) SetNextToken(v string) *GetConnectionsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetConnectionsResponse -type GetConnectionsOutput struct { - _ struct{} `type:"structure"` - - // A list of requested connection definitions. - ConnectionList []*Connection `type:"list"` - - // A continuation token, if the list of connections returned does not include - // the last of the filtered connections. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetConnectionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetConnectionsOutput) GoString() string { - return s.String() -} - -// SetConnectionList sets the ConnectionList field's value. 
-func (s *GetConnectionsOutput) SetConnectionList(v []*Connection) *GetConnectionsOutput { - s.ConnectionList = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetConnectionsOutput) SetNextToken(v string) *GetConnectionsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawlerRequest -type GetCrawlerInput struct { - _ struct{} `type:"structure"` - - // Name of the crawler to retrieve metadata for. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetCrawlerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCrawlerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCrawlerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCrawlerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetCrawlerInput) SetName(v string) *GetCrawlerInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawlerMetricsRequest -type GetCrawlerMetricsInput struct { - _ struct{} `type:"structure"` - - // A list of the names of crawlers about which to retrieve metrics. - CrawlerNameList []*string `type:"list"` - - // The maximum size of a list to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation call. 
- NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetCrawlerMetricsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCrawlerMetricsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCrawlerMetricsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCrawlerMetricsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCrawlerNameList sets the CrawlerNameList field's value. -func (s *GetCrawlerMetricsInput) SetCrawlerNameList(v []*string) *GetCrawlerMetricsInput { - s.CrawlerNameList = v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetCrawlerMetricsInput) SetMaxResults(v int64) *GetCrawlerMetricsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetCrawlerMetricsInput) SetNextToken(v string) *GetCrawlerMetricsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawlerMetricsResponse -type GetCrawlerMetricsOutput struct { - _ struct{} `type:"structure"` - - // A list of metrics for the specified crawler. - CrawlerMetricsList []*CrawlerMetrics `type:"list"` - - // A continuation token, if the returned list does not contain the last metric - // available. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetCrawlerMetricsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCrawlerMetricsOutput) GoString() string { - return s.String() -} - -// SetCrawlerMetricsList sets the CrawlerMetricsList field's value. 
-func (s *GetCrawlerMetricsOutput) SetCrawlerMetricsList(v []*CrawlerMetrics) *GetCrawlerMetricsOutput { - s.CrawlerMetricsList = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetCrawlerMetricsOutput) SetNextToken(v string) *GetCrawlerMetricsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawlerResponse -type GetCrawlerOutput struct { - _ struct{} `type:"structure"` - - // The metadata for the specified crawler. - Crawler *Crawler `type:"structure"` -} - -// String returns the string representation -func (s GetCrawlerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCrawlerOutput) GoString() string { - return s.String() -} - -// SetCrawler sets the Crawler field's value. -func (s *GetCrawlerOutput) SetCrawler(v *Crawler) *GetCrawlerOutput { - s.Crawler = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawlersRequest -type GetCrawlersInput struct { - _ struct{} `type:"structure"` - - // The number of crawlers to return on each call. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation request. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetCrawlersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCrawlersInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetCrawlersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCrawlersInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetCrawlersInput) SetMaxResults(v int64) *GetCrawlersInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetCrawlersInput) SetNextToken(v string) *GetCrawlersInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetCrawlersResponse -type GetCrawlersOutput struct { - _ struct{} `type:"structure"` - - // A list of crawler metadata. - Crawlers []*Crawler `type:"list"` - - // A continuation token, if the returned list has not reached the end of those - // defined in this customer account. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetCrawlersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCrawlersOutput) GoString() string { - return s.String() -} - -// SetCrawlers sets the Crawlers field's value. -func (s *GetCrawlersOutput) SetCrawlers(v []*Crawler) *GetCrawlersOutput { - s.Crawlers = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetCrawlersOutput) SetNextToken(v string) *GetCrawlersOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDatabaseRequest -type GetDatabaseInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which the database resides. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the database to retrieve. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetDatabaseInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDatabaseInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetDatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDatabaseInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetDatabaseInput) SetCatalogId(v string) *GetDatabaseInput { - s.CatalogId = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetDatabaseInput) SetName(v string) *GetDatabaseInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDatabaseResponse -type GetDatabaseOutput struct { - _ struct{} `type:"structure"` - - // The definition of the specified database in the catalog. - Database *Database `type:"structure"` -} - -// String returns the string representation -func (s GetDatabaseOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDatabaseOutput) GoString() string { - return s.String() -} - -// SetDatabase sets the Database field's value. 
-func (s *GetDatabaseOutput) SetDatabase(v *Database) *GetDatabaseOutput { - s.Database = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDatabasesRequest -type GetDatabasesInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog from which to retrieve Databases. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The maximum number of databases to return in one response. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation call. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetDatabasesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDatabasesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetDatabasesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDatabasesInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetDatabasesInput) SetCatalogId(v string) *GetDatabasesInput { - s.CatalogId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetDatabasesInput) SetMaxResults(v int64) *GetDatabasesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *GetDatabasesInput) SetNextToken(v string) *GetDatabasesInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDatabasesResponse -type GetDatabasesOutput struct { - _ struct{} `type:"structure"` - - // A list of Database objects from the specified catalog. - // - // DatabaseList is a required field - DatabaseList []*Database `type:"list" required:"true"` - - // A continuation token for paginating the returned list of tokens, returned - // if the current segment of the list is not the last. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetDatabasesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDatabasesOutput) GoString() string { - return s.String() -} - -// SetDatabaseList sets the DatabaseList field's value. -func (s *GetDatabasesOutput) SetDatabaseList(v []*Database) *GetDatabasesOutput { - s.DatabaseList = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetDatabasesOutput) SetNextToken(v string) *GetDatabasesOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDataflowGraphRequest -type GetDataflowGraphInput struct { - _ struct{} `type:"structure"` - - // The Python script to transform. - PythonScript *string `type:"string"` -} - -// String returns the string representation -func (s GetDataflowGraphInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDataflowGraphInput) GoString() string { - return s.String() -} - -// SetPythonScript sets the PythonScript field's value. 
-func (s *GetDataflowGraphInput) SetPythonScript(v string) *GetDataflowGraphInput { - s.PythonScript = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDataflowGraphResponse -type GetDataflowGraphOutput struct { - _ struct{} `type:"structure"` - - // A list of the edges in the resulting DAG. - DagEdges []*CodeGenEdge `type:"list"` - - // A list of the nodes in the resulting DAG. - DagNodes []*CodeGenNode `type:"list"` -} - -// String returns the string representation -func (s GetDataflowGraphOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDataflowGraphOutput) GoString() string { - return s.String() -} - -// SetDagEdges sets the DagEdges field's value. -func (s *GetDataflowGraphOutput) SetDagEdges(v []*CodeGenEdge) *GetDataflowGraphOutput { - s.DagEdges = v - return s -} - -// SetDagNodes sets the DagNodes field's value. -func (s *GetDataflowGraphOutput) SetDagNodes(v []*CodeGenNode) *GetDataflowGraphOutput { - s.DagNodes = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDevEndpointRequest -type GetDevEndpointInput struct { - _ struct{} `type:"structure"` - - // Name of the DevEndpoint for which to retrieve information. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s GetDevEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDevEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetDevEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDevEndpointInput"} - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointName sets the EndpointName field's value. -func (s *GetDevEndpointInput) SetEndpointName(v string) *GetDevEndpointInput { - s.EndpointName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDevEndpointResponse -type GetDevEndpointOutput struct { - _ struct{} `type:"structure"` - - // A DevEndpoint definition. - DevEndpoint *DevEndpoint `type:"structure"` -} - -// String returns the string representation -func (s GetDevEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDevEndpointOutput) GoString() string { - return s.String() -} - -// SetDevEndpoint sets the DevEndpoint field's value. -func (s *GetDevEndpointOutput) SetDevEndpoint(v *DevEndpoint) *GetDevEndpointOutput { - s.DevEndpoint = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDevEndpointsRequest -type GetDevEndpointsInput struct { - _ struct{} `type:"structure"` - - // The maximum size of information to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation call. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetDevEndpointsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDevEndpointsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetDevEndpointsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDevEndpointsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetDevEndpointsInput) SetMaxResults(v int64) *GetDevEndpointsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetDevEndpointsInput) SetNextToken(v string) *GetDevEndpointsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDevEndpointsResponse -type GetDevEndpointsOutput struct { - _ struct{} `type:"structure"` - - // A list of DevEndpoint definitions. - DevEndpoints []*DevEndpoint `type:"list"` - - // A continuation token, if not all DevEndpoint definitions have yet been returned. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetDevEndpointsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDevEndpointsOutput) GoString() string { - return s.String() -} - -// SetDevEndpoints sets the DevEndpoints field's value. -func (s *GetDevEndpointsOutput) SetDevEndpoints(v []*DevEndpoint) *GetDevEndpointsOutput { - s.DevEndpoints = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetDevEndpointsOutput) SetNextToken(v string) *GetDevEndpointsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRequest -type GetJobInput struct { - _ struct{} `type:"structure"` - - // The name of the job to retrieve. 
- // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetJobInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetJobInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) - } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetJobName sets the JobName field's value. -func (s *GetJobInput) SetJobName(v string) *GetJobInput { - s.JobName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobResponse -type GetJobOutput struct { - _ struct{} `type:"structure"` - - // The requested job definition. - Job *Job `type:"structure"` -} - -// String returns the string representation -func (s GetJobOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobOutput) GoString() string { - return s.String() -} - -// SetJob sets the Job field's value. -func (s *GetJobOutput) SetJob(v *Job) *GetJobOutput { - s.Job = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRunRequest -type GetJobRunInput struct { - _ struct{} `type:"structure"` - - // Name of the job being run. - // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` - - // A list of the predecessor runs to return as well. - PredecessorsIncluded *bool `type:"boolean"` - - // The ID of the job run. 
- // - // RunId is a required field - RunId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetJobRunInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobRunInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetJobRunInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetJobRunInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) - } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) - } - if s.RunId == nil { - invalidParams.Add(request.NewErrParamRequired("RunId")) - } - if s.RunId != nil && len(*s.RunId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RunId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetJobName sets the JobName field's value. -func (s *GetJobRunInput) SetJobName(v string) *GetJobRunInput { - s.JobName = &v - return s -} - -// SetPredecessorsIncluded sets the PredecessorsIncluded field's value. -func (s *GetJobRunInput) SetPredecessorsIncluded(v bool) *GetJobRunInput { - s.PredecessorsIncluded = &v - return s -} - -// SetRunId sets the RunId field's value. -func (s *GetJobRunInput) SetRunId(v string) *GetJobRunInput { - s.RunId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRunResponse -type GetJobRunOutput struct { - _ struct{} `type:"structure"` - - // The requested job-run metadata. - JobRun *JobRun `type:"structure"` -} - -// String returns the string representation -func (s GetJobRunOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobRunOutput) GoString() string { - return s.String() -} - -// SetJobRun sets the JobRun field's value. 
-func (s *GetJobRunOutput) SetJobRun(v *JobRun) *GetJobRunOutput { - s.JobRun = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRunsRequest -type GetJobRunsInput struct { - _ struct{} `type:"structure"` - - // The name of the job for which to retrieve all job runs. - // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` - - // The maximum size of the response. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation call. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetJobRunsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobRunsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetJobRunsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetJobRunsInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) - } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetJobName sets the JobName field's value. -func (s *GetJobRunsInput) SetJobName(v string) *GetJobRunsInput { - s.JobName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetJobRunsInput) SetMaxResults(v int64) *GetJobRunsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *GetJobRunsInput) SetNextToken(v string) *GetJobRunsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRunsResponse -type GetJobRunsOutput struct { - _ struct{} `type:"structure"` - - // A list of job-run metatdata objects. - JobRuns []*JobRun `type:"list"` - - // A continuation token, if not all reequested job runs have been returned. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetJobRunsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobRunsOutput) GoString() string { - return s.String() -} - -// SetJobRuns sets the JobRuns field's value. -func (s *GetJobRunsOutput) SetJobRuns(v []*JobRun) *GetJobRunsOutput { - s.JobRuns = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetJobRunsOutput) SetNextToken(v string) *GetJobRunsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobsRequest -type GetJobsInput struct { - _ struct{} `type:"structure"` - - // The maximum size of the response. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation call. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetJobsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetJobsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetJobsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetJobsInput) SetMaxResults(v int64) *GetJobsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetJobsInput) SetNextToken(v string) *GetJobsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobsResponse -type GetJobsOutput struct { - _ struct{} `type:"structure"` - - // A list of jobs. - Jobs []*Job `type:"list"` - - // A continuation token, if not all jobs have yet been returned. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetJobsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobsOutput) GoString() string { - return s.String() -} - -// SetJobs sets the Jobs field's value. -func (s *GetJobsOutput) SetJobs(v []*Job) *GetJobsOutput { - s.Jobs = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetJobsOutput) SetNextToken(v string) *GetJobsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMappingRequest -type GetMappingInput struct { - _ struct{} `type:"structure"` - - // Parameters for the mapping. - Location *Location `type:"structure"` - - // A list of target tables. - Sinks []*CatalogEntry `type:"list"` - - // Specifies the source table. 
- // - // Source is a required field - Source *CatalogEntry `type:"structure" required:"true"` -} - -// String returns the string representation -func (s GetMappingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetMappingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetMappingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetMappingInput"} - if s.Source == nil { - invalidParams.Add(request.NewErrParamRequired("Source")) - } - if s.Location != nil { - if err := s.Location.Validate(); err != nil { - invalidParams.AddNested("Location", err.(request.ErrInvalidParams)) - } - } - if s.Sinks != nil { - for i, v := range s.Sinks { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Sinks", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Source != nil { - if err := s.Source.Validate(); err != nil { - invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLocation sets the Location field's value. -func (s *GetMappingInput) SetLocation(v *Location) *GetMappingInput { - s.Location = v - return s -} - -// SetSinks sets the Sinks field's value. -func (s *GetMappingInput) SetSinks(v []*CatalogEntry) *GetMappingInput { - s.Sinks = v - return s -} - -// SetSource sets the Source field's value. -func (s *GetMappingInput) SetSource(v *CatalogEntry) *GetMappingInput { - s.Source = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMappingResponse -type GetMappingOutput struct { - _ struct{} `type:"structure"` - - // A list of mappings to the specified targets. 
- // - // Mapping is a required field - Mapping []*MappingEntry `type:"list" required:"true"` -} - -// String returns the string representation -func (s GetMappingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetMappingOutput) GoString() string { - return s.String() -} - -// SetMapping sets the Mapping field's value. -func (s *GetMappingOutput) SetMapping(v []*MappingEntry) *GetMappingOutput { - s.Mapping = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitionRequest -type GetPartitionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the partition in question resides. If none - // is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database where the partition resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The values that define the partition. - // - // PartitionValues is a required field - PartitionValues []*string `type:"list" required:"true"` - - // The name of the partition's table. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetPartitionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPartitionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetPartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionValues == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionValues")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetPartitionInput) SetCatalogId(v string) *GetPartitionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetPartitionInput) SetDatabaseName(v string) *GetPartitionInput { - s.DatabaseName = &v - return s -} - -// SetPartitionValues sets the PartitionValues field's value. -func (s *GetPartitionInput) SetPartitionValues(v []*string) *GetPartitionInput { - s.PartitionValues = v - return s -} - -// SetTableName sets the TableName field's value. -func (s *GetPartitionInput) SetTableName(v string) *GetPartitionInput { - s.TableName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitionResponse -type GetPartitionOutput struct { - _ struct{} `type:"structure"` - - // The requested information, in the form of a Partition object. 
- Partition *Partition `type:"structure"` -} - -// String returns the string representation -func (s GetPartitionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPartitionOutput) GoString() string { - return s.String() -} - -// SetPartition sets the Partition field's value. -func (s *GetPartitionOutput) SetPartition(v *Partition) *GetPartitionOutput { - s.Partition = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitionsRequest -type GetPartitionsInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the partitions in question reside. If none - // is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database where the partitions reside. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // An expression filtering the partitions to be returned. - Expression *string `type:"string"` - - // The maximum number of partitions to return in a single response. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is not the first call to retrieve these partitions. - NextToken *string `type:"string"` - - // The segment of the table's partitions to scan in this request. - Segment *Segment `type:"structure"` - - // The name of the partitions' table. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetPartitionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPartitionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetPartitionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPartitionsInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.Segment != nil { - if err := s.Segment.Validate(); err != nil { - invalidParams.AddNested("Segment", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetPartitionsInput) SetCatalogId(v string) *GetPartitionsInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetPartitionsInput) SetDatabaseName(v string) *GetPartitionsInput { - s.DatabaseName = &v - return s -} - -// SetExpression sets the Expression field's value. -func (s *GetPartitionsInput) SetExpression(v string) *GetPartitionsInput { - s.Expression = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetPartitionsInput) SetMaxResults(v int64) *GetPartitionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetPartitionsInput) SetNextToken(v string) *GetPartitionsInput { - s.NextToken = &v - return s -} - -// SetSegment sets the Segment field's value. 
-func (s *GetPartitionsInput) SetSegment(v *Segment) *GetPartitionsInput { - s.Segment = v - return s -} - -// SetTableName sets the TableName field's value. -func (s *GetPartitionsInput) SetTableName(v string) *GetPartitionsInput { - s.TableName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitionsResponse -type GetPartitionsOutput struct { - _ struct{} `type:"structure"` - - // A continuation token, if the returned list of partitions does not does not - // include the last one. - NextToken *string `type:"string"` - - // A list of requested partitions. - Partitions []*Partition `type:"list"` -} - -// String returns the string representation -func (s GetPartitionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPartitionsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *GetPartitionsOutput) SetNextToken(v string) *GetPartitionsOutput { - s.NextToken = &v - return s -} - -// SetPartitions sets the Partitions field's value. -func (s *GetPartitionsOutput) SetPartitions(v []*Partition) *GetPartitionsOutput { - s.Partitions = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPlanRequest -type GetPlanInput struct { - _ struct{} `type:"structure"` - - // Parameters for the mapping. - Location *Location `type:"structure"` - - // The list of mappings from a source table to target tables. - // - // Mapping is a required field - Mapping []*MappingEntry `type:"list" required:"true"` - - // The target tables. - Sinks []*CatalogEntry `type:"list"` - - // The source table. 
- // - // Source is a required field - Source *CatalogEntry `type:"structure" required:"true"` -} - -// String returns the string representation -func (s GetPlanInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPlanInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetPlanInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPlanInput"} - if s.Mapping == nil { - invalidParams.Add(request.NewErrParamRequired("Mapping")) - } - if s.Source == nil { - invalidParams.Add(request.NewErrParamRequired("Source")) - } - if s.Location != nil { - if err := s.Location.Validate(); err != nil { - invalidParams.AddNested("Location", err.(request.ErrInvalidParams)) - } - } - if s.Sinks != nil { - for i, v := range s.Sinks { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Sinks", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Source != nil { - if err := s.Source.Validate(); err != nil { - invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLocation sets the Location field's value. -func (s *GetPlanInput) SetLocation(v *Location) *GetPlanInput { - s.Location = v - return s -} - -// SetMapping sets the Mapping field's value. -func (s *GetPlanInput) SetMapping(v []*MappingEntry) *GetPlanInput { - s.Mapping = v - return s -} - -// SetSinks sets the Sinks field's value. -func (s *GetPlanInput) SetSinks(v []*CatalogEntry) *GetPlanInput { - s.Sinks = v - return s -} - -// SetSource sets the Source field's value. 
-func (s *GetPlanInput) SetSource(v *CatalogEntry) *GetPlanInput { - s.Source = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPlanResponse -type GetPlanOutput struct { - _ struct{} `type:"structure"` - - // A Python script to perform the mapping. - PythonScript *string `type:"string"` -} - -// String returns the string representation -func (s GetPlanOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPlanOutput) GoString() string { - return s.String() -} - -// SetPythonScript sets the PythonScript field's value. -func (s *GetPlanOutput) SetPythonScript(v string) *GetPlanOutput { - s.PythonScript = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableRequest -type GetTableInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the table resides. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the database in the catalog in which the table resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The name of the table for which to retrieve the definition. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTableInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetTableInput) SetCatalogId(v string) *GetTableInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetTableInput) SetDatabaseName(v string) *GetTableInput { - s.DatabaseName = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetTableInput) SetName(v string) *GetTableInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableResponse -type GetTableOutput struct { - _ struct{} `type:"structure"` - - // The Table object that defines the specified table. - Table *Table `type:"structure"` -} - -// String returns the string representation -func (s GetTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTableOutput) GoString() string { - return s.String() -} - -// SetTable sets the Table field's value. 
-func (s *GetTableOutput) SetTable(v *Table) *GetTableOutput { - s.Table = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersionsRequest -type GetTableVersionsInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the tables reside. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The database in the catalog in which the table resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The maximum number of table versions to return in one response. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is not the first call. - NextToken *string `type:"string"` - - // The name of the table. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetTableVersionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTableVersionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetTableVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTableVersionsInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetTableVersionsInput) SetCatalogId(v string) *GetTableVersionsInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetTableVersionsInput) SetDatabaseName(v string) *GetTableVersionsInput { - s.DatabaseName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetTableVersionsInput) SetMaxResults(v int64) *GetTableVersionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetTableVersionsInput) SetNextToken(v string) *GetTableVersionsInput { - s.NextToken = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *GetTableVersionsInput) SetTableName(v string) *GetTableVersionsInput { - s.TableName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersionsResponse -type GetTableVersionsOutput struct { - _ struct{} `type:"structure"` - - // A continuation token, if the list of available versions does not include - // the last one. 
- NextToken *string `type:"string"` - - // A list of strings identifying available versions of the specified table. - TableVersions []*TableVersion `type:"list"` -} - -// String returns the string representation -func (s GetTableVersionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTableVersionsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *GetTableVersionsOutput) SetNextToken(v string) *GetTableVersionsOutput { - s.NextToken = &v - return s -} - -// SetTableVersions sets the TableVersions field's value. -func (s *GetTableVersionsOutput) SetTableVersions(v []*TableVersion) *GetTableVersionsOutput { - s.TableVersions = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTablesRequest -type GetTablesInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the tables reside. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The database in the catalog whose tables to list. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A regular expression pattern. If present, only those tables whose names match - // the pattern are returned. - Expression *string `type:"string"` - - // The maximum number of tables to return in a single response. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, included if this is a continuation call. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetTablesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTablesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetTablesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTablesInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetTablesInput) SetCatalogId(v string) *GetTablesInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetTablesInput) SetDatabaseName(v string) *GetTablesInput { - s.DatabaseName = &v - return s -} - -// SetExpression sets the Expression field's value. -func (s *GetTablesInput) SetExpression(v string) *GetTablesInput { - s.Expression = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetTablesInput) SetMaxResults(v int64) *GetTablesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetTablesInput) SetNextToken(v string) *GetTablesInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTablesResponse -type GetTablesOutput struct { - _ struct{} `type:"structure"` - - // A continuation token, present if the current list segment is not the last. - NextToken *string `type:"string"` - - // A list of the requested Table objects. 
- TableList []*Table `type:"list"` -} - -// String returns the string representation -func (s GetTablesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTablesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *GetTablesOutput) SetNextToken(v string) *GetTablesOutput { - s.NextToken = &v - return s -} - -// SetTableList sets the TableList field's value. -func (s *GetTablesOutput) SetTableList(v []*Table) *GetTablesOutput { - s.TableList = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTriggerRequest -type GetTriggerInput struct { - _ struct{} `type:"structure"` - - // The name of the trigger to retrieve. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetTriggerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTriggerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetTriggerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTriggerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetTriggerInput) SetName(v string) *GetTriggerInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTriggerResponse -type GetTriggerOutput struct { - _ struct{} `type:"structure"` - - // The requested trigger definition. 
- Trigger *Trigger `type:"structure"` -} - -// String returns the string representation -func (s GetTriggerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTriggerOutput) GoString() string { - return s.String() -} - -// SetTrigger sets the Trigger field's value. -func (s *GetTriggerOutput) SetTrigger(v *Trigger) *GetTriggerOutput { - s.Trigger = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTriggersRequest -type GetTriggersInput struct { - _ struct{} `type:"structure"` - - // The name of the job for which to retrieve triggers. - DependentJobName *string `min:"1" type:"string"` - - // The maximum size of the response. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation call. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetTriggersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTriggersInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetTriggersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTriggersInput"} - if s.DependentJobName != nil && len(*s.DependentJobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DependentJobName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDependentJobName sets the DependentJobName field's value. -func (s *GetTriggersInput) SetDependentJobName(v string) *GetTriggersInput { - s.DependentJobName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. 
-func (s *GetTriggersInput) SetMaxResults(v int64) *GetTriggersInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetTriggersInput) SetNextToken(v string) *GetTriggersInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTriggersResponse -type GetTriggersOutput struct { - _ struct{} `type:"structure"` - - // A continuation token, if not all the requested triggers have yet been returned. - NextToken *string `type:"string"` - - // A list of triggers for the specified job. - Triggers []*Trigger `type:"list"` -} - -// String returns the string representation -func (s GetTriggersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetTriggersOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *GetTriggersOutput) SetNextToken(v string) *GetTriggersOutput { - s.NextToken = &v - return s -} - -// SetTriggers sets the Triggers field's value. -func (s *GetTriggersOutput) SetTriggers(v []*Trigger) *GetTriggersOutput { - s.Triggers = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunctionRequest -type GetUserDefinedFunctionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the function to be retrieved is located. - // If none is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database where the function is located. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The name of the function. 
- // - // FunctionName is a required field - FunctionName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetUserDefinedFunctionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetUserDefinedFunctionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetUserDefinedFunctionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetUserDefinedFunctionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.FunctionName == nil { - invalidParams.Add(request.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetUserDefinedFunctionInput) SetCatalogId(v string) *GetUserDefinedFunctionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetUserDefinedFunctionInput) SetDatabaseName(v string) *GetUserDefinedFunctionInput { - s.DatabaseName = &v - return s -} - -// SetFunctionName sets the FunctionName field's value. 
-func (s *GetUserDefinedFunctionInput) SetFunctionName(v string) *GetUserDefinedFunctionInput { - s.FunctionName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunctionResponse -type GetUserDefinedFunctionOutput struct { - _ struct{} `type:"structure"` - - // The requested function definition. - UserDefinedFunction *UserDefinedFunction `type:"structure"` -} - -// String returns the string representation -func (s GetUserDefinedFunctionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetUserDefinedFunctionOutput) GoString() string { - return s.String() -} - -// SetUserDefinedFunction sets the UserDefinedFunction field's value. -func (s *GetUserDefinedFunctionOutput) SetUserDefinedFunction(v *UserDefinedFunction) *GetUserDefinedFunctionOutput { - s.UserDefinedFunction = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunctionsRequest -type GetUserDefinedFunctionsInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the functions to be retrieved are located. - // If none is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database where the functions are located. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The maximum number of functions to return in one response. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation call. - NextToken *string `type:"string"` - - // An optional function-name pattern string that filters the function definitions - // returned. 
- // - // Pattern is a required field - Pattern *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetUserDefinedFunctionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetUserDefinedFunctionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetUserDefinedFunctionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetUserDefinedFunctionsInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.Pattern == nil { - invalidParams.Add(request.NewErrParamRequired("Pattern")) - } - if s.Pattern != nil && len(*s.Pattern) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Pattern", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetUserDefinedFunctionsInput) SetCatalogId(v string) *GetUserDefinedFunctionsInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetUserDefinedFunctionsInput) SetDatabaseName(v string) *GetUserDefinedFunctionsInput { - s.DatabaseName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetUserDefinedFunctionsInput) SetMaxResults(v int64) *GetUserDefinedFunctionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *GetUserDefinedFunctionsInput) SetNextToken(v string) *GetUserDefinedFunctionsInput { - s.NextToken = &v - return s -} - -// SetPattern sets the Pattern field's value. -func (s *GetUserDefinedFunctionsInput) SetPattern(v string) *GetUserDefinedFunctionsInput { - s.Pattern = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunctionsResponse -type GetUserDefinedFunctionsOutput struct { - _ struct{} `type:"structure"` - - // A continuation token, if the list of functions returned does not include - // the last requested function. - NextToken *string `type:"string"` - - // A list of requested function definitions. - UserDefinedFunctions []*UserDefinedFunction `type:"list"` -} - -// String returns the string representation -func (s GetUserDefinedFunctionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetUserDefinedFunctionsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *GetUserDefinedFunctionsOutput) SetNextToken(v string) *GetUserDefinedFunctionsOutput { - s.NextToken = &v - return s -} - -// SetUserDefinedFunctions sets the UserDefinedFunctions field's value. -func (s *GetUserDefinedFunctionsOutput) SetUserDefinedFunctions(v []*UserDefinedFunction) *GetUserDefinedFunctionsOutput { - s.UserDefinedFunctions = v - return s -} - -// A classifier that uses grok patterns. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GrokClassifier -type GrokClassifier struct { - _ struct{} `type:"structure"` - - // An identifier of the data format that the classifier matches, such as Twitter, - // JSON, Omniture logs, and so on. - // - // Classification is a required field - Classification *string `type:"string" required:"true"` - - // The time this classifier was registered. 
- CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Optional custom grok patterns defined by this classifier. For more information, - // see custom patterns in Writing Custom Classifers (http://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). - CustomPatterns *string `type:"string"` - - // The grok pattern applied to a data store by this classifier. For more information, - // see built-in patterns in Writing Custom Classifers (http://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). - // - // GrokPattern is a required field - GrokPattern *string `min:"1" type:"string" required:"true"` - - // The time this classifier was last updated. - LastUpdated *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The name of the classifier. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The version of this classifier. - Version *int64 `type:"long"` -} - -// String returns the string representation -func (s GrokClassifier) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GrokClassifier) GoString() string { - return s.String() -} - -// SetClassification sets the Classification field's value. -func (s *GrokClassifier) SetClassification(v string) *GrokClassifier { - s.Classification = &v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *GrokClassifier) SetCreationTime(v time.Time) *GrokClassifier { - s.CreationTime = &v - return s -} - -// SetCustomPatterns sets the CustomPatterns field's value. -func (s *GrokClassifier) SetCustomPatterns(v string) *GrokClassifier { - s.CustomPatterns = &v - return s -} - -// SetGrokPattern sets the GrokPattern field's value. -func (s *GrokClassifier) SetGrokPattern(v string) *GrokClassifier { - s.GrokPattern = &v - return s -} - -// SetLastUpdated sets the LastUpdated field's value. 
-func (s *GrokClassifier) SetLastUpdated(v time.Time) *GrokClassifier { - s.LastUpdated = &v - return s -} - -// SetName sets the Name field's value. -func (s *GrokClassifier) SetName(v string) *GrokClassifier { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *GrokClassifier) SetVersion(v int64) *GrokClassifier { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlueRequest -type ImportCatalogToGlueInput struct { - _ struct{} `type:"structure"` - - // The ID of the catalog to import. Currently, this should be the AWS account - // ID. - CatalogId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s ImportCatalogToGlueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ImportCatalogToGlueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ImportCatalogToGlueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ImportCatalogToGlueInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. 
-func (s *ImportCatalogToGlueInput) SetCatalogId(v string) *ImportCatalogToGlueInput { - s.CatalogId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlueResponse -type ImportCatalogToGlueOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s ImportCatalogToGlueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ImportCatalogToGlueOutput) GoString() string { - return s.String() -} - -// Specifies a JDBC data store to crawl. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JdbcTarget -type JdbcTarget struct { - _ struct{} `type:"structure"` - - // The name of the connection to use to connect to the JDBC target. - ConnectionName *string `type:"string"` - - // A list of glob patterns used to exclude from the crawl. For more information, - // see Catalog Tables with a Crawler (http://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). - Exclusions []*string `type:"list"` - - // The path of the JDBC target. - Path *string `type:"string"` -} - -// String returns the string representation -func (s JdbcTarget) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s JdbcTarget) GoString() string { - return s.String() -} - -// SetConnectionName sets the ConnectionName field's value. -func (s *JdbcTarget) SetConnectionName(v string) *JdbcTarget { - s.ConnectionName = &v - return s -} - -// SetExclusions sets the Exclusions field's value. -func (s *JdbcTarget) SetExclusions(v []*string) *JdbcTarget { - s.Exclusions = v - return s -} - -// SetPath sets the Path field's value. -func (s *JdbcTarget) SetPath(v string) *JdbcTarget { - s.Path = &v - return s -} - -// Specifies a job in the Data Catalog. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Job -type Job struct { - _ struct{} `type:"structure"` - - // The number of capacity units allocated to this job. - AllocatedCapacity *int64 `type:"integer"` - - // The JobCommand that executes this job. - Command *JobCommand `type:"structure"` - - // The connections used for this job. - Connections *ConnectionsList `type:"structure"` - - // The time and date that this job specification was created. - CreatedOn *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The default parameters for this job. - DefaultArguments map[string]*string `type:"map"` - - // Description of this job. - Description *string `type:"string"` - - // An ExecutionProperty specifying the maximum number of concurrent runs allowed - // for this job. - ExecutionProperty *ExecutionProperty `type:"structure"` - - // The last point in time when this job specification was modified. - LastModifiedOn *time.Time `type:"timestamp" timestampFormat:"unix"` - - // This field is reserved for future use. - LogUri *string `type:"string"` - - // The maximum number of times to retry this job if it fails. - MaxRetries *int64 `type:"integer"` - - // The name you assign to this job. - Name *string `min:"1" type:"string"` - - // The role associated with this job. - Role *string `type:"string"` -} - -// String returns the string representation -func (s Job) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Job) GoString() string { - return s.String() -} - -// SetAllocatedCapacity sets the AllocatedCapacity field's value. -func (s *Job) SetAllocatedCapacity(v int64) *Job { - s.AllocatedCapacity = &v - return s -} - -// SetCommand sets the Command field's value. -func (s *Job) SetCommand(v *JobCommand) *Job { - s.Command = v - return s -} - -// SetConnections sets the Connections field's value. 
-func (s *Job) SetConnections(v *ConnectionsList) *Job { - s.Connections = v - return s -} - -// SetCreatedOn sets the CreatedOn field's value. -func (s *Job) SetCreatedOn(v time.Time) *Job { - s.CreatedOn = &v - return s -} - -// SetDefaultArguments sets the DefaultArguments field's value. -func (s *Job) SetDefaultArguments(v map[string]*string) *Job { - s.DefaultArguments = v - return s -} - -// SetDescription sets the Description field's value. -func (s *Job) SetDescription(v string) *Job { - s.Description = &v - return s -} - -// SetExecutionProperty sets the ExecutionProperty field's value. -func (s *Job) SetExecutionProperty(v *ExecutionProperty) *Job { - s.ExecutionProperty = v - return s -} - -// SetLastModifiedOn sets the LastModifiedOn field's value. -func (s *Job) SetLastModifiedOn(v time.Time) *Job { - s.LastModifiedOn = &v - return s -} - -// SetLogUri sets the LogUri field's value. -func (s *Job) SetLogUri(v string) *Job { - s.LogUri = &v - return s -} - -// SetMaxRetries sets the MaxRetries field's value. -func (s *Job) SetMaxRetries(v int64) *Job { - s.MaxRetries = &v - return s -} - -// SetName sets the Name field's value. -func (s *Job) SetName(v string) *Job { - s.Name = &v - return s -} - -// SetRole sets the Role field's value. -func (s *Job) SetRole(v string) *Job { - s.Role = &v - return s -} - -// Defines a point which a job can resume processing. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobBookmarkEntry -type JobBookmarkEntry struct { - _ struct{} `type:"structure"` - - // The attempt ID number. - Attempt *int64 `type:"integer"` - - // The bookmark itself. - JobBookmark *string `type:"string"` - - // Name of the job in question. - JobName *string `type:"string"` - - // The run ID number. - Run *int64 `type:"integer"` - - // Version of the job. 
- Version *int64 `type:"integer"` -} - -// String returns the string representation -func (s JobBookmarkEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s JobBookmarkEntry) GoString() string { - return s.String() -} - -// SetAttempt sets the Attempt field's value. -func (s *JobBookmarkEntry) SetAttempt(v int64) *JobBookmarkEntry { - s.Attempt = &v - return s -} - -// SetJobBookmark sets the JobBookmark field's value. -func (s *JobBookmarkEntry) SetJobBookmark(v string) *JobBookmarkEntry { - s.JobBookmark = &v - return s -} - -// SetJobName sets the JobName field's value. -func (s *JobBookmarkEntry) SetJobName(v string) *JobBookmarkEntry { - s.JobName = &v - return s -} - -// SetRun sets the Run field's value. -func (s *JobBookmarkEntry) SetRun(v int64) *JobBookmarkEntry { - s.Run = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *JobBookmarkEntry) SetVersion(v int64) *JobBookmarkEntry { - s.Version = &v - return s -} - -// Specifies code that executes a job. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobCommand -type JobCommand struct { - _ struct{} `type:"structure"` - - // The name of this job command. - Name *string `type:"string"` - - // Specifies the location of a script that executes a job. - ScriptLocation *string `type:"string"` -} - -// String returns the string representation -func (s JobCommand) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s JobCommand) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *JobCommand) SetName(v string) *JobCommand { - s.Name = &v - return s -} - -// SetScriptLocation sets the ScriptLocation field's value. -func (s *JobCommand) SetScriptLocation(v string) *JobCommand { - s.ScriptLocation = &v - return s -} - -// Contains information about a job run. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobRun -type JobRun struct { - _ struct{} `type:"structure"` - - // The amount of infrastructure capacity allocated to this job run. - AllocatedCapacity *int64 `type:"integer"` - - // The job arguments associated with this run. - Arguments map[string]*string `type:"map"` - - // The number or the attempt to run this job. - Attempt *int64 `type:"integer"` - - // The date and time this job run completed. - CompletedOn *time.Time `type:"timestamp" timestampFormat:"unix"` - - // An error message associated with this job run. - ErrorMessage *string `type:"string"` - - // The ID of this job run. - Id *string `min:"1" type:"string"` - - // The name of the job being run. - JobName *string `min:"1" type:"string"` - - // The current state of the job run. - JobRunState *string `type:"string" enum:"JobRunState"` - - // The last time this job run was modified. - LastModifiedOn *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A list of predecessors to this job run. - PredecessorRuns []*Predecessor `type:"list"` - - // The ID of the previous run of this job. - PreviousRunId *string `min:"1" type:"string"` - - // The date and time at which this job run was started. - StartedOn *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The name of the trigger for this job run. - TriggerName *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s JobRun) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s JobRun) GoString() string { - return s.String() -} - -// SetAllocatedCapacity sets the AllocatedCapacity field's value. -func (s *JobRun) SetAllocatedCapacity(v int64) *JobRun { - s.AllocatedCapacity = &v - return s -} - -// SetArguments sets the Arguments field's value. -func (s *JobRun) SetArguments(v map[string]*string) *JobRun { - s.Arguments = v - return s -} - -// SetAttempt sets the Attempt field's value. 
-func (s *JobRun) SetAttempt(v int64) *JobRun { - s.Attempt = &v - return s -} - -// SetCompletedOn sets the CompletedOn field's value. -func (s *JobRun) SetCompletedOn(v time.Time) *JobRun { - s.CompletedOn = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *JobRun) SetErrorMessage(v string) *JobRun { - s.ErrorMessage = &v - return s -} - -// SetId sets the Id field's value. -func (s *JobRun) SetId(v string) *JobRun { - s.Id = &v - return s -} - -// SetJobName sets the JobName field's value. -func (s *JobRun) SetJobName(v string) *JobRun { - s.JobName = &v - return s -} - -// SetJobRunState sets the JobRunState field's value. -func (s *JobRun) SetJobRunState(v string) *JobRun { - s.JobRunState = &v - return s -} - -// SetLastModifiedOn sets the LastModifiedOn field's value. -func (s *JobRun) SetLastModifiedOn(v time.Time) *JobRun { - s.LastModifiedOn = &v - return s -} - -// SetPredecessorRuns sets the PredecessorRuns field's value. -func (s *JobRun) SetPredecessorRuns(v []*Predecessor) *JobRun { - s.PredecessorRuns = v - return s -} - -// SetPreviousRunId sets the PreviousRunId field's value. -func (s *JobRun) SetPreviousRunId(v string) *JobRun { - s.PreviousRunId = &v - return s -} - -// SetStartedOn sets the StartedOn field's value. -func (s *JobRun) SetStartedOn(v time.Time) *JobRun { - s.StartedOn = &v - return s -} - -// SetTriggerName sets the TriggerName field's value. -func (s *JobRun) SetTriggerName(v string) *JobRun { - s.TriggerName = &v - return s -} - -// Specifies information used to update an existing job. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobUpdate -type JobUpdate struct { - _ struct{} `type:"structure"` - - // The number of capacity units allocated to this job. - AllocatedCapacity *int64 `type:"integer"` - - // The JobCommand that executes this job. - Command *JobCommand `type:"structure"` - - // The connections used for this job. 
- Connections *ConnectionsList `type:"structure"` - - // The default parameters for this job. - DefaultArguments map[string]*string `type:"map"` - - // Description of the job. - Description *string `type:"string"` - - // An ExecutionProperty specifying the maximum number of concurrent runs allowed - // for this job. - ExecutionProperty *ExecutionProperty `type:"structure"` - - // This field is reserved for future use. - LogUri *string `type:"string"` - - // The maximum number of times to retry this job if it fails. - MaxRetries *int64 `type:"integer"` - - // The role associated with this job. - Role *string `type:"string"` -} - -// String returns the string representation -func (s JobUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s JobUpdate) GoString() string { - return s.String() -} - -// SetAllocatedCapacity sets the AllocatedCapacity field's value. -func (s *JobUpdate) SetAllocatedCapacity(v int64) *JobUpdate { - s.AllocatedCapacity = &v - return s -} - -// SetCommand sets the Command field's value. -func (s *JobUpdate) SetCommand(v *JobCommand) *JobUpdate { - s.Command = v - return s -} - -// SetConnections sets the Connections field's value. -func (s *JobUpdate) SetConnections(v *ConnectionsList) *JobUpdate { - s.Connections = v - return s -} - -// SetDefaultArguments sets the DefaultArguments field's value. -func (s *JobUpdate) SetDefaultArguments(v map[string]*string) *JobUpdate { - s.DefaultArguments = v - return s -} - -// SetDescription sets the Description field's value. -func (s *JobUpdate) SetDescription(v string) *JobUpdate { - s.Description = &v - return s -} - -// SetExecutionProperty sets the ExecutionProperty field's value. -func (s *JobUpdate) SetExecutionProperty(v *ExecutionProperty) *JobUpdate { - s.ExecutionProperty = v - return s -} - -// SetLogUri sets the LogUri field's value. 
-func (s *JobUpdate) SetLogUri(v string) *JobUpdate { - s.LogUri = &v - return s -} - -// SetMaxRetries sets the MaxRetries field's value. -func (s *JobUpdate) SetMaxRetries(v int64) *JobUpdate { - s.MaxRetries = &v - return s -} - -// SetRole sets the Role field's value. -func (s *JobUpdate) SetRole(v string) *JobUpdate { - s.Role = &v - return s -} - -// Status and error information about the most recent crawl. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LastCrawlInfo -type LastCrawlInfo struct { - _ struct{} `type:"structure"` - - // If an error occurred, the error information about the last crawl. - ErrorMessage *string `type:"string"` - - // The log group for the last crawl. - LogGroup *string `min:"1" type:"string"` - - // The log stream for the last crawl. - LogStream *string `min:"1" type:"string"` - - // The prefix for a message about this crawl. - MessagePrefix *string `min:"1" type:"string"` - - // The time at which the crawl started. - StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Status of the last crawl. - Status *string `type:"string" enum:"LastCrawlStatus"` -} - -// String returns the string representation -func (s LastCrawlInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LastCrawlInfo) GoString() string { - return s.String() -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *LastCrawlInfo) SetErrorMessage(v string) *LastCrawlInfo { - s.ErrorMessage = &v - return s -} - -// SetLogGroup sets the LogGroup field's value. -func (s *LastCrawlInfo) SetLogGroup(v string) *LastCrawlInfo { - s.LogGroup = &v - return s -} - -// SetLogStream sets the LogStream field's value. -func (s *LastCrawlInfo) SetLogStream(v string) *LastCrawlInfo { - s.LogStream = &v - return s -} - -// SetMessagePrefix sets the MessagePrefix field's value. 
-func (s *LastCrawlInfo) SetMessagePrefix(v string) *LastCrawlInfo { - s.MessagePrefix = &v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *LastCrawlInfo) SetStartTime(v time.Time) *LastCrawlInfo { - s.StartTime = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *LastCrawlInfo) SetStatus(v string) *LastCrawlInfo { - s.Status = &v - return s -} - -// The location of resources. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Location -type Location struct { - _ struct{} `type:"structure"` - - // A JDBC location. - Jdbc []*CodeGenNodeArg `type:"list"` - - // An Amazon S3 location. - S3 []*CodeGenNodeArg `type:"list"` -} - -// String returns the string representation -func (s Location) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Location) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Location) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Location"} - if s.Jdbc != nil { - for i, v := range s.Jdbc { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Jdbc", i), err.(request.ErrInvalidParams)) - } - } - } - if s.S3 != nil { - for i, v := range s.S3 { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "S3", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetJdbc sets the Jdbc field's value. -func (s *Location) SetJdbc(v []*CodeGenNodeArg) *Location { - s.Jdbc = v - return s -} - -// SetS3 sets the S3 field's value. -func (s *Location) SetS3(v []*CodeGenNodeArg) *Location { - s.S3 = v - return s -} - -// Defines a mapping. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MappingEntry -type MappingEntry struct { - _ struct{} `type:"structure"` - - // The source path. - SourcePath *string `type:"string"` - - // The name of the source table. - SourceTable *string `type:"string"` - - // The source type. - SourceType *string `type:"string"` - - // The target path. - TargetPath *string `type:"string"` - - // The target table. - TargetTable *string `type:"string"` - - // The target type. - TargetType *string `type:"string"` -} - -// String returns the string representation -func (s MappingEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MappingEntry) GoString() string { - return s.String() -} - -// SetSourcePath sets the SourcePath field's value. -func (s *MappingEntry) SetSourcePath(v string) *MappingEntry { - s.SourcePath = &v - return s -} - -// SetSourceTable sets the SourceTable field's value. -func (s *MappingEntry) SetSourceTable(v string) *MappingEntry { - s.SourceTable = &v - return s -} - -// SetSourceType sets the SourceType field's value. -func (s *MappingEntry) SetSourceType(v string) *MappingEntry { - s.SourceType = &v - return s -} - -// SetTargetPath sets the TargetPath field's value. -func (s *MappingEntry) SetTargetPath(v string) *MappingEntry { - s.TargetPath = &v - return s -} - -// SetTargetTable sets the TargetTable field's value. -func (s *MappingEntry) SetTargetTable(v string) *MappingEntry { - s.TargetTable = &v - return s -} - -// SetTargetType sets the TargetType field's value. -func (s *MappingEntry) SetTargetType(v string) *MappingEntry { - s.TargetType = &v - return s -} - -// Specifies the sort order of a sorted column. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Order -type Order struct { - _ struct{} `type:"structure"` - - // The name of the column. 
- // - // Column is a required field - Column *string `min:"1" type:"string" required:"true"` - - // Indicates that the column is sorted in ascending order (== 1), or in descending - // order (==0). - // - // SortOrder is a required field - SortOrder *int64 `type:"integer" required:"true"` -} - -// String returns the string representation -func (s Order) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Order) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Order) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Order"} - if s.Column == nil { - invalidParams.Add(request.NewErrParamRequired("Column")) - } - if s.Column != nil && len(*s.Column) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Column", 1)) - } - if s.SortOrder == nil { - invalidParams.Add(request.NewErrParamRequired("SortOrder")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetColumn sets the Column field's value. -func (s *Order) SetColumn(v string) *Order { - s.Column = &v - return s -} - -// SetSortOrder sets the SortOrder field's value. -func (s *Order) SetSortOrder(v int64) *Order { - s.SortOrder = &v - return s -} - -// Represents a slice of table data. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Partition -type Partition struct { - _ struct{} `type:"structure"` - - // The time at which the partition was created. - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The name of the catalog database where the table in question is located. - DatabaseName *string `min:"1" type:"string"` - - // The last time at which the partition was accessed. - LastAccessTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The last time at which column statistics were computed for this partition. 
- LastAnalyzedTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Partition parameters, in the form of a list of key-value pairs. - Parameters map[string]*string `type:"map"` - - // Provides information about the physical location where the partition is stored. - StorageDescriptor *StorageDescriptor `type:"structure"` - - // The name of the table in question. - TableName *string `min:"1" type:"string"` - - // The values of the partition. - Values []*string `type:"list"` -} - -// String returns the string representation -func (s Partition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Partition) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *Partition) SetCreationTime(v time.Time) *Partition { - s.CreationTime = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *Partition) SetDatabaseName(v string) *Partition { - s.DatabaseName = &v - return s -} - -// SetLastAccessTime sets the LastAccessTime field's value. -func (s *Partition) SetLastAccessTime(v time.Time) *Partition { - s.LastAccessTime = &v - return s -} - -// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. -func (s *Partition) SetLastAnalyzedTime(v time.Time) *Partition { - s.LastAnalyzedTime = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *Partition) SetParameters(v map[string]*string) *Partition { - s.Parameters = v - return s -} - -// SetStorageDescriptor sets the StorageDescriptor field's value. -func (s *Partition) SetStorageDescriptor(v *StorageDescriptor) *Partition { - s.StorageDescriptor = v - return s -} - -// SetTableName sets the TableName field's value. -func (s *Partition) SetTableName(v string) *Partition { - s.TableName = &v - return s -} - -// SetValues sets the Values field's value. 
-func (s *Partition) SetValues(v []*string) *Partition { - s.Values = v - return s -} - -// Contains information about a partition error. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionError -type PartitionError struct { - _ struct{} `type:"structure"` - - // Details about the partition error. - ErrorDetail *ErrorDetail `type:"structure"` - - // The values that define the partition. - PartitionValues []*string `type:"list"` -} - -// String returns the string representation -func (s PartitionError) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PartitionError) GoString() string { - return s.String() -} - -// SetErrorDetail sets the ErrorDetail field's value. -func (s *PartitionError) SetErrorDetail(v *ErrorDetail) *PartitionError { - s.ErrorDetail = v - return s -} - -// SetPartitionValues sets the PartitionValues field's value. -func (s *PartitionError) SetPartitionValues(v []*string) *PartitionError { - s.PartitionValues = v - return s -} - -// The structure used to create and update a partion. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionInput -type PartitionInput struct { - _ struct{} `type:"structure"` - - // The last time at which the partition was accessed. - LastAccessTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The last time at which column statistics were computed for this partition. - LastAnalyzedTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Partition parameters, in the form of a list of key-value pairs. - Parameters map[string]*string `type:"map"` - - // Provides information about the physical location where the partition is stored. - StorageDescriptor *StorageDescriptor `type:"structure"` - - // The values of the partition. 
- Values []*string `type:"list"` -} - -// String returns the string representation -func (s PartitionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PartitionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PartitionInput"} - if s.StorageDescriptor != nil { - if err := s.StorageDescriptor.Validate(); err != nil { - invalidParams.AddNested("StorageDescriptor", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLastAccessTime sets the LastAccessTime field's value. -func (s *PartitionInput) SetLastAccessTime(v time.Time) *PartitionInput { - s.LastAccessTime = &v - return s -} - -// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. -func (s *PartitionInput) SetLastAnalyzedTime(v time.Time) *PartitionInput { - s.LastAnalyzedTime = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *PartitionInput) SetParameters(v map[string]*string) *PartitionInput { - s.Parameters = v - return s -} - -// SetStorageDescriptor sets the StorageDescriptor field's value. -func (s *PartitionInput) SetStorageDescriptor(v *StorageDescriptor) *PartitionInput { - s.StorageDescriptor = v - return s -} - -// SetValues sets the Values field's value. -func (s *PartitionInput) SetValues(v []*string) *PartitionInput { - s.Values = v - return s -} - -// Contains a list of values defining partitions. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionValueList -type PartitionValueList struct { - _ struct{} `type:"structure"` - - // The list of values. 
- // - // Values is a required field - Values []*string `type:"list" required:"true"` -} - -// String returns the string representation -func (s PartitionValueList) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PartitionValueList) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PartitionValueList) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PartitionValueList"} - if s.Values == nil { - invalidParams.Add(request.NewErrParamRequired("Values")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetValues sets the Values field's value. -func (s *PartitionValueList) SetValues(v []*string) *PartitionValueList { - s.Values = v - return s -} - -// Specifies the physical requirements for a connection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PhysicalConnectionRequirements -type PhysicalConnectionRequirements struct { - _ struct{} `type:"structure"` - - // The connection's availability zone. - AvailabilityZone *string `min:"1" type:"string"` - - // The security group ID list used by the connection. - SecurityGroupIdList []*string `type:"list"` - - // The subnet ID used by the connection. - SubnetId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s PhysicalConnectionRequirements) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PhysicalConnectionRequirements) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PhysicalConnectionRequirements) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PhysicalConnectionRequirements"} - if s.AvailabilityZone != nil && len(*s.AvailabilityZone) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AvailabilityZone", 1)) - } - if s.SubnetId != nil && len(*s.SubnetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SubnetId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *PhysicalConnectionRequirements) SetAvailabilityZone(v string) *PhysicalConnectionRequirements { - s.AvailabilityZone = &v - return s -} - -// SetSecurityGroupIdList sets the SecurityGroupIdList field's value. -func (s *PhysicalConnectionRequirements) SetSecurityGroupIdList(v []*string) *PhysicalConnectionRequirements { - s.SecurityGroupIdList = v - return s -} - -// SetSubnetId sets the SubnetId field's value. -func (s *PhysicalConnectionRequirements) SetSubnetId(v string) *PhysicalConnectionRequirements { - s.SubnetId = &v - return s -} - -// A job run that preceded this one. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Predecessor -type Predecessor struct { - _ struct{} `type:"structure"` - - // The name of the predecessor job. - JobName *string `min:"1" type:"string"` - - // The job-run ID of the precessor job run. - RunId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s Predecessor) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Predecessor) GoString() string { - return s.String() -} - -// SetJobName sets the JobName field's value. -func (s *Predecessor) SetJobName(v string) *Predecessor { - s.JobName = &v - return s -} - -// SetRunId sets the RunId field's value. 
-func (s *Predecessor) SetRunId(v string) *Predecessor { - s.RunId = &v - return s -} - -// Defines the predicate of the trigger, which determines when it fires. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Predicate -type Predicate struct { - _ struct{} `type:"structure"` - - // A list of the conditions that determine when the trigger will fire. - Conditions []*Condition `type:"list"` - - // Currently "OR" is not supported. - Logical *string `type:"string" enum:"Logical"` -} - -// String returns the string representation -func (s Predicate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Predicate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Predicate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Predicate"} - if s.Conditions != nil { - for i, v := range s.Conditions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Conditions", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConditions sets the Conditions field's value. -func (s *Predicate) SetConditions(v []*Condition) *Predicate { - s.Conditions = v - return s -} - -// SetLogical sets the Logical field's value. -func (s *Predicate) SetLogical(v string) *Predicate { - s.Logical = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmarkRequest -type ResetJobBookmarkInput struct { - _ struct{} `type:"structure"` - - // The name of the job in question. 
- // - // JobName is a required field - JobName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ResetJobBookmarkInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ResetJobBookmarkInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ResetJobBookmarkInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResetJobBookmarkInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetJobName sets the JobName field's value. -func (s *ResetJobBookmarkInput) SetJobName(v string) *ResetJobBookmarkInput { - s.JobName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmarkResponse -type ResetJobBookmarkOutput struct { - _ struct{} `type:"structure"` - - // The reset bookmark entry. - JobBookmarkEntry *JobBookmarkEntry `type:"structure"` -} - -// String returns the string representation -func (s ResetJobBookmarkOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ResetJobBookmarkOutput) GoString() string { - return s.String() -} - -// SetJobBookmarkEntry sets the JobBookmarkEntry field's value. -func (s *ResetJobBookmarkOutput) SetJobBookmarkEntry(v *JobBookmarkEntry) *ResetJobBookmarkOutput { - s.JobBookmarkEntry = v - return s -} - -// URIs for function resources. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResourceUri -type ResourceUri struct { - _ struct{} `type:"structure"` - - // The type of the resource. - ResourceType *string `type:"string" enum:"ResourceType"` - - // The URI for accessing the resource. 
- Uri *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s ResourceUri) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ResourceUri) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ResourceUri) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResourceUri"} - if s.Uri != nil && len(*s.Uri) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Uri", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceType sets the ResourceType field's value. -func (s *ResourceUri) SetResourceType(v string) *ResourceUri { - s.ResourceType = &v - return s -} - -// SetUri sets the Uri field's value. -func (s *ResourceUri) SetUri(v string) *ResourceUri { - s.Uri = &v - return s -} - -// Specifies a data store in Amazon S3. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3Target -type S3Target struct { - _ struct{} `type:"structure"` - - // A list of glob patterns used to exclude from the crawl. For more information, - // see Catalog Tables with a Crawler (http://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). - Exclusions []*string `type:"list"` - - // The path to the Amazon S3 target. - Path *string `type:"string"` -} - -// String returns the string representation -func (s S3Target) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s S3Target) GoString() string { - return s.String() -} - -// SetExclusions sets the Exclusions field's value. -func (s *S3Target) SetExclusions(v []*string) *S3Target { - s.Exclusions = v - return s -} - -// SetPath sets the Path field's value. -func (s *S3Target) SetPath(v string) *S3Target { - s.Path = &v - return s -} - -// A scheduling object using a cron statement to schedule an event. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Schedule -type Schedule struct { - _ struct{} `type:"structure"` - - // A cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). - ScheduleExpression *string `type:"string"` - - // The state of the schedule. - State *string `type:"string" enum:"ScheduleState"` -} - -// String returns the string representation -func (s Schedule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Schedule) GoString() string { - return s.String() -} - -// SetScheduleExpression sets the ScheduleExpression field's value. -func (s *Schedule) SetScheduleExpression(v string) *Schedule { - s.ScheduleExpression = &v - return s -} - -// SetState sets the State field's value. -func (s *Schedule) SetState(v string) *Schedule { - s.State = &v - return s -} - -// Crawler policy for update and deletion behavior. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SchemaChangePolicy -type SchemaChangePolicy struct { - _ struct{} `type:"structure"` - - // The deletion behavior when the crawler finds a deleted object. - DeleteBehavior *string `type:"string" enum:"DeleteBehavior"` - - // The update behavior when the crawler finds a changed schema. - UpdateBehavior *string `type:"string" enum:"UpdateBehavior"` -} - -// String returns the string representation -func (s SchemaChangePolicy) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SchemaChangePolicy) GoString() string { - return s.String() -} - -// SetDeleteBehavior sets the DeleteBehavior field's value. 
-func (s *SchemaChangePolicy) SetDeleteBehavior(v string) *SchemaChangePolicy { - s.DeleteBehavior = &v - return s -} - -// SetUpdateBehavior sets the UpdateBehavior field's value. -func (s *SchemaChangePolicy) SetUpdateBehavior(v string) *SchemaChangePolicy { - s.UpdateBehavior = &v - return s -} - -// Defines a non-overlapping region of a table's partitions, allowing multiple -// requests to be executed in parallel. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Segment -type Segment struct { - _ struct{} `type:"structure"` - - // The zero-based index number of the this segment. For example, if the total - // number of segments is 4, SegmentNumber values will range from zero through - // three. - // - // SegmentNumber is a required field - SegmentNumber *int64 `type:"integer" required:"true"` - - // The total numer of segments. - // - // TotalSegments is a required field - TotalSegments *int64 `min:"1" type:"integer" required:"true"` -} - -// String returns the string representation -func (s Segment) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Segment) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Segment) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Segment"} - if s.SegmentNumber == nil { - invalidParams.Add(request.NewErrParamRequired("SegmentNumber")) - } - if s.TotalSegments == nil { - invalidParams.Add(request.NewErrParamRequired("TotalSegments")) - } - if s.TotalSegments != nil && *s.TotalSegments < 1 { - invalidParams.Add(request.NewErrParamMinValue("TotalSegments", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSegmentNumber sets the SegmentNumber field's value. -func (s *Segment) SetSegmentNumber(v int64) *Segment { - s.SegmentNumber = &v - return s -} - -// SetTotalSegments sets the TotalSegments field's value. 
-func (s *Segment) SetTotalSegments(v int64) *Segment { - s.TotalSegments = &v - return s -} - -// Information about a serialization/deserialization program (SerDe) which serves -// as an extractor and loader. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SerDeInfo -type SerDeInfo struct { - _ struct{} `type:"structure"` - - // Name of the SerDe. - Name *string `min:"1" type:"string"` - - // A list of initialization parameters for the SerDe, in key-value form. - Parameters map[string]*string `type:"map"` - - // Usually the class that implements the SerDe. An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe. - SerializationLibrary *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s SerDeInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SerDeInfo) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SerDeInfo) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SerDeInfo"} - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.SerializationLibrary != nil && len(*s.SerializationLibrary) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SerializationLibrary", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *SerDeInfo) SetName(v string) *SerDeInfo { - s.Name = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *SerDeInfo) SetParameters(v map[string]*string) *SerDeInfo { - s.Parameters = v - return s -} - -// SetSerializationLibrary sets the SerializationLibrary field's value. -func (s *SerDeInfo) SetSerializationLibrary(v string) *SerDeInfo { - s.SerializationLibrary = &v - return s -} - -// Specifies skewed values in a table. 
Skewed are ones that occur with very -// high frequency. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SkewedInfo -type SkewedInfo struct { - _ struct{} `type:"structure"` - - // A list of names of columns that contain skewed values. - SkewedColumnNames []*string `type:"list"` - - // A mapping of skewed values to the columns that contain them. - SkewedColumnValueLocationMaps map[string]*string `type:"map"` - - // A list of values that appear so frequently as to be considered skewed. - SkewedColumnValues []*string `type:"list"` -} - -// String returns the string representation -func (s SkewedInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SkewedInfo) GoString() string { - return s.String() -} - -// SetSkewedColumnNames sets the SkewedColumnNames field's value. -func (s *SkewedInfo) SetSkewedColumnNames(v []*string) *SkewedInfo { - s.SkewedColumnNames = v - return s -} - -// SetSkewedColumnValueLocationMaps sets the SkewedColumnValueLocationMaps field's value. -func (s *SkewedInfo) SetSkewedColumnValueLocationMaps(v map[string]*string) *SkewedInfo { - s.SkewedColumnValueLocationMaps = v - return s -} - -// SetSkewedColumnValues sets the SkewedColumnValues field's value. -func (s *SkewedInfo) SetSkewedColumnValues(v []*string) *SkewedInfo { - s.SkewedColumnValues = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawlerRequest -type StartCrawlerInput struct { - _ struct{} `type:"structure"` - - // Name of the crawler to start. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s StartCrawlerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartCrawlerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *StartCrawlerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartCrawlerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *StartCrawlerInput) SetName(v string) *StartCrawlerInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawlerResponse -type StartCrawlerOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s StartCrawlerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartCrawlerOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawlerScheduleRequest -type StartCrawlerScheduleInput struct { - _ struct{} `type:"structure"` - - // Name of the crawler to schedule. - // - // CrawlerName is a required field - CrawlerName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s StartCrawlerScheduleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartCrawlerScheduleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *StartCrawlerScheduleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartCrawlerScheduleInput"} - if s.CrawlerName == nil { - invalidParams.Add(request.NewErrParamRequired("CrawlerName")) - } - if s.CrawlerName != nil && len(*s.CrawlerName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CrawlerName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCrawlerName sets the CrawlerName field's value. -func (s *StartCrawlerScheduleInput) SetCrawlerName(v string) *StartCrawlerScheduleInput { - s.CrawlerName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawlerScheduleResponse -type StartCrawlerScheduleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s StartCrawlerScheduleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartCrawlerScheduleOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartJobRunRequest -type StartJobRunInput struct { - _ struct{} `type:"structure"` - - // The infrastructure capacity to allocate to this job. - AllocatedCapacity *int64 `type:"integer"` - - // Specific arguments for this job run. - Arguments map[string]*string `type:"map"` - - // The name of the job to start. - // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` - - // The ID of the job run to start. - JobRunId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s StartJobRunInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartJobRunInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *StartJobRunInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartJobRunInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) - } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) - } - if s.JobRunId != nil && len(*s.JobRunId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobRunId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAllocatedCapacity sets the AllocatedCapacity field's value. -func (s *StartJobRunInput) SetAllocatedCapacity(v int64) *StartJobRunInput { - s.AllocatedCapacity = &v - return s -} - -// SetArguments sets the Arguments field's value. -func (s *StartJobRunInput) SetArguments(v map[string]*string) *StartJobRunInput { - s.Arguments = v - return s -} - -// SetJobName sets the JobName field's value. -func (s *StartJobRunInput) SetJobName(v string) *StartJobRunInput { - s.JobName = &v - return s -} - -// SetJobRunId sets the JobRunId field's value. -func (s *StartJobRunInput) SetJobRunId(v string) *StartJobRunInput { - s.JobRunId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartJobRunResponse -type StartJobRunOutput struct { - _ struct{} `type:"structure"` - - // The ID assigned to this job run. - JobRunId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s StartJobRunOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartJobRunOutput) GoString() string { - return s.String() -} - -// SetJobRunId sets the JobRunId field's value. 
-func (s *StartJobRunOutput) SetJobRunId(v string) *StartJobRunOutput { - s.JobRunId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartTriggerRequest -type StartTriggerInput struct { - _ struct{} `type:"structure"` - - // The name of the trigger to start. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s StartTriggerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartTriggerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StartTriggerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartTriggerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *StartTriggerInput) SetName(v string) *StartTriggerInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartTriggerResponse -type StartTriggerOutput struct { - _ struct{} `type:"structure"` - - // The name of the trigger that was started. - Name *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s StartTriggerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartTriggerOutput) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. 
-func (s *StartTriggerOutput) SetName(v string) *StartTriggerOutput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawlerRequest -type StopCrawlerInput struct { - _ struct{} `type:"structure"` - - // Name of the crawler to stop. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s StopCrawlerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopCrawlerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopCrawlerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopCrawlerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *StopCrawlerInput) SetName(v string) *StopCrawlerInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawlerResponse -type StopCrawlerOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s StopCrawlerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopCrawlerOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawlerScheduleRequest -type StopCrawlerScheduleInput struct { - _ struct{} `type:"structure"` - - // Name of the crawler whose schedule state to set. 
- // - // CrawlerName is a required field - CrawlerName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s StopCrawlerScheduleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopCrawlerScheduleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopCrawlerScheduleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopCrawlerScheduleInput"} - if s.CrawlerName == nil { - invalidParams.Add(request.NewErrParamRequired("CrawlerName")) - } - if s.CrawlerName != nil && len(*s.CrawlerName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CrawlerName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCrawlerName sets the CrawlerName field's value. -func (s *StopCrawlerScheduleInput) SetCrawlerName(v string) *StopCrawlerScheduleInput { - s.CrawlerName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawlerScheduleResponse -type StopCrawlerScheduleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s StopCrawlerScheduleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopCrawlerScheduleOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopTriggerRequest -type StopTriggerInput struct { - _ struct{} `type:"structure"` - - // The name of the trigger to stop. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s StopTriggerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopTriggerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopTriggerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopTriggerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *StopTriggerInput) SetName(v string) *StopTriggerInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopTriggerResponse -type StopTriggerOutput struct { - _ struct{} `type:"structure"` - - // The name of the trigger that was stopped. - Name *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s StopTriggerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopTriggerOutput) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *StopTriggerOutput) SetName(v string) *StopTriggerOutput { - s.Name = &v - return s -} - -// Describes the physical storage of table data. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StorageDescriptor -type StorageDescriptor struct { - _ struct{} `type:"structure"` - - // A list of reducer grouping columns, clustering columns, and bucketing columns - // in the table. - BucketColumns []*string `type:"list"` - - // A list of the Columns in the table. 
- Columns []*Column `type:"list"` - - // True if the data in the table is compressed, or False if not. - Compressed *bool `type:"boolean"` - - // The input format: SequenceFileInputFormat (binary), or TextInputFormat, or - // a custom format. - InputFormat *string `type:"string"` - - // The physical location of the table. By default this takes the form of the - // warehouse location, followed by the database location in the warehouse, followed - // by the table name. - Location *string `type:"string"` - - // Must be specified if the table contains any dimension columns. - NumberOfBuckets *int64 `type:"integer"` - - // The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, - // or a custom format. - OutputFormat *string `type:"string"` - - // User-supplied properties in key-value form. - Parameters map[string]*string `type:"map"` - - // Serialization/deserialization (SerDe) information. - SerdeInfo *SerDeInfo `type:"structure"` - - // Information about values that appear very frequently in a column (skewed - // values). - SkewedInfo *SkewedInfo `type:"structure"` - - // A list specifying the sort order of each bucket in the table. - SortColumns []*Order `type:"list"` - - // True if the table data is stored in subdirectories, or False if not. - StoredAsSubDirectories *bool `type:"boolean"` -} - -// String returns the string representation -func (s StorageDescriptor) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StorageDescriptor) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *StorageDescriptor) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StorageDescriptor"} - if s.Columns != nil { - for i, v := range s.Columns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Columns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.SerdeInfo != nil { - if err := s.SerdeInfo.Validate(); err != nil { - invalidParams.AddNested("SerdeInfo", err.(request.ErrInvalidParams)) - } - } - if s.SortColumns != nil { - for i, v := range s.SortColumns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SortColumns", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucketColumns sets the BucketColumns field's value. -func (s *StorageDescriptor) SetBucketColumns(v []*string) *StorageDescriptor { - s.BucketColumns = v - return s -} - -// SetColumns sets the Columns field's value. -func (s *StorageDescriptor) SetColumns(v []*Column) *StorageDescriptor { - s.Columns = v - return s -} - -// SetCompressed sets the Compressed field's value. -func (s *StorageDescriptor) SetCompressed(v bool) *StorageDescriptor { - s.Compressed = &v - return s -} - -// SetInputFormat sets the InputFormat field's value. -func (s *StorageDescriptor) SetInputFormat(v string) *StorageDescriptor { - s.InputFormat = &v - return s -} - -// SetLocation sets the Location field's value. -func (s *StorageDescriptor) SetLocation(v string) *StorageDescriptor { - s.Location = &v - return s -} - -// SetNumberOfBuckets sets the NumberOfBuckets field's value. -func (s *StorageDescriptor) SetNumberOfBuckets(v int64) *StorageDescriptor { - s.NumberOfBuckets = &v - return s -} - -// SetOutputFormat sets the OutputFormat field's value. 
-func (s *StorageDescriptor) SetOutputFormat(v string) *StorageDescriptor { - s.OutputFormat = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *StorageDescriptor) SetParameters(v map[string]*string) *StorageDescriptor { - s.Parameters = v - return s -} - -// SetSerdeInfo sets the SerdeInfo field's value. -func (s *StorageDescriptor) SetSerdeInfo(v *SerDeInfo) *StorageDescriptor { - s.SerdeInfo = v - return s -} - -// SetSkewedInfo sets the SkewedInfo field's value. -func (s *StorageDescriptor) SetSkewedInfo(v *SkewedInfo) *StorageDescriptor { - s.SkewedInfo = v - return s -} - -// SetSortColumns sets the SortColumns field's value. -func (s *StorageDescriptor) SetSortColumns(v []*Order) *StorageDescriptor { - s.SortColumns = v - return s -} - -// SetStoredAsSubDirectories sets the StoredAsSubDirectories field's value. -func (s *StorageDescriptor) SetStoredAsSubDirectories(v bool) *StorageDescriptor { - s.StoredAsSubDirectories = &v - return s -} - -// Represents a collection of related data organized in columns and rows. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Table -type Table struct { - _ struct{} `type:"structure"` - - // Time when the table definition was created in the Data Catalog. - CreateTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Person or entity who created the table. - CreatedBy *string `min:"1" type:"string"` - - // Name of the metadata database where the table metadata resides. - DatabaseName *string `min:"1" type:"string"` - - // Description of the table. - Description *string `type:"string"` - - // Last time the table was accessed. This is usually taken from HDFS, and may - // not be reliable. - LastAccessTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Last time column statistics were computed for this table. - LastAnalyzedTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Name of the table. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Owner of the table. - Owner *string `min:"1" type:"string"` - - // Properties associated with this table, as a list of key-value pairs. - Parameters map[string]*string `type:"map"` - - // A list of columns by which the table is partitioned. Only primitive types - // are supported as partition keys. - PartitionKeys []*Column `type:"list"` - - // Retention time for this table. - Retention *int64 `type:"integer"` - - // A storage descriptor containing information about the physical storage of - // this table. - StorageDescriptor *StorageDescriptor `type:"structure"` - - // The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). - TableType *string `type:"string"` - - // Last time the table was updated. - UpdateTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // If the table is a view, the expanded text of the view; otherwise null. - ViewExpandedText *string `type:"string"` - - // If the table is a view, the original text of the view; otherwise null. - ViewOriginalText *string `type:"string"` -} - -// String returns the string representation -func (s Table) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Table) GoString() string { - return s.String() -} - -// SetCreateTime sets the CreateTime field's value. -func (s *Table) SetCreateTime(v time.Time) *Table { - s.CreateTime = &v - return s -} - -// SetCreatedBy sets the CreatedBy field's value. -func (s *Table) SetCreatedBy(v string) *Table { - s.CreatedBy = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *Table) SetDatabaseName(v string) *Table { - s.DatabaseName = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *Table) SetDescription(v string) *Table { - s.Description = &v - return s -} - -// SetLastAccessTime sets the LastAccessTime field's value. 
-func (s *Table) SetLastAccessTime(v time.Time) *Table { - s.LastAccessTime = &v - return s -} - -// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. -func (s *Table) SetLastAnalyzedTime(v time.Time) *Table { - s.LastAnalyzedTime = &v - return s -} - -// SetName sets the Name field's value. -func (s *Table) SetName(v string) *Table { - s.Name = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *Table) SetOwner(v string) *Table { - s.Owner = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *Table) SetParameters(v map[string]*string) *Table { - s.Parameters = v - return s -} - -// SetPartitionKeys sets the PartitionKeys field's value. -func (s *Table) SetPartitionKeys(v []*Column) *Table { - s.PartitionKeys = v - return s -} - -// SetRetention sets the Retention field's value. -func (s *Table) SetRetention(v int64) *Table { - s.Retention = &v - return s -} - -// SetStorageDescriptor sets the StorageDescriptor field's value. -func (s *Table) SetStorageDescriptor(v *StorageDescriptor) *Table { - s.StorageDescriptor = v - return s -} - -// SetTableType sets the TableType field's value. -func (s *Table) SetTableType(v string) *Table { - s.TableType = &v - return s -} - -// SetUpdateTime sets the UpdateTime field's value. -func (s *Table) SetUpdateTime(v time.Time) *Table { - s.UpdateTime = &v - return s -} - -// SetViewExpandedText sets the ViewExpandedText field's value. -func (s *Table) SetViewExpandedText(v string) *Table { - s.ViewExpandedText = &v - return s -} - -// SetViewOriginalText sets the ViewOriginalText field's value. -func (s *Table) SetViewOriginalText(v string) *Table { - s.ViewOriginalText = &v - return s -} - -// An error record for table operations. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/TableError -type TableError struct { - _ struct{} `type:"structure"` - - // Detail about the error. - ErrorDetail *ErrorDetail `type:"structure"` - - // Name of the table. 
- TableName *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s TableError) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TableError) GoString() string { - return s.String() -} - -// SetErrorDetail sets the ErrorDetail field's value. -func (s *TableError) SetErrorDetail(v *ErrorDetail) *TableError { - s.ErrorDetail = v - return s -} - -// SetTableName sets the TableName field's value. -func (s *TableError) SetTableName(v string) *TableError { - s.TableName = &v - return s -} - -// Structure used to create or update the table. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/TableInput -type TableInput struct { - _ struct{} `type:"structure"` - - // Description of the table. - Description *string `type:"string"` - - // Last time the table was accessed. - LastAccessTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Last time column statistics were computed for this table. - LastAnalyzedTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Name of the table. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Owner of the table. - Owner *string `min:"1" type:"string"` - - // Properties associated with this table, as a list of key-value pairs. - Parameters map[string]*string `type:"map"` - - // A list of columns by which the table is partitioned. Only primitive types - // are supported as partition keys. - PartitionKeys []*Column `type:"list"` - - // Retention time for this table. - Retention *int64 `type:"integer"` - - // A storage descriptor containing information about the physical storage of - // this table. - StorageDescriptor *StorageDescriptor `type:"structure"` - - // The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). - TableType *string `type:"string"` - - // If the table is a view, the expanded text of the view; otherwise null. 
- ViewExpandedText *string `type:"string"` - - // If the table is a view, the original text of the view; otherwise null. - ViewOriginalText *string `type:"string"` -} - -// String returns the string representation -func (s TableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TableInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Owner != nil && len(*s.Owner) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Owner", 1)) - } - if s.PartitionKeys != nil { - for i, v := range s.PartitionKeys { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionKeys", i), err.(request.ErrInvalidParams)) - } - } - } - if s.StorageDescriptor != nil { - if err := s.StorageDescriptor.Validate(); err != nil { - invalidParams.AddNested("StorageDescriptor", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *TableInput) SetDescription(v string) *TableInput { - s.Description = &v - return s -} - -// SetLastAccessTime sets the LastAccessTime field's value. -func (s *TableInput) SetLastAccessTime(v time.Time) *TableInput { - s.LastAccessTime = &v - return s -} - -// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. -func (s *TableInput) SetLastAnalyzedTime(v time.Time) *TableInput { - s.LastAnalyzedTime = &v - return s -} - -// SetName sets the Name field's value. 
-func (s *TableInput) SetName(v string) *TableInput { - s.Name = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *TableInput) SetOwner(v string) *TableInput { - s.Owner = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *TableInput) SetParameters(v map[string]*string) *TableInput { - s.Parameters = v - return s -} - -// SetPartitionKeys sets the PartitionKeys field's value. -func (s *TableInput) SetPartitionKeys(v []*Column) *TableInput { - s.PartitionKeys = v - return s -} - -// SetRetention sets the Retention field's value. -func (s *TableInput) SetRetention(v int64) *TableInput { - s.Retention = &v - return s -} - -// SetStorageDescriptor sets the StorageDescriptor field's value. -func (s *TableInput) SetStorageDescriptor(v *StorageDescriptor) *TableInput { - s.StorageDescriptor = v - return s -} - -// SetTableType sets the TableType field's value. -func (s *TableInput) SetTableType(v string) *TableInput { - s.TableType = &v - return s -} - -// SetViewExpandedText sets the ViewExpandedText field's value. -func (s *TableInput) SetViewExpandedText(v string) *TableInput { - s.ViewExpandedText = &v - return s -} - -// SetViewOriginalText sets the ViewOriginalText field's value. -func (s *TableInput) SetViewOriginalText(v string) *TableInput { - s.ViewOriginalText = &v - return s -} - -// Specifies a version of a table. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/TableVersion -type TableVersion struct { - _ struct{} `type:"structure"` - - // The table in question - Table *Table `type:"structure"` - - // The ID value that identifies this table version. - VersionId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s TableVersion) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TableVersion) GoString() string { - return s.String() -} - -// SetTable sets the Table field's value. 
-func (s *TableVersion) SetTable(v *Table) *TableVersion { - s.Table = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *TableVersion) SetVersionId(v string) *TableVersion { - s.VersionId = &v - return s -} - -// Information about a specific trigger. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Trigger -type Trigger struct { - _ struct{} `type:"structure"` - - // The actions initiated by this trigger. - Actions []*Action `type:"list"` - - // A description of this trigger. - Description *string `type:"string"` - - // The trigger ID. - Id *string `min:"1" type:"string"` - - // Name of the trigger. - Name *string `min:"1" type:"string"` - - // The predicate of this trigger. - Predicate *Predicate `type:"structure"` - - // A cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). - Schedule *string `type:"string"` - - // The current state of the trigger. - State *string `type:"string" enum:"TriggerState"` - - // The type of trigger that this is. - Type *string `type:"string" enum:"TriggerType"` -} - -// String returns the string representation -func (s Trigger) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Trigger) GoString() string { - return s.String() -} - -// SetActions sets the Actions field's value. -func (s *Trigger) SetActions(v []*Action) *Trigger { - s.Actions = v - return s -} - -// SetDescription sets the Description field's value. -func (s *Trigger) SetDescription(v string) *Trigger { - s.Description = &v - return s -} - -// SetId sets the Id field's value. -func (s *Trigger) SetId(v string) *Trigger { - s.Id = &v - return s -} - -// SetName sets the Name field's value. 
-func (s *Trigger) SetName(v string) *Trigger { - s.Name = &v - return s -} - -// SetPredicate sets the Predicate field's value. -func (s *Trigger) SetPredicate(v *Predicate) *Trigger { - s.Predicate = v - return s -} - -// SetSchedule sets the Schedule field's value. -func (s *Trigger) SetSchedule(v string) *Trigger { - s.Schedule = &v - return s -} - -// SetState sets the State field's value. -func (s *Trigger) SetState(v string) *Trigger { - s.State = &v - return s -} - -// SetType sets the Type field's value. -func (s *Trigger) SetType(v string) *Trigger { - s.Type = &v - return s -} - -// A structure used to provide information used to updata a trigger. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/TriggerUpdate -type TriggerUpdate struct { - _ struct{} `type:"structure"` - - // The actions initiated by this trigger. - Actions []*Action `type:"list"` - - // A description of this trigger. - Description *string `type:"string"` - - // The name of the trigger. - Name *string `min:"1" type:"string"` - - // The predicate of this trigger, which defines when it will fire. - Predicate *Predicate `type:"structure"` - - // An updated cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). - Schedule *string `type:"string"` -} - -// String returns the string representation -func (s TriggerUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TriggerUpdate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *TriggerUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TriggerUpdate"} - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Actions != nil { - for i, v := range s.Actions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Predicate != nil { - if err := s.Predicate.Validate(); err != nil { - invalidParams.AddNested("Predicate", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetActions sets the Actions field's value. -func (s *TriggerUpdate) SetActions(v []*Action) *TriggerUpdate { - s.Actions = v - return s -} - -// SetDescription sets the Description field's value. -func (s *TriggerUpdate) SetDescription(v string) *TriggerUpdate { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *TriggerUpdate) SetName(v string) *TriggerUpdate { - s.Name = &v - return s -} - -// SetPredicate sets the Predicate field's value. -func (s *TriggerUpdate) SetPredicate(v *Predicate) *TriggerUpdate { - s.Predicate = v - return s -} - -// SetSchedule sets the Schedule field's value. -func (s *TriggerUpdate) SetSchedule(v string) *TriggerUpdate { - s.Schedule = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateClassifierRequest -type UpdateClassifierInput struct { - _ struct{} `type:"structure"` - - // A GrokClassifier object with updated fields. - GrokClassifier *UpdateGrokClassifierRequest `type:"structure"` - - // An XMLClassifier object with updated fields. 
- XMLClassifier *UpdateXMLClassifierRequest `type:"structure"` -} - -// String returns the string representation -func (s UpdateClassifierInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateClassifierInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateClassifierInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateClassifierInput"} - if s.GrokClassifier != nil { - if err := s.GrokClassifier.Validate(); err != nil { - invalidParams.AddNested("GrokClassifier", err.(request.ErrInvalidParams)) - } - } - if s.XMLClassifier != nil { - if err := s.XMLClassifier.Validate(); err != nil { - invalidParams.AddNested("XMLClassifier", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrokClassifier sets the GrokClassifier field's value. -func (s *UpdateClassifierInput) SetGrokClassifier(v *UpdateGrokClassifierRequest) *UpdateClassifierInput { - s.GrokClassifier = v - return s -} - -// SetXMLClassifier sets the XMLClassifier field's value. 
-func (s *UpdateClassifierInput) SetXMLClassifier(v *UpdateXMLClassifierRequest) *UpdateClassifierInput { - s.XMLClassifier = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateClassifierResponse -type UpdateClassifierOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateClassifierOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateClassifierOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateConnectionRequest -type UpdateConnectionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which the connection resides. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // A ConnectionInput object that redefines the connection in question. - // - // ConnectionInput is a required field - ConnectionInput *ConnectionInput `type:"structure" required:"true"` - - // The name of the connection definition to update. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateConnectionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateConnectionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateConnectionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.ConnectionInput == nil { - invalidParams.Add(request.NewErrParamRequired("ConnectionInput")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.ConnectionInput != nil { - if err := s.ConnectionInput.Validate(); err != nil { - invalidParams.AddNested("ConnectionInput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *UpdateConnectionInput) SetCatalogId(v string) *UpdateConnectionInput { - s.CatalogId = &v - return s -} - -// SetConnectionInput sets the ConnectionInput field's value. -func (s *UpdateConnectionInput) SetConnectionInput(v *ConnectionInput) *UpdateConnectionInput { - s.ConnectionInput = v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateConnectionInput) SetName(v string) *UpdateConnectionInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateConnectionResponse -type UpdateConnectionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateConnectionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateConnectionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawlerRequest -type UpdateCrawlerInput struct { - _ struct{} `type:"structure"` - - // A list of custom classifiers that the user has registered. 
By default, all - // classifiers are included in a crawl, but these custom classifiers always - // override the default classifiers for a given classification. - Classifiers []*string `type:"list"` - - // Crawler configuration information. This versioned JSON string allows users - // to specify aspects of a Crawler's behavior. - // - // You can use this field to force partitions to inherit metadata such as classification, - // input format, output format, serde information, and schema from their parent - // table, rather than detect this information separately for each partition. - // Use the following JSON string to specify that behavior: - Configuration *string `type:"string"` - - // The AWS Glue database where results are stored, such as: arn:aws:daylight:us-east-1::database/sometable/*. - DatabaseName *string `type:"string"` - - // A description of the new crawler. - Description *string `type:"string"` - - // Name of the new crawler. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The IAM role (or ARN of an IAM role) used by the new crawler to access customer - // resources. - Role *string `type:"string"` - - // A cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). - Schedule *string `type:"string"` - - // Policy for the crawler's update and deletion behavior. - SchemaChangePolicy *SchemaChangePolicy `type:"structure"` - - // The table prefix used for catalog tables that are created. - TablePrefix *string `type:"string"` - - // A list of targets to crawl. 
- Targets *CrawlerTargets `type:"structure"` -} - -// String returns the string representation -func (s UpdateCrawlerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateCrawlerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateCrawlerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateCrawlerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClassifiers sets the Classifiers field's value. -func (s *UpdateCrawlerInput) SetClassifiers(v []*string) *UpdateCrawlerInput { - s.Classifiers = v - return s -} - -// SetConfiguration sets the Configuration field's value. -func (s *UpdateCrawlerInput) SetConfiguration(v string) *UpdateCrawlerInput { - s.Configuration = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *UpdateCrawlerInput) SetDatabaseName(v string) *UpdateCrawlerInput { - s.DatabaseName = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *UpdateCrawlerInput) SetDescription(v string) *UpdateCrawlerInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateCrawlerInput) SetName(v string) *UpdateCrawlerInput { - s.Name = &v - return s -} - -// SetRole sets the Role field's value. -func (s *UpdateCrawlerInput) SetRole(v string) *UpdateCrawlerInput { - s.Role = &v - return s -} - -// SetSchedule sets the Schedule field's value. -func (s *UpdateCrawlerInput) SetSchedule(v string) *UpdateCrawlerInput { - s.Schedule = &v - return s -} - -// SetSchemaChangePolicy sets the SchemaChangePolicy field's value. 
-func (s *UpdateCrawlerInput) SetSchemaChangePolicy(v *SchemaChangePolicy) *UpdateCrawlerInput { - s.SchemaChangePolicy = v - return s -} - -// SetTablePrefix sets the TablePrefix field's value. -func (s *UpdateCrawlerInput) SetTablePrefix(v string) *UpdateCrawlerInput { - s.TablePrefix = &v - return s -} - -// SetTargets sets the Targets field's value. -func (s *UpdateCrawlerInput) SetTargets(v *CrawlerTargets) *UpdateCrawlerInput { - s.Targets = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawlerResponse -type UpdateCrawlerOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateCrawlerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateCrawlerOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawlerScheduleRequest -type UpdateCrawlerScheduleInput struct { - _ struct{} `type:"structure"` - - // Name of the crawler whose schedule to update. - // - // CrawlerName is a required field - CrawlerName *string `min:"1" type:"string" required:"true"` - - // The updated cron expression used to specify the schedule (see Time-Based - // Schedules for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). - Schedule *string `type:"string"` -} - -// String returns the string representation -func (s UpdateCrawlerScheduleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateCrawlerScheduleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateCrawlerScheduleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateCrawlerScheduleInput"} - if s.CrawlerName == nil { - invalidParams.Add(request.NewErrParamRequired("CrawlerName")) - } - if s.CrawlerName != nil && len(*s.CrawlerName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CrawlerName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCrawlerName sets the CrawlerName field's value. -func (s *UpdateCrawlerScheduleInput) SetCrawlerName(v string) *UpdateCrawlerScheduleInput { - s.CrawlerName = &v - return s -} - -// SetSchedule sets the Schedule field's value. -func (s *UpdateCrawlerScheduleInput) SetSchedule(v string) *UpdateCrawlerScheduleInput { - s.Schedule = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawlerScheduleResponse -type UpdateCrawlerScheduleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateCrawlerScheduleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateCrawlerScheduleOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDatabaseRequest -type UpdateDatabaseInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which the metadata database resides. If none - // is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // A DatabaseInput object specifying the new definition of the metadata database - // in the catalog. - // - // DatabaseInput is a required field - DatabaseInput *DatabaseInput `type:"structure" required:"true"` - - // The name of the metadata database to update in the catalog. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateDatabaseInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateDatabaseInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateDatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDatabaseInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseInput == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseInput")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.DatabaseInput != nil { - if err := s.DatabaseInput.Validate(); err != nil { - invalidParams.AddNested("DatabaseInput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *UpdateDatabaseInput) SetCatalogId(v string) *UpdateDatabaseInput { - s.CatalogId = &v - return s -} - -// SetDatabaseInput sets the DatabaseInput field's value. -func (s *UpdateDatabaseInput) SetDatabaseInput(v *DatabaseInput) *UpdateDatabaseInput { - s.DatabaseInput = v - return s -} - -// SetName sets the Name field's value. 
-func (s *UpdateDatabaseInput) SetName(v string) *UpdateDatabaseInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDatabaseResponse -type UpdateDatabaseOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateDatabaseOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateDatabaseOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDevEndpointRequest -type UpdateDevEndpointInput struct { - _ struct{} `type:"structure"` - - // Custom Python or Java libraries to be loaded in the DevEndpoint. - CustomLibraries *DevEndpointCustomLibraries `type:"structure"` - - // The name of the DevEndpoint to be updated. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` - - // The public key for the DevEndpoint to use. - PublicKey *string `type:"string"` - - // True if the list of custom libraries to be loaded in the development endpoint - // needs to be updated, or False otherwise. - UpdateEtlLibraries *bool `type:"boolean"` -} - -// String returns the string representation -func (s UpdateDevEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateDevEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateDevEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDevEndpointInput"} - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCustomLibraries sets the CustomLibraries field's value. 
-func (s *UpdateDevEndpointInput) SetCustomLibraries(v *DevEndpointCustomLibraries) *UpdateDevEndpointInput { - s.CustomLibraries = v - return s -} - -// SetEndpointName sets the EndpointName field's value. -func (s *UpdateDevEndpointInput) SetEndpointName(v string) *UpdateDevEndpointInput { - s.EndpointName = &v - return s -} - -// SetPublicKey sets the PublicKey field's value. -func (s *UpdateDevEndpointInput) SetPublicKey(v string) *UpdateDevEndpointInput { - s.PublicKey = &v - return s -} - -// SetUpdateEtlLibraries sets the UpdateEtlLibraries field's value. -func (s *UpdateDevEndpointInput) SetUpdateEtlLibraries(v bool) *UpdateDevEndpointInput { - s.UpdateEtlLibraries = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDevEndpointResponse -type UpdateDevEndpointOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateDevEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateDevEndpointOutput) GoString() string { - return s.String() -} - -// Specifies a grok classifier to update when passed to UpdateClassifier. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateGrokClassifierRequest -type UpdateGrokClassifierRequest struct { - _ struct{} `type:"structure"` - - // An identifier of the data format that the classifier matches, such as Twitter, - // JSON, Omniture logs, Amazon CloudWatch Logs, and so on. - Classification *string `type:"string"` - - // Optional custom grok patterns used by this classifier. - CustomPatterns *string `type:"string"` - - // The grok pattern used by this classifier. - GrokPattern *string `min:"1" type:"string"` - - // The name of the GrokClassifier. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateGrokClassifierRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateGrokClassifierRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateGrokClassifierRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateGrokClassifierRequest"} - if s.GrokPattern != nil && len(*s.GrokPattern) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrokPattern", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClassification sets the Classification field's value. -func (s *UpdateGrokClassifierRequest) SetClassification(v string) *UpdateGrokClassifierRequest { - s.Classification = &v - return s -} - -// SetCustomPatterns sets the CustomPatterns field's value. -func (s *UpdateGrokClassifierRequest) SetCustomPatterns(v string) *UpdateGrokClassifierRequest { - s.CustomPatterns = &v - return s -} - -// SetGrokPattern sets the GrokPattern field's value. -func (s *UpdateGrokClassifierRequest) SetGrokPattern(v string) *UpdateGrokClassifierRequest { - s.GrokPattern = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateGrokClassifierRequest) SetName(v string) *UpdateGrokClassifierRequest { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateJobRequest -type UpdateJobInput struct { - _ struct{} `type:"structure"` - - // Name of the job definition to update. 
- // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` - - // Specifies the values with which to update the job. - // - // JobUpdate is a required field - JobUpdate *JobUpdate `type:"structure" required:"true"` -} - -// String returns the string representation -func (s UpdateJobInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateJobInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateJobInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) - } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) - } - if s.JobUpdate == nil { - invalidParams.Add(request.NewErrParamRequired("JobUpdate")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetJobName sets the JobName field's value. -func (s *UpdateJobInput) SetJobName(v string) *UpdateJobInput { - s.JobName = &v - return s -} - -// SetJobUpdate sets the JobUpdate field's value. -func (s *UpdateJobInput) SetJobUpdate(v *JobUpdate) *UpdateJobInput { - s.JobUpdate = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateJobResponse -type UpdateJobOutput struct { - _ struct{} `type:"structure"` - - // Returns the name of the updated job. - JobName *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s UpdateJobOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateJobOutput) GoString() string { - return s.String() -} - -// SetJobName sets the JobName field's value. 
-func (s *UpdateJobOutput) SetJobName(v string) *UpdateJobOutput { - s.JobName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdatePartitionRequest -type UpdatePartitionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the partition to be updated resides. If - // none is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database in which the table in question resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The new partition object to which to update the partition. - // - // PartitionInput is a required field - PartitionInput *PartitionInput `type:"structure" required:"true"` - - // A list of the values defining the partition. - // - // PartitionValueList is a required field - PartitionValueList []*string `type:"list" required:"true"` - - // The name of the table where the partition to be updated is located. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdatePartitionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdatePartitionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdatePartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdatePartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionInput == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionInput")) - } - if s.PartitionValueList == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionValueList")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.PartitionInput != nil { - if err := s.PartitionInput.Validate(); err != nil { - invalidParams.AddNested("PartitionInput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *UpdatePartitionInput) SetCatalogId(v string) *UpdatePartitionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *UpdatePartitionInput) SetDatabaseName(v string) *UpdatePartitionInput { - s.DatabaseName = &v - return s -} - -// SetPartitionInput sets the PartitionInput field's value. -func (s *UpdatePartitionInput) SetPartitionInput(v *PartitionInput) *UpdatePartitionInput { - s.PartitionInput = v - return s -} - -// SetPartitionValueList sets the PartitionValueList field's value. -func (s *UpdatePartitionInput) SetPartitionValueList(v []*string) *UpdatePartitionInput { - s.PartitionValueList = v - return s -} - -// SetTableName sets the TableName field's value. 
-func (s *UpdatePartitionInput) SetTableName(v string) *UpdatePartitionInput { - s.TableName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdatePartitionResponse -type UpdatePartitionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdatePartitionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdatePartitionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTableRequest -type UpdateTableInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the table resides. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database in which the table resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // An updated TableInput object to define the metadata table in the catalog. - // - // TableInput is a required field - TableInput *TableInput `type:"structure" required:"true"` -} - -// String returns the string representation -func (s UpdateTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateTableInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.TableInput == nil { - invalidParams.Add(request.NewErrParamRequired("TableInput")) - } - if s.TableInput != nil { - if err := s.TableInput.Validate(); err != nil { - invalidParams.AddNested("TableInput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *UpdateTableInput) SetCatalogId(v string) *UpdateTableInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *UpdateTableInput) SetDatabaseName(v string) *UpdateTableInput { - s.DatabaseName = &v - return s -} - -// SetTableInput sets the TableInput field's value. -func (s *UpdateTableInput) SetTableInput(v *TableInput) *UpdateTableInput { - s.TableInput = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTableResponse -type UpdateTableOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateTableOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTriggerRequest -type UpdateTriggerInput struct { - _ struct{} `type:"structure"` - - // The name of the trigger to update. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The new values with which to update the trigger. - // - // TriggerUpdate is a required field - TriggerUpdate *TriggerUpdate `type:"structure" required:"true"` -} - -// String returns the string representation -func (s UpdateTriggerInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateTriggerInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateTriggerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateTriggerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.TriggerUpdate == nil { - invalidParams.Add(request.NewErrParamRequired("TriggerUpdate")) - } - if s.TriggerUpdate != nil { - if err := s.TriggerUpdate.Validate(); err != nil { - invalidParams.AddNested("TriggerUpdate", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *UpdateTriggerInput) SetName(v string) *UpdateTriggerInput { - s.Name = &v - return s -} - -// SetTriggerUpdate sets the TriggerUpdate field's value. -func (s *UpdateTriggerInput) SetTriggerUpdate(v *TriggerUpdate) *UpdateTriggerInput { - s.TriggerUpdate = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTriggerResponse -type UpdateTriggerOutput struct { - _ struct{} `type:"structure"` - - // The resulting trigger definition. 
- Trigger *Trigger `type:"structure"` -} - -// String returns the string representation -func (s UpdateTriggerOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateTriggerOutput) GoString() string { - return s.String() -} - -// SetTrigger sets the Trigger field's value. -func (s *UpdateTriggerOutput) SetTrigger(v *Trigger) *UpdateTriggerOutput { - s.Trigger = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateUserDefinedFunctionRequest -type UpdateUserDefinedFunctionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the function to be updated is located. If - // none is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database where the function to be updated is located. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A FunctionInput object that re-defines the function in the Data Catalog. - // - // FunctionInput is a required field - FunctionInput *UserDefinedFunctionInput `type:"structure" required:"true"` - - // The name of the function. - // - // FunctionName is a required field - FunctionName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateUserDefinedFunctionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateUserDefinedFunctionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateUserDefinedFunctionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateUserDefinedFunctionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.FunctionInput == nil { - invalidParams.Add(request.NewErrParamRequired("FunctionInput")) - } - if s.FunctionName == nil { - invalidParams.Add(request.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) - } - if s.FunctionInput != nil { - if err := s.FunctionInput.Validate(); err != nil { - invalidParams.AddNested("FunctionInput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *UpdateUserDefinedFunctionInput) SetCatalogId(v string) *UpdateUserDefinedFunctionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *UpdateUserDefinedFunctionInput) SetDatabaseName(v string) *UpdateUserDefinedFunctionInput { - s.DatabaseName = &v - return s -} - -// SetFunctionInput sets the FunctionInput field's value. -func (s *UpdateUserDefinedFunctionInput) SetFunctionInput(v *UserDefinedFunctionInput) *UpdateUserDefinedFunctionInput { - s.FunctionInput = v - return s -} - -// SetFunctionName sets the FunctionName field's value. 
-func (s *UpdateUserDefinedFunctionInput) SetFunctionName(v string) *UpdateUserDefinedFunctionInput { - s.FunctionName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateUserDefinedFunctionResponse -type UpdateUserDefinedFunctionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateUserDefinedFunctionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateUserDefinedFunctionOutput) GoString() string { - return s.String() -} - -// Specifies an XML classifier to be updated. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateXMLClassifierRequest -type UpdateXMLClassifierRequest struct { - _ struct{} `type:"structure"` - - // An identifier of the data format that the classifier matches. - Classification *string `type:"string"` - - // The name of the classifier. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The XML tag designating the element that contains each record in an XML document - // being parsed. Note that this cannot be an empty element. It must contain - // child elements representing fields in the record. - RowTag *string `type:"string"` -} - -// String returns the string representation -func (s UpdateXMLClassifierRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateXMLClassifierRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateXMLClassifierRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateXMLClassifierRequest"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClassification sets the Classification field's value. -func (s *UpdateXMLClassifierRequest) SetClassification(v string) *UpdateXMLClassifierRequest { - s.Classification = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateXMLClassifierRequest) SetName(v string) *UpdateXMLClassifierRequest { - s.Name = &v - return s -} - -// SetRowTag sets the RowTag field's value. -func (s *UpdateXMLClassifierRequest) SetRowTag(v string) *UpdateXMLClassifierRequest { - s.RowTag = &v - return s -} - -// Represents the equivalent of a Hive user-defined function (UDF) definition. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UserDefinedFunction -type UserDefinedFunction struct { - _ struct{} `type:"structure"` - - // The Java class that contains the function code. - ClassName *string `min:"1" type:"string"` - - // The time at which the function was created. - CreateTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The name of the function. - FunctionName *string `min:"1" type:"string"` - - // The owner of the function. - OwnerName *string `min:"1" type:"string"` - - // The owner type. - OwnerType *string `type:"string" enum:"PrincipalType"` - - // The resource URIs for the function. 
- ResourceUris []*ResourceUri `type:"list"` -} - -// String returns the string representation -func (s UserDefinedFunction) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UserDefinedFunction) GoString() string { - return s.String() -} - -// SetClassName sets the ClassName field's value. -func (s *UserDefinedFunction) SetClassName(v string) *UserDefinedFunction { - s.ClassName = &v - return s -} - -// SetCreateTime sets the CreateTime field's value. -func (s *UserDefinedFunction) SetCreateTime(v time.Time) *UserDefinedFunction { - s.CreateTime = &v - return s -} - -// SetFunctionName sets the FunctionName field's value. -func (s *UserDefinedFunction) SetFunctionName(v string) *UserDefinedFunction { - s.FunctionName = &v - return s -} - -// SetOwnerName sets the OwnerName field's value. -func (s *UserDefinedFunction) SetOwnerName(v string) *UserDefinedFunction { - s.OwnerName = &v - return s -} - -// SetOwnerType sets the OwnerType field's value. -func (s *UserDefinedFunction) SetOwnerType(v string) *UserDefinedFunction { - s.OwnerType = &v - return s -} - -// SetResourceUris sets the ResourceUris field's value. -func (s *UserDefinedFunction) SetResourceUris(v []*ResourceUri) *UserDefinedFunction { - s.ResourceUris = v - return s -} - -// A structure used to create or updata a user-defined function. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UserDefinedFunctionInput -type UserDefinedFunctionInput struct { - _ struct{} `type:"structure"` - - // The Java class that contains the function code. - ClassName *string `min:"1" type:"string"` - - // The name of the function. - FunctionName *string `min:"1" type:"string"` - - // The owner of the function. - OwnerName *string `min:"1" type:"string"` - - // The owner type. - OwnerType *string `type:"string" enum:"PrincipalType"` - - // The resource URIs for the function. 
- ResourceUris []*ResourceUri `type:"list"` -} - -// String returns the string representation -func (s UserDefinedFunctionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UserDefinedFunctionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UserDefinedFunctionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UserDefinedFunctionInput"} - if s.ClassName != nil && len(*s.ClassName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClassName", 1)) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) - } - if s.OwnerName != nil && len(*s.OwnerName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("OwnerName", 1)) - } - if s.ResourceUris != nil { - for i, v := range s.ResourceUris { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceUris", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClassName sets the ClassName field's value. -func (s *UserDefinedFunctionInput) SetClassName(v string) *UserDefinedFunctionInput { - s.ClassName = &v - return s -} - -// SetFunctionName sets the FunctionName field's value. -func (s *UserDefinedFunctionInput) SetFunctionName(v string) *UserDefinedFunctionInput { - s.FunctionName = &v - return s -} - -// SetOwnerName sets the OwnerName field's value. -func (s *UserDefinedFunctionInput) SetOwnerName(v string) *UserDefinedFunctionInput { - s.OwnerName = &v - return s -} - -// SetOwnerType sets the OwnerType field's value. -func (s *UserDefinedFunctionInput) SetOwnerType(v string) *UserDefinedFunctionInput { - s.OwnerType = &v - return s -} - -// SetResourceUris sets the ResourceUris field's value. 
-func (s *UserDefinedFunctionInput) SetResourceUris(v []*ResourceUri) *UserDefinedFunctionInput { - s.ResourceUris = v - return s -} - -// A classifier for XML content. -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/XMLClassifier -type XMLClassifier struct { - _ struct{} `type:"structure"` - - // An identifier of the data format that the classifier matches. - // - // Classification is a required field - Classification *string `type:"string" required:"true"` - - // The time this classifier was registered. - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The time this classifier was last updated. - LastUpdated *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The name of the classifier. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The XML tag designating the element that contains each record in an XML document - // being parsed. Note that this cannot be an empty element. It must contain - // child elements representing fields in the record. - RowTag *string `type:"string"` - - // The version of this classifier. - Version *int64 `type:"long"` -} - -// String returns the string representation -func (s XMLClassifier) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s XMLClassifier) GoString() string { - return s.String() -} - -// SetClassification sets the Classification field's value. -func (s *XMLClassifier) SetClassification(v string) *XMLClassifier { - s.Classification = &v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *XMLClassifier) SetCreationTime(v time.Time) *XMLClassifier { - s.CreationTime = &v - return s -} - -// SetLastUpdated sets the LastUpdated field's value. -func (s *XMLClassifier) SetLastUpdated(v time.Time) *XMLClassifier { - s.LastUpdated = &v - return s -} - -// SetName sets the Name field's value. 
-func (s *XMLClassifier) SetName(v string) *XMLClassifier { - s.Name = &v - return s -} - -// SetRowTag sets the RowTag field's value. -func (s *XMLClassifier) SetRowTag(v string) *XMLClassifier { - s.RowTag = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *XMLClassifier) SetVersion(v int64) *XMLClassifier { - s.Version = &v - return s -} - -const ( - // ConnectionPropertyKeyHost is a ConnectionPropertyKey enum value - ConnectionPropertyKeyHost = "HOST" - - // ConnectionPropertyKeyPort is a ConnectionPropertyKey enum value - ConnectionPropertyKeyPort = "PORT" - - // ConnectionPropertyKeyUsername is a ConnectionPropertyKey enum value - ConnectionPropertyKeyUsername = "USERNAME" - - // ConnectionPropertyKeyPassword is a ConnectionPropertyKey enum value - ConnectionPropertyKeyPassword = "PASSWORD" - - // ConnectionPropertyKeyJdbcDriverJarUri is a ConnectionPropertyKey enum value - ConnectionPropertyKeyJdbcDriverJarUri = "JDBC_DRIVER_JAR_URI" - - // ConnectionPropertyKeyJdbcDriverClassName is a ConnectionPropertyKey enum value - ConnectionPropertyKeyJdbcDriverClassName = "JDBC_DRIVER_CLASS_NAME" - - // ConnectionPropertyKeyJdbcEngine is a ConnectionPropertyKey enum value - ConnectionPropertyKeyJdbcEngine = "JDBC_ENGINE" - - // ConnectionPropertyKeyJdbcEngineVersion is a ConnectionPropertyKey enum value - ConnectionPropertyKeyJdbcEngineVersion = "JDBC_ENGINE_VERSION" - - // ConnectionPropertyKeyConfigFiles is a ConnectionPropertyKey enum value - ConnectionPropertyKeyConfigFiles = "CONFIG_FILES" - - // ConnectionPropertyKeyInstanceId is a ConnectionPropertyKey enum value - ConnectionPropertyKeyInstanceId = "INSTANCE_ID" - - // ConnectionPropertyKeyJdbcConnectionUrl is a ConnectionPropertyKey enum value - ConnectionPropertyKeyJdbcConnectionUrl = "JDBC_CONNECTION_URL" -) - -const ( - // ConnectionTypeJdbc is a ConnectionType enum value - ConnectionTypeJdbc = "JDBC" - - // ConnectionTypeSftp is a ConnectionType enum value - ConnectionTypeSftp = 
"SFTP" -) - -const ( - // CrawlerStateReady is a CrawlerState enum value - CrawlerStateReady = "READY" - - // CrawlerStateRunning is a CrawlerState enum value - CrawlerStateRunning = "RUNNING" - - // CrawlerStateStopping is a CrawlerState enum value - CrawlerStateStopping = "STOPPING" -) - -const ( - // DeleteBehaviorLog is a DeleteBehavior enum value - DeleteBehaviorLog = "LOG" - - // DeleteBehaviorDeleteFromDatabase is a DeleteBehavior enum value - DeleteBehaviorDeleteFromDatabase = "DELETE_FROM_DATABASE" - - // DeleteBehaviorDeprecateInDatabase is a DeleteBehavior enum value - DeleteBehaviorDeprecateInDatabase = "DEPRECATE_IN_DATABASE" -) - -const ( - // JobRunStateStarting is a JobRunState enum value - JobRunStateStarting = "STARTING" - - // JobRunStateRunning is a JobRunState enum value - JobRunStateRunning = "RUNNING" - - // JobRunStateStopping is a JobRunState enum value - JobRunStateStopping = "STOPPING" - - // JobRunStateStopped is a JobRunState enum value - JobRunStateStopped = "STOPPED" - - // JobRunStateSucceeded is a JobRunState enum value - JobRunStateSucceeded = "SUCCEEDED" - - // JobRunStateFailed is a JobRunState enum value - JobRunStateFailed = "FAILED" -) - -const ( - // LastCrawlStatusSucceeded is a LastCrawlStatus enum value - LastCrawlStatusSucceeded = "SUCCEEDED" - - // LastCrawlStatusCancelled is a LastCrawlStatus enum value - LastCrawlStatusCancelled = "CANCELLED" - - // LastCrawlStatusFailed is a LastCrawlStatus enum value - LastCrawlStatusFailed = "FAILED" -) - -const ( - // LogicalAnd is a Logical enum value - LogicalAnd = "AND" -) - -const ( - // LogicalOperatorEquals is a LogicalOperator enum value - LogicalOperatorEquals = "EQUALS" -) - -const ( - // PrincipalTypeUser is a PrincipalType enum value - PrincipalTypeUser = "USER" - - // PrincipalTypeRole is a PrincipalType enum value - PrincipalTypeRole = "ROLE" - - // PrincipalTypeGroup is a PrincipalType enum value - PrincipalTypeGroup = "GROUP" -) - -const ( - // ResourceTypeJar is a 
ResourceType enum value - ResourceTypeJar = "JAR" - - // ResourceTypeFile is a ResourceType enum value - ResourceTypeFile = "FILE" - - // ResourceTypeArchive is a ResourceType enum value - ResourceTypeArchive = "ARCHIVE" -) - -const ( - // ScheduleStateScheduled is a ScheduleState enum value - ScheduleStateScheduled = "SCHEDULED" - - // ScheduleStateNotScheduled is a ScheduleState enum value - ScheduleStateNotScheduled = "NOT_SCHEDULED" - - // ScheduleStateTransitioning is a ScheduleState enum value - ScheduleStateTransitioning = "TRANSITIONING" -) - -const ( - // TriggerStateCreating is a TriggerState enum value - TriggerStateCreating = "CREATING" - - // TriggerStateCreated is a TriggerState enum value - TriggerStateCreated = "CREATED" - - // TriggerStateActivating is a TriggerState enum value - TriggerStateActivating = "ACTIVATING" - - // TriggerStateActivated is a TriggerState enum value - TriggerStateActivated = "ACTIVATED" - - // TriggerStateDeactivating is a TriggerState enum value - TriggerStateDeactivating = "DEACTIVATING" - - // TriggerStateDeactivated is a TriggerState enum value - TriggerStateDeactivated = "DEACTIVATED" - - // TriggerStateDeleting is a TriggerState enum value - TriggerStateDeleting = "DELETING" - - // TriggerStateUpdating is a TriggerState enum value - TriggerStateUpdating = "UPDATING" -) - -const ( - // TriggerTypeScheduled is a TriggerType enum value - TriggerTypeScheduled = "SCHEDULED" - - // TriggerTypeConditional is a TriggerType enum value - TriggerTypeConditional = "CONDITIONAL" - - // TriggerTypeOnDemand is a TriggerType enum value - TriggerTypeOnDemand = "ON_DEMAND" -) - -const ( - // UpdateBehaviorLog is a UpdateBehavior enum value - UpdateBehaviorLog = "LOG" - - // UpdateBehaviorUpdateInDatabase is a UpdateBehavior enum value - UpdateBehaviorUpdateInDatabase = "UPDATE_IN_DATABASE" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/glue/doc.go b/vendor/github.com/aws/aws-sdk-go/service/glue/doc.go deleted file mode 100644 
index c25c96ea440..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/glue/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package glue provides the client and types for making API -// requests to AWS Glue. -// -// Defines the public endpoint for the AWS Glue service. -// -// See https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31 for more information on this service. -// -// See glue package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/glue/ -// -// Using the Client -// -// To contact AWS Glue with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Glue client Glue for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/glue/#New -package glue diff --git a/vendor/github.com/aws/aws-sdk-go/service/glue/errors.go b/vendor/github.com/aws/aws-sdk-go/service/glue/errors.go deleted file mode 100644 index c54357306fc..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/glue/errors.go +++ /dev/null @@ -1,120 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package glue - -const ( - - // ErrCodeAccessDeniedException for service response error code - // "AccessDeniedException". - // - // Access to a resource was denied. - ErrCodeAccessDeniedException = "AccessDeniedException" - - // ErrCodeAlreadyExistsException for service response error code - // "AlreadyExistsException". - // - // A resource to be created or added already exists. 
- ErrCodeAlreadyExistsException = "AlreadyExistsException" - - // ErrCodeConcurrentModificationException for service response error code - // "ConcurrentModificationException". - // - // Two processes are trying to modify a resource simultaneously. - ErrCodeConcurrentModificationException = "ConcurrentModificationException" - - // ErrCodeConcurrentRunsExceededException for service response error code - // "ConcurrentRunsExceededException". - // - // Too many jobs are being run concurrently. - ErrCodeConcurrentRunsExceededException = "ConcurrentRunsExceededException" - - // ErrCodeCrawlerNotRunningException for service response error code - // "CrawlerNotRunningException". - // - // The specified crawler is not running. - ErrCodeCrawlerNotRunningException = "CrawlerNotRunningException" - - // ErrCodeCrawlerRunningException for service response error code - // "CrawlerRunningException". - // - // The operation cannot be performed because the crawler is already running. - ErrCodeCrawlerRunningException = "CrawlerRunningException" - - // ErrCodeCrawlerStoppingException for service response error code - // "CrawlerStoppingException". - // - // The specified crawler is stopping. - ErrCodeCrawlerStoppingException = "CrawlerStoppingException" - - // ErrCodeEntityNotFoundException for service response error code - // "EntityNotFoundException". - // - // A specified entity does not exist - ErrCodeEntityNotFoundException = "EntityNotFoundException" - - // ErrCodeIdempotentParameterMismatchException for service response error code - // "IdempotentParameterMismatchException". - // - // The same unique identifier was associated with two different records. - ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException" - - // ErrCodeInternalServiceException for service response error code - // "InternalServiceException". - // - // An internal service error occurred. 
- ErrCodeInternalServiceException = "InternalServiceException" - - // ErrCodeInvalidInputException for service response error code - // "InvalidInputException". - // - // The input provided was not valid. - ErrCodeInvalidInputException = "InvalidInputException" - - // ErrCodeNoScheduleException for service response error code - // "NoScheduleException". - // - // There is no applicable schedule. - ErrCodeNoScheduleException = "NoScheduleException" - - // ErrCodeOperationTimeoutException for service response error code - // "OperationTimeoutException". - // - // The operation timed out. - ErrCodeOperationTimeoutException = "OperationTimeoutException" - - // ErrCodeResourceNumberLimitExceededException for service response error code - // "ResourceNumberLimitExceededException". - // - // A resource numerical limit was exceeded. - ErrCodeResourceNumberLimitExceededException = "ResourceNumberLimitExceededException" - - // ErrCodeSchedulerNotRunningException for service response error code - // "SchedulerNotRunningException". - // - // The specified scheduler is not running. - ErrCodeSchedulerNotRunningException = "SchedulerNotRunningException" - - // ErrCodeSchedulerRunningException for service response error code - // "SchedulerRunningException". - // - // The specified scheduler is already running. - ErrCodeSchedulerRunningException = "SchedulerRunningException" - - // ErrCodeSchedulerTransitioningException for service response error code - // "SchedulerTransitioningException". - // - // The specified scheduler is transitioning. - ErrCodeSchedulerTransitioningException = "SchedulerTransitioningException" - - // ErrCodeValidationException for service response error code - // "ValidationException". - // - // A value could not be validated. - ErrCodeValidationException = "ValidationException" - - // ErrCodeVersionMismatchException for service response error code - // "VersionMismatchException". - // - // There was a version conflict. 
- ErrCodeVersionMismatchException = "VersionMismatchException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/glue/service.go b/vendor/github.com/aws/aws-sdk-go/service/glue/service.go deleted file mode 100644 index 5e017698a2c..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/glue/service.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package glue - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// Glue provides the API operation methods for making requests to -// AWS Glue. See this package's package overview docs -// for details on the service. -// -// Glue methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type Glue struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "glue" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the Glue client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a Glue client from just a session. -// svc := glue.New(mySession) -// -// // Create a Glue client with additional configuration -// svc := glue.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *Glue { - c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Glue { - svc := &Glue{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2017-03-31", - JSONVersion: "1.1", - TargetPrefix: "AWSGlue", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a Glue operation and runs any -// custom request initialization. -func (c *Glue) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go deleted file mode 100644 index 57d72bfdd33..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go +++ /dev/null @@ -1,10328 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package lexmodelbuildingservice - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/restjson" -) - -const opCreateBotVersion = "CreateBotVersion" - -// CreateBotVersionRequest generates a "aws/request.Request" representing the -// client's request for the CreateBotVersion operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateBotVersion for more information on using the CreateBotVersion -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateBotVersionRequest method. -// req, resp := client.CreateBotVersionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateBotVersion -func (c *LexModelBuildingService) CreateBotVersionRequest(input *CreateBotVersionInput) (req *request.Request, output *CreateBotVersionOutput) { - op := &request.Operation{ - Name: opCreateBotVersion, - HTTPMethod: "POST", - HTTPPath: "/bots/{name}/versions", - } - - if input == nil { - input = &CreateBotVersionInput{} - } - - output = &CreateBotVersionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateBotVersion API operation for Amazon Lex Model Building Service. -// -// Creates a new version of the bot based on the $LATEST version. 
If the $LATEST -// version of this resource hasn't changed since you created the last version, -// Amazon Lex doesn't create a new version. It returns the last created version. -// -// You can update only the $LATEST version of the bot. You can't update the -// numbered versions that you create with the CreateBotVersion operation. -// -// When you create the first version of a bot, Amazon Lex sets the version to -// 1. Subsequent versions increment by 1. For more information, see versioning-intro. -// -// This operation requires permission for the lex:CreateBotVersion action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation CreateBotVersion for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodePreconditionFailedException "PreconditionFailedException" -// The checksum of the resource that you are trying to change does not match -// the checksum in the request. Check the resource's checksum and try again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateBotVersion -func (c *LexModelBuildingService) CreateBotVersion(input *CreateBotVersionInput) (*CreateBotVersionOutput, error) { - req, out := c.CreateBotVersionRequest(input) - return out, req.Send() -} - -// CreateBotVersionWithContext is the same as CreateBotVersion with the addition of -// the ability to pass a context and additional request options. -// -// See CreateBotVersion for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) CreateBotVersionWithContext(ctx aws.Context, input *CreateBotVersionInput, opts ...request.Option) (*CreateBotVersionOutput, error) { - req, out := c.CreateBotVersionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateIntentVersion = "CreateIntentVersion" - -// CreateIntentVersionRequest generates a "aws/request.Request" representing the -// client's request for the CreateIntentVersion operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateIntentVersion for more information on using the CreateIntentVersion -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateIntentVersionRequest method. 
-// req, resp := client.CreateIntentVersionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateIntentVersion -func (c *LexModelBuildingService) CreateIntentVersionRequest(input *CreateIntentVersionInput) (req *request.Request, output *CreateIntentVersionOutput) { - op := &request.Operation{ - Name: opCreateIntentVersion, - HTTPMethod: "POST", - HTTPPath: "/intents/{name}/versions", - } - - if input == nil { - input = &CreateIntentVersionInput{} - } - - output = &CreateIntentVersionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateIntentVersion API operation for Amazon Lex Model Building Service. -// -// Creates a new version of an intent based on the $LATEST version of the intent. -// If the $LATEST version of this intent hasn't changed since you last updated -// it, Amazon Lex doesn't create a new version. It returns the last version -// you created. -// -// You can update only the $LATEST version of the intent. You can't update the -// numbered versions that you create with the CreateIntentVersion operation. -// -// When you create a version of an intent, Amazon Lex sets the version to 1. -// Subsequent versions increment by 1. For more information, see versioning-intro. -// -// This operation requires permissions to perform the lex:CreateIntentVersion -// action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation CreateIntentVersion for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. 
-// -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodePreconditionFailedException "PreconditionFailedException" -// The checksum of the resource that you are trying to change does not match -// the checksum in the request. Check the resource's checksum and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateIntentVersion -func (c *LexModelBuildingService) CreateIntentVersion(input *CreateIntentVersionInput) (*CreateIntentVersionOutput, error) { - req, out := c.CreateIntentVersionRequest(input) - return out, req.Send() -} - -// CreateIntentVersionWithContext is the same as CreateIntentVersion with the addition of -// the ability to pass a context and additional request options. -// -// See CreateIntentVersion for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) CreateIntentVersionWithContext(ctx aws.Context, input *CreateIntentVersionInput, opts ...request.Option) (*CreateIntentVersionOutput, error) { - req, out := c.CreateIntentVersionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opCreateSlotTypeVersion = "CreateSlotTypeVersion" - -// CreateSlotTypeVersionRequest generates a "aws/request.Request" representing the -// client's request for the CreateSlotTypeVersion operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateSlotTypeVersion for more information on using the CreateSlotTypeVersion -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateSlotTypeVersionRequest method. -// req, resp := client.CreateSlotTypeVersionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateSlotTypeVersion -func (c *LexModelBuildingService) CreateSlotTypeVersionRequest(input *CreateSlotTypeVersionInput) (req *request.Request, output *CreateSlotTypeVersionOutput) { - op := &request.Operation{ - Name: opCreateSlotTypeVersion, - HTTPMethod: "POST", - HTTPPath: "/slottypes/{name}/versions", - } - - if input == nil { - input = &CreateSlotTypeVersionInput{} - } - - output = &CreateSlotTypeVersionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateSlotTypeVersion API operation for Amazon Lex Model Building Service. -// -// Creates a new version of a slot type based on the $LATEST version of the -// specified slot type. If the $LATEST version of this resource has not changed -// since the last version that you created, Amazon Lex doesn't create a new -// version. It returns the last version that you created. 
-// -// You can update only the $LATEST version of a slot type. You can't update -// the numbered versions that you create with the CreateSlotTypeVersion operation. -// -// When you create a version of a slot type, Amazon Lex sets the version to -// 1. Subsequent versions increment by 1. For more information, see versioning-intro. -// -// This operation requires permissions for the lex:CreateSlotTypeVersion action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation CreateSlotTypeVersion for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodePreconditionFailedException "PreconditionFailedException" -// The checksum of the resource that you are trying to change does not match -// the checksum in the request. Check the resource's checksum and try again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateSlotTypeVersion -func (c *LexModelBuildingService) CreateSlotTypeVersion(input *CreateSlotTypeVersionInput) (*CreateSlotTypeVersionOutput, error) { - req, out := c.CreateSlotTypeVersionRequest(input) - return out, req.Send() -} - -// CreateSlotTypeVersionWithContext is the same as CreateSlotTypeVersion with the addition of -// the ability to pass a context and additional request options. -// -// See CreateSlotTypeVersion for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) CreateSlotTypeVersionWithContext(ctx aws.Context, input *CreateSlotTypeVersionInput, opts ...request.Option) (*CreateSlotTypeVersionOutput, error) { - req, out := c.CreateSlotTypeVersionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBot = "DeleteBot" - -// DeleteBotRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBot operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBot for more information on using the DeleteBot -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBotRequest method. 
-// req, resp := client.DeleteBotRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBot -func (c *LexModelBuildingService) DeleteBotRequest(input *DeleteBotInput) (req *request.Request, output *DeleteBotOutput) { - op := &request.Operation{ - Name: opDeleteBot, - HTTPMethod: "DELETE", - HTTPPath: "/bots/{name}", - } - - if input == nil { - input = &DeleteBotInput{} - } - - output = &DeleteBotOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBot API operation for Amazon Lex Model Building Service. -// -// Deletes all versions of the bot, including the $LATEST version. To delete -// a specific version of the bot, use the DeleteBotVersion operation. -// -// If a bot has an alias, you can't delete it. Instead, the DeleteBot operation -// returns a ResourceInUseException exception that includes a reference to the -// alias that refers to the bot. To remove the reference to the bot, delete -// the alias. If you get the same exception again, delete the referring alias -// until the DeleteBot operation is successful. -// -// This operation requires permissions for the lex:DeleteBot action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation DeleteBot for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. 
-// -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeResourceInUseException "ResourceInUseException" -// The resource that you are attempting to delete is referred to by another -// resource. Use this information to remove references to the resource that -// you are trying to delete. -// -// The body of the exception contains a JSON object that describes the resource. -// -// { "resourceType": BOT | BOTALIAS | BOTCHANNEL | INTENT, -// -// "resourceReference": { -// -// "name": string, "version": string } } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBot -func (c *LexModelBuildingService) DeleteBot(input *DeleteBotInput) (*DeleteBotOutput, error) { - req, out := c.DeleteBotRequest(input) - return out, req.Send() -} - -// DeleteBotWithContext is the same as DeleteBot with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBot for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *LexModelBuildingService) DeleteBotWithContext(ctx aws.Context, input *DeleteBotInput, opts ...request.Option) (*DeleteBotOutput, error) { - req, out := c.DeleteBotRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBotAlias = "DeleteBotAlias" - -// DeleteBotAliasRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBotAlias operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBotAlias for more information on using the DeleteBotAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBotAliasRequest method. -// req, resp := client.DeleteBotAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotAlias -func (c *LexModelBuildingService) DeleteBotAliasRequest(input *DeleteBotAliasInput) (req *request.Request, output *DeleteBotAliasOutput) { - op := &request.Operation{ - Name: opDeleteBotAlias, - HTTPMethod: "DELETE", - HTTPPath: "/bots/{botName}/aliases/{name}", - } - - if input == nil { - input = &DeleteBotAliasInput{} - } - - output = &DeleteBotAliasOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBotAlias API operation for Amazon Lex Model Building Service. 
-// -// Deletes an alias for the specified bot. -// -// You can't delete an alias that is used in the association between a bot and -// a messaging channel. If an alias is used in a channel association, the DeleteBot -// operation returns a ResourceInUseException exception that includes a reference -// to the channel association that refers to the bot. You can remove the reference -// to the alias by deleting the channel association. If you get the same exception -// again, delete the referring association until the DeleteBotAlias operation -// is successful. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation DeleteBotAlias for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeResourceInUseException "ResourceInUseException" -// The resource that you are attempting to delete is referred to by another -// resource. Use this information to remove references to the resource that -// you are trying to delete. -// -// The body of the exception contains a JSON object that describes the resource. 
-// -// { "resourceType": BOT | BOTALIAS | BOTCHANNEL | INTENT, -// -// "resourceReference": { -// -// "name": string, "version": string } } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotAlias -func (c *LexModelBuildingService) DeleteBotAlias(input *DeleteBotAliasInput) (*DeleteBotAliasOutput, error) { - req, out := c.DeleteBotAliasRequest(input) - return out, req.Send() -} - -// DeleteBotAliasWithContext is the same as DeleteBotAlias with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBotAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) DeleteBotAliasWithContext(ctx aws.Context, input *DeleteBotAliasInput, opts ...request.Option) (*DeleteBotAliasOutput, error) { - req, out := c.DeleteBotAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBotChannelAssociation = "DeleteBotChannelAssociation" - -// DeleteBotChannelAssociationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBotChannelAssociation operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBotChannelAssociation for more information on using the DeleteBotChannelAssociation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBotChannelAssociationRequest method. -// req, resp := client.DeleteBotChannelAssociationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotChannelAssociation -func (c *LexModelBuildingService) DeleteBotChannelAssociationRequest(input *DeleteBotChannelAssociationInput) (req *request.Request, output *DeleteBotChannelAssociationOutput) { - op := &request.Operation{ - Name: opDeleteBotChannelAssociation, - HTTPMethod: "DELETE", - HTTPPath: "/bots/{botName}/aliases/{aliasName}/channels/{name}", - } - - if input == nil { - input = &DeleteBotChannelAssociationInput{} - } - - output = &DeleteBotChannelAssociationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBotChannelAssociation API operation for Amazon Lex Model Building Service. -// -// Deletes the association between an Amazon Lex bot and a messaging platform. -// -// This operation requires permission for the lex:DeleteBotChannelAssociation -// action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation DeleteBotChannelAssociation for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. 
-// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotChannelAssociation -func (c *LexModelBuildingService) DeleteBotChannelAssociation(input *DeleteBotChannelAssociationInput) (*DeleteBotChannelAssociationOutput, error) { - req, out := c.DeleteBotChannelAssociationRequest(input) - return out, req.Send() -} - -// DeleteBotChannelAssociationWithContext is the same as DeleteBotChannelAssociation with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBotChannelAssociation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) DeleteBotChannelAssociationWithContext(ctx aws.Context, input *DeleteBotChannelAssociationInput, opts ...request.Option) (*DeleteBotChannelAssociationOutput, error) { - req, out := c.DeleteBotChannelAssociationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBotVersion = "DeleteBotVersion" - -// DeleteBotVersionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBotVersion operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBotVersion for more information on using the DeleteBotVersion -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBotVersionRequest method. -// req, resp := client.DeleteBotVersionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotVersion -func (c *LexModelBuildingService) DeleteBotVersionRequest(input *DeleteBotVersionInput) (req *request.Request, output *DeleteBotVersionOutput) { - op := &request.Operation{ - Name: opDeleteBotVersion, - HTTPMethod: "DELETE", - HTTPPath: "/bots/{name}/versions/{version}", - } - - if input == nil { - input = &DeleteBotVersionInput{} - } - - output = &DeleteBotVersionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBotVersion API operation for Amazon Lex Model Building Service. -// -// Deletes a specific version of a bot. To delete all versions of a bot, use -// the DeleteBot operation. -// -// This operation requires permissions for the lex:DeleteBotVersion action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation DeleteBotVersion for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeResourceInUseException "ResourceInUseException" -// The resource that you are attempting to delete is referred to by another -// resource. Use this information to remove references to the resource that -// you are trying to delete. -// -// The body of the exception contains a JSON object that describes the resource. -// -// { "resourceType": BOT | BOTALIAS | BOTCHANNEL | INTENT, -// -// "resourceReference": { -// -// "name": string, "version": string } } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotVersion -func (c *LexModelBuildingService) DeleteBotVersion(input *DeleteBotVersionInput) (*DeleteBotVersionOutput, error) { - req, out := c.DeleteBotVersionRequest(input) - return out, req.Send() -} - -// DeleteBotVersionWithContext is the same as DeleteBotVersion with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBotVersion for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) DeleteBotVersionWithContext(ctx aws.Context, input *DeleteBotVersionInput, opts ...request.Option) (*DeleteBotVersionOutput, error) { - req, out := c.DeleteBotVersionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteIntent = "DeleteIntent" - -// DeleteIntentRequest generates a "aws/request.Request" representing the -// client's request for the DeleteIntent operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteIntent for more information on using the DeleteIntent -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteIntentRequest method. 
-// req, resp := client.DeleteIntentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteIntent -func (c *LexModelBuildingService) DeleteIntentRequest(input *DeleteIntentInput) (req *request.Request, output *DeleteIntentOutput) { - op := &request.Operation{ - Name: opDeleteIntent, - HTTPMethod: "DELETE", - HTTPPath: "/intents/{name}", - } - - if input == nil { - input = &DeleteIntentInput{} - } - - output = &DeleteIntentOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteIntent API operation for Amazon Lex Model Building Service. -// -// Deletes all versions of the intent, including the $LATEST version. To delete -// a specific version of the intent, use the DeleteIntentVersion operation. -// -// You can delete a version of an intent only if it is not referenced. To delete -// an intent that is referred to in one or more bots (see how-it-works), you -// must remove those references first. -// -// If you get the ResourceInUseException exception, it provides an example reference -// that shows where the intent is referenced. To remove the reference to the -// intent, either update the bot or delete it. If you get the same exception -// when you attempt to delete the intent again, repeat until the intent has -// no references and the call to DeleteIntent is successful. -// -// This operation requires permission for the lex:DeleteIntent action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation DeleteIntent for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeResourceInUseException "ResourceInUseException" -// The resource that you are attempting to delete is referred to by another -// resource. Use this information to remove references to the resource that -// you are trying to delete. -// -// The body of the exception contains a JSON object that describes the resource. -// -// { "resourceType": BOT | BOTALIAS | BOTCHANNEL | INTENT, -// -// "resourceReference": { -// -// "name": string, "version": string } } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteIntent -func (c *LexModelBuildingService) DeleteIntent(input *DeleteIntentInput) (*DeleteIntentOutput, error) { - req, out := c.DeleteIntentRequest(input) - return out, req.Send() -} - -// DeleteIntentWithContext is the same as DeleteIntent with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteIntent for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *LexModelBuildingService) DeleteIntentWithContext(ctx aws.Context, input *DeleteIntentInput, opts ...request.Option) (*DeleteIntentOutput, error) { - req, out := c.DeleteIntentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteIntentVersion = "DeleteIntentVersion" - -// DeleteIntentVersionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteIntentVersion operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteIntentVersion for more information on using the DeleteIntentVersion -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteIntentVersionRequest method. 
-// req, resp := client.DeleteIntentVersionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteIntentVersion -func (c *LexModelBuildingService) DeleteIntentVersionRequest(input *DeleteIntentVersionInput) (req *request.Request, output *DeleteIntentVersionOutput) { - op := &request.Operation{ - Name: opDeleteIntentVersion, - HTTPMethod: "DELETE", - HTTPPath: "/intents/{name}/versions/{version}", - } - - if input == nil { - input = &DeleteIntentVersionInput{} - } - - output = &DeleteIntentVersionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteIntentVersion API operation for Amazon Lex Model Building Service. -// -// Deletes a specific version of an intent. To delete all versions of a intent, -// use the DeleteIntent operation. -// -// This operation requires permissions for the lex:DeleteIntentVersion action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation DeleteIntentVersion for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. 
-// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeResourceInUseException "ResourceInUseException" -// The resource that you are attempting to delete is referred to by another -// resource. Use this information to remove references to the resource that -// you are trying to delete. -// -// The body of the exception contains a JSON object that describes the resource. -// -// { "resourceType": BOT | BOTALIAS | BOTCHANNEL | INTENT, -// -// "resourceReference": { -// -// "name": string, "version": string } } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteIntentVersion -func (c *LexModelBuildingService) DeleteIntentVersion(input *DeleteIntentVersionInput) (*DeleteIntentVersionOutput, error) { - req, out := c.DeleteIntentVersionRequest(input) - return out, req.Send() -} - -// DeleteIntentVersionWithContext is the same as DeleteIntentVersion with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteIntentVersion for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) DeleteIntentVersionWithContext(ctx aws.Context, input *DeleteIntentVersionInput, opts ...request.Option) (*DeleteIntentVersionOutput, error) { - req, out := c.DeleteIntentVersionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteSlotType = "DeleteSlotType" - -// DeleteSlotTypeRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSlotType operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteSlotType for more information on using the DeleteSlotType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteSlotTypeRequest method. -// req, resp := client.DeleteSlotTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteSlotType -func (c *LexModelBuildingService) DeleteSlotTypeRequest(input *DeleteSlotTypeInput) (req *request.Request, output *DeleteSlotTypeOutput) { - op := &request.Operation{ - Name: opDeleteSlotType, - HTTPMethod: "DELETE", - HTTPPath: "/slottypes/{name}", - } - - if input == nil { - input = &DeleteSlotTypeInput{} - } - - output = &DeleteSlotTypeOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteSlotType API operation for Amazon Lex Model Building Service. -// -// Deletes all versions of the slot type, including the $LATEST version. To -// delete a specific version of the slot type, use the DeleteSlotTypeVersion -// operation. -// -// You can delete a version of a slot type only if it is not referenced. 
To -// delete a slot type that is referred to in one or more intents, you must remove -// those references first. -// -// If you get the ResourceInUseException exception, the exception provides an -// example reference that shows the intent where the slot type is referenced. -// To remove the reference to the slot type, either update the intent or delete -// it. If you get the same exception when you attempt to delete the slot type -// again, repeat until the slot type has no references and the DeleteSlotType -// call is successful. -// -// This operation requires permission for the lex:DeleteSlotType action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation DeleteSlotType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeResourceInUseException "ResourceInUseException" -// The resource that you are attempting to delete is referred to by another -// resource. Use this information to remove references to the resource that -// you are trying to delete. 
-// -// The body of the exception contains a JSON object that describes the resource. -// -// { "resourceType": BOT | BOTALIAS | BOTCHANNEL | INTENT, -// -// "resourceReference": { -// -// "name": string, "version": string } } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteSlotType -func (c *LexModelBuildingService) DeleteSlotType(input *DeleteSlotTypeInput) (*DeleteSlotTypeOutput, error) { - req, out := c.DeleteSlotTypeRequest(input) - return out, req.Send() -} - -// DeleteSlotTypeWithContext is the same as DeleteSlotType with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteSlotType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) DeleteSlotTypeWithContext(ctx aws.Context, input *DeleteSlotTypeInput, opts ...request.Option) (*DeleteSlotTypeOutput, error) { - req, out := c.DeleteSlotTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteSlotTypeVersion = "DeleteSlotTypeVersion" - -// DeleteSlotTypeVersionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSlotTypeVersion operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteSlotTypeVersion for more information on using the DeleteSlotTypeVersion -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteSlotTypeVersionRequest method. -// req, resp := client.DeleteSlotTypeVersionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteSlotTypeVersion -func (c *LexModelBuildingService) DeleteSlotTypeVersionRequest(input *DeleteSlotTypeVersionInput) (req *request.Request, output *DeleteSlotTypeVersionOutput) { - op := &request.Operation{ - Name: opDeleteSlotTypeVersion, - HTTPMethod: "DELETE", - HTTPPath: "/slottypes/{name}/version/{version}", - } - - if input == nil { - input = &DeleteSlotTypeVersionInput{} - } - - output = &DeleteSlotTypeVersionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteSlotTypeVersion API operation for Amazon Lex Model Building Service. -// -// Deletes a specific version of a slot type. To delete all versions of a slot -// type, use the DeleteSlotType operation. -// -// This operation requires permissions for the lex:DeleteSlotTypeVersion action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation DeleteSlotTypeVersion for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. 
-// -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodeResourceInUseException "ResourceInUseException" -// The resource that you are attempting to delete is referred to by another -// resource. Use this information to remove references to the resource that -// you are trying to delete. -// -// The body of the exception contains a JSON object that describes the resource. -// -// { "resourceType": BOT | BOTALIAS | BOTCHANNEL | INTENT, -// -// "resourceReference": { -// -// "name": string, "version": string } } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteSlotTypeVersion -func (c *LexModelBuildingService) DeleteSlotTypeVersion(input *DeleteSlotTypeVersionInput) (*DeleteSlotTypeVersionOutput, error) { - req, out := c.DeleteSlotTypeVersionRequest(input) - return out, req.Send() -} - -// DeleteSlotTypeVersionWithContext is the same as DeleteSlotTypeVersion with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteSlotTypeVersion for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *LexModelBuildingService) DeleteSlotTypeVersionWithContext(ctx aws.Context, input *DeleteSlotTypeVersionInput, opts ...request.Option) (*DeleteSlotTypeVersionOutput, error) { - req, out := c.DeleteSlotTypeVersionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteUtterances = "DeleteUtterances" - -// DeleteUtterancesRequest generates a "aws/request.Request" representing the -// client's request for the DeleteUtterances operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteUtterances for more information on using the DeleteUtterances -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteUtterancesRequest method. 
-// req, resp := client.DeleteUtterancesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteUtterances -func (c *LexModelBuildingService) DeleteUtterancesRequest(input *DeleteUtterancesInput) (req *request.Request, output *DeleteUtterancesOutput) { - op := &request.Operation{ - Name: opDeleteUtterances, - HTTPMethod: "DELETE", - HTTPPath: "/bots/{botName}/utterances/{userId}", - } - - if input == nil { - input = &DeleteUtterancesInput{} - } - - output = &DeleteUtterancesOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteUtterances API operation for Amazon Lex Model Building Service. -// -// Deletes stored utterances. -// -// Amazon Lex stores the utterances that users send to your bot unless the childDirected -// field in the bot is set to true. Utterances are stored for 15 days for use -// with the GetUtterancesView operation, and then stored indefinitely for use -// in improving the ability of your bot to respond to user input. -// -// Use the DeleteStoredUtterances operation to manually delete stored utterances -// for a specific user. -// -// This operation requires permissions for the lex:DeleteUtterances action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation DeleteUtterances for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. 
-// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteUtterances -func (c *LexModelBuildingService) DeleteUtterances(input *DeleteUtterancesInput) (*DeleteUtterancesOutput, error) { - req, out := c.DeleteUtterancesRequest(input) - return out, req.Send() -} - -// DeleteUtterancesWithContext is the same as DeleteUtterances with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteUtterances for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) DeleteUtterancesWithContext(ctx aws.Context, input *DeleteUtterancesInput, opts ...request.Option) (*DeleteUtterancesOutput, error) { - req, out := c.DeleteUtterancesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBot = "GetBot" - -// GetBotRequest generates a "aws/request.Request" representing the -// client's request for the GetBot operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See GetBot for more information on using the GetBot -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBotRequest method. -// req, resp := client.GetBotRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBot -func (c *LexModelBuildingService) GetBotRequest(input *GetBotInput) (req *request.Request, output *GetBotOutput) { - op := &request.Operation{ - Name: opGetBot, - HTTPMethod: "GET", - HTTPPath: "/bots/{name}/versions/{versionoralias}", - } - - if input == nil { - input = &GetBotInput{} - } - - output = &GetBotOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBot API operation for Amazon Lex Model Building Service. -// -// Returns metadata information for a specific bot. You must provide the bot -// name and the bot version or alias. -// -// This operation requires permissions for the lex:GetBot action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetBot for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. 
-// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBot -func (c *LexModelBuildingService) GetBot(input *GetBotInput) (*GetBotOutput, error) { - req, out := c.GetBotRequest(input) - return out, req.Send() -} - -// GetBotWithContext is the same as GetBot with the addition of -// the ability to pass a context and additional request options. -// -// See GetBot for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBotWithContext(ctx aws.Context, input *GetBotInput, opts ...request.Option) (*GetBotOutput, error) { - req, out := c.GetBotRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBotAlias = "GetBotAlias" - -// GetBotAliasRequest generates a "aws/request.Request" representing the -// client's request for the GetBotAlias operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBotAlias for more information on using the GetBotAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBotAliasRequest method. 
-// req, resp := client.GetBotAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotAlias -func (c *LexModelBuildingService) GetBotAliasRequest(input *GetBotAliasInput) (req *request.Request, output *GetBotAliasOutput) { - op := &request.Operation{ - Name: opGetBotAlias, - HTTPMethod: "GET", - HTTPPath: "/bots/{botName}/aliases/{name}", - } - - if input == nil { - input = &GetBotAliasInput{} - } - - output = &GetBotAliasOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBotAlias API operation for Amazon Lex Model Building Service. -// -// Returns information about an Amazon Lex bot alias. For more information about -// aliases, see versioning-aliases. -// -// This operation requires permissions for the lex:GetBotAlias action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetBotAlias for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotAlias -func (c *LexModelBuildingService) GetBotAlias(input *GetBotAliasInput) (*GetBotAliasOutput, error) { - req, out := c.GetBotAliasRequest(input) - return out, req.Send() -} - -// GetBotAliasWithContext is the same as GetBotAlias with the addition of -// the ability to pass a context and additional request options. -// -// See GetBotAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBotAliasWithContext(ctx aws.Context, input *GetBotAliasInput, opts ...request.Option) (*GetBotAliasOutput, error) { - req, out := c.GetBotAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBotAliases = "GetBotAliases" - -// GetBotAliasesRequest generates a "aws/request.Request" representing the -// client's request for the GetBotAliases operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBotAliases for more information on using the GetBotAliases -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBotAliasesRequest method. 
-// req, resp := client.GetBotAliasesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotAliases -func (c *LexModelBuildingService) GetBotAliasesRequest(input *GetBotAliasesInput) (req *request.Request, output *GetBotAliasesOutput) { - op := &request.Operation{ - Name: opGetBotAliases, - HTTPMethod: "GET", - HTTPPath: "/bots/{botName}/aliases/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetBotAliasesInput{} - } - - output = &GetBotAliasesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBotAliases API operation for Amazon Lex Model Building Service. -// -// Returns a list of aliases for a specified Amazon Lex bot. -// -// This operation requires permissions for the lex:GetBotAliases action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetBotAliases for usage and error information. -// -// Returned Error Codes: -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotAliases -func (c *LexModelBuildingService) GetBotAliases(input *GetBotAliasesInput) (*GetBotAliasesOutput, error) { - req, out := c.GetBotAliasesRequest(input) - return out, req.Send() -} - -// GetBotAliasesWithContext is the same as GetBotAliases with the addition of -// the ability to pass a context and additional request options. -// -// See GetBotAliases for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBotAliasesWithContext(ctx aws.Context, input *GetBotAliasesInput, opts ...request.Option) (*GetBotAliasesOutput, error) { - req, out := c.GetBotAliasesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetBotAliasesPages iterates over the pages of a GetBotAliases operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetBotAliases method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetBotAliases operation. 
-// pageNum := 0 -// err := client.GetBotAliasesPages(params, -// func(page *GetBotAliasesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *LexModelBuildingService) GetBotAliasesPages(input *GetBotAliasesInput, fn func(*GetBotAliasesOutput, bool) bool) error { - return c.GetBotAliasesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetBotAliasesPagesWithContext same as GetBotAliasesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBotAliasesPagesWithContext(ctx aws.Context, input *GetBotAliasesInput, fn func(*GetBotAliasesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetBotAliasesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetBotAliasesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBotAliasesOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetBotChannelAssociation = "GetBotChannelAssociation" - -// GetBotChannelAssociationRequest generates a "aws/request.Request" representing the -// client's request for the GetBotChannelAssociation operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See GetBotChannelAssociation for more information on using the GetBotChannelAssociation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBotChannelAssociationRequest method. -// req, resp := client.GetBotChannelAssociationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotChannelAssociation -func (c *LexModelBuildingService) GetBotChannelAssociationRequest(input *GetBotChannelAssociationInput) (req *request.Request, output *GetBotChannelAssociationOutput) { - op := &request.Operation{ - Name: opGetBotChannelAssociation, - HTTPMethod: "GET", - HTTPPath: "/bots/{botName}/aliases/{aliasName}/channels/{name}", - } - - if input == nil { - input = &GetBotChannelAssociationInput{} - } - - output = &GetBotChannelAssociationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBotChannelAssociation API operation for Amazon Lex Model Building Service. -// -// Returns information about the association between an Amazon Lex bot and a -// messaging platform. -// -// This operation requires permissions for the lex:GetBotChannelAssociation -// action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetBotChannelAssociation for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. 
-// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotChannelAssociation -func (c *LexModelBuildingService) GetBotChannelAssociation(input *GetBotChannelAssociationInput) (*GetBotChannelAssociationOutput, error) { - req, out := c.GetBotChannelAssociationRequest(input) - return out, req.Send() -} - -// GetBotChannelAssociationWithContext is the same as GetBotChannelAssociation with the addition of -// the ability to pass a context and additional request options. -// -// See GetBotChannelAssociation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBotChannelAssociationWithContext(ctx aws.Context, input *GetBotChannelAssociationInput, opts ...request.Option) (*GetBotChannelAssociationOutput, error) { - req, out := c.GetBotChannelAssociationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBotChannelAssociations = "GetBotChannelAssociations" - -// GetBotChannelAssociationsRequest generates a "aws/request.Request" representing the -// client's request for the GetBotChannelAssociations operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBotChannelAssociations for more information on using the GetBotChannelAssociations -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBotChannelAssociationsRequest method. -// req, resp := client.GetBotChannelAssociationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotChannelAssociations -func (c *LexModelBuildingService) GetBotChannelAssociationsRequest(input *GetBotChannelAssociationsInput) (req *request.Request, output *GetBotChannelAssociationsOutput) { - op := &request.Operation{ - Name: opGetBotChannelAssociations, - HTTPMethod: "GET", - HTTPPath: "/bots/{botName}/aliases/{aliasName}/channels/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetBotChannelAssociationsInput{} - } - - output = &GetBotChannelAssociationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBotChannelAssociations API operation for Amazon Lex Model Building Service. -// -// Returns a list of all of the channels associated with the specified bot. -// -// The GetBotChannelAssociations operation requires permissions for the lex:GetBotChannelAssociations -// action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetBotChannelAssociations for usage and error information. -// -// Returned Error Codes: -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotChannelAssociations -func (c *LexModelBuildingService) GetBotChannelAssociations(input *GetBotChannelAssociationsInput) (*GetBotChannelAssociationsOutput, error) { - req, out := c.GetBotChannelAssociationsRequest(input) - return out, req.Send() -} - -// GetBotChannelAssociationsWithContext is the same as GetBotChannelAssociations with the addition of -// the ability to pass a context and additional request options. -// -// See GetBotChannelAssociations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBotChannelAssociationsWithContext(ctx aws.Context, input *GetBotChannelAssociationsInput, opts ...request.Option) (*GetBotChannelAssociationsOutput, error) { - req, out := c.GetBotChannelAssociationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -// GetBotChannelAssociationsPages iterates over the pages of a GetBotChannelAssociations operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetBotChannelAssociations method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetBotChannelAssociations operation. -// pageNum := 0 -// err := client.GetBotChannelAssociationsPages(params, -// func(page *GetBotChannelAssociationsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *LexModelBuildingService) GetBotChannelAssociationsPages(input *GetBotChannelAssociationsInput, fn func(*GetBotChannelAssociationsOutput, bool) bool) error { - return c.GetBotChannelAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetBotChannelAssociationsPagesWithContext same as GetBotChannelAssociationsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBotChannelAssociationsPagesWithContext(ctx aws.Context, input *GetBotChannelAssociationsInput, fn func(*GetBotChannelAssociationsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetBotChannelAssociationsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetBotChannelAssociationsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBotChannelAssociationsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetBotVersions = "GetBotVersions" - -// GetBotVersionsRequest generates a "aws/request.Request" representing the -// client's request for the GetBotVersions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBotVersions for more information on using the GetBotVersions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBotVersionsRequest method. -// req, resp := client.GetBotVersionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotVersions -func (c *LexModelBuildingService) GetBotVersionsRequest(input *GetBotVersionsInput) (req *request.Request, output *GetBotVersionsOutput) { - op := &request.Operation{ - Name: opGetBotVersions, - HTTPMethod: "GET", - HTTPPath: "/bots/{name}/versions/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetBotVersionsInput{} - } - - output = &GetBotVersionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBotVersions API operation for Amazon Lex Model Building Service. -// -// Gets information about all of the versions of a bot. 
-// -// The GetBotVersions operation returns a BotMetadata object for each version -// of a bot. For example, if a bot has three numbered versions, the GetBotVersions -// operation returns four BotMetadata objects in the response, one for each -// numbered version and one for the $LATEST version. -// -// The GetBotVersions operation always returns at least one version, the $LATEST -// version. -// -// This operation requires permissions for the lex:GetBotVersions action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetBotVersions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotVersions -func (c *LexModelBuildingService) GetBotVersions(input *GetBotVersionsInput) (*GetBotVersionsOutput, error) { - req, out := c.GetBotVersionsRequest(input) - return out, req.Send() -} - -// GetBotVersionsWithContext is the same as GetBotVersions with the addition of -// the ability to pass a context and additional request options. -// -// See GetBotVersions for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBotVersionsWithContext(ctx aws.Context, input *GetBotVersionsInput, opts ...request.Option) (*GetBotVersionsOutput, error) { - req, out := c.GetBotVersionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetBotVersionsPages iterates over the pages of a GetBotVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetBotVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetBotVersions operation. -// pageNum := 0 -// err := client.GetBotVersionsPages(params, -// func(page *GetBotVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *LexModelBuildingService) GetBotVersionsPages(input *GetBotVersionsInput, fn func(*GetBotVersionsOutput, bool) bool) error { - return c.GetBotVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetBotVersionsPagesWithContext same as GetBotVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *LexModelBuildingService) GetBotVersionsPagesWithContext(ctx aws.Context, input *GetBotVersionsInput, fn func(*GetBotVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetBotVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetBotVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBotVersionsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetBots = "GetBots" - -// GetBotsRequest generates a "aws/request.Request" representing the -// client's request for the GetBots operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBots for more information on using the GetBots -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBotsRequest method. 
-// req, resp := client.GetBotsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBots -func (c *LexModelBuildingService) GetBotsRequest(input *GetBotsInput) (req *request.Request, output *GetBotsOutput) { - op := &request.Operation{ - Name: opGetBots, - HTTPMethod: "GET", - HTTPPath: "/bots/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetBotsInput{} - } - - output = &GetBotsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBots API operation for Amazon Lex Model Building Service. -// -// Returns bot information as follows: -// -// * If you provide the nameContains field, the response includes information -// for the $LATEST version of all bots whose name contains the specified -// string. -// -// * If you don't specify the nameContains field, the operation returns information -// about the $LATEST version of all of your bots. -// -// This operation requires permission for the lex:GetBots action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetBots for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. 
-// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBots -func (c *LexModelBuildingService) GetBots(input *GetBotsInput) (*GetBotsOutput, error) { - req, out := c.GetBotsRequest(input) - return out, req.Send() -} - -// GetBotsWithContext is the same as GetBots with the addition of -// the ability to pass a context and additional request options. -// -// See GetBots for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBotsWithContext(ctx aws.Context, input *GetBotsInput, opts ...request.Option) (*GetBotsOutput, error) { - req, out := c.GetBotsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetBotsPages iterates over the pages of a GetBots operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetBots method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetBots operation. 
-// pageNum := 0 -// err := client.GetBotsPages(params, -// func(page *GetBotsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *LexModelBuildingService) GetBotsPages(input *GetBotsInput, fn func(*GetBotsOutput, bool) bool) error { - return c.GetBotsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetBotsPagesWithContext same as GetBotsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBotsPagesWithContext(ctx aws.Context, input *GetBotsInput, fn func(*GetBotsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetBotsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetBotsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBotsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetBuiltinIntent = "GetBuiltinIntent" - -// GetBuiltinIntentRequest generates a "aws/request.Request" representing the -// client's request for the GetBuiltinIntent operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBuiltinIntent for more information on using the GetBuiltinIntent -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBuiltinIntentRequest method. -// req, resp := client.GetBuiltinIntentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinIntent -func (c *LexModelBuildingService) GetBuiltinIntentRequest(input *GetBuiltinIntentInput) (req *request.Request, output *GetBuiltinIntentOutput) { - op := &request.Operation{ - Name: opGetBuiltinIntent, - HTTPMethod: "GET", - HTTPPath: "/builtins/intents/{signature}", - } - - if input == nil { - input = &GetBuiltinIntentInput{} - } - - output = &GetBuiltinIntentOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBuiltinIntent API operation for Amazon Lex Model Building Service. -// -// Returns information about a built-in intent. -// -// This operation requires permission for the lex:GetBuiltinIntent action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetBuiltinIntent for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. 
For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinIntent -func (c *LexModelBuildingService) GetBuiltinIntent(input *GetBuiltinIntentInput) (*GetBuiltinIntentOutput, error) { - req, out := c.GetBuiltinIntentRequest(input) - return out, req.Send() -} - -// GetBuiltinIntentWithContext is the same as GetBuiltinIntent with the addition of -// the ability to pass a context and additional request options. -// -// See GetBuiltinIntent for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBuiltinIntentWithContext(ctx aws.Context, input *GetBuiltinIntentInput, opts ...request.Option) (*GetBuiltinIntentOutput, error) { - req, out := c.GetBuiltinIntentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBuiltinIntents = "GetBuiltinIntents" - -// GetBuiltinIntentsRequest generates a "aws/request.Request" representing the -// client's request for the GetBuiltinIntents operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBuiltinIntents for more information on using the GetBuiltinIntents -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the GetBuiltinIntentsRequest method. -// req, resp := client.GetBuiltinIntentsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinIntents -func (c *LexModelBuildingService) GetBuiltinIntentsRequest(input *GetBuiltinIntentsInput) (req *request.Request, output *GetBuiltinIntentsOutput) { - op := &request.Operation{ - Name: opGetBuiltinIntents, - HTTPMethod: "GET", - HTTPPath: "/builtins/intents/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetBuiltinIntentsInput{} - } - - output = &GetBuiltinIntentsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBuiltinIntents API operation for Amazon Lex Model Building Service. -// -// Gets a list of built-in intents that meet the specified criteria. -// -// This operation requires permission for the lex:GetBuiltinIntents action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetBuiltinIntents for usage and error information. -// -// Returned Error Codes: -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinIntents -func (c *LexModelBuildingService) GetBuiltinIntents(input *GetBuiltinIntentsInput) (*GetBuiltinIntentsOutput, error) { - req, out := c.GetBuiltinIntentsRequest(input) - return out, req.Send() -} - -// GetBuiltinIntentsWithContext is the same as GetBuiltinIntents with the addition of -// the ability to pass a context and additional request options. -// -// See GetBuiltinIntents for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBuiltinIntentsWithContext(ctx aws.Context, input *GetBuiltinIntentsInput, opts ...request.Option) (*GetBuiltinIntentsOutput, error) { - req, out := c.GetBuiltinIntentsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetBuiltinIntentsPages iterates over the pages of a GetBuiltinIntents operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetBuiltinIntents method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetBuiltinIntents operation. 
-// pageNum := 0 -// err := client.GetBuiltinIntentsPages(params, -// func(page *GetBuiltinIntentsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *LexModelBuildingService) GetBuiltinIntentsPages(input *GetBuiltinIntentsInput, fn func(*GetBuiltinIntentsOutput, bool) bool) error { - return c.GetBuiltinIntentsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetBuiltinIntentsPagesWithContext same as GetBuiltinIntentsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBuiltinIntentsPagesWithContext(ctx aws.Context, input *GetBuiltinIntentsInput, fn func(*GetBuiltinIntentsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetBuiltinIntentsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetBuiltinIntentsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBuiltinIntentsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetBuiltinSlotTypes = "GetBuiltinSlotTypes" - -// GetBuiltinSlotTypesRequest generates a "aws/request.Request" representing the -// client's request for the GetBuiltinSlotTypes operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See GetBuiltinSlotTypes for more information on using the GetBuiltinSlotTypes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBuiltinSlotTypesRequest method. -// req, resp := client.GetBuiltinSlotTypesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinSlotTypes -func (c *LexModelBuildingService) GetBuiltinSlotTypesRequest(input *GetBuiltinSlotTypesInput) (req *request.Request, output *GetBuiltinSlotTypesOutput) { - op := &request.Operation{ - Name: opGetBuiltinSlotTypes, - HTTPMethod: "GET", - HTTPPath: "/builtins/slottypes/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetBuiltinSlotTypesInput{} - } - - output = &GetBuiltinSlotTypesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBuiltinSlotTypes API operation for Amazon Lex Model Building Service. -// -// Gets a list of built-in slot types that meet the specified criteria. -// -// For a list of built-in slot types, see Slot Type Reference (https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/slot-type-reference) -// in the Alexa Skills Kit. -// -// This operation requires permission for the lex:GetBuiltInSlotTypes action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetBuiltinSlotTypes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinSlotTypes -func (c *LexModelBuildingService) GetBuiltinSlotTypes(input *GetBuiltinSlotTypesInput) (*GetBuiltinSlotTypesOutput, error) { - req, out := c.GetBuiltinSlotTypesRequest(input) - return out, req.Send() -} - -// GetBuiltinSlotTypesWithContext is the same as GetBuiltinSlotTypes with the addition of -// the ability to pass a context and additional request options. -// -// See GetBuiltinSlotTypes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBuiltinSlotTypesWithContext(ctx aws.Context, input *GetBuiltinSlotTypesInput, opts ...request.Option) (*GetBuiltinSlotTypesOutput, error) { - req, out := c.GetBuiltinSlotTypesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetBuiltinSlotTypesPages iterates over the pages of a GetBuiltinSlotTypes operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. 
-// -// See GetBuiltinSlotTypes method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetBuiltinSlotTypes operation. -// pageNum := 0 -// err := client.GetBuiltinSlotTypesPages(params, -// func(page *GetBuiltinSlotTypesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *LexModelBuildingService) GetBuiltinSlotTypesPages(input *GetBuiltinSlotTypesInput, fn func(*GetBuiltinSlotTypesOutput, bool) bool) error { - return c.GetBuiltinSlotTypesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetBuiltinSlotTypesPagesWithContext same as GetBuiltinSlotTypesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetBuiltinSlotTypesPagesWithContext(ctx aws.Context, input *GetBuiltinSlotTypesInput, fn func(*GetBuiltinSlotTypesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetBuiltinSlotTypesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetBuiltinSlotTypesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBuiltinSlotTypesOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetExport = "GetExport" - -// GetExportRequest generates a "aws/request.Request" representing the -// client's request for the GetExport operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetExport for more information on using the GetExport -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetExportRequest method. -// req, resp := client.GetExportRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetExport -func (c *LexModelBuildingService) GetExportRequest(input *GetExportInput) (req *request.Request, output *GetExportOutput) { - op := &request.Operation{ - Name: opGetExport, - HTTPMethod: "GET", - HTTPPath: "/exports/", - } - - if input == nil { - input = &GetExportInput{} - } - - output = &GetExportOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetExport API operation for Amazon Lex Model Building Service. -// -// Exports the contents of a Amazon Lex resource in a specified format. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetExport for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. 
-// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetExport -func (c *LexModelBuildingService) GetExport(input *GetExportInput) (*GetExportOutput, error) { - req, out := c.GetExportRequest(input) - return out, req.Send() -} - -// GetExportWithContext is the same as GetExport with the addition of -// the ability to pass a context and additional request options. -// -// See GetExport for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetExportWithContext(ctx aws.Context, input *GetExportInput, opts ...request.Option) (*GetExportOutput, error) { - req, out := c.GetExportRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetIntent = "GetIntent" - -// GetIntentRequest generates a "aws/request.Request" representing the -// client's request for the GetIntent operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetIntent for more information on using the GetIntent -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetIntentRequest method. -// req, resp := client.GetIntentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntent -func (c *LexModelBuildingService) GetIntentRequest(input *GetIntentInput) (req *request.Request, output *GetIntentOutput) { - op := &request.Operation{ - Name: opGetIntent, - HTTPMethod: "GET", - HTTPPath: "/intents/{name}/versions/{version}", - } - - if input == nil { - input = &GetIntentInput{} - } - - output = &GetIntentOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetIntent API operation for Amazon Lex Model Building Service. -// -// Returns information about an intent. In addition to the intent name, you -// must specify the intent version. -// -// This operation requires permissions to perform the lex:GetIntent action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetIntent for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. 
For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntent -func (c *LexModelBuildingService) GetIntent(input *GetIntentInput) (*GetIntentOutput, error) { - req, out := c.GetIntentRequest(input) - return out, req.Send() -} - -// GetIntentWithContext is the same as GetIntent with the addition of -// the ability to pass a context and additional request options. -// -// See GetIntent for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetIntentWithContext(ctx aws.Context, input *GetIntentInput, opts ...request.Option) (*GetIntentOutput, error) { - req, out := c.GetIntentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetIntentVersions = "GetIntentVersions" - -// GetIntentVersionsRequest generates a "aws/request.Request" representing the -// client's request for the GetIntentVersions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetIntentVersions for more information on using the GetIntentVersions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetIntentVersionsRequest method. 
-// req, resp := client.GetIntentVersionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntentVersions -func (c *LexModelBuildingService) GetIntentVersionsRequest(input *GetIntentVersionsInput) (req *request.Request, output *GetIntentVersionsOutput) { - op := &request.Operation{ - Name: opGetIntentVersions, - HTTPMethod: "GET", - HTTPPath: "/intents/{name}/versions/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetIntentVersionsInput{} - } - - output = &GetIntentVersionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetIntentVersions API operation for Amazon Lex Model Building Service. -// -// Gets information about all of the versions of an intent. -// -// The GetIntentVersions operation returns an IntentMetadata object for each -// version of an intent. For example, if an intent has three numbered versions, -// the GetIntentVersions operation returns four IntentMetadata objects in the -// response, one for each numbered version and one for the $LATEST version. -// -// The GetIntentVersions operation always returns at least one version, the -// $LATEST version. -// -// This operation requires permissions for the lex:GetIntentVersions action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetIntentVersions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. 
-// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntentVersions -func (c *LexModelBuildingService) GetIntentVersions(input *GetIntentVersionsInput) (*GetIntentVersionsOutput, error) { - req, out := c.GetIntentVersionsRequest(input) - return out, req.Send() -} - -// GetIntentVersionsWithContext is the same as GetIntentVersions with the addition of -// the ability to pass a context and additional request options. -// -// See GetIntentVersions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetIntentVersionsWithContext(ctx aws.Context, input *GetIntentVersionsInput, opts ...request.Option) (*GetIntentVersionsOutput, error) { - req, out := c.GetIntentVersionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetIntentVersionsPages iterates over the pages of a GetIntentVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetIntentVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. 
-// -// // Example iterating over at most 3 pages of a GetIntentVersions operation. -// pageNum := 0 -// err := client.GetIntentVersionsPages(params, -// func(page *GetIntentVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *LexModelBuildingService) GetIntentVersionsPages(input *GetIntentVersionsInput, fn func(*GetIntentVersionsOutput, bool) bool) error { - return c.GetIntentVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetIntentVersionsPagesWithContext same as GetIntentVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetIntentVersionsPagesWithContext(ctx aws.Context, input *GetIntentVersionsInput, fn func(*GetIntentVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetIntentVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetIntentVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetIntentVersionsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetIntents = "GetIntents" - -// GetIntentsRequest generates a "aws/request.Request" representing the -// client's request for the GetIntents operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See GetIntents for more information on using the GetIntents -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetIntentsRequest method. -// req, resp := client.GetIntentsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntents -func (c *LexModelBuildingService) GetIntentsRequest(input *GetIntentsInput) (req *request.Request, output *GetIntentsOutput) { - op := &request.Operation{ - Name: opGetIntents, - HTTPMethod: "GET", - HTTPPath: "/intents/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetIntentsInput{} - } - - output = &GetIntentsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetIntents API operation for Amazon Lex Model Building Service. -// -// Returns intent information as follows: -// -// * If you specify the nameContains field, returns the $LATEST version of -// all intents that contain the specified string. -// -// * If you don't specify the nameContains field, returns information about -// the $LATEST version of all intents. -// -// The operation requires permission for the lex:GetIntents action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetIntents for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntents -func (c *LexModelBuildingService) GetIntents(input *GetIntentsInput) (*GetIntentsOutput, error) { - req, out := c.GetIntentsRequest(input) - return out, req.Send() -} - -// GetIntentsWithContext is the same as GetIntents with the addition of -// the ability to pass a context and additional request options. -// -// See GetIntents for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetIntentsWithContext(ctx aws.Context, input *GetIntentsInput, opts ...request.Option) (*GetIntentsOutput, error) { - req, out := c.GetIntentsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetIntentsPages iterates over the pages of a GetIntents operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetIntents method for more information on how to use this operation. 
-// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetIntents operation. -// pageNum := 0 -// err := client.GetIntentsPages(params, -// func(page *GetIntentsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *LexModelBuildingService) GetIntentsPages(input *GetIntentsInput, fn func(*GetIntentsOutput, bool) bool) error { - return c.GetIntentsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetIntentsPagesWithContext same as GetIntentsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetIntentsPagesWithContext(ctx aws.Context, input *GetIntentsInput, fn func(*GetIntentsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetIntentsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetIntentsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetIntentsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetSlotType = "GetSlotType" - -// GetSlotTypeRequest generates a "aws/request.Request" representing the -// client's request for the GetSlotType operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See GetSlotType for more information on using the GetSlotType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSlotTypeRequest method. -// req, resp := client.GetSlotTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotType -func (c *LexModelBuildingService) GetSlotTypeRequest(input *GetSlotTypeInput) (req *request.Request, output *GetSlotTypeOutput) { - op := &request.Operation{ - Name: opGetSlotType, - HTTPMethod: "GET", - HTTPPath: "/slottypes/{name}/versions/{version}", - } - - if input == nil { - input = &GetSlotTypeInput{} - } - - output = &GetSlotTypeOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSlotType API operation for Amazon Lex Model Building Service. -// -// Returns information about a specific version of a slot type. In addition -// to specifying the slot type name, you must specify the slot type version. -// -// This operation requires permissions for the lex:GetSlotType action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetSlotType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. 
-// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotType -func (c *LexModelBuildingService) GetSlotType(input *GetSlotTypeInput) (*GetSlotTypeOutput, error) { - req, out := c.GetSlotTypeRequest(input) - return out, req.Send() -} - -// GetSlotTypeWithContext is the same as GetSlotType with the addition of -// the ability to pass a context and additional request options. -// -// See GetSlotType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetSlotTypeWithContext(ctx aws.Context, input *GetSlotTypeInput, opts ...request.Option) (*GetSlotTypeOutput, error) { - req, out := c.GetSlotTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSlotTypeVersions = "GetSlotTypeVersions" - -// GetSlotTypeVersionsRequest generates a "aws/request.Request" representing the -// client's request for the GetSlotTypeVersions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSlotTypeVersions for more information on using the GetSlotTypeVersions -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSlotTypeVersionsRequest method. -// req, resp := client.GetSlotTypeVersionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotTypeVersions -func (c *LexModelBuildingService) GetSlotTypeVersionsRequest(input *GetSlotTypeVersionsInput) (req *request.Request, output *GetSlotTypeVersionsOutput) { - op := &request.Operation{ - Name: opGetSlotTypeVersions, - HTTPMethod: "GET", - HTTPPath: "/slottypes/{name}/versions/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetSlotTypeVersionsInput{} - } - - output = &GetSlotTypeVersionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSlotTypeVersions API operation for Amazon Lex Model Building Service. -// -// Gets information about all versions of a slot type. -// -// The GetSlotTypeVersions operation returns a SlotTypeMetadata object for each -// version of a slot type. For example, if a slot type has three numbered versions, -// the GetSlotTypeVersions operation returns four SlotTypeMetadata objects in -// the response, one for each numbered version and one for the $LATEST version. -// -// The GetSlotTypeVersions operation always returns at least one version, the -// $LATEST version. -// -// This operation requires permissions for the lex:GetSlotTypeVersions action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetSlotTypeVersions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotTypeVersions -func (c *LexModelBuildingService) GetSlotTypeVersions(input *GetSlotTypeVersionsInput) (*GetSlotTypeVersionsOutput, error) { - req, out := c.GetSlotTypeVersionsRequest(input) - return out, req.Send() -} - -// GetSlotTypeVersionsWithContext is the same as GetSlotTypeVersions with the addition of -// the ability to pass a context and additional request options. -// -// See GetSlotTypeVersions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetSlotTypeVersionsWithContext(ctx aws.Context, input *GetSlotTypeVersionsInput, opts ...request.Option) (*GetSlotTypeVersionsOutput, error) { - req, out := c.GetSlotTypeVersionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -// GetSlotTypeVersionsPages iterates over the pages of a GetSlotTypeVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetSlotTypeVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetSlotTypeVersions operation. -// pageNum := 0 -// err := client.GetSlotTypeVersionsPages(params, -// func(page *GetSlotTypeVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *LexModelBuildingService) GetSlotTypeVersionsPages(input *GetSlotTypeVersionsInput, fn func(*GetSlotTypeVersionsOutput, bool) bool) error { - return c.GetSlotTypeVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetSlotTypeVersionsPagesWithContext same as GetSlotTypeVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetSlotTypeVersionsPagesWithContext(ctx aws.Context, input *GetSlotTypeVersionsInput, fn func(*GetSlotTypeVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetSlotTypeVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetSlotTypeVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetSlotTypeVersionsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetSlotTypes = "GetSlotTypes" - -// GetSlotTypesRequest generates a "aws/request.Request" representing the -// client's request for the GetSlotTypes operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSlotTypes for more information on using the GetSlotTypes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSlotTypesRequest method. -// req, resp := client.GetSlotTypesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotTypes -func (c *LexModelBuildingService) GetSlotTypesRequest(input *GetSlotTypesInput) (req *request.Request, output *GetSlotTypesOutput) { - op := &request.Operation{ - Name: opGetSlotTypes, - HTTPMethod: "GET", - HTTPPath: "/slottypes/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetSlotTypesInput{} - } - - output = &GetSlotTypesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSlotTypes API operation for Amazon Lex Model Building Service. 
-// -// Returns slot type information as follows: -// -// * If you specify the nameContains field, returns the $LATEST version of -// all slot types that contain the specified string. -// -// * If you don't specify the nameContains field, returns information about -// the $LATEST version of all slot types. -// -// The operation requires permission for the lex:GetSlotTypes action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetSlotTypes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// The resource specified in the request was not found. Check the resource and -// try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotTypes -func (c *LexModelBuildingService) GetSlotTypes(input *GetSlotTypesInput) (*GetSlotTypesOutput, error) { - req, out := c.GetSlotTypesRequest(input) - return out, req.Send() -} - -// GetSlotTypesWithContext is the same as GetSlotTypes with the addition of -// the ability to pass a context and additional request options. -// -// See GetSlotTypes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetSlotTypesWithContext(ctx aws.Context, input *GetSlotTypesInput, opts ...request.Option) (*GetSlotTypesOutput, error) { - req, out := c.GetSlotTypesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetSlotTypesPages iterates over the pages of a GetSlotTypes operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetSlotTypes method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetSlotTypes operation. -// pageNum := 0 -// err := client.GetSlotTypesPages(params, -// func(page *GetSlotTypesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *LexModelBuildingService) GetSlotTypesPages(input *GetSlotTypesInput, fn func(*GetSlotTypesOutput, bool) bool) error { - return c.GetSlotTypesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetSlotTypesPagesWithContext same as GetSlotTypesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *LexModelBuildingService) GetSlotTypesPagesWithContext(ctx aws.Context, input *GetSlotTypesInput, fn func(*GetSlotTypesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetSlotTypesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetSlotTypesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetSlotTypesOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetUtterancesView = "GetUtterancesView" - -// GetUtterancesViewRequest generates a "aws/request.Request" representing the -// client's request for the GetUtterancesView operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetUtterancesView for more information on using the GetUtterancesView -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetUtterancesViewRequest method. 
-// req, resp := client.GetUtterancesViewRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetUtterancesView -func (c *LexModelBuildingService) GetUtterancesViewRequest(input *GetUtterancesViewInput) (req *request.Request, output *GetUtterancesViewOutput) { - op := &request.Operation{ - Name: opGetUtterancesView, - HTTPMethod: "GET", - HTTPPath: "/bots/{botname}/utterances?view=aggregation", - } - - if input == nil { - input = &GetUtterancesViewInput{} - } - - output = &GetUtterancesViewOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetUtterancesView API operation for Amazon Lex Model Building Service. -// -// Use the GetUtterancesView operation to get information about the utterances -// that your users have made to your bot. You can use this list to tune the -// utterances that your bot responds to. -// -// For example, say that you have created a bot to order flowers. After your -// users have used your bot for a while, use the GetUtterancesView operation -// to see the requests that they have made and whether they have been successful. -// You might find that the utterance "I want flowers" is not being recognized. -// You could add this utterance to the OrderFlowers intent so that your bot -// recognizes that utterance. -// -// After you publish a new version of a bot, you can get information about the -// old version and the new so that you can compare the performance across the -// two versions. -// -// Data is available for the last 15 days. You can request information for up -// to 5 versions in each request. The response contains information about a -// maximum of 100 utterances for each version. -// -// If the bot's childDirected field is set to true, utterances for the bot are -// not stored and cannot be retrieved with the GetUtterancesView operation. -// For more information, see PutBot. 
-// -// This operation requires permissions for the lex:GetUtterancesView action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation GetUtterancesView for usage and error information. -// -// Returned Error Codes: -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetUtterancesView -func (c *LexModelBuildingService) GetUtterancesView(input *GetUtterancesViewInput) (*GetUtterancesViewOutput, error) { - req, out := c.GetUtterancesViewRequest(input) - return out, req.Send() -} - -// GetUtterancesViewWithContext is the same as GetUtterancesView with the addition of -// the ability to pass a context and additional request options. -// -// See GetUtterancesView for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) GetUtterancesViewWithContext(ctx aws.Context, input *GetUtterancesViewInput, opts ...request.Option) (*GetUtterancesViewOutput, error) { - req, out := c.GetUtterancesViewRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opPutBot = "PutBot" - -// PutBotRequest generates a "aws/request.Request" representing the -// client's request for the PutBot operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBot for more information on using the PutBot -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBotRequest method. -// req, resp := client.PutBotRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutBot -func (c *LexModelBuildingService) PutBotRequest(input *PutBotInput) (req *request.Request, output *PutBotOutput) { - op := &request.Operation{ - Name: opPutBot, - HTTPMethod: "PUT", - HTTPPath: "/bots/{name}/versions/$LATEST", - } - - if input == nil { - input = &PutBotInput{} - } - - output = &PutBotOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutBot API operation for Amazon Lex Model Building Service. -// -// Creates an Amazon Lex conversational bot or replaces an existing bot. When -// you create or update a bot you are only required to specify a name. You can -// use this to add intents later, or to remove intents from an existing bot. -// When you create a bot with a name only, the bot is created or updated but -// Amazon Lex returns the response FAILED. You can build the bot after you add one or more intents. For more information -// about Amazon Lex bots, see how-it-works. 
-// -// If you specify the name of an existing bot, the fields in the request replace -// the existing values in the $LATESTversion of the bot. Amazon Lex removes any fields that you don't provide -// values for in the request, except for the idleTTLInSecondsand privacySettingsfields, which are set to their default values. If you don't specify values -// for required fields, Amazon Lex throws an exception. -// -// This operation requires permissions for the lex:PutBotaction. For more information, see auth-and-access-control -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation PutBot for usage and error information. -// -// Returned Error Codes: -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodePreconditionFailedException "PreconditionFailedException" -// The checksum of the resource that you are trying to change does not match -// the checksum in the request. Check the resource's checksum and try again. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutBot -func (c *LexModelBuildingService) PutBot(input *PutBotInput) (*PutBotOutput, error) { - req, out := c.PutBotRequest(input) - return out, req.Send() -} - -// PutBotWithContext is the same as PutBot with the addition of -// the ability to pass a context and additional request options. -// -// See PutBot for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) PutBotWithContext(ctx aws.Context, input *PutBotInput, opts ...request.Option) (*PutBotOutput, error) { - req, out := c.PutBotRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBotAlias = "PutBotAlias" - -// PutBotAliasRequest generates a "aws/request.Request" representing the -// client's request for the PutBotAlias operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBotAlias for more information on using the PutBotAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBotAliasRequest method. 
-// req, resp := client.PutBotAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutBotAlias -func (c *LexModelBuildingService) PutBotAliasRequest(input *PutBotAliasInput) (req *request.Request, output *PutBotAliasOutput) { - op := &request.Operation{ - Name: opPutBotAlias, - HTTPMethod: "PUT", - HTTPPath: "/bots/{botName}/aliases/{name}", - } - - if input == nil { - input = &PutBotAliasInput{} - } - - output = &PutBotAliasOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutBotAlias API operation for Amazon Lex Model Building Service. -// -// Creates an alias for the specified version of the bot or replaces an alias -// for the specified bot. To change the version of the bot that the alias points -// to, replace the alias. For more information about aliases, see versioning-aliases. -// -// This operation requires permissions for the lex:PutBotAlias action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation PutBotAlias for usage and error information. -// -// Returned Error Codes: -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. 
-// -// * ErrCodePreconditionFailedException "PreconditionFailedException" -// The checksum of the resource that you are trying to change does not match -// the checksum in the request. Check the resource's checksum and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutBotAlias -func (c *LexModelBuildingService) PutBotAlias(input *PutBotAliasInput) (*PutBotAliasOutput, error) { - req, out := c.PutBotAliasRequest(input) - return out, req.Send() -} - -// PutBotAliasWithContext is the same as PutBotAlias with the addition of -// the ability to pass a context and additional request options. -// -// See PutBotAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) PutBotAliasWithContext(ctx aws.Context, input *PutBotAliasInput, opts ...request.Option) (*PutBotAliasOutput, error) { - req, out := c.PutBotAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutIntent = "PutIntent" - -// PutIntentRequest generates a "aws/request.Request" representing the -// client's request for the PutIntent operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutIntent for more information on using the PutIntent -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the PutIntentRequest method. -// req, resp := client.PutIntentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutIntent -func (c *LexModelBuildingService) PutIntentRequest(input *PutIntentInput) (req *request.Request, output *PutIntentOutput) { - op := &request.Operation{ - Name: opPutIntent, - HTTPMethod: "PUT", - HTTPPath: "/intents/{name}/versions/$LATEST", - } - - if input == nil { - input = &PutIntentInput{} - } - - output = &PutIntentOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutIntent API operation for Amazon Lex Model Building Service. -// -// Creates an intent or replaces an existing intent. -// -// To define the interaction between the user and your bot, you use one or more -// intents. For a pizza ordering bot, for example, you would create an OrderPizza -// intent. -// -// To create an intent or replace an existing intent, you must provide the following: -// -// * Intent name. For example, OrderPizza. -// -// * Sample utterances. For example, "Can I order a pizza, please." and "I -// want to order a pizza." -// -// * Information to be gathered. You specify slot types for the information -// that your bot will request from the user. You can specify standard slot -// types, such as a date or a time, or custom slot types such as the size -// and crust of a pizza. -// -// * How the intent will be fulfilled. You can provide a Lambda function -// or configure the intent to return the intent information to the client -// application. If you use a Lambda function, when all of the intent information -// is available, Amazon Lex invokes your Lambda function. If you configure -// your intent to return the intent information to the client application. 
-// -// -// You can specify other optional information in the request, such as: -// -// * A confirmation prompt to ask the user to confirm an intent. For example, -// "Shall I order your pizza?" -// -// * A conclusion statement to send to the user after the intent has been -// fulfilled. For example, "I placed your pizza order." -// -// * A follow-up prompt that asks the user for additional activity. For example, -// asking "Do you want to order a drink with your pizza?" -// -// If you specify an existing intent name to update the intent, Amazon Lex replaces -// the values in the $LATEST version of the intent with the values in the request. -// Amazon Lex removes fields that you don't provide in the request. If you don't -// specify the required fields, Amazon Lex throws an exception. When you update -// the $LATEST version of an intent, the status field of any bot that uses the -// $LATEST version of the intent is set to NOT_BUILT. -// -// For more information, see how-it-works. -// -// This operation requires permissions for the lex:PutIntent action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation PutIntent for usage and error information. -// -// Returned Error Codes: -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. 
Check the field values, and try again. -// -// * ErrCodePreconditionFailedException "PreconditionFailedException" -// The checksum of the resource that you are trying to change does not match -// the checksum in the request. Check the resource's checksum and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutIntent -func (c *LexModelBuildingService) PutIntent(input *PutIntentInput) (*PutIntentOutput, error) { - req, out := c.PutIntentRequest(input) - return out, req.Send() -} - -// PutIntentWithContext is the same as PutIntent with the addition of -// the ability to pass a context and additional request options. -// -// See PutIntent for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) PutIntentWithContext(ctx aws.Context, input *PutIntentInput, opts ...request.Option) (*PutIntentOutput, error) { - req, out := c.PutIntentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutSlotType = "PutSlotType" - -// PutSlotTypeRequest generates a "aws/request.Request" representing the -// client's request for the PutSlotType operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutSlotType for more information on using the PutSlotType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the PutSlotTypeRequest method. -// req, resp := client.PutSlotTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutSlotType -func (c *LexModelBuildingService) PutSlotTypeRequest(input *PutSlotTypeInput) (req *request.Request, output *PutSlotTypeOutput) { - op := &request.Operation{ - Name: opPutSlotType, - HTTPMethod: "PUT", - HTTPPath: "/slottypes/{name}/versions/$LATEST", - } - - if input == nil { - input = &PutSlotTypeInput{} - } - - output = &PutSlotTypeOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutSlotType API operation for Amazon Lex Model Building Service. -// -// Creates a custom slot type or replaces an existing custom slot type. -// -// To create a custom slot type, specify a name for the slot type and a set -// of enumeration values, which are the values that a slot of this type can -// assume. For more information, see how-it-works. -// -// If you specify the name of an existing slot type, the fields in the request -// replace the existing values in the $LATEST version of the slot type. Amazon -// Lex removes the fields that you don't provide in the request. If you don't -// specify required fields, Amazon Lex throws an exception. When you update -// the $LATEST version of a slot type, if a bot uses the $LATEST version of -// an intent that contains the slot type, the bot's status field is set to NOT_BUILT. -// -// This operation requires permissions for the lex:PutSlotType action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Lex Model Building Service's -// API operation PutSlotType for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeConflictException "ConflictException" -// There was a conflict processing the request. Try your request again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request exceeded a limit. Try your request again. -// -// * ErrCodeInternalFailureException "InternalFailureException" -// An internal Amazon Lex error occurred. Try your request again. -// -// * ErrCodeBadRequestException "BadRequestException" -// The request is not well formed. For example, a value is invalid or a required -// field is missing. Check the field values, and try again. -// -// * ErrCodePreconditionFailedException "PreconditionFailedException" -// The checksum of the resource that you are trying to change does not match -// the checksum in the request. Check the resource's checksum and try again. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutSlotType -func (c *LexModelBuildingService) PutSlotType(input *PutSlotTypeInput) (*PutSlotTypeOutput, error) { - req, out := c.PutSlotTypeRequest(input) - return out, req.Send() -} - -// PutSlotTypeWithContext is the same as PutSlotType with the addition of -// the ability to pass a context and additional request options. -// -// See PutSlotType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *LexModelBuildingService) PutSlotTypeWithContext(ctx aws.Context, input *PutSlotTypeInput, opts ...request.Option) (*PutSlotTypeOutput, error) { - req, out := c.PutSlotTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Provides information about a bot alias. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/BotAliasMetadata -type BotAliasMetadata struct { - _ struct{} `type:"structure"` - - // The name of the bot to which the alias points. - BotName *string `locationName:"botName" min:"2" type:"string"` - - // The version of the Amazon Lex bot to which the alias points. - BotVersion *string `locationName:"botVersion" min:"1" type:"string"` - - // Checksum of the bot alias. - Checksum *string `locationName:"checksum" type:"string"` - - // The date that the bot alias was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the bot alias. - Description *string `locationName:"description" type:"string"` - - // The date that the bot alias was updated. When you create a resource, the - // creation date and last updated date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the bot alias. - Name *string `locationName:"name" min:"1" type:"string"` -} - -// String returns the string representation -func (s BotAliasMetadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BotAliasMetadata) GoString() string { - return s.String() -} - -// SetBotName sets the BotName field's value. -func (s *BotAliasMetadata) SetBotName(v string) *BotAliasMetadata { - s.BotName = &v - return s -} - -// SetBotVersion sets the BotVersion field's value. -func (s *BotAliasMetadata) SetBotVersion(v string) *BotAliasMetadata { - s.BotVersion = &v - return s -} - -// SetChecksum sets the Checksum field's value. -func (s *BotAliasMetadata) SetChecksum(v string) *BotAliasMetadata { - s.Checksum = &v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. 
-func (s *BotAliasMetadata) SetCreatedDate(v time.Time) *BotAliasMetadata { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *BotAliasMetadata) SetDescription(v string) *BotAliasMetadata { - s.Description = &v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *BotAliasMetadata) SetLastUpdatedDate(v time.Time) *BotAliasMetadata { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *BotAliasMetadata) SetName(v string) *BotAliasMetadata { - s.Name = &v - return s -} - -// Represents an association between an Amazon Lex bot and an external messaging -// platform. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/BotChannelAssociation -type BotChannelAssociation struct { - _ struct{} `type:"structure"` - - // An alias pointing to the specific version of the Amazon Lex bot to which - // this association is being made. - BotAlias *string `locationName:"botAlias" min:"1" type:"string"` - - // Provides information necessary to communicate with the messaging platform. - BotConfiguration map[string]*string `locationName:"botConfiguration" min:"1" type:"map"` - - // The name of the Amazon Lex bot to which this association is being made. - // - // Currently, Amazon Lex supports associations with Facebook and Slack, and - // Twilio. - BotName *string `locationName:"botName" min:"2" type:"string"` - - // The date that the association between the Amazon Lex bot and the channel - // was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A text description of the association you are creating. - Description *string `locationName:"description" type:"string"` - - // If status is FAILED, Amazon Lex provides the reason that it failed to create - // the association. 
- FailureReason *string `locationName:"failureReason" type:"string"` - - // The name of the association between the bot and the channel. - Name *string `locationName:"name" min:"1" type:"string"` - - // The status of the bot channel. - // - // * CREATED - The channel has been created and is ready for use. - // - // * IN_PROGRESS - Channel creation is in progress. - // - // * FAILED - There was an error creating the channel. For information about - // the reason for the failure, see the failureReason field. - Status *string `locationName:"status" type:"string" enum:"ChannelStatus"` - - // Specifies the type of association by indicating the type of channel being - // established between the Amazon Lex bot and the external messaging platform. - Type *string `locationName:"type" type:"string" enum:"ChannelType"` -} - -// String returns the string representation -func (s BotChannelAssociation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BotChannelAssociation) GoString() string { - return s.String() -} - -// SetBotAlias sets the BotAlias field's value. -func (s *BotChannelAssociation) SetBotAlias(v string) *BotChannelAssociation { - s.BotAlias = &v - return s -} - -// SetBotConfiguration sets the BotConfiguration field's value. -func (s *BotChannelAssociation) SetBotConfiguration(v map[string]*string) *BotChannelAssociation { - s.BotConfiguration = v - return s -} - -// SetBotName sets the BotName field's value. -func (s *BotChannelAssociation) SetBotName(v string) *BotChannelAssociation { - s.BotName = &v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *BotChannelAssociation) SetCreatedDate(v time.Time) *BotChannelAssociation { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. 
-func (s *BotChannelAssociation) SetDescription(v string) *BotChannelAssociation { - s.Description = &v - return s -} - -// SetFailureReason sets the FailureReason field's value. -func (s *BotChannelAssociation) SetFailureReason(v string) *BotChannelAssociation { - s.FailureReason = &v - return s -} - -// SetName sets the Name field's value. -func (s *BotChannelAssociation) SetName(v string) *BotChannelAssociation { - s.Name = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *BotChannelAssociation) SetStatus(v string) *BotChannelAssociation { - s.Status = &v - return s -} - -// SetType sets the Type field's value. -func (s *BotChannelAssociation) SetType(v string) *BotChannelAssociation { - s.Type = &v - return s -} - -// Provides information about a bot. . -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/BotMetadata -type BotMetadata struct { - _ struct{} `type:"structure"` - - // The date that the bot was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the bot. - Description *string `locationName:"description" type:"string"` - - // The date that the bot was updated. When you create a bot, the creation date - // and last updated date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the bot. - Name *string `locationName:"name" min:"2" type:"string"` - - // The status of the bot. - Status *string `locationName:"status" type:"string" enum:"Status"` - - // The version of the bot. For a new bot, the version is always $LATEST. 
- Version *string `locationName:"version" min:"1" type:"string"` -} - -// String returns the string representation -func (s BotMetadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BotMetadata) GoString() string { - return s.String() -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *BotMetadata) SetCreatedDate(v time.Time) *BotMetadata { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *BotMetadata) SetDescription(v string) *BotMetadata { - s.Description = &v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *BotMetadata) SetLastUpdatedDate(v time.Time) *BotMetadata { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *BotMetadata) SetName(v string) *BotMetadata { - s.Name = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *BotMetadata) SetStatus(v string) *BotMetadata { - s.Status = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *BotMetadata) SetVersion(v string) *BotMetadata { - s.Version = &v - return s -} - -// Provides metadata for a built-in intent. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/BuiltinIntentMetadata -type BuiltinIntentMetadata struct { - _ struct{} `type:"structure"` - - // A unique identifier for the built-in intent. To find the signature for an - // intent, see Standard Built-in Intents (https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents) - // in the Alexa Skills Kit. - Signature *string `locationName:"signature" type:"string"` - - // A list of identifiers for the locales that the intent supports. 
- SupportedLocales []*string `locationName:"supportedLocales" type:"list"` -} - -// String returns the string representation -func (s BuiltinIntentMetadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BuiltinIntentMetadata) GoString() string { - return s.String() -} - -// SetSignature sets the Signature field's value. -func (s *BuiltinIntentMetadata) SetSignature(v string) *BuiltinIntentMetadata { - s.Signature = &v - return s -} - -// SetSupportedLocales sets the SupportedLocales field's value. -func (s *BuiltinIntentMetadata) SetSupportedLocales(v []*string) *BuiltinIntentMetadata { - s.SupportedLocales = v - return s -} - -// Provides information about a slot used in a built-in intent. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/BuiltinIntentSlot -type BuiltinIntentSlot struct { - _ struct{} `type:"structure"` - - // A list of the slots defined for the intent. - Name *string `locationName:"name" type:"string"` -} - -// String returns the string representation -func (s BuiltinIntentSlot) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BuiltinIntentSlot) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *BuiltinIntentSlot) SetName(v string) *BuiltinIntentSlot { - s.Name = &v - return s -} - -// Provides information about a built in slot type. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/BuiltinSlotTypeMetadata -type BuiltinSlotTypeMetadata struct { - _ struct{} `type:"structure"` - - // A unique identifier for the built-in slot type. To find the signature for - // a slot type, see Slot Type Reference (https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/slot-type-reference) - // in the Alexa Skills Kit. 
- Signature *string `locationName:"signature" type:"string"` - - // A list of target locales for the slot. - SupportedLocales []*string `locationName:"supportedLocales" type:"list"` -} - -// String returns the string representation -func (s BuiltinSlotTypeMetadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BuiltinSlotTypeMetadata) GoString() string { - return s.String() -} - -// SetSignature sets the Signature field's value. -func (s *BuiltinSlotTypeMetadata) SetSignature(v string) *BuiltinSlotTypeMetadata { - s.Signature = &v - return s -} - -// SetSupportedLocales sets the SupportedLocales field's value. -func (s *BuiltinSlotTypeMetadata) SetSupportedLocales(v []*string) *BuiltinSlotTypeMetadata { - s.SupportedLocales = v - return s -} - -// Specifies a Lambda function that verifies requests to a bot or fulfills the -// user's request to a bot.. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CodeHook -type CodeHook struct { - _ struct{} `type:"structure"` - - // The version of the request-response that you want Amazon Lex to use to invoke - // your Lambda function. For more information, see using-lambda. - // - // MessageVersion is a required field - MessageVersion *string `locationName:"messageVersion" min:"1" type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the Lambda function. - // - // Uri is a required field - Uri *string `locationName:"uri" min:"20" type:"string" required:"true"` -} - -// String returns the string representation -func (s CodeHook) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CodeHook) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CodeHook) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CodeHook"} - if s.MessageVersion == nil { - invalidParams.Add(request.NewErrParamRequired("MessageVersion")) - } - if s.MessageVersion != nil && len(*s.MessageVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MessageVersion", 1)) - } - if s.Uri == nil { - invalidParams.Add(request.NewErrParamRequired("Uri")) - } - if s.Uri != nil && len(*s.Uri) < 20 { - invalidParams.Add(request.NewErrParamMinLen("Uri", 20)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMessageVersion sets the MessageVersion field's value. -func (s *CodeHook) SetMessageVersion(v string) *CodeHook { - s.MessageVersion = &v - return s -} - -// SetUri sets the Uri field's value. -func (s *CodeHook) SetUri(v string) *CodeHook { - s.Uri = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateBotVersionRequest -type CreateBotVersionInput struct { - _ struct{} `type:"structure"` - - // Identifies a specific revision of the $LATEST version of the bot. If you - // specify a checksum and the $LATEST version of the bot has a different checksum, - // a PreconditionFailedException exception is returned and Amazon Lex doesn't - // publish a new version. If you don't specify a checksum, Amazon Lex publishes - // the $LATEST version. - Checksum *string `locationName:"checksum" type:"string"` - - // The name of the bot that you want to create a new version of. The name is - // case sensitive. 
- // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateBotVersionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBotVersionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateBotVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateBotVersionInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChecksum sets the Checksum field's value. -func (s *CreateBotVersionInput) SetChecksum(v string) *CreateBotVersionInput { - s.Checksum = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateBotVersionInput) SetName(v string) *CreateBotVersionInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateBotVersionResponse -type CreateBotVersionOutput struct { - _ struct{} `type:"structure"` - - // The message that Amazon Lex uses to abort a conversation. For more information, - // see PutBot. - AbortStatement *Statement `locationName:"abortStatement" type:"structure"` - - // Checksum identifying the version of the bot that was created. 
- Checksum *string `locationName:"checksum" type:"string"` - - // For each Amazon Lex bot created with the Amazon Lex Model Building Service, - // you must specify whether your use of Amazon Lex is related to a website, - // program, or other application that is directed or targeted, in whole or in - // part, to children under age 13 and subject to the Children's Online Privacy - // Protection Act (COPPA) by specifying true or false in the childDirected field. - // By specifying true in the childDirected field, you confirm that your use - // of Amazon Lex is related to a website, program, or other application that - // is directed or targeted, in whole or in part, to children under age 13 and - // subject to COPPA. By specifying false in the childDirected field, you confirm - // that your use of Amazon Lex is not related to a website, program, or other - // application that is directed or targeted, in whole or in part, to children - // under age 13 and subject to COPPA. You may not specify a default value for - // the childDirected field that does not accurately reflect whether your use - // of Amazon Lex is related to a website, program, or other application that - // is directed or targeted, in whole or in part, to children under age 13 and - // subject to COPPA. - // - // If your use of Amazon Lex relates to a website, program, or other application - // that is directed in whole or in part, to children under age 13, you must - // obtain any required verifiable parental consent under COPPA. For information - // regarding the use of Amazon Lex in connection with websites, programs, or - // other applications that are directed or targeted, in whole or in part, to - // children under age 13, see the Amazon Lex FAQ. (https://aws.amazon.com/lex/faqs#data-security) - ChildDirected *bool `locationName:"childDirected" type:"boolean"` - - // The message that Amazon Lex uses when it doesn't understand the user's request. - // For more information, see PutBot. 
- ClarificationPrompt *Prompt `locationName:"clarificationPrompt" type:"structure"` - - // The date when the bot version was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the bot. - Description *string `locationName:"description" type:"string"` - - // If status is FAILED, Amazon Lex provides the reason that it failed to build - // the bot. - FailureReason *string `locationName:"failureReason" type:"string"` - - // The maximum time in seconds that Amazon Lex retains the data gathered in - // a conversation. For more information, see PutBot. - IdleSessionTTLInSeconds *int64 `locationName:"idleSessionTTLInSeconds" min:"60" type:"integer"` - - // An array of Intent objects. For more information, see PutBot. - Intents []*Intent `locationName:"intents" type:"list"` - - // The date when the $LATEST version of this bot was updated. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // Specifies the target locale for the bot. - Locale *string `locationName:"locale" type:"string" enum:"Locale"` - - // The name of the bot. - Name *string `locationName:"name" min:"2" type:"string"` - - // When you send a request to create or update a bot, Amazon Lex sets the status - // response element to BUILDING. After Amazon Lex builds the bot, it sets status - // to READY. If Amazon Lex can't build the bot, it sets status to FAILED. Amazon - // Lex returns the reason for the failure in the failureReason response element. - Status *string `locationName:"status" type:"string" enum:"Status"` - - // The version of the bot. - Version *string `locationName:"version" min:"1" type:"string"` - - // The Amazon Polly voice ID that Amazon Lex uses for voice interactions with - // the user. 
- VoiceId *string `locationName:"voiceId" type:"string"` -} - -// String returns the string representation -func (s CreateBotVersionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBotVersionOutput) GoString() string { - return s.String() -} - -// SetAbortStatement sets the AbortStatement field's value. -func (s *CreateBotVersionOutput) SetAbortStatement(v *Statement) *CreateBotVersionOutput { - s.AbortStatement = v - return s -} - -// SetChecksum sets the Checksum field's value. -func (s *CreateBotVersionOutput) SetChecksum(v string) *CreateBotVersionOutput { - s.Checksum = &v - return s -} - -// SetChildDirected sets the ChildDirected field's value. -func (s *CreateBotVersionOutput) SetChildDirected(v bool) *CreateBotVersionOutput { - s.ChildDirected = &v - return s -} - -// SetClarificationPrompt sets the ClarificationPrompt field's value. -func (s *CreateBotVersionOutput) SetClarificationPrompt(v *Prompt) *CreateBotVersionOutput { - s.ClarificationPrompt = v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *CreateBotVersionOutput) SetCreatedDate(v time.Time) *CreateBotVersionOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateBotVersionOutput) SetDescription(v string) *CreateBotVersionOutput { - s.Description = &v - return s -} - -// SetFailureReason sets the FailureReason field's value. -func (s *CreateBotVersionOutput) SetFailureReason(v string) *CreateBotVersionOutput { - s.FailureReason = &v - return s -} - -// SetIdleSessionTTLInSeconds sets the IdleSessionTTLInSeconds field's value. -func (s *CreateBotVersionOutput) SetIdleSessionTTLInSeconds(v int64) *CreateBotVersionOutput { - s.IdleSessionTTLInSeconds = &v - return s -} - -// SetIntents sets the Intents field's value. 
-func (s *CreateBotVersionOutput) SetIntents(v []*Intent) *CreateBotVersionOutput { - s.Intents = v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *CreateBotVersionOutput) SetLastUpdatedDate(v time.Time) *CreateBotVersionOutput { - s.LastUpdatedDate = &v - return s -} - -// SetLocale sets the Locale field's value. -func (s *CreateBotVersionOutput) SetLocale(v string) *CreateBotVersionOutput { - s.Locale = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateBotVersionOutput) SetName(v string) *CreateBotVersionOutput { - s.Name = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *CreateBotVersionOutput) SetStatus(v string) *CreateBotVersionOutput { - s.Status = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *CreateBotVersionOutput) SetVersion(v string) *CreateBotVersionOutput { - s.Version = &v - return s -} - -// SetVoiceId sets the VoiceId field's value. -func (s *CreateBotVersionOutput) SetVoiceId(v string) *CreateBotVersionOutput { - s.VoiceId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateIntentVersionRequest -type CreateIntentVersionInput struct { - _ struct{} `type:"structure"` - - // Checksum of the $LATEST version of the intent that should be used to create - // the new version. If you specify a checksum and the $LATEST version of the - // intent has a different checksum, Amazon Lex returns a PreconditionFailedException - // exception and doesn't publish a new version. If you don't specify a checksum, - // Amazon Lex publishes the $LATEST version. - Checksum *string `locationName:"checksum" type:"string"` - - // The name of the intent that you want to create a new version of. The name - // is case sensitive. 
- // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateIntentVersionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateIntentVersionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateIntentVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateIntentVersionInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChecksum sets the Checksum field's value. -func (s *CreateIntentVersionInput) SetChecksum(v string) *CreateIntentVersionInput { - s.Checksum = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateIntentVersionInput) SetName(v string) *CreateIntentVersionInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateIntentVersionResponse -type CreateIntentVersionOutput struct { - _ struct{} `type:"structure"` - - // Checksum of the intent version created. - Checksum *string `locationName:"checksum" type:"string"` - - // After the Lambda function specified in the fulfillmentActivity field fulfills - // the intent, Amazon Lex conveys this statement to the user. - ConclusionStatement *Statement `locationName:"conclusionStatement" type:"structure"` - - // If defined, the prompt that Amazon Lex uses to confirm the user's intent - // before fulfilling it. - ConfirmationPrompt *Prompt `locationName:"confirmationPrompt" type:"structure"` - - // The date that the intent was created. 
- CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the intent. - Description *string `locationName:"description" type:"string"` - - // If defined, Amazon Lex invokes this Lambda function for each user input. - DialogCodeHook *CodeHook `locationName:"dialogCodeHook" type:"structure"` - - // If defined, Amazon Lex uses this prompt to solicit additional user activity - // after the intent is fulfilled. - FollowUpPrompt *FollowUpPrompt `locationName:"followUpPrompt" type:"structure"` - - // Describes how the intent is fulfilled. - FulfillmentActivity *FulfillmentActivity `locationName:"fulfillmentActivity" type:"structure"` - - // The date that the intent was updated. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the intent. - Name *string `locationName:"name" min:"1" type:"string"` - - // A unique identifier for a built-in intent. - ParentIntentSignature *string `locationName:"parentIntentSignature" type:"string"` - - // If the user answers "no" to the question defined in confirmationPrompt, Amazon - // Lex responds with this statement to acknowledge that the intent was canceled. - RejectionStatement *Statement `locationName:"rejectionStatement" type:"structure"` - - // An array of sample utterances configured for the intent. - SampleUtterances []*string `locationName:"sampleUtterances" type:"list"` - - // An array of slot types that defines the information required to fulfill the - // intent. - Slots []*Slot `locationName:"slots" type:"list"` - - // The version number assigned to the new version of the intent. 
- Version *string `locationName:"version" min:"1" type:"string"` -} - -// String returns the string representation -func (s CreateIntentVersionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateIntentVersionOutput) GoString() string { - return s.String() -} - -// SetChecksum sets the Checksum field's value. -func (s *CreateIntentVersionOutput) SetChecksum(v string) *CreateIntentVersionOutput { - s.Checksum = &v - return s -} - -// SetConclusionStatement sets the ConclusionStatement field's value. -func (s *CreateIntentVersionOutput) SetConclusionStatement(v *Statement) *CreateIntentVersionOutput { - s.ConclusionStatement = v - return s -} - -// SetConfirmationPrompt sets the ConfirmationPrompt field's value. -func (s *CreateIntentVersionOutput) SetConfirmationPrompt(v *Prompt) *CreateIntentVersionOutput { - s.ConfirmationPrompt = v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *CreateIntentVersionOutput) SetCreatedDate(v time.Time) *CreateIntentVersionOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateIntentVersionOutput) SetDescription(v string) *CreateIntentVersionOutput { - s.Description = &v - return s -} - -// SetDialogCodeHook sets the DialogCodeHook field's value. -func (s *CreateIntentVersionOutput) SetDialogCodeHook(v *CodeHook) *CreateIntentVersionOutput { - s.DialogCodeHook = v - return s -} - -// SetFollowUpPrompt sets the FollowUpPrompt field's value. -func (s *CreateIntentVersionOutput) SetFollowUpPrompt(v *FollowUpPrompt) *CreateIntentVersionOutput { - s.FollowUpPrompt = v - return s -} - -// SetFulfillmentActivity sets the FulfillmentActivity field's value. -func (s *CreateIntentVersionOutput) SetFulfillmentActivity(v *FulfillmentActivity) *CreateIntentVersionOutput { - s.FulfillmentActivity = v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. 
-func (s *CreateIntentVersionOutput) SetLastUpdatedDate(v time.Time) *CreateIntentVersionOutput { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateIntentVersionOutput) SetName(v string) *CreateIntentVersionOutput { - s.Name = &v - return s -} - -// SetParentIntentSignature sets the ParentIntentSignature field's value. -func (s *CreateIntentVersionOutput) SetParentIntentSignature(v string) *CreateIntentVersionOutput { - s.ParentIntentSignature = &v - return s -} - -// SetRejectionStatement sets the RejectionStatement field's value. -func (s *CreateIntentVersionOutput) SetRejectionStatement(v *Statement) *CreateIntentVersionOutput { - s.RejectionStatement = v - return s -} - -// SetSampleUtterances sets the SampleUtterances field's value. -func (s *CreateIntentVersionOutput) SetSampleUtterances(v []*string) *CreateIntentVersionOutput { - s.SampleUtterances = v - return s -} - -// SetSlots sets the Slots field's value. -func (s *CreateIntentVersionOutput) SetSlots(v []*Slot) *CreateIntentVersionOutput { - s.Slots = v - return s -} - -// SetVersion sets the Version field's value. -func (s *CreateIntentVersionOutput) SetVersion(v string) *CreateIntentVersionOutput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateSlotTypeVersionRequest -type CreateSlotTypeVersionInput struct { - _ struct{} `type:"structure"` - - // Checksum for the $LATEST version of the slot type that you want to publish. - // If you specify a checksum and the $LATEST version of the slot type has a - // different checksum, Amazon Lex returns a PreconditionFailedException exception - // and doesn't publish the new version. If you don't specify a checksum, Amazon - // Lex publishes the $LATEST version. - Checksum *string `locationName:"checksum" type:"string"` - - // The name of the slot type that you want to create a new version for. The - // name is case sensitive. 
- // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateSlotTypeVersionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateSlotTypeVersionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateSlotTypeVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateSlotTypeVersionInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChecksum sets the Checksum field's value. -func (s *CreateSlotTypeVersionInput) SetChecksum(v string) *CreateSlotTypeVersionInput { - s.Checksum = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateSlotTypeVersionInput) SetName(v string) *CreateSlotTypeVersionInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/CreateSlotTypeVersionResponse -type CreateSlotTypeVersionOutput struct { - _ struct{} `type:"structure"` - - // Checksum of the $LATEST version of the slot type. - Checksum *string `locationName:"checksum" type:"string"` - - // The date that the slot type was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the slot type. - Description *string `locationName:"description" type:"string"` - - // A list of EnumerationValue objects that defines the values that the slot - // type can take. - EnumerationValues []*EnumerationValue `locationName:"enumerationValues" min:"1" type:"list"` - - // The date that the slot type was updated. 
When you create a resource, the - // creation date and last update date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the slot type. - Name *string `locationName:"name" min:"1" type:"string"` - - // The strategy that Amazon Lex uses to determine the value of the slot. For - // more information, see PutSlotType. - ValueSelectionStrategy *string `locationName:"valueSelectionStrategy" type:"string" enum:"SlotValueSelectionStrategy"` - - // The version assigned to the new slot type version. - Version *string `locationName:"version" min:"1" type:"string"` -} - -// String returns the string representation -func (s CreateSlotTypeVersionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateSlotTypeVersionOutput) GoString() string { - return s.String() -} - -// SetChecksum sets the Checksum field's value. -func (s *CreateSlotTypeVersionOutput) SetChecksum(v string) *CreateSlotTypeVersionOutput { - s.Checksum = &v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *CreateSlotTypeVersionOutput) SetCreatedDate(v time.Time) *CreateSlotTypeVersionOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateSlotTypeVersionOutput) SetDescription(v string) *CreateSlotTypeVersionOutput { - s.Description = &v - return s -} - -// SetEnumerationValues sets the EnumerationValues field's value. -func (s *CreateSlotTypeVersionOutput) SetEnumerationValues(v []*EnumerationValue) *CreateSlotTypeVersionOutput { - s.EnumerationValues = v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *CreateSlotTypeVersionOutput) SetLastUpdatedDate(v time.Time) *CreateSlotTypeVersionOutput { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. 
-func (s *CreateSlotTypeVersionOutput) SetName(v string) *CreateSlotTypeVersionOutput { - s.Name = &v - return s -} - -// SetValueSelectionStrategy sets the ValueSelectionStrategy field's value. -func (s *CreateSlotTypeVersionOutput) SetValueSelectionStrategy(v string) *CreateSlotTypeVersionOutput { - s.ValueSelectionStrategy = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *CreateSlotTypeVersionOutput) SetVersion(v string) *CreateSlotTypeVersionOutput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotAliasRequest -type DeleteBotAliasInput struct { - _ struct{} `type:"structure"` - - // The name of the bot that the alias points to. - // - // BotName is a required field - BotName *string `location:"uri" locationName:"botName" min:"2" type:"string" required:"true"` - - // The name of the alias to delete. The name is case sensitive. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBotAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBotAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteBotAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBotAliasInput"} - if s.BotName == nil { - invalidParams.Add(request.NewErrParamRequired("BotName")) - } - if s.BotName != nil && len(*s.BotName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("BotName", 2)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBotName sets the BotName field's value. -func (s *DeleteBotAliasInput) SetBotName(v string) *DeleteBotAliasInput { - s.BotName = &v - return s -} - -// SetName sets the Name field's value. -func (s *DeleteBotAliasInput) SetName(v string) *DeleteBotAliasInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotAliasOutput -type DeleteBotAliasOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBotAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBotAliasOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotChannelAssociationRequest -type DeleteBotChannelAssociationInput struct { - _ struct{} `type:"structure"` - - // An alias that points to the specific version of the Amazon Lex bot to which - // this association is being made. - // - // BotAlias is a required field - BotAlias *string `location:"uri" locationName:"aliasName" min:"1" type:"string" required:"true"` - - // The name of the Amazon Lex bot. - // - // BotName is a required field - BotName *string `location:"uri" locationName:"botName" min:"2" type:"string" required:"true"` - - // The name of the association. The name is case sensitive. 
- // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBotChannelAssociationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBotChannelAssociationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBotChannelAssociationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBotChannelAssociationInput"} - if s.BotAlias == nil { - invalidParams.Add(request.NewErrParamRequired("BotAlias")) - } - if s.BotAlias != nil && len(*s.BotAlias) < 1 { - invalidParams.Add(request.NewErrParamMinLen("BotAlias", 1)) - } - if s.BotName == nil { - invalidParams.Add(request.NewErrParamRequired("BotName")) - } - if s.BotName != nil && len(*s.BotName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("BotName", 2)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBotAlias sets the BotAlias field's value. -func (s *DeleteBotChannelAssociationInput) SetBotAlias(v string) *DeleteBotChannelAssociationInput { - s.BotAlias = &v - return s -} - -// SetBotName sets the BotName field's value. -func (s *DeleteBotChannelAssociationInput) SetBotName(v string) *DeleteBotChannelAssociationInput { - s.BotName = &v - return s -} - -// SetName sets the Name field's value. 
-func (s *DeleteBotChannelAssociationInput) SetName(v string) *DeleteBotChannelAssociationInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotChannelAssociationOutput -type DeleteBotChannelAssociationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBotChannelAssociationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBotChannelAssociationOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotRequest -type DeleteBotInput struct { - _ struct{} `type:"structure"` - - // The name of the bot. The name is case sensitive. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBotInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBotInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBotInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. 
-func (s *DeleteBotInput) SetName(v string) *DeleteBotInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotOutput -type DeleteBotOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBotOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBotOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotVersionRequest -type DeleteBotVersionInput struct { - _ struct{} `type:"structure"` - - // The name of the bot. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"2" type:"string" required:"true"` - - // The version of the bot to delete. You cannot delete the $LATEST version of - // the bot. To delete the $LATEST version, use the DeleteBot operation. - // - // Version is a required field - Version *string `location:"uri" locationName:"version" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBotVersionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBotVersionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteBotVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBotVersionInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteBotVersionInput) SetName(v string) *DeleteBotVersionInput { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *DeleteBotVersionInput) SetVersion(v string) *DeleteBotVersionInput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteBotVersionOutput -type DeleteBotVersionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBotVersionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBotVersionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteIntentRequest -type DeleteIntentInput struct { - _ struct{} `type:"structure"` - - // The name of the intent. The name is case sensitive. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteIntentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteIntentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteIntentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteIntentInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteIntentInput) SetName(v string) *DeleteIntentInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteIntentOutput -type DeleteIntentOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteIntentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteIntentOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteIntentVersionRequest -type DeleteIntentVersionInput struct { - _ struct{} `type:"structure"` - - // The name of the intent. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` - - // The version of the intent to delete. You cannot delete the $LATEST version - // of the intent. To delete the $LATEST version, use the DeleteIntent operation. - // - // Version is a required field - Version *string `location:"uri" locationName:"version" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteIntentVersionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteIntentVersionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteIntentVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteIntentVersionInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteIntentVersionInput) SetName(v string) *DeleteIntentVersionInput { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *DeleteIntentVersionInput) SetVersion(v string) *DeleteIntentVersionInput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteIntentVersionOutput -type DeleteIntentVersionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteIntentVersionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteIntentVersionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteSlotTypeRequest -type DeleteSlotTypeInput struct { - _ struct{} `type:"structure"` - - // The name of the slot type. The name is case sensitive. 
- // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteSlotTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSlotTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteSlotTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSlotTypeInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteSlotTypeInput) SetName(v string) *DeleteSlotTypeInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteSlotTypeOutput -type DeleteSlotTypeOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteSlotTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSlotTypeOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteSlotTypeVersionRequest -type DeleteSlotTypeVersionInput struct { - _ struct{} `type:"structure"` - - // The name of the slot type. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` - - // The version of the slot type to delete. You cannot delete the $LATEST version - // of the slot type. To delete the $LATEST version, use the DeleteSlotType operation. 
- // - // Version is a required field - Version *string `location:"uri" locationName:"version" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteSlotTypeVersionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSlotTypeVersionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteSlotTypeVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSlotTypeVersionInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteSlotTypeVersionInput) SetName(v string) *DeleteSlotTypeVersionInput { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. 
-func (s *DeleteSlotTypeVersionInput) SetVersion(v string) *DeleteSlotTypeVersionInput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteSlotTypeVersionOutput -type DeleteSlotTypeVersionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteSlotTypeVersionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSlotTypeVersionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteUtterancesRequest -type DeleteUtterancesInput struct { - _ struct{} `type:"structure"` - - // The name of the bot that stored the utterances. - // - // BotName is a required field - BotName *string `location:"uri" locationName:"botName" min:"2" type:"string" required:"true"` - - // The unique identifier for the user that made the utterances. This is the - // user ID that was sent in the PostContent (http://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostContent.html) - // or PostText (http://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostText.html) - // operation request that contained the utterance. - // - // UserId is a required field - UserId *string `location:"uri" locationName:"userId" min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteUtterancesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteUtterancesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteUtterancesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteUtterancesInput"} - if s.BotName == nil { - invalidParams.Add(request.NewErrParamRequired("BotName")) - } - if s.BotName != nil && len(*s.BotName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("BotName", 2)) - } - if s.UserId == nil { - invalidParams.Add(request.NewErrParamRequired("UserId")) - } - if s.UserId != nil && len(*s.UserId) < 2 { - invalidParams.Add(request.NewErrParamMinLen("UserId", 2)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBotName sets the BotName field's value. -func (s *DeleteUtterancesInput) SetBotName(v string) *DeleteUtterancesInput { - s.BotName = &v - return s -} - -// SetUserId sets the UserId field's value. -func (s *DeleteUtterancesInput) SetUserId(v string) *DeleteUtterancesInput { - s.UserId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/DeleteUtterancesOutput -type DeleteUtterancesOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteUtterancesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteUtterancesOutput) GoString() string { - return s.String() -} - -// Each slot type can have a set of values. Each enumeration value represents -// a value the slot type can take. -// -// For example, a pizza ordering bot could have a slot type that specifies the -// type of crust that the pizza should have. The slot type could include the -// values -// -// * thick -// -// * thin -// -// * stuffed -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/EnumerationValue -type EnumerationValue struct { - _ struct{} `type:"structure"` - - // Additional values related to the slot type value. - Synonyms []*string `locationName:"synonyms" type:"list"` - - // The value of the slot type. 
- // - // Value is a required field - Value *string `locationName:"value" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s EnumerationValue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EnumerationValue) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *EnumerationValue) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EnumerationValue"} - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - if s.Value != nil && len(*s.Value) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Value", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSynonyms sets the Synonyms field's value. -func (s *EnumerationValue) SetSynonyms(v []*string) *EnumerationValue { - s.Synonyms = v - return s -} - -// SetValue sets the Value field's value. -func (s *EnumerationValue) SetValue(v string) *EnumerationValue { - s.Value = &v - return s -} - -// A prompt for additional activity after an intent is fulfilled. For example, -// after the OrderPizza intent is fulfilled, you might prompt the user to find -// out whether the user wants to order drinks. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/FollowUpPrompt -type FollowUpPrompt struct { - _ struct{} `type:"structure"` - - // Prompts for information from the user. - // - // Prompt is a required field - Prompt *Prompt `locationName:"prompt" type:"structure" required:"true"` - - // If the user answers "no" to the question defined in the prompt field, Amazon - // Lex responds with this statement to acknowledge that the intent was canceled. 
- // - // RejectionStatement is a required field - RejectionStatement *Statement `locationName:"rejectionStatement" type:"structure" required:"true"` -} - -// String returns the string representation -func (s FollowUpPrompt) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FollowUpPrompt) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *FollowUpPrompt) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "FollowUpPrompt"} - if s.Prompt == nil { - invalidParams.Add(request.NewErrParamRequired("Prompt")) - } - if s.RejectionStatement == nil { - invalidParams.Add(request.NewErrParamRequired("RejectionStatement")) - } - if s.Prompt != nil { - if err := s.Prompt.Validate(); err != nil { - invalidParams.AddNested("Prompt", err.(request.ErrInvalidParams)) - } - } - if s.RejectionStatement != nil { - if err := s.RejectionStatement.Validate(); err != nil { - invalidParams.AddNested("RejectionStatement", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrompt sets the Prompt field's value. -func (s *FollowUpPrompt) SetPrompt(v *Prompt) *FollowUpPrompt { - s.Prompt = v - return s -} - -// SetRejectionStatement sets the RejectionStatement field's value. -func (s *FollowUpPrompt) SetRejectionStatement(v *Statement) *FollowUpPrompt { - s.RejectionStatement = v - return s -} - -// Describes how the intent is fulfilled after the user provides all of the -// information required for the intent. You can provide a Lambda function to -// process the intent, or you can return the intent information to the client -// application. We recommend that you use a Lambda function so that the relevant -// logic lives in the Cloud and limit the client-side code primarily to presentation. 
-// If you need to update the logic, you only update the Lambda function; you -// don't need to upgrade your client application. -// -// Consider the following examples: -// -// * In a pizza ordering application, after the user provides all of the -// information for placing an order, you use a Lambda function to place an -// order with a pizzeria. -// -// * In a gaming application, when a user says "pick up a rock," this information -// must go back to the client application so that it can perform the operation -// and update the graphics. In this case, you want Amazon Lex to return the -// intent data to the client. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/FulfillmentActivity -type FulfillmentActivity struct { - _ struct{} `type:"structure"` - - // A description of the Lambda function that is run to fulfill the intent. - CodeHook *CodeHook `locationName:"codeHook" type:"structure"` - - // How the intent should be fulfilled, either by running a Lambda function or - // by returning the slot data to the client application. - // - // Type is a required field - Type *string `locationName:"type" type:"string" required:"true" enum:"FulfillmentActivityType"` -} - -// String returns the string representation -func (s FulfillmentActivity) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FulfillmentActivity) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *FulfillmentActivity) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "FulfillmentActivity"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - if s.CodeHook != nil { - if err := s.CodeHook.Validate(); err != nil { - invalidParams.AddNested("CodeHook", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCodeHook sets the CodeHook field's value. -func (s *FulfillmentActivity) SetCodeHook(v *CodeHook) *FulfillmentActivity { - s.CodeHook = v - return s -} - -// SetType sets the Type field's value. -func (s *FulfillmentActivity) SetType(v string) *FulfillmentActivity { - s.Type = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotAliasRequest -type GetBotAliasInput struct { - _ struct{} `type:"structure"` - - // The name of the bot. - // - // BotName is a required field - BotName *string `location:"uri" locationName:"botName" min:"2" type:"string" required:"true"` - - // The name of the bot alias. The name is case sensitive. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBotAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetBotAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBotAliasInput"} - if s.BotName == nil { - invalidParams.Add(request.NewErrParamRequired("BotName")) - } - if s.BotName != nil && len(*s.BotName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("BotName", 2)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBotName sets the BotName field's value. -func (s *GetBotAliasInput) SetBotName(v string) *GetBotAliasInput { - s.BotName = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetBotAliasInput) SetName(v string) *GetBotAliasInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotAliasResponse -type GetBotAliasOutput struct { - _ struct{} `type:"structure"` - - // The name of the bot that the alias points to. - BotName *string `locationName:"botName" min:"2" type:"string"` - - // The version of the bot that the alias points to. - BotVersion *string `locationName:"botVersion" min:"1" type:"string"` - - // Checksum of the bot alias. - Checksum *string `locationName:"checksum" type:"string"` - - // The date that the bot alias was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the bot alias. - Description *string `locationName:"description" type:"string"` - - // The date that the bot alias was updated. When you create a resource, the - // creation date and the last updated date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the bot alias. 
- Name *string `locationName:"name" min:"1" type:"string"` -} - -// String returns the string representation -func (s GetBotAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotAliasOutput) GoString() string { - return s.String() -} - -// SetBotName sets the BotName field's value. -func (s *GetBotAliasOutput) SetBotName(v string) *GetBotAliasOutput { - s.BotName = &v - return s -} - -// SetBotVersion sets the BotVersion field's value. -func (s *GetBotAliasOutput) SetBotVersion(v string) *GetBotAliasOutput { - s.BotVersion = &v - return s -} - -// SetChecksum sets the Checksum field's value. -func (s *GetBotAliasOutput) SetChecksum(v string) *GetBotAliasOutput { - s.Checksum = &v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *GetBotAliasOutput) SetCreatedDate(v time.Time) *GetBotAliasOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *GetBotAliasOutput) SetDescription(v string) *GetBotAliasOutput { - s.Description = &v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *GetBotAliasOutput) SetLastUpdatedDate(v time.Time) *GetBotAliasOutput { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetBotAliasOutput) SetName(v string) *GetBotAliasOutput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotAliasesRequest -type GetBotAliasesInput struct { - _ struct{} `type:"structure"` - - // The name of the bot. - // - // BotName is a required field - BotName *string `location:"uri" locationName:"botName" min:"2" type:"string" required:"true"` - - // The maximum number of aliases to return in the response. The default is 50. - // . - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - // Substring to match in bot alias names. 
An alias will be returned if any part - // of its name matches the substring. For example, "xyz" matches both "xyzabc" - // and "abcxyz." - NameContains *string `location:"querystring" locationName:"nameContains" min:"1" type:"string"` - - // A pagination token for fetching the next page of aliases. If the response - // to this call is truncated, Amazon Lex returns a pagination token in the response. - // To fetch the next page of aliases, specify the pagination token in the next - // request. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetBotAliasesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotAliasesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBotAliasesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBotAliasesInput"} - if s.BotName == nil { - invalidParams.Add(request.NewErrParamRequired("BotName")) - } - if s.BotName != nil && len(*s.BotName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("BotName", 2)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NameContains != nil && len(*s.NameContains) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NameContains", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBotName sets the BotName field's value. -func (s *GetBotAliasesInput) SetBotName(v string) *GetBotAliasesInput { - s.BotName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetBotAliasesInput) SetMaxResults(v int64) *GetBotAliasesInput { - s.MaxResults = &v - return s -} - -// SetNameContains sets the NameContains field's value. 
-func (s *GetBotAliasesInput) SetNameContains(v string) *GetBotAliasesInput { - s.NameContains = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetBotAliasesInput) SetNextToken(v string) *GetBotAliasesInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotAliasesResponse -type GetBotAliasesOutput struct { - _ struct{} `type:"structure"` - - // An array of BotAliasMetadata objects, each describing a bot alias. - BotAliases []*BotAliasMetadata `type:"list"` - - // A pagination token for fetching next page of aliases. If the response to - // this call is truncated, Amazon Lex returns a pagination token in the response. - // To fetch the next page of aliases, specify the pagination token in the next - // request. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetBotAliasesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotAliasesOutput) GoString() string { - return s.String() -} - -// SetBotAliases sets the BotAliases field's value. -func (s *GetBotAliasesOutput) SetBotAliases(v []*BotAliasMetadata) *GetBotAliasesOutput { - s.BotAliases = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetBotAliasesOutput) SetNextToken(v string) *GetBotAliasesOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotChannelAssociationRequest -type GetBotChannelAssociationInput struct { - _ struct{} `type:"structure"` - - // An alias pointing to the specific version of the Amazon Lex bot to which - // this association is being made. - // - // BotAlias is a required field - BotAlias *string `location:"uri" locationName:"aliasName" min:"1" type:"string" required:"true"` - - // The name of the Amazon Lex bot. 
- // - // BotName is a required field - BotName *string `location:"uri" locationName:"botName" min:"2" type:"string" required:"true"` - - // The name of the association between the bot and the channel. The name is - // case sensitive. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBotChannelAssociationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotChannelAssociationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBotChannelAssociationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBotChannelAssociationInput"} - if s.BotAlias == nil { - invalidParams.Add(request.NewErrParamRequired("BotAlias")) - } - if s.BotAlias != nil && len(*s.BotAlias) < 1 { - invalidParams.Add(request.NewErrParamMinLen("BotAlias", 1)) - } - if s.BotName == nil { - invalidParams.Add(request.NewErrParamRequired("BotName")) - } - if s.BotName != nil && len(*s.BotName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("BotName", 2)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBotAlias sets the BotAlias field's value. -func (s *GetBotChannelAssociationInput) SetBotAlias(v string) *GetBotChannelAssociationInput { - s.BotAlias = &v - return s -} - -// SetBotName sets the BotName field's value. -func (s *GetBotChannelAssociationInput) SetBotName(v string) *GetBotChannelAssociationInput { - s.BotName = &v - return s -} - -// SetName sets the Name field's value. 
-func (s *GetBotChannelAssociationInput) SetName(v string) *GetBotChannelAssociationInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotChannelAssociationResponse -type GetBotChannelAssociationOutput struct { - _ struct{} `type:"structure"` - - // An alias pointing to the specific version of the Amazon Lex bot to which - // this association is being made. - BotAlias *string `locationName:"botAlias" min:"1" type:"string"` - - // Provides information that the messaging platform needs to communicate with - // the Amazon Lex bot. - BotConfiguration map[string]*string `locationName:"botConfiguration" min:"1" type:"map"` - - // The name of the Amazon Lex bot. - BotName *string `locationName:"botName" min:"2" type:"string"` - - // The date that the association between the bot and the channel was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the association between the bot and the channel. - Description *string `locationName:"description" type:"string"` - - // If status is FAILED, Amazon Lex provides the reason that it failed to create - // the association. - FailureReason *string `locationName:"failureReason" type:"string"` - - // The name of the association between the bot and the channel. - Name *string `locationName:"name" min:"1" type:"string"` - - // The status of the bot channel. - // - // * CREATED - The channel has been created and is ready for use. - // - // * IN_PROGRESS - Channel creation is in progress. - // - // * FAILED - There was an error creating the channel. For information about - // the reason for the failure, see the failureReason field. - Status *string `locationName:"status" type:"string" enum:"ChannelStatus"` - - // The type of the messaging platform. 
- Type *string `locationName:"type" type:"string" enum:"ChannelType"` -} - -// String returns the string representation -func (s GetBotChannelAssociationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotChannelAssociationOutput) GoString() string { - return s.String() -} - -// SetBotAlias sets the BotAlias field's value. -func (s *GetBotChannelAssociationOutput) SetBotAlias(v string) *GetBotChannelAssociationOutput { - s.BotAlias = &v - return s -} - -// SetBotConfiguration sets the BotConfiguration field's value. -func (s *GetBotChannelAssociationOutput) SetBotConfiguration(v map[string]*string) *GetBotChannelAssociationOutput { - s.BotConfiguration = v - return s -} - -// SetBotName sets the BotName field's value. -func (s *GetBotChannelAssociationOutput) SetBotName(v string) *GetBotChannelAssociationOutput { - s.BotName = &v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *GetBotChannelAssociationOutput) SetCreatedDate(v time.Time) *GetBotChannelAssociationOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *GetBotChannelAssociationOutput) SetDescription(v string) *GetBotChannelAssociationOutput { - s.Description = &v - return s -} - -// SetFailureReason sets the FailureReason field's value. -func (s *GetBotChannelAssociationOutput) SetFailureReason(v string) *GetBotChannelAssociationOutput { - s.FailureReason = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetBotChannelAssociationOutput) SetName(v string) *GetBotChannelAssociationOutput { - s.Name = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *GetBotChannelAssociationOutput) SetStatus(v string) *GetBotChannelAssociationOutput { - s.Status = &v - return s -} - -// SetType sets the Type field's value. 
-func (s *GetBotChannelAssociationOutput) SetType(v string) *GetBotChannelAssociationOutput { - s.Type = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotChannelAssociationsRequest -type GetBotChannelAssociationsInput struct { - _ struct{} `type:"structure"` - - // An alias pointing to the specific version of the Amazon Lex bot to which - // this association is being made. - // - // BotAlias is a required field - BotAlias *string `location:"uri" locationName:"aliasName" min:"1" type:"string" required:"true"` - - // The name of the Amazon Lex bot in the association. - // - // BotName is a required field - BotName *string `location:"uri" locationName:"botName" min:"2" type:"string" required:"true"` - - // The maximum number of associations to return in the response. The default - // is 50. - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - // Substring to match in channel association names. An association will be returned - // if any part of its name matches the substring. For example, "xyz" matches - // both "xyzabc" and "abcxyz." To return all bot channel associations, use a - // hyphen ("-") as the nameContains parameter. - NameContains *string `location:"querystring" locationName:"nameContains" min:"1" type:"string"` - - // A pagination token for fetching the next page of associations. If the response - // to this call is truncated, Amazon Lex returns a pagination token in the response. - // To fetch the next page of associations, specify the pagination token in the - // next request. 
- NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetBotChannelAssociationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotChannelAssociationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBotChannelAssociationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBotChannelAssociationsInput"} - if s.BotAlias == nil { - invalidParams.Add(request.NewErrParamRequired("BotAlias")) - } - if s.BotAlias != nil && len(*s.BotAlias) < 1 { - invalidParams.Add(request.NewErrParamMinLen("BotAlias", 1)) - } - if s.BotName == nil { - invalidParams.Add(request.NewErrParamRequired("BotName")) - } - if s.BotName != nil && len(*s.BotName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("BotName", 2)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NameContains != nil && len(*s.NameContains) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NameContains", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBotAlias sets the BotAlias field's value. -func (s *GetBotChannelAssociationsInput) SetBotAlias(v string) *GetBotChannelAssociationsInput { - s.BotAlias = &v - return s -} - -// SetBotName sets the BotName field's value. -func (s *GetBotChannelAssociationsInput) SetBotName(v string) *GetBotChannelAssociationsInput { - s.BotName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetBotChannelAssociationsInput) SetMaxResults(v int64) *GetBotChannelAssociationsInput { - s.MaxResults = &v - return s -} - -// SetNameContains sets the NameContains field's value. 
-func (s *GetBotChannelAssociationsInput) SetNameContains(v string) *GetBotChannelAssociationsInput { - s.NameContains = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetBotChannelAssociationsInput) SetNextToken(v string) *GetBotChannelAssociationsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotChannelAssociationsResponse -type GetBotChannelAssociationsOutput struct { - _ struct{} `type:"structure"` - - // An array of objects, one for each association, that provides information - // about the Amazon Lex bot and its association with the channel. - BotChannelAssociations []*BotChannelAssociation `locationName:"botChannelAssociations" type:"list"` - - // A pagination token that fetches the next page of associations. If the response - // to this call is truncated, Amazon Lex returns a pagination token in the response. - // To fetch the next page of associations, specify the pagination token in the - // next request. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetBotChannelAssociationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotChannelAssociationsOutput) GoString() string { - return s.String() -} - -// SetBotChannelAssociations sets the BotChannelAssociations field's value. -func (s *GetBotChannelAssociationsOutput) SetBotChannelAssociations(v []*BotChannelAssociation) *GetBotChannelAssociationsOutput { - s.BotChannelAssociations = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetBotChannelAssociationsOutput) SetNextToken(v string) *GetBotChannelAssociationsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotRequest -type GetBotInput struct { - _ struct{} `type:"structure"` - - // The name of the bot. 
The name is case sensitive. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"2" type:"string" required:"true"` - - // The version or alias of the bot. - // - // VersionOrAlias is a required field - VersionOrAlias *string `location:"uri" locationName:"versionoralias" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBotInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBotInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - if s.VersionOrAlias == nil { - invalidParams.Add(request.NewErrParamRequired("VersionOrAlias")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetBotInput) SetName(v string) *GetBotInput { - s.Name = &v - return s -} - -// SetVersionOrAlias sets the VersionOrAlias field's value. -func (s *GetBotInput) SetVersionOrAlias(v string) *GetBotInput { - s.VersionOrAlias = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotResponse -type GetBotOutput struct { - _ struct{} `type:"structure"` - - // The message that Amazon Lex returns when the user elects to end the conversation - // without completing it. For more information, see PutBot. - AbortStatement *Statement `locationName:"abortStatement" type:"structure"` - - // Checksum of the bot used to identify a specific revision of the bot's $LATEST - // version. 
- Checksum *string `locationName:"checksum" type:"string"` - - // For each Amazon Lex bot created with the Amazon Lex Model Building Service, - // you must specify whether your use of Amazon Lex is related to a website, - // program, or other application that is directed or targeted, in whole or in - // part, to children under age 13 and subject to the Children's Online Privacy - // Protection Act (COPPA) by specifying true or false in the childDirected field. - // By specifying true in the childDirected field, you confirm that your use - // of Amazon Lex is related to a website, program, or other application that - // is directed or targeted, in whole or in part, to children under age 13 and - // subject to COPPA. By specifying false in the childDirected field, you confirm - // that your use of Amazon Lex is not related to a website, program, or other - // application that is directed or targeted, in whole or in part, to children - // under age 13 and subject to COPPA. You may not specify a default value for - // the childDirected field that does not accurately reflect whether your use - // of Amazon Lex is related to a website, program, or other application that - // is directed or targeted, in whole or in part, to children under age 13 and - // subject to COPPA. - // - // If your use of Amazon Lex relates to a website, program, or other application - // that is directed in whole or in part, to children under age 13, you must - // obtain any required verifiable parental consent under COPPA. For information - // regarding the use of Amazon Lex in connection with websites, programs, or - // other applications that are directed or targeted, in whole or in part, to - // children under age 13, see the Amazon Lex FAQ. (https://aws.amazon.com/lex/faqs#data-security) - ChildDirected *bool `locationName:"childDirected" type:"boolean"` - - // The message Amazon Lex uses when it doesn't understand the user's request. - // For more information, see PutBot. 
- ClarificationPrompt *Prompt `locationName:"clarificationPrompt" type:"structure"` - - // The date that the bot was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the bot. - Description *string `locationName:"description" type:"string"` - - // If status is FAILED, Amazon Lex explains why it failed to build the bot. - FailureReason *string `locationName:"failureReason" type:"string"` - - // The maximum time in seconds that Amazon Lex retains the data gathered in - // a conversation. For more information, see PutBot. - IdleSessionTTLInSeconds *int64 `locationName:"idleSessionTTLInSeconds" min:"60" type:"integer"` - - // An array of intent objects. For more information, see PutBot. - Intents []*Intent `locationName:"intents" type:"list"` - - // The date that the bot was updated. When you create a resource, the creation - // date and last updated date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The target locale for the bot. - Locale *string `locationName:"locale" type:"string" enum:"Locale"` - - // The name of the bot. - Name *string `locationName:"name" min:"2" type:"string"` - - // The status of the bot. If the bot is ready to run, the status is READY. If - // there was a problem with building the bot, the status is FAILED and the failureReason - // explains why the bot did not build. If the bot was saved but not built, the - // status is NOT BUILT. - Status *string `locationName:"status" type:"string" enum:"Status"` - - // The version of the bot. For a new bot, the version is always $LATEST. - Version *string `locationName:"version" min:"1" type:"string"` - - // The Amazon Polly voice ID that Amazon Lex uses for voice interaction with - // the user. For more information, see PutBot. 
- VoiceId *string `locationName:"voiceId" type:"string"` -} - -// String returns the string representation -func (s GetBotOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotOutput) GoString() string { - return s.String() -} - -// SetAbortStatement sets the AbortStatement field's value. -func (s *GetBotOutput) SetAbortStatement(v *Statement) *GetBotOutput { - s.AbortStatement = v - return s -} - -// SetChecksum sets the Checksum field's value. -func (s *GetBotOutput) SetChecksum(v string) *GetBotOutput { - s.Checksum = &v - return s -} - -// SetChildDirected sets the ChildDirected field's value. -func (s *GetBotOutput) SetChildDirected(v bool) *GetBotOutput { - s.ChildDirected = &v - return s -} - -// SetClarificationPrompt sets the ClarificationPrompt field's value. -func (s *GetBotOutput) SetClarificationPrompt(v *Prompt) *GetBotOutput { - s.ClarificationPrompt = v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *GetBotOutput) SetCreatedDate(v time.Time) *GetBotOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *GetBotOutput) SetDescription(v string) *GetBotOutput { - s.Description = &v - return s -} - -// SetFailureReason sets the FailureReason field's value. -func (s *GetBotOutput) SetFailureReason(v string) *GetBotOutput { - s.FailureReason = &v - return s -} - -// SetIdleSessionTTLInSeconds sets the IdleSessionTTLInSeconds field's value. -func (s *GetBotOutput) SetIdleSessionTTLInSeconds(v int64) *GetBotOutput { - s.IdleSessionTTLInSeconds = &v - return s -} - -// SetIntents sets the Intents field's value. -func (s *GetBotOutput) SetIntents(v []*Intent) *GetBotOutput { - s.Intents = v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. 
-func (s *GetBotOutput) SetLastUpdatedDate(v time.Time) *GetBotOutput { - s.LastUpdatedDate = &v - return s -} - -// SetLocale sets the Locale field's value. -func (s *GetBotOutput) SetLocale(v string) *GetBotOutput { - s.Locale = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetBotOutput) SetName(v string) *GetBotOutput { - s.Name = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *GetBotOutput) SetStatus(v string) *GetBotOutput { - s.Status = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *GetBotOutput) SetVersion(v string) *GetBotOutput { - s.Version = &v - return s -} - -// SetVoiceId sets the VoiceId field's value. -func (s *GetBotOutput) SetVoiceId(v string) *GetBotOutput { - s.VoiceId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotVersionsRequest -type GetBotVersionsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of bot versions to return in the response. The default - // is 10. - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - // The name of the bot for which versions should be returned. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"2" type:"string" required:"true"` - - // A pagination token for fetching the next page of bot versions. If the response - // to this call is truncated, Amazon Lex returns a pagination token in the response. - // To fetch the next page of versions, specify the pagination token in the next - // request. 
- NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetBotVersionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotVersionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBotVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBotVersionsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetBotVersionsInput) SetMaxResults(v int64) *GetBotVersionsInput { - s.MaxResults = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetBotVersionsInput) SetName(v string) *GetBotVersionsInput { - s.Name = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetBotVersionsInput) SetNextToken(v string) *GetBotVersionsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotVersionsResponse -type GetBotVersionsOutput struct { - _ struct{} `type:"structure"` - - // An array of BotMetadata objects, one for each numbered version of the bot - // plus one for the $LATEST version. - Bots []*BotMetadata `locationName:"bots" type:"list"` - - // A pagination token for fetching the next page of bot versions. If the response - // to this call is truncated, Amazon Lex returns a pagination token in the response. 
- // To fetch the next page of versions, specify the pagination token in the next - // request. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetBotVersionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotVersionsOutput) GoString() string { - return s.String() -} - -// SetBots sets the Bots field's value. -func (s *GetBotVersionsOutput) SetBots(v []*BotMetadata) *GetBotVersionsOutput { - s.Bots = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetBotVersionsOutput) SetNextToken(v string) *GetBotVersionsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotsRequest -type GetBotsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of bots to return in the response that the request will - // return. The default is 10. - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - // Substring to match in bot names. A bot will be returned if any part of its - // name matches the substring. For example, "xyz" matches both "xyzabc" and - // "abcxyz." - NameContains *string `location:"querystring" locationName:"nameContains" min:"2" type:"string"` - - // A pagination token that fetches the next page of bots. If the response to - // this call is truncated, Amazon Lex returns a pagination token in the response. - // To fetch the next page of bots, specify the pagination token in the next - // request. 
- NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetBotsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBotsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBotsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NameContains != nil && len(*s.NameContains) < 2 { - invalidParams.Add(request.NewErrParamMinLen("NameContains", 2)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetBotsInput) SetMaxResults(v int64) *GetBotsInput { - s.MaxResults = &v - return s -} - -// SetNameContains sets the NameContains field's value. -func (s *GetBotsInput) SetNameContains(v string) *GetBotsInput { - s.NameContains = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetBotsInput) SetNextToken(v string) *GetBotsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBotsResponse -type GetBotsOutput struct { - _ struct{} `type:"structure"` - - // An array of botMetadata objects, with one entry for each bot. - Bots []*BotMetadata `locationName:"bots" type:"list"` - - // If the response is truncated, it includes a pagination token that you can - // specify in your next request to fetch the next page of bots. 
- NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetBotsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBotsOutput) GoString() string { - return s.String() -} - -// SetBots sets the Bots field's value. -func (s *GetBotsOutput) SetBots(v []*BotMetadata) *GetBotsOutput { - s.Bots = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetBotsOutput) SetNextToken(v string) *GetBotsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinIntentRequest -type GetBuiltinIntentInput struct { - _ struct{} `type:"structure"` - - // The unique identifier for a built-in intent. To find the signature for an - // intent, see Standard Built-in Intents (https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents) - // in the Alexa Skills Kit. - // - // Signature is a required field - Signature *string `location:"uri" locationName:"signature" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBuiltinIntentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBuiltinIntentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBuiltinIntentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBuiltinIntentInput"} - if s.Signature == nil { - invalidParams.Add(request.NewErrParamRequired("Signature")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSignature sets the Signature field's value. 
-func (s *GetBuiltinIntentInput) SetSignature(v string) *GetBuiltinIntentInput { - s.Signature = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinIntentResponse -type GetBuiltinIntentOutput struct { - _ struct{} `type:"structure"` - - // The unique identifier for a built-in intent. - Signature *string `locationName:"signature" type:"string"` - - // An array of BuiltinIntentSlot objects, one entry for each slot type in the - // intent. - Slots []*BuiltinIntentSlot `locationName:"slots" type:"list"` - - // A list of locales that the intent supports. - SupportedLocales []*string `locationName:"supportedLocales" type:"list"` -} - -// String returns the string representation -func (s GetBuiltinIntentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBuiltinIntentOutput) GoString() string { - return s.String() -} - -// SetSignature sets the Signature field's value. -func (s *GetBuiltinIntentOutput) SetSignature(v string) *GetBuiltinIntentOutput { - s.Signature = &v - return s -} - -// SetSlots sets the Slots field's value. -func (s *GetBuiltinIntentOutput) SetSlots(v []*BuiltinIntentSlot) *GetBuiltinIntentOutput { - s.Slots = v - return s -} - -// SetSupportedLocales sets the SupportedLocales field's value. -func (s *GetBuiltinIntentOutput) SetSupportedLocales(v []*string) *GetBuiltinIntentOutput { - s.SupportedLocales = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinIntentsRequest -type GetBuiltinIntentsInput struct { - _ struct{} `type:"structure"` - - // A list of locales that the intent supports. - Locale *string `location:"querystring" locationName:"locale" type:"string" enum:"Locale"` - - // The maximum number of intents to return in the response. The default is 10. 
- MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - // A pagination token that fetches the next page of intents. If this API call - // is truncated, Amazon Lex returns a pagination token in the response. To fetch - // the next page of intents, use the pagination token in the next request. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - - // Substring to match in built-in intent signatures. An intent will be returned - // if any part of its signature matches the substring. For example, "xyz" matches - // both "xyzabc" and "abcxyz." To find the signature for an intent, see Standard - // Built-in Intents (https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents) - // in the Alexa Skills Kit. - SignatureContains *string `location:"querystring" locationName:"signatureContains" type:"string"` -} - -// String returns the string representation -func (s GetBuiltinIntentsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBuiltinIntentsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBuiltinIntentsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBuiltinIntentsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLocale sets the Locale field's value. -func (s *GetBuiltinIntentsInput) SetLocale(v string) *GetBuiltinIntentsInput { - s.Locale = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetBuiltinIntentsInput) SetMaxResults(v int64) *GetBuiltinIntentsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *GetBuiltinIntentsInput) SetNextToken(v string) *GetBuiltinIntentsInput { - s.NextToken = &v - return s -} - -// SetSignatureContains sets the SignatureContains field's value. -func (s *GetBuiltinIntentsInput) SetSignatureContains(v string) *GetBuiltinIntentsInput { - s.SignatureContains = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinIntentsResponse -type GetBuiltinIntentsOutput struct { - _ struct{} `type:"structure"` - - // An array of builtinIntentMetadata objects, one for each intent in the response. - Intents []*BuiltinIntentMetadata `locationName:"intents" type:"list"` - - // A pagination token that fetches the next page of intents. If the response - // to this API call is truncated, Amazon Lex returns a pagination token in the - // response. To fetch the next page of intents, specify the pagination token - // in the next request. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetBuiltinIntentsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBuiltinIntentsOutput) GoString() string { - return s.String() -} - -// SetIntents sets the Intents field's value. -func (s *GetBuiltinIntentsOutput) SetIntents(v []*BuiltinIntentMetadata) *GetBuiltinIntentsOutput { - s.Intents = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetBuiltinIntentsOutput) SetNextToken(v string) *GetBuiltinIntentsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinSlotTypesRequest -type GetBuiltinSlotTypesInput struct { - _ struct{} `type:"structure"` - - // A list of locales that the slot type supports. - Locale *string `location:"querystring" locationName:"locale" type:"string" enum:"Locale"` - - // The maximum number of slot types to return in the response. 
The default is - // 10. - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - // A pagination token that fetches the next page of slot types. If the response - // to this API call is truncated, Amazon Lex returns a pagination token in the - // response. To fetch the next page of slot types, specify the pagination token - // in the next request. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - - // Substring to match in built-in slot type signatures. A slot type will be - // returned if any part of its signature matches the substring. For example, - // "xyz" matches both "xyzabc" and "abcxyz." - SignatureContains *string `location:"querystring" locationName:"signatureContains" type:"string"` -} - -// String returns the string representation -func (s GetBuiltinSlotTypesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBuiltinSlotTypesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBuiltinSlotTypesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBuiltinSlotTypesInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLocale sets the Locale field's value. -func (s *GetBuiltinSlotTypesInput) SetLocale(v string) *GetBuiltinSlotTypesInput { - s.Locale = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetBuiltinSlotTypesInput) SetMaxResults(v int64) *GetBuiltinSlotTypesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *GetBuiltinSlotTypesInput) SetNextToken(v string) *GetBuiltinSlotTypesInput { - s.NextToken = &v - return s -} - -// SetSignatureContains sets the SignatureContains field's value. -func (s *GetBuiltinSlotTypesInput) SetSignatureContains(v string) *GetBuiltinSlotTypesInput { - s.SignatureContains = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinSlotTypesResponse -type GetBuiltinSlotTypesOutput struct { - _ struct{} `type:"structure"` - - // If the response is truncated, the response includes a pagination token that - // you can use in your next request to fetch the next page of slot types. - NextToken *string `locationName:"nextToken" type:"string"` - - // An array of BuiltInSlotTypeMetadata objects, one entry for each slot type - // returned. - SlotTypes []*BuiltinSlotTypeMetadata `locationName:"slotTypes" type:"list"` -} - -// String returns the string representation -func (s GetBuiltinSlotTypesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBuiltinSlotTypesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *GetBuiltinSlotTypesOutput) SetNextToken(v string) *GetBuiltinSlotTypesOutput { - s.NextToken = &v - return s -} - -// SetSlotTypes sets the SlotTypes field's value. -func (s *GetBuiltinSlotTypesOutput) SetSlotTypes(v []*BuiltinSlotTypeMetadata) *GetBuiltinSlotTypesOutput { - s.SlotTypes = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetExportRequest -type GetExportInput struct { - _ struct{} `type:"structure"` - - // The format of the exported data. - // - // ExportType is a required field - ExportType *string `location:"querystring" locationName:"exportType" type:"string" required:"true" enum:"ExportType"` - - // The name of the bot to export. 
- // - // Name is a required field - Name *string `location:"querystring" locationName:"name" min:"1" type:"string" required:"true"` - - // The type of resource to export. - // - // ResourceType is a required field - ResourceType *string `location:"querystring" locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` - - // The version of the bot to export. - // - // Version is a required field - Version *string `location:"querystring" locationName:"version" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetExportInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetExportInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetExportInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetExportInput"} - if s.ExportType == nil { - invalidParams.Add(request.NewErrParamRequired("ExportType")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.ResourceType == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceType")) - } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetExportType sets the ExportType field's value. -func (s *GetExportInput) SetExportType(v string) *GetExportInput { - s.ExportType = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetExportInput) SetName(v string) *GetExportInput { - s.Name = &v - return s -} - -// SetResourceType sets the ResourceType field's value. 
-func (s *GetExportInput) SetResourceType(v string) *GetExportInput { - s.ResourceType = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *GetExportInput) SetVersion(v string) *GetExportInput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetExportResponse -type GetExportOutput struct { - _ struct{} `type:"structure"` - - // The status of the export. - // - // * IN_PROGRESS - The export is in progress. - // - // * READY - The export is complete. - // - // * FAILED - The export could not be completed. - ExportStatus *string `locationName:"exportStatus" type:"string" enum:"ExportStatus"` - - // The format of the exported data. - ExportType *string `locationName:"exportType" type:"string" enum:"ExportType"` - - // If status is FAILED, Amazon Lex provides the reason that it failed to export - // the resource. - FailureReason *string `locationName:"failureReason" type:"string"` - - // The name of the bot being exported. - Name *string `locationName:"name" min:"1" type:"string"` - - // The type of the exported resource. - ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` - - // An S3 pre-signed URL that provides the location of the exported resource. - // The exported resource is a ZIP archive that contains the exported resource - // in JSON format. The structure of the archive may change. Your code should - // not rely on the archive structure. - Url *string `locationName:"url" type:"string"` - - // The version of the bot being exported. - Version *string `locationName:"version" min:"1" type:"string"` -} - -// String returns the string representation -func (s GetExportOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetExportOutput) GoString() string { - return s.String() -} - -// SetExportStatus sets the ExportStatus field's value. 
-func (s *GetExportOutput) SetExportStatus(v string) *GetExportOutput { - s.ExportStatus = &v - return s -} - -// SetExportType sets the ExportType field's value. -func (s *GetExportOutput) SetExportType(v string) *GetExportOutput { - s.ExportType = &v - return s -} - -// SetFailureReason sets the FailureReason field's value. -func (s *GetExportOutput) SetFailureReason(v string) *GetExportOutput { - s.FailureReason = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetExportOutput) SetName(v string) *GetExportOutput { - s.Name = &v - return s -} - -// SetResourceType sets the ResourceType field's value. -func (s *GetExportOutput) SetResourceType(v string) *GetExportOutput { - s.ResourceType = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *GetExportOutput) SetUrl(v string) *GetExportOutput { - s.Url = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *GetExportOutput) SetVersion(v string) *GetExportOutput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntentRequest -type GetIntentInput struct { - _ struct{} `type:"structure"` - - // The name of the intent. The name is case sensitive. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` - - // The version of the intent. - // - // Version is a required field - Version *string `location:"uri" locationName:"version" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetIntentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetIntentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetIntentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetIntentInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetIntentInput) SetName(v string) *GetIntentInput { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *GetIntentInput) SetVersion(v string) *GetIntentInput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntentResponse -type GetIntentOutput struct { - _ struct{} `type:"structure"` - - // Checksum of the intent. - Checksum *string `locationName:"checksum" type:"string"` - - // After the Lambda function specified in the fulfillmentActivity element fulfills - // the intent, Amazon Lex conveys this statement to the user. - ConclusionStatement *Statement `locationName:"conclusionStatement" type:"structure"` - - // If defined in the bot, Amazon Lex uses prompt to confirm the intent before - // fulfilling the user's request. For more information, see PutIntent. - ConfirmationPrompt *Prompt `locationName:"confirmationPrompt" type:"structure"` - - // The date that the intent was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the intent. - Description *string `locationName:"description" type:"string"` - - // If defined in the bot, Amazon Amazon Lex invokes this Lambda function for - // each user input. For more information, see PutIntent. 
- DialogCodeHook *CodeHook `locationName:"dialogCodeHook" type:"structure"` - - // If defined in the bot, Amazon Lex uses this prompt to solicit additional - // user activity after the intent is fulfilled. For more information, see PutIntent. - FollowUpPrompt *FollowUpPrompt `locationName:"followUpPrompt" type:"structure"` - - // Describes how the intent is fulfilled. For more information, see PutIntent. - FulfillmentActivity *FulfillmentActivity `locationName:"fulfillmentActivity" type:"structure"` - - // The date that the intent was updated. When you create a resource, the creation - // date and the last updated date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the intent. - Name *string `locationName:"name" min:"1" type:"string"` - - // A unique identifier for a built-in intent. - ParentIntentSignature *string `locationName:"parentIntentSignature" type:"string"` - - // If the user answers "no" to the question defined in confirmationPrompt, Amazon - // Lex responds with this statement to acknowledge that the intent was canceled. - RejectionStatement *Statement `locationName:"rejectionStatement" type:"structure"` - - // An array of sample utterances configured for the intent. - SampleUtterances []*string `locationName:"sampleUtterances" type:"list"` - - // An array of intent slots configured for the intent. - Slots []*Slot `locationName:"slots" type:"list"` - - // The version of the intent. - Version *string `locationName:"version" min:"1" type:"string"` -} - -// String returns the string representation -func (s GetIntentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetIntentOutput) GoString() string { - return s.String() -} - -// SetChecksum sets the Checksum field's value. 
-func (s *GetIntentOutput) SetChecksum(v string) *GetIntentOutput { - s.Checksum = &v - return s -} - -// SetConclusionStatement sets the ConclusionStatement field's value. -func (s *GetIntentOutput) SetConclusionStatement(v *Statement) *GetIntentOutput { - s.ConclusionStatement = v - return s -} - -// SetConfirmationPrompt sets the ConfirmationPrompt field's value. -func (s *GetIntentOutput) SetConfirmationPrompt(v *Prompt) *GetIntentOutput { - s.ConfirmationPrompt = v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *GetIntentOutput) SetCreatedDate(v time.Time) *GetIntentOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *GetIntentOutput) SetDescription(v string) *GetIntentOutput { - s.Description = &v - return s -} - -// SetDialogCodeHook sets the DialogCodeHook field's value. -func (s *GetIntentOutput) SetDialogCodeHook(v *CodeHook) *GetIntentOutput { - s.DialogCodeHook = v - return s -} - -// SetFollowUpPrompt sets the FollowUpPrompt field's value. -func (s *GetIntentOutput) SetFollowUpPrompt(v *FollowUpPrompt) *GetIntentOutput { - s.FollowUpPrompt = v - return s -} - -// SetFulfillmentActivity sets the FulfillmentActivity field's value. -func (s *GetIntentOutput) SetFulfillmentActivity(v *FulfillmentActivity) *GetIntentOutput { - s.FulfillmentActivity = v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *GetIntentOutput) SetLastUpdatedDate(v time.Time) *GetIntentOutput { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetIntentOutput) SetName(v string) *GetIntentOutput { - s.Name = &v - return s -} - -// SetParentIntentSignature sets the ParentIntentSignature field's value. -func (s *GetIntentOutput) SetParentIntentSignature(v string) *GetIntentOutput { - s.ParentIntentSignature = &v - return s -} - -// SetRejectionStatement sets the RejectionStatement field's value. 
-func (s *GetIntentOutput) SetRejectionStatement(v *Statement) *GetIntentOutput { - s.RejectionStatement = v - return s -} - -// SetSampleUtterances sets the SampleUtterances field's value. -func (s *GetIntentOutput) SetSampleUtterances(v []*string) *GetIntentOutput { - s.SampleUtterances = v - return s -} - -// SetSlots sets the Slots field's value. -func (s *GetIntentOutput) SetSlots(v []*Slot) *GetIntentOutput { - s.Slots = v - return s -} - -// SetVersion sets the Version field's value. -func (s *GetIntentOutput) SetVersion(v string) *GetIntentOutput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntentVersionsRequest -type GetIntentVersionsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of intent versions to return in the response. The default - // is 10. - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - // The name of the intent for which versions should be returned. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` - - // A pagination token for fetching the next page of intent versions. If the - // response to this call is truncated, Amazon Lex returns a pagination token - // in the response. To fetch the next page of versions, specify the pagination - // token in the next request. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetIntentVersionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetIntentVersionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetIntentVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetIntentVersionsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetIntentVersionsInput) SetMaxResults(v int64) *GetIntentVersionsInput { - s.MaxResults = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetIntentVersionsInput) SetName(v string) *GetIntentVersionsInput { - s.Name = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetIntentVersionsInput) SetNextToken(v string) *GetIntentVersionsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntentVersionsResponse -type GetIntentVersionsOutput struct { - _ struct{} `type:"structure"` - - // An array of IntentMetadata objects, one for each numbered version of the - // intent plus one for the $LATEST version. - Intents []*IntentMetadata `locationName:"intents" type:"list"` - - // A pagination token for fetching the next page of intent versions. If the - // response to this call is truncated, Amazon Lex returns a pagination token - // in the response. To fetch the next page of versions, specify the pagination - // token in the next request. 
- NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetIntentVersionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetIntentVersionsOutput) GoString() string { - return s.String() -} - -// SetIntents sets the Intents field's value. -func (s *GetIntentVersionsOutput) SetIntents(v []*IntentMetadata) *GetIntentVersionsOutput { - s.Intents = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetIntentVersionsOutput) SetNextToken(v string) *GetIntentVersionsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntentsRequest -type GetIntentsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of intents to return in the response. The default is 10. - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - // Substring to match in intent names. An intent will be returned if any part - // of its name matches the substring. For example, "xyz" matches both "xyzabc" - // and "abcxyz." - NameContains *string `location:"querystring" locationName:"nameContains" min:"1" type:"string"` - - // A pagination token that fetches the next page of intents. If the response - // to this API call is truncated, Amazon Lex returns a pagination token in the - // response. To fetch the next page of intents, specify the pagination token - // in the next request. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetIntentsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetIntentsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetIntentsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetIntentsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NameContains != nil && len(*s.NameContains) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NameContains", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetIntentsInput) SetMaxResults(v int64) *GetIntentsInput { - s.MaxResults = &v - return s -} - -// SetNameContains sets the NameContains field's value. -func (s *GetIntentsInput) SetNameContains(v string) *GetIntentsInput { - s.NameContains = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetIntentsInput) SetNextToken(v string) *GetIntentsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntentsResponse -type GetIntentsOutput struct { - _ struct{} `type:"structure"` - - // An array of Intent objects. For more information, see PutBot. - Intents []*IntentMetadata `locationName:"intents" type:"list"` - - // If the response is truncated, the response includes a pagination token that - // you can specify in your next request to fetch the next page of intents. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetIntentsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetIntentsOutput) GoString() string { - return s.String() -} - -// SetIntents sets the Intents field's value. -func (s *GetIntentsOutput) SetIntents(v []*IntentMetadata) *GetIntentsOutput { - s.Intents = v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *GetIntentsOutput) SetNextToken(v string) *GetIntentsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotTypeRequest -type GetSlotTypeInput struct { - _ struct{} `type:"structure"` - - // The name of the slot type. The name is case sensitive. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` - - // The version of the slot type. - // - // Version is a required field - Version *string `location:"uri" locationName:"version" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetSlotTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSlotTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSlotTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSlotTypeInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetSlotTypeInput) SetName(v string) *GetSlotTypeInput { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. 
-func (s *GetSlotTypeInput) SetVersion(v string) *GetSlotTypeInput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotTypeResponse -type GetSlotTypeOutput struct { - _ struct{} `type:"structure"` - - // Checksum of the $LATEST version of the slot type. - Checksum *string `locationName:"checksum" type:"string"` - - // The date that the slot type was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the slot type. - Description *string `locationName:"description" type:"string"` - - // A list of EnumerationValue objects that defines the values that the slot - // type can take. - EnumerationValues []*EnumerationValue `locationName:"enumerationValues" min:"1" type:"list"` - - // The date that the slot type was updated. When you create a resource, the - // creation date and last update date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the slot type. - Name *string `locationName:"name" min:"1" type:"string"` - - // The strategy that Amazon Lex uses to determine the value of the slot. For - // more information, see PutSlotType. - ValueSelectionStrategy *string `locationName:"valueSelectionStrategy" type:"string" enum:"SlotValueSelectionStrategy"` - - // The version of the slot type. - Version *string `locationName:"version" min:"1" type:"string"` -} - -// String returns the string representation -func (s GetSlotTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSlotTypeOutput) GoString() string { - return s.String() -} - -// SetChecksum sets the Checksum field's value. -func (s *GetSlotTypeOutput) SetChecksum(v string) *GetSlotTypeOutput { - s.Checksum = &v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. 
-func (s *GetSlotTypeOutput) SetCreatedDate(v time.Time) *GetSlotTypeOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *GetSlotTypeOutput) SetDescription(v string) *GetSlotTypeOutput { - s.Description = &v - return s -} - -// SetEnumerationValues sets the EnumerationValues field's value. -func (s *GetSlotTypeOutput) SetEnumerationValues(v []*EnumerationValue) *GetSlotTypeOutput { - s.EnumerationValues = v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *GetSlotTypeOutput) SetLastUpdatedDate(v time.Time) *GetSlotTypeOutput { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetSlotTypeOutput) SetName(v string) *GetSlotTypeOutput { - s.Name = &v - return s -} - -// SetValueSelectionStrategy sets the ValueSelectionStrategy field's value. -func (s *GetSlotTypeOutput) SetValueSelectionStrategy(v string) *GetSlotTypeOutput { - s.ValueSelectionStrategy = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *GetSlotTypeOutput) SetVersion(v string) *GetSlotTypeOutput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotTypeVersionsRequest -type GetSlotTypeVersionsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of slot type versions to return in the response. The default - // is 10. - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - // The name of the slot type for which versions should be returned. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` - - // A pagination token for fetching the next page of slot type versions. If the - // response to this call is truncated, Amazon Lex returns a pagination token - // in the response. 
To fetch the next page of versions, specify the pagination - // token in the next request. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetSlotTypeVersionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSlotTypeVersionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSlotTypeVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSlotTypeVersionsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetSlotTypeVersionsInput) SetMaxResults(v int64) *GetSlotTypeVersionsInput { - s.MaxResults = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetSlotTypeVersionsInput) SetName(v string) *GetSlotTypeVersionsInput { - s.Name = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetSlotTypeVersionsInput) SetNextToken(v string) *GetSlotTypeVersionsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotTypeVersionsResponse -type GetSlotTypeVersionsOutput struct { - _ struct{} `type:"structure"` - - // A pagination token for fetching the next page of slot type versions. If the - // response to this call is truncated, Amazon Lex returns a pagination token - // in the response. To fetch the next page of versions, specify the pagination - // token in the next request. 
- NextToken *string `locationName:"nextToken" type:"string"` - - // An array of SlotTypeMetadata objects, one for each numbered version of the - // slot type plus one for the $LATEST version. - SlotTypes []*SlotTypeMetadata `locationName:"slotTypes" type:"list"` -} - -// String returns the string representation -func (s GetSlotTypeVersionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSlotTypeVersionsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *GetSlotTypeVersionsOutput) SetNextToken(v string) *GetSlotTypeVersionsOutput { - s.NextToken = &v - return s -} - -// SetSlotTypes sets the SlotTypes field's value. -func (s *GetSlotTypeVersionsOutput) SetSlotTypes(v []*SlotTypeMetadata) *GetSlotTypeVersionsOutput { - s.SlotTypes = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotTypesRequest -type GetSlotTypesInput struct { - _ struct{} `type:"structure"` - - // The maximum number of slot types to return in the response. The default is - // 10. - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - // Substring to match in slot type names. A slot type will be returned if any - // part of its name matches the substring. For example, "xyz" matches both "xyzabc" - // and "abcxyz." - NameContains *string `location:"querystring" locationName:"nameContains" min:"1" type:"string"` - - // A pagination token that fetches the next page of slot types. If the response - // to this API call is truncated, Amazon Lex returns a pagination token in the - // response. To fetch next page of slot types, specify the pagination token - // in the next request. 
- NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s GetSlotTypesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSlotTypesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSlotTypesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSlotTypesInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NameContains != nil && len(*s.NameContains) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NameContains", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetSlotTypesInput) SetMaxResults(v int64) *GetSlotTypesInput { - s.MaxResults = &v - return s -} - -// SetNameContains sets the NameContains field's value. -func (s *GetSlotTypesInput) SetNameContains(v string) *GetSlotTypesInput { - s.NameContains = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetSlotTypesInput) SetNextToken(v string) *GetSlotTypesInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetSlotTypesResponse -type GetSlotTypesOutput struct { - _ struct{} `type:"structure"` - - // If the response is truncated, it includes a pagination token that you can - // specify in your next request to fetch the next page of slot types. - NextToken *string `locationName:"nextToken" type:"string"` - - // An array of objects, one for each slot type, that provides information such - // as the name of the slot type, the version, and a description. 
- SlotTypes []*SlotTypeMetadata `locationName:"slotTypes" type:"list"` -} - -// String returns the string representation -func (s GetSlotTypesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSlotTypesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *GetSlotTypesOutput) SetNextToken(v string) *GetSlotTypesOutput { - s.NextToken = &v - return s -} - -// SetSlotTypes sets the SlotTypes field's value. -func (s *GetSlotTypesOutput) SetSlotTypes(v []*SlotTypeMetadata) *GetSlotTypesOutput { - s.SlotTypes = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetUtterancesViewRequest -type GetUtterancesViewInput struct { - _ struct{} `type:"structure"` - - // The name of the bot for which utterance information should be returned. - // - // BotName is a required field - BotName *string `location:"uri" locationName:"botname" min:"2" type:"string" required:"true"` - - // An array of bot versions for which utterance information should be returned. - // The limit is 5 versions per request. - // - // BotVersions is a required field - BotVersions []*string `location:"querystring" locationName:"bot_versions" min:"1" type:"list" required:"true"` - - // To return utterances that were recognized and handled, useDetected. To return - // utterances that were not recognized, use Missed. - // - // StatusType is a required field - StatusType *string `location:"querystring" locationName:"status_type" type:"string" required:"true" enum:"StatusType"` -} - -// String returns the string representation -func (s GetUtterancesViewInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetUtterancesViewInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetUtterancesViewInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetUtterancesViewInput"} - if s.BotName == nil { - invalidParams.Add(request.NewErrParamRequired("BotName")) - } - if s.BotName != nil && len(*s.BotName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("BotName", 2)) - } - if s.BotVersions == nil { - invalidParams.Add(request.NewErrParamRequired("BotVersions")) - } - if s.BotVersions != nil && len(s.BotVersions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("BotVersions", 1)) - } - if s.StatusType == nil { - invalidParams.Add(request.NewErrParamRequired("StatusType")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBotName sets the BotName field's value. -func (s *GetUtterancesViewInput) SetBotName(v string) *GetUtterancesViewInput { - s.BotName = &v - return s -} - -// SetBotVersions sets the BotVersions field's value. -func (s *GetUtterancesViewInput) SetBotVersions(v []*string) *GetUtterancesViewInput { - s.BotVersions = v - return s -} - -// SetStatusType sets the StatusType field's value. -func (s *GetUtterancesViewInput) SetStatusType(v string) *GetUtterancesViewInput { - s.StatusType = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetUtterancesViewResponse -type GetUtterancesViewOutput struct { - _ struct{} `type:"structure"` - - // The name of the bot for which utterance information was returned. - BotName *string `locationName:"botName" min:"2" type:"string"` - - // An array of UtteranceList objects, each containing a list of UtteranceData - // objects describing the utterances that were processed by your bot. The response - // contains a maximum of 100 UtteranceData objects for each version. 
- Utterances []*UtteranceList `locationName:"utterances" type:"list"` -} - -// String returns the string representation -func (s GetUtterancesViewOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetUtterancesViewOutput) GoString() string { - return s.String() -} - -// SetBotName sets the BotName field's value. -func (s *GetUtterancesViewOutput) SetBotName(v string) *GetUtterancesViewOutput { - s.BotName = &v - return s -} - -// SetUtterances sets the Utterances field's value. -func (s *GetUtterancesViewOutput) SetUtterances(v []*UtteranceList) *GetUtterancesViewOutput { - s.Utterances = v - return s -} - -// Identifies the specific version of an intent. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/Intent -type Intent struct { - _ struct{} `type:"structure"` - - // The name of the intent. - // - // IntentName is a required field - IntentName *string `locationName:"intentName" min:"1" type:"string" required:"true"` - - // The version of the intent. - // - // IntentVersion is a required field - IntentVersion *string `locationName:"intentVersion" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s Intent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Intent) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *Intent) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Intent"} - if s.IntentName == nil { - invalidParams.Add(request.NewErrParamRequired("IntentName")) - } - if s.IntentName != nil && len(*s.IntentName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("IntentName", 1)) - } - if s.IntentVersion == nil { - invalidParams.Add(request.NewErrParamRequired("IntentVersion")) - } - if s.IntentVersion != nil && len(*s.IntentVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("IntentVersion", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetIntentName sets the IntentName field's value. -func (s *Intent) SetIntentName(v string) *Intent { - s.IntentName = &v - return s -} - -// SetIntentVersion sets the IntentVersion field's value. -func (s *Intent) SetIntentVersion(v string) *Intent { - s.IntentVersion = &v - return s -} - -// Provides information about an intent. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/IntentMetadata -type IntentMetadata struct { - _ struct{} `type:"structure"` - - // The date that the intent was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the intent. - Description *string `locationName:"description" type:"string"` - - // The date that the intent was updated. When you create an intent, the creation - // date and last updated date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the intent. - Name *string `locationName:"name" min:"1" type:"string"` - - // The version of the intent. 
- Version *string `locationName:"version" min:"1" type:"string"` -} - -// String returns the string representation -func (s IntentMetadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s IntentMetadata) GoString() string { - return s.String() -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *IntentMetadata) SetCreatedDate(v time.Time) *IntentMetadata { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *IntentMetadata) SetDescription(v string) *IntentMetadata { - s.Description = &v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *IntentMetadata) SetLastUpdatedDate(v time.Time) *IntentMetadata { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *IntentMetadata) SetName(v string) *IntentMetadata { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *IntentMetadata) SetVersion(v string) *IntentMetadata { - s.Version = &v - return s -} - -// The message object that provides the message text and its type. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/Message -type Message struct { - _ struct{} `type:"structure"` - - // The text of the message. - // - // Content is a required field - Content *string `locationName:"content" min:"1" type:"string" required:"true"` - - // The content type of the message string. - // - // ContentType is a required field - ContentType *string `locationName:"contentType" type:"string" required:"true" enum:"ContentType"` -} - -// String returns the string representation -func (s Message) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Message) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *Message) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Message"} - if s.Content == nil { - invalidParams.Add(request.NewErrParamRequired("Content")) - } - if s.Content != nil && len(*s.Content) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Content", 1)) - } - if s.ContentType == nil { - invalidParams.Add(request.NewErrParamRequired("ContentType")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetContent sets the Content field's value. -func (s *Message) SetContent(v string) *Message { - s.Content = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *Message) SetContentType(v string) *Message { - s.ContentType = &v - return s -} - -// Obtains information from the user. To define a prompt, provide one or more -// messages and specify the number of attempts to get information from the user. -// If you provide more than one message, Amazon Lex chooses one of the messages -// to use to prompt the user. For more information, see how-it-works. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/Prompt -type Prompt struct { - _ struct{} `type:"structure"` - - // The number of times to prompt the user for information. - // - // MaxAttempts is a required field - MaxAttempts *int64 `locationName:"maxAttempts" min:"1" type:"integer" required:"true"` - - // An array of objects, each of which provides a message string and its type. - // You can specify the message string in plain text or in Speech Synthesis Markup - // Language (SSML). - // - // Messages is a required field - Messages []*Message `locationName:"messages" min:"1" type:"list" required:"true"` - - // A response card. Amazon Lex uses this prompt at runtime, in the PostText - // API response. It substitutes session attributes and slot values for placeholders - // in the response card. For more information, see ex-resp-card. 
- ResponseCard *string `locationName:"responseCard" min:"1" type:"string"` -} - -// String returns the string representation -func (s Prompt) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Prompt) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Prompt) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Prompt"} - if s.MaxAttempts == nil { - invalidParams.Add(request.NewErrParamRequired("MaxAttempts")) - } - if s.MaxAttempts != nil && *s.MaxAttempts < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxAttempts", 1)) - } - if s.Messages == nil { - invalidParams.Add(request.NewErrParamRequired("Messages")) - } - if s.Messages != nil && len(s.Messages) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Messages", 1)) - } - if s.ResponseCard != nil && len(*s.ResponseCard) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResponseCard", 1)) - } - if s.Messages != nil { - for i, v := range s.Messages { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Messages", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxAttempts sets the MaxAttempts field's value. -func (s *Prompt) SetMaxAttempts(v int64) *Prompt { - s.MaxAttempts = &v - return s -} - -// SetMessages sets the Messages field's value. -func (s *Prompt) SetMessages(v []*Message) *Prompt { - s.Messages = v - return s -} - -// SetResponseCard sets the ResponseCard field's value. -func (s *Prompt) SetResponseCard(v string) *Prompt { - s.ResponseCard = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutBotAliasRequest -type PutBotAliasInput struct { - _ struct{} `type:"structure"` - - // The name of the bot. 
- // - // BotName is a required field - BotName *string `location:"uri" locationName:"botName" min:"2" type:"string" required:"true"` - - // The version of the bot. - // - // BotVersion is a required field - BotVersion *string `locationName:"botVersion" min:"1" type:"string" required:"true"` - - // Identifies a specific revision of the $LATEST version. - // - // When you create a new bot alias, leave the checksum field blank. If you specify - // a checksum you get a BadRequestException exception. - // - // When you want to update a bot alias, set the checksum field to the checksum - // of the most recent revision of the $LATEST version. If you don't specify - // the checksum field, or if the checksum does not match the $LATEST version, - // you get a PreconditionFailedException exception. - Checksum *string `locationName:"checksum" type:"string"` - - // A description of the alias. - Description *string `locationName:"description" type:"string"` - - // The name of the alias. The name is not case sensitive. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s PutBotAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBotAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBotAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBotAliasInput"} - if s.BotName == nil { - invalidParams.Add(request.NewErrParamRequired("BotName")) - } - if s.BotName != nil && len(*s.BotName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("BotName", 2)) - } - if s.BotVersion == nil { - invalidParams.Add(request.NewErrParamRequired("BotVersion")) - } - if s.BotVersion != nil && len(*s.BotVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("BotVersion", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBotName sets the BotName field's value. -func (s *PutBotAliasInput) SetBotName(v string) *PutBotAliasInput { - s.BotName = &v - return s -} - -// SetBotVersion sets the BotVersion field's value. -func (s *PutBotAliasInput) SetBotVersion(v string) *PutBotAliasInput { - s.BotVersion = &v - return s -} - -// SetChecksum sets the Checksum field's value. -func (s *PutBotAliasInput) SetChecksum(v string) *PutBotAliasInput { - s.Checksum = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *PutBotAliasInput) SetDescription(v string) *PutBotAliasInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *PutBotAliasInput) SetName(v string) *PutBotAliasInput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutBotAliasResponse -type PutBotAliasOutput struct { - _ struct{} `type:"structure"` - - // The name of the bot that the alias points to. - BotName *string `locationName:"botName" min:"2" type:"string"` - - // The version of the bot that the alias points to. 
- BotVersion *string `locationName:"botVersion" min:"1" type:"string"` - - // The checksum for the current version of the alias. - Checksum *string `locationName:"checksum" type:"string"` - - // The date that the bot alias was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the alias. - Description *string `locationName:"description" type:"string"` - - // The date that the bot alias was updated. When you create a resource, the - // creation date and the last updated date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the alias. - Name *string `locationName:"name" min:"1" type:"string"` -} - -// String returns the string representation -func (s PutBotAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBotAliasOutput) GoString() string { - return s.String() -} - -// SetBotName sets the BotName field's value. -func (s *PutBotAliasOutput) SetBotName(v string) *PutBotAliasOutput { - s.BotName = &v - return s -} - -// SetBotVersion sets the BotVersion field's value. -func (s *PutBotAliasOutput) SetBotVersion(v string) *PutBotAliasOutput { - s.BotVersion = &v - return s -} - -// SetChecksum sets the Checksum field's value. -func (s *PutBotAliasOutput) SetChecksum(v string) *PutBotAliasOutput { - s.Checksum = &v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *PutBotAliasOutput) SetCreatedDate(v time.Time) *PutBotAliasOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *PutBotAliasOutput) SetDescription(v string) *PutBotAliasOutput { - s.Description = &v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. 
-func (s *PutBotAliasOutput) SetLastUpdatedDate(v time.Time) *PutBotAliasOutput { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *PutBotAliasOutput) SetName(v string) *PutBotAliasOutput { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutBotRequest -type PutBotInput struct { - _ struct{} `type:"structure"` - - // When Amazon Lex can't understand the user's input in context, it tries to - // elicit the information a few times. After that, Amazon Lex sends the message - // defined in abortStatement to the user, and then aborts the conversation. - // To set the number of retries, use the valueElicitationPrompt field for the - // slot type. - // - // For example, in a pizza ordering bot, Amazon Lex might ask a user "What type - // of crust would you like?" If the user's response is not one of the expected - // responses (for example, "thin crust, "deep dish," etc.), Amazon Lex tries - // to elicit a correct response a few more times. - // - // For example, in a pizza ordering application, OrderPizza might be one of - // the intents. This intent might require the CrustType slot. You specify the - // valueElicitationPrompt field when you create the CrustType slot. - AbortStatement *Statement `locationName:"abortStatement" type:"structure"` - - // Identifies a specific revision of the $LATEST version. - // - // When you create a new bot, leave the checksum field blank. If you specify - // a checksum you get a BadRequestException exception. - // - // When you want to update a bot, set the checksum field to the checksum of - // the most recent revision of the $LATEST version. If you don't specify the - // checksum field, or if the checksum does not match the $LATEST version, you - // get a PreconditionFailedException exception. 
- Checksum *string `locationName:"checksum" type:"string"` - - // For each Amazon Lex bot created with the Amazon Lex Model Building Service, - // you must specify whether your use of Amazon Lex is related to a website, - // program, or other application that is directed or targeted, in whole or in - // part, to children under age 13 and subject to the Children's Online Privacy - // Protection Act (COPPA) by specifying true or false in the childDirected field. - // By specifying true in the childDirected field, you confirm that your use - // of Amazon Lex is related to a website, program, or other application that - // is directed or targeted, in whole or in part, to children under age 13 and - // subject to COPPA. By specifying false in the childDirected field, you confirm - // that your use of Amazon Lex is not related to a website, program, or other - // application that is directed or targeted, in whole or in part, to children - // under age 13 and subject to COPPA. You may not specify a default value for - // the childDirected field that does not accurately reflect whether your use - // of Amazon Lex is related to a website, program, or other application that - // is directed or targeted, in whole or in part, to children under age 13 and - // subject to COPPA. - // - // If your use of Amazon Lex relates to a website, program, or other application - // that is directed in whole or in part, to children under age 13, you must - // obtain any required verifiable parental consent under COPPA. For information - // regarding the use of Amazon Lex in connection with websites, programs, or - // other applications that are directed or targeted, in whole or in part, to - // children under age 13, see the Amazon Lex FAQ. 
(https://aws.amazon.com/lex/faqs#data-security) - // - // ChildDirected is a required field - ChildDirected *bool `locationName:"childDirected" type:"boolean" required:"true"` - - // When Amazon Lex doesn't understand the user's intent, it uses this message - // to get clarification. To specify how many times Amazon Lex should repeate - // the clarification prompt, use the maxAttempts field. If Amazon Lex still - // doesn't understand, it sends the message in the abortStatement field. - // - // When you create a clarification prompt, make sure that it suggests the correct - // response from the user. for example, for a bot that orders pizza and drinks, - // you might create this clarification prompt: "What would you like to do? You - // can say 'Order a pizza' or 'Order a drink.'" - ClarificationPrompt *Prompt `locationName:"clarificationPrompt" type:"structure"` - - // A description of the bot. - Description *string `locationName:"description" type:"string"` - - // The maximum time in seconds that Amazon Lex retains the data gathered in - // a conversation. - // - // A user interaction session remains active for the amount of time specified. - // If no conversation occurs during this time, the session expires and Amazon - // Lex deletes any data provided before the timeout. - // - // For example, suppose that a user chooses the OrderPizza intent, but gets - // sidetracked halfway through placing an order. If the user doesn't complete - // the order within the specified time, Amazon Lex discards the slot information - // that it gathered, and the user must start over. - // - // If you don't include the idleSessionTTLInSeconds element in a PutBot operation - // request, Amazon Lex uses the default value. This is also true if the request - // replaces an existing bot. - // - // The default is 300 seconds (5 minutes). - IdleSessionTTLInSeconds *int64 `locationName:"idleSessionTTLInSeconds" min:"60" type:"integer"` - - // An array of Intent objects. 
Each intent represents a command that a user - // can express. For example, a pizza ordering bot might support an OrderPizza - // intent. For more information, see how-it-works. - Intents []*Intent `locationName:"intents" type:"list"` - - // Specifies the target locale for the bot. Any intent used in the bot must - // be compatible with the locale of the bot. - // - // The default is en-US. - // - // Locale is a required field - Locale *string `locationName:"locale" type:"string" required:"true" enum:"Locale"` - - // The name of the bot. The name is not case sensitive. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"2" type:"string" required:"true"` - - // If you set the processBehavior element to Build, Amazon Lex builds the bot - // so that it can be run. If you set the element to SaveAmazon Lex saves the - // bot, but doesn't build it. - // - // If you don't specify this value, the default value is Save. - ProcessBehavior *string `locationName:"processBehavior" type:"string" enum:"ProcessBehavior"` - - // The Amazon Polly voice ID that you want Amazon Lex to use for voice interactions - // with the user. The locale configured for the voice must match the locale - // of the bot. For more information, see Available Voices (http://docs.aws.amazon.com/polly/latest/dg/voicelist.html) - // in the Amazon Polly Developer Guide. - VoiceId *string `locationName:"voiceId" type:"string"` -} - -// String returns the string representation -func (s PutBotInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBotInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBotInput"} - if s.ChildDirected == nil { - invalidParams.Add(request.NewErrParamRequired("ChildDirected")) - } - if s.IdleSessionTTLInSeconds != nil && *s.IdleSessionTTLInSeconds < 60 { - invalidParams.Add(request.NewErrParamMinValue("IdleSessionTTLInSeconds", 60)) - } - if s.Locale == nil { - invalidParams.Add(request.NewErrParamRequired("Locale")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - if s.AbortStatement != nil { - if err := s.AbortStatement.Validate(); err != nil { - invalidParams.AddNested("AbortStatement", err.(request.ErrInvalidParams)) - } - } - if s.ClarificationPrompt != nil { - if err := s.ClarificationPrompt.Validate(); err != nil { - invalidParams.AddNested("ClarificationPrompt", err.(request.ErrInvalidParams)) - } - } - if s.Intents != nil { - for i, v := range s.Intents { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Intents", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAbortStatement sets the AbortStatement field's value. -func (s *PutBotInput) SetAbortStatement(v *Statement) *PutBotInput { - s.AbortStatement = v - return s -} - -// SetChecksum sets the Checksum field's value. -func (s *PutBotInput) SetChecksum(v string) *PutBotInput { - s.Checksum = &v - return s -} - -// SetChildDirected sets the ChildDirected field's value. -func (s *PutBotInput) SetChildDirected(v bool) *PutBotInput { - s.ChildDirected = &v - return s -} - -// SetClarificationPrompt sets the ClarificationPrompt field's value. 
-func (s *PutBotInput) SetClarificationPrompt(v *Prompt) *PutBotInput { - s.ClarificationPrompt = v - return s -} - -// SetDescription sets the Description field's value. -func (s *PutBotInput) SetDescription(v string) *PutBotInput { - s.Description = &v - return s -} - -// SetIdleSessionTTLInSeconds sets the IdleSessionTTLInSeconds field's value. -func (s *PutBotInput) SetIdleSessionTTLInSeconds(v int64) *PutBotInput { - s.IdleSessionTTLInSeconds = &v - return s -} - -// SetIntents sets the Intents field's value. -func (s *PutBotInput) SetIntents(v []*Intent) *PutBotInput { - s.Intents = v - return s -} - -// SetLocale sets the Locale field's value. -func (s *PutBotInput) SetLocale(v string) *PutBotInput { - s.Locale = &v - return s -} - -// SetName sets the Name field's value. -func (s *PutBotInput) SetName(v string) *PutBotInput { - s.Name = &v - return s -} - -// SetProcessBehavior sets the ProcessBehavior field's value. -func (s *PutBotInput) SetProcessBehavior(v string) *PutBotInput { - s.ProcessBehavior = &v - return s -} - -// SetVoiceId sets the VoiceId field's value. -func (s *PutBotInput) SetVoiceId(v string) *PutBotInput { - s.VoiceId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutBotResponse -type PutBotOutput struct { - _ struct{} `type:"structure"` - - // The message that Amazon Lex uses to abort a conversation. For more information, - // see PutBot. - AbortStatement *Statement `locationName:"abortStatement" type:"structure"` - - // Checksum of the bot that you created. 
- Checksum *string `locationName:"checksum" type:"string"` - - // For each Amazon Lex bot created with the Amazon Lex Model Building Service, - // you must specify whether your use of Amazon Lex is related to a website, - // program, or other application that is directed or targeted, in whole or in - // part, to children under age 13 and subject to the Children's Online Privacy - // Protection Act (COPPA) by specifying true or false in the childDirected field. - // By specifying true in the childDirected field, you confirm that your use - // of Amazon Lex is related to a website, program, or other application that - // is directed or targeted, in whole or in part, to children under age 13 and - // subject to COPPA. By specifying false in the childDirected field, you confirm - // that your use of Amazon Lex is not related to a website, program, or other - // application that is directed or targeted, in whole or in part, to children - // under age 13 and subject to COPPA. You may not specify a default value for - // the childDirected field that does not accurately reflect whether your use - // of Amazon Lex is related to a website, program, or other application that - // is directed or targeted, in whole or in part, to children under age 13 and - // subject to COPPA. - // - // If your use of Amazon Lex relates to a website, program, or other application - // that is directed in whole or in part, to children under age 13, you must - // obtain any required verifiable parental consent under COPPA. For information - // regarding the use of Amazon Lex in connection with websites, programs, or - // other applications that are directed or targeted, in whole or in part, to - // children under age 13, see the Amazon Lex FAQ. (https://aws.amazon.com/lex/faqs#data-security) - ChildDirected *bool `locationName:"childDirected" type:"boolean"` - - // The prompts that Amazon Lex uses when it doesn't understand the user's intent. - // For more information, see PutBot. 
- ClarificationPrompt *Prompt `locationName:"clarificationPrompt" type:"structure"` - - // The date that the bot was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the bot. - Description *string `locationName:"description" type:"string"` - - // If status is FAILED, Amazon Lex provides the reason that it failed to build - // the bot. - FailureReason *string `locationName:"failureReason" type:"string"` - - // The maximum length of time that Amazon Lex retains the data gathered in a - // conversation. For more information, see PutBot. - IdleSessionTTLInSeconds *int64 `locationName:"idleSessionTTLInSeconds" min:"60" type:"integer"` - - // An array of Intent objects. For more information, see PutBot. - Intents []*Intent `locationName:"intents" type:"list"` - - // The date that the bot was updated. When you create a resource, the creation - // date and last updated date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The target locale for the bot. - Locale *string `locationName:"locale" type:"string" enum:"Locale"` - - // The name of the bot. - Name *string `locationName:"name" min:"2" type:"string"` - - // When you send a request to create a bot with processBehavior set to BUILD, - // Amazon Lex sets the status response element to BUILDING. After Amazon Lex - // builds the bot, it sets status to READY. If Amazon Lex can't build the bot, - // Amazon Lex sets status to FAILED. Amazon Lex returns the reason for the failure - // in the failureReason response element. - // - // When you set processBehaviorto SAVE, Amazon Lex sets the status code to NOT - // BUILT. - Status *string `locationName:"status" type:"string" enum:"Status"` - - // The version of the bot. For a new bot, the version is always $LATEST. 
- Version *string `locationName:"version" min:"1" type:"string"` - - // The Amazon Polly voice ID that Amazon Lex uses for voice interaction with - // the user. For more information, see PutBot. - VoiceId *string `locationName:"voiceId" type:"string"` -} - -// String returns the string representation -func (s PutBotOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBotOutput) GoString() string { - return s.String() -} - -// SetAbortStatement sets the AbortStatement field's value. -func (s *PutBotOutput) SetAbortStatement(v *Statement) *PutBotOutput { - s.AbortStatement = v - return s -} - -// SetChecksum sets the Checksum field's value. -func (s *PutBotOutput) SetChecksum(v string) *PutBotOutput { - s.Checksum = &v - return s -} - -// SetChildDirected sets the ChildDirected field's value. -func (s *PutBotOutput) SetChildDirected(v bool) *PutBotOutput { - s.ChildDirected = &v - return s -} - -// SetClarificationPrompt sets the ClarificationPrompt field's value. -func (s *PutBotOutput) SetClarificationPrompt(v *Prompt) *PutBotOutput { - s.ClarificationPrompt = v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *PutBotOutput) SetCreatedDate(v time.Time) *PutBotOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *PutBotOutput) SetDescription(v string) *PutBotOutput { - s.Description = &v - return s -} - -// SetFailureReason sets the FailureReason field's value. -func (s *PutBotOutput) SetFailureReason(v string) *PutBotOutput { - s.FailureReason = &v - return s -} - -// SetIdleSessionTTLInSeconds sets the IdleSessionTTLInSeconds field's value. -func (s *PutBotOutput) SetIdleSessionTTLInSeconds(v int64) *PutBotOutput { - s.IdleSessionTTLInSeconds = &v - return s -} - -// SetIntents sets the Intents field's value. 
-func (s *PutBotOutput) SetIntents(v []*Intent) *PutBotOutput { - s.Intents = v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *PutBotOutput) SetLastUpdatedDate(v time.Time) *PutBotOutput { - s.LastUpdatedDate = &v - return s -} - -// SetLocale sets the Locale field's value. -func (s *PutBotOutput) SetLocale(v string) *PutBotOutput { - s.Locale = &v - return s -} - -// SetName sets the Name field's value. -func (s *PutBotOutput) SetName(v string) *PutBotOutput { - s.Name = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *PutBotOutput) SetStatus(v string) *PutBotOutput { - s.Status = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *PutBotOutput) SetVersion(v string) *PutBotOutput { - s.Version = &v - return s -} - -// SetVoiceId sets the VoiceId field's value. -func (s *PutBotOutput) SetVoiceId(v string) *PutBotOutput { - s.VoiceId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutIntentRequest -type PutIntentInput struct { - _ struct{} `type:"structure"` - - // Identifies a specific revision of the $LATEST version. - // - // When you create a new intent, leave the checksum field blank. If you specify - // a checksum you get a BadRequestException exception. - // - // When you want to update a intent, set the checksum field to the checksum - // of the most recent revision of the $LATEST version. If you don't specify - // the checksum field, or if the checksum does not match the $LATEST version, - // you get a PreconditionFailedException exception. - Checksum *string `locationName:"checksum" type:"string"` - - // The statement that you want Amazon Lex to convey to the user after the intent - // is successfully fulfilled by the Lambda function. - // - // This element is relevant only if you provide a Lambda function in the fulfillmentActivity. 
- // If you return the intent to the client application, you can't specify this - // element. - // - // The followUpPrompt and conclusionStatement are mutually exclusive. You can - // specify only one. - ConclusionStatement *Statement `locationName:"conclusionStatement" type:"structure"` - - // Prompts the user to confirm the intent. This question should have a yes or - // no answer. - // - // Amazon Lex uses this prompt to ensure that the user acknowledges that the - // intent is ready for fulfillment. For example, with the OrderPizza intent, - // you might want to confirm that the order is correct before placing it. For - // other intents, such as intents that simply respond to user questions, you - // might not need to ask the user for confirmation before providing the information. - // - // You you must provide both the rejectionStatement and the confirmationPrompt, - // or neither. - ConfirmationPrompt *Prompt `locationName:"confirmationPrompt" type:"structure"` - - // A description of the intent. - Description *string `locationName:"description" type:"string"` - - // Specifies a Lambda function to invoke for each user input. You can invoke - // this Lambda function to personalize user interaction. - // - // For example, suppose your bot determines that the user is John. Your Lambda - // function might retrieve John's information from a backend database and prepopulate - // some of the values. For example, if you find that John is gluten intolerant, - // you might set the corresponding intent slot, GlutenIntolerant, to true. You - // might find John's phone number and set the corresponding session attribute. - DialogCodeHook *CodeHook `locationName:"dialogCodeHook" type:"structure"` - - // Amazon Lex uses this prompt to solicit additional activity after fulfilling - // an intent. For example, after the OrderPizza intent is fulfilled, you might - // prompt the user to order a drink. 
- // - // The action that Amazon Lex takes depends on the user's response, as follows: - // - // * If the user says "Yes" it responds with the clarification prompt that - // is configured for the bot. - // - // * if the user says "Yes" and continues with an utterance that triggers - // an intent it starts a conversation for the intent. - // - // * If the user says "No" it responds with the rejection statement configured - // for the the follow-up prompt. - // - // * If it doesn't recognize the utterance it repeats the follow-up prompt - // again. - // - // The followUpPrompt field and the conclusionStatement field are mutually exclusive. - // You can specify only one. - FollowUpPrompt *FollowUpPrompt `locationName:"followUpPrompt" type:"structure"` - - // Required. Describes how the intent is fulfilled. For example, after a user - // provides all of the information for a pizza order, fulfillmentActivity defines - // how the bot places an order with a local pizza store. - // - // You might configure Amazon Lex to return all of the intent information to - // the client application, or direct it to invoke a Lambda function that can - // process the intent (for example, place an order with a pizzeria). - FulfillmentActivity *FulfillmentActivity `locationName:"fulfillmentActivity" type:"structure"` - - // The name of the intent. The name is not case sensitive. - // - // The name can't match a built-in intent name, or a built-in intent name with - // "AMAZON." removed. For example, because there is a built-in intent called - // AMAZON.HelpIntent, you can't create a custom intent called HelpIntent. - // - // For a list of built-in intents, see Standard Built-in Intents (https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents) - // in the Alexa Skills Kit. 
- // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` - - // A unique identifier for the built-in intent to base this intent on. To find - // the signature for an intent, see Standard Built-in Intents (https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents) - // in the Alexa Skills Kit. - ParentIntentSignature *string `locationName:"parentIntentSignature" type:"string"` - - // When the user answers "no" to the question defined in confirmationPrompt, - // Amazon Lex responds with this statement to acknowledge that the intent was - // canceled. - // - // You must provide both the rejectionStatement and the confirmationPrompt, - // or neither. - RejectionStatement *Statement `locationName:"rejectionStatement" type:"structure"` - - // An array of utterances (strings) that a user might say to signal the intent. - // For example, "I want {PizzaSize} pizza", "Order {Quantity} {PizzaSize} pizzas". - // - // In each utterance, a slot name is enclosed in curly braces. - SampleUtterances []*string `locationName:"sampleUtterances" type:"list"` - - // An array of intent slots. At runtime, Amazon Lex elicits required slot values - // from the user using prompts defined in the slots. For more information, see - // how-it-works. - Slots []*Slot `locationName:"slots" type:"list"` -} - -// String returns the string representation -func (s PutIntentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutIntentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutIntentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutIntentInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.ConclusionStatement != nil { - if err := s.ConclusionStatement.Validate(); err != nil { - invalidParams.AddNested("ConclusionStatement", err.(request.ErrInvalidParams)) - } - } - if s.ConfirmationPrompt != nil { - if err := s.ConfirmationPrompt.Validate(); err != nil { - invalidParams.AddNested("ConfirmationPrompt", err.(request.ErrInvalidParams)) - } - } - if s.DialogCodeHook != nil { - if err := s.DialogCodeHook.Validate(); err != nil { - invalidParams.AddNested("DialogCodeHook", err.(request.ErrInvalidParams)) - } - } - if s.FollowUpPrompt != nil { - if err := s.FollowUpPrompt.Validate(); err != nil { - invalidParams.AddNested("FollowUpPrompt", err.(request.ErrInvalidParams)) - } - } - if s.FulfillmentActivity != nil { - if err := s.FulfillmentActivity.Validate(); err != nil { - invalidParams.AddNested("FulfillmentActivity", err.(request.ErrInvalidParams)) - } - } - if s.RejectionStatement != nil { - if err := s.RejectionStatement.Validate(); err != nil { - invalidParams.AddNested("RejectionStatement", err.(request.ErrInvalidParams)) - } - } - if s.Slots != nil { - for i, v := range s.Slots { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Slots", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChecksum sets the Checksum field's value. -func (s *PutIntentInput) SetChecksum(v string) *PutIntentInput { - s.Checksum = &v - return s -} - -// SetConclusionStatement sets the ConclusionStatement field's value. 
-func (s *PutIntentInput) SetConclusionStatement(v *Statement) *PutIntentInput { - s.ConclusionStatement = v - return s -} - -// SetConfirmationPrompt sets the ConfirmationPrompt field's value. -func (s *PutIntentInput) SetConfirmationPrompt(v *Prompt) *PutIntentInput { - s.ConfirmationPrompt = v - return s -} - -// SetDescription sets the Description field's value. -func (s *PutIntentInput) SetDescription(v string) *PutIntentInput { - s.Description = &v - return s -} - -// SetDialogCodeHook sets the DialogCodeHook field's value. -func (s *PutIntentInput) SetDialogCodeHook(v *CodeHook) *PutIntentInput { - s.DialogCodeHook = v - return s -} - -// SetFollowUpPrompt sets the FollowUpPrompt field's value. -func (s *PutIntentInput) SetFollowUpPrompt(v *FollowUpPrompt) *PutIntentInput { - s.FollowUpPrompt = v - return s -} - -// SetFulfillmentActivity sets the FulfillmentActivity field's value. -func (s *PutIntentInput) SetFulfillmentActivity(v *FulfillmentActivity) *PutIntentInput { - s.FulfillmentActivity = v - return s -} - -// SetName sets the Name field's value. -func (s *PutIntentInput) SetName(v string) *PutIntentInput { - s.Name = &v - return s -} - -// SetParentIntentSignature sets the ParentIntentSignature field's value. -func (s *PutIntentInput) SetParentIntentSignature(v string) *PutIntentInput { - s.ParentIntentSignature = &v - return s -} - -// SetRejectionStatement sets the RejectionStatement field's value. -func (s *PutIntentInput) SetRejectionStatement(v *Statement) *PutIntentInput { - s.RejectionStatement = v - return s -} - -// SetSampleUtterances sets the SampleUtterances field's value. -func (s *PutIntentInput) SetSampleUtterances(v []*string) *PutIntentInput { - s.SampleUtterances = v - return s -} - -// SetSlots sets the Slots field's value. 
-func (s *PutIntentInput) SetSlots(v []*Slot) *PutIntentInput { - s.Slots = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutIntentResponse -type PutIntentOutput struct { - _ struct{} `type:"structure"` - - // Checksum of the $LATESTversion of the intent created or updated. - Checksum *string `locationName:"checksum" type:"string"` - - // After the Lambda function specified in thefulfillmentActivityintent fulfills - // the intent, Amazon Lex conveys this statement to the user. - ConclusionStatement *Statement `locationName:"conclusionStatement" type:"structure"` - - // If defined in the intent, Amazon Lex prompts the user to confirm the intent - // before fulfilling it. - ConfirmationPrompt *Prompt `locationName:"confirmationPrompt" type:"structure"` - - // The date that the intent was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the intent. - Description *string `locationName:"description" type:"string"` - - // If defined in the intent, Amazon Lex invokes this Lambda function for each - // user input. - DialogCodeHook *CodeHook `locationName:"dialogCodeHook" type:"structure"` - - // If defined in the intent, Amazon Lex uses this prompt to solicit additional - // user activity after the intent is fulfilled. - FollowUpPrompt *FollowUpPrompt `locationName:"followUpPrompt" type:"structure"` - - // If defined in the intent, Amazon Lex invokes this Lambda function to fulfill - // the intent after the user provides all of the information required by the - // intent. - FulfillmentActivity *FulfillmentActivity `locationName:"fulfillmentActivity" type:"structure"` - - // The date that the intent was updated. When you create a resource, the creation - // date and last update dates are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the intent. 
- Name *string `locationName:"name" min:"1" type:"string"` - - // A unique identifier for the built-in intent that this intent is based on. - ParentIntentSignature *string `locationName:"parentIntentSignature" type:"string"` - - // If the user answers "no" to the question defined in confirmationPrompt Amazon - // Lex responds with this statement to acknowledge that the intent was canceled. - RejectionStatement *Statement `locationName:"rejectionStatement" type:"structure"` - - // An array of sample utterances that are configured for the intent. - SampleUtterances []*string `locationName:"sampleUtterances" type:"list"` - - // An array of intent slots that are configured for the intent. - Slots []*Slot `locationName:"slots" type:"list"` - - // The version of the intent. For a new intent, the version is always $LATEST. - Version *string `locationName:"version" min:"1" type:"string"` -} - -// String returns the string representation -func (s PutIntentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutIntentOutput) GoString() string { - return s.String() -} - -// SetChecksum sets the Checksum field's value. -func (s *PutIntentOutput) SetChecksum(v string) *PutIntentOutput { - s.Checksum = &v - return s -} - -// SetConclusionStatement sets the ConclusionStatement field's value. -func (s *PutIntentOutput) SetConclusionStatement(v *Statement) *PutIntentOutput { - s.ConclusionStatement = v - return s -} - -// SetConfirmationPrompt sets the ConfirmationPrompt field's value. -func (s *PutIntentOutput) SetConfirmationPrompt(v *Prompt) *PutIntentOutput { - s.ConfirmationPrompt = v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *PutIntentOutput) SetCreatedDate(v time.Time) *PutIntentOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. 
-func (s *PutIntentOutput) SetDescription(v string) *PutIntentOutput { - s.Description = &v - return s -} - -// SetDialogCodeHook sets the DialogCodeHook field's value. -func (s *PutIntentOutput) SetDialogCodeHook(v *CodeHook) *PutIntentOutput { - s.DialogCodeHook = v - return s -} - -// SetFollowUpPrompt sets the FollowUpPrompt field's value. -func (s *PutIntentOutput) SetFollowUpPrompt(v *FollowUpPrompt) *PutIntentOutput { - s.FollowUpPrompt = v - return s -} - -// SetFulfillmentActivity sets the FulfillmentActivity field's value. -func (s *PutIntentOutput) SetFulfillmentActivity(v *FulfillmentActivity) *PutIntentOutput { - s.FulfillmentActivity = v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *PutIntentOutput) SetLastUpdatedDate(v time.Time) *PutIntentOutput { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *PutIntentOutput) SetName(v string) *PutIntentOutput { - s.Name = &v - return s -} - -// SetParentIntentSignature sets the ParentIntentSignature field's value. -func (s *PutIntentOutput) SetParentIntentSignature(v string) *PutIntentOutput { - s.ParentIntentSignature = &v - return s -} - -// SetRejectionStatement sets the RejectionStatement field's value. -func (s *PutIntentOutput) SetRejectionStatement(v *Statement) *PutIntentOutput { - s.RejectionStatement = v - return s -} - -// SetSampleUtterances sets the SampleUtterances field's value. -func (s *PutIntentOutput) SetSampleUtterances(v []*string) *PutIntentOutput { - s.SampleUtterances = v - return s -} - -// SetSlots sets the Slots field's value. -func (s *PutIntentOutput) SetSlots(v []*Slot) *PutIntentOutput { - s.Slots = v - return s -} - -// SetVersion sets the Version field's value. 
-func (s *PutIntentOutput) SetVersion(v string) *PutIntentOutput { - s.Version = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutSlotTypeRequest -type PutSlotTypeInput struct { - _ struct{} `type:"structure"` - - // Identifies a specific revision of the $LATEST version. - // - // When you create a new slot type, leave the checksum field blank. If you specify - // a checksum you get a BadRequestException exception. - // - // When you want to update a slot type, set the checksum field to the checksum - // of the most recent revision of the $LATEST version. If you don't specify - // the checksum field, or if the checksum does not match the $LATEST version, - // you get a PreconditionFailedException exception. - Checksum *string `locationName:"checksum" type:"string"` - - // A description of the slot type. - Description *string `locationName:"description" type:"string"` - - // A list of EnumerationValue objects that defines the values that the slot - // type can take. Each value can have a list of synonyms, which are additional - // values that help train the machine learning model about the values that it - // resolves for a slot. - // - // When Amazon Lex resolves a slot value, it generates a resolution list that - // contains up to five possible values for the slot. If you are using a Lambda - // function, this resolution list is passed to the function. If you are not - // using a Lambda function you can choose to return the value that the user - // entered or the first value in the resolution list as the slot value. The - // valueSelectionStrategy field indicates the option to use. - EnumerationValues []*EnumerationValue `locationName:"enumerationValues" min:"1" type:"list"` - - // The name of the slot type. The name is not case sensitive. - // - // The name can't match a built-in slot type name, or a built-in slot type name - // with "AMAZON." removed. 
For example, because there is a built-in slot type - // called AMAZON.DATE, you can't create a custom slot type called DATE. - // - // For a list of built-in slot types, see Slot Type Reference (https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/slot-type-reference) - // in the Alexa Skills Kit. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` - - // Determines the slot resolution strategy that Amazon Lex uses to return slot - // type values. The field can be set to one of the following values: - // - // * ORIGINAL_VALUE - Returns the value entered by the user, if the user - // value is similar to the slot value. - // - // * TOP_RESOLUTION - If there is a resolution list for the slot, return - // the first value in the resolution list as the slot type value. If there - // is no resolution list, null is returned. - // - // If you don't specify the valueSelectionStrategy, the default is ORIGINAL_VALUE. - ValueSelectionStrategy *string `locationName:"valueSelectionStrategy" type:"string" enum:"SlotValueSelectionStrategy"` -} - -// String returns the string representation -func (s PutSlotTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutSlotTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutSlotTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutSlotTypeInput"} - if s.EnumerationValues != nil && len(s.EnumerationValues) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EnumerationValues", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.EnumerationValues != nil { - for i, v := range s.EnumerationValues { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EnumerationValues", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChecksum sets the Checksum field's value. -func (s *PutSlotTypeInput) SetChecksum(v string) *PutSlotTypeInput { - s.Checksum = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *PutSlotTypeInput) SetDescription(v string) *PutSlotTypeInput { - s.Description = &v - return s -} - -// SetEnumerationValues sets the EnumerationValues field's value. -func (s *PutSlotTypeInput) SetEnumerationValues(v []*EnumerationValue) *PutSlotTypeInput { - s.EnumerationValues = v - return s -} - -// SetName sets the Name field's value. -func (s *PutSlotTypeInput) SetName(v string) *PutSlotTypeInput { - s.Name = &v - return s -} - -// SetValueSelectionStrategy sets the ValueSelectionStrategy field's value. -func (s *PutSlotTypeInput) SetValueSelectionStrategy(v string) *PutSlotTypeInput { - s.ValueSelectionStrategy = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/PutSlotTypeResponse -type PutSlotTypeOutput struct { - _ struct{} `type:"structure"` - - // Checksum of the $LATEST version of the slot type. - Checksum *string `locationName:"checksum" type:"string"` - - // The date that the slot type was created. 
- CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the slot type. - Description *string `locationName:"description" type:"string"` - - // A list of EnumerationValue objects that defines the values that the slot - // type can take. - EnumerationValues []*EnumerationValue `locationName:"enumerationValues" min:"1" type:"list"` - - // The date that the slot type was updated. When you create a slot type, the - // creation date and last update date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the slot type. - Name *string `locationName:"name" min:"1" type:"string"` - - // The slot resolution strategy that Amazon Lex uses to determine the value - // of the slot. For more information, see PutSlotType. - ValueSelectionStrategy *string `locationName:"valueSelectionStrategy" type:"string" enum:"SlotValueSelectionStrategy"` - - // The version of the slot type. For a new slot type, the version is always - // $LATEST. - Version *string `locationName:"version" min:"1" type:"string"` -} - -// String returns the string representation -func (s PutSlotTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutSlotTypeOutput) GoString() string { - return s.String() -} - -// SetChecksum sets the Checksum field's value. -func (s *PutSlotTypeOutput) SetChecksum(v string) *PutSlotTypeOutput { - s.Checksum = &v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *PutSlotTypeOutput) SetCreatedDate(v time.Time) *PutSlotTypeOutput { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *PutSlotTypeOutput) SetDescription(v string) *PutSlotTypeOutput { - s.Description = &v - return s -} - -// SetEnumerationValues sets the EnumerationValues field's value. 
-func (s *PutSlotTypeOutput) SetEnumerationValues(v []*EnumerationValue) *PutSlotTypeOutput { - s.EnumerationValues = v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *PutSlotTypeOutput) SetLastUpdatedDate(v time.Time) *PutSlotTypeOutput { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *PutSlotTypeOutput) SetName(v string) *PutSlotTypeOutput { - s.Name = &v - return s -} - -// SetValueSelectionStrategy sets the ValueSelectionStrategy field's value. -func (s *PutSlotTypeOutput) SetValueSelectionStrategy(v string) *PutSlotTypeOutput { - s.ValueSelectionStrategy = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *PutSlotTypeOutput) SetVersion(v string) *PutSlotTypeOutput { - s.Version = &v - return s -} - -// Describes the resource that refers to the resource that you are attempting -// to delete. This object is returned as part of the ResourceInUseException -// exception. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/ResourceReference -type ResourceReference struct { - _ struct{} `type:"structure"` - - // The name of the resource that is using the resource that you are trying to - // delete. - Name *string `locationName:"name" min:"1" type:"string"` - - // The version of the resource that is using the resource that you are trying - // to delete. - Version *string `locationName:"version" min:"1" type:"string"` -} - -// String returns the string representation -func (s ResourceReference) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ResourceReference) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *ResourceReference) SetName(v string) *ResourceReference { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. 
-func (s *ResourceReference) SetVersion(v string) *ResourceReference { - s.Version = &v - return s -} - -// Identifies the version of a specific slot. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/Slot -type Slot struct { - _ struct{} `type:"structure"` - - // A description of the slot. - Description *string `locationName:"description" type:"string"` - - // The name of the slot. - // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` - - // Directs Lex the order in which to elicit this slot value from the user. For - // example, if the intent has two slots with priorities 1 and 2, AWS Lex first - // elicits a value for the slot with priority 1. - // - // If multiple slots share the same priority, the order in which Lex elicits - // values is arbitrary. - Priority *int64 `locationName:"priority" type:"integer"` - - // A set of possible responses for the slot type used by text-based clients. - // A user chooses an option from the response card, instead of using text to - // reply. - ResponseCard *string `locationName:"responseCard" min:"1" type:"string"` - - // If you know a specific pattern with which users might respond to an Amazon - // Lex request for a slot value, you can provide those utterances to improve - // accuracy. This is optional. In most cases, Amazon Lex is capable of understanding - // user utterances. - SampleUtterances []*string `locationName:"sampleUtterances" type:"list"` - - // Specifies whether the slot is required or optional. - // - // SlotConstraint is a required field - SlotConstraint *string `locationName:"slotConstraint" type:"string" required:"true" enum:"SlotConstraint"` - - // The type of the slot, either a custom slot type that you defined or one of - // the built-in slot types. - SlotType *string `locationName:"slotType" min:"1" type:"string"` - - // The version of the slot type. 
- SlotTypeVersion *string `locationName:"slotTypeVersion" min:"1" type:"string"` - - // The prompt that Amazon Lex uses to elicit the slot value from the user. - ValueElicitationPrompt *Prompt `locationName:"valueElicitationPrompt" type:"structure"` -} - -// String returns the string representation -func (s Slot) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Slot) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Slot) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Slot"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.ResponseCard != nil && len(*s.ResponseCard) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResponseCard", 1)) - } - if s.SlotConstraint == nil { - invalidParams.Add(request.NewErrParamRequired("SlotConstraint")) - } - if s.SlotType != nil && len(*s.SlotType) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SlotType", 1)) - } - if s.SlotTypeVersion != nil && len(*s.SlotTypeVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SlotTypeVersion", 1)) - } - if s.ValueElicitationPrompt != nil { - if err := s.ValueElicitationPrompt.Validate(); err != nil { - invalidParams.AddNested("ValueElicitationPrompt", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *Slot) SetDescription(v string) *Slot { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *Slot) SetName(v string) *Slot { - s.Name = &v - return s -} - -// SetPriority sets the Priority field's value. 
-func (s *Slot) SetPriority(v int64) *Slot { - s.Priority = &v - return s -} - -// SetResponseCard sets the ResponseCard field's value. -func (s *Slot) SetResponseCard(v string) *Slot { - s.ResponseCard = &v - return s -} - -// SetSampleUtterances sets the SampleUtterances field's value. -func (s *Slot) SetSampleUtterances(v []*string) *Slot { - s.SampleUtterances = v - return s -} - -// SetSlotConstraint sets the SlotConstraint field's value. -func (s *Slot) SetSlotConstraint(v string) *Slot { - s.SlotConstraint = &v - return s -} - -// SetSlotType sets the SlotType field's value. -func (s *Slot) SetSlotType(v string) *Slot { - s.SlotType = &v - return s -} - -// SetSlotTypeVersion sets the SlotTypeVersion field's value. -func (s *Slot) SetSlotTypeVersion(v string) *Slot { - s.SlotTypeVersion = &v - return s -} - -// SetValueElicitationPrompt sets the ValueElicitationPrompt field's value. -func (s *Slot) SetValueElicitationPrompt(v *Prompt) *Slot { - s.ValueElicitationPrompt = v - return s -} - -// Provides information about a slot type.. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/SlotTypeMetadata -type SlotTypeMetadata struct { - _ struct{} `type:"structure"` - - // The date that the slot type was created. - CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` - - // A description of the slot type. - Description *string `locationName:"description" type:"string"` - - // The date that the slot type was updated. When you create a resource, the - // creation date and last updated date are the same. - LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` - - // The name of the slot type. - Name *string `locationName:"name" min:"1" type:"string"` - - // The version of the slot type. 
- Version *string `locationName:"version" min:"1" type:"string"` -} - -// String returns the string representation -func (s SlotTypeMetadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SlotTypeMetadata) GoString() string { - return s.String() -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *SlotTypeMetadata) SetCreatedDate(v time.Time) *SlotTypeMetadata { - s.CreatedDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *SlotTypeMetadata) SetDescription(v string) *SlotTypeMetadata { - s.Description = &v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *SlotTypeMetadata) SetLastUpdatedDate(v time.Time) *SlotTypeMetadata { - s.LastUpdatedDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *SlotTypeMetadata) SetName(v string) *SlotTypeMetadata { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *SlotTypeMetadata) SetVersion(v string) *SlotTypeMetadata { - s.Version = &v - return s -} - -// A collection of messages that convey information to the user. At runtime, -// Amazon Lex selects the message to convey. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/Statement -type Statement struct { - _ struct{} `type:"structure"` - - // A collection of message objects. - // - // Messages is a required field - Messages []*Message `locationName:"messages" min:"1" type:"list" required:"true"` - - // At runtime, if the client is using the PostText (http://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostText.html) - // API, Amazon Lex includes the response card in the response. It substitutes - // all of the session attributes and slot values for placeholders in the response - // card. 
- ResponseCard *string `locationName:"responseCard" min:"1" type:"string"` -} - -// String returns the string representation -func (s Statement) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Statement) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Statement) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Statement"} - if s.Messages == nil { - invalidParams.Add(request.NewErrParamRequired("Messages")) - } - if s.Messages != nil && len(s.Messages) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Messages", 1)) - } - if s.ResponseCard != nil && len(*s.ResponseCard) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResponseCard", 1)) - } - if s.Messages != nil { - for i, v := range s.Messages { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Messages", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMessages sets the Messages field's value. -func (s *Statement) SetMessages(v []*Message) *Statement { - s.Messages = v - return s -} - -// SetResponseCard sets the ResponseCard field's value. -func (s *Statement) SetResponseCard(v string) *Statement { - s.ResponseCard = &v - return s -} - -// Provides information about a single utterance that was made to your bot. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/UtteranceData -type UtteranceData struct { - _ struct{} `type:"structure"` - - // The number of times that the utterance was processed. - Count *int64 `locationName:"count" type:"integer"` - - // The total number of individuals that used the utterance. - DistinctUsers *int64 `locationName:"distinctUsers" type:"integer"` - - // The date that the utterance was first recorded. 
- FirstUtteredDate *time.Time `locationName:"firstUtteredDate" type:"timestamp" timestampFormat:"unix"` - - // The date that the utterance was last recorded. - LastUtteredDate *time.Time `locationName:"lastUtteredDate" type:"timestamp" timestampFormat:"unix"` - - // The text that was entered by the user or the text representation of an audio - // clip. - UtteranceString *string `locationName:"utteranceString" min:"1" type:"string"` -} - -// String returns the string representation -func (s UtteranceData) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UtteranceData) GoString() string { - return s.String() -} - -// SetCount sets the Count field's value. -func (s *UtteranceData) SetCount(v int64) *UtteranceData { - s.Count = &v - return s -} - -// SetDistinctUsers sets the DistinctUsers field's value. -func (s *UtteranceData) SetDistinctUsers(v int64) *UtteranceData { - s.DistinctUsers = &v - return s -} - -// SetFirstUtteredDate sets the FirstUtteredDate field's value. -func (s *UtteranceData) SetFirstUtteredDate(v time.Time) *UtteranceData { - s.FirstUtteredDate = &v - return s -} - -// SetLastUtteredDate sets the LastUtteredDate field's value. -func (s *UtteranceData) SetLastUtteredDate(v time.Time) *UtteranceData { - s.LastUtteredDate = &v - return s -} - -// SetUtteranceString sets the UtteranceString field's value. -func (s *UtteranceData) SetUtteranceString(v string) *UtteranceData { - s.UtteranceString = &v - return s -} - -// Provides a list of utterances that have been made to a specific version of -// your bot. The list contains a maximum of 100 utterances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/UtteranceList -type UtteranceList struct { - _ struct{} `type:"structure"` - - // The version of the bot that processed the list. 
- BotVersion *string `locationName:"botVersion" min:"1" type:"string"` - - // One or more UtteranceData objects that contain information about the utterances - // that have been made to a bot. The maximum number of object is 100. - Utterances []*UtteranceData `locationName:"utterances" type:"list"` -} - -// String returns the string representation -func (s UtteranceList) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UtteranceList) GoString() string { - return s.String() -} - -// SetBotVersion sets the BotVersion field's value. -func (s *UtteranceList) SetBotVersion(v string) *UtteranceList { - s.BotVersion = &v - return s -} - -// SetUtterances sets the Utterances field's value. -func (s *UtteranceList) SetUtterances(v []*UtteranceData) *UtteranceList { - s.Utterances = v - return s -} - -const ( - // ChannelStatusInProgress is a ChannelStatus enum value - ChannelStatusInProgress = "IN_PROGRESS" - - // ChannelStatusCreated is a ChannelStatus enum value - ChannelStatusCreated = "CREATED" - - // ChannelStatusFailed is a ChannelStatus enum value - ChannelStatusFailed = "FAILED" -) - -const ( - // ChannelTypeFacebook is a ChannelType enum value - ChannelTypeFacebook = "Facebook" - - // ChannelTypeSlack is a ChannelType enum value - ChannelTypeSlack = "Slack" - - // ChannelTypeTwilioSms is a ChannelType enum value - ChannelTypeTwilioSms = "Twilio-Sms" -) - -const ( - // ContentTypePlainText is a ContentType enum value - ContentTypePlainText = "PlainText" - - // ContentTypeSsml is a ContentType enum value - ContentTypeSsml = "SSML" -) - -const ( - // ExportStatusInProgress is a ExportStatus enum value - ExportStatusInProgress = "IN_PROGRESS" - - // ExportStatusReady is a ExportStatus enum value - ExportStatusReady = "READY" - - // ExportStatusFailed is a ExportStatus enum value - ExportStatusFailed = "FAILED" -) - -const ( - // ExportTypeAlexaSkillsKit is a ExportType enum value - ExportTypeAlexaSkillsKit = 
"ALEXA_SKILLS_KIT" -) - -const ( - // FulfillmentActivityTypeReturnIntent is a FulfillmentActivityType enum value - FulfillmentActivityTypeReturnIntent = "ReturnIntent" - - // FulfillmentActivityTypeCodeHook is a FulfillmentActivityType enum value - FulfillmentActivityTypeCodeHook = "CodeHook" -) - -const ( - // LocaleEnUs is a Locale enum value - LocaleEnUs = "en-US" -) - -const ( - // ProcessBehaviorSave is a ProcessBehavior enum value - ProcessBehaviorSave = "SAVE" - - // ProcessBehaviorBuild is a ProcessBehavior enum value - ProcessBehaviorBuild = "BUILD" -) - -const ( - // ReferenceTypeIntent is a ReferenceType enum value - ReferenceTypeIntent = "Intent" - - // ReferenceTypeBot is a ReferenceType enum value - ReferenceTypeBot = "Bot" - - // ReferenceTypeBotAlias is a ReferenceType enum value - ReferenceTypeBotAlias = "BotAlias" - - // ReferenceTypeBotChannel is a ReferenceType enum value - ReferenceTypeBotChannel = "BotChannel" -) - -const ( - // ResourceTypeBot is a ResourceType enum value - ResourceTypeBot = "BOT" -) - -const ( - // SlotConstraintRequired is a SlotConstraint enum value - SlotConstraintRequired = "Required" - - // SlotConstraintOptional is a SlotConstraint enum value - SlotConstraintOptional = "Optional" -) - -const ( - // SlotValueSelectionStrategyOriginalValue is a SlotValueSelectionStrategy enum value - SlotValueSelectionStrategyOriginalValue = "ORIGINAL_VALUE" - - // SlotValueSelectionStrategyTopResolution is a SlotValueSelectionStrategy enum value - SlotValueSelectionStrategyTopResolution = "TOP_RESOLUTION" -) - -const ( - // StatusBuilding is a Status enum value - StatusBuilding = "BUILDING" - - // StatusReady is a Status enum value - StatusReady = "READY" - - // StatusFailed is a Status enum value - StatusFailed = "FAILED" - - // StatusNotBuilt is a Status enum value - StatusNotBuilt = "NOT_BUILT" -) - -const ( - // StatusTypeDetected is a StatusType enum value - StatusTypeDetected = "Detected" - - // StatusTypeMissed is a StatusType 
enum value - StatusTypeMissed = "Missed" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/doc.go b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/doc.go deleted file mode 100644 index 3e4aae2ee0b..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/doc.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package lexmodelbuildingservice provides the client and types for making API -// requests to Amazon Lex Model Building Service. -// -// Amazon Lex is an AWS service for building conversational voice and text interfaces. -// Use these actions to create, update, and delete conversational bots for new -// and existing client applications. -// -// See https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19 for more information on this service. -// -// See lexmodelbuildingservice package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/lexmodelbuildingservice/ -// -// Using the Client -// -// To contact Amazon Lex Model Building Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon Lex Model Building Service client LexModelBuildingService for more -// information on creating client for this service. 
-// https://docs.aws.amazon.com/sdk-for-go/api/service/lexmodelbuildingservice/#New -package lexmodelbuildingservice diff --git a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/errors.go b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/errors.go deleted file mode 100644 index da3b08cb8d6..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/errors.go +++ /dev/null @@ -1,61 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package lexmodelbuildingservice - -const ( - - // ErrCodeBadRequestException for service response error code - // "BadRequestException". - // - // The request is not well formed. For example, a value is invalid or a required - // field is missing. Check the field values, and try again. - ErrCodeBadRequestException = "BadRequestException" - - // ErrCodeConflictException for service response error code - // "ConflictException". - // - // There was a conflict processing the request. Try your request again. - ErrCodeConflictException = "ConflictException" - - // ErrCodeInternalFailureException for service response error code - // "InternalFailureException". - // - // An internal Amazon Lex error occurred. Try your request again. - ErrCodeInternalFailureException = "InternalFailureException" - - // ErrCodeLimitExceededException for service response error code - // "LimitExceededException". - // - // The request exceeded a limit. Try your request again. - ErrCodeLimitExceededException = "LimitExceededException" - - // ErrCodeNotFoundException for service response error code - // "NotFoundException". - // - // The resource specified in the request was not found. Check the resource and - // try again. - ErrCodeNotFoundException = "NotFoundException" - - // ErrCodePreconditionFailedException for service response error code - // "PreconditionFailedException". 
- // - // The checksum of the resource that you are trying to change does not match - // the checksum in the request. Check the resource's checksum and try again. - ErrCodePreconditionFailedException = "PreconditionFailedException" - - // ErrCodeResourceInUseException for service response error code - // "ResourceInUseException". - // - // The resource that you are attempting to delete is referred to by another - // resource. Use this information to remove references to the resource that - // you are trying to delete. - // - // The body of the exception contains a JSON object that describes the resource. - // - // { "resourceType": BOT | BOTALIAS | BOTCHANNEL | INTENT, - // - // "resourceReference": { - // - // "name": string, "version": string } } - ErrCodeResourceInUseException = "ResourceInUseException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go deleted file mode 100644 index 8948604660b..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go +++ /dev/null @@ -1,97 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package lexmodelbuildingservice - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/restjson" -) - -// LexModelBuildingService provides the API operation methods for making requests to -// Amazon Lex Model Building Service. See this package's package overview docs -// for details on the service. -// -// LexModelBuildingService methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. 
-type LexModelBuildingService struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "models.lex" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the LexModelBuildingService client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a LexModelBuildingService client from just a session. -// svc := lexmodelbuildingservice.New(mySession) -// -// // Create a LexModelBuildingService client with additional configuration -// svc := lexmodelbuildingservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *LexModelBuildingService { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *LexModelBuildingService { - if len(signingName) == 0 { - signingName = "lex" - } - svc := &LexModelBuildingService{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2017-04-19", - JSONVersion: "1.1", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a LexModelBuildingService operation and runs any -// custom request initialization. -func (c *LexModelBuildingService) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go deleted file mode 100644 index fecd4e344b9..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go +++ /dev/null @@ -1,14536 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package mediaconvert - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opCancelJob = "CancelJob" - -// CancelJobRequest generates a "aws/request.Request" representing the -// client's request for the CancelJob operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CancelJob for more information on using the CancelJob -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CancelJobRequest method. -// req, resp := client.CancelJobRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CancelJob -func (c *MediaConvert) CancelJobRequest(input *CancelJobInput) (req *request.Request, output *CancelJobOutput) { - op := &request.Operation{ - Name: opCancelJob, - HTTPMethod: "DELETE", - HTTPPath: "/2017-08-29/jobs/{id}", - } - - if input == nil { - input = &CancelJobInput{} - } - - output = &CancelJobOutput{} - req = c.newRequest(op, input, output) - return -} - -// CancelJob API operation for AWS Elemental MediaConvert. -// -// Permanently remove a job from a queue. Once you have canceled a job, you -// can't start it again. You can't delete a running job. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation CancelJob for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CancelJob -func (c *MediaConvert) CancelJob(input *CancelJobInput) (*CancelJobOutput, error) { - req, out := c.CancelJobRequest(input) - return out, req.Send() -} - -// CancelJobWithContext is the same as CancelJob with the addition of -// the ability to pass a context and additional request options. -// -// See CancelJob for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) CancelJobWithContext(ctx aws.Context, input *CancelJobInput, opts ...request.Option) (*CancelJobOutput, error) { - req, out := c.CancelJobRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateJob = "CreateJob" - -// CreateJobRequest generates a "aws/request.Request" representing the -// client's request for the CreateJob operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See CreateJob for more information on using the CreateJob -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateJobRequest method. -// req, resp := client.CreateJobRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJob -func (c *MediaConvert) CreateJobRequest(input *CreateJobInput) (req *request.Request, output *CreateJobOutput) { - op := &request.Operation{ - Name: opCreateJob, - HTTPMethod: "POST", - HTTPPath: "/2017-08-29/jobs", - } - - if input == nil { - input = &CreateJobInput{} - } - - output = &CreateJobOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateJob API operation for AWS Elemental MediaConvert. -// -// Create a new transcoding job. For information about jobs and job settings, -// see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation CreateJob for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJob -func (c *MediaConvert) CreateJob(input *CreateJobInput) (*CreateJobOutput, error) { - req, out := c.CreateJobRequest(input) - return out, req.Send() -} - -// CreateJobWithContext is the same as CreateJob with the addition of -// the ability to pass a context and additional request options. -// -// See CreateJob for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) CreateJobWithContext(ctx aws.Context, input *CreateJobInput, opts ...request.Option) (*CreateJobOutput, error) { - req, out := c.CreateJobRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateJobTemplate = "CreateJobTemplate" - -// CreateJobTemplateRequest generates a "aws/request.Request" representing the -// client's request for the CreateJobTemplate operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateJobTemplate for more information on using the CreateJobTemplate -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateJobTemplateRequest method. -// req, resp := client.CreateJobTemplateRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJobTemplate -func (c *MediaConvert) CreateJobTemplateRequest(input *CreateJobTemplateInput) (req *request.Request, output *CreateJobTemplateOutput) { - op := &request.Operation{ - Name: opCreateJobTemplate, - HTTPMethod: "POST", - HTTPPath: "/2017-08-29/jobTemplates", - } - - if input == nil { - input = &CreateJobTemplateInput{} - } - - output = &CreateJobTemplateOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateJobTemplate API operation for AWS Elemental MediaConvert. -// -// Create a new job template. For information about job templates see the User -// Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation CreateJobTemplate for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJobTemplate -func (c *MediaConvert) CreateJobTemplate(input *CreateJobTemplateInput) (*CreateJobTemplateOutput, error) { - req, out := c.CreateJobTemplateRequest(input) - return out, req.Send() -} - -// CreateJobTemplateWithContext is the same as CreateJobTemplate with the addition of -// the ability to pass a context and additional request options. -// -// See CreateJobTemplate for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) CreateJobTemplateWithContext(ctx aws.Context, input *CreateJobTemplateInput, opts ...request.Option) (*CreateJobTemplateOutput, error) { - req, out := c.CreateJobTemplateRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreatePreset = "CreatePreset" - -// CreatePresetRequest generates a "aws/request.Request" representing the -// client's request for the CreatePreset operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See CreatePreset for more information on using the CreatePreset -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreatePresetRequest method. -// req, resp := client.CreatePresetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreatePreset -func (c *MediaConvert) CreatePresetRequest(input *CreatePresetInput) (req *request.Request, output *CreatePresetOutput) { - op := &request.Operation{ - Name: opCreatePreset, - HTTPMethod: "POST", - HTTPPath: "/2017-08-29/presets", - } - - if input == nil { - input = &CreatePresetInput{} - } - - output = &CreatePresetOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreatePreset API operation for AWS Elemental MediaConvert. -// -// Create a new preset. For information about job templates see the User Guide -// at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation CreatePreset for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreatePreset -func (c *MediaConvert) CreatePreset(input *CreatePresetInput) (*CreatePresetOutput, error) { - req, out := c.CreatePresetRequest(input) - return out, req.Send() -} - -// CreatePresetWithContext is the same as CreatePreset with the addition of -// the ability to pass a context and additional request options. -// -// See CreatePreset for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) CreatePresetWithContext(ctx aws.Context, input *CreatePresetInput, opts ...request.Option) (*CreatePresetOutput, error) { - req, out := c.CreatePresetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateQueue = "CreateQueue" - -// CreateQueueRequest generates a "aws/request.Request" representing the -// client's request for the CreateQueue operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateQueue for more information on using the CreateQueue -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateQueueRequest method. -// req, resp := client.CreateQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateQueue -func (c *MediaConvert) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, output *CreateQueueOutput) { - op := &request.Operation{ - Name: opCreateQueue, - HTTPMethod: "POST", - HTTPPath: "/2017-08-29/queues", - } - - if input == nil { - input = &CreateQueueInput{} - } - - output = &CreateQueueOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateQueue API operation for AWS Elemental MediaConvert. -// -// Create a new transcoding queue. For information about job templates see the -// User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation CreateQueue for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateQueue -func (c *MediaConvert) CreateQueue(input *CreateQueueInput) (*CreateQueueOutput, error) { - req, out := c.CreateQueueRequest(input) - return out, req.Send() -} - -// CreateQueueWithContext is the same as CreateQueue with the addition of -// the ability to pass a context and additional request options. -// -// See CreateQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) CreateQueueWithContext(ctx aws.Context, input *CreateQueueInput, opts ...request.Option) (*CreateQueueOutput, error) { - req, out := c.CreateQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteJobTemplate = "DeleteJobTemplate" - -// DeleteJobTemplateRequest generates a "aws/request.Request" representing the -// client's request for the DeleteJobTemplate operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteJobTemplate for more information on using the DeleteJobTemplate -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteJobTemplateRequest method. -// req, resp := client.DeleteJobTemplateRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteJobTemplate -func (c *MediaConvert) DeleteJobTemplateRequest(input *DeleteJobTemplateInput) (req *request.Request, output *DeleteJobTemplateOutput) { - op := &request.Operation{ - Name: opDeleteJobTemplate, - HTTPMethod: "DELETE", - HTTPPath: "/2017-08-29/jobTemplates/{name}", - } - - if input == nil { - input = &DeleteJobTemplateInput{} - } - - output = &DeleteJobTemplateOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteJobTemplate API operation for AWS Elemental MediaConvert. -// -// Permanently delete a job template you have created. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation DeleteJobTemplate for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteJobTemplate -func (c *MediaConvert) DeleteJobTemplate(input *DeleteJobTemplateInput) (*DeleteJobTemplateOutput, error) { - req, out := c.DeleteJobTemplateRequest(input) - return out, req.Send() -} - -// DeleteJobTemplateWithContext is the same as DeleteJobTemplate with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteJobTemplate for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) DeleteJobTemplateWithContext(ctx aws.Context, input *DeleteJobTemplateInput, opts ...request.Option) (*DeleteJobTemplateOutput, error) { - req, out := c.DeleteJobTemplateRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeletePreset = "DeletePreset" - -// DeletePresetRequest generates a "aws/request.Request" representing the -// client's request for the DeletePreset operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See DeletePreset for more information on using the DeletePreset -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeletePresetRequest method. -// req, resp := client.DeletePresetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeletePreset -func (c *MediaConvert) DeletePresetRequest(input *DeletePresetInput) (req *request.Request, output *DeletePresetOutput) { - op := &request.Operation{ - Name: opDeletePreset, - HTTPMethod: "DELETE", - HTTPPath: "/2017-08-29/presets/{name}", - } - - if input == nil { - input = &DeletePresetInput{} - } - - output = &DeletePresetOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeletePreset API operation for AWS Elemental MediaConvert. -// -// Permanently delete a preset you have created. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation DeletePreset for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeletePreset -func (c *MediaConvert) DeletePreset(input *DeletePresetInput) (*DeletePresetOutput, error) { - req, out := c.DeletePresetRequest(input) - return out, req.Send() -} - -// DeletePresetWithContext is the same as DeletePreset with the addition of -// the ability to pass a context and additional request options. -// -// See DeletePreset for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) DeletePresetWithContext(ctx aws.Context, input *DeletePresetInput, opts ...request.Option) (*DeletePresetOutput, error) { - req, out := c.DeletePresetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteQueue = "DeleteQueue" - -// DeleteQueueRequest generates a "aws/request.Request" representing the -// client's request for the DeleteQueue operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteQueue for more information on using the DeleteQueue -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteQueueRequest method. -// req, resp := client.DeleteQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteQueue -func (c *MediaConvert) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, output *DeleteQueueOutput) { - op := &request.Operation{ - Name: opDeleteQueue, - HTTPMethod: "DELETE", - HTTPPath: "/2017-08-29/queues/{name}", - } - - if input == nil { - input = &DeleteQueueInput{} - } - - output = &DeleteQueueOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteQueue API operation for AWS Elemental MediaConvert. -// -// Permanently delete a queue you have created. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation DeleteQueue for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteQueue -func (c *MediaConvert) DeleteQueue(input *DeleteQueueInput) (*DeleteQueueOutput, error) { - req, out := c.DeleteQueueRequest(input) - return out, req.Send() -} - -// DeleteQueueWithContext is the same as DeleteQueue with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) DeleteQueueWithContext(ctx aws.Context, input *DeleteQueueInput, opts ...request.Option) (*DeleteQueueOutput, error) { - req, out := c.DeleteQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeEndpoints = "DescribeEndpoints" - -// DescribeEndpointsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeEndpoints operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeEndpoints for more information on using the DescribeEndpoints -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeEndpointsRequest method. -// req, resp := client.DescribeEndpointsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DescribeEndpoints -func (c *MediaConvert) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) { - op := &request.Operation{ - Name: opDescribeEndpoints, - HTTPMethod: "POST", - HTTPPath: "/2017-08-29/endpoints", - } - - if input == nil { - input = &DescribeEndpointsInput{} - } - - output = &DescribeEndpointsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeEndpoints API operation for AWS Elemental MediaConvert. -// -// Send an request with an empty body to the regional API endpoint to get your -// account API endpoint. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation DescribeEndpoints for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DescribeEndpoints -func (c *MediaConvert) DescribeEndpoints(input *DescribeEndpointsInput) (*DescribeEndpointsOutput, error) { - req, out := c.DescribeEndpointsRequest(input) - return out, req.Send() -} - -// DescribeEndpointsWithContext is the same as DescribeEndpoints with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeEndpoints for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) DescribeEndpointsWithContext(ctx aws.Context, input *DescribeEndpointsInput, opts ...request.Option) (*DescribeEndpointsOutput, error) { - req, out := c.DescribeEndpointsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetJob = "GetJob" - -// GetJobRequest generates a "aws/request.Request" representing the -// client's request for the GetJob operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See GetJob for more information on using the GetJob -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetJobRequest method. -// req, resp := client.GetJobRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJob -func (c *MediaConvert) GetJobRequest(input *GetJobInput) (req *request.Request, output *GetJobOutput) { - op := &request.Operation{ - Name: opGetJob, - HTTPMethod: "GET", - HTTPPath: "/2017-08-29/jobs/{id}", - } - - if input == nil { - input = &GetJobInput{} - } - - output = &GetJobOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetJob API operation for AWS Elemental MediaConvert. -// -// Retrieve the JSON for a specific completed transcoding job. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation GetJob for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJob -func (c *MediaConvert) GetJob(input *GetJobInput) (*GetJobOutput, error) { - req, out := c.GetJobRequest(input) - return out, req.Send() -} - -// GetJobWithContext is the same as GetJob with the addition of -// the ability to pass a context and additional request options. -// -// See GetJob for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) GetJobWithContext(ctx aws.Context, input *GetJobInput, opts ...request.Option) (*GetJobOutput, error) { - req, out := c.GetJobRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetJobTemplate = "GetJobTemplate" - -// GetJobTemplateRequest generates a "aws/request.Request" representing the -// client's request for the GetJobTemplate operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetJobTemplate for more information on using the GetJobTemplate -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetJobTemplateRequest method. -// req, resp := client.GetJobTemplateRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJobTemplate -func (c *MediaConvert) GetJobTemplateRequest(input *GetJobTemplateInput) (req *request.Request, output *GetJobTemplateOutput) { - op := &request.Operation{ - Name: opGetJobTemplate, - HTTPMethod: "GET", - HTTPPath: "/2017-08-29/jobTemplates/{name}", - } - - if input == nil { - input = &GetJobTemplateInput{} - } - - output = &GetJobTemplateOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetJobTemplate API operation for AWS Elemental MediaConvert. -// -// Retrieve the JSON for a specific job template. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation GetJobTemplate for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJobTemplate -func (c *MediaConvert) GetJobTemplate(input *GetJobTemplateInput) (*GetJobTemplateOutput, error) { - req, out := c.GetJobTemplateRequest(input) - return out, req.Send() -} - -// GetJobTemplateWithContext is the same as GetJobTemplate with the addition of -// the ability to pass a context and additional request options. -// -// See GetJobTemplate for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) GetJobTemplateWithContext(ctx aws.Context, input *GetJobTemplateInput, opts ...request.Option) (*GetJobTemplateOutput, error) { - req, out := c.GetJobTemplateRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetPreset = "GetPreset" - -// GetPresetRequest generates a "aws/request.Request" representing the -// client's request for the GetPreset operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetPreset for more information on using the GetPreset -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetPresetRequest method. -// req, resp := client.GetPresetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetPreset -func (c *MediaConvert) GetPresetRequest(input *GetPresetInput) (req *request.Request, output *GetPresetOutput) { - op := &request.Operation{ - Name: opGetPreset, - HTTPMethod: "GET", - HTTPPath: "/2017-08-29/presets/{name}", - } - - if input == nil { - input = &GetPresetInput{} - } - - output = &GetPresetOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetPreset API operation for AWS Elemental MediaConvert. -// -// Retrieve the JSON for a specific preset. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation GetPreset for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetPreset -func (c *MediaConvert) GetPreset(input *GetPresetInput) (*GetPresetOutput, error) { - req, out := c.GetPresetRequest(input) - return out, req.Send() -} - -// GetPresetWithContext is the same as GetPreset with the addition of -// the ability to pass a context and additional request options. -// -// See GetPreset for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) GetPresetWithContext(ctx aws.Context, input *GetPresetInput, opts ...request.Option) (*GetPresetOutput, error) { - req, out := c.GetPresetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetQueue = "GetQueue" - -// GetQueueRequest generates a "aws/request.Request" representing the -// client's request for the GetQueue operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetQueue for more information on using the GetQueue -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetQueueRequest method. -// req, resp := client.GetQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetQueue -func (c *MediaConvert) GetQueueRequest(input *GetQueueInput) (req *request.Request, output *GetQueueOutput) { - op := &request.Operation{ - Name: opGetQueue, - HTTPMethod: "GET", - HTTPPath: "/2017-08-29/queues/{name}", - } - - if input == nil { - input = &GetQueueInput{} - } - - output = &GetQueueOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetQueue API operation for AWS Elemental MediaConvert. -// -// Retrieve the JSON for a specific queue. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation GetQueue for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetQueue -func (c *MediaConvert) GetQueue(input *GetQueueInput) (*GetQueueOutput, error) { - req, out := c.GetQueueRequest(input) - return out, req.Send() -} - -// GetQueueWithContext is the same as GetQueue with the addition of -// the ability to pass a context and additional request options. -// -// See GetQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) GetQueueWithContext(ctx aws.Context, input *GetQueueInput, opts ...request.Option) (*GetQueueOutput, error) { - req, out := c.GetQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListJobTemplates = "ListJobTemplates" - -// ListJobTemplatesRequest generates a "aws/request.Request" representing the -// client's request for the ListJobTemplates operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListJobTemplates for more information on using the ListJobTemplates -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListJobTemplatesRequest method. -// req, resp := client.ListJobTemplatesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobTemplates -func (c *MediaConvert) ListJobTemplatesRequest(input *ListJobTemplatesInput) (req *request.Request, output *ListJobTemplatesOutput) { - op := &request.Operation{ - Name: opListJobTemplates, - HTTPMethod: "GET", - HTTPPath: "/2017-08-29/jobTemplates", - } - - if input == nil { - input = &ListJobTemplatesInput{} - } - - output = &ListJobTemplatesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListJobTemplates API operation for AWS Elemental MediaConvert. -// -// Retrieve a JSON array of up to twenty of your job templates. This will return -// the templates themselves, not just a list of them. To retrieve the next twenty -// templates, use the nextToken string returned with the array -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation ListJobTemplates for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobTemplates -func (c *MediaConvert) ListJobTemplates(input *ListJobTemplatesInput) (*ListJobTemplatesOutput, error) { - req, out := c.ListJobTemplatesRequest(input) - return out, req.Send() -} - -// ListJobTemplatesWithContext is the same as ListJobTemplates with the addition of -// the ability to pass a context and additional request options. -// -// See ListJobTemplates for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) ListJobTemplatesWithContext(ctx aws.Context, input *ListJobTemplatesInput, opts ...request.Option) (*ListJobTemplatesOutput, error) { - req, out := c.ListJobTemplatesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListJobs = "ListJobs" - -// ListJobsRequest generates a "aws/request.Request" representing the -// client's request for the ListJobs operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See ListJobs for more information on using the ListJobs -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListJobsRequest method. -// req, resp := client.ListJobsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobs -func (c *MediaConvert) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) { - op := &request.Operation{ - Name: opListJobs, - HTTPMethod: "GET", - HTTPPath: "/2017-08-29/jobs", - } - - if input == nil { - input = &ListJobsInput{} - } - - output = &ListJobsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListJobs API operation for AWS Elemental MediaConvert. -// -// Retrieve a JSON array of up to twenty of your most recently created jobs. -// This array includes in-process, completed, and errored jobs. This will return -// the jobs themselves, not just a list of the jobs. To retrieve the twenty -// next most recent jobs, use the nextToken string returned with the array. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation ListJobs for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobs -func (c *MediaConvert) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { - req, out := c.ListJobsRequest(input) - return out, req.Send() -} - -// ListJobsWithContext is the same as ListJobs with the addition of -// the ability to pass a context and additional request options. -// -// See ListJobs for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts ...request.Option) (*ListJobsOutput, error) { - req, out := c.ListJobsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListPresets = "ListPresets" - -// ListPresetsRequest generates a "aws/request.Request" representing the -// client's request for the ListPresets operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListPresets for more information on using the ListPresets -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListPresetsRequest method. -// req, resp := client.ListPresetsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListPresets -func (c *MediaConvert) ListPresetsRequest(input *ListPresetsInput) (req *request.Request, output *ListPresetsOutput) { - op := &request.Operation{ - Name: opListPresets, - HTTPMethod: "GET", - HTTPPath: "/2017-08-29/presets", - } - - if input == nil { - input = &ListPresetsInput{} - } - - output = &ListPresetsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListPresets API operation for AWS Elemental MediaConvert. -// -// Retrieve a JSON array of up to twenty of your presets. This will return the -// presets themselves, not just a list of them. To retrieve the next twenty -// presets, use the nextToken string returned with the array. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation ListPresets for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListPresets -func (c *MediaConvert) ListPresets(input *ListPresetsInput) (*ListPresetsOutput, error) { - req, out := c.ListPresetsRequest(input) - return out, req.Send() -} - -// ListPresetsWithContext is the same as ListPresets with the addition of -// the ability to pass a context and additional request options. -// -// See ListPresets for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) ListPresetsWithContext(ctx aws.Context, input *ListPresetsInput, opts ...request.Option) (*ListPresetsOutput, error) { - req, out := c.ListPresetsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListQueues = "ListQueues" - -// ListQueuesRequest generates a "aws/request.Request" representing the -// client's request for the ListQueues operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListQueues for more information on using the ListQueues -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListQueuesRequest method. -// req, resp := client.ListQueuesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListQueues -func (c *MediaConvert) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, output *ListQueuesOutput) { - op := &request.Operation{ - Name: opListQueues, - HTTPMethod: "GET", - HTTPPath: "/2017-08-29/queues", - } - - if input == nil { - input = &ListQueuesInput{} - } - - output = &ListQueuesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListQueues API operation for AWS Elemental MediaConvert. -// -// Retrieve a JSON array of up to twenty of your queues. This will return the -// queues themselves, not just a list of them. To retrieve the next twenty queues, -// use the nextToken string returned with the array. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation ListQueues for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListQueues -func (c *MediaConvert) ListQueues(input *ListQueuesInput) (*ListQueuesOutput, error) { - req, out := c.ListQueuesRequest(input) - return out, req.Send() -} - -// ListQueuesWithContext is the same as ListQueues with the addition of -// the ability to pass a context and additional request options. -// -// See ListQueues for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) ListQueuesWithContext(ctx aws.Context, input *ListQueuesInput, opts ...request.Option) (*ListQueuesOutput, error) { - req, out := c.ListQueuesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateJobTemplate = "UpdateJobTemplate" - -// UpdateJobTemplateRequest generates a "aws/request.Request" representing the -// client's request for the UpdateJobTemplate operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateJobTemplate for more information on using the UpdateJobTemplate -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateJobTemplateRequest method. -// req, resp := client.UpdateJobTemplateRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateJobTemplate -func (c *MediaConvert) UpdateJobTemplateRequest(input *UpdateJobTemplateInput) (req *request.Request, output *UpdateJobTemplateOutput) { - op := &request.Operation{ - Name: opUpdateJobTemplate, - HTTPMethod: "PUT", - HTTPPath: "/2017-08-29/jobTemplates/{name}", - } - - if input == nil { - input = &UpdateJobTemplateInput{} - } - - output = &UpdateJobTemplateOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateJobTemplate API operation for AWS Elemental MediaConvert. -// -// Modify one of your existing job templates. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation UpdateJobTemplate for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateJobTemplate -func (c *MediaConvert) UpdateJobTemplate(input *UpdateJobTemplateInput) (*UpdateJobTemplateOutput, error) { - req, out := c.UpdateJobTemplateRequest(input) - return out, req.Send() -} - -// UpdateJobTemplateWithContext is the same as UpdateJobTemplate with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateJobTemplate for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) UpdateJobTemplateWithContext(ctx aws.Context, input *UpdateJobTemplateInput, opts ...request.Option) (*UpdateJobTemplateOutput, error) { - req, out := c.UpdateJobTemplateRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdatePreset = "UpdatePreset" - -// UpdatePresetRequest generates a "aws/request.Request" representing the -// client's request for the UpdatePreset operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See UpdatePreset for more information on using the UpdatePreset -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdatePresetRequest method. -// req, resp := client.UpdatePresetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdatePreset -func (c *MediaConvert) UpdatePresetRequest(input *UpdatePresetInput) (req *request.Request, output *UpdatePresetOutput) { - op := &request.Operation{ - Name: opUpdatePreset, - HTTPMethod: "PUT", - HTTPPath: "/2017-08-29/presets/{name}", - } - - if input == nil { - input = &UpdatePresetInput{} - } - - output = &UpdatePresetOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdatePreset API operation for AWS Elemental MediaConvert. -// -// Modify one of your existing presets. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation UpdatePreset for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdatePreset -func (c *MediaConvert) UpdatePreset(input *UpdatePresetInput) (*UpdatePresetOutput, error) { - req, out := c.UpdatePresetRequest(input) - return out, req.Send() -} - -// UpdatePresetWithContext is the same as UpdatePreset with the addition of -// the ability to pass a context and additional request options. -// -// See UpdatePreset for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) UpdatePresetWithContext(ctx aws.Context, input *UpdatePresetInput, opts ...request.Option) (*UpdatePresetOutput, error) { - req, out := c.UpdatePresetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateQueue = "UpdateQueue" - -// UpdateQueueRequest generates a "aws/request.Request" representing the -// client's request for the UpdateQueue operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateQueue for more information on using the UpdateQueue -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateQueueRequest method. -// req, resp := client.UpdateQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateQueue -func (c *MediaConvert) UpdateQueueRequest(input *UpdateQueueInput) (req *request.Request, output *UpdateQueueOutput) { - op := &request.Operation{ - Name: opUpdateQueue, - HTTPMethod: "PUT", - HTTPPath: "/2017-08-29/queues/{name}", - } - - if input == nil { - input = &UpdateQueueInput{} - } - - output = &UpdateQueueOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateQueue API operation for AWS Elemental MediaConvert. -// -// Modify one of your existing queues. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaConvert's -// API operation UpdateQueue for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateQueue -func (c *MediaConvert) UpdateQueue(input *UpdateQueueInput) (*UpdateQueueOutput, error) { - req, out := c.UpdateQueueRequest(input) - return out, req.Send() -} - -// UpdateQueueWithContext is the same as UpdateQueue with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaConvert) UpdateQueueWithContext(ctx aws.Context, input *UpdateQueueInput, opts ...request.Option) (*UpdateQueueOutput, error) { - req, out := c.UpdateQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value AAC. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AacSettings -type AacSettings struct { - _ struct{} `type:"structure"` - - // Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio - // + audio description (AD) as a stereo pair. The value for AudioType will be - // set to 3, which signals to downstream systems that this stream contains "broadcaster - // mixed AD". 
Note that the input received by the encoder must contain pre-mixed - // audio; the encoder does not perform the mixing. When you choose BROADCASTER_MIXED_AD, - // the encoder ignores any values you provide in AudioType and FollowInputAudioType. - // Choose NORMAL when the input does not contain pre-mixed audio + audio description - // (AD). In this case, the encoder will use any values you provide for AudioType - // and FollowInputAudioType. - AudioDescriptionBroadcasterMix *string `locationName:"audioDescriptionBroadcasterMix" type:"string" enum:"AacAudioDescriptionBroadcasterMix"` - - // Average bitrate in bits/second. Valid values depend on rate control mode - // and profile. - Bitrate *int64 `locationName:"bitrate" type:"integer"` - - // AAC Profile. - CodecProfile *string `locationName:"codecProfile" type:"string" enum:"AacCodecProfile"` - - // Mono (Audio Description), Mono, Stereo, or 5.1 channel layout. Valid values - // depend on rate control mode and profile. "1.0 - Audio Description (Receiver - // Mix)" setting receives a stereo description plus control track and emits - // a mono AAC encode of the description track, with control data emitted in - // the PES header as per ETSI TS 101 154 Annex E. - CodingMode *string `locationName:"codingMode" type:"string" enum:"AacCodingMode"` - - // Rate Control Mode. - RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"AacRateControlMode"` - - // Enables LATM/LOAS AAC output. Note that if you use LATM/LOAS AAC in an output, - // you must choose "No container" for the output container. - RawFormat *string `locationName:"rawFormat" type:"string" enum:"AacRawFormat"` - - // Sample rate in Hz. Valid values depend on rate control mode and profile. - SampleRate *int64 `locationName:"sampleRate" type:"integer"` - - // Use MPEG-2 AAC instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream - // containers. 
- Specification *string `locationName:"specification" type:"string" enum:"AacSpecification"` - - // VBR Quality Level - Only used if rate_control_mode is VBR. - VbrQuality *string `locationName:"vbrQuality" type:"string" enum:"AacVbrQuality"` -} - -// String returns the string representation -func (s AacSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AacSettings) GoString() string { - return s.String() -} - -// SetAudioDescriptionBroadcasterMix sets the AudioDescriptionBroadcasterMix field's value. -func (s *AacSettings) SetAudioDescriptionBroadcasterMix(v string) *AacSettings { - s.AudioDescriptionBroadcasterMix = &v - return s -} - -// SetBitrate sets the Bitrate field's value. -func (s *AacSettings) SetBitrate(v int64) *AacSettings { - s.Bitrate = &v - return s -} - -// SetCodecProfile sets the CodecProfile field's value. -func (s *AacSettings) SetCodecProfile(v string) *AacSettings { - s.CodecProfile = &v - return s -} - -// SetCodingMode sets the CodingMode field's value. -func (s *AacSettings) SetCodingMode(v string) *AacSettings { - s.CodingMode = &v - return s -} - -// SetRateControlMode sets the RateControlMode field's value. -func (s *AacSettings) SetRateControlMode(v string) *AacSettings { - s.RateControlMode = &v - return s -} - -// SetRawFormat sets the RawFormat field's value. -func (s *AacSettings) SetRawFormat(v string) *AacSettings { - s.RawFormat = &v - return s -} - -// SetSampleRate sets the SampleRate field's value. -func (s *AacSettings) SetSampleRate(v int64) *AacSettings { - s.SampleRate = &v - return s -} - -// SetSpecification sets the Specification field's value. -func (s *AacSettings) SetSpecification(v string) *AacSettings { - s.Specification = &v - return s -} - -// SetVbrQuality sets the VbrQuality field's value. 
-func (s *AacSettings) SetVbrQuality(v string) *AacSettings { - s.VbrQuality = &v - return s -} - -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value AC3. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Ac3Settings -type Ac3Settings struct { - _ struct{} `type:"structure"` - - // Average bitrate in bits/second. Valid bitrates depend on the coding mode. - Bitrate *int64 `locationName:"bitrate" type:"integer"` - - // Specifies the "Bitstream Mode" (bsmod) for the emitted AC-3 stream. See ATSC - // A/52-2012 for background on these values. - BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Ac3BitstreamMode"` - - // Dolby Digital coding mode. Determines number of channels. - CodingMode *string `locationName:"codingMode" type:"string" enum:"Ac3CodingMode"` - - // Sets the dialnorm for the output. If blank and input audio is Dolby Digital, - // dialnorm will be passed through. - Dialnorm *int64 `locationName:"dialnorm" type:"integer"` - - // If set to FILM_STANDARD, adds dynamic range compression signaling to the - // output bitstream as defined in the Dolby Digital specification. - DynamicRangeCompressionProfile *string `locationName:"dynamicRangeCompressionProfile" type:"string" enum:"Ac3DynamicRangeCompressionProfile"` - - // Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only - // valid with 3_2_LFE coding mode. - LfeFilter *string `locationName:"lfeFilter" type:"string" enum:"Ac3LfeFilter"` - - // When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, - // or DolbyE decoder that supplied this audio data. If audio was not supplied - // from one of these streams, then the static metadata settings will be used. - MetadataControl *string `locationName:"metadataControl" type:"string" enum:"Ac3MetadataControl"` - - // Sample rate in hz. Sample rate is always 48000. 
- SampleRate *int64 `locationName:"sampleRate" type:"integer"` -} - -// String returns the string representation -func (s Ac3Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Ac3Settings) GoString() string { - return s.String() -} - -// SetBitrate sets the Bitrate field's value. -func (s *Ac3Settings) SetBitrate(v int64) *Ac3Settings { - s.Bitrate = &v - return s -} - -// SetBitstreamMode sets the BitstreamMode field's value. -func (s *Ac3Settings) SetBitstreamMode(v string) *Ac3Settings { - s.BitstreamMode = &v - return s -} - -// SetCodingMode sets the CodingMode field's value. -func (s *Ac3Settings) SetCodingMode(v string) *Ac3Settings { - s.CodingMode = &v - return s -} - -// SetDialnorm sets the Dialnorm field's value. -func (s *Ac3Settings) SetDialnorm(v int64) *Ac3Settings { - s.Dialnorm = &v - return s -} - -// SetDynamicRangeCompressionProfile sets the DynamicRangeCompressionProfile field's value. -func (s *Ac3Settings) SetDynamicRangeCompressionProfile(v string) *Ac3Settings { - s.DynamicRangeCompressionProfile = &v - return s -} - -// SetLfeFilter sets the LfeFilter field's value. -func (s *Ac3Settings) SetLfeFilter(v string) *Ac3Settings { - s.LfeFilter = &v - return s -} - -// SetMetadataControl sets the MetadataControl field's value. -func (s *Ac3Settings) SetMetadataControl(v string) *Ac3Settings { - s.MetadataControl = &v - return s -} - -// SetSampleRate sets the SampleRate field's value. -func (s *Ac3Settings) SetSampleRate(v int64) *Ac3Settings { - s.SampleRate = &v - return s -} - -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value AIFF. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AiffSettings -type AiffSettings struct { - _ struct{} `type:"structure"` - - // Specify Bit depth (BitDepth), in bits per sample, to choose the encoding - // quality for this audio track. 
- BitDepth *int64 `locationName:"bitDepth" type:"integer"` - - // Set Channels to specify the number of channels in this output audio track. - // Choosing Mono in the console will give you 1 output channel; choosing Stereo - // will give you 2. In the API, valid values are 1 and 2. - Channels *int64 `locationName:"channels" type:"integer"` - - // Sample rate in hz. - SampleRate *int64 `locationName:"sampleRate" type:"integer"` -} - -// String returns the string representation -func (s AiffSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AiffSettings) GoString() string { - return s.String() -} - -// SetBitDepth sets the BitDepth field's value. -func (s *AiffSettings) SetBitDepth(v int64) *AiffSettings { - s.BitDepth = &v - return s -} - -// SetChannels sets the Channels field's value. -func (s *AiffSettings) SetChannels(v int64) *AiffSettings { - s.Channels = &v - return s -} - -// SetSampleRate sets the SampleRate field's value. -func (s *AiffSettings) SetSampleRate(v int64) *AiffSettings { - s.SampleRate = &v - return s -} - -// Settings for ancillary captions source. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AncillarySourceSettings -type AncillarySourceSettings struct { - _ struct{} `type:"structure"` - - // Specifies the 608 channel number in the ancillary data track from which to - // extract captions. Unused for passthrough. - SourceAncillaryChannelNumber *int64 `locationName:"sourceAncillaryChannelNumber" type:"integer"` -} - -// String returns the string representation -func (s AncillarySourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AncillarySourceSettings) GoString() string { - return s.String() -} - -// SetSourceAncillaryChannelNumber sets the SourceAncillaryChannelNumber field's value. 
-func (s *AncillarySourceSettings) SetSourceAncillaryChannelNumber(v int64) *AncillarySourceSettings { - s.SourceAncillaryChannelNumber = &v - return s -} - -// Audio codec settings (CodecSettings) under (AudioDescriptions) contains the -// group of settings related to audio encoding. The settings in this group vary -// depending on the value you choose for Audio codec (Codec). For each codec -// enum you choose, define the corresponding settings object. The following -// lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings -// * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AudioCodecSettings -type AudioCodecSettings struct { - _ struct{} `type:"structure"` - - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value AAC. - AacSettings *AacSettings `locationName:"aacSettings" type:"structure"` - - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value AC3. - Ac3Settings *Ac3Settings `locationName:"ac3Settings" type:"structure"` - - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value AIFF. - AiffSettings *AiffSettings `locationName:"aiffSettings" type:"structure"` - - // Type of Audio codec. - Codec *string `locationName:"codec" type:"string" enum:"AudioCodec"` - - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value EAC3. - Eac3Settings *Eac3Settings `locationName:"eac3Settings" type:"structure"` - - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value MP2. - Mp2Settings *Mp2Settings `locationName:"mp2Settings" type:"structure"` - - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value WAV. 
- WavSettings *WavSettings `locationName:"wavSettings" type:"structure"` -} - -// String returns the string representation -func (s AudioCodecSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioCodecSettings) GoString() string { - return s.String() -} - -// SetAacSettings sets the AacSettings field's value. -func (s *AudioCodecSettings) SetAacSettings(v *AacSettings) *AudioCodecSettings { - s.AacSettings = v - return s -} - -// SetAc3Settings sets the Ac3Settings field's value. -func (s *AudioCodecSettings) SetAc3Settings(v *Ac3Settings) *AudioCodecSettings { - s.Ac3Settings = v - return s -} - -// SetAiffSettings sets the AiffSettings field's value. -func (s *AudioCodecSettings) SetAiffSettings(v *AiffSettings) *AudioCodecSettings { - s.AiffSettings = v - return s -} - -// SetCodec sets the Codec field's value. -func (s *AudioCodecSettings) SetCodec(v string) *AudioCodecSettings { - s.Codec = &v - return s -} - -// SetEac3Settings sets the Eac3Settings field's value. -func (s *AudioCodecSettings) SetEac3Settings(v *Eac3Settings) *AudioCodecSettings { - s.Eac3Settings = v - return s -} - -// SetMp2Settings sets the Mp2Settings field's value. -func (s *AudioCodecSettings) SetMp2Settings(v *Mp2Settings) *AudioCodecSettings { - s.Mp2Settings = v - return s -} - -// SetWavSettings sets the WavSettings field's value. -func (s *AudioCodecSettings) SetWavSettings(v *WavSettings) *AudioCodecSettings { - s.WavSettings = v - return s -} - -// Description of audio output -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AudioDescription -type AudioDescription struct { - _ struct{} `type:"structure"` - - // Settings for Audio Normalization - AudioNormalizationSettings *AudioNormalizationSettings `locationName:"audioNormalizationSettings" type:"structure"` - - // Specifies which audio data to use from each input. 
In the simplest case, - // specify an "Audio Selector":#inputs-audio_selector by name based on its order - // within each input. For example if you specify "Audio Selector 3", then the - // third audio selector will be used from each input. If an input does not have - // an "Audio Selector 3", then the audio selector marked as "default" in that - // input will be used. If there is no audio selector marked as "default", silence - // will be inserted for the duration of that input. Alternatively, an "Audio - // Selector Group":#inputs-audio_selector_group name may be specified, with - // similar default/silence behavior. If no audio_source_name is specified, then - // "Audio Selector 1" will be chosen automatically. - AudioSourceName *string `locationName:"audioSourceName" type:"string"` - - // Applies only if Follow Input Audio Type is unchecked (false). A number between - // 0 and 255. The following are defined in ISO-IEC 13818-1: 0 = Undefined, 1 - // = Clean Effects, 2 = Hearing Impaired, 3 = Visually Impaired Commentary, - // 4-255 = Reserved. - AudioType *int64 `locationName:"audioType" type:"integer"` - - // When set to FOLLOW_INPUT, if the input contains an ISO 639 audio_type, then - // that value is passed through to the output. If the input contains no ISO - // 639 audio_type, the value in Audio Type is included in the output. Otherwise - // the value in Audio Type is included in the output. Note that this field and - // audioType are both ignored if audioDescriptionBroadcasterMix is set to BROADCASTER_MIXED_AD. - AudioTypeControl *string `locationName:"audioTypeControl" type:"string" enum:"AudioTypeControl"` - - // Audio codec settings (CodecSettings) under (AudioDescriptions) contains the - // group of settings related to audio encoding. The settings in this group vary - // depending on the value you choose for Audio codec (Codec). For each codec - // enum you choose, define the corresponding settings object. 
The following - // lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings - // * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings - CodecSettings *AudioCodecSettings `locationName:"codecSettings" type:"structure"` - - // Indicates the language of the audio output track. The ISO 639 language specified - // in the 'Language Code' drop down will be used when 'Follow Input Language - // Code' is not selected or when 'Follow Input Language Code' is selected but - // there is no ISO 639 language code specified by the input. - LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` - - // Choosing FOLLOW_INPUT will cause the ISO 639 language code of the output - // to follow the ISO 639 language code of the input. The language specified - // for languageCode' will be used when USE_CONFIGURED is selected or when FOLLOW_INPUT - // is selected but there is no ISO 639 language code specified by the input. - LanguageCodeControl *string `locationName:"languageCodeControl" type:"string" enum:"AudioLanguageCodeControl"` - - // Advanced audio remixing settings. - RemixSettings *RemixSettings `locationName:"remixSettings" type:"structure"` - - // Used for MS Smooth and Apple HLS outputs. Indicates the name displayed by - // the player (eg. English, or Director Commentary). Alphanumeric characters, - // spaces, and underscore are legal. - StreamName *string `locationName:"streamName" type:"string"` -} - -// String returns the string representation -func (s AudioDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioDescription) GoString() string { - return s.String() -} - -// SetAudioNormalizationSettings sets the AudioNormalizationSettings field's value. 
-func (s *AudioDescription) SetAudioNormalizationSettings(v *AudioNormalizationSettings) *AudioDescription { - s.AudioNormalizationSettings = v - return s -} - -// SetAudioSourceName sets the AudioSourceName field's value. -func (s *AudioDescription) SetAudioSourceName(v string) *AudioDescription { - s.AudioSourceName = &v - return s -} - -// SetAudioType sets the AudioType field's value. -func (s *AudioDescription) SetAudioType(v int64) *AudioDescription { - s.AudioType = &v - return s -} - -// SetAudioTypeControl sets the AudioTypeControl field's value. -func (s *AudioDescription) SetAudioTypeControl(v string) *AudioDescription { - s.AudioTypeControl = &v - return s -} - -// SetCodecSettings sets the CodecSettings field's value. -func (s *AudioDescription) SetCodecSettings(v *AudioCodecSettings) *AudioDescription { - s.CodecSettings = v - return s -} - -// SetLanguageCode sets the LanguageCode field's value. -func (s *AudioDescription) SetLanguageCode(v string) *AudioDescription { - s.LanguageCode = &v - return s -} - -// SetLanguageCodeControl sets the LanguageCodeControl field's value. -func (s *AudioDescription) SetLanguageCodeControl(v string) *AudioDescription { - s.LanguageCodeControl = &v - return s -} - -// SetRemixSettings sets the RemixSettings field's value. -func (s *AudioDescription) SetRemixSettings(v *RemixSettings) *AudioDescription { - s.RemixSettings = v - return s -} - -// SetStreamName sets the StreamName field's value. -func (s *AudioDescription) SetStreamName(v string) *AudioDescription { - s.StreamName = &v - return s -} - -// Advanced audio normalization settings. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AudioNormalizationSettings -type AudioNormalizationSettings struct { - _ struct{} `type:"structure"` - - // Audio normalization algorithm to use. 1770-1 conforms to the CALM Act specification, - // 1770-2 conforms to the EBU R-128 specification. 
- Algorithm *string `locationName:"algorithm" type:"string" enum:"AudioNormalizationAlgorithm"` - - // When enabled the output audio is corrected using the chosen algorithm. If - // disabled, the audio will be measured but not adjusted. - AlgorithmControl *string `locationName:"algorithmControl" type:"string" enum:"AudioNormalizationAlgorithmControl"` - - // Content measuring above this level will be corrected to the target level. - // Content measuring below this level will not be corrected. Gating only applies - // when not using real_time_correction. - CorrectionGateLevel *int64 `locationName:"correctionGateLevel" type:"integer"` - - // If set to LOG, log each output's audio track loudness to a CSV file. - LoudnessLogging *string `locationName:"loudnessLogging" type:"string" enum:"AudioNormalizationLoudnessLogging"` - - // If set to TRUE_PEAK, calculate and log the TruePeak for each output's audio - // track loudness. - PeakCalculation *string `locationName:"peakCalculation" type:"string" enum:"AudioNormalizationPeakCalculation"` - - // Target LKFS(loudness) to adjust volume to. If no value is entered, a default - // value will be used according to the chosen algorithm. The CALM Act (1770-1) - // recommends a target of -24 LKFS. The EBU R-128 specification (1770-2) recommends - // a target of -23 LKFS. - TargetLkfs *float64 `locationName:"targetLkfs" type:"double"` -} - -// String returns the string representation -func (s AudioNormalizationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioNormalizationSettings) GoString() string { - return s.String() -} - -// SetAlgorithm sets the Algorithm field's value. -func (s *AudioNormalizationSettings) SetAlgorithm(v string) *AudioNormalizationSettings { - s.Algorithm = &v - return s -} - -// SetAlgorithmControl sets the AlgorithmControl field's value. 
-func (s *AudioNormalizationSettings) SetAlgorithmControl(v string) *AudioNormalizationSettings { - s.AlgorithmControl = &v - return s -} - -// SetCorrectionGateLevel sets the CorrectionGateLevel field's value. -func (s *AudioNormalizationSettings) SetCorrectionGateLevel(v int64) *AudioNormalizationSettings { - s.CorrectionGateLevel = &v - return s -} - -// SetLoudnessLogging sets the LoudnessLogging field's value. -func (s *AudioNormalizationSettings) SetLoudnessLogging(v string) *AudioNormalizationSettings { - s.LoudnessLogging = &v - return s -} - -// SetPeakCalculation sets the PeakCalculation field's value. -func (s *AudioNormalizationSettings) SetPeakCalculation(v string) *AudioNormalizationSettings { - s.PeakCalculation = &v - return s -} - -// SetTargetLkfs sets the TargetLkfs field's value. -func (s *AudioNormalizationSettings) SetTargetLkfs(v float64) *AudioNormalizationSettings { - s.TargetLkfs = &v - return s -} - -// Selector for Audio -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AudioSelector -type AudioSelector struct { - _ struct{} `type:"structure"` - - // When an "Audio Description":#audio_description specifies an AudioSelector - // or AudioSelectorGroup for which no matching source is found in the input, - // then the audio selector marked as DEFAULT will be used. If none are marked - // as default, silence will be inserted for the duration of the input. - DefaultSelection *string `locationName:"defaultSelection" type:"string" enum:"AudioDefaultSelection"` - - // Specifies audio data from an external file source. Auto populated when Infer - // External Filename is checked - ExternalAudioFileInput *string `locationName:"externalAudioFileInput" type:"string"` - - // Selects a specific language code from within an audio source. - LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` - - // Specifies a time delta in milliseconds to offset the audio from the input - // video. 
- Offset *int64 `locationName:"offset" type:"integer"` - - // Selects a specific PID from within an audio source (e.g. 257 selects PID - // 0x101). - Pids []*int64 `locationName:"pids" type:"list"` - - // Applies only when input streams contain Dolby E. Enter the program ID (according - // to the metadata in the audio) of the Dolby E program to extract from the - // specified track. One program extracted per audio selector. To select multiple - // programs, create multiple selectors with the same Track and different Program - // numbers. "All channels" means to ignore the program IDs and include all the - // channels in this selector; useful if metadata is known to be incorrect. - ProgramSelection *int64 `locationName:"programSelection" type:"integer"` - - // Advanced audio remixing settings. - RemixSettings *RemixSettings `locationName:"remixSettings" type:"structure"` - - // Specifies the type of the audio selector. - SelectorType *string `locationName:"selectorType" type:"string" enum:"AudioSelectorType"` - - // Identify the channel to include in this selector by entering the 1-based - // track index. To combine several tracks, enter a comma-separated list, e.g. - // "1,2,3" for tracks 1-3. - Tracks []*int64 `locationName:"tracks" type:"list"` -} - -// String returns the string representation -func (s AudioSelector) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioSelector) GoString() string { - return s.String() -} - -// SetDefaultSelection sets the DefaultSelection field's value. -func (s *AudioSelector) SetDefaultSelection(v string) *AudioSelector { - s.DefaultSelection = &v - return s -} - -// SetExternalAudioFileInput sets the ExternalAudioFileInput field's value. -func (s *AudioSelector) SetExternalAudioFileInput(v string) *AudioSelector { - s.ExternalAudioFileInput = &v - return s -} - -// SetLanguageCode sets the LanguageCode field's value. 
-func (s *AudioSelector) SetLanguageCode(v string) *AudioSelector { - s.LanguageCode = &v - return s -} - -// SetOffset sets the Offset field's value. -func (s *AudioSelector) SetOffset(v int64) *AudioSelector { - s.Offset = &v - return s -} - -// SetPids sets the Pids field's value. -func (s *AudioSelector) SetPids(v []*int64) *AudioSelector { - s.Pids = v - return s -} - -// SetProgramSelection sets the ProgramSelection field's value. -func (s *AudioSelector) SetProgramSelection(v int64) *AudioSelector { - s.ProgramSelection = &v - return s -} - -// SetRemixSettings sets the RemixSettings field's value. -func (s *AudioSelector) SetRemixSettings(v *RemixSettings) *AudioSelector { - s.RemixSettings = v - return s -} - -// SetSelectorType sets the SelectorType field's value. -func (s *AudioSelector) SetSelectorType(v string) *AudioSelector { - s.SelectorType = &v - return s -} - -// SetTracks sets the Tracks field's value. -func (s *AudioSelector) SetTracks(v []*int64) *AudioSelector { - s.Tracks = v - return s -} - -// Group of Audio Selectors -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AudioSelectorGroup -type AudioSelectorGroup struct { - _ struct{} `type:"structure"` - - // Name of an "Audio Selector":#inputs-audio_selector within the same input - // to include in the group. Audio selector names are standardized, based on - // their order within the input (e.g. "Audio Selector 1"). The audio_selector_name - // parameter can be repeated to add any number of audio selectors to the group. - AudioSelectorNames []*string `locationName:"audioSelectorNames" type:"list"` -} - -// String returns the string representation -func (s AudioSelectorGroup) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioSelectorGroup) GoString() string { - return s.String() -} - -// SetAudioSelectorNames sets the AudioSelectorNames field's value. 
-func (s *AudioSelectorGroup) SetAudioSelectorNames(v []*string) *AudioSelectorGroup { - s.AudioSelectorNames = v - return s -} - -// Settings for Avail Blanking -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AvailBlanking -type AvailBlanking struct { - _ struct{} `type:"structure"` - - // Blanking image to be used. Leave empty for solid black. Only bmp and png - // images are supported. - AvailBlankingImage *string `locationName:"availBlankingImage" type:"string"` -} - -// String returns the string representation -func (s AvailBlanking) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AvailBlanking) GoString() string { - return s.String() -} - -// SetAvailBlankingImage sets the AvailBlankingImage field's value. -func (s *AvailBlanking) SetAvailBlankingImage(v string) *AvailBlanking { - s.AvailBlankingImage = &v - return s -} - -// Burn-In Destination Settings. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/BurninDestinationSettings -type BurninDestinationSettings struct { - _ struct{} `type:"structure"` - - // If no explicit x_position or y_position is provided, setting alignment to - // centered will place the captions at the bottom center of the output. Similarly, - // setting a left alignment will align captions to the bottom left of the output. - // If x and y positions are given in conjunction with the alignment parameter, - // the font will be justified (either left or centered) relative to those coordinates. - // This option is not valid for source captions that are STL, 608/embedded or - // teletext. These source settings are already pre-defined by the caption stream. - // All burn-in and DVB-Sub font settings must match. - Alignment *string `locationName:"alignment" type:"string" enum:"BurninSubtitleAlignment"` - - // Specifies the color of the rectangle behind the captions.All burn-in and - // DVB-Sub font settings must match. 
- BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"BurninSubtitleBackgroundColor"` - - // Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. - // Leaving this parameter blank is equivalent to setting it to 0 (transparent). - // All burn-in and DVB-Sub font settings must match. - BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"` - - // Specifies the color of the burned-in captions. This option is not valid for - // source captions that are STL, 608/embedded or teletext. These source settings - // are already pre-defined by the caption stream. All burn-in and DVB-Sub font - // settings must match. - FontColor *string `locationName:"fontColor" type:"string" enum:"BurninSubtitleFontColor"` - - // Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent.All - // burn-in and DVB-Sub font settings must match. - FontOpacity *int64 `locationName:"fontOpacity" type:"integer"` - - // Font resolution in DPI (dots per inch); default is 96 dpi.All burn-in and - // DVB-Sub font settings must match. - FontResolution *int64 `locationName:"fontResolution" type:"integer"` - - // A positive integer indicates the exact font size in points. Set to 0 for - // automatic font size selection. All burn-in and DVB-Sub font settings must - // match. - FontSize *int64 `locationName:"fontSize" type:"integer"` - - // Specifies font outline color. This option is not valid for source captions - // that are either 608/embedded or teletext. These source settings are already - // pre-defined by the caption stream. All burn-in and DVB-Sub font settings - // must match. - OutlineColor *string `locationName:"outlineColor" type:"string" enum:"BurninSubtitleOutlineColor"` - - // Specifies font outline size in pixels. This option is not valid for source - // captions that are either 608/embedded or teletext. These source settings - // are already pre-defined by the caption stream. 
All burn-in and DVB-Sub font - // settings must match. - OutlineSize *int64 `locationName:"outlineSize" type:"integer"` - - // Specifies the color of the shadow cast by the captions.All burn-in and DVB-Sub - // font settings must match. - ShadowColor *string `locationName:"shadowColor" type:"string" enum:"BurninSubtitleShadowColor"` - - // Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving - // this parameter blank is equivalent to setting it to 0 (transparent). All - // burn-in and DVB-Sub font settings must match. - ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"` - - // Specifies the horizontal offset of the shadow relative to the captions in - // pixels. A value of -2 would result in a shadow offset 2 pixels to the left. - // All burn-in and DVB-Sub font settings must match. - ShadowXOffset *int64 `locationName:"shadowXOffset" type:"integer"` - - // Specifies the vertical offset of the shadow relative to the captions in pixels. - // A value of -2 would result in a shadow offset 2 pixels above the text. All - // burn-in and DVB-Sub font settings must match. - ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"` - - // Controls whether a fixed grid size or proportional font spacing will be used - // to generate the output subtitles bitmap. Only applicable for Teletext inputs - // and DVB-Sub/Burn-in outputs. - TeletextSpacing *string `locationName:"teletextSpacing" type:"string" enum:"BurninSubtitleTeletextSpacing"` - - // Specifies the horizontal position of the caption relative to the left side - // of the output in pixels. A value of 10 would result in the captions starting - // 10 pixels from the left of the output. If no explicit x_position is provided, - // the horizontal caption position will be determined by the alignment parameter. - // This option is not valid for source captions that are STL, 608/embedded or - // teletext. These source settings are already pre-defined by the caption stream. 
- // All burn-in and DVB-Sub font settings must match. - XPosition *int64 `locationName:"xPosition" type:"integer"` - - // Specifies the vertical position of the caption relative to the top of the - // output in pixels. A value of 10 would result in the captions starting 10 - // pixels from the top of the output. If no explicit y_position is provided, - // the caption will be positioned towards the bottom of the output. This option - // is not valid for source captions that are STL, 608/embedded or teletext. - // These source settings are already pre-defined by the caption stream. All - // burn-in and DVB-Sub font settings must match. - YPosition *int64 `locationName:"yPosition" type:"integer"` -} - -// String returns the string representation -func (s BurninDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BurninDestinationSettings) GoString() string { - return s.String() -} - -// SetAlignment sets the Alignment field's value. -func (s *BurninDestinationSettings) SetAlignment(v string) *BurninDestinationSettings { - s.Alignment = &v - return s -} - -// SetBackgroundColor sets the BackgroundColor field's value. -func (s *BurninDestinationSettings) SetBackgroundColor(v string) *BurninDestinationSettings { - s.BackgroundColor = &v - return s -} - -// SetBackgroundOpacity sets the BackgroundOpacity field's value. -func (s *BurninDestinationSettings) SetBackgroundOpacity(v int64) *BurninDestinationSettings { - s.BackgroundOpacity = &v - return s -} - -// SetFontColor sets the FontColor field's value. -func (s *BurninDestinationSettings) SetFontColor(v string) *BurninDestinationSettings { - s.FontColor = &v - return s -} - -// SetFontOpacity sets the FontOpacity field's value. -func (s *BurninDestinationSettings) SetFontOpacity(v int64) *BurninDestinationSettings { - s.FontOpacity = &v - return s -} - -// SetFontResolution sets the FontResolution field's value. 
-func (s *BurninDestinationSettings) SetFontResolution(v int64) *BurninDestinationSettings { - s.FontResolution = &v - return s -} - -// SetFontSize sets the FontSize field's value. -func (s *BurninDestinationSettings) SetFontSize(v int64) *BurninDestinationSettings { - s.FontSize = &v - return s -} - -// SetOutlineColor sets the OutlineColor field's value. -func (s *BurninDestinationSettings) SetOutlineColor(v string) *BurninDestinationSettings { - s.OutlineColor = &v - return s -} - -// SetOutlineSize sets the OutlineSize field's value. -func (s *BurninDestinationSettings) SetOutlineSize(v int64) *BurninDestinationSettings { - s.OutlineSize = &v - return s -} - -// SetShadowColor sets the ShadowColor field's value. -func (s *BurninDestinationSettings) SetShadowColor(v string) *BurninDestinationSettings { - s.ShadowColor = &v - return s -} - -// SetShadowOpacity sets the ShadowOpacity field's value. -func (s *BurninDestinationSettings) SetShadowOpacity(v int64) *BurninDestinationSettings { - s.ShadowOpacity = &v - return s -} - -// SetShadowXOffset sets the ShadowXOffset field's value. -func (s *BurninDestinationSettings) SetShadowXOffset(v int64) *BurninDestinationSettings { - s.ShadowXOffset = &v - return s -} - -// SetShadowYOffset sets the ShadowYOffset field's value. -func (s *BurninDestinationSettings) SetShadowYOffset(v int64) *BurninDestinationSettings { - s.ShadowYOffset = &v - return s -} - -// SetTeletextSpacing sets the TeletextSpacing field's value. -func (s *BurninDestinationSettings) SetTeletextSpacing(v string) *BurninDestinationSettings { - s.TeletextSpacing = &v - return s -} - -// SetXPosition sets the XPosition field's value. -func (s *BurninDestinationSettings) SetXPosition(v int64) *BurninDestinationSettings { - s.XPosition = &v - return s -} - -// SetYPosition sets the YPosition field's value. 
-func (s *BurninDestinationSettings) SetYPosition(v int64) *BurninDestinationSettings { - s.YPosition = &v - return s -} - -// Cancel a job by sending a request with the job ID -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CancelJobRequest -type CancelJobInput struct { - _ struct{} `type:"structure"` - - // The Job ID of the job to be cancelled. - // - // Id is a required field - Id *string `location:"uri" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s CancelJobInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CancelJobInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CancelJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CancelJobInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetId sets the Id field's value. -func (s *CancelJobInput) SetId(v string) *CancelJobInput { - s.Id = &v - return s -} - -// A cancel job request will receive a response with an empty body. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CancelJobResponse -type CancelJobOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CancelJobOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CancelJobOutput) GoString() string { - return s.String() -} - -// Description of Caption output -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CaptionDescription -type CaptionDescription struct { - _ struct{} `type:"structure"` - - // Specifies which "Caption Selector":#inputs-caption_selector to use from each - // input when generating captions. The name should be of the format "Caption - // Selector ", which denotes that the Nth Caption Selector will be used from - // each input. - CaptionSelectorName *string `locationName:"captionSelectorName" type:"string"` - - // Specific settings required by destination type. Note that burnin_destination_settings - // are not available if the source of the caption data is Embedded or Teletext. - DestinationSettings *CaptionDestinationSettings `locationName:"destinationSettings" type:"structure"` - - // Indicates the language of the caption output track. - LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` - - // Human readable information to indicate captions available for players (eg. - // English, or Spanish). Alphanumeric characters, spaces, and underscore are - // legal. - LanguageDescription *string `locationName:"languageDescription" type:"string"` -} - -// String returns the string representation -func (s CaptionDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CaptionDescription) GoString() string { - return s.String() -} - -// SetCaptionSelectorName sets the CaptionSelectorName field's value. 
-func (s *CaptionDescription) SetCaptionSelectorName(v string) *CaptionDescription { - s.CaptionSelectorName = &v - return s -} - -// SetDestinationSettings sets the DestinationSettings field's value. -func (s *CaptionDescription) SetDestinationSettings(v *CaptionDestinationSettings) *CaptionDescription { - s.DestinationSettings = v - return s -} - -// SetLanguageCode sets the LanguageCode field's value. -func (s *CaptionDescription) SetLanguageCode(v string) *CaptionDescription { - s.LanguageCode = &v - return s -} - -// SetLanguageDescription sets the LanguageDescription field's value. -func (s *CaptionDescription) SetLanguageDescription(v string) *CaptionDescription { - s.LanguageDescription = &v - return s -} - -// Caption Description for preset -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CaptionDescriptionPreset -type CaptionDescriptionPreset struct { - _ struct{} `type:"structure"` - - // Specific settings required by destination type. Note that burnin_destination_settings - // are not available if the source of the caption data is Embedded or Teletext. - DestinationSettings *CaptionDestinationSettings `locationName:"destinationSettings" type:"structure"` - - // Indicates the language of the caption output track. - LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` - - // Human readable information to indicate captions available for players (eg. - // English, or Spanish). Alphanumeric characters, spaces, and underscore are - // legal. - LanguageDescription *string `locationName:"languageDescription" type:"string"` -} - -// String returns the string representation -func (s CaptionDescriptionPreset) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CaptionDescriptionPreset) GoString() string { - return s.String() -} - -// SetDestinationSettings sets the DestinationSettings field's value. 
-func (s *CaptionDescriptionPreset) SetDestinationSettings(v *CaptionDestinationSettings) *CaptionDescriptionPreset { - s.DestinationSettings = v - return s -} - -// SetLanguageCode sets the LanguageCode field's value. -func (s *CaptionDescriptionPreset) SetLanguageCode(v string) *CaptionDescriptionPreset { - s.LanguageCode = &v - return s -} - -// SetLanguageDescription sets the LanguageDescription field's value. -func (s *CaptionDescriptionPreset) SetLanguageDescription(v string) *CaptionDescriptionPreset { - s.LanguageDescription = &v - return s -} - -// Specific settings required by destination type. Note that burnin_destination_settings -// are not available if the source of the caption data is Embedded or Teletext. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CaptionDestinationSettings -type CaptionDestinationSettings struct { - _ struct{} `type:"structure"` - - // Burn-In Destination Settings. - BurninDestinationSettings *BurninDestinationSettings `locationName:"burninDestinationSettings" type:"structure"` - - // Type of Caption output, including Burn-In, Embedded, SCC, SRT, TTML, WebVTT, - // DVB-Sub, Teletext. - DestinationType *string `locationName:"destinationType" type:"string" enum:"CaptionDestinationType"` - - // DVB-Sub Destination Settings - DvbSubDestinationSettings *DvbSubDestinationSettings `locationName:"dvbSubDestinationSettings" type:"structure"` - - // Settings for SCC caption output. - SccDestinationSettings *SccDestinationSettings `locationName:"sccDestinationSettings" type:"structure"` - - // Settings for Teletext caption output - TeletextDestinationSettings *TeletextDestinationSettings `locationName:"teletextDestinationSettings" type:"structure"` - - // Settings specific to TTML caption outputs, including Pass style information - // (TtmlStylePassthrough). 
- TtmlDestinationSettings *TtmlDestinationSettings `locationName:"ttmlDestinationSettings" type:"structure"` -} - -// String returns the string representation -func (s CaptionDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CaptionDestinationSettings) GoString() string { - return s.String() -} - -// SetBurninDestinationSettings sets the BurninDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetBurninDestinationSettings(v *BurninDestinationSettings) *CaptionDestinationSettings { - s.BurninDestinationSettings = v - return s -} - -// SetDestinationType sets the DestinationType field's value. -func (s *CaptionDestinationSettings) SetDestinationType(v string) *CaptionDestinationSettings { - s.DestinationType = &v - return s -} - -// SetDvbSubDestinationSettings sets the DvbSubDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetDvbSubDestinationSettings(v *DvbSubDestinationSettings) *CaptionDestinationSettings { - s.DvbSubDestinationSettings = v - return s -} - -// SetSccDestinationSettings sets the SccDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetSccDestinationSettings(v *SccDestinationSettings) *CaptionDestinationSettings { - s.SccDestinationSettings = v - return s -} - -// SetTeletextDestinationSettings sets the TeletextDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetTeletextDestinationSettings(v *TeletextDestinationSettings) *CaptionDestinationSettings { - s.TeletextDestinationSettings = v - return s -} - -// SetTtmlDestinationSettings sets the TtmlDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetTtmlDestinationSettings(v *TtmlDestinationSettings) *CaptionDestinationSettings { - s.TtmlDestinationSettings = v - return s -} - -// Caption inputs to be mapped to caption outputs. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CaptionSelector -type CaptionSelector struct { - _ struct{} `type:"structure"` - - // The specific language to extract from source. If input is SCTE-27, complete - // this field and/or PID to select the caption language to extract. If input - // is DVB-Sub and output is Burn-in or SMPTE-TT, complete this field and/or - // PID to select the caption language to extract. If input is DVB-Sub that is - // being passed through, omit this field (and PID field); there is no way to - // extract a specific language with pass-through captions. - LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` - - // Source settings (SourceSettings) contains the group of settings for captions - // in the input. - SourceSettings *CaptionSourceSettings `locationName:"sourceSettings" type:"structure"` -} - -// String returns the string representation -func (s CaptionSelector) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CaptionSelector) GoString() string { - return s.String() -} - -// SetLanguageCode sets the LanguageCode field's value. -func (s *CaptionSelector) SetLanguageCode(v string) *CaptionSelector { - s.LanguageCode = &v - return s -} - -// SetSourceSettings sets the SourceSettings field's value. -func (s *CaptionSelector) SetSourceSettings(v *CaptionSourceSettings) *CaptionSelector { - s.SourceSettings = v - return s -} - -// Source settings (SourceSettings) contains the group of settings for captions -// in the input. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CaptionSourceSettings -type CaptionSourceSettings struct { - _ struct{} `type:"structure"` - - // Settings for ancillary captions source. 
- AncillarySourceSettings *AncillarySourceSettings `locationName:"ancillarySourceSettings" type:"structure"` - - // DVB Sub Source Settings - DvbSubSourceSettings *DvbSubSourceSettings `locationName:"dvbSubSourceSettings" type:"structure"` - - // Settings for embedded captions Source - EmbeddedSourceSettings *EmbeddedSourceSettings `locationName:"embeddedSourceSettings" type:"structure"` - - // Settings for File-based Captions in Source - FileSourceSettings *FileSourceSettings `locationName:"fileSourceSettings" type:"structure"` - - // Use Source (SourceType) to identify the format of your input captions. The - // service cannot auto-detect caption format. - SourceType *string `locationName:"sourceType" type:"string" enum:"CaptionSourceType"` - - // Settings specific to Teletext caption sources, including Page number. - TeletextSourceSettings *TeletextSourceSettings `locationName:"teletextSourceSettings" type:"structure"` -} - -// String returns the string representation -func (s CaptionSourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CaptionSourceSettings) GoString() string { - return s.String() -} - -// SetAncillarySourceSettings sets the AncillarySourceSettings field's value. -func (s *CaptionSourceSettings) SetAncillarySourceSettings(v *AncillarySourceSettings) *CaptionSourceSettings { - s.AncillarySourceSettings = v - return s -} - -// SetDvbSubSourceSettings sets the DvbSubSourceSettings field's value. -func (s *CaptionSourceSettings) SetDvbSubSourceSettings(v *DvbSubSourceSettings) *CaptionSourceSettings { - s.DvbSubSourceSettings = v - return s -} - -// SetEmbeddedSourceSettings sets the EmbeddedSourceSettings field's value. -func (s *CaptionSourceSettings) SetEmbeddedSourceSettings(v *EmbeddedSourceSettings) *CaptionSourceSettings { - s.EmbeddedSourceSettings = v - return s -} - -// SetFileSourceSettings sets the FileSourceSettings field's value. 
-func (s *CaptionSourceSettings) SetFileSourceSettings(v *FileSourceSettings) *CaptionSourceSettings { - s.FileSourceSettings = v - return s -} - -// SetSourceType sets the SourceType field's value. -func (s *CaptionSourceSettings) SetSourceType(v string) *CaptionSourceSettings { - s.SourceType = &v - return s -} - -// SetTeletextSourceSettings sets the TeletextSourceSettings field's value. -func (s *CaptionSourceSettings) SetTeletextSourceSettings(v *TeletextSourceSettings) *CaptionSourceSettings { - s.TeletextSourceSettings = v - return s -} - -// Channel mapping (ChannelMapping) contains the group of fields that hold the -// remixing value for each channel. Units are in dB. Acceptable values are within -// the range from -60 (mute) through 6. A setting of 0 passes the input channel -// unchanged to the output channel (no attenuation or amplification). -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ChannelMapping -type ChannelMapping struct { - _ struct{} `type:"structure"` - - // List of output channels - OutputChannels []*OutputChannelMapping `locationName:"outputChannels" type:"list"` -} - -// String returns the string representation -func (s ChannelMapping) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChannelMapping) GoString() string { - return s.String() -} - -// SetOutputChannels sets the OutputChannels field's value. -func (s *ChannelMapping) SetOutputChannels(v []*OutputChannelMapping) *ChannelMapping { - s.OutputChannels = v - return s -} - -// Settings for color correction. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ColorCorrector -type ColorCorrector struct { - _ struct{} `type:"structure"` - - // Brightness level. - Brightness *int64 `locationName:"brightness" type:"integer"` - - // Determines if colorspace conversion will be performed. If set to _None_, - // no conversion will be performed. 
If _Force 601_ or _Force 709_ are selected, - // conversion will be performed for inputs with differing colorspaces. An input's - // colorspace can be specified explicitly in the "Video Selector":#inputs-video_selector - // if necessary. - ColorSpaceConversion *string `locationName:"colorSpaceConversion" type:"string" enum:"ColorSpaceConversion"` - - // Contrast level. - Contrast *int64 `locationName:"contrast" type:"integer"` - - // Use the HDR master display (Hdr10Metadata) settings to provide values for - // HDR color. These values vary depending on the input video and must be provided - // by a color grader. Range is 0 to 50,000, each increment represents 0.00002 - // in CIE1931 color coordinate. - Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"` - - // Hue in degrees. - Hue *int64 `locationName:"hue" type:"integer"` - - // Saturation level. - Saturation *int64 `locationName:"saturation" type:"integer"` -} - -// String returns the string representation -func (s ColorCorrector) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ColorCorrector) GoString() string { - return s.String() -} - -// SetBrightness sets the Brightness field's value. -func (s *ColorCorrector) SetBrightness(v int64) *ColorCorrector { - s.Brightness = &v - return s -} - -// SetColorSpaceConversion sets the ColorSpaceConversion field's value. -func (s *ColorCorrector) SetColorSpaceConversion(v string) *ColorCorrector { - s.ColorSpaceConversion = &v - return s -} - -// SetContrast sets the Contrast field's value. -func (s *ColorCorrector) SetContrast(v int64) *ColorCorrector { - s.Contrast = &v - return s -} - -// SetHdr10Metadata sets the Hdr10Metadata field's value. -func (s *ColorCorrector) SetHdr10Metadata(v *Hdr10Metadata) *ColorCorrector { - s.Hdr10Metadata = v - return s -} - -// SetHue sets the Hue field's value. 
-func (s *ColorCorrector) SetHue(v int64) *ColorCorrector { - s.Hue = &v - return s -} - -// SetSaturation sets the Saturation field's value. -func (s *ColorCorrector) SetSaturation(v int64) *ColorCorrector { - s.Saturation = &v - return s -} - -// Container specific settings. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ContainerSettings -type ContainerSettings struct { - _ struct{} `type:"structure"` - - // Container for this output. Some containers require a container settings object. - // If not specified, the default object will be created. - Container *string `locationName:"container" type:"string" enum:"ContainerType"` - - // Settings for F4v container - F4vSettings *F4vSettings `locationName:"f4vSettings" type:"structure"` - - // Settings for M2TS Container. - M2tsSettings *M2tsSettings `locationName:"m2tsSettings" type:"structure"` - - // Settings for TS segments in HLS - M3u8Settings *M3u8Settings `locationName:"m3u8Settings" type:"structure"` - - // Settings for MOV Container. - MovSettings *MovSettings `locationName:"movSettings" type:"structure"` - - // Settings for MP4 Container - Mp4Settings *Mp4Settings `locationName:"mp4Settings" type:"structure"` -} - -// String returns the string representation -func (s ContainerSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ContainerSettings) GoString() string { - return s.String() -} - -// SetContainer sets the Container field's value. -func (s *ContainerSettings) SetContainer(v string) *ContainerSettings { - s.Container = &v - return s -} - -// SetF4vSettings sets the F4vSettings field's value. -func (s *ContainerSettings) SetF4vSettings(v *F4vSettings) *ContainerSettings { - s.F4vSettings = v - return s -} - -// SetM2tsSettings sets the M2tsSettings field's value. 
-func (s *ContainerSettings) SetM2tsSettings(v *M2tsSettings) *ContainerSettings { - s.M2tsSettings = v - return s -} - -// SetM3u8Settings sets the M3u8Settings field's value. -func (s *ContainerSettings) SetM3u8Settings(v *M3u8Settings) *ContainerSettings { - s.M3u8Settings = v - return s -} - -// SetMovSettings sets the MovSettings field's value. -func (s *ContainerSettings) SetMovSettings(v *MovSettings) *ContainerSettings { - s.MovSettings = v - return s -} - -// SetMp4Settings sets the Mp4Settings field's value. -func (s *ContainerSettings) SetMp4Settings(v *Mp4Settings) *ContainerSettings { - s.Mp4Settings = v - return s -} - -// Send your create job request with your job settings and IAM role. Optionally, -// include user metadata and the ARN for the queue. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJobRequest -type CreateJobInput struct { - _ struct{} `type:"structure"` - - // Idempotency token for CreateJob operation. - ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` - - // When you create a job, you can either specify a job template or specify the - // transcoding settings individually - JobTemplate *string `locationName:"jobTemplate" type:"string"` - - // Optional. When you create a job, you can specify a queue to send it to. If - // you don't specify, the job will go to the default queue. For more about queues, - // see the User Guide topic at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html. - Queue *string `locationName:"queue" type:"string"` - - // Required. The IAM role you use for creating this job. For details about permissions, - // see the User Guide topic at the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html. - Role *string `locationName:"role" type:"string"` - - // JobSettings contains all the transcode settings for a job. 
- Settings *JobSettings `locationName:"settings" type:"structure"` - - // User-defined metadata that you want to associate with an MediaConvert job. - // You specify metadata in key/value pairs. - UserMetadata map[string]*string `locationName:"userMetadata" type:"map"` -} - -// String returns the string representation -func (s CreateJobInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateJobInput) GoString() string { - return s.String() -} - -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *CreateJobInput) SetClientRequestToken(v string) *CreateJobInput { - s.ClientRequestToken = &v - return s -} - -// SetJobTemplate sets the JobTemplate field's value. -func (s *CreateJobInput) SetJobTemplate(v string) *CreateJobInput { - s.JobTemplate = &v - return s -} - -// SetQueue sets the Queue field's value. -func (s *CreateJobInput) SetQueue(v string) *CreateJobInput { - s.Queue = &v - return s -} - -// SetRole sets the Role field's value. -func (s *CreateJobInput) SetRole(v string) *CreateJobInput { - s.Role = &v - return s -} - -// SetSettings sets the Settings field's value. -func (s *CreateJobInput) SetSettings(v *JobSettings) *CreateJobInput { - s.Settings = v - return s -} - -// SetUserMetadata sets the UserMetadata field's value. -func (s *CreateJobInput) SetUserMetadata(v map[string]*string) *CreateJobInput { - s.UserMetadata = v - return s -} - -// Successful create job requests will return the job JSON. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJobResponse -type CreateJobOutput struct { - _ struct{} `type:"structure"` - - // Each job converts an input file into an output file or files. 
For more information, - // see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html - Job *Job `locationName:"job" type:"structure"` -} - -// String returns the string representation -func (s CreateJobOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateJobOutput) GoString() string { - return s.String() -} - -// SetJob sets the Job field's value. -func (s *CreateJobOutput) SetJob(v *Job) *CreateJobOutput { - s.Job = v - return s -} - -// Send your create job template request with the name of the template and the -// JSON for the template. The template JSON should include everything in a valid -// job, except for input location and filename, IAM role, and user metadata. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJobTemplateRequest -type CreateJobTemplateInput struct { - _ struct{} `type:"structure"` - - // Optional. A category for the job template you are creating - Category *string `locationName:"category" type:"string"` - - // Optional. A description of the job template you are creating. - Description *string `locationName:"description" type:"string"` - - // The name of the job template you are creating. - Name *string `locationName:"name" type:"string"` - - // Optional. The queue that jobs created from this template are assigned to. - // If you don't specify this, jobs will go to the default queue. - Queue *string `locationName:"queue" type:"string"` - - // JobTemplateSettings contains all the transcode settings saved in the template - // that will be applied to jobs created from it. 
- Settings *JobTemplateSettings `locationName:"settings" type:"structure"` -} - -// String returns the string representation -func (s CreateJobTemplateInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateJobTemplateInput) GoString() string { - return s.String() -} - -// SetCategory sets the Category field's value. -func (s *CreateJobTemplateInput) SetCategory(v string) *CreateJobTemplateInput { - s.Category = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateJobTemplateInput) SetDescription(v string) *CreateJobTemplateInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateJobTemplateInput) SetName(v string) *CreateJobTemplateInput { - s.Name = &v - return s -} - -// SetQueue sets the Queue field's value. -func (s *CreateJobTemplateInput) SetQueue(v string) *CreateJobTemplateInput { - s.Queue = &v - return s -} - -// SetSettings sets the Settings field's value. -func (s *CreateJobTemplateInput) SetSettings(v *JobTemplateSettings) *CreateJobTemplateInput { - s.Settings = v - return s -} - -// Successful create job template requests will return the template JSON. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJobTemplateResponse -type CreateJobTemplateOutput struct { - _ struct{} `type:"structure"` - - // A job template is a pre-made set of encoding instructions that you can use - // to quickly create a job. - JobTemplate *JobTemplate `locationName:"jobTemplate" type:"structure"` -} - -// String returns the string representation -func (s CreateJobTemplateOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateJobTemplateOutput) GoString() string { - return s.String() -} - -// SetJobTemplate sets the JobTemplate field's value. 
-func (s *CreateJobTemplateOutput) SetJobTemplate(v *JobTemplate) *CreateJobTemplateOutput { - s.JobTemplate = v - return s -} - -// Send your create preset request with the name of the preset and the JSON -// for the output settings specified by the preset. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreatePresetRequest -type CreatePresetInput struct { - _ struct{} `type:"structure"` - - // Optional. A category for the preset you are creating. - Category *string `locationName:"category" type:"string"` - - // Optional. A description of the preset you are creating. - Description *string `locationName:"description" type:"string"` - - // The name of the preset you are creating. - Name *string `locationName:"name" type:"string"` - - // Settings for preset - Settings *PresetSettings `locationName:"settings" type:"structure"` -} - -// String returns the string representation -func (s CreatePresetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreatePresetInput) GoString() string { - return s.String() -} - -// SetCategory sets the Category field's value. -func (s *CreatePresetInput) SetCategory(v string) *CreatePresetInput { - s.Category = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreatePresetInput) SetDescription(v string) *CreatePresetInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreatePresetInput) SetName(v string) *CreatePresetInput { - s.Name = &v - return s -} - -// SetSettings sets the Settings field's value. -func (s *CreatePresetInput) SetSettings(v *PresetSettings) *CreatePresetInput { - s.Settings = v - return s -} - -// Successful create preset requests will return the preset JSON. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreatePresetResponse -type CreatePresetOutput struct { - _ struct{} `type:"structure"` - - // A preset is a collection of preconfigured media conversion settings that - // you want MediaConvert to apply to the output during the conversion process. - Preset *Preset `locationName:"preset" type:"structure"` -} - -// String returns the string representation -func (s CreatePresetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreatePresetOutput) GoString() string { - return s.String() -} - -// SetPreset sets the Preset field's value. -func (s *CreatePresetOutput) SetPreset(v *Preset) *CreatePresetOutput { - s.Preset = v - return s -} - -// Send your create queue request with the name of the queue. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateQueueRequest -type CreateQueueInput struct { - _ struct{} `type:"structure"` - - // Optional. A description of the queue you are creating. - Description *string `locationName:"description" type:"string"` - - // The name of the queue you are creating. - Name *string `locationName:"name" type:"string"` -} - -// String returns the string representation -func (s CreateQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateQueueInput) GoString() string { - return s.String() -} - -// SetDescription sets the Description field's value. -func (s *CreateQueueInput) SetDescription(v string) *CreateQueueInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateQueueInput) SetName(v string) *CreateQueueInput { - s.Name = &v - return s -} - -// Successful create queue requests will return the name of the queue you just -// created and information about it. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateQueueResponse -type CreateQueueOutput struct { - _ struct{} `type:"structure"` - - // MediaConvert jobs are submitted to a queue. Unless specified otherwise jobs - // are submitted to a built-in default queue. User can create additional queues - // to separate the jobs of different categories or priority. - Queue *Queue `locationName:"queue" type:"structure"` -} - -// String returns the string representation -func (s CreateQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateQueueOutput) GoString() string { - return s.String() -} - -// SetQueue sets the Queue field's value. -func (s *CreateQueueOutput) SetQueue(v *Queue) *CreateQueueOutput { - s.Queue = v - return s -} - -// Specifies DRM settings for DASH outputs. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DashIsoEncryptionSettings -type DashIsoEncryptionSettings struct { - _ struct{} `type:"structure"` - - // Settings for use with a SPEKE key provider - SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"` -} - -// String returns the string representation -func (s DashIsoEncryptionSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DashIsoEncryptionSettings) GoString() string { - return s.String() -} - -// SetSpekeKeyProvider sets the SpekeKeyProvider field's value. -func (s *DashIsoEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProvider) *DashIsoEncryptionSettings { - s.SpekeKeyProvider = v - return s -} - -// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to -// DASH_ISO_GROUP_SETTINGS. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DashIsoGroupSettings -type DashIsoGroupSettings struct { - _ struct{} `type:"structure"` - - // A partial URI prefix that will be put in the manifest (.mpd) file at the - // top level BaseURL element. Can be used if streams are delivered from a different - // URL than the manifest file. - BaseUrl *string `locationName:"baseUrl" type:"string"` - - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify - // the base filename in the URI, the service will use the filename of the input - // file. If your job has multiple inputs, the service uses the filename of the - // first input file. - Destination *string `locationName:"destination" type:"string"` - - // DRM settings. - Encryption *DashIsoEncryptionSettings `locationName:"encryption" type:"structure"` - - // Length of fragments to generate (in seconds). Fragment length must be compatible - // with GOP size and Framerate. Note that fragments will end on the next keyframe - // after this number of seconds, so actual fragment length may be longer. When - // Emit Single File is checked, the fragmentation is internal to a single output - // file and it does not cause the creation of many output files as in other - // output types. - FragmentLength *int64 `locationName:"fragmentLength" type:"integer"` - - // Supports HbbTV specification as indicated - HbbtvCompliance *string `locationName:"hbbtvCompliance" type:"string" enum:"DashIsoHbbtvCompliance"` - - // Minimum time of initially buffered media that is needed to ensure smooth - // playout. - MinBufferTime *int64 `locationName:"minBufferTime" type:"integer"` - - // When set to SINGLE_FILE, a single output file is generated, which is internally - // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, - // separate segment files will be created. 
- SegmentControl *string `locationName:"segmentControl" type:"string" enum:"DashIsoSegmentControl"` - - // Length of mpd segments to create (in seconds). Note that segments will end - // on the next keyframe after this number of seconds, so actual segment length - // may be longer. When Emit Single File is checked, the segmentation is internal - // to a single output file and it does not cause the creation of many output - // files as in other output types. - SegmentLength *int64 `locationName:"segmentLength" type:"integer"` -} - -// String returns the string representation -func (s DashIsoGroupSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DashIsoGroupSettings) GoString() string { - return s.String() -} - -// SetBaseUrl sets the BaseUrl field's value. -func (s *DashIsoGroupSettings) SetBaseUrl(v string) *DashIsoGroupSettings { - s.BaseUrl = &v - return s -} - -// SetDestination sets the Destination field's value. -func (s *DashIsoGroupSettings) SetDestination(v string) *DashIsoGroupSettings { - s.Destination = &v - return s -} - -// SetEncryption sets the Encryption field's value. -func (s *DashIsoGroupSettings) SetEncryption(v *DashIsoEncryptionSettings) *DashIsoGroupSettings { - s.Encryption = v - return s -} - -// SetFragmentLength sets the FragmentLength field's value. -func (s *DashIsoGroupSettings) SetFragmentLength(v int64) *DashIsoGroupSettings { - s.FragmentLength = &v - return s -} - -// SetHbbtvCompliance sets the HbbtvCompliance field's value. -func (s *DashIsoGroupSettings) SetHbbtvCompliance(v string) *DashIsoGroupSettings { - s.HbbtvCompliance = &v - return s -} - -// SetMinBufferTime sets the MinBufferTime field's value. -func (s *DashIsoGroupSettings) SetMinBufferTime(v int64) *DashIsoGroupSettings { - s.MinBufferTime = &v - return s -} - -// SetSegmentControl sets the SegmentControl field's value. 
-func (s *DashIsoGroupSettings) SetSegmentControl(v string) *DashIsoGroupSettings { - s.SegmentControl = &v - return s -} - -// SetSegmentLength sets the SegmentLength field's value. -func (s *DashIsoGroupSettings) SetSegmentLength(v int64) *DashIsoGroupSettings { - s.SegmentLength = &v - return s -} - -// Settings for deinterlacer -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Deinterlacer -type Deinterlacer struct { - _ struct{} `type:"structure"` - - // Only applies when you set Deinterlacer (DeinterlaceMode) to Deinterlace (DEINTERLACE) - // or Adaptive (ADAPTIVE). Motion adaptive interpolate (INTERPOLATE) produces - // sharper pictures, while blend (BLEND) produces smoother motion. Use (INTERPOLATE_TICKER) - // OR (BLEND_TICKER) if your source file includes a ticker, such as a scrolling - // headline at the bottom of the frame. - Algorithm *string `locationName:"algorithm" type:"string" enum:"DeinterlaceAlgorithm"` - - // - When set to NORMAL (default), the deinterlacer does not convert frames - // that are tagged in metadata as progressive. It will only convert those that - // are tagged as some other type. - When set to FORCE_ALL_FRAMES, the deinterlacer - // converts every frame to progressive - even those that are already tagged - // as progressive. Turn Force mode on only if there is a good chance that the - // metadata has tagged frames as progressive when they are not progressive. - // Do not turn on otherwise; processing frames that are already progressive - // into progressive will probably result in lower quality video. - Control *string `locationName:"control" type:"string" enum:"DeinterlacerControl"` - - // Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing. - // Default is Deinterlace. - Deinterlace converts interlaced to progressive. - // - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. - // - Adaptive auto-detects and converts to progressive. 
- Mode *string `locationName:"mode" type:"string" enum:"DeinterlacerMode"` -} - -// String returns the string representation -func (s Deinterlacer) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Deinterlacer) GoString() string { - return s.String() -} - -// SetAlgorithm sets the Algorithm field's value. -func (s *Deinterlacer) SetAlgorithm(v string) *Deinterlacer { - s.Algorithm = &v - return s -} - -// SetControl sets the Control field's value. -func (s *Deinterlacer) SetControl(v string) *Deinterlacer { - s.Control = &v - return s -} - -// SetMode sets the Mode field's value. -func (s *Deinterlacer) SetMode(v string) *Deinterlacer { - s.Mode = &v - return s -} - -// Delete a job template by sending a request with the job template name -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteJobTemplateRequest -type DeleteJobTemplateInput struct { - _ struct{} `type:"structure"` - - // The name of the job template to be deleted. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteJobTemplateInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteJobTemplateInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteJobTemplateInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteJobTemplateInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. 
-func (s *DeleteJobTemplateInput) SetName(v string) *DeleteJobTemplateInput { - s.Name = &v - return s -} - -// Delete job template requests will return an OK message or error message with -// an empty body. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteJobTemplateResponse -type DeleteJobTemplateOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteJobTemplateOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteJobTemplateOutput) GoString() string { - return s.String() -} - -// Delete a preset by sending a request with the preset name -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeletePresetRequest -type DeletePresetInput struct { - _ struct{} `type:"structure"` - - // The name of the preset to be deleted. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeletePresetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeletePresetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeletePresetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePresetInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeletePresetInput) SetName(v string) *DeletePresetInput { - s.Name = &v - return s -} - -// Delete preset requests will return an OK message or error message with an -// empty body. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeletePresetResponse -type DeletePresetOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeletePresetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeletePresetOutput) GoString() string { - return s.String() -} - -// Delete a queue by sending a request with the queue name -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteQueueRequest -type DeleteQueueInput struct { - _ struct{} `type:"structure"` - - // The name of the queue to be deleted. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteQueueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteQueueInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteQueueInput) SetName(v string) *DeleteQueueInput { - s.Name = &v - return s -} - -// Delete queue requests will return an OK message or error message with an -// empty body. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteQueueResponse -type DeleteQueueOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteQueueOutput) GoString() string { - return s.String() -} - -// Send an request with an empty body to the regional API endpoint to get your -// account API endpoint. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DescribeEndpointsRequest -type DescribeEndpointsInput struct { - _ struct{} `type:"structure"` - - // Optional. Max number of endpoints, up to twenty, that will be returned at - // one time. - MaxResults *int64 `locationName:"maxResults" type:"integer"` - - // Use this string, provided with the response to a previous request, to request - // the next batch of endpoints. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s DescribeEndpointsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeEndpointsInput) GoString() string { - return s.String() -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeEndpointsInput) SetMaxResults(v int64) *DescribeEndpointsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeEndpointsInput) SetNextToken(v string) *DescribeEndpointsInput { - s.NextToken = &v - return s -} - -// Successful describe endpoints requests will return your account API endpoint. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DescribeEndpointsResponse -type DescribeEndpointsOutput struct { - _ struct{} `type:"structure"` - - // List of endpoints - Endpoints []*Endpoint `locationName:"endpoints" type:"list"` - - // Use this string to request the next batch of endpoints. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s DescribeEndpointsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeEndpointsOutput) GoString() string { - return s.String() -} - -// SetEndpoints sets the Endpoints field's value. -func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput { - s.Endpoints = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeEndpointsOutput) SetNextToken(v string) *DescribeEndpointsOutput { - s.NextToken = &v - return s -} - -// Inserts DVB Network Information Table (NIT) at the specified table repetition -// interval. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DvbNitSettings -type DvbNitSettings struct { - _ struct{} `type:"structure"` - - // The numeric value placed in the Network Information Table (NIT). - NetworkId *int64 `locationName:"networkId" type:"integer"` - - // The network name text placed in the network_name_descriptor inside the Network - // Information Table. Maximum length is 256 characters. - NetworkName *string `locationName:"networkName" type:"string"` - - // The number of milliseconds between instances of this table in the output - // transport stream. 
- NitInterval *int64 `locationName:"nitInterval" type:"integer"` -} - -// String returns the string representation -func (s DvbNitSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DvbNitSettings) GoString() string { - return s.String() -} - -// SetNetworkId sets the NetworkId field's value. -func (s *DvbNitSettings) SetNetworkId(v int64) *DvbNitSettings { - s.NetworkId = &v - return s -} - -// SetNetworkName sets the NetworkName field's value. -func (s *DvbNitSettings) SetNetworkName(v string) *DvbNitSettings { - s.NetworkName = &v - return s -} - -// SetNitInterval sets the NitInterval field's value. -func (s *DvbNitSettings) SetNitInterval(v int64) *DvbNitSettings { - s.NitInterval = &v - return s -} - -// Inserts DVB Service Description Table (NIT) at the specified table repetition -// interval. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DvbSdtSettings -type DvbSdtSettings struct { - _ struct{} `type:"structure"` - - // Selects method of inserting SDT information into output stream. "Follow input - // SDT" copies SDT information from input stream to output stream. "Follow input - // SDT if present" copies SDT information from input stream to output stream - // if SDT information is present in the input, otherwise it will fall back on - // the user-defined values. Enter "SDT Manually" means user will enter the SDT - // information. "No SDT" means output stream will not contain SDT information. - OutputSdt *string `locationName:"outputSdt" type:"string" enum:"OutputSdt"` - - // The number of milliseconds between instances of this table in the output - // transport stream. - SdtInterval *int64 `locationName:"sdtInterval" type:"integer"` - - // The service name placed in the service_descriptor in the Service Description - // Table. Maximum length is 256 characters. 
- ServiceName *string `locationName:"serviceName" type:"string"` - - // The service provider name placed in the service_descriptor in the Service - // Description Table. Maximum length is 256 characters. - ServiceProviderName *string `locationName:"serviceProviderName" type:"string"` -} - -// String returns the string representation -func (s DvbSdtSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DvbSdtSettings) GoString() string { - return s.String() -} - -// SetOutputSdt sets the OutputSdt field's value. -func (s *DvbSdtSettings) SetOutputSdt(v string) *DvbSdtSettings { - s.OutputSdt = &v - return s -} - -// SetSdtInterval sets the SdtInterval field's value. -func (s *DvbSdtSettings) SetSdtInterval(v int64) *DvbSdtSettings { - s.SdtInterval = &v - return s -} - -// SetServiceName sets the ServiceName field's value. -func (s *DvbSdtSettings) SetServiceName(v string) *DvbSdtSettings { - s.ServiceName = &v - return s -} - -// SetServiceProviderName sets the ServiceProviderName field's value. -func (s *DvbSdtSettings) SetServiceProviderName(v string) *DvbSdtSettings { - s.ServiceProviderName = &v - return s -} - -// DVB-Sub Destination Settings -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DvbSubDestinationSettings -type DvbSubDestinationSettings struct { - _ struct{} `type:"structure"` - - // If no explicit x_position or y_position is provided, setting alignment to - // centered will place the captions at the bottom center of the output. Similarly, - // setting a left alignment will align captions to the bottom left of the output. - // If x and y positions are given in conjunction with the alignment parameter, - // the font will be justified (either left or centered) relative to those coordinates. - // This option is not valid for source captions that are STL, 608/embedded or - // teletext. These source settings are already pre-defined by the caption stream. 
- // All burn-in and DVB-Sub font settings must match. - Alignment *string `locationName:"alignment" type:"string" enum:"DvbSubtitleAlignment"` - - // Specifies the color of the rectangle behind the captions.All burn-in and - // DVB-Sub font settings must match. - BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"DvbSubtitleBackgroundColor"` - - // Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. - // Leaving this parameter blank is equivalent to setting it to 0 (transparent). - // All burn-in and DVB-Sub font settings must match. - BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"` - - // Specifies the color of the burned-in captions. This option is not valid for - // source captions that are STL, 608/embedded or teletext. These source settings - // are already pre-defined by the caption stream. All burn-in and DVB-Sub font - // settings must match. - FontColor *string `locationName:"fontColor" type:"string" enum:"DvbSubtitleFontColor"` - - // Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent.All - // burn-in and DVB-Sub font settings must match. - FontOpacity *int64 `locationName:"fontOpacity" type:"integer"` - - // Font resolution in DPI (dots per inch); default is 96 dpi.All burn-in and - // DVB-Sub font settings must match. - FontResolution *int64 `locationName:"fontResolution" type:"integer"` - - // A positive integer indicates the exact font size in points. Set to 0 for - // automatic font size selection. All burn-in and DVB-Sub font settings must - // match. - FontSize *int64 `locationName:"fontSize" type:"integer"` - - // Specifies font outline color. This option is not valid for source captions - // that are either 608/embedded or teletext. These source settings are already - // pre-defined by the caption stream. All burn-in and DVB-Sub font settings - // must match. 
- OutlineColor *string `locationName:"outlineColor" type:"string" enum:"DvbSubtitleOutlineColor"` - - // Specifies font outline size in pixels. This option is not valid for source - // captions that are either 608/embedded or teletext. These source settings - // are already pre-defined by the caption stream. All burn-in and DVB-Sub font - // settings must match. - OutlineSize *int64 `locationName:"outlineSize" type:"integer"` - - // Specifies the color of the shadow cast by the captions.All burn-in and DVB-Sub - // font settings must match. - ShadowColor *string `locationName:"shadowColor" type:"string" enum:"DvbSubtitleShadowColor"` - - // Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving - // this parameter blank is equivalent to setting it to 0 (transparent). All - // burn-in and DVB-Sub font settings must match. - ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"` - - // Specifies the horizontal offset of the shadow relative to the captions in - // pixels. A value of -2 would result in a shadow offset 2 pixels to the left. - // All burn-in and DVB-Sub font settings must match. - ShadowXOffset *int64 `locationName:"shadowXOffset" type:"integer"` - - // Specifies the vertical offset of the shadow relative to the captions in pixels. - // A value of -2 would result in a shadow offset 2 pixels above the text. All - // burn-in and DVB-Sub font settings must match. - ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"` - - // Controls whether a fixed grid size or proportional font spacing will be used - // to generate the output subtitles bitmap. Only applicable for Teletext inputs - // and DVB-Sub/Burn-in outputs. - TeletextSpacing *string `locationName:"teletextSpacing" type:"string" enum:"DvbSubtitleTeletextSpacing"` - - // Specifies the horizontal position of the caption relative to the left side - // of the output in pixels. 
A value of 10 would result in the captions starting - // 10 pixels from the left of the output. If no explicit x_position is provided, - // the horizontal caption position will be determined by the alignment parameter. - // This option is not valid for source captions that are STL, 608/embedded or - // teletext. These source settings are already pre-defined by the caption stream. - // All burn-in and DVB-Sub font settings must match. - XPosition *int64 `locationName:"xPosition" type:"integer"` - - // Specifies the vertical position of the caption relative to the top of the - // output in pixels. A value of 10 would result in the captions starting 10 - // pixels from the top of the output. If no explicit y_position is provided, - // the caption will be positioned towards the bottom of the output. This option - // is not valid for source captions that are STL, 608/embedded or teletext. - // These source settings are already pre-defined by the caption stream. All - // burn-in and DVB-Sub font settings must match. - YPosition *int64 `locationName:"yPosition" type:"integer"` -} - -// String returns the string representation -func (s DvbSubDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DvbSubDestinationSettings) GoString() string { - return s.String() -} - -// SetAlignment sets the Alignment field's value. -func (s *DvbSubDestinationSettings) SetAlignment(v string) *DvbSubDestinationSettings { - s.Alignment = &v - return s -} - -// SetBackgroundColor sets the BackgroundColor field's value. -func (s *DvbSubDestinationSettings) SetBackgroundColor(v string) *DvbSubDestinationSettings { - s.BackgroundColor = &v - return s -} - -// SetBackgroundOpacity sets the BackgroundOpacity field's value. -func (s *DvbSubDestinationSettings) SetBackgroundOpacity(v int64) *DvbSubDestinationSettings { - s.BackgroundOpacity = &v - return s -} - -// SetFontColor sets the FontColor field's value. 
-func (s *DvbSubDestinationSettings) SetFontColor(v string) *DvbSubDestinationSettings { - s.FontColor = &v - return s -} - -// SetFontOpacity sets the FontOpacity field's value. -func (s *DvbSubDestinationSettings) SetFontOpacity(v int64) *DvbSubDestinationSettings { - s.FontOpacity = &v - return s -} - -// SetFontResolution sets the FontResolution field's value. -func (s *DvbSubDestinationSettings) SetFontResolution(v int64) *DvbSubDestinationSettings { - s.FontResolution = &v - return s -} - -// SetFontSize sets the FontSize field's value. -func (s *DvbSubDestinationSettings) SetFontSize(v int64) *DvbSubDestinationSettings { - s.FontSize = &v - return s -} - -// SetOutlineColor sets the OutlineColor field's value. -func (s *DvbSubDestinationSettings) SetOutlineColor(v string) *DvbSubDestinationSettings { - s.OutlineColor = &v - return s -} - -// SetOutlineSize sets the OutlineSize field's value. -func (s *DvbSubDestinationSettings) SetOutlineSize(v int64) *DvbSubDestinationSettings { - s.OutlineSize = &v - return s -} - -// SetShadowColor sets the ShadowColor field's value. -func (s *DvbSubDestinationSettings) SetShadowColor(v string) *DvbSubDestinationSettings { - s.ShadowColor = &v - return s -} - -// SetShadowOpacity sets the ShadowOpacity field's value. -func (s *DvbSubDestinationSettings) SetShadowOpacity(v int64) *DvbSubDestinationSettings { - s.ShadowOpacity = &v - return s -} - -// SetShadowXOffset sets the ShadowXOffset field's value. -func (s *DvbSubDestinationSettings) SetShadowXOffset(v int64) *DvbSubDestinationSettings { - s.ShadowXOffset = &v - return s -} - -// SetShadowYOffset sets the ShadowYOffset field's value. -func (s *DvbSubDestinationSettings) SetShadowYOffset(v int64) *DvbSubDestinationSettings { - s.ShadowYOffset = &v - return s -} - -// SetTeletextSpacing sets the TeletextSpacing field's value. 
-func (s *DvbSubDestinationSettings) SetTeletextSpacing(v string) *DvbSubDestinationSettings { - s.TeletextSpacing = &v - return s -} - -// SetXPosition sets the XPosition field's value. -func (s *DvbSubDestinationSettings) SetXPosition(v int64) *DvbSubDestinationSettings { - s.XPosition = &v - return s -} - -// SetYPosition sets the YPosition field's value. -func (s *DvbSubDestinationSettings) SetYPosition(v int64) *DvbSubDestinationSettings { - s.YPosition = &v - return s -} - -// DVB Sub Source Settings -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DvbSubSourceSettings -type DvbSubSourceSettings struct { - _ struct{} `type:"structure"` - - // When using DVB-Sub with Burn-In or SMPTE-TT, use this PID for the source - // content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through, - // regardless of selectors. - Pid *int64 `locationName:"pid" type:"integer"` -} - -// String returns the string representation -func (s DvbSubSourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DvbSubSourceSettings) GoString() string { - return s.String() -} - -// SetPid sets the Pid field's value. -func (s *DvbSubSourceSettings) SetPid(v int64) *DvbSubSourceSettings { - s.Pid = &v - return s -} - -// Inserts DVB Time and Date Table (TDT) at the specified table repetition interval. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DvbTdtSettings -type DvbTdtSettings struct { - _ struct{} `type:"structure"` - - // The number of milliseconds between instances of this table in the output - // transport stream. 
- TdtInterval *int64 `locationName:"tdtInterval" type:"integer"` -} - -// String returns the string representation -func (s DvbTdtSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DvbTdtSettings) GoString() string { - return s.String() -} - -// SetTdtInterval sets the TdtInterval field's value. -func (s *DvbTdtSettings) SetTdtInterval(v int64) *DvbTdtSettings { - s.TdtInterval = &v - return s -} - -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value EAC3. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Eac3Settings -type Eac3Settings struct { - _ struct{} `type:"structure"` - - // If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround channels. - // Only used for 3/2 coding mode. - AttenuationControl *string `locationName:"attenuationControl" type:"string" enum:"Eac3AttenuationControl"` - - // Average bitrate in bits/second. Valid bitrates depend on the coding mode. - Bitrate *int64 `locationName:"bitrate" type:"integer"` - - // Specifies the "Bitstream Mode" (bsmod) for the emitted E-AC-3 stream. See - // ATSC A/52-2012 (Annex E) for background on these values. - BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Eac3BitstreamMode"` - - // Dolby Digital Plus coding mode. Determines number of channels. - CodingMode *string `locationName:"codingMode" type:"string" enum:"Eac3CodingMode"` - - // Activates a DC highpass filter for all input channels. - DcFilter *string `locationName:"dcFilter" type:"string" enum:"Eac3DcFilter"` - - // Sets the dialnorm for the output. If blank and input audio is Dolby Digital - // Plus, dialnorm will be passed through. - Dialnorm *int64 `locationName:"dialnorm" type:"integer"` - - // Enables Dynamic Range Compression that restricts the absolute peak level - // for a signal. 
- DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Eac3DynamicRangeCompressionLine"` - - // Enables Heavy Dynamic Range Compression, ensures that the instantaneous signal - // peaks do not exceed specified levels. - DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Eac3DynamicRangeCompressionRf"` - - // When encoding 3/2 audio, controls whether the LFE channel is enabled - LfeControl *string `locationName:"lfeControl" type:"string" enum:"Eac3LfeControl"` - - // Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only - // valid with 3_2_LFE coding mode. - LfeFilter *string `locationName:"lfeFilter" type:"string" enum:"Eac3LfeFilter"` - - // Left only/Right only center mix level. Only used for 3/2 coding mode.Valid - // values: 3.0, 1.5, 0.0, -1.5 -3.0 -4.5 -6.0 -60 - LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"` - - // Left only/Right only surround mix level. Only used for 3/2 coding mode.Valid - // values: -1.5 -3.0 -4.5 -6.0 -60 - LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"` - - // Left total/Right total center mix level. Only used for 3/2 coding mode.Valid - // values: 3.0, 1.5, 0.0, -1.5 -3.0 -4.5 -6.0 -60 - LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"` - - // Left total/Right total surround mix level. Only used for 3/2 coding mode.Valid - // values: -1.5 -3.0 -4.5 -6.0 -60 - LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"` - - // When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, - // or DolbyE decoder that supplied this audio data. If audio was not supplied - // from one of these streams, then the static metadata settings will be used. 
- MetadataControl *string `locationName:"metadataControl" type:"string" enum:"Eac3MetadataControl"` - - // When set to WHEN_POSSIBLE, input DD+ audio will be passed through if it is - // present on the input. this detection is dynamic over the life of the transcode. - // Inputs that alternate between DD+ and non-DD+ content will have a consistent - // DD+ output as the system alternates between passthrough and encoding. - PassthroughControl *string `locationName:"passthroughControl" type:"string" enum:"Eac3PassthroughControl"` - - // Controls the amount of phase-shift applied to the surround channels. Only - // used for 3/2 coding mode. - PhaseControl *string `locationName:"phaseControl" type:"string" enum:"Eac3PhaseControl"` - - // Sample rate in hz. Sample rate is always 48000. - SampleRate *int64 `locationName:"sampleRate" type:"integer"` - - // Stereo downmix preference. Only used for 3/2 coding mode. - StereoDownmix *string `locationName:"stereoDownmix" type:"string" enum:"Eac3StereoDownmix"` - - // When encoding 3/2 audio, sets whether an extra center back surround channel - // is matrix encoded into the left and right surround channels. - SurroundExMode *string `locationName:"surroundExMode" type:"string" enum:"Eac3SurroundExMode"` - - // When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into - // the two channels. - SurroundMode *string `locationName:"surroundMode" type:"string" enum:"Eac3SurroundMode"` -} - -// String returns the string representation -func (s Eac3Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Eac3Settings) GoString() string { - return s.String() -} - -// SetAttenuationControl sets the AttenuationControl field's value. -func (s *Eac3Settings) SetAttenuationControl(v string) *Eac3Settings { - s.AttenuationControl = &v - return s -} - -// SetBitrate sets the Bitrate field's value. 
-func (s *Eac3Settings) SetBitrate(v int64) *Eac3Settings { - s.Bitrate = &v - return s -} - -// SetBitstreamMode sets the BitstreamMode field's value. -func (s *Eac3Settings) SetBitstreamMode(v string) *Eac3Settings { - s.BitstreamMode = &v - return s -} - -// SetCodingMode sets the CodingMode field's value. -func (s *Eac3Settings) SetCodingMode(v string) *Eac3Settings { - s.CodingMode = &v - return s -} - -// SetDcFilter sets the DcFilter field's value. -func (s *Eac3Settings) SetDcFilter(v string) *Eac3Settings { - s.DcFilter = &v - return s -} - -// SetDialnorm sets the Dialnorm field's value. -func (s *Eac3Settings) SetDialnorm(v int64) *Eac3Settings { - s.Dialnorm = &v - return s -} - -// SetDynamicRangeCompressionLine sets the DynamicRangeCompressionLine field's value. -func (s *Eac3Settings) SetDynamicRangeCompressionLine(v string) *Eac3Settings { - s.DynamicRangeCompressionLine = &v - return s -} - -// SetDynamicRangeCompressionRf sets the DynamicRangeCompressionRf field's value. -func (s *Eac3Settings) SetDynamicRangeCompressionRf(v string) *Eac3Settings { - s.DynamicRangeCompressionRf = &v - return s -} - -// SetLfeControl sets the LfeControl field's value. -func (s *Eac3Settings) SetLfeControl(v string) *Eac3Settings { - s.LfeControl = &v - return s -} - -// SetLfeFilter sets the LfeFilter field's value. -func (s *Eac3Settings) SetLfeFilter(v string) *Eac3Settings { - s.LfeFilter = &v - return s -} - -// SetLoRoCenterMixLevel sets the LoRoCenterMixLevel field's value. -func (s *Eac3Settings) SetLoRoCenterMixLevel(v float64) *Eac3Settings { - s.LoRoCenterMixLevel = &v - return s -} - -// SetLoRoSurroundMixLevel sets the LoRoSurroundMixLevel field's value. -func (s *Eac3Settings) SetLoRoSurroundMixLevel(v float64) *Eac3Settings { - s.LoRoSurroundMixLevel = &v - return s -} - -// SetLtRtCenterMixLevel sets the LtRtCenterMixLevel field's value. 
-func (s *Eac3Settings) SetLtRtCenterMixLevel(v float64) *Eac3Settings { - s.LtRtCenterMixLevel = &v - return s -} - -// SetLtRtSurroundMixLevel sets the LtRtSurroundMixLevel field's value. -func (s *Eac3Settings) SetLtRtSurroundMixLevel(v float64) *Eac3Settings { - s.LtRtSurroundMixLevel = &v - return s -} - -// SetMetadataControl sets the MetadataControl field's value. -func (s *Eac3Settings) SetMetadataControl(v string) *Eac3Settings { - s.MetadataControl = &v - return s -} - -// SetPassthroughControl sets the PassthroughControl field's value. -func (s *Eac3Settings) SetPassthroughControl(v string) *Eac3Settings { - s.PassthroughControl = &v - return s -} - -// SetPhaseControl sets the PhaseControl field's value. -func (s *Eac3Settings) SetPhaseControl(v string) *Eac3Settings { - s.PhaseControl = &v - return s -} - -// SetSampleRate sets the SampleRate field's value. -func (s *Eac3Settings) SetSampleRate(v int64) *Eac3Settings { - s.SampleRate = &v - return s -} - -// SetStereoDownmix sets the StereoDownmix field's value. -func (s *Eac3Settings) SetStereoDownmix(v string) *Eac3Settings { - s.StereoDownmix = &v - return s -} - -// SetSurroundExMode sets the SurroundExMode field's value. -func (s *Eac3Settings) SetSurroundExMode(v string) *Eac3Settings { - s.SurroundExMode = &v - return s -} - -// SetSurroundMode sets the SurroundMode field's value. -func (s *Eac3Settings) SetSurroundMode(v string) *Eac3Settings { - s.SurroundMode = &v - return s -} - -// Settings for embedded captions Source -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/EmbeddedSourceSettings -type EmbeddedSourceSettings struct { - _ struct{} `type:"structure"` - - // When set to UPCONVERT, 608 data is both passed through via the "608 compatibility - // bytes" fields of the 708 wrapper as well as translated into 708. 708 data - // present in the source content will be discarded. 
- Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"EmbeddedConvert608To708"` - - // Specifies the 608/708 channel number within the video track from which to - // extract captions. Unused for passthrough. - Source608ChannelNumber *int64 `locationName:"source608ChannelNumber" type:"integer"` - - // Specifies the video track index used for extracting captions. The system - // only supports one input video track, so this should always be set to '1'. - Source608TrackNumber *int64 `locationName:"source608TrackNumber" type:"integer"` -} - -// String returns the string representation -func (s EmbeddedSourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EmbeddedSourceSettings) GoString() string { - return s.String() -} - -// SetConvert608To708 sets the Convert608To708 field's value. -func (s *EmbeddedSourceSettings) SetConvert608To708(v string) *EmbeddedSourceSettings { - s.Convert608To708 = &v - return s -} - -// SetSource608ChannelNumber sets the Source608ChannelNumber field's value. -func (s *EmbeddedSourceSettings) SetSource608ChannelNumber(v int64) *EmbeddedSourceSettings { - s.Source608ChannelNumber = &v - return s -} - -// SetSource608TrackNumber sets the Source608TrackNumber field's value. -func (s *EmbeddedSourceSettings) SetSource608TrackNumber(v int64) *EmbeddedSourceSettings { - s.Source608TrackNumber = &v - return s -} - -// Describes account specific API endpoint -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Endpoint -type Endpoint struct { - _ struct{} `type:"structure"` - - // URL of endpoint - Url *string `locationName:"url" type:"string"` -} - -// String returns the string representation -func (s Endpoint) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Endpoint) GoString() string { - return s.String() -} - -// SetUrl sets the Url field's value. 
-func (s *Endpoint) SetUrl(v string) *Endpoint { - s.Url = &v - return s -} - -// Settings for F4v container -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/F4vSettings -type F4vSettings struct { - _ struct{} `type:"structure"` - - // If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning - // of the archive as required for progressive downloading. Otherwise it is placed - // normally at the end. - MoovPlacement *string `locationName:"moovPlacement" type:"string" enum:"F4vMoovPlacement"` -} - -// String returns the string representation -func (s F4vSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s F4vSettings) GoString() string { - return s.String() -} - -// SetMoovPlacement sets the MoovPlacement field's value. -func (s *F4vSettings) SetMoovPlacement(v string) *F4vSettings { - s.MoovPlacement = &v - return s -} - -// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to -// FILE_GROUP_SETTINGS. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/FileGroupSettings -type FileGroupSettings struct { - _ struct{} `type:"structure"` - - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify - // the base filename in the URI, the service will use the filename of the input - // file. If your job has multiple inputs, the service uses the filename of the - // first input file. - Destination *string `locationName:"destination" type:"string"` -} - -// String returns the string representation -func (s FileGroupSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FileGroupSettings) GoString() string { - return s.String() -} - -// SetDestination sets the Destination field's value. 
-func (s *FileGroupSettings) SetDestination(v string) *FileGroupSettings { - s.Destination = &v - return s -} - -// Settings for File-based Captions in Source -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/FileSourceSettings -type FileSourceSettings struct { - _ struct{} `type:"structure"` - - // If set to UPCONVERT, 608 caption data is both passed through via the "608 - // compatibility bytes" fields of the 708 wrapper as well as translated into - // 708. 708 data present in the source content will be discarded. - Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"FileSourceConvert608To708"` - - // External caption file used for loading captions. Accepted file extensions - // are 'scc', 'ttml', 'dfxp', 'stl', 'srt', and 'smi'. Auto-populated when Infer - // External Filename is checked. - SourceFile *string `locationName:"sourceFile" type:"string"` - - // Specifies a time delta in seconds to offset the captions from the source - // file. - TimeDelta *int64 `locationName:"timeDelta" type:"integer"` -} - -// String returns the string representation -func (s FileSourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FileSourceSettings) GoString() string { - return s.String() -} - -// SetConvert608To708 sets the Convert608To708 field's value. -func (s *FileSourceSettings) SetConvert608To708(v string) *FileSourceSettings { - s.Convert608To708 = &v - return s -} - -// SetSourceFile sets the SourceFile field's value. -func (s *FileSourceSettings) SetSourceFile(v string) *FileSourceSettings { - s.SourceFile = &v - return s -} - -// SetTimeDelta sets the TimeDelta field's value. -func (s *FileSourceSettings) SetTimeDelta(v int64) *FileSourceSettings { - s.TimeDelta = &v - return s -} - -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value FRAME_CAPTURE. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/FrameCaptureSettings -type FrameCaptureSettings struct { - _ struct{} `type:"structure"` - - // Frame capture will encode the first frame of the output stream, then one - // frame every framerateDenominator/framerateNumerator seconds. For example, - // settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of - // 1/3 frame per second) will capture the first frame, then 1 frame every 3s. - // Files will be named as filename.n.jpg where n is the 0-based sequence number - // of each Capture. - FramerateDenominator *int64 `locationName:"framerateDenominator" type:"integer"` - - // Frame capture will encode the first frame of the output stream, then one - // frame every framerateDenominator/framerateNumerator seconds. For example, - // settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of - // 1/3 frame per second) will capture the first frame, then 1 frame every 3s. - // Files will be named as filename.NNNNNNN.jpg where N is the 0-based frame - // sequence number zero padded to 7 decimal places. - FramerateNumerator *int64 `locationName:"framerateNumerator" type:"integer"` - - // Maximum number of captures (encoded jpg output files). - MaxCaptures *int64 `locationName:"maxCaptures" type:"integer"` - - // JPEG Quality - a higher value equals higher quality. - Quality *int64 `locationName:"quality" type:"integer"` -} - -// String returns the string representation -func (s FrameCaptureSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FrameCaptureSettings) GoString() string { - return s.String() -} - -// SetFramerateDenominator sets the FramerateDenominator field's value. -func (s *FrameCaptureSettings) SetFramerateDenominator(v int64) *FrameCaptureSettings { - s.FramerateDenominator = &v - return s -} - -// SetFramerateNumerator sets the FramerateNumerator field's value. 
-func (s *FrameCaptureSettings) SetFramerateNumerator(v int64) *FrameCaptureSettings { - s.FramerateNumerator = &v - return s -} - -// SetMaxCaptures sets the MaxCaptures field's value. -func (s *FrameCaptureSettings) SetMaxCaptures(v int64) *FrameCaptureSettings { - s.MaxCaptures = &v - return s -} - -// SetQuality sets the Quality field's value. -func (s *FrameCaptureSettings) SetQuality(v int64) *FrameCaptureSettings { - s.Quality = &v - return s -} - -// Query a job by sending a request with the job ID. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJobRequest -type GetJobInput struct { - _ struct{} `type:"structure"` - - // the job ID of the job. - // - // Id is a required field - Id *string `location:"uri" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetJobInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetJobInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetId sets the Id field's value. -func (s *GetJobInput) SetId(v string) *GetJobInput { - s.Id = &v - return s -} - -// Successful get job requests will return an OK message and the job JSON. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJobResponse -type GetJobOutput struct { - _ struct{} `type:"structure"` - - // Each job converts an input file into an output file or files. 
For more information, - // see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html - Job *Job `locationName:"job" type:"structure"` -} - -// String returns the string representation -func (s GetJobOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobOutput) GoString() string { - return s.String() -} - -// SetJob sets the Job field's value. -func (s *GetJobOutput) SetJob(v *Job) *GetJobOutput { - s.Job = v - return s -} - -// Query a job template by sending a request with the job template name. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJobTemplateRequest -type GetJobTemplateInput struct { - _ struct{} `type:"structure"` - - // The name of the job template. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetJobTemplateInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobTemplateInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetJobTemplateInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetJobTemplateInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetJobTemplateInput) SetName(v string) *GetJobTemplateInput { - s.Name = &v - return s -} - -// Successful get job template requests will return an OK message and the job -// template JSON. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJobTemplateResponse -type GetJobTemplateOutput struct { - _ struct{} `type:"structure"` - - // A job template is a pre-made set of encoding instructions that you can use - // to quickly create a job. - JobTemplate *JobTemplate `locationName:"jobTemplate" type:"structure"` -} - -// String returns the string representation -func (s GetJobTemplateOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobTemplateOutput) GoString() string { - return s.String() -} - -// SetJobTemplate sets the JobTemplate field's value. -func (s *GetJobTemplateOutput) SetJobTemplate(v *JobTemplate) *GetJobTemplateOutput { - s.JobTemplate = v - return s -} - -// Query a preset by sending a request with the preset name. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetPresetRequest -type GetPresetInput struct { - _ struct{} `type:"structure"` - - // The name of the preset. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetPresetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPresetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetPresetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPresetInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetPresetInput) SetName(v string) *GetPresetInput { - s.Name = &v - return s -} - -// Successful get preset requests will return an OK message and the preset JSON. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetPresetResponse -type GetPresetOutput struct { - _ struct{} `type:"structure"` - - // A preset is a collection of preconfigured media conversion settings that - // you want MediaConvert to apply to the output during the conversion process. - Preset *Preset `locationName:"preset" type:"structure"` -} - -// String returns the string representation -func (s GetPresetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPresetOutput) GoString() string { - return s.String() -} - -// SetPreset sets the Preset field's value. -func (s *GetPresetOutput) SetPreset(v *Preset) *GetPresetOutput { - s.Preset = v - return s -} - -// Query a queue by sending a request with the queue name. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetQueueRequest -type GetQueueInput struct { - _ struct{} `type:"structure"` - - // The name of the queue. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetQueueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetQueueInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetQueueInput) SetName(v string) *GetQueueInput { - s.Name = &v - return s -} - -// Successful get queue requests will return an OK message and the queue JSON. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetQueueResponse -type GetQueueOutput struct { - _ struct{} `type:"structure"` - - // MediaConvert jobs are submitted to a queue. Unless specified otherwise jobs - // are submitted to a built-in default queue. User can create additional queues - // to separate the jobs of different categories or priority. - Queue *Queue `locationName:"queue" type:"structure"` -} - -// String returns the string representation -func (s GetQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetQueueOutput) GoString() string { - return s.String() -} - -// SetQueue sets the Queue field's value. -func (s *GetQueueOutput) SetQueue(v *Queue) *GetQueueOutput { - s.Queue = v - return s -} - -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value H_264. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/H264Settings -type H264Settings struct { - _ struct{} `type:"structure"` - - // Adaptive quantization. Allows intra-frame quantizers to vary to improve visual - // quality. - AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H264AdaptiveQuantization"` - - // Average bitrate in bits/second. Required for VBR, CBR, and ABR. Five megabits - // can be entered as 5000000 or 5m. Five hundred kilobits can be entered as - // 500000 or 0.5m. For MS Smooth outputs, bitrates must be unique when rounded - // down to the nearest multiple of 1000. - Bitrate *int64 `locationName:"bitrate" type:"integer"` - - // H.264 Level. - CodecLevel *string `locationName:"codecLevel" type:"string" enum:"H264CodecLevel"` - - // H.264 Profile. High 4:2:2 and 10-bit profiles are only available with the - // AVC-I License. - CodecProfile *string `locationName:"codecProfile" type:"string" enum:"H264CodecProfile"` - - // Entropy encoding mode. 
Use CABAC (must be in Main or High profile) or CAVLC. - EntropyEncoding *string `locationName:"entropyEncoding" type:"string" enum:"H264EntropyEncoding"` - - // Choosing FORCE_FIELD disables PAFF encoding for interlaced outputs. - FieldEncoding *string `locationName:"fieldEncoding" type:"string" enum:"H264FieldEncoding"` - - // Adjust quantization within each frame to reduce flicker or 'pop' on I-frames. - FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"H264FlickerAdaptiveQuantization"` - - // Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want - // the service to use the framerate from the input. Using the console, do this - // by choosing INITIALIZE_FROM_SOURCE for Framerate. - FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H264FramerateControl"` - - // When set to INTERPOLATE, produces smoother motion during framerate conversion. - FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"H264FramerateConversionAlgorithm"` - - // When you use the API for transcode jobs that use framerate conversion, specify - // the framerate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use - // FramerateDenominator to specify the denominator of this fraction. In this - // example, use 1001 for the value of FramerateDenominator. When you use the - // console for transcode jobs that use framerate conversion, provide the value - // as a decimal number for Framerate. In this example, specify 23.976. - FramerateDenominator *int64 `locationName:"framerateDenominator" type:"integer"` - - // Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 - // fps. - FramerateNumerator *int64 `locationName:"framerateNumerator" type:"integer"` - - // If enable, use reference B frames for GOP structures that have B frames > - // 1. 
- GopBReference *string `locationName:"gopBReference" type:"string" enum:"H264GopBReference"` - - // Frequency of closed GOPs. In streaming applications, it is recommended that - // this be set to 1 so a decoder joining mid-stream will receive an IDR frame - // as quickly as possible. Setting this value to 0 will break output segmenting. - GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"` - - // GOP Length (keyframe interval) in frames or seconds. Must be greater than - // zero. - GopSize *float64 `locationName:"gopSize" type:"double"` - - // Indicates if the GOP Size in H264 is specified in frames or seconds. If seconds - // the system will convert the GOP Size into a frame count at run time. - GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"H264GopSizeUnits"` - - // Percentage of the buffer that should initially be filled (HRD buffer model). - HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"` - - // Size of buffer (HRD buffer model). Five megabits can be entered as 5000000 - // or 5m. Five hundred kilobits can be entered as 500000 or 0.5m. - HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` - - // Use Interlace mode (InterlaceMode) to choose the scan line type for the output. - // * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce - // interlaced output with the entire output having the same field polarity (top - // or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default - // Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, - // behavior depends on the input scan type. - If the source is interlaced, the - // output will be interlaced with the same polarity as the source (it will follow - // the source). The output could therefore be a mix of "top field first" and - // "bottom field first". 
- If the source is progressive, the output will be - // interlaced with "top field first" or "bottom field first" polarity, depending - // on which of the Follow options you chose. - InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"H264InterlaceMode"` - - // Maximum bitrate in bits/second (for VBR mode only). Five megabits can be - // entered as 5000000 or 5m. Five hundred kilobits can be entered as 500000 - // or 0.5m. - MaxBitrate *int64 `locationName:"maxBitrate" type:"integer"` - - // Enforces separation between repeated (cadence) I-frames and I-frames inserted - // by Scene Change Detection. If a scene change I-frame is within I-interval - // frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene - // change I-frame. GOP stretch requires enabling lookahead as well as setting - // I-interval. The normal cadence resumes for the next GOP. This setting is - // only used when Scene Change Detect is enabled. Note: Maximum GOP stretch - // = GOP size + Min-I-interval - 1 - MinIInterval *int64 `locationName:"minIInterval" type:"integer"` - - // Number of B-frames between reference frames. - NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"` - - // Number of reference frames to use. The encoder may use more than requested - // if using B-frames and/or interlaced encoding. - NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" type:"integer"` - - // Using the API, enable ParFollowSource if you want the service to use the - // pixel aspect ratio from the input. Using the console, do this by choosing - // Follow source for Pixel aspect ratio. - ParControl *string `locationName:"parControl" type:"string" enum:"H264ParControl"` - - // Pixel Aspect Ratio denominator. - ParDenominator *int64 `locationName:"parDenominator" type:"integer"` - - // Pixel Aspect Ratio numerator. 
- ParNumerator *int64 `locationName:"parNumerator" type:"integer"` - - // Use Quality tuning level (H264QualityTuningLevel) to specifiy whether to - // use fast single-pass, high-quality singlepass, or high-quality multipass - // video encoding. - QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"H264QualityTuningLevel"` - - // Rate control mode. CQ uses constant quantizer (qp), ABR (average bitrate) - // does not write HRD parameters. - RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"H264RateControlMode"` - - // Places a PPS header on each encoded picture, even if repeated. - RepeatPps *string `locationName:"repeatPps" type:"string" enum:"H264RepeatPps"` - - // Scene change detection (inserts I-frames on scene changes). - SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H264SceneChangeDetect"` - - // Number of slices per picture. Must be less than or equal to the number of - // macroblock rows for progressive pictures, and less than or equal to half - // the number of macroblock rows for interlaced pictures. - Slices *int64 `locationName:"slices" type:"integer"` - - // Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled - // as 25fps, and audio is sped up correspondingly. - SlowPal *string `locationName:"slowPal" type:"string" enum:"H264SlowPal"` - - // Softness. Selects quantizer matrix, larger values reduce high-frequency content - // in the encoded image. - Softness *int64 `locationName:"softness" type:"integer"` - - // Adjust quantization within each frame based on spatial variation of content - // complexity. - SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"H264SpatialAdaptiveQuantization"` - - // Produces a bitstream compliant with SMPTE RP-2027. 
- Syntax *string `locationName:"syntax" type:"string" enum:"H264Syntax"` - - // This field applies only if the Streams > Advanced > Framerate (framerate) - // field is set to 29.970. This field works with the Streams > Advanced > Preprocessors - // > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced - // Mode field (interlace_mode) to identify the scan type for the output: Progressive, - // Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output - // from 23.976 input. - Soft: produces 23.976; the player converts this output - // to 29.97i. - Telecine *string `locationName:"telecine" type:"string" enum:"H264Telecine"` - - // Adjust quantization within each frame based on temporal variation of content - // complexity. - TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"H264TemporalAdaptiveQuantization"` - - // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. - UnregisteredSeiTimecode *string `locationName:"unregisteredSeiTimecode" type:"string" enum:"H264UnregisteredSeiTimecode"` -} - -// String returns the string representation -func (s H264Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s H264Settings) GoString() string { - return s.String() -} - -// SetAdaptiveQuantization sets the AdaptiveQuantization field's value. -func (s *H264Settings) SetAdaptiveQuantization(v string) *H264Settings { - s.AdaptiveQuantization = &v - return s -} - -// SetBitrate sets the Bitrate field's value. -func (s *H264Settings) SetBitrate(v int64) *H264Settings { - s.Bitrate = &v - return s -} - -// SetCodecLevel sets the CodecLevel field's value. -func (s *H264Settings) SetCodecLevel(v string) *H264Settings { - s.CodecLevel = &v - return s -} - -// SetCodecProfile sets the CodecProfile field's value. 
-func (s *H264Settings) SetCodecProfile(v string) *H264Settings { - s.CodecProfile = &v - return s -} - -// SetEntropyEncoding sets the EntropyEncoding field's value. -func (s *H264Settings) SetEntropyEncoding(v string) *H264Settings { - s.EntropyEncoding = &v - return s -} - -// SetFieldEncoding sets the FieldEncoding field's value. -func (s *H264Settings) SetFieldEncoding(v string) *H264Settings { - s.FieldEncoding = &v - return s -} - -// SetFlickerAdaptiveQuantization sets the FlickerAdaptiveQuantization field's value. -func (s *H264Settings) SetFlickerAdaptiveQuantization(v string) *H264Settings { - s.FlickerAdaptiveQuantization = &v - return s -} - -// SetFramerateControl sets the FramerateControl field's value. -func (s *H264Settings) SetFramerateControl(v string) *H264Settings { - s.FramerateControl = &v - return s -} - -// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. -func (s *H264Settings) SetFramerateConversionAlgorithm(v string) *H264Settings { - s.FramerateConversionAlgorithm = &v - return s -} - -// SetFramerateDenominator sets the FramerateDenominator field's value. -func (s *H264Settings) SetFramerateDenominator(v int64) *H264Settings { - s.FramerateDenominator = &v - return s -} - -// SetFramerateNumerator sets the FramerateNumerator field's value. -func (s *H264Settings) SetFramerateNumerator(v int64) *H264Settings { - s.FramerateNumerator = &v - return s -} - -// SetGopBReference sets the GopBReference field's value. -func (s *H264Settings) SetGopBReference(v string) *H264Settings { - s.GopBReference = &v - return s -} - -// SetGopClosedCadence sets the GopClosedCadence field's value. -func (s *H264Settings) SetGopClosedCadence(v int64) *H264Settings { - s.GopClosedCadence = &v - return s -} - -// SetGopSize sets the GopSize field's value. -func (s *H264Settings) SetGopSize(v float64) *H264Settings { - s.GopSize = &v - return s -} - -// SetGopSizeUnits sets the GopSizeUnits field's value. 
-func (s *H264Settings) SetGopSizeUnits(v string) *H264Settings { - s.GopSizeUnits = &v - return s -} - -// SetHrdBufferInitialFillPercentage sets the HrdBufferInitialFillPercentage field's value. -func (s *H264Settings) SetHrdBufferInitialFillPercentage(v int64) *H264Settings { - s.HrdBufferInitialFillPercentage = &v - return s -} - -// SetHrdBufferSize sets the HrdBufferSize field's value. -func (s *H264Settings) SetHrdBufferSize(v int64) *H264Settings { - s.HrdBufferSize = &v - return s -} - -// SetInterlaceMode sets the InterlaceMode field's value. -func (s *H264Settings) SetInterlaceMode(v string) *H264Settings { - s.InterlaceMode = &v - return s -} - -// SetMaxBitrate sets the MaxBitrate field's value. -func (s *H264Settings) SetMaxBitrate(v int64) *H264Settings { - s.MaxBitrate = &v - return s -} - -// SetMinIInterval sets the MinIInterval field's value. -func (s *H264Settings) SetMinIInterval(v int64) *H264Settings { - s.MinIInterval = &v - return s -} - -// SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value. -func (s *H264Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *H264Settings { - s.NumberBFramesBetweenReferenceFrames = &v - return s -} - -// SetNumberReferenceFrames sets the NumberReferenceFrames field's value. -func (s *H264Settings) SetNumberReferenceFrames(v int64) *H264Settings { - s.NumberReferenceFrames = &v - return s -} - -// SetParControl sets the ParControl field's value. -func (s *H264Settings) SetParControl(v string) *H264Settings { - s.ParControl = &v - return s -} - -// SetParDenominator sets the ParDenominator field's value. -func (s *H264Settings) SetParDenominator(v int64) *H264Settings { - s.ParDenominator = &v - return s -} - -// SetParNumerator sets the ParNumerator field's value. -func (s *H264Settings) SetParNumerator(v int64) *H264Settings { - s.ParNumerator = &v - return s -} - -// SetQualityTuningLevel sets the QualityTuningLevel field's value. 
-func (s *H264Settings) SetQualityTuningLevel(v string) *H264Settings { - s.QualityTuningLevel = &v - return s -} - -// SetRateControlMode sets the RateControlMode field's value. -func (s *H264Settings) SetRateControlMode(v string) *H264Settings { - s.RateControlMode = &v - return s -} - -// SetRepeatPps sets the RepeatPps field's value. -func (s *H264Settings) SetRepeatPps(v string) *H264Settings { - s.RepeatPps = &v - return s -} - -// SetSceneChangeDetect sets the SceneChangeDetect field's value. -func (s *H264Settings) SetSceneChangeDetect(v string) *H264Settings { - s.SceneChangeDetect = &v - return s -} - -// SetSlices sets the Slices field's value. -func (s *H264Settings) SetSlices(v int64) *H264Settings { - s.Slices = &v - return s -} - -// SetSlowPal sets the SlowPal field's value. -func (s *H264Settings) SetSlowPal(v string) *H264Settings { - s.SlowPal = &v - return s -} - -// SetSoftness sets the Softness field's value. -func (s *H264Settings) SetSoftness(v int64) *H264Settings { - s.Softness = &v - return s -} - -// SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value. -func (s *H264Settings) SetSpatialAdaptiveQuantization(v string) *H264Settings { - s.SpatialAdaptiveQuantization = &v - return s -} - -// SetSyntax sets the Syntax field's value. -func (s *H264Settings) SetSyntax(v string) *H264Settings { - s.Syntax = &v - return s -} - -// SetTelecine sets the Telecine field's value. -func (s *H264Settings) SetTelecine(v string) *H264Settings { - s.Telecine = &v - return s -} - -// SetTemporalAdaptiveQuantization sets the TemporalAdaptiveQuantization field's value. -func (s *H264Settings) SetTemporalAdaptiveQuantization(v string) *H264Settings { - s.TemporalAdaptiveQuantization = &v - return s -} - -// SetUnregisteredSeiTimecode sets the UnregisteredSeiTimecode field's value. 
-func (s *H264Settings) SetUnregisteredSeiTimecode(v string) *H264Settings { - s.UnregisteredSeiTimecode = &v - return s -} - -// Settings for H265 codec -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/H265Settings -type H265Settings struct { - _ struct{} `type:"structure"` - - // Adaptive quantization. Allows intra-frame quantizers to vary to improve visual - // quality. - AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H265AdaptiveQuantization"` - - // Enables Alternate Transfer Function SEI message for outputs using Hybrid - // Log Gamma (HLG) Electro-Optical Transfer Function (EOTF). - AlternateTransferFunctionSei *string `locationName:"alternateTransferFunctionSei" type:"string" enum:"H265AlternateTransferFunctionSei"` - - // Average bitrate in bits/second. Required for VBR, CBR, and ABR. Five megabits - // can be entered as 5000000 or 5m. Five hundred kilobits can be entered as - // 500000 or 0.5m. For MS Smooth outputs, bitrates must be unique when rounded - // down to the nearest multiple of 1000. - Bitrate *int64 `locationName:"bitrate" type:"integer"` - - // H.265 Level. - CodecLevel *string `locationName:"codecLevel" type:"string" enum:"H265CodecLevel"` - - // Represents the Profile and Tier, per the HEVC (H.265) specification. Selections - // are grouped as [Profile] / [Tier], so "Main/High" represents Main Profile - // with High Tier. 4:2:2 profiles are only available with the HEVC 4:2:2 License. - CodecProfile *string `locationName:"codecProfile" type:"string" enum:"H265CodecProfile"` - - // Adjust quantization within each frame to reduce flicker or 'pop' on I-frames. - FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"H265FlickerAdaptiveQuantization"` - - // Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want - // the service to use the framerate from the input. 
Using the console, do this - // by choosing INITIALIZE_FROM_SOURCE for Framerate. - FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H265FramerateControl"` - - // When set to INTERPOLATE, produces smoother motion during framerate conversion. - FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"H265FramerateConversionAlgorithm"` - - // Framerate denominator. - FramerateDenominator *int64 `locationName:"framerateDenominator" type:"integer"` - - // Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 - // fps. - FramerateNumerator *int64 `locationName:"framerateNumerator" type:"integer"` - - // If enable, use reference B frames for GOP structures that have B frames > - // 1. - GopBReference *string `locationName:"gopBReference" type:"string" enum:"H265GopBReference"` - - // Frequency of closed GOPs. In streaming applications, it is recommended that - // this be set to 1 so a decoder joining mid-stream will receive an IDR frame - // as quickly as possible. Setting this value to 0 will break output segmenting. - GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"` - - // GOP Length (keyframe interval) in frames or seconds. Must be greater than - // zero. - GopSize *float64 `locationName:"gopSize" type:"double"` - - // Indicates if the GOP Size in H265 is specified in frames or seconds. If seconds - // the system will convert the GOP Size into a frame count at run time. - GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"H265GopSizeUnits"` - - // Percentage of the buffer that should initially be filled (HRD buffer model). - HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"` - - // Size of buffer (HRD buffer model). Five megabits can be entered as 5000000 - // or 5m. Five hundred kilobits can be entered as 500000 or 0.5m. 
- HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` - - // Use Interlace mode (InterlaceMode) to choose the scan line type for the output. - // * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce - // interlaced output with the entire output having the same field polarity (top - // or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default - // Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, - // behavior depends on the input scan type. - If the source is interlaced, the - // output will be interlaced with the same polarity as the source (it will follow - // the source). The output could therefore be a mix of "top field first" and - // "bottom field first". - If the source is progressive, the output will be - // interlaced with "top field first" or "bottom field first" polarity, depending - // on which of the Follow options you chose. - InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"H265InterlaceMode"` - - // Maximum bitrate in bits/second (for VBR mode only). Five megabits can be - // entered as 5000000 or 5m. Five hundred kilobits can be entered as 500000 - // or 0.5m. - MaxBitrate *int64 `locationName:"maxBitrate" type:"integer"` - - // Enforces separation between repeated (cadence) I-frames and I-frames inserted - // by Scene Change Detection. If a scene change I-frame is within I-interval - // frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene - // change I-frame. GOP stretch requires enabling lookahead as well as setting - // I-interval. The normal cadence resumes for the next GOP. This setting is - // only used when Scene Change Detect is enabled. Note: Maximum GOP stretch - // = GOP size + Min-I-interval - 1 - MinIInterval *int64 `locationName:"minIInterval" type:"integer"` - - // Number of B-frames between reference frames. 
- NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"` - - // Number of reference frames to use. The encoder may use more than requested - // if using B-frames and/or interlaced encoding. - NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" type:"integer"` - - // Using the API, enable ParFollowSource if you want the service to use the - // pixel aspect ratio from the input. Using the console, do this by choosing - // Follow source for Pixel aspect ratio. - ParControl *string `locationName:"parControl" type:"string" enum:"H265ParControl"` - - // Pixel Aspect Ratio denominator. - ParDenominator *int64 `locationName:"parDenominator" type:"integer"` - - // Pixel Aspect Ratio numerator. - ParNumerator *int64 `locationName:"parNumerator" type:"integer"` - - // Use Quality tuning level (H265QualityTuningLevel) to specifiy whether to - // use fast single-pass, high-quality singlepass, or high-quality multipass - // video encoding. - QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"H265QualityTuningLevel"` - - // Rate control mode. CQ uses constant quantizer (qp), ABR (average bitrate) - // does not write HRD parameters. - RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"H265RateControlMode"` - - // Specify Sample Adaptive Offset (SAO) filter strength. Adaptive mode dynamically - // selects best strength based on content - SampleAdaptiveOffsetFilterMode *string `locationName:"sampleAdaptiveOffsetFilterMode" type:"string" enum:"H265SampleAdaptiveOffsetFilterMode"` - - // Scene change detection (inserts I-frames on scene changes). - SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H265SceneChangeDetect"` - - // Number of slices per picture. 
Must be less than or equal to the number of - // macroblock rows for progressive pictures, and less than or equal to half - // the number of macroblock rows for interlaced pictures. - Slices *int64 `locationName:"slices" type:"integer"` - - // Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled - // as 25fps, and audio is sped up correspondingly. - SlowPal *string `locationName:"slowPal" type:"string" enum:"H265SlowPal"` - - // Adjust quantization within each frame based on spatial variation of content - // complexity. - SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"H265SpatialAdaptiveQuantization"` - - // This field applies only if the Streams > Advanced > Framerate (framerate) - // field is set to 29.970. This field works with the Streams > Advanced > Preprocessors - // > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced - // Mode field (interlace_mode) to identify the scan type for the output: Progressive, - // Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output - // from 23.976 input. - Soft: produces 23.976; the player converts this output - // to 29.97i. - Telecine *string `locationName:"telecine" type:"string" enum:"H265Telecine"` - - // Adjust quantization within each frame based on temporal variation of content - // complexity. - TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"H265TemporalAdaptiveQuantization"` - - // Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers - // are supported depending on GOP structure: I- and P-frames form one layer, - // reference B-frames can form a second layer and non-reference b-frames can - // form a third layer. Decoders can optionally decode only the lower temporal - // layers to generate a lower frame rate output. For example, given a bitstream - // with temporal IDs and with b-frames = 1 (i.e. 
IbPbPb display order), a decoder - // could decode all the frames for full frame rate output or only the I and - // P frames (lowest temporal layer) for a half frame rate output. - TemporalIds *string `locationName:"temporalIds" type:"string" enum:"H265TemporalIds"` - - // Enable use of tiles, allowing horizontal as well as vertical subdivision - // of the encoded pictures. - Tiles *string `locationName:"tiles" type:"string" enum:"H265Tiles"` - - // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. - UnregisteredSeiTimecode *string `locationName:"unregisteredSeiTimecode" type:"string" enum:"H265UnregisteredSeiTimecode"` -} - -// String returns the string representation -func (s H265Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s H265Settings) GoString() string { - return s.String() -} - -// SetAdaptiveQuantization sets the AdaptiveQuantization field's value. -func (s *H265Settings) SetAdaptiveQuantization(v string) *H265Settings { - s.AdaptiveQuantization = &v - return s -} - -// SetAlternateTransferFunctionSei sets the AlternateTransferFunctionSei field's value. -func (s *H265Settings) SetAlternateTransferFunctionSei(v string) *H265Settings { - s.AlternateTransferFunctionSei = &v - return s -} - -// SetBitrate sets the Bitrate field's value. -func (s *H265Settings) SetBitrate(v int64) *H265Settings { - s.Bitrate = &v - return s -} - -// SetCodecLevel sets the CodecLevel field's value. -func (s *H265Settings) SetCodecLevel(v string) *H265Settings { - s.CodecLevel = &v - return s -} - -// SetCodecProfile sets the CodecProfile field's value. -func (s *H265Settings) SetCodecProfile(v string) *H265Settings { - s.CodecProfile = &v - return s -} - -// SetFlickerAdaptiveQuantization sets the FlickerAdaptiveQuantization field's value. 
-func (s *H265Settings) SetFlickerAdaptiveQuantization(v string) *H265Settings { - s.FlickerAdaptiveQuantization = &v - return s -} - -// SetFramerateControl sets the FramerateControl field's value. -func (s *H265Settings) SetFramerateControl(v string) *H265Settings { - s.FramerateControl = &v - return s -} - -// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. -func (s *H265Settings) SetFramerateConversionAlgorithm(v string) *H265Settings { - s.FramerateConversionAlgorithm = &v - return s -} - -// SetFramerateDenominator sets the FramerateDenominator field's value. -func (s *H265Settings) SetFramerateDenominator(v int64) *H265Settings { - s.FramerateDenominator = &v - return s -} - -// SetFramerateNumerator sets the FramerateNumerator field's value. -func (s *H265Settings) SetFramerateNumerator(v int64) *H265Settings { - s.FramerateNumerator = &v - return s -} - -// SetGopBReference sets the GopBReference field's value. -func (s *H265Settings) SetGopBReference(v string) *H265Settings { - s.GopBReference = &v - return s -} - -// SetGopClosedCadence sets the GopClosedCadence field's value. -func (s *H265Settings) SetGopClosedCadence(v int64) *H265Settings { - s.GopClosedCadence = &v - return s -} - -// SetGopSize sets the GopSize field's value. -func (s *H265Settings) SetGopSize(v float64) *H265Settings { - s.GopSize = &v - return s -} - -// SetGopSizeUnits sets the GopSizeUnits field's value. -func (s *H265Settings) SetGopSizeUnits(v string) *H265Settings { - s.GopSizeUnits = &v - return s -} - -// SetHrdBufferInitialFillPercentage sets the HrdBufferInitialFillPercentage field's value. -func (s *H265Settings) SetHrdBufferInitialFillPercentage(v int64) *H265Settings { - s.HrdBufferInitialFillPercentage = &v - return s -} - -// SetHrdBufferSize sets the HrdBufferSize field's value. 
-func (s *H265Settings) SetHrdBufferSize(v int64) *H265Settings { - s.HrdBufferSize = &v - return s -} - -// SetInterlaceMode sets the InterlaceMode field's value. -func (s *H265Settings) SetInterlaceMode(v string) *H265Settings { - s.InterlaceMode = &v - return s -} - -// SetMaxBitrate sets the MaxBitrate field's value. -func (s *H265Settings) SetMaxBitrate(v int64) *H265Settings { - s.MaxBitrate = &v - return s -} - -// SetMinIInterval sets the MinIInterval field's value. -func (s *H265Settings) SetMinIInterval(v int64) *H265Settings { - s.MinIInterval = &v - return s -} - -// SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value. -func (s *H265Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *H265Settings { - s.NumberBFramesBetweenReferenceFrames = &v - return s -} - -// SetNumberReferenceFrames sets the NumberReferenceFrames field's value. -func (s *H265Settings) SetNumberReferenceFrames(v int64) *H265Settings { - s.NumberReferenceFrames = &v - return s -} - -// SetParControl sets the ParControl field's value. -func (s *H265Settings) SetParControl(v string) *H265Settings { - s.ParControl = &v - return s -} - -// SetParDenominator sets the ParDenominator field's value. -func (s *H265Settings) SetParDenominator(v int64) *H265Settings { - s.ParDenominator = &v - return s -} - -// SetParNumerator sets the ParNumerator field's value. -func (s *H265Settings) SetParNumerator(v int64) *H265Settings { - s.ParNumerator = &v - return s -} - -// SetQualityTuningLevel sets the QualityTuningLevel field's value. -func (s *H265Settings) SetQualityTuningLevel(v string) *H265Settings { - s.QualityTuningLevel = &v - return s -} - -// SetRateControlMode sets the RateControlMode field's value. -func (s *H265Settings) SetRateControlMode(v string) *H265Settings { - s.RateControlMode = &v - return s -} - -// SetSampleAdaptiveOffsetFilterMode sets the SampleAdaptiveOffsetFilterMode field's value. 
-func (s *H265Settings) SetSampleAdaptiveOffsetFilterMode(v string) *H265Settings { - s.SampleAdaptiveOffsetFilterMode = &v - return s -} - -// SetSceneChangeDetect sets the SceneChangeDetect field's value. -func (s *H265Settings) SetSceneChangeDetect(v string) *H265Settings { - s.SceneChangeDetect = &v - return s -} - -// SetSlices sets the Slices field's value. -func (s *H265Settings) SetSlices(v int64) *H265Settings { - s.Slices = &v - return s -} - -// SetSlowPal sets the SlowPal field's value. -func (s *H265Settings) SetSlowPal(v string) *H265Settings { - s.SlowPal = &v - return s -} - -// SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value. -func (s *H265Settings) SetSpatialAdaptiveQuantization(v string) *H265Settings { - s.SpatialAdaptiveQuantization = &v - return s -} - -// SetTelecine sets the Telecine field's value. -func (s *H265Settings) SetTelecine(v string) *H265Settings { - s.Telecine = &v - return s -} - -// SetTemporalAdaptiveQuantization sets the TemporalAdaptiveQuantization field's value. -func (s *H265Settings) SetTemporalAdaptiveQuantization(v string) *H265Settings { - s.TemporalAdaptiveQuantization = &v - return s -} - -// SetTemporalIds sets the TemporalIds field's value. -func (s *H265Settings) SetTemporalIds(v string) *H265Settings { - s.TemporalIds = &v - return s -} - -// SetTiles sets the Tiles field's value. -func (s *H265Settings) SetTiles(v string) *H265Settings { - s.Tiles = &v - return s -} - -// SetUnregisteredSeiTimecode sets the UnregisteredSeiTimecode field's value. -func (s *H265Settings) SetUnregisteredSeiTimecode(v string) *H265Settings { - s.UnregisteredSeiTimecode = &v - return s -} - -// Use the HDR master display (Hdr10Metadata) settings to provide values for -// HDR color. These values vary depending on the input video and must be provided -// by a color grader. Range is 0 to 50,000, each increment represents 0.00002 -// in CIE1931 color coordinate. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Hdr10Metadata -type Hdr10Metadata struct { - _ struct{} `type:"structure"` - - // HDR Master Display Information comes from the color grader and the color - // grading tools. Range is 0 to 50,000, each increment represents 0.00002 in - // CIE1931 color coordinate. - BluePrimaryX *int64 `locationName:"bluePrimaryX" type:"integer"` - - // HDR Master Display Information comes from the color grader and the color - // grading tools. Range is 0 to 50,000, each increment represents 0.00002 in - // CIE1931 color coordinate. - BluePrimaryY *int64 `locationName:"bluePrimaryY" type:"integer"` - - // HDR Master Display Information comes from the color grader and the color - // grading tools. Range is 0 to 50,000, each increment represents 0.00002 in - // CIE1931 color coordinate. - GreenPrimaryX *int64 `locationName:"greenPrimaryX" type:"integer"` - - // HDR Master Display Information comes from the color grader and the color - // grading tools. Range is 0 to 50,000, each increment represents 0.00002 in - // CIE1931 color coordinate. - GreenPrimaryY *int64 `locationName:"greenPrimaryY" type:"integer"` - - // Maximum light level among all samples in the coded video sequence, in units - // of candelas per square meter. - MaxContentLightLevel *int64 `locationName:"maxContentLightLevel" type:"integer"` - - // Maximum average light level of any frame in the coded video sequence, in - // units of candelas per square meter. - MaxFrameAverageLightLevel *int64 `locationName:"maxFrameAverageLightLevel" type:"integer"` - - // Nominal maximum mastering display luminance in units of of 0.0001 candelas - // per square meter. 
- MaxLuminance *int64 `locationName:"maxLuminance" type:"integer"` - - // Nominal minimum mastering display luminance in units of of 0.0001 candelas - // per square meter - MinLuminance *int64 `locationName:"minLuminance" type:"integer"` - - // HDR Master Display Information comes from the color grader and the color - // grading tools. Range is 0 to 50,000, each increment represents 0.00002 in - // CIE1931 color coordinate. - RedPrimaryX *int64 `locationName:"redPrimaryX" type:"integer"` - - // HDR Master Display Information comes from the color grader and the color - // grading tools. Range is 0 to 50,000, each increment represents 0.00002 in - // CIE1931 color coordinate. - RedPrimaryY *int64 `locationName:"redPrimaryY" type:"integer"` - - // HDR Master Display Information comes from the color grader and the color - // grading tools. Range is 0 to 50,000, each increment represents 0.00002 in - // CIE1931 color coordinate. - WhitePointX *int64 `locationName:"whitePointX" type:"integer"` - - // HDR Master Display Information comes from the color grader and the color - // grading tools. Range is 0 to 50,000, each increment represents 0.00002 in - // CIE1931 color coordinate. - WhitePointY *int64 `locationName:"whitePointY" type:"integer"` -} - -// String returns the string representation -func (s Hdr10Metadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Hdr10Metadata) GoString() string { - return s.String() -} - -// SetBluePrimaryX sets the BluePrimaryX field's value. -func (s *Hdr10Metadata) SetBluePrimaryX(v int64) *Hdr10Metadata { - s.BluePrimaryX = &v - return s -} - -// SetBluePrimaryY sets the BluePrimaryY field's value. -func (s *Hdr10Metadata) SetBluePrimaryY(v int64) *Hdr10Metadata { - s.BluePrimaryY = &v - return s -} - -// SetGreenPrimaryX sets the GreenPrimaryX field's value. 
-func (s *Hdr10Metadata) SetGreenPrimaryX(v int64) *Hdr10Metadata { - s.GreenPrimaryX = &v - return s -} - -// SetGreenPrimaryY sets the GreenPrimaryY field's value. -func (s *Hdr10Metadata) SetGreenPrimaryY(v int64) *Hdr10Metadata { - s.GreenPrimaryY = &v - return s -} - -// SetMaxContentLightLevel sets the MaxContentLightLevel field's value. -func (s *Hdr10Metadata) SetMaxContentLightLevel(v int64) *Hdr10Metadata { - s.MaxContentLightLevel = &v - return s -} - -// SetMaxFrameAverageLightLevel sets the MaxFrameAverageLightLevel field's value. -func (s *Hdr10Metadata) SetMaxFrameAverageLightLevel(v int64) *Hdr10Metadata { - s.MaxFrameAverageLightLevel = &v - return s -} - -// SetMaxLuminance sets the MaxLuminance field's value. -func (s *Hdr10Metadata) SetMaxLuminance(v int64) *Hdr10Metadata { - s.MaxLuminance = &v - return s -} - -// SetMinLuminance sets the MinLuminance field's value. -func (s *Hdr10Metadata) SetMinLuminance(v int64) *Hdr10Metadata { - s.MinLuminance = &v - return s -} - -// SetRedPrimaryX sets the RedPrimaryX field's value. -func (s *Hdr10Metadata) SetRedPrimaryX(v int64) *Hdr10Metadata { - s.RedPrimaryX = &v - return s -} - -// SetRedPrimaryY sets the RedPrimaryY field's value. -func (s *Hdr10Metadata) SetRedPrimaryY(v int64) *Hdr10Metadata { - s.RedPrimaryY = &v - return s -} - -// SetWhitePointX sets the WhitePointX field's value. -func (s *Hdr10Metadata) SetWhitePointX(v int64) *Hdr10Metadata { - s.WhitePointX = &v - return s -} - -// SetWhitePointY sets the WhitePointY field's value. -func (s *Hdr10Metadata) SetWhitePointY(v int64) *Hdr10Metadata { - s.WhitePointY = &v - return s -} - -// Caption Language Mapping -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/HlsCaptionLanguageMapping -type HlsCaptionLanguageMapping struct { - _ struct{} `type:"structure"` - - // Caption channel. 
- CaptionChannel *int64 `locationName:"captionChannel" type:"integer"` - - // Code to specify the language, following the specification "ISO 639-2 three-digit - // code":http://www.loc.gov/standards/iso639-2/ - LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` - - // Caption language description. - LanguageDescription *string `locationName:"languageDescription" type:"string"` -} - -// String returns the string representation -func (s HlsCaptionLanguageMapping) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsCaptionLanguageMapping) GoString() string { - return s.String() -} - -// SetCaptionChannel sets the CaptionChannel field's value. -func (s *HlsCaptionLanguageMapping) SetCaptionChannel(v int64) *HlsCaptionLanguageMapping { - s.CaptionChannel = &v - return s -} - -// SetLanguageCode sets the LanguageCode field's value. -func (s *HlsCaptionLanguageMapping) SetLanguageCode(v string) *HlsCaptionLanguageMapping { - s.LanguageCode = &v - return s -} - -// SetLanguageDescription sets the LanguageDescription field's value. -func (s *HlsCaptionLanguageMapping) SetLanguageDescription(v string) *HlsCaptionLanguageMapping { - s.LanguageDescription = &v - return s -} - -// Settings for HLS encryption -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/HlsEncryptionSettings -type HlsEncryptionSettings struct { - _ struct{} `type:"structure"` - - // This is a 128-bit, 16-byte hex value represented by a 32-character text string. - // If this parameter is not set then the Initialization Vector will follow the - // segment number by default. - ConstantInitializationVector *string `locationName:"constantInitializationVector" type:"string"` - - // Encrypts the segments with the given encryption scheme. Leave blank to disable. - // Selecting 'Disabled' in the web interface also disables encryption. 
- EncryptionMethod *string `locationName:"encryptionMethod" type:"string" enum:"HlsEncryptionType"` - - // The Initialization Vector is a 128-bit number used in conjunction with the - // key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed - // in the manifest. Otherwise Initialization Vector is not in the manifest. - InitializationVectorInManifest *string `locationName:"initializationVectorInManifest" type:"string" enum:"HlsInitializationVectorInManifest"` - - // Settings for use with a SPEKE key provider - SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"` - - // Settings for use with a SPEKE key provider. - StaticKeyProvider *StaticKeyProvider `locationName:"staticKeyProvider" type:"structure"` - - // Indicates which type of key provider is used for encryption. - Type *string `locationName:"type" type:"string" enum:"HlsKeyProviderType"` -} - -// String returns the string representation -func (s HlsEncryptionSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsEncryptionSettings) GoString() string { - return s.String() -} - -// SetConstantInitializationVector sets the ConstantInitializationVector field's value. -func (s *HlsEncryptionSettings) SetConstantInitializationVector(v string) *HlsEncryptionSettings { - s.ConstantInitializationVector = &v - return s -} - -// SetEncryptionMethod sets the EncryptionMethod field's value. -func (s *HlsEncryptionSettings) SetEncryptionMethod(v string) *HlsEncryptionSettings { - s.EncryptionMethod = &v - return s -} - -// SetInitializationVectorInManifest sets the InitializationVectorInManifest field's value. -func (s *HlsEncryptionSettings) SetInitializationVectorInManifest(v string) *HlsEncryptionSettings { - s.InitializationVectorInManifest = &v - return s -} - -// SetSpekeKeyProvider sets the SpekeKeyProvider field's value. 
-func (s *HlsEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProvider) *HlsEncryptionSettings { - s.SpekeKeyProvider = v - return s -} - -// SetStaticKeyProvider sets the StaticKeyProvider field's value. -func (s *HlsEncryptionSettings) SetStaticKeyProvider(v *StaticKeyProvider) *HlsEncryptionSettings { - s.StaticKeyProvider = v - return s -} - -// SetType sets the Type field's value. -func (s *HlsEncryptionSettings) SetType(v string) *HlsEncryptionSettings { - s.Type = &v - return s -} - -// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to -// HLS_GROUP_SETTINGS. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/HlsGroupSettings -type HlsGroupSettings struct { - _ struct{} `type:"structure"` - - // Choose one or more ad marker types to pass SCTE35 signals through to this - // group of Apple HLS outputs. - AdMarkers []*string `locationName:"adMarkers" type:"list"` - - // A partial URI prefix that will be prepended to each output in the media .m3u8 - // file. Can be used if base manifest is delivered from a different URL than - // the main .m3u8 file. - BaseUrl *string `locationName:"baseUrl" type:"string"` - - // Language to be used on Caption outputs - CaptionLanguageMappings []*HlsCaptionLanguageMapping `locationName:"captionLanguageMappings" type:"list"` - - // Applies only to 608 Embedded output captions. Insert: Include CLOSED-CAPTIONS - // lines in the manifest. Specify at least one language in the CC1 Language - // Code field. One CLOSED-CAPTION line is added for each Language Code you specify. - // Make sure to specify the languages in the order in which they appear in the - // original source (if the source is embedded format) or the order of the caption - // selectors (if the source is other than embedded). Otherwise, languages in - // the manifest will not match up properly with the output captions. None: Include - // CLOSED-CAPTIONS=NONE line in the manifest. 
Omit: Omit any CLOSED-CAPTIONS - // line from the manifest. - CaptionLanguageSetting *string `locationName:"captionLanguageSetting" type:"string" enum:"HlsCaptionLanguageSetting"` - - // When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client - // from saving media segments for later replay. - ClientCache *string `locationName:"clientCache" type:"string" enum:"HlsClientCache"` - - // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist - // generation. - CodecSpecification *string `locationName:"codecSpecification" type:"string" enum:"HlsCodecSpecification"` - - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify - // the base filename in the URI, the service will use the filename of the input - // file. If your job has multiple inputs, the service uses the filename of the - // first input file. - Destination *string `locationName:"destination" type:"string"` - - // Indicates whether segments should be placed in subdirectories. - DirectoryStructure *string `locationName:"directoryStructure" type:"string" enum:"HlsDirectoryStructure"` - - // DRM settings. - Encryption *HlsEncryptionSettings `locationName:"encryption" type:"structure"` - - // When set to GZIP, compresses HLS playlist. - ManifestCompression *string `locationName:"manifestCompression" type:"string" enum:"HlsManifestCompression"` - - // Indicates whether the output manifest should use floating point values for - // segment duration. - ManifestDurationFormat *string `locationName:"manifestDurationFormat" type:"string" enum:"HlsManifestDurationFormat"` - - // When set, Minimum Segment Size is enforced by looking ahead and back within - // the specified range for a nearby avail and extending the segment size if - // needed. 
- MinSegmentLength *int64 `locationName:"minSegmentLength" type:"integer"` - - // Indicates whether the .m3u8 manifest file should be generated for this HLS - // output group. - OutputSelection *string `locationName:"outputSelection" type:"string" enum:"HlsOutputSelection"` - - // Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. - // The value is calculated as follows: either the program date and time are - // initialized using the input timecode source, or the time is initialized using - // the input timecode source and the date is initialized using the timestamp_offset. - ProgramDateTime *string `locationName:"programDateTime" type:"string" enum:"HlsProgramDateTime"` - - // Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds. - ProgramDateTimePeriod *int64 `locationName:"programDateTimePeriod" type:"integer"` - - // When set to SINGLE_FILE, emits program as a single media resource (.ts) file, - // uses #EXT-X-BYTERANGE tags to index segment for playback. - SegmentControl *string `locationName:"segmentControl" type:"string" enum:"HlsSegmentControl"` - - // Length of MPEG-2 Transport Stream segments to create (in seconds). Note that - // segments will end on the next keyframe after this number of seconds, so actual - // segment length may be longer. - SegmentLength *int64 `locationName:"segmentLength" type:"integer"` - - // Number of segments to write to a subdirectory before starting a new one. - // directoryStructure must be SINGLE_DIRECTORY for this setting to have an effect. - SegmentsPerSubdirectory *int64 `locationName:"segmentsPerSubdirectory" type:"integer"` - - // Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag - // of variant manifest. - StreamInfResolution *string `locationName:"streamInfResolution" type:"string" enum:"HlsStreamInfResolution"` - - // Indicates ID3 frame that has the timecode. 
- TimedMetadataId3Frame *string `locationName:"timedMetadataId3Frame" type:"string" enum:"HlsTimedMetadataId3Frame"` - - // Timed Metadata interval in seconds. - TimedMetadataId3Period *int64 `locationName:"timedMetadataId3Period" type:"integer"` - - // Provides an extra millisecond delta offset to fine tune the timestamps. - TimestampDeltaMilliseconds *int64 `locationName:"timestampDeltaMilliseconds" type:"integer"` -} - -// String returns the string representation -func (s HlsGroupSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsGroupSettings) GoString() string { - return s.String() -} - -// SetAdMarkers sets the AdMarkers field's value. -func (s *HlsGroupSettings) SetAdMarkers(v []*string) *HlsGroupSettings { - s.AdMarkers = v - return s -} - -// SetBaseUrl sets the BaseUrl field's value. -func (s *HlsGroupSettings) SetBaseUrl(v string) *HlsGroupSettings { - s.BaseUrl = &v - return s -} - -// SetCaptionLanguageMappings sets the CaptionLanguageMappings field's value. -func (s *HlsGroupSettings) SetCaptionLanguageMappings(v []*HlsCaptionLanguageMapping) *HlsGroupSettings { - s.CaptionLanguageMappings = v - return s -} - -// SetCaptionLanguageSetting sets the CaptionLanguageSetting field's value. -func (s *HlsGroupSettings) SetCaptionLanguageSetting(v string) *HlsGroupSettings { - s.CaptionLanguageSetting = &v - return s -} - -// SetClientCache sets the ClientCache field's value. -func (s *HlsGroupSettings) SetClientCache(v string) *HlsGroupSettings { - s.ClientCache = &v - return s -} - -// SetCodecSpecification sets the CodecSpecification field's value. -func (s *HlsGroupSettings) SetCodecSpecification(v string) *HlsGroupSettings { - s.CodecSpecification = &v - return s -} - -// SetDestination sets the Destination field's value. 
-func (s *HlsGroupSettings) SetDestination(v string) *HlsGroupSettings { - s.Destination = &v - return s -} - -// SetDirectoryStructure sets the DirectoryStructure field's value. -func (s *HlsGroupSettings) SetDirectoryStructure(v string) *HlsGroupSettings { - s.DirectoryStructure = &v - return s -} - -// SetEncryption sets the Encryption field's value. -func (s *HlsGroupSettings) SetEncryption(v *HlsEncryptionSettings) *HlsGroupSettings { - s.Encryption = v - return s -} - -// SetManifestCompression sets the ManifestCompression field's value. -func (s *HlsGroupSettings) SetManifestCompression(v string) *HlsGroupSettings { - s.ManifestCompression = &v - return s -} - -// SetManifestDurationFormat sets the ManifestDurationFormat field's value. -func (s *HlsGroupSettings) SetManifestDurationFormat(v string) *HlsGroupSettings { - s.ManifestDurationFormat = &v - return s -} - -// SetMinSegmentLength sets the MinSegmentLength field's value. -func (s *HlsGroupSettings) SetMinSegmentLength(v int64) *HlsGroupSettings { - s.MinSegmentLength = &v - return s -} - -// SetOutputSelection sets the OutputSelection field's value. -func (s *HlsGroupSettings) SetOutputSelection(v string) *HlsGroupSettings { - s.OutputSelection = &v - return s -} - -// SetProgramDateTime sets the ProgramDateTime field's value. -func (s *HlsGroupSettings) SetProgramDateTime(v string) *HlsGroupSettings { - s.ProgramDateTime = &v - return s -} - -// SetProgramDateTimePeriod sets the ProgramDateTimePeriod field's value. -func (s *HlsGroupSettings) SetProgramDateTimePeriod(v int64) *HlsGroupSettings { - s.ProgramDateTimePeriod = &v - return s -} - -// SetSegmentControl sets the SegmentControl field's value. -func (s *HlsGroupSettings) SetSegmentControl(v string) *HlsGroupSettings { - s.SegmentControl = &v - return s -} - -// SetSegmentLength sets the SegmentLength field's value. 
-func (s *HlsGroupSettings) SetSegmentLength(v int64) *HlsGroupSettings { - s.SegmentLength = &v - return s -} - -// SetSegmentsPerSubdirectory sets the SegmentsPerSubdirectory field's value. -func (s *HlsGroupSettings) SetSegmentsPerSubdirectory(v int64) *HlsGroupSettings { - s.SegmentsPerSubdirectory = &v - return s -} - -// SetStreamInfResolution sets the StreamInfResolution field's value. -func (s *HlsGroupSettings) SetStreamInfResolution(v string) *HlsGroupSettings { - s.StreamInfResolution = &v - return s -} - -// SetTimedMetadataId3Frame sets the TimedMetadataId3Frame field's value. -func (s *HlsGroupSettings) SetTimedMetadataId3Frame(v string) *HlsGroupSettings { - s.TimedMetadataId3Frame = &v - return s -} - -// SetTimedMetadataId3Period sets the TimedMetadataId3Period field's value. -func (s *HlsGroupSettings) SetTimedMetadataId3Period(v int64) *HlsGroupSettings { - s.TimedMetadataId3Period = &v - return s -} - -// SetTimestampDeltaMilliseconds sets the TimestampDeltaMilliseconds field's value. -func (s *HlsGroupSettings) SetTimestampDeltaMilliseconds(v int64) *HlsGroupSettings { - s.TimestampDeltaMilliseconds = &v - return s -} - -// Settings for HLS output groups -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/HlsSettings -type HlsSettings struct { - _ struct{} `type:"structure"` - - // Specifies the group to which the audio Rendition belongs. - AudioGroupId *string `locationName:"audioGroupId" type:"string"` - - // List all the audio groups that are used with the video output stream. Input - // all the audio GROUP-IDs that are associated to the video, separate by ','. - AudioRenditionSets *string `locationName:"audioRenditionSets" type:"string"` - - // Four types of audio-only tracks are supported: Audio-Only Variant Stream - // The client can play back this audio-only stream instead of video in low-bandwidth - // scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest. 
Alternate - // Audio, Auto Select, Default Alternate rendition that the client should try - // to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest - // with DEFAULT=YES, AUTOSELECT=YES Alternate Audio, Auto Select, Not Default - // Alternate rendition that the client may try to play back by default. Represented - // as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=YES Alternate - // Audio, not Auto Select Alternate rendition that the client will not try to - // play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with - // DEFAULT=NO, AUTOSELECT=NO - AudioTrackType *string `locationName:"audioTrackType" type:"string" enum:"HlsAudioTrackType"` - - // When set to INCLUDE, writes I-Frame Only Manifest in addition to the HLS - // manifest - IFrameOnlyManifest *string `locationName:"iFrameOnlyManifest" type:"string" enum:"HlsIFrameOnlyManifest"` - - // String concatenated to end of segment filenames. Accepts "Format Identifiers":#format_identifier_parameters. - SegmentModifier *string `locationName:"segmentModifier" type:"string"` -} - -// String returns the string representation -func (s HlsSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsSettings) GoString() string { - return s.String() -} - -// SetAudioGroupId sets the AudioGroupId field's value. -func (s *HlsSettings) SetAudioGroupId(v string) *HlsSettings { - s.AudioGroupId = &v - return s -} - -// SetAudioRenditionSets sets the AudioRenditionSets field's value. -func (s *HlsSettings) SetAudioRenditionSets(v string) *HlsSettings { - s.AudioRenditionSets = &v - return s -} - -// SetAudioTrackType sets the AudioTrackType field's value. -func (s *HlsSettings) SetAudioTrackType(v string) *HlsSettings { - s.AudioTrackType = &v - return s -} - -// SetIFrameOnlyManifest sets the IFrameOnlyManifest field's value. 
-func (s *HlsSettings) SetIFrameOnlyManifest(v string) *HlsSettings { - s.IFrameOnlyManifest = &v - return s -} - -// SetSegmentModifier sets the SegmentModifier field's value. -func (s *HlsSettings) SetSegmentModifier(v string) *HlsSettings { - s.SegmentModifier = &v - return s -} - -// To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3) -// to specify the base 64 encoded string and use Timecode (TimeCode) to specify -// the time when the tag should be inserted. To insert multiple ID3 tags in -// your output, create mulitple instances of ID3 insertion (Id3Insertion). -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Id3Insertion -type Id3Insertion struct { - _ struct{} `type:"structure"` - - // Use ID3 tag (Id3) to provide a tag value in base64-encode format. - Id3 *string `locationName:"id3" type:"string"` - - // Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format. - Timecode *string `locationName:"timecode" type:"string"` -} - -// String returns the string representation -func (s Id3Insertion) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Id3Insertion) GoString() string { - return s.String() -} - -// SetId3 sets the Id3 field's value. -func (s *Id3Insertion) SetId3(v string) *Id3Insertion { - s.Id3 = &v - return s -} - -// SetTimecode sets the Timecode field's value. -func (s *Id3Insertion) SetTimecode(v string) *Id3Insertion { - s.Timecode = &v - return s -} - -// Enable the Image inserter (ImageInserter) feature to include a graphic overlay -// on your video. Enable or disable this feature for each output individually. -// This setting is disabled by default. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ImageInserter -type ImageInserter struct { - _ struct{} `type:"structure"` - - // Image to insert. Must be 32 bit windows BMP, PNG, or TGA file. Must not be - // larger than the output frames. 
- InsertableImages []*InsertableImage `locationName:"insertableImages" type:"list"` -} - -// String returns the string representation -func (s ImageInserter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ImageInserter) GoString() string { - return s.String() -} - -// SetInsertableImages sets the InsertableImages field's value. -func (s *ImageInserter) SetInsertableImages(v []*InsertableImage) *ImageInserter { - s.InsertableImages = v - return s -} - -// Specifies media input -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Input -type Input struct { - _ struct{} `type:"structure"` - - // Specifies set of audio selectors within an input to combine. An input may - // have multiple audio selector groups. See "Audio Selector Group":#inputs-audio_selector_group - // for more information. - AudioSelectorGroups map[string]*AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"` - - // Use Audio selectors (AudioSelectors) to specify a track or set of tracks - // from the input that you will use in your outputs. You can use mutiple Audio - // selectors per input. - AudioSelectors map[string]*AudioSelector `locationName:"audioSelectors" type:"map"` - - // Use Captions selectors (CaptionSelectors) to specify the captions data from - // the input that you will use in your outputs. You can use mutiple captions - // selectors per input. - CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"` - - // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. - // Default is disabled. Only manaully controllable for MPEG2 and uncompressed - // video inputs. - DeblockFilter *string `locationName:"deblockFilter" type:"string" enum:"InputDeblockFilter"` - - // Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default - // is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video - // inputs. 
- DenoiseFilter *string `locationName:"denoiseFilter" type:"string" enum:"InputDenoiseFilter"` - - // Use Input (fileInput) to define the source file used in the transcode job. - // There can be multiple inputs in a job. These inputs are concantenated, in - // the order they are specified in the job, to create the output. - FileInput *string `locationName:"fileInput" type:"string"` - - // Use Filter enable (InputFilterEnable) to specify how the transcoding service - // applies the denoise and deblock filters. You must also enable the filters - // separately, with Denoise (InputDenoiseFilter) and Deblock (InputDeblockFilter). - // * Auto - The transcoding service determines whether to apply filtering, depending - // on input type and quality. * Disable - The input is not filtered. This is - // true even if you use the API to enable them in (InputDeblockFilter) and (InputDeblockFilter). - // * Force - The in put is filtered regardless of input type. - FilterEnable *string `locationName:"filterEnable" type:"string" enum:"InputFilterEnable"` - - // Use Filter strength (FilterStrength) to adjust the magnitude the input filter - // settings (Deblock and Denoise). The range is -5 to 5. Default is 0. - FilterStrength *int64 `locationName:"filterStrength" type:"integer"` - - // (InputClippings) contains sets of start and end times that together specify - // a portion of the input to be used in the outputs. If you provide only a start - // time, the clip will be the entire input from that point to the end. If you - // provide only an end time, it will be the entire input up to that point. When - // you specify more than one input clip, the transcoding service creates the - // job outputs by stringing the clips together in the order you specify them. - InputClippings []*InputClipping `locationName:"inputClippings" type:"list"` - - // Use Program (programNumber) to select a specific program from within a multi-program - // transport stream. 
Note that Quad 4K is not currently supported. Default is - // the first program within the transport stream. If the program you specify - // doesn't exist, the transcoding service will use this default. - ProgramNumber *int64 `locationName:"programNumber" type:"integer"` - - // Set PSI control (InputPsiControl) for transport stream inputs to specify - // which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio - // and video. * Use PSI - Scan only PSI data. - PsiControl *string `locationName:"psiControl" type:"string" enum:"InputPsiControl"` - - // Use Timecode source (InputTimecodeSource) to specify how timecode information - // from your input is adjusted and encoded in all outputs for the job. Default - // is embedded. Set to Embedded (EMBEDDED) to use the timecode that is in the - // input video. If no embedded timecode is in the source, will set the timecode - // for the first frame to 00:00:00:00. Set to Start at 0 (ZEROBASED) to set - // the timecode of the initial frame to 00:00:00:00. Set to Specified start - // (SPECIFIEDSTART) to provide the initial timecode yourself the setting (Start). - TimecodeSource *string `locationName:"timecodeSource" type:"string" enum:"InputTimecodeSource"` - - // Selector for video. - VideoSelector *VideoSelector `locationName:"videoSelector" type:"structure"` -} - -// String returns the string representation -func (s Input) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Input) GoString() string { - return s.String() -} - -// SetAudioSelectorGroups sets the AudioSelectorGroups field's value. -func (s *Input) SetAudioSelectorGroups(v map[string]*AudioSelectorGroup) *Input { - s.AudioSelectorGroups = v - return s -} - -// SetAudioSelectors sets the AudioSelectors field's value. 
-func (s *Input) SetAudioSelectors(v map[string]*AudioSelector) *Input { - s.AudioSelectors = v - return s -} - -// SetCaptionSelectors sets the CaptionSelectors field's value. -func (s *Input) SetCaptionSelectors(v map[string]*CaptionSelector) *Input { - s.CaptionSelectors = v - return s -} - -// SetDeblockFilter sets the DeblockFilter field's value. -func (s *Input) SetDeblockFilter(v string) *Input { - s.DeblockFilter = &v - return s -} - -// SetDenoiseFilter sets the DenoiseFilter field's value. -func (s *Input) SetDenoiseFilter(v string) *Input { - s.DenoiseFilter = &v - return s -} - -// SetFileInput sets the FileInput field's value. -func (s *Input) SetFileInput(v string) *Input { - s.FileInput = &v - return s -} - -// SetFilterEnable sets the FilterEnable field's value. -func (s *Input) SetFilterEnable(v string) *Input { - s.FilterEnable = &v - return s -} - -// SetFilterStrength sets the FilterStrength field's value. -func (s *Input) SetFilterStrength(v int64) *Input { - s.FilterStrength = &v - return s -} - -// SetInputClippings sets the InputClippings field's value. -func (s *Input) SetInputClippings(v []*InputClipping) *Input { - s.InputClippings = v - return s -} - -// SetProgramNumber sets the ProgramNumber field's value. -func (s *Input) SetProgramNumber(v int64) *Input { - s.ProgramNumber = &v - return s -} - -// SetPsiControl sets the PsiControl field's value. -func (s *Input) SetPsiControl(v string) *Input { - s.PsiControl = &v - return s -} - -// SetTimecodeSource sets the TimecodeSource field's value. -func (s *Input) SetTimecodeSource(v string) *Input { - s.TimecodeSource = &v - return s -} - -// SetVideoSelector sets the VideoSelector field's value. -func (s *Input) SetVideoSelector(v *VideoSelector) *Input { - s.VideoSelector = v - return s -} - -// Include one instance of (InputClipping) for each input clip. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/InputClipping -type InputClipping struct { - _ struct{} `type:"structure"` - - // Set End timecode (EndTimecode) to the end of the portion of the input you - // are clipping. The frame corresponding to the End timecode value is included - // in the clip. Start timecode or End timecode may be left blank, but not both. - // When choosing this value, take into account your setting for Input timecode - // source. For example, if you have embedded timecodes that start at 01:00:00:00 - // and you want your clip to begin five minutes into the video, use 01:00:05:00. - EndTimecode *string `locationName:"endTimecode" type:"string"` - - // Set Start timecode (StartTimecode) to the beginning of the portion of the - // input you are clipping. The frame corresponding to the Start timecode value - // is included in the clip. Start timecode or End timecode may be left blank, - // but not both. When choosing this value, take into account your setting for - // Input timecode source. For example, if you have embedded timecodes that start - // at 01:00:00:00 and you want your clip to begin five minutes into the video, - // use 01:00:05:00. - StartTimecode *string `locationName:"startTimecode" type:"string"` -} - -// String returns the string representation -func (s InputClipping) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputClipping) GoString() string { - return s.String() -} - -// SetEndTimecode sets the EndTimecode field's value. -func (s *InputClipping) SetEndTimecode(v string) *InputClipping { - s.EndTimecode = &v - return s -} - -// SetStartTimecode sets the StartTimecode field's value. -func (s *InputClipping) SetStartTimecode(v string) *InputClipping { - s.StartTimecode = &v - return s -} - -// Specified video input in a template. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/InputTemplate -type InputTemplate struct { - _ struct{} `type:"structure"` - - // Specifies set of audio selectors within an input to combine. An input may - // have multiple audio selector groups. See "Audio Selector Group":#inputs-audio_selector_group - // for more information. - AudioSelectorGroups map[string]*AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"` - - // Use Audio selectors (AudioSelectors) to specify a track or set of tracks - // from the input that you will use in your outputs. You can use mutiple Audio - // selectors per input. - AudioSelectors map[string]*AudioSelector `locationName:"audioSelectors" type:"map"` - - // Use Captions selectors (CaptionSelectors) to specify the captions data from - // the input that you will use in your outputs. You can use mutiple captions - // selectors per input. - CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"` - - // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. - // Default is disabled. Only manaully controllable for MPEG2 and uncompressed - // video inputs. - DeblockFilter *string `locationName:"deblockFilter" type:"string" enum:"InputDeblockFilter"` - - // Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default - // is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video - // inputs. - DenoiseFilter *string `locationName:"denoiseFilter" type:"string" enum:"InputDenoiseFilter"` - - // Use Filter enable (InputFilterEnable) to specify how the transcoding service - // applies the denoise and deblock filters. You must also enable the filters - // separately, with Denoise (InputDenoiseFilter) and Deblock (InputDeblockFilter). - // * Auto - The transcoding service determines whether to apply filtering, depending - // on input type and quality. * Disable - The input is not filtered. 
This is - // true even if you use the API to enable them in (InputDeblockFilter) and (InputDeblockFilter). - // * Force - The in put is filtered regardless of input type. - FilterEnable *string `locationName:"filterEnable" type:"string" enum:"InputFilterEnable"` - - // Use Filter strength (FilterStrength) to adjust the magnitude the input filter - // settings (Deblock and Denoise). The range is -5 to 5. Default is 0. - FilterStrength *int64 `locationName:"filterStrength" type:"integer"` - - // (InputClippings) contains sets of start and end times that together specify - // a portion of the input to be used in the outputs. If you provide only a start - // time, the clip will be the entire input from that point to the end. If you - // provide only an end time, it will be the entire input up to that point. When - // you specify more than one input clip, the transcoding service creates the - // job outputs by stringing the clips together in the order you specify them. - InputClippings []*InputClipping `locationName:"inputClippings" type:"list"` - - // Use Program (programNumber) to select a specific program from within a multi-program - // transport stream. Note that Quad 4K is not currently supported. Default is - // the first program within the transport stream. If the program you specify - // doesn't exist, the transcoding service will use this default. - ProgramNumber *int64 `locationName:"programNumber" type:"integer"` - - // Set PSI control (InputPsiControl) for transport stream inputs to specify - // which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio - // and video. * Use PSI - Scan only PSI data. - PsiControl *string `locationName:"psiControl" type:"string" enum:"InputPsiControl"` - - // Use Timecode source (InputTimecodeSource) to specify how timecode information - // from your input is adjusted and encoded in all outputs for the job. Default - // is embedded. 
Set to Embedded (EMBEDDED) to use the timecode that is in the - // input video. If no embedded timecode is in the source, will set the timecode - // for the first frame to 00:00:00:00. Set to Start at 0 (ZEROBASED) to set - // the timecode of the initial frame to 00:00:00:00. Set to Specified start - // (SPECIFIEDSTART) to provide the initial timecode yourself the setting (Start). - TimecodeSource *string `locationName:"timecodeSource" type:"string" enum:"InputTimecodeSource"` - - // Selector for video. - VideoSelector *VideoSelector `locationName:"videoSelector" type:"structure"` -} - -// String returns the string representation -func (s InputTemplate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputTemplate) GoString() string { - return s.String() -} - -// SetAudioSelectorGroups sets the AudioSelectorGroups field's value. -func (s *InputTemplate) SetAudioSelectorGroups(v map[string]*AudioSelectorGroup) *InputTemplate { - s.AudioSelectorGroups = v - return s -} - -// SetAudioSelectors sets the AudioSelectors field's value. -func (s *InputTemplate) SetAudioSelectors(v map[string]*AudioSelector) *InputTemplate { - s.AudioSelectors = v - return s -} - -// SetCaptionSelectors sets the CaptionSelectors field's value. -func (s *InputTemplate) SetCaptionSelectors(v map[string]*CaptionSelector) *InputTemplate { - s.CaptionSelectors = v - return s -} - -// SetDeblockFilter sets the DeblockFilter field's value. -func (s *InputTemplate) SetDeblockFilter(v string) *InputTemplate { - s.DeblockFilter = &v - return s -} - -// SetDenoiseFilter sets the DenoiseFilter field's value. -func (s *InputTemplate) SetDenoiseFilter(v string) *InputTemplate { - s.DenoiseFilter = &v - return s -} - -// SetFilterEnable sets the FilterEnable field's value. -func (s *InputTemplate) SetFilterEnable(v string) *InputTemplate { - s.FilterEnable = &v - return s -} - -// SetFilterStrength sets the FilterStrength field's value. 
-func (s *InputTemplate) SetFilterStrength(v int64) *InputTemplate { - s.FilterStrength = &v - return s -} - -// SetInputClippings sets the InputClippings field's value. -func (s *InputTemplate) SetInputClippings(v []*InputClipping) *InputTemplate { - s.InputClippings = v - return s -} - -// SetProgramNumber sets the ProgramNumber field's value. -func (s *InputTemplate) SetProgramNumber(v int64) *InputTemplate { - s.ProgramNumber = &v - return s -} - -// SetPsiControl sets the PsiControl field's value. -func (s *InputTemplate) SetPsiControl(v string) *InputTemplate { - s.PsiControl = &v - return s -} - -// SetTimecodeSource sets the TimecodeSource field's value. -func (s *InputTemplate) SetTimecodeSource(v string) *InputTemplate { - s.TimecodeSource = &v - return s -} - -// SetVideoSelector sets the VideoSelector field's value. -func (s *InputTemplate) SetVideoSelector(v *VideoSelector) *InputTemplate { - s.VideoSelector = v - return s -} - -// Settings for Insertable Image -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/InsertableImage -type InsertableImage struct { - _ struct{} `type:"structure"` - - // Use Duration (Duration) to set the time, in milliseconds, for the image to - // remain on the output video. - Duration *int64 `locationName:"duration" type:"integer"` - - // Use Fade in (FadeIut) to set the length, in milliseconds, of the inserted - // image fade in. If you don't specify a value for Fade in, the image will appear - // abruptly at the Start time. - FadeIn *int64 `locationName:"fadeIn" type:"integer"` - - // Use Fade out (FadeOut) to set the length, in milliseconds, of the inserted - // image fade out. If you don't specify a value for Fade out, the image will - // disappear abruptly at the end of the inserted image duration. - FadeOut *int64 `locationName:"fadeOut" type:"integer"` - - // Specify the Height (Height) of the inserted image. Use a value that is less - // than or equal to the video resolution height. 
Leave this setting blank to - // use the native height of the image. - Height *int64 `locationName:"height" type:"integer"` - - // Use Image location (imageInserterInput) to specify the Amazon S3 location - // of the image to be inserted into the output. Use a 32 bit BMP, PNG, or TGA - // file that fits inside the video frame. - ImageInserterInput *string `locationName:"imageInserterInput" type:"string"` - - // Use Left (ImageX) to set the distance, in pixels, between the inserted image - // and the left edge of the frame. Required for BMP, PNG and TGA input. - ImageX *int64 `locationName:"imageX" type:"integer"` - - // Use Top (ImageY) to set the distance, in pixels, between the inserted image - // and the top edge of the video frame. Required for BMP, PNG and TGA input. - ImageY *int64 `locationName:"imageY" type:"integer"` - - // Use Layer (Layer) to specify how overlapping inserted images appear. Images - // with higher values of layer appear on top of images with lower values of - // layer. - Layer *int64 `locationName:"layer" type:"integer"` - - // Use Opacity (Opacity) to specify how much of the underlying video shows through - // the inserted image. 0 is transparent and 100 is fully opaque. Default is - // 50. - Opacity *int64 `locationName:"opacity" type:"integer"` - - // Use Start time (StartTime) to specify the video timecode when the image is - // inserted in the output. This must be in timecode format (HH:MM:SS:FF) - StartTime *string `locationName:"startTime" type:"string"` - - // Specify the Width (Width) of the inserted image. Use a value that is less - // than or equal to the video resolution width. Leave this setting blank to - // use the native width of the image. 
- Width *int64 `locationName:"width" type:"integer"` -} - -// String returns the string representation -func (s InsertableImage) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InsertableImage) GoString() string { - return s.String() -} - -// SetDuration sets the Duration field's value. -func (s *InsertableImage) SetDuration(v int64) *InsertableImage { - s.Duration = &v - return s -} - -// SetFadeIn sets the FadeIn field's value. -func (s *InsertableImage) SetFadeIn(v int64) *InsertableImage { - s.FadeIn = &v - return s -} - -// SetFadeOut sets the FadeOut field's value. -func (s *InsertableImage) SetFadeOut(v int64) *InsertableImage { - s.FadeOut = &v - return s -} - -// SetHeight sets the Height field's value. -func (s *InsertableImage) SetHeight(v int64) *InsertableImage { - s.Height = &v - return s -} - -// SetImageInserterInput sets the ImageInserterInput field's value. -func (s *InsertableImage) SetImageInserterInput(v string) *InsertableImage { - s.ImageInserterInput = &v - return s -} - -// SetImageX sets the ImageX field's value. -func (s *InsertableImage) SetImageX(v int64) *InsertableImage { - s.ImageX = &v - return s -} - -// SetImageY sets the ImageY field's value. -func (s *InsertableImage) SetImageY(v int64) *InsertableImage { - s.ImageY = &v - return s -} - -// SetLayer sets the Layer field's value. -func (s *InsertableImage) SetLayer(v int64) *InsertableImage { - s.Layer = &v - return s -} - -// SetOpacity sets the Opacity field's value. -func (s *InsertableImage) SetOpacity(v int64) *InsertableImage { - s.Opacity = &v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *InsertableImage) SetStartTime(v string) *InsertableImage { - s.StartTime = &v - return s -} - -// SetWidth sets the Width field's value. -func (s *InsertableImage) SetWidth(v int64) *InsertableImage { - s.Width = &v - return s -} - -// Each job converts an input file into an output file or files. 
For more information, -// see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Job -type Job struct { - _ struct{} `type:"structure"` - - // An identifier for this resource that is unique within all of AWS. - Arn *string `locationName:"arn" type:"string"` - - // The time, in Unix epoch format in seconds, when the job got created. - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` - - // Error code for the job - ErrorCode *int64 `locationName:"errorCode" type:"integer"` - - // Error message of Job - ErrorMessage *string `locationName:"errorMessage" type:"string"` - - // A portion of the job's ARN, unique within your AWS Elemental MediaConvert - // resources - Id *string `locationName:"id" type:"string"` - - // The job template that the job is created from, if it is created from a job - // template. - JobTemplate *string `locationName:"jobTemplate" type:"string"` - - // List of output group details - OutputGroupDetails []*OutputGroupDetail `locationName:"outputGroupDetails" type:"list"` - - // Optional. When you create a job, you can specify a queue to send it to. If - // you don't specify, the job will go to the default queue. For more about queues, - // see the User Guide topic at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html - Queue *string `locationName:"queue" type:"string"` - - // The IAM role you use for creating this job. For details about permissions, - // see the User Guide topic at the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html - Role *string `locationName:"role" type:"string"` - - // JobSettings contains all the transcode settings for a job. - Settings *JobSettings `locationName:"settings" type:"structure"` - - // A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR. 
- Status *string `locationName:"status" type:"string" enum:"JobStatus"` - - // Information about when jobs are submitted, started, and finished is specified - // in Unix epoch format in seconds. - Timing *Timing `locationName:"timing" type:"structure"` - - // User-defined metadata that you want to associate with an MediaConvert job. - // You specify metadata in key/value pairs. - UserMetadata map[string]*string `locationName:"userMetadata" type:"map"` -} - -// String returns the string representation -func (s Job) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Job) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *Job) SetArn(v string) *Job { - s.Arn = &v - return s -} - -// SetCreatedAt sets the CreatedAt field's value. -func (s *Job) SetCreatedAt(v time.Time) *Job { - s.CreatedAt = &v - return s -} - -// SetErrorCode sets the ErrorCode field's value. -func (s *Job) SetErrorCode(v int64) *Job { - s.ErrorCode = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *Job) SetErrorMessage(v string) *Job { - s.ErrorMessage = &v - return s -} - -// SetId sets the Id field's value. -func (s *Job) SetId(v string) *Job { - s.Id = &v - return s -} - -// SetJobTemplate sets the JobTemplate field's value. -func (s *Job) SetJobTemplate(v string) *Job { - s.JobTemplate = &v - return s -} - -// SetOutputGroupDetails sets the OutputGroupDetails field's value. -func (s *Job) SetOutputGroupDetails(v []*OutputGroupDetail) *Job { - s.OutputGroupDetails = v - return s -} - -// SetQueue sets the Queue field's value. -func (s *Job) SetQueue(v string) *Job { - s.Queue = &v - return s -} - -// SetRole sets the Role field's value. -func (s *Job) SetRole(v string) *Job { - s.Role = &v - return s -} - -// SetSettings sets the Settings field's value. 
-func (s *Job) SetSettings(v *JobSettings) *Job { - s.Settings = v - return s -} - -// SetStatus sets the Status field's value. -func (s *Job) SetStatus(v string) *Job { - s.Status = &v - return s -} - -// SetTiming sets the Timing field's value. -func (s *Job) SetTiming(v *Timing) *Job { - s.Timing = v - return s -} - -// SetUserMetadata sets the UserMetadata field's value. -func (s *Job) SetUserMetadata(v map[string]*string) *Job { - s.UserMetadata = v - return s -} - -// JobSettings contains all the transcode settings for a job. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/JobSettings -type JobSettings struct { - _ struct{} `type:"structure"` - - // When specified, this offset (in milliseconds) is added to the input Ad Avail - // PTS time. - AdAvailOffset *int64 `locationName:"adAvailOffset" type:"integer"` - - // Settings for ad avail blanking. Video can be blanked or overlaid with an - // image, and audio muted during SCTE-35 triggered ad avails. - AvailBlanking *AvailBlanking `locationName:"availBlanking" type:"structure"` - - // Use Inputs (inputs) to define source file used in the transcode job. There - // can be multiple inputs add in a job. These inputs will be concantenated together - // to create the output. - Inputs []*Input `locationName:"inputs" type:"list"` - - // Settings for Nielsen Configuration - NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"` - - // **!!**(OutputGroups) contains one group of settings for each set of outputs - // that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, - // Quicktime, MXF, and no container) are grouped in a single output group as - // well. Required in (OutputGroups) is a group of settings that apply to the - // whole group. This required object depends on the value you set for (Type) - // under (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are - // as follows. 
* FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, - // HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, - // MsSmoothGroupSettings - OutputGroups []*OutputGroup `locationName:"outputGroups" type:"list"` - - // Contains settings used to acquire and adjust timecode information from inputs. - TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"` - - // Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags - // in your job. To include timed metadata, you must enable it here, enable it - // in each output container, and specify tags and timecodes in ID3 insertion - // (Id3Insertion) objects. - TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"` -} - -// String returns the string representation -func (s JobSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s JobSettings) GoString() string { - return s.String() -} - -// SetAdAvailOffset sets the AdAvailOffset field's value. -func (s *JobSettings) SetAdAvailOffset(v int64) *JobSettings { - s.AdAvailOffset = &v - return s -} - -// SetAvailBlanking sets the AvailBlanking field's value. -func (s *JobSettings) SetAvailBlanking(v *AvailBlanking) *JobSettings { - s.AvailBlanking = v - return s -} - -// SetInputs sets the Inputs field's value. -func (s *JobSettings) SetInputs(v []*Input) *JobSettings { - s.Inputs = v - return s -} - -// SetNielsenConfiguration sets the NielsenConfiguration field's value. -func (s *JobSettings) SetNielsenConfiguration(v *NielsenConfiguration) *JobSettings { - s.NielsenConfiguration = v - return s -} - -// SetOutputGroups sets the OutputGroups field's value. -func (s *JobSettings) SetOutputGroups(v []*OutputGroup) *JobSettings { - s.OutputGroups = v - return s -} - -// SetTimecodeConfig sets the TimecodeConfig field's value. 
-func (s *JobSettings) SetTimecodeConfig(v *TimecodeConfig) *JobSettings { - s.TimecodeConfig = v - return s -} - -// SetTimedMetadataInsertion sets the TimedMetadataInsertion field's value. -func (s *JobSettings) SetTimedMetadataInsertion(v *TimedMetadataInsertion) *JobSettings { - s.TimedMetadataInsertion = v - return s -} - -// A job template is a pre-made set of encoding instructions that you can use -// to quickly create a job. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/JobTemplate -type JobTemplate struct { - _ struct{} `type:"structure"` - - // An identifier for this resource that is unique within all of AWS. - Arn *string `locationName:"arn" type:"string"` - - // An optional category you create to organize your job templates. - Category *string `locationName:"category" type:"string"` - - // The timestamp in epoch seconds for Job template creation. - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` - - // An optional description you create for each job template. - Description *string `locationName:"description" type:"string"` - - // The timestamp in epoch seconds when the Job template was last updated. - LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unix"` - - // A name you create for each job template. Each name must be unique within - // your account. - Name *string `locationName:"name" type:"string"` - - // Optional. The queue that jobs created from this template are assigned to. - // If you don't specify this, jobs will go to the default queue. - Queue *string `locationName:"queue" type:"string"` - - // JobTemplateSettings contains all the transcode settings saved in the template - // that will be applied to jobs created from it. - Settings *JobTemplateSettings `locationName:"settings" type:"structure"` - - // A job template can be of two types: system or custom. System or built-in - // job templates can’t be modified or deleted by the user. 
- Type *string `locationName:"type" type:"string" enum:"Type"` -} - -// String returns the string representation -func (s JobTemplate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s JobTemplate) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *JobTemplate) SetArn(v string) *JobTemplate { - s.Arn = &v - return s -} - -// SetCategory sets the Category field's value. -func (s *JobTemplate) SetCategory(v string) *JobTemplate { - s.Category = &v - return s -} - -// SetCreatedAt sets the CreatedAt field's value. -func (s *JobTemplate) SetCreatedAt(v time.Time) *JobTemplate { - s.CreatedAt = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *JobTemplate) SetDescription(v string) *JobTemplate { - s.Description = &v - return s -} - -// SetLastUpdated sets the LastUpdated field's value. -func (s *JobTemplate) SetLastUpdated(v time.Time) *JobTemplate { - s.LastUpdated = &v - return s -} - -// SetName sets the Name field's value. -func (s *JobTemplate) SetName(v string) *JobTemplate { - s.Name = &v - return s -} - -// SetQueue sets the Queue field's value. -func (s *JobTemplate) SetQueue(v string) *JobTemplate { - s.Queue = &v - return s -} - -// SetSettings sets the Settings field's value. -func (s *JobTemplate) SetSettings(v *JobTemplateSettings) *JobTemplate { - s.Settings = v - return s -} - -// SetType sets the Type field's value. -func (s *JobTemplate) SetType(v string) *JobTemplate { - s.Type = &v - return s -} - -// JobTemplateSettings contains all the transcode settings saved in the template -// that will be applied to jobs created from it. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/JobTemplateSettings -type JobTemplateSettings struct { - _ struct{} `type:"structure"` - - // When specified, this offset (in milliseconds) is added to the input Ad Avail - // PTS time. 
- AdAvailOffset *int64 `locationName:"adAvailOffset" type:"integer"` - - // Settings for ad avail blanking. Video can be blanked or overlaid with an - // image, and audio muted during SCTE-35 triggered ad avails. - AvailBlanking *AvailBlanking `locationName:"availBlanking" type:"structure"` - - // Use Inputs (inputs) to define the source file used in the transcode job. - // There can only be one input in a job template. Using the API, you can include - // multiple inputs when referencing a job template. - Inputs []*InputTemplate `locationName:"inputs" type:"list"` - - // Settings for Nielsen Configuration - NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"` - - // **!!**(OutputGroups) contains one group of settings for each set of outputs - // that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, - // Quicktime, MXF, and no container) are grouped in a single output group as - // well. Required in (OutputGroups) is a group of settings that apply to the - // whole group. This required object depends on the value you set for (Type) - // under (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are - // as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, - // HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, - // MsSmoothGroupSettings - OutputGroups []*OutputGroup `locationName:"outputGroups" type:"list"` - - // Contains settings used to acquire and adjust timecode information from inputs. - TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"` - - // Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags - // in your job. To include timed metadata, you must enable it here, enable it - // in each output container, and specify tags and timecodes in ID3 insertion - // (Id3Insertion) objects. 
- TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"` -} - -// String returns the string representation -func (s JobTemplateSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s JobTemplateSettings) GoString() string { - return s.String() -} - -// SetAdAvailOffset sets the AdAvailOffset field's value. -func (s *JobTemplateSettings) SetAdAvailOffset(v int64) *JobTemplateSettings { - s.AdAvailOffset = &v - return s -} - -// SetAvailBlanking sets the AvailBlanking field's value. -func (s *JobTemplateSettings) SetAvailBlanking(v *AvailBlanking) *JobTemplateSettings { - s.AvailBlanking = v - return s -} - -// SetInputs sets the Inputs field's value. -func (s *JobTemplateSettings) SetInputs(v []*InputTemplate) *JobTemplateSettings { - s.Inputs = v - return s -} - -// SetNielsenConfiguration sets the NielsenConfiguration field's value. -func (s *JobTemplateSettings) SetNielsenConfiguration(v *NielsenConfiguration) *JobTemplateSettings { - s.NielsenConfiguration = v - return s -} - -// SetOutputGroups sets the OutputGroups field's value. -func (s *JobTemplateSettings) SetOutputGroups(v []*OutputGroup) *JobTemplateSettings { - s.OutputGroups = v - return s -} - -// SetTimecodeConfig sets the TimecodeConfig field's value. -func (s *JobTemplateSettings) SetTimecodeConfig(v *TimecodeConfig) *JobTemplateSettings { - s.TimecodeConfig = v - return s -} - -// SetTimedMetadataInsertion sets the TimedMetadataInsertion field's value. -func (s *JobTemplateSettings) SetTimedMetadataInsertion(v *TimedMetadataInsertion) *JobTemplateSettings { - s.TimedMetadataInsertion = v - return s -} - -// You can send list job templates requests with an empty body. Optionally, -// you can filter the response by category by specifying it in your request -// body. You can also optionally specify the maximum number, up to twenty, of -// job templates to be returned. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobTemplatesRequest -type ListJobTemplatesInput struct { - _ struct{} `type:"structure"` - - // Optionally, specify a job template category to limit responses to only job - // templates from that category. - Category *string `location:"querystring" locationName:"category" type:"string"` - - // Optional. When you request a list of job templates, you can choose to list - // them alphabetically by NAME or chronologically by CREATION_DATE. If you don't - // specify, the service will list them by name. - ListBy *string `location:"querystring" locationName:"listBy" type:"string" enum:"JobTemplateListBy"` - - // Optional. Number of job templates, up to twenty, that will be returned at - // one time. - MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` - - // Use this string, provided with the response to a previous request, to request - // the next batch of job templates. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - - // When you request lists of resources, you can optionally specify whether they - // are sorted in ASCENDING or DESCENDING order. Default varies by resource. - Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"` -} - -// String returns the string representation -func (s ListJobTemplatesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListJobTemplatesInput) GoString() string { - return s.String() -} - -// SetCategory sets the Category field's value. -func (s *ListJobTemplatesInput) SetCategory(v string) *ListJobTemplatesInput { - s.Category = &v - return s -} - -// SetListBy sets the ListBy field's value. -func (s *ListJobTemplatesInput) SetListBy(v string) *ListJobTemplatesInput { - s.ListBy = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. 
-func (s *ListJobTemplatesInput) SetMaxResults(v int64) *ListJobTemplatesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListJobTemplatesInput) SetNextToken(v string) *ListJobTemplatesInput { - s.NextToken = &v - return s -} - -// SetOrder sets the Order field's value. -func (s *ListJobTemplatesInput) SetOrder(v string) *ListJobTemplatesInput { - s.Order = &v - return s -} - -// Successful list job templates requests return a JSON array of job templates. -// If you do not specify how they are ordered, you will receive them in alphabetical -// order by name. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobTemplatesResponse -type ListJobTemplatesOutput struct { - _ struct{} `type:"structure"` - - // List of Job templates. - JobTemplates []*JobTemplate `locationName:"jobTemplates" type:"list"` - - // Use this string to request the next batch of job templates. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListJobTemplatesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListJobTemplatesOutput) GoString() string { - return s.String() -} - -// SetJobTemplates sets the JobTemplates field's value. -func (s *ListJobTemplatesOutput) SetJobTemplates(v []*JobTemplate) *ListJobTemplatesOutput { - s.JobTemplates = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListJobTemplatesOutput) SetNextToken(v string) *ListJobTemplatesOutput { - s.NextToken = &v - return s -} - -// You can send list jobs requests with an empty body. Optionally, you can filter -// the response by queue and/or job status by specifying them in your request -// body. You can also optionally specify the maximum number, up to twenty, of -// jobs to be returned. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobsRequest -type ListJobsInput struct { - _ struct{} `type:"structure"` - - // Optional. Number of jobs, up to twenty, that will be returned at one time. - MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` - - // Use this string, provided with the response to a previous request, to request - // the next batch of jobs. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - - // When you request lists of resources, you can optionally specify whether they - // are sorted in ASCENDING or DESCENDING order. Default varies by resource. - Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"` - - // Provide a queue name to get back only jobs from that queue. - Queue *string `location:"querystring" locationName:"queue" type:"string"` - - // A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR. - Status *string `location:"querystring" locationName:"status" type:"string" enum:"JobStatus"` -} - -// String returns the string representation -func (s ListJobsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListJobsInput) GoString() string { - return s.String() -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput { - s.NextToken = &v - return s -} - -// SetOrder sets the Order field's value. -func (s *ListJobsInput) SetOrder(v string) *ListJobsInput { - s.Order = &v - return s -} - -// SetQueue sets the Queue field's value. -func (s *ListJobsInput) SetQueue(v string) *ListJobsInput { - s.Queue = &v - return s -} - -// SetStatus sets the Status field's value. 
-func (s *ListJobsInput) SetStatus(v string) *ListJobsInput { - s.Status = &v - return s -} - -// Successful list jobs requests return a JSON array of jobs. If you do not -// specify how they are ordered, you will receive the most recently created -// first. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobsResponse -type ListJobsOutput struct { - _ struct{} `type:"structure"` - - // List of jobs - Jobs []*Job `locationName:"jobs" type:"list"` - - // Use this string to request the next batch of jobs. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListJobsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListJobsOutput) GoString() string { - return s.String() -} - -// SetJobs sets the Jobs field's value. -func (s *ListJobsOutput) SetJobs(v []*Job) *ListJobsOutput { - s.Jobs = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { - s.NextToken = &v - return s -} - -// You can send list presets requests with an empty body. Optionally, you can -// filter the response by category by specifying it in your request body. You -// can also optionally specify the maximum number, up to twenty, of queues to -// be returned. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListPresetsRequest -type ListPresetsInput struct { - _ struct{} `type:"structure"` - - // Optionally, specify a preset category to limit responses to only presets - // from that category. - Category *string `location:"querystring" locationName:"category" type:"string"` - - // Optional. When you request a list of presets, you can choose to list them - // alphabetically by NAME or chronologically by CREATION_DATE. If you don't - // specify, the service will list them by name. 
- ListBy *string `location:"querystring" locationName:"listBy" type:"string" enum:"PresetListBy"` - - // Optional. Number of presets, up to twenty, that will be returned at one time - MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` - - // Use this string, provided with the response to a previous request, to request - // the next batch of presets. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - - // When you request lists of resources, you can optionally specify whether they - // are sorted in ASCENDING or DESCENDING order. Default varies by resource. - Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"` -} - -// String returns the string representation -func (s ListPresetsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListPresetsInput) GoString() string { - return s.String() -} - -// SetCategory sets the Category field's value. -func (s *ListPresetsInput) SetCategory(v string) *ListPresetsInput { - s.Category = &v - return s -} - -// SetListBy sets the ListBy field's value. -func (s *ListPresetsInput) SetListBy(v string) *ListPresetsInput { - s.ListBy = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListPresetsInput) SetMaxResults(v int64) *ListPresetsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListPresetsInput) SetNextToken(v string) *ListPresetsInput { - s.NextToken = &v - return s -} - -// SetOrder sets the Order field's value. -func (s *ListPresetsInput) SetOrder(v string) *ListPresetsInput { - s.Order = &v - return s -} - -// Successful list presets requests return a JSON array of presets. If you do -// not specify how they are ordered, you will receive them alphabetically by -// name. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListPresetsResponse -type ListPresetsOutput struct { - _ struct{} `type:"structure"` - - // Use this string to request the next batch of presets. - NextToken *string `locationName:"nextToken" type:"string"` - - // List of presets - Presets []*Preset `locationName:"presets" type:"list"` -} - -// String returns the string representation -func (s ListPresetsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListPresetsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListPresetsOutput) SetNextToken(v string) *ListPresetsOutput { - s.NextToken = &v - return s -} - -// SetPresets sets the Presets field's value. -func (s *ListPresetsOutput) SetPresets(v []*Preset) *ListPresetsOutput { - s.Presets = v - return s -} - -// You can send list queues requests with an empty body. You can optionally -// specify the maximum number, up to twenty, of queues to be returned. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListQueuesRequest -type ListQueuesInput struct { - _ struct{} `type:"structure"` - - // Optional. When you request a list of queues, you can choose to list them - // alphabetically by NAME or chronologically by CREATION_DATE. If you don't - // specify, the service will list them by creation date. - ListBy *string `location:"querystring" locationName:"listBy" type:"string" enum:"QueueListBy"` - - // Optional. Number of queues, up to twenty, that will be returned at one time. - MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` - - // Use this string, provided with the response to a previous request, to request - // the next batch of queues. 
- NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - - // When you request lists of resources, you can optionally specify whether they - // are sorted in ASCENDING or DESCENDING order. Default varies by resource. - Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"` -} - -// String returns the string representation -func (s ListQueuesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListQueuesInput) GoString() string { - return s.String() -} - -// SetListBy sets the ListBy field's value. -func (s *ListQueuesInput) SetListBy(v string) *ListQueuesInput { - s.ListBy = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListQueuesInput) SetMaxResults(v int64) *ListQueuesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListQueuesInput) SetNextToken(v string) *ListQueuesInput { - s.NextToken = &v - return s -} - -// SetOrder sets the Order field's value. -func (s *ListQueuesInput) SetOrder(v string) *ListQueuesInput { - s.Order = &v - return s -} - -// Successful list queues return a JSON array of queues. If you do not specify -// how they are ordered, you will receive them alphabetically by name. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListQueuesResponse -type ListQueuesOutput struct { - _ struct{} `type:"structure"` - - // Use this string to request the next batch of queues. - NextToken *string `locationName:"nextToken" type:"string"` - - // List of queues - Queues []*Queue `locationName:"queues" type:"list"` -} - -// String returns the string representation -func (s ListQueuesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListQueuesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListQueuesOutput) SetNextToken(v string) *ListQueuesOutput { - s.NextToken = &v - return s -} - -// SetQueues sets the Queues field's value. -func (s *ListQueuesOutput) SetQueues(v []*Queue) *ListQueuesOutput { - s.Queues = v - return s -} - -// Settings for M2TS Container. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/M2tsSettings -type M2tsSettings struct { - _ struct{} `type:"structure"` - - // Selects between the DVB and ATSC buffer models for Dolby Digital audio. - AudioBufferModel *string `locationName:"audioBufferModel" type:"string" enum:"M2tsAudioBufferModel"` - - // The number of audio frames to insert for each PES packet. - AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"` - - // Packet Identifier (PID) of the elementary audio stream(s) in the transport - // stream. Multiple values are accepted, and can be entered in ranges and/or - // by comma separation. Can be entered as decimal or hexadecimal values. - AudioPids []*int64 `locationName:"audioPids" type:"list"` - - // The output bitrate of the transport stream in bits per second. Setting to - // 0 lets the muxer automatically determine the appropriate bitrate. Other common - // values are 3750000, 7500000, and 15000000. - Bitrate *int64 `locationName:"bitrate" type:"integer"` - - // Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX, - // use multiplex buffer model. If set to NONE, this can lead to lower latency, - // but low-memory devices may not be able to play back the stream without interruptions. - BufferModel *string `locationName:"bufferModel" type:"string" enum:"M2tsBufferModel"` - - // Inserts DVB Network Information Table (NIT) at the specified table repetition - // interval. - DvbNitSettings *DvbNitSettings `locationName:"dvbNitSettings" type:"structure"` - - // Inserts DVB Service Description Table (NIT) at the specified table repetition - // interval. 
- DvbSdtSettings *DvbSdtSettings `locationName:"dvbSdtSettings" type:"structure"` - - // Packet Identifier (PID) for input source DVB Subtitle data to this output. - // Multiple values are accepted, and can be entered in ranges and/or by comma - // separation. Can be entered as decimal or hexadecimal values. - DvbSubPids []*int64 `locationName:"dvbSubPids" type:"list"` - - // Inserts DVB Time and Date Table (TDT) at the specified table repetition interval. - DvbTdtSettings *DvbTdtSettings `locationName:"dvbTdtSettings" type:"structure"` - - // Packet Identifier (PID) for input source DVB Teletext data to this output. - // Can be entered as a decimal or hexadecimal value. - DvbTeletextPid *int64 `locationName:"dvbTeletextPid" type:"integer"` - - // When set to VIDEO_AND_FIXED_INTERVALS, audio EBP markers will be added to - // partitions 3 and 4. The interval between these additional markers will be - // fixed, and will be slightly shorter than the video EBP marker interval. When - // set to VIDEO_INTERVAL, these additional markers will not be inserted. Only - // applicable when EBP segmentation markers are is selected (segmentationMarkers - // is EBP or EBP_LEGACY). - EbpAudioInterval *string `locationName:"ebpAudioInterval" type:"string" enum:"M2tsEbpAudioInterval"` - - // Selects which PIDs to place EBP markers on. They can either be placed only - // on the video PID, or on both the video PID and all audio PIDs. Only applicable - // when EBP segmentation markers are is selected (segmentationMarkers is EBP - // or EBP_LEGACY). - EbpPlacement *string `locationName:"ebpPlacement" type:"string" enum:"M2tsEbpPlacement"` - - // Controls whether to include the ES Rate field in the PES header. - EsRateInPes *string `locationName:"esRateInPes" type:"string" enum:"M2tsEsRateInPes"` - - // The length in seconds of each fragment. Only used with EBP markers. 
- FragmentTime *float64 `locationName:"fragmentTime" type:"double"` - - // Maximum time in milliseconds between Program Clock References (PCRs) inserted - // into the transport stream. - MaxPcrInterval *int64 `locationName:"maxPcrInterval" type:"integer"` - - // When set, enforces that Encoder Boundary Points do not come within the specified - // time interval of each other by looking ahead at input video. If another EBP - // is going to come in within the specified time interval, the current EBP is - // not emitted, and the segment is "stretched" to the next marker. The lookahead - // value does not add latency to the system. The Live Event must be configured - // elsewhere to create sufficient latency to make the lookahead accurate. - MinEbpInterval *int64 `locationName:"minEbpInterval" type:"integer"` - - // Value in bits per second of extra null packets to insert into the transport - // stream. This can be used if a downstream encryption system requires periodic - // null packets. - NullPacketBitrate *float64 `locationName:"nullPacketBitrate" type:"double"` - - // The number of milliseconds between instances of this table in the output - // transport stream. - PatInterval *int64 `locationName:"patInterval" type:"integer"` - - // When set to PCR_EVERY_PES_PACKET, a Program Clock Reference value is inserted - // for every Packetized Elementary Stream (PES) header. This is effective only - // when the PCR PID is the same as the video or audio elementary stream. - PcrControl *string `locationName:"pcrControl" type:"string" enum:"M2tsPcrControl"` - - // Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport - // stream. When no value is given, the encoder will assign the same value as - // the Video PID. Can be entered as a decimal or hexadecimal value. - PcrPid *int64 `locationName:"pcrPid" type:"integer"` - - // The number of milliseconds between instances of this table in the output - // transport stream. 
- PmtInterval *int64 `locationName:"pmtInterval" type:"integer"` - - // Packet Identifier (PID) for the Program Map Table (PMT) in the transport - // stream. Can be entered as a decimal or hexadecimal value. - PmtPid *int64 `locationName:"pmtPid" type:"integer"` - - // Packet Identifier (PID) of the private metadata stream in the transport stream. - // Can be entered as a decimal or hexadecimal value. - PrivateMetadataPid *int64 `locationName:"privateMetadataPid" type:"integer"` - - // The value of the program number field in the Program Map Table. - ProgramNumber *int64 `locationName:"programNumber" type:"integer"` - - // When set to CBR, inserts null packets into transport stream to fill specified - // bitrate. When set to VBR, the bitrate setting acts as the maximum bitrate, - // but the output will not be padded up to that bitrate. - RateMode *string `locationName:"rateMode" type:"string" enum:"M2tsRateMode"` - - // Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Can - // be entered as a decimal or hexadecimal value. - Scte35Pid *int64 `locationName:"scte35Pid" type:"integer"` - - // Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from - // input to output. This is only available for certain containers. - Scte35Source *string `locationName:"scte35Source" type:"string" enum:"M2tsScte35Source"` - - // Inserts segmentation markers at each segmentation_time period. rai_segstart - // sets the Random Access Indicator bit in the adaptation field. rai_adapt sets - // the RAI bit and adds the current timecode in the private data bytes. psi_segstart - // inserts PAT and PMT tables at the start of segments. ebp adds Encoder Boundary - // Point information to the adaptation field as per OpenCable specification - // OC-SP-EBP-I01-130118. ebp_legacy adds Encoder Boundary Point information - // to the adaptation field using a legacy proprietary format. 
- SegmentationMarkers *string `locationName:"segmentationMarkers" type:"string" enum:"M2tsSegmentationMarkers"` - - // The segmentation style parameter controls how segmentation markers are inserted - // into the transport stream. With avails, it is possible that segments may - // be truncated, which can influence where future segmentation markers are inserted. - // When a segmentation style of "reset_cadence" is selected and a segment is - // truncated due to an avail, we will reset the segmentation cadence. This means - // the subsequent segment will have a duration of of $segmentation_time seconds. - // When a segmentation style of "maintain_cadence" is selected and a segment - // is truncated due to an avail, we will not reset the segmentation cadence. - // This means the subsequent segment will likely be truncated as well. However, - // all segments after that will have a duration of $segmentation_time seconds. - // Note that EBP lookahead is a slight exception to this rule. - SegmentationStyle *string `locationName:"segmentationStyle" type:"string" enum:"M2tsSegmentationStyle"` - - // The length in seconds of each segment. Required unless markers is set to - // _none_. - SegmentationTime *float64 `locationName:"segmentationTime" type:"double"` - - // The value of the transport stream ID field in the Program Map Table. - TransportStreamId *int64 `locationName:"transportStreamId" type:"integer"` - - // Packet Identifier (PID) of the elementary video stream in the transport stream. - // Can be entered as a decimal or hexadecimal value. - VideoPid *int64 `locationName:"videoPid" type:"integer"` -} - -// String returns the string representation -func (s M2tsSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s M2tsSettings) GoString() string { - return s.String() -} - -// SetAudioBufferModel sets the AudioBufferModel field's value. 
-func (s *M2tsSettings) SetAudioBufferModel(v string) *M2tsSettings { - s.AudioBufferModel = &v - return s -} - -// SetAudioFramesPerPes sets the AudioFramesPerPes field's value. -func (s *M2tsSettings) SetAudioFramesPerPes(v int64) *M2tsSettings { - s.AudioFramesPerPes = &v - return s -} - -// SetAudioPids sets the AudioPids field's value. -func (s *M2tsSettings) SetAudioPids(v []*int64) *M2tsSettings { - s.AudioPids = v - return s -} - -// SetBitrate sets the Bitrate field's value. -func (s *M2tsSettings) SetBitrate(v int64) *M2tsSettings { - s.Bitrate = &v - return s -} - -// SetBufferModel sets the BufferModel field's value. -func (s *M2tsSettings) SetBufferModel(v string) *M2tsSettings { - s.BufferModel = &v - return s -} - -// SetDvbNitSettings sets the DvbNitSettings field's value. -func (s *M2tsSettings) SetDvbNitSettings(v *DvbNitSettings) *M2tsSettings { - s.DvbNitSettings = v - return s -} - -// SetDvbSdtSettings sets the DvbSdtSettings field's value. -func (s *M2tsSettings) SetDvbSdtSettings(v *DvbSdtSettings) *M2tsSettings { - s.DvbSdtSettings = v - return s -} - -// SetDvbSubPids sets the DvbSubPids field's value. -func (s *M2tsSettings) SetDvbSubPids(v []*int64) *M2tsSettings { - s.DvbSubPids = v - return s -} - -// SetDvbTdtSettings sets the DvbTdtSettings field's value. -func (s *M2tsSettings) SetDvbTdtSettings(v *DvbTdtSettings) *M2tsSettings { - s.DvbTdtSettings = v - return s -} - -// SetDvbTeletextPid sets the DvbTeletextPid field's value. -func (s *M2tsSettings) SetDvbTeletextPid(v int64) *M2tsSettings { - s.DvbTeletextPid = &v - return s -} - -// SetEbpAudioInterval sets the EbpAudioInterval field's value. -func (s *M2tsSettings) SetEbpAudioInterval(v string) *M2tsSettings { - s.EbpAudioInterval = &v - return s -} - -// SetEbpPlacement sets the EbpPlacement field's value. -func (s *M2tsSettings) SetEbpPlacement(v string) *M2tsSettings { - s.EbpPlacement = &v - return s -} - -// SetEsRateInPes sets the EsRateInPes field's value. 
-func (s *M2tsSettings) SetEsRateInPes(v string) *M2tsSettings { - s.EsRateInPes = &v - return s -} - -// SetFragmentTime sets the FragmentTime field's value. -func (s *M2tsSettings) SetFragmentTime(v float64) *M2tsSettings { - s.FragmentTime = &v - return s -} - -// SetMaxPcrInterval sets the MaxPcrInterval field's value. -func (s *M2tsSettings) SetMaxPcrInterval(v int64) *M2tsSettings { - s.MaxPcrInterval = &v - return s -} - -// SetMinEbpInterval sets the MinEbpInterval field's value. -func (s *M2tsSettings) SetMinEbpInterval(v int64) *M2tsSettings { - s.MinEbpInterval = &v - return s -} - -// SetNullPacketBitrate sets the NullPacketBitrate field's value. -func (s *M2tsSettings) SetNullPacketBitrate(v float64) *M2tsSettings { - s.NullPacketBitrate = &v - return s -} - -// SetPatInterval sets the PatInterval field's value. -func (s *M2tsSettings) SetPatInterval(v int64) *M2tsSettings { - s.PatInterval = &v - return s -} - -// SetPcrControl sets the PcrControl field's value. -func (s *M2tsSettings) SetPcrControl(v string) *M2tsSettings { - s.PcrControl = &v - return s -} - -// SetPcrPid sets the PcrPid field's value. -func (s *M2tsSettings) SetPcrPid(v int64) *M2tsSettings { - s.PcrPid = &v - return s -} - -// SetPmtInterval sets the PmtInterval field's value. -func (s *M2tsSettings) SetPmtInterval(v int64) *M2tsSettings { - s.PmtInterval = &v - return s -} - -// SetPmtPid sets the PmtPid field's value. -func (s *M2tsSettings) SetPmtPid(v int64) *M2tsSettings { - s.PmtPid = &v - return s -} - -// SetPrivateMetadataPid sets the PrivateMetadataPid field's value. -func (s *M2tsSettings) SetPrivateMetadataPid(v int64) *M2tsSettings { - s.PrivateMetadataPid = &v - return s -} - -// SetProgramNumber sets the ProgramNumber field's value. -func (s *M2tsSettings) SetProgramNumber(v int64) *M2tsSettings { - s.ProgramNumber = &v - return s -} - -// SetRateMode sets the RateMode field's value. 
-func (s *M2tsSettings) SetRateMode(v string) *M2tsSettings { - s.RateMode = &v - return s -} - -// SetScte35Pid sets the Scte35Pid field's value. -func (s *M2tsSettings) SetScte35Pid(v int64) *M2tsSettings { - s.Scte35Pid = &v - return s -} - -// SetScte35Source sets the Scte35Source field's value. -func (s *M2tsSettings) SetScte35Source(v string) *M2tsSettings { - s.Scte35Source = &v - return s -} - -// SetSegmentationMarkers sets the SegmentationMarkers field's value. -func (s *M2tsSettings) SetSegmentationMarkers(v string) *M2tsSettings { - s.SegmentationMarkers = &v - return s -} - -// SetSegmentationStyle sets the SegmentationStyle field's value. -func (s *M2tsSettings) SetSegmentationStyle(v string) *M2tsSettings { - s.SegmentationStyle = &v - return s -} - -// SetSegmentationTime sets the SegmentationTime field's value. -func (s *M2tsSettings) SetSegmentationTime(v float64) *M2tsSettings { - s.SegmentationTime = &v - return s -} - -// SetTransportStreamId sets the TransportStreamId field's value. -func (s *M2tsSettings) SetTransportStreamId(v int64) *M2tsSettings { - s.TransportStreamId = &v - return s -} - -// SetVideoPid sets the VideoPid field's value. -func (s *M2tsSettings) SetVideoPid(v int64) *M2tsSettings { - s.VideoPid = &v - return s -} - -// Settings for TS segments in HLS -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/M3u8Settings -type M3u8Settings struct { - _ struct{} `type:"structure"` - - // The number of audio frames to insert for each PES packet. - AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"` - - // Packet Identifier (PID) of the elementary audio stream(s) in the transport - // stream. Multiple values are accepted, and can be entered in ranges and/or - // by comma separation. Can be entered as decimal or hexadecimal values. 
- AudioPids []*int64 `locationName:"audioPids" type:"list"` - - // The number of milliseconds between instances of this table in the output - // transport stream. - PatInterval *int64 `locationName:"patInterval" type:"integer"` - - // When set to PCR_EVERY_PES_PACKET a Program Clock Reference value is inserted - // for every Packetized Elementary Stream (PES) header. This parameter is effective - // only when the PCR PID is the same as the video or audio elementary stream. - PcrControl *string `locationName:"pcrControl" type:"string" enum:"M3u8PcrControl"` - - // Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport - // stream. When no value is given, the encoder will assign the same value as - // the Video PID. Can be entered as a decimal or hexadecimal value. - PcrPid *int64 `locationName:"pcrPid" type:"integer"` - - // The number of milliseconds between instances of this table in the output - // transport stream. - PmtInterval *int64 `locationName:"pmtInterval" type:"integer"` - - // Packet Identifier (PID) for the Program Map Table (PMT) in the transport - // stream. Can be entered as a decimal or hexadecimal value. - PmtPid *int64 `locationName:"pmtPid" type:"integer"` - - // Packet Identifier (PID) of the private metadata stream in the transport stream. - // Can be entered as a decimal or hexadecimal value. - PrivateMetadataPid *int64 `locationName:"privateMetadataPid" type:"integer"` - - // The value of the program number field in the Program Map Table. - ProgramNumber *int64 `locationName:"programNumber" type:"integer"` - - // Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Can - // be entered as a decimal or hexadecimal value. - Scte35Pid *int64 `locationName:"scte35Pid" type:"integer"` - - // Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from - // input to output. This is only available for certain containers. 
- Scte35Source *string `locationName:"scte35Source" type:"string" enum:"M3u8Scte35Source"` - - // If PASSTHROUGH, inserts ID3 timed metadata from the timed_metadata REST command - // into this output. Only available for certain containers. - TimedMetadata *string `locationName:"timedMetadata" type:"string" enum:"TimedMetadata"` - - // Packet Identifier (PID) of the timed metadata stream in the transport stream. - // Can be entered as a decimal or hexadecimal value. - TimedMetadataPid *int64 `locationName:"timedMetadataPid" type:"integer"` - - // The value of the transport stream ID field in the Program Map Table. - TransportStreamId *int64 `locationName:"transportStreamId" type:"integer"` - - // Packet Identifier (PID) of the elementary video stream in the transport stream. - // Can be entered as a decimal or hexadecimal value. - VideoPid *int64 `locationName:"videoPid" type:"integer"` -} - -// String returns the string representation -func (s M3u8Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s M3u8Settings) GoString() string { - return s.String() -} - -// SetAudioFramesPerPes sets the AudioFramesPerPes field's value. -func (s *M3u8Settings) SetAudioFramesPerPes(v int64) *M3u8Settings { - s.AudioFramesPerPes = &v - return s -} - -// SetAudioPids sets the AudioPids field's value. -func (s *M3u8Settings) SetAudioPids(v []*int64) *M3u8Settings { - s.AudioPids = v - return s -} - -// SetPatInterval sets the PatInterval field's value. -func (s *M3u8Settings) SetPatInterval(v int64) *M3u8Settings { - s.PatInterval = &v - return s -} - -// SetPcrControl sets the PcrControl field's value. -func (s *M3u8Settings) SetPcrControl(v string) *M3u8Settings { - s.PcrControl = &v - return s -} - -// SetPcrPid sets the PcrPid field's value. -func (s *M3u8Settings) SetPcrPid(v int64) *M3u8Settings { - s.PcrPid = &v - return s -} - -// SetPmtInterval sets the PmtInterval field's value. 
-func (s *M3u8Settings) SetPmtInterval(v int64) *M3u8Settings { - s.PmtInterval = &v - return s -} - -// SetPmtPid sets the PmtPid field's value. -func (s *M3u8Settings) SetPmtPid(v int64) *M3u8Settings { - s.PmtPid = &v - return s -} - -// SetPrivateMetadataPid sets the PrivateMetadataPid field's value. -func (s *M3u8Settings) SetPrivateMetadataPid(v int64) *M3u8Settings { - s.PrivateMetadataPid = &v - return s -} - -// SetProgramNumber sets the ProgramNumber field's value. -func (s *M3u8Settings) SetProgramNumber(v int64) *M3u8Settings { - s.ProgramNumber = &v - return s -} - -// SetScte35Pid sets the Scte35Pid field's value. -func (s *M3u8Settings) SetScte35Pid(v int64) *M3u8Settings { - s.Scte35Pid = &v - return s -} - -// SetScte35Source sets the Scte35Source field's value. -func (s *M3u8Settings) SetScte35Source(v string) *M3u8Settings { - s.Scte35Source = &v - return s -} - -// SetTimedMetadata sets the TimedMetadata field's value. -func (s *M3u8Settings) SetTimedMetadata(v string) *M3u8Settings { - s.TimedMetadata = &v - return s -} - -// SetTimedMetadataPid sets the TimedMetadataPid field's value. -func (s *M3u8Settings) SetTimedMetadataPid(v int64) *M3u8Settings { - s.TimedMetadataPid = &v - return s -} - -// SetTransportStreamId sets the TransportStreamId field's value. -func (s *M3u8Settings) SetTransportStreamId(v int64) *M3u8Settings { - s.TransportStreamId = &v - return s -} - -// SetVideoPid sets the VideoPid field's value. -func (s *M3u8Settings) SetVideoPid(v int64) *M3u8Settings { - s.VideoPid = &v - return s -} - -// Settings for MOV Container. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/MovSettings -type MovSettings struct { - _ struct{} `type:"structure"` - - // When enabled, include 'clap' atom if appropriate for the video output settings. 
- ClapAtom *string `locationName:"clapAtom" type:"string" enum:"MovClapAtom"` - - // When enabled, file composition times will start at zero, composition times - // in the 'ctts' (composition time to sample) box for B-frames will be negative, - // and a 'cslg' (composition shift least greatest) box will be included per - // 14496-1 amendment 1. This improves compatibility with Apple players and tools. - CslgAtom *string `locationName:"cslgAtom" type:"string" enum:"MovCslgAtom"` - - // When set to XDCAM, writes MPEG2 video streams into the QuickTime file using - // XDCAM fourcc codes. This increases compatibility with Apple editors and players, - // but may decrease compatibility with other players. Only applicable when the - // video codec is MPEG2. - Mpeg2FourCCControl *string `locationName:"mpeg2FourCCControl" type:"string" enum:"MovMpeg2FourCCControl"` - - // If set to OMNEON, inserts Omneon-compatible padding - PaddingControl *string `locationName:"paddingControl" type:"string" enum:"MovPaddingControl"` - - // A value of 'external' creates separate media files and the wrapper file (.mov) - // contains references to these media files. A value of 'self_contained' creates - // only a wrapper (.mov) file and this file contains all of the media. - Reference *string `locationName:"reference" type:"string" enum:"MovReference"` -} - -// String returns the string representation -func (s MovSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MovSettings) GoString() string { - return s.String() -} - -// SetClapAtom sets the ClapAtom field's value. -func (s *MovSettings) SetClapAtom(v string) *MovSettings { - s.ClapAtom = &v - return s -} - -// SetCslgAtom sets the CslgAtom field's value. -func (s *MovSettings) SetCslgAtom(v string) *MovSettings { - s.CslgAtom = &v - return s -} - -// SetMpeg2FourCCControl sets the Mpeg2FourCCControl field's value. 
-func (s *MovSettings) SetMpeg2FourCCControl(v string) *MovSettings { - s.Mpeg2FourCCControl = &v - return s -} - -// SetPaddingControl sets the PaddingControl field's value. -func (s *MovSettings) SetPaddingControl(v string) *MovSettings { - s.PaddingControl = &v - return s -} - -// SetReference sets the Reference field's value. -func (s *MovSettings) SetReference(v string) *MovSettings { - s.Reference = &v - return s -} - -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value MP2. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Mp2Settings -type Mp2Settings struct { - _ struct{} `type:"structure"` - - // Average bitrate in bits/second. - Bitrate *int64 `locationName:"bitrate" type:"integer"` - - // Set Channels to specify the number of channels in this output audio track. - // Choosing Mono in the console will give you 1 output channel; choosing Stereo - // will give you 2. In the API, valid values are 1 and 2. - Channels *int64 `locationName:"channels" type:"integer"` - - // Sample rate in hz. - SampleRate *int64 `locationName:"sampleRate" type:"integer"` -} - -// String returns the string representation -func (s Mp2Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Mp2Settings) GoString() string { - return s.String() -} - -// SetBitrate sets the Bitrate field's value. -func (s *Mp2Settings) SetBitrate(v int64) *Mp2Settings { - s.Bitrate = &v - return s -} - -// SetChannels sets the Channels field's value. -func (s *Mp2Settings) SetChannels(v int64) *Mp2Settings { - s.Channels = &v - return s -} - -// SetSampleRate sets the SampleRate field's value. 
-func (s *Mp2Settings) SetSampleRate(v int64) *Mp2Settings { - s.SampleRate = &v - return s -} - -// Settings for MP4 Container -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Mp4Settings -type Mp4Settings struct { - _ struct{} `type:"structure"` - - // When enabled, file composition times will start at zero, composition times - // in the 'ctts' (composition time to sample) box for B-frames will be negative, - // and a 'cslg' (composition shift least greatest) box will be included per - // 14496-1 amendment 1. This improves compatibility with Apple players and tools. - CslgAtom *string `locationName:"cslgAtom" type:"string" enum:"Mp4CslgAtom"` - - // Inserts a free-space box immediately after the moov box. - FreeSpaceBox *string `locationName:"freeSpaceBox" type:"string" enum:"Mp4FreeSpaceBox"` - - // If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning - // of the archive as required for progressive downloading. Otherwise it is placed - // normally at the end. - MoovPlacement *string `locationName:"moovPlacement" type:"string" enum:"Mp4MoovPlacement"` - - // Overrides the "Major Brand" field in the output file. Usually not necessary - // to specify. - Mp4MajorBrand *string `locationName:"mp4MajorBrand" type:"string"` -} - -// String returns the string representation -func (s Mp4Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Mp4Settings) GoString() string { - return s.String() -} - -// SetCslgAtom sets the CslgAtom field's value. -func (s *Mp4Settings) SetCslgAtom(v string) *Mp4Settings { - s.CslgAtom = &v - return s -} - -// SetFreeSpaceBox sets the FreeSpaceBox field's value. -func (s *Mp4Settings) SetFreeSpaceBox(v string) *Mp4Settings { - s.FreeSpaceBox = &v - return s -} - -// SetMoovPlacement sets the MoovPlacement field's value. 
-func (s *Mp4Settings) SetMoovPlacement(v string) *Mp4Settings { - s.MoovPlacement = &v - return s -} - -// SetMp4MajorBrand sets the Mp4MajorBrand field's value. -func (s *Mp4Settings) SetMp4MajorBrand(v string) *Mp4Settings { - s.Mp4MajorBrand = &v - return s -} - -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value MPEG2. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Mpeg2Settings -type Mpeg2Settings struct { - _ struct{} `type:"structure"` - - // Adaptive quantization. Allows intra-frame quantizers to vary to improve visual - // quality. - AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"Mpeg2AdaptiveQuantization"` - - // Average bitrate in bits/second. Required for VBR, CBR, and ABR. Five megabits - // can be entered as 5000000 or 5m. Five hundred kilobits can be entered as - // 500000 or 0.5m. For MS Smooth outputs, bitrates must be unique when rounded - // down to the nearest multiple of 1000. - Bitrate *int64 `locationName:"bitrate" type:"integer"` - - // Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output. - CodecLevel *string `locationName:"codecLevel" type:"string" enum:"Mpeg2CodecLevel"` - - // Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output. - CodecProfile *string `locationName:"codecProfile" type:"string" enum:"Mpeg2CodecProfile"` - - // Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want - // the service to use the framerate from the input. Using the console, do this - // by choosing INITIALIZE_FROM_SOURCE for Framerate. - FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Mpeg2FramerateControl"` - - // When set to INTERPOLATE, produces smoother motion during framerate conversion. 
- FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Mpeg2FramerateConversionAlgorithm"` - - // Framerate denominator. - FramerateDenominator *int64 `locationName:"framerateDenominator" type:"integer"` - - // Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 - // fps. - FramerateNumerator *int64 `locationName:"framerateNumerator" type:"integer"` - - // Frequency of closed GOPs. In streaming applications, it is recommended that - // this be set to 1 so a decoder joining mid-stream will receive an IDR frame - // as quickly as possible. Setting this value to 0 will break output segmenting. - GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"` - - // GOP Length (keyframe interval) in frames or seconds. Must be greater than - // zero. - GopSize *float64 `locationName:"gopSize" type:"double"` - - // Indicates if the GOP Size in MPEG2 is specified in frames or seconds. If - // seconds the system will convert the GOP Size into a frame count at run time. - GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"Mpeg2GopSizeUnits"` - - // Percentage of the buffer that should initially be filled (HRD buffer model). - HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"` - - // Size of buffer (HRD buffer model). Five megabits can be entered as 5000000 - // or 5m. Five hundred kilobits can be entered as 500000 or 0.5m. - HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` - - // Use Interlace mode (InterlaceMode) to choose the scan line type for the output. - // * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce - // interlaced output with the entire output having the same field polarity (top - // or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default - // Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. 
Therefore, - // behavior depends on the input scan type. - If the source is interlaced, the - // output will be interlaced with the same polarity as the source (it will follow - // the source). The output could therefore be a mix of "top field first" and - // "bottom field first". - If the source is progressive, the output will be - // interlaced with "top field first" or "bottom field first" polarity, depending - // on which of the Follow options you chose. - InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"Mpeg2InterlaceMode"` - - // Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision - // for intra-block DC coefficients. If you choose the value auto, the service - // will automatically select the precision based on the per-frame compression - // ratio. - IntraDcPrecision *string `locationName:"intraDcPrecision" type:"string" enum:"Mpeg2IntraDcPrecision"` - - // Maximum bitrate in bits/second (for VBR mode only). Five megabits can be - // entered as 5000000 or 5m. Five hundred kilobits can be entered as 500000 - // or 0.5m. - MaxBitrate *int64 `locationName:"maxBitrate" type:"integer"` - - // Enforces separation between repeated (cadence) I-frames and I-frames inserted - // by Scene Change Detection. If a scene change I-frame is within I-interval - // frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene - // change I-frame. GOP stretch requires enabling lookahead as well as setting - // I-interval. The normal cadence resumes for the next GOP. This setting is - // only used when Scene Change Detect is enabled. Note: Maximum GOP stretch - // = GOP size + Min-I-interval - 1 - MinIInterval *int64 `locationName:"minIInterval" type:"integer"` - - // Number of B-frames between reference frames. 
- NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"` - - // Using the API, enable ParFollowSource if you want the service to use the - // pixel aspect ratio from the input. Using the console, do this by choosing - // Follow source for Pixel aspect ratio. - ParControl *string `locationName:"parControl" type:"string" enum:"Mpeg2ParControl"` - - // Pixel Aspect Ratio denominator. - ParDenominator *int64 `locationName:"parDenominator" type:"integer"` - - // Pixel Aspect Ratio numerator. - ParNumerator *int64 `locationName:"parNumerator" type:"integer"` - - // Use Quality tuning level (Mpeg2QualityTuningLevel) to specifiy whether to - // use single-pass or multipass video encoding. - QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Mpeg2QualityTuningLevel"` - - // Use Rate control mode (Mpeg2RateControlMode) to specifiy whether the bitrate - // is variable (vbr) or constant (cbr). - RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Mpeg2RateControlMode"` - - // Scene change detection (inserts I-frames on scene changes). - SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"Mpeg2SceneChangeDetect"` - - // Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled - // as 25fps, and audio is sped up correspondingly. - SlowPal *string `locationName:"slowPal" type:"string" enum:"Mpeg2SlowPal"` - - // Softness. Selects quantizer matrix, larger values reduce high-frequency content - // in the encoded image. - Softness *int64 `locationName:"softness" type:"integer"` - - // Adjust quantization within each frame based on spatial variation of content - // complexity. - SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"Mpeg2SpatialAdaptiveQuantization"` - - // Produces a Type D-10 compatible bitstream (SMPTE 356M-2001). 
- Syntax *string `locationName:"syntax" type:"string" enum:"Mpeg2Syntax"` - - // Only use Telecine (Mpeg2Telecine) when you set Framerate (Framerate) to 29.970. - // Set Telecine (Mpeg2Telecine) to Hard (hard) to produce a 29.97i output from - // a 23.976 input. Set it to Soft (soft) to produce 23.976 output and leave - // converstion to the player. - Telecine *string `locationName:"telecine" type:"string" enum:"Mpeg2Telecine"` - - // Adjust quantization within each frame based on temporal variation of content - // complexity. - TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"Mpeg2TemporalAdaptiveQuantization"` -} - -// String returns the string representation -func (s Mpeg2Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Mpeg2Settings) GoString() string { - return s.String() -} - -// SetAdaptiveQuantization sets the AdaptiveQuantization field's value. -func (s *Mpeg2Settings) SetAdaptiveQuantization(v string) *Mpeg2Settings { - s.AdaptiveQuantization = &v - return s -} - -// SetBitrate sets the Bitrate field's value. -func (s *Mpeg2Settings) SetBitrate(v int64) *Mpeg2Settings { - s.Bitrate = &v - return s -} - -// SetCodecLevel sets the CodecLevel field's value. -func (s *Mpeg2Settings) SetCodecLevel(v string) *Mpeg2Settings { - s.CodecLevel = &v - return s -} - -// SetCodecProfile sets the CodecProfile field's value. -func (s *Mpeg2Settings) SetCodecProfile(v string) *Mpeg2Settings { - s.CodecProfile = &v - return s -} - -// SetFramerateControl sets the FramerateControl field's value. -func (s *Mpeg2Settings) SetFramerateControl(v string) *Mpeg2Settings { - s.FramerateControl = &v - return s -} - -// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. 
-func (s *Mpeg2Settings) SetFramerateConversionAlgorithm(v string) *Mpeg2Settings { - s.FramerateConversionAlgorithm = &v - return s -} - -// SetFramerateDenominator sets the FramerateDenominator field's value. -func (s *Mpeg2Settings) SetFramerateDenominator(v int64) *Mpeg2Settings { - s.FramerateDenominator = &v - return s -} - -// SetFramerateNumerator sets the FramerateNumerator field's value. -func (s *Mpeg2Settings) SetFramerateNumerator(v int64) *Mpeg2Settings { - s.FramerateNumerator = &v - return s -} - -// SetGopClosedCadence sets the GopClosedCadence field's value. -func (s *Mpeg2Settings) SetGopClosedCadence(v int64) *Mpeg2Settings { - s.GopClosedCadence = &v - return s -} - -// SetGopSize sets the GopSize field's value. -func (s *Mpeg2Settings) SetGopSize(v float64) *Mpeg2Settings { - s.GopSize = &v - return s -} - -// SetGopSizeUnits sets the GopSizeUnits field's value. -func (s *Mpeg2Settings) SetGopSizeUnits(v string) *Mpeg2Settings { - s.GopSizeUnits = &v - return s -} - -// SetHrdBufferInitialFillPercentage sets the HrdBufferInitialFillPercentage field's value. -func (s *Mpeg2Settings) SetHrdBufferInitialFillPercentage(v int64) *Mpeg2Settings { - s.HrdBufferInitialFillPercentage = &v - return s -} - -// SetHrdBufferSize sets the HrdBufferSize field's value. -func (s *Mpeg2Settings) SetHrdBufferSize(v int64) *Mpeg2Settings { - s.HrdBufferSize = &v - return s -} - -// SetInterlaceMode sets the InterlaceMode field's value. -func (s *Mpeg2Settings) SetInterlaceMode(v string) *Mpeg2Settings { - s.InterlaceMode = &v - return s -} - -// SetIntraDcPrecision sets the IntraDcPrecision field's value. -func (s *Mpeg2Settings) SetIntraDcPrecision(v string) *Mpeg2Settings { - s.IntraDcPrecision = &v - return s -} - -// SetMaxBitrate sets the MaxBitrate field's value. -func (s *Mpeg2Settings) SetMaxBitrate(v int64) *Mpeg2Settings { - s.MaxBitrate = &v - return s -} - -// SetMinIInterval sets the MinIInterval field's value. 
-func (s *Mpeg2Settings) SetMinIInterval(v int64) *Mpeg2Settings { - s.MinIInterval = &v - return s -} - -// SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value. -func (s *Mpeg2Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *Mpeg2Settings { - s.NumberBFramesBetweenReferenceFrames = &v - return s -} - -// SetParControl sets the ParControl field's value. -func (s *Mpeg2Settings) SetParControl(v string) *Mpeg2Settings { - s.ParControl = &v - return s -} - -// SetParDenominator sets the ParDenominator field's value. -func (s *Mpeg2Settings) SetParDenominator(v int64) *Mpeg2Settings { - s.ParDenominator = &v - return s -} - -// SetParNumerator sets the ParNumerator field's value. -func (s *Mpeg2Settings) SetParNumerator(v int64) *Mpeg2Settings { - s.ParNumerator = &v - return s -} - -// SetQualityTuningLevel sets the QualityTuningLevel field's value. -func (s *Mpeg2Settings) SetQualityTuningLevel(v string) *Mpeg2Settings { - s.QualityTuningLevel = &v - return s -} - -// SetRateControlMode sets the RateControlMode field's value. -func (s *Mpeg2Settings) SetRateControlMode(v string) *Mpeg2Settings { - s.RateControlMode = &v - return s -} - -// SetSceneChangeDetect sets the SceneChangeDetect field's value. -func (s *Mpeg2Settings) SetSceneChangeDetect(v string) *Mpeg2Settings { - s.SceneChangeDetect = &v - return s -} - -// SetSlowPal sets the SlowPal field's value. -func (s *Mpeg2Settings) SetSlowPal(v string) *Mpeg2Settings { - s.SlowPal = &v - return s -} - -// SetSoftness sets the Softness field's value. -func (s *Mpeg2Settings) SetSoftness(v int64) *Mpeg2Settings { - s.Softness = &v - return s -} - -// SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value. -func (s *Mpeg2Settings) SetSpatialAdaptiveQuantization(v string) *Mpeg2Settings { - s.SpatialAdaptiveQuantization = &v - return s -} - -// SetSyntax sets the Syntax field's value. 
-func (s *Mpeg2Settings) SetSyntax(v string) *Mpeg2Settings { - s.Syntax = &v - return s -} - -// SetTelecine sets the Telecine field's value. -func (s *Mpeg2Settings) SetTelecine(v string) *Mpeg2Settings { - s.Telecine = &v - return s -} - -// SetTemporalAdaptiveQuantization sets the TemporalAdaptiveQuantization field's value. -func (s *Mpeg2Settings) SetTemporalAdaptiveQuantization(v string) *Mpeg2Settings { - s.TemporalAdaptiveQuantization = &v - return s -} - -// If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify -// the value SpekeKeyProvider. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/MsSmoothEncryptionSettings -type MsSmoothEncryptionSettings struct { - _ struct{} `type:"structure"` - - // Settings for use with a SPEKE key provider - SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"` -} - -// String returns the string representation -func (s MsSmoothEncryptionSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MsSmoothEncryptionSettings) GoString() string { - return s.String() -} - -// SetSpekeKeyProvider sets the SpekeKeyProvider field's value. -func (s *MsSmoothEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProvider) *MsSmoothEncryptionSettings { - s.SpekeKeyProvider = v - return s -} - -// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to -// MS_SMOOTH_GROUP_SETTINGS. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/MsSmoothGroupSettings -type MsSmoothGroupSettings struct { - _ struct{} `type:"structure"` - - // COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across - // a Microsoft Smooth output group into a single audio stream. 
- AudioDeduplication *string `locationName:"audioDeduplication" type:"string" enum:"MsSmoothAudioDeduplication"` - - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify - // the base filename in the URI, the service will use the filename of the input - // file. If your job has multiple inputs, the service uses the filename of the - // first input file. - Destination *string `locationName:"destination" type:"string"` - - // If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify - // the value SpekeKeyProvider. - Encryption *MsSmoothEncryptionSettings `locationName:"encryption" type:"structure"` - - // Use Fragment length (FragmentLength) to specify the mp4 fragment sizes in - // seconds. Fragment length must be compatible with GOP size and framerate. - FragmentLength *int64 `locationName:"fragmentLength" type:"integer"` - - // Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding - // format for the server and client manifest. Valid options are utf8 and utf16. - ManifestEncoding *string `locationName:"manifestEncoding" type:"string" enum:"MsSmoothManifestEncoding"` -} - -// String returns the string representation -func (s MsSmoothGroupSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MsSmoothGroupSettings) GoString() string { - return s.String() -} - -// SetAudioDeduplication sets the AudioDeduplication field's value. -func (s *MsSmoothGroupSettings) SetAudioDeduplication(v string) *MsSmoothGroupSettings { - s.AudioDeduplication = &v - return s -} - -// SetDestination sets the Destination field's value. -func (s *MsSmoothGroupSettings) SetDestination(v string) *MsSmoothGroupSettings { - s.Destination = &v - return s -} - -// SetEncryption sets the Encryption field's value. 
-func (s *MsSmoothGroupSettings) SetEncryption(v *MsSmoothEncryptionSettings) *MsSmoothGroupSettings { - s.Encryption = v - return s -} - -// SetFragmentLength sets the FragmentLength field's value. -func (s *MsSmoothGroupSettings) SetFragmentLength(v int64) *MsSmoothGroupSettings { - s.FragmentLength = &v - return s -} - -// SetManifestEncoding sets the ManifestEncoding field's value. -func (s *MsSmoothGroupSettings) SetManifestEncoding(v string) *MsSmoothGroupSettings { - s.ManifestEncoding = &v - return s -} - -// Settings for Nielsen Configuration -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/NielsenConfiguration -type NielsenConfiguration struct { - _ struct{} `type:"structure"` - - // Use Nielsen Configuration (NielsenConfiguration) to set the Nielsen measurement - // system breakout code. Supported values are 0, 3, 7, and 9. - BreakoutCode *int64 `locationName:"breakoutCode" type:"integer"` - - // Use Distributor ID (DistributorID) to specify the distributor ID that is - // assigned to your organization by Neilsen. - DistributorId *string `locationName:"distributorId" type:"string"` -} - -// String returns the string representation -func (s NielsenConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NielsenConfiguration) GoString() string { - return s.String() -} - -// SetBreakoutCode sets the BreakoutCode field's value. -func (s *NielsenConfiguration) SetBreakoutCode(v int64) *NielsenConfiguration { - s.BreakoutCode = &v - return s -} - -// SetDistributorId sets the DistributorId field's value. -func (s *NielsenConfiguration) SetDistributorId(v string) *NielsenConfiguration { - s.DistributorId = &v - return s -} - -// Enable the Noise reducer (NoiseReducer) feature to remove noise from your -// video output if necessary. Enable or disable this feature for each output -// individually. This setting is disabled by default. 
When you enable Noise -// reducer (NoiseReducer), you must also select a value for Noise reducer filter -// (NoiseReducerFilter). -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/NoiseReducer -type NoiseReducer struct { - _ struct{} `type:"structure"` - - // Use Noise reducer filter (NoiseReducerFilter) to select one of the following - // spatial image filtering functions. To use this setting, you must also enable - // Noise reducer (NoiseReducer). * Bilateral is an edge preserving noise reduction - // filter * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) are convolution - // filters * Conserve is a min/max noise reduction filter * Spatial is frequency-domain - // filter based on JND principles. - Filter *string `locationName:"filter" type:"string" enum:"NoiseReducerFilter"` - - // Settings for a noise reducer filter - FilterSettings *NoiseReducerFilterSettings `locationName:"filterSettings" type:"structure"` - - // Noise reducer filter settings for spatial filter. - SpatialFilterSettings *NoiseReducerSpatialFilterSettings `locationName:"spatialFilterSettings" type:"structure"` -} - -// String returns the string representation -func (s NoiseReducer) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NoiseReducer) GoString() string { - return s.String() -} - -// SetFilter sets the Filter field's value. -func (s *NoiseReducer) SetFilter(v string) *NoiseReducer { - s.Filter = &v - return s -} - -// SetFilterSettings sets the FilterSettings field's value. -func (s *NoiseReducer) SetFilterSettings(v *NoiseReducerFilterSettings) *NoiseReducer { - s.FilterSettings = v - return s -} - -// SetSpatialFilterSettings sets the SpatialFilterSettings field's value. 
-func (s *NoiseReducer) SetSpatialFilterSettings(v *NoiseReducerSpatialFilterSettings) *NoiseReducer { - s.SpatialFilterSettings = v - return s -} - -// Settings for a noise reducer filter -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/NoiseReducerFilterSettings -type NoiseReducerFilterSettings struct { - _ struct{} `type:"structure"` - - // Relative strength of noise reducing filter. Higher values produce stronger - // filtering. - Strength *int64 `locationName:"strength" type:"integer"` -} - -// String returns the string representation -func (s NoiseReducerFilterSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NoiseReducerFilterSettings) GoString() string { - return s.String() -} - -// SetStrength sets the Strength field's value. -func (s *NoiseReducerFilterSettings) SetStrength(v int64) *NoiseReducerFilterSettings { - s.Strength = &v - return s -} - -// Noise reducer filter settings for spatial filter. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/NoiseReducerSpatialFilterSettings -type NoiseReducerSpatialFilterSettings struct { - _ struct{} `type:"structure"` - - // Specify strength of post noise reduction sharpening filter, with 0 disabling - // the filter and 3 enabling it at maximum strength. - PostFilterSharpenStrength *int64 `locationName:"postFilterSharpenStrength" type:"integer"` - - // The speed of the filter, from -2 (lower speed) to 3 (higher speed), with - // 0 being the nominal value. - Speed *int64 `locationName:"speed" type:"integer"` - - // Relative strength of noise reducing filter. Higher values produce stronger - // filtering. 
- Strength *int64 `locationName:"strength" type:"integer"` -} - -// String returns the string representation -func (s NoiseReducerSpatialFilterSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NoiseReducerSpatialFilterSettings) GoString() string { - return s.String() -} - -// SetPostFilterSharpenStrength sets the PostFilterSharpenStrength field's value. -func (s *NoiseReducerSpatialFilterSettings) SetPostFilterSharpenStrength(v int64) *NoiseReducerSpatialFilterSettings { - s.PostFilterSharpenStrength = &v - return s -} - -// SetSpeed sets the Speed field's value. -func (s *NoiseReducerSpatialFilterSettings) SetSpeed(v int64) *NoiseReducerSpatialFilterSettings { - s.Speed = &v - return s -} - -// SetStrength sets the Strength field's value. -func (s *NoiseReducerSpatialFilterSettings) SetStrength(v int64) *NoiseReducerSpatialFilterSettings { - s.Strength = &v - return s -} - -// An output object describes the settings for a single output file or stream -// in an output group. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Output -type Output struct { - _ struct{} `type:"structure"` - - // (AudioDescriptions) contains groups of audio encoding settings organized - // by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions) - // can contain multiple groups of encoding settings. - AudioDescriptions []*AudioDescription `locationName:"audioDescriptions" type:"list"` - - // (CaptionDescriptions) contains groups of captions settings. For each output - // that has captions, include one instance of (CaptionDescriptions). (CaptionDescriptions) - // can contain multiple groups of captions settings. - CaptionDescriptions []*CaptionDescription `locationName:"captionDescriptions" type:"list"` - - // Container specific settings. 
- ContainerSettings *ContainerSettings `locationName:"containerSettings" type:"structure"` - - // Use Extension (Extension) to specify the file extension for outputs in File - // output groups. If you do not specify a value, the service will use default - // extensions by container type as follows * MPEG-2 transport stream, m2ts * - // Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * No Container, - // the service will use codec extensions (e.g. AAC, H265, H265, AC3) - Extension *string `locationName:"extension" type:"string"` - - // Use Name modifier (NameModifier) to have the service add a string to the - // end of each output filename. You specify the base filename as part of your - // destination URI. When you create multiple outputs in the same output group, - // Name modifier is required. Name modifier also accepts format identifiers. - // For DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ - // in one output, you must use them in the same way in all outputs of the output - // group. - NameModifier *string `locationName:"nameModifier" type:"string"` - - // Specific settings for this type of output. - OutputSettings *OutputSettings `locationName:"outputSettings" type:"structure"` - - // Use Preset (Preset) to specifiy a preset for your transcoding settings. Provide - // the system or custom preset name. You can specify either Preset (Preset) - // or Container settings (ContainerSettings), but not both. - Preset *string `locationName:"preset" type:"string"` - - // (VideoDescription) contains a group of video encoding settings. The specific - // video settings depend on the video codec you choose when you specify a value - // for Video codec (codec). Include one instance of (VideoDescription) per output. 
- VideoDescription *VideoDescription `locationName:"videoDescription" type:"structure"` -} - -// String returns the string representation -func (s Output) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Output) GoString() string { - return s.String() -} - -// SetAudioDescriptions sets the AudioDescriptions field's value. -func (s *Output) SetAudioDescriptions(v []*AudioDescription) *Output { - s.AudioDescriptions = v - return s -} - -// SetCaptionDescriptions sets the CaptionDescriptions field's value. -func (s *Output) SetCaptionDescriptions(v []*CaptionDescription) *Output { - s.CaptionDescriptions = v - return s -} - -// SetContainerSettings sets the ContainerSettings field's value. -func (s *Output) SetContainerSettings(v *ContainerSettings) *Output { - s.ContainerSettings = v - return s -} - -// SetExtension sets the Extension field's value. -func (s *Output) SetExtension(v string) *Output { - s.Extension = &v - return s -} - -// SetNameModifier sets the NameModifier field's value. -func (s *Output) SetNameModifier(v string) *Output { - s.NameModifier = &v - return s -} - -// SetOutputSettings sets the OutputSettings field's value. -func (s *Output) SetOutputSettings(v *OutputSettings) *Output { - s.OutputSettings = v - return s -} - -// SetPreset sets the Preset field's value. -func (s *Output) SetPreset(v string) *Output { - s.Preset = &v - return s -} - -// SetVideoDescription sets the VideoDescription field's value. -func (s *Output) SetVideoDescription(v *VideoDescription) *Output { - s.VideoDescription = v - return s -} - -// OutputChannel mapping settings. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/OutputChannelMapping -type OutputChannelMapping struct { - _ struct{} `type:"structure"` - - // List of input channels - InputChannels []*int64 `locationName:"inputChannels" type:"list"` -} - -// String returns the string representation -func (s OutputChannelMapping) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputChannelMapping) GoString() string { - return s.String() -} - -// SetInputChannels sets the InputChannels field's value. -func (s *OutputChannelMapping) SetInputChannels(v []*int64) *OutputChannelMapping { - s.InputChannels = v - return s -} - -// Details regarding output -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/OutputDetail -type OutputDetail struct { - _ struct{} `type:"structure"` - - // Duration in milliseconds - DurationInMs *int64 `locationName:"durationInMs" type:"integer"` - - // Contains details about the output's video stream - VideoDetails *VideoDetail `locationName:"videoDetails" type:"structure"` -} - -// String returns the string representation -func (s OutputDetail) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputDetail) GoString() string { - return s.String() -} - -// SetDurationInMs sets the DurationInMs field's value. -func (s *OutputDetail) SetDurationInMs(v int64) *OutputDetail { - s.DurationInMs = &v - return s -} - -// SetVideoDetails sets the VideoDetails field's value. -func (s *OutputDetail) SetVideoDetails(v *VideoDetail) *OutputDetail { - s.VideoDetails = v - return s -} - -// Group of outputs -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/OutputGroup -type OutputGroup struct { - _ struct{} `type:"structure"` - - // Use Custom Group Name (CustomName) to specify a name for the output group. 
- // This value is displayed on the console and can make your job settings JSON - // more human-readable. It does not affect your outputs. Use up to twelve characters - // that are either letters, numbers, spaces, or underscores. - CustomName *string `locationName:"customName" type:"string"` - - // Name of the output group - Name *string `locationName:"name" type:"string"` - - // Output Group settings, including type - OutputGroupSettings *OutputGroupSettings `locationName:"outputGroupSettings" type:"structure"` - - // This object holds groups of encoding settings, one group of settings per - // output. - Outputs []*Output `locationName:"outputs" type:"list"` -} - -// String returns the string representation -func (s OutputGroup) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputGroup) GoString() string { - return s.String() -} - -// SetCustomName sets the CustomName field's value. -func (s *OutputGroup) SetCustomName(v string) *OutputGroup { - s.CustomName = &v - return s -} - -// SetName sets the Name field's value. -func (s *OutputGroup) SetName(v string) *OutputGroup { - s.Name = &v - return s -} - -// SetOutputGroupSettings sets the OutputGroupSettings field's value. -func (s *OutputGroup) SetOutputGroupSettings(v *OutputGroupSettings) *OutputGroup { - s.OutputGroupSettings = v - return s -} - -// SetOutputs sets the Outputs field's value. -func (s *OutputGroup) SetOutputs(v []*Output) *OutputGroup { - s.Outputs = v - return s -} - -// Contains details about the output groups specified in the job settings. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/OutputGroupDetail -type OutputGroupDetail struct { - _ struct{} `type:"structure"` - - // Details about the output - OutputDetails []*OutputDetail `locationName:"outputDetails" type:"list"` -} - -// String returns the string representation -func (s OutputGroupDetail) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputGroupDetail) GoString() string { - return s.String() -} - -// SetOutputDetails sets the OutputDetails field's value. -func (s *OutputGroupDetail) SetOutputDetails(v []*OutputDetail) *OutputGroupDetail { - s.OutputDetails = v - return s -} - -// Output Group settings, including type -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/OutputGroupSettings -type OutputGroupSettings struct { - _ struct{} `type:"structure"` - - // Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to - // DASH_ISO_GROUP_SETTINGS. - DashIsoGroupSettings *DashIsoGroupSettings `locationName:"dashIsoGroupSettings" type:"structure"` - - // Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to - // FILE_GROUP_SETTINGS. - FileGroupSettings *FileGroupSettings `locationName:"fileGroupSettings" type:"structure"` - - // Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to - // HLS_GROUP_SETTINGS. - HlsGroupSettings *HlsGroupSettings `locationName:"hlsGroupSettings" type:"structure"` - - // Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to - // MS_SMOOTH_GROUP_SETTINGS. 
- MsSmoothGroupSettings *MsSmoothGroupSettings `locationName:"msSmoothGroupSettings" type:"structure"` - - // Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming) - Type *string `locationName:"type" type:"string" enum:"OutputGroupType"` -} - -// String returns the string representation -func (s OutputGroupSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputGroupSettings) GoString() string { - return s.String() -} - -// SetDashIsoGroupSettings sets the DashIsoGroupSettings field's value. -func (s *OutputGroupSettings) SetDashIsoGroupSettings(v *DashIsoGroupSettings) *OutputGroupSettings { - s.DashIsoGroupSettings = v - return s -} - -// SetFileGroupSettings sets the FileGroupSettings field's value. -func (s *OutputGroupSettings) SetFileGroupSettings(v *FileGroupSettings) *OutputGroupSettings { - s.FileGroupSettings = v - return s -} - -// SetHlsGroupSettings sets the HlsGroupSettings field's value. -func (s *OutputGroupSettings) SetHlsGroupSettings(v *HlsGroupSettings) *OutputGroupSettings { - s.HlsGroupSettings = v - return s -} - -// SetMsSmoothGroupSettings sets the MsSmoothGroupSettings field's value. -func (s *OutputGroupSettings) SetMsSmoothGroupSettings(v *MsSmoothGroupSettings) *OutputGroupSettings { - s.MsSmoothGroupSettings = v - return s -} - -// SetType sets the Type field's value. -func (s *OutputGroupSettings) SetType(v string) *OutputGroupSettings { - s.Type = &v - return s -} - -// Specific settings for this type of output. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/OutputSettings -type OutputSettings struct { - _ struct{} `type:"structure"` - - // Settings for HLS output groups - HlsSettings *HlsSettings `locationName:"hlsSettings" type:"structure"` -} - -// String returns the string representation -func (s OutputSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputSettings) GoString() string { - return s.String() -} - -// SetHlsSettings sets the HlsSettings field's value. -func (s *OutputSettings) SetHlsSettings(v *HlsSettings) *OutputSettings { - s.HlsSettings = v - return s -} - -// A preset is a collection of preconfigured media conversion settings that -// you want MediaConvert to apply to the output during the conversion process. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Preset -type Preset struct { - _ struct{} `type:"structure"` - - // An identifier for this resource that is unique within all of AWS. - Arn *string `locationName:"arn" type:"string"` - - // An optional category you create to organize your presets. - Category *string `locationName:"category" type:"string"` - - // The timestamp in epoch seconds for preset creation. - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` - - // An optional description you create for each preset. - Description *string `locationName:"description" type:"string"` - - // The timestamp in epoch seconds when the preset was last updated. - LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unix"` - - // A name you create for each preset. Each name must be unique within your account. - Name *string `locationName:"name" type:"string"` - - // Settings for preset - Settings *PresetSettings `locationName:"settings" type:"structure"` - - // A preset can be of two types: system or custom. 
System or built-in preset - // can’t be modified or deleted by the user. - Type *string `locationName:"type" type:"string" enum:"Type"` -} - -// String returns the string representation -func (s Preset) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Preset) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *Preset) SetArn(v string) *Preset { - s.Arn = &v - return s -} - -// SetCategory sets the Category field's value. -func (s *Preset) SetCategory(v string) *Preset { - s.Category = &v - return s -} - -// SetCreatedAt sets the CreatedAt field's value. -func (s *Preset) SetCreatedAt(v time.Time) *Preset { - s.CreatedAt = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *Preset) SetDescription(v string) *Preset { - s.Description = &v - return s -} - -// SetLastUpdated sets the LastUpdated field's value. -func (s *Preset) SetLastUpdated(v time.Time) *Preset { - s.LastUpdated = &v - return s -} - -// SetName sets the Name field's value. -func (s *Preset) SetName(v string) *Preset { - s.Name = &v - return s -} - -// SetSettings sets the Settings field's value. -func (s *Preset) SetSettings(v *PresetSettings) *Preset { - s.Settings = v - return s -} - -// SetType sets the Type field's value. -func (s *Preset) SetType(v string) *Preset { - s.Type = &v - return s -} - -// Settings for preset -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/PresetSettings -type PresetSettings struct { - _ struct{} `type:"structure"` - - // (AudioDescriptions) contains groups of audio encoding settings organized - // by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions) - // can contain multiple groups of encoding settings. - AudioDescriptions []*AudioDescription `locationName:"audioDescriptions" type:"list"` - - // Caption settings for this preset. 
There can be multiple caption settings - // in a single output. - CaptionDescriptions []*CaptionDescriptionPreset `locationName:"captionDescriptions" type:"list"` - - // Container specific settings. - ContainerSettings *ContainerSettings `locationName:"containerSettings" type:"structure"` - - // (VideoDescription) contains a group of video encoding settings. The specific - // video settings depend on the video codec you choose when you specify a value - // for Video codec (codec). Include one instance of (VideoDescription) per output. - VideoDescription *VideoDescription `locationName:"videoDescription" type:"structure"` -} - -// String returns the string representation -func (s PresetSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PresetSettings) GoString() string { - return s.String() -} - -// SetAudioDescriptions sets the AudioDescriptions field's value. -func (s *PresetSettings) SetAudioDescriptions(v []*AudioDescription) *PresetSettings { - s.AudioDescriptions = v - return s -} - -// SetCaptionDescriptions sets the CaptionDescriptions field's value. -func (s *PresetSettings) SetCaptionDescriptions(v []*CaptionDescriptionPreset) *PresetSettings { - s.CaptionDescriptions = v - return s -} - -// SetContainerSettings sets the ContainerSettings field's value. -func (s *PresetSettings) SetContainerSettings(v *ContainerSettings) *PresetSettings { - s.ContainerSettings = v - return s -} - -// SetVideoDescription sets the VideoDescription field's value. -func (s *PresetSettings) SetVideoDescription(v *VideoDescription) *PresetSettings { - s.VideoDescription = v - return s -} - -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value PRORES. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ProresSettings -type ProresSettings struct { - _ struct{} `type:"structure"` - - // Use Profile (ProResCodecProfile) to specifiy the type of Apple ProRes codec - // to use for this output. - CodecProfile *string `locationName:"codecProfile" type:"string" enum:"ProresCodecProfile"` - - // Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want - // the service to use the framerate from the input. Using the console, do this - // by choosing INITIALIZE_FROM_SOURCE for Framerate. - FramerateControl *string `locationName:"framerateControl" type:"string" enum:"ProresFramerateControl"` - - // When set to INTERPOLATE, produces smoother motion during framerate conversion. - FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"ProresFramerateConversionAlgorithm"` - - // Framerate denominator. - FramerateDenominator *int64 `locationName:"framerateDenominator" type:"integer"` - - // When you use the API for transcode jobs that use framerate conversion, specify - // the framerate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use - // FramerateNumerator to specify the numerator of this fraction. In this example, - // use 24000 for the value of FramerateNumerator. - FramerateNumerator *int64 `locationName:"framerateNumerator" type:"integer"` - - // Use Interlace mode (InterlaceMode) to choose the scan line type for the output. - // * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce - // interlaced output with the entire output having the same field polarity (top - // or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default - // Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, - // behavior depends on the input scan type. 
- If the source is interlaced, the - // output will be interlaced with the same polarity as the source (it will follow - // the source). The output could therefore be a mix of "top field first" and - // "bottom field first". - If the source is progressive, the output will be - // interlaced with "top field first" or "bottom field first" polarity, depending - // on which of the Follow options you chose. - InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"ProresInterlaceMode"` - - // Use (ProresParControl) to specify how the service determines the pixel aspect - // ratio. Set to Follow source (INITIALIZE_FROM_SOURCE) to use the pixel aspect - // ratio from the input. To specify a different pixel aspect ratio: Using the - // console, choose it from the dropdown menu. Using the API, set ProresParControl - // to (SPECIFIED) and provide for (ParNumerator) and (ParDenominator). - ParControl *string `locationName:"parControl" type:"string" enum:"ProresParControl"` - - // Pixel Aspect Ratio denominator. - ParDenominator *int64 `locationName:"parDenominator" type:"integer"` - - // Pixel Aspect Ratio numerator. - ParNumerator *int64 `locationName:"parNumerator" type:"integer"` - - // Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled - // as 25fps, and audio is sped up correspondingly. - SlowPal *string `locationName:"slowPal" type:"string" enum:"ProresSlowPal"` - - // Only use Telecine (ProresTelecine) when you set Framerate (Framerate) to - // 29.970. Set Telecine (ProresTelecine) to Hard (hard) to produce a 29.97i - // output from a 23.976 input. Set it to Soft (soft) to produce 23.976 output - // and leave converstion to the player. 
- Telecine *string `locationName:"telecine" type:"string" enum:"ProresTelecine"` -} - -// String returns the string representation -func (s ProresSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ProresSettings) GoString() string { - return s.String() -} - -// SetCodecProfile sets the CodecProfile field's value. -func (s *ProresSettings) SetCodecProfile(v string) *ProresSettings { - s.CodecProfile = &v - return s -} - -// SetFramerateControl sets the FramerateControl field's value. -func (s *ProresSettings) SetFramerateControl(v string) *ProresSettings { - s.FramerateControl = &v - return s -} - -// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. -func (s *ProresSettings) SetFramerateConversionAlgorithm(v string) *ProresSettings { - s.FramerateConversionAlgorithm = &v - return s -} - -// SetFramerateDenominator sets the FramerateDenominator field's value. -func (s *ProresSettings) SetFramerateDenominator(v int64) *ProresSettings { - s.FramerateDenominator = &v - return s -} - -// SetFramerateNumerator sets the FramerateNumerator field's value. -func (s *ProresSettings) SetFramerateNumerator(v int64) *ProresSettings { - s.FramerateNumerator = &v - return s -} - -// SetInterlaceMode sets the InterlaceMode field's value. -func (s *ProresSettings) SetInterlaceMode(v string) *ProresSettings { - s.InterlaceMode = &v - return s -} - -// SetParControl sets the ParControl field's value. -func (s *ProresSettings) SetParControl(v string) *ProresSettings { - s.ParControl = &v - return s -} - -// SetParDenominator sets the ParDenominator field's value. -func (s *ProresSettings) SetParDenominator(v int64) *ProresSettings { - s.ParDenominator = &v - return s -} - -// SetParNumerator sets the ParNumerator field's value. -func (s *ProresSettings) SetParNumerator(v int64) *ProresSettings { - s.ParNumerator = &v - return s -} - -// SetSlowPal sets the SlowPal field's value. 
-func (s *ProresSettings) SetSlowPal(v string) *ProresSettings { - s.SlowPal = &v - return s -} - -// SetTelecine sets the Telecine field's value. -func (s *ProresSettings) SetTelecine(v string) *ProresSettings { - s.Telecine = &v - return s -} - -// MediaConvert jobs are submitted to a queue. Unless specified otherwise jobs -// are submitted to a built-in default queue. User can create additional queues -// to separate the jobs of different categories or priority. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Queue -type Queue struct { - _ struct{} `type:"structure"` - - // An identifier for this resource that is unique within all of AWS. - Arn *string `locationName:"arn" type:"string"` - - // The timestamp in epoch seconds for queue creation. - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` - - // An optional description you create for each queue. - Description *string `locationName:"description" type:"string"` - - // The timestamp in epoch seconds when the queue was last updated. - LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unix"` - - // A name you create for each queue. Each name must be unique within your account. - Name *string `locationName:"name" type:"string"` - - // Queues can be ACTIVE or PAUSED. If you pause a queue, jobs in that queue - // will not begin. Jobs running when a queue is paused continue to run until - // they finish or error out. - Status *string `locationName:"status" type:"string" enum:"QueueStatus"` - - // A queue can be of two types: system or custom. System or built-in queues - // can’t be modified or deleted by the user. 
- Type *string `locationName:"type" type:"string" enum:"Type"` -} - -// String returns the string representation -func (s Queue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Queue) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *Queue) SetArn(v string) *Queue { - s.Arn = &v - return s -} - -// SetCreatedAt sets the CreatedAt field's value. -func (s *Queue) SetCreatedAt(v time.Time) *Queue { - s.CreatedAt = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *Queue) SetDescription(v string) *Queue { - s.Description = &v - return s -} - -// SetLastUpdated sets the LastUpdated field's value. -func (s *Queue) SetLastUpdated(v time.Time) *Queue { - s.LastUpdated = &v - return s -} - -// SetName sets the Name field's value. -func (s *Queue) SetName(v string) *Queue { - s.Name = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *Queue) SetStatus(v string) *Queue { - s.Status = &v - return s -} - -// SetType sets the Type field's value. -func (s *Queue) SetType(v string) *Queue { - s.Type = &v - return s -} - -// Use Rectangle to identify a specific area of the video frame. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Rectangle -type Rectangle struct { - _ struct{} `type:"structure"` - - // Height of rectangle in pixels. - Height *int64 `locationName:"height" type:"integer"` - - // Width of rectangle in pixels. - Width *int64 `locationName:"width" type:"integer"` - - // The distance, in pixels, between the rectangle and the left edge of the video - // frame. - X *int64 `locationName:"x" type:"integer"` - - // The distance, in pixels, between the rectangle and the top edge of the video - // frame. 
- Y *int64 `locationName:"y" type:"integer"` -} - -// String returns the string representation -func (s Rectangle) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Rectangle) GoString() string { - return s.String() -} - -// SetHeight sets the Height field's value. -func (s *Rectangle) SetHeight(v int64) *Rectangle { - s.Height = &v - return s -} - -// SetWidth sets the Width field's value. -func (s *Rectangle) SetWidth(v int64) *Rectangle { - s.Width = &v - return s -} - -// SetX sets the X field's value. -func (s *Rectangle) SetX(v int64) *Rectangle { - s.X = &v - return s -} - -// SetY sets the Y field's value. -func (s *Rectangle) SetY(v int64) *Rectangle { - s.Y = &v - return s -} - -// Use Manual audio remixing (RemixSettings) to adjust audio levels for each -// output channel. With audio remixing, you can output more or fewer audio channels -// than your input audio source provides. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/RemixSettings -type RemixSettings struct { - _ struct{} `type:"structure"` - - // Channel mapping (ChannelMapping) contains the group of fields that hold the - // remixing value for each channel. Units are in dB. Acceptable values are within - // the range from -60 (mute) through 6. A setting of 0 passes the input channel - // unchanged to the output channel (no attenuation or amplification). - ChannelMapping *ChannelMapping `locationName:"channelMapping" type:"structure"` - - // Specify the number of audio channels from your input that you want to use - // in your output. With remixing, you might combine or split the data in these - // channels, so the number of channels in your final output might be different. - ChannelsIn *int64 `locationName:"channelsIn" type:"integer"` - - // Specify the number of channels in this output after remixing. 
Valid values: - // 1, 2, 4, 6, 8 - ChannelsOut *int64 `locationName:"channelsOut" type:"integer"` -} - -// String returns the string representation -func (s RemixSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RemixSettings) GoString() string { - return s.String() -} - -// SetChannelMapping sets the ChannelMapping field's value. -func (s *RemixSettings) SetChannelMapping(v *ChannelMapping) *RemixSettings { - s.ChannelMapping = v - return s -} - -// SetChannelsIn sets the ChannelsIn field's value. -func (s *RemixSettings) SetChannelsIn(v int64) *RemixSettings { - s.ChannelsIn = &v - return s -} - -// SetChannelsOut sets the ChannelsOut field's value. -func (s *RemixSettings) SetChannelsOut(v int64) *RemixSettings { - s.ChannelsOut = &v - return s -} - -// Settings for SCC caption output. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/SccDestinationSettings -type SccDestinationSettings struct { - _ struct{} `type:"structure"` - - // Set Framerate (SccDestinationFramerate) to make sure that the captions and - // the video are synchronized in the output. Specify a framerate that matches - // the framerate of the associated video. If the video framerate is 29.97, choose - // 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has video_insertion=true - // and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe (FRAMERATE_29_97_NON_DROPFRAME). - Framerate *string `locationName:"framerate" type:"string" enum:"SccDestinationFramerate"` -} - -// String returns the string representation -func (s SccDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SccDestinationSettings) GoString() string { - return s.String() -} - -// SetFramerate sets the Framerate field's value. 
-func (s *SccDestinationSettings) SetFramerate(v string) *SccDestinationSettings { - s.Framerate = &v - return s -} - -// Settings for use with a SPEKE key provider -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/SpekeKeyProvider -type SpekeKeyProvider struct { - _ struct{} `type:"structure"` - - // The SPEKE-compliant server uses Resource ID (ResourceId) to identify content. - ResourceId *string `locationName:"resourceId" type:"string"` - - // Relates to SPEKE implementation. DRM system identifiers. DASH output groups - // support a max of two system ids. Other group types support one system id. - SystemIds []*string `locationName:"systemIds" type:"list"` - - // Use URL (Url) to specify the SPEKE-compliant server that will provide keys - // for content. - Url *string `locationName:"url" type:"string"` -} - -// String returns the string representation -func (s SpekeKeyProvider) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SpekeKeyProvider) GoString() string { - return s.String() -} - -// SetResourceId sets the ResourceId field's value. -func (s *SpekeKeyProvider) SetResourceId(v string) *SpekeKeyProvider { - s.ResourceId = &v - return s -} - -// SetSystemIds sets the SystemIds field's value. -func (s *SpekeKeyProvider) SetSystemIds(v []*string) *SpekeKeyProvider { - s.SystemIds = v - return s -} - -// SetUrl sets the Url field's value. -func (s *SpekeKeyProvider) SetUrl(v string) *SpekeKeyProvider { - s.Url = &v - return s -} - -// Settings for use with a SPEKE key provider. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/StaticKeyProvider -type StaticKeyProvider struct { - _ struct{} `type:"structure"` - - // Relates to DRM implementation. Sets the value of the KEYFORMAT attribute. - // Must be 'identity' or a reverse DNS string. May be omitted to indicate an - // implicit value of 'identity'. 
- KeyFormat *string `locationName:"keyFormat" type:"string"` - - // Relates to DRM implementation. Either a single positive integer version value - // or a slash delimited list of version values (1/2/3). - KeyFormatVersions *string `locationName:"keyFormatVersions" type:"string"` - - // Relates to DRM implementation. Use a 32-character hexidecimal string to specify - // Key Value (StaticKeyValue). - StaticKeyValue *string `locationName:"staticKeyValue" type:"string"` - - // Relates to DRM implementation. The location of the license server used for - // protecting content. - Url *string `locationName:"url" type:"string"` -} - -// String returns the string representation -func (s StaticKeyProvider) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StaticKeyProvider) GoString() string { - return s.String() -} - -// SetKeyFormat sets the KeyFormat field's value. -func (s *StaticKeyProvider) SetKeyFormat(v string) *StaticKeyProvider { - s.KeyFormat = &v - return s -} - -// SetKeyFormatVersions sets the KeyFormatVersions field's value. -func (s *StaticKeyProvider) SetKeyFormatVersions(v string) *StaticKeyProvider { - s.KeyFormatVersions = &v - return s -} - -// SetStaticKeyValue sets the StaticKeyValue field's value. -func (s *StaticKeyProvider) SetStaticKeyValue(v string) *StaticKeyProvider { - s.StaticKeyValue = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *StaticKeyProvider) SetUrl(v string) *StaticKeyProvider { - s.Url = &v - return s -} - -// Settings for Teletext caption output -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/TeletextDestinationSettings -type TeletextDestinationSettings struct { - _ struct{} `type:"structure"` - - // Set pageNumber to the Teletext page number for the destination captions for - // this output. This value must be a three-digit hexadecimal string; strings - // ending in -FF are invalid. 
If you are passing through the entire set of Teletext - // data, do not use this field. - PageNumber *string `locationName:"pageNumber" type:"string"` -} - -// String returns the string representation -func (s TeletextDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TeletextDestinationSettings) GoString() string { - return s.String() -} - -// SetPageNumber sets the PageNumber field's value. -func (s *TeletextDestinationSettings) SetPageNumber(v string) *TeletextDestinationSettings { - s.PageNumber = &v - return s -} - -// Settings specific to Teletext caption sources, including Page number. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/TeletextSourceSettings -type TeletextSourceSettings struct { - _ struct{} `type:"structure"` - - // Use Page Number (PageNumber) to specify the three-digit hexadecimal page - // number that will be used for Teletext captions. Do not use this setting if - // you are passing through teletext from the input source to output. - PageNumber *string `locationName:"pageNumber" type:"string"` -} - -// String returns the string representation -func (s TeletextSourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TeletextSourceSettings) GoString() string { - return s.String() -} - -// SetPageNumber sets the PageNumber field's value. -func (s *TeletextSourceSettings) SetPageNumber(v string) *TeletextSourceSettings { - s.PageNumber = &v - return s -} - -// Timecode burn-in (TimecodeBurnIn)--Burns the output timecode and specified -// prefix into the output. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/TimecodeBurnin -type TimecodeBurnin struct { - _ struct{} `type:"structure"` - - // Use Font Size (FontSize) to set the font size of any burned-in timecode. - // Valid values are 10, 16, 32, 48. 
- FontSize *int64 `locationName:"fontSize" type:"integer"` - - // Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to - // specify the location the burned-in timecode on output video. - Position *string `locationName:"position" type:"string" enum:"TimecodeBurninPosition"` - - // Use Prefix (Prefix) to place ASCII characters before any burned-in timecode. - // For example, a prefix of "EZ-" will result in the timecode "EZ-00:00:00:00". - // Provide either the characters themselves or the ASCII code equivalents. The - // supported range of characters is 0x20 through 0x7e. This includes letters, - // numbers, and all special characters represented on a standard English keyboard. - Prefix *string `locationName:"prefix" type:"string"` -} - -// String returns the string representation -func (s TimecodeBurnin) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TimecodeBurnin) GoString() string { - return s.String() -} - -// SetFontSize sets the FontSize field's value. -func (s *TimecodeBurnin) SetFontSize(v int64) *TimecodeBurnin { - s.FontSize = &v - return s -} - -// SetPosition sets the Position field's value. -func (s *TimecodeBurnin) SetPosition(v string) *TimecodeBurnin { - s.Position = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *TimecodeBurnin) SetPrefix(v string) *TimecodeBurnin { - s.Prefix = &v - return s -} - -// Contains settings used to acquire and adjust timecode information from inputs. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/TimecodeConfig -type TimecodeConfig struct { - _ struct{} `type:"structure"` - - // If you use an editing platform that relies on an anchor timecode, use Anchor - // Timecode (Anchor) to specify a timecode that will match the input video frame - // to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) - // or (HH:MM:SS;FF). This setting ignores framerate conversion. 
System behavior - // for Anchor Timecode varies depending on your setting for Timecode source - // (TimecodeSource). * If Timecode source (TimecodeSource) is set to Specified - // Start (specifiedstart), the first input frame is the specified value in Start - // Timecode (Start). Anchor Timecode (Anchor) and Start Timecode (Start) are - // used calculate output timecode. * If Timecode source (TimecodeSource) is - // set to Start at 0 (zerobased) the first frame is 00:00:00:00. * If Timecode - // source (TimecodeSource) is set to Embedded (embedded), the first frame is - // the timecode value on the first input frame of the input. - Anchor *string `locationName:"anchor" type:"string"` - - // Use Timecode source (TimecodeSource) to set how timecodes are handled within - // this input. To make sure that your video, audio, captions, and markers are - // synchronized and that time-based features, such as image inserter, work correctly, - // choose the Timecode source option that matches your assets. All timecodes - // are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) - // - Use the timecode that is in the input video. If no embedded timecode is - // in the source, the service will use Start at 0 (ZEROBASED) instead. * Start - // at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00. - // * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame - // to a value other than zero. You use Start timecode (Start) to provide this - // value. - Source *string `locationName:"source" type:"string" enum:"TimecodeSource"` - - // Only use when you set Timecode Source (TimecodeSource) to Specified Start - // (SPECIFIEDSTART). Use Start timecode (Start) to specify the timecode for - // the initial frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or - // (HH:MM:SS;FF). - Start *string `locationName:"start" type:"string"` - - // Only applies to outputs that support program-date-time stamp. 
Use Time stamp - // offset (TimestampOffset) to overwrite the timecode date without affecting - // the time and frame number. Provide the new date as a string in the format - // "yyyy-mm-dd". To use Time stamp offset, you must also enable Insert program-date-time - // (InsertProgramDateTime) in the output settings. - TimestampOffset *string `locationName:"timestampOffset" type:"string"` -} - -// String returns the string representation -func (s TimecodeConfig) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TimecodeConfig) GoString() string { - return s.String() -} - -// SetAnchor sets the Anchor field's value. -func (s *TimecodeConfig) SetAnchor(v string) *TimecodeConfig { - s.Anchor = &v - return s -} - -// SetSource sets the Source field's value. -func (s *TimecodeConfig) SetSource(v string) *TimecodeConfig { - s.Source = &v - return s -} - -// SetStart sets the Start field's value. -func (s *TimecodeConfig) SetStart(v string) *TimecodeConfig { - s.Start = &v - return s -} - -// SetTimestampOffset sets the TimestampOffset field's value. -func (s *TimecodeConfig) SetTimestampOffset(v string) *TimecodeConfig { - s.TimestampOffset = &v - return s -} - -// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags -// in your job. To include timed metadata, you must enable it here, enable it -// in each output container, and specify tags and timecodes in ID3 insertion -// (Id3Insertion) objects. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/TimedMetadataInsertion -type TimedMetadataInsertion struct { - _ struct{} `type:"structure"` - - // Id3Insertions contains the array of Id3Insertion instances. 
- Id3Insertions []*Id3Insertion `locationName:"id3Insertions" type:"list"` -} - -// String returns the string representation -func (s TimedMetadataInsertion) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TimedMetadataInsertion) GoString() string { - return s.String() -} - -// SetId3Insertions sets the Id3Insertions field's value. -func (s *TimedMetadataInsertion) SetId3Insertions(v []*Id3Insertion) *TimedMetadataInsertion { - s.Id3Insertions = v - return s -} - -// Information about when jobs are submitted, started, and finished is specified -// in Unix epoch format in seconds. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/Timing -type Timing struct { - _ struct{} `type:"structure"` - - // The time, in Unix epoch format, that the transcoding job finished - FinishTime *time.Time `locationName:"finishTime" type:"timestamp" timestampFormat:"unix"` - - // The time, in Unix epoch format, that transcoding for the job began. - StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"unix"` - - // The time, in Unix epoch format, that you submitted the job. - SubmitTime *time.Time `locationName:"submitTime" type:"timestamp" timestampFormat:"unix"` -} - -// String returns the string representation -func (s Timing) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Timing) GoString() string { - return s.String() -} - -// SetFinishTime sets the FinishTime field's value. -func (s *Timing) SetFinishTime(v time.Time) *Timing { - s.FinishTime = &v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *Timing) SetStartTime(v time.Time) *Timing { - s.StartTime = &v - return s -} - -// SetSubmitTime sets the SubmitTime field's value. 
-func (s *Timing) SetSubmitTime(v time.Time) *Timing { - s.SubmitTime = &v - return s -} - -// Settings for TTML caption output -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/TtmlDestinationSettings -type TtmlDestinationSettings struct { - _ struct{} `type:"structure"` - - // Pass through style and position information from a TTML-like input source - // (TTML, SMPTE-TT, CFF-TT) to the CFF-TT output or TTML output. - StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"TtmlStylePassthrough"` -} - -// String returns the string representation -func (s TtmlDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TtmlDestinationSettings) GoString() string { - return s.String() -} - -// SetStylePassthrough sets the StylePassthrough field's value. -func (s *TtmlDestinationSettings) SetStylePassthrough(v string) *TtmlDestinationSettings { - s.StylePassthrough = &v - return s -} - -// Modify a job template by sending a request with the job template name and -// any of the following that you wish to change: description, category, and -// queue. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateJobTemplateRequest -type UpdateJobTemplateInput struct { - _ struct{} `type:"structure"` - - // The new category for the job template, if you are changing it. - Category *string `locationName:"category" type:"string"` - - // The new description for the job template, if you are changing it. - Description *string `locationName:"description" type:"string"` - - // The name of the job template you are modifying - // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` - - // The new queue for the job template, if you are changing it. 
- Queue *string `locationName:"queue" type:"string"` - - // JobTemplateSettings contains all the transcode settings saved in the template - // that will be applied to jobs created from it. - Settings *JobTemplateSettings `locationName:"settings" type:"structure"` -} - -// String returns the string representation -func (s UpdateJobTemplateInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateJobTemplateInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateJobTemplateInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateJobTemplateInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCategory sets the Category field's value. -func (s *UpdateJobTemplateInput) SetCategory(v string) *UpdateJobTemplateInput { - s.Category = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *UpdateJobTemplateInput) SetDescription(v string) *UpdateJobTemplateInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateJobTemplateInput) SetName(v string) *UpdateJobTemplateInput { - s.Name = &v - return s -} - -// SetQueue sets the Queue field's value. -func (s *UpdateJobTemplateInput) SetQueue(v string) *UpdateJobTemplateInput { - s.Queue = &v - return s -} - -// SetSettings sets the Settings field's value. -func (s *UpdateJobTemplateInput) SetSettings(v *JobTemplateSettings) *UpdateJobTemplateInput { - s.Settings = v - return s -} - -// Successful update job template requests will return the new job template -// JSON. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateJobTemplateResponse -type UpdateJobTemplateOutput struct { - _ struct{} `type:"structure"` - - // A job template is a pre-made set of encoding instructions that you can use - // to quickly create a job. - JobTemplate *JobTemplate `locationName:"jobTemplate" type:"structure"` -} - -// String returns the string representation -func (s UpdateJobTemplateOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateJobTemplateOutput) GoString() string { - return s.String() -} - -// SetJobTemplate sets the JobTemplate field's value. -func (s *UpdateJobTemplateOutput) SetJobTemplate(v *JobTemplate) *UpdateJobTemplateOutput { - s.JobTemplate = v - return s -} - -// Modify a preset by sending a request with the preset name and any of the -// following that you wish to change: description, category, and transcoding -// settings. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdatePresetRequest -type UpdatePresetInput struct { - _ struct{} `type:"structure"` - - // The new category for the preset, if you are changing it. - Category *string `locationName:"category" type:"string"` - - // The new description for the preset, if you are changing it. - Description *string `locationName:"description" type:"string"` - - // The name of the preset you are modifying. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` - - // Settings for preset - Settings *PresetSettings `locationName:"settings" type:"structure"` -} - -// String returns the string representation -func (s UpdatePresetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdatePresetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdatePresetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdatePresetInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCategory sets the Category field's value. -func (s *UpdatePresetInput) SetCategory(v string) *UpdatePresetInput { - s.Category = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *UpdatePresetInput) SetDescription(v string) *UpdatePresetInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdatePresetInput) SetName(v string) *UpdatePresetInput { - s.Name = &v - return s -} - -// SetSettings sets the Settings field's value. -func (s *UpdatePresetInput) SetSettings(v *PresetSettings) *UpdatePresetInput { - s.Settings = v - return s -} - -// Successful update preset requests will return the new preset JSON. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdatePresetResponse -type UpdatePresetOutput struct { - _ struct{} `type:"structure"` - - // A preset is a collection of preconfigured media conversion settings that - // you want MediaConvert to apply to the output during the conversion process. - Preset *Preset `locationName:"preset" type:"structure"` -} - -// String returns the string representation -func (s UpdatePresetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdatePresetOutput) GoString() string { - return s.String() -} - -// SetPreset sets the Preset field's value. -func (s *UpdatePresetOutput) SetPreset(v *Preset) *UpdatePresetOutput { - s.Preset = v - return s -} - -// Modify a queue by sending a request with the queue name and any of the following -// that you wish to change - description, status. You pause or activate a queue -// by changing its status between ACTIVE and PAUSED. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateQueueRequest -type UpdateQueueInput struct { - _ struct{} `type:"structure"` - - // The new description for the queue, if you are changing it. - Description *string `locationName:"description" type:"string"` - - // The name of the queue you are modifying. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" type:"string" required:"true"` - - // Queues can be ACTIVE or PAUSED. If you pause a queue, jobs in that queue - // will not begin. Jobs running when a queue is paused continue to run until - // they finish or error out. - Status *string `locationName:"status" type:"string" enum:"QueueStatus"` -} - -// String returns the string representation -func (s UpdateQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateQueueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateQueueInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *UpdateQueueInput) SetDescription(v string) *UpdateQueueInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateQueueInput) SetName(v string) *UpdateQueueInput { - s.Name = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *UpdateQueueInput) SetStatus(v string) *UpdateQueueInput { - s.Status = &v - return s -} - -// Successful update queue requests will return the new queue JSON. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateQueueResponse -type UpdateQueueOutput struct { - _ struct{} `type:"structure"` - - // MediaConvert jobs are submitted to a queue. Unless specified otherwise jobs - // are submitted to a built-in default queue. User can create additional queues - // to separate the jobs of different categories or priority. - Queue *Queue `locationName:"queue" type:"structure"` -} - -// String returns the string representation -func (s UpdateQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateQueueOutput) GoString() string { - return s.String() -} - -// SetQueue sets the Queue field's value. -func (s *UpdateQueueOutput) SetQueue(v *Queue) *UpdateQueueOutput { - s.Queue = v - return s -} - -// Video codec settings, (CodecSettings) under (VideoDescription), contains -// the group of settings related to video encoding. The settings in this group -// vary depending on the value you choose for Video codec (Codec). For each -// codec enum you choose, define the corresponding settings object. The following -// lists the codec enum, settings object pairs. * H_264, H264Settings * H_265, -// H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, -// FrameCaptureSettings -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/VideoCodecSettings -type VideoCodecSettings struct { - _ struct{} `type:"structure"` - - // Type of video codec - Codec *string `locationName:"codec" type:"string" enum:"VideoCodec"` - - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value FRAME_CAPTURE. - FrameCaptureSettings *FrameCaptureSettings `locationName:"frameCaptureSettings" type:"structure"` - - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value H_264. 
- H264Settings *H264Settings `locationName:"h264Settings" type:"structure"` - - // Settings for H265 codec - H265Settings *H265Settings `locationName:"h265Settings" type:"structure"` - - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value MPEG2. - Mpeg2Settings *Mpeg2Settings `locationName:"mpeg2Settings" type:"structure"` - - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value PRORES. - ProresSettings *ProresSettings `locationName:"proresSettings" type:"structure"` -} - -// String returns the string representation -func (s VideoCodecSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VideoCodecSettings) GoString() string { - return s.String() -} - -// SetCodec sets the Codec field's value. -func (s *VideoCodecSettings) SetCodec(v string) *VideoCodecSettings { - s.Codec = &v - return s -} - -// SetFrameCaptureSettings sets the FrameCaptureSettings field's value. -func (s *VideoCodecSettings) SetFrameCaptureSettings(v *FrameCaptureSettings) *VideoCodecSettings { - s.FrameCaptureSettings = v - return s -} - -// SetH264Settings sets the H264Settings field's value. -func (s *VideoCodecSettings) SetH264Settings(v *H264Settings) *VideoCodecSettings { - s.H264Settings = v - return s -} - -// SetH265Settings sets the H265Settings field's value. -func (s *VideoCodecSettings) SetH265Settings(v *H265Settings) *VideoCodecSettings { - s.H265Settings = v - return s -} - -// SetMpeg2Settings sets the Mpeg2Settings field's value. -func (s *VideoCodecSettings) SetMpeg2Settings(v *Mpeg2Settings) *VideoCodecSettings { - s.Mpeg2Settings = v - return s -} - -// SetProresSettings sets the ProresSettings field's value. 
-func (s *VideoCodecSettings) SetProresSettings(v *ProresSettings) *VideoCodecSettings { - s.ProresSettings = v - return s -} - -// Settings for video outputs -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/VideoDescription -type VideoDescription struct { - _ struct{} `type:"structure"` - - // This setting only applies to H.264 and MPEG2 outputs. Use Insert AFD signaling - // (AfdSignaling) to whether there are AFD values in the output video data and - // what those values are. * Choose None to remove all AFD values from this output. - // * Choose Fixed to ignore input AFD values and instead encode the value specified - // in the job. * Choose Auto to calculate output AFD values based on the input - // AFD scaler data. - AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"AfdSignaling"` - - // Enable Anti-alias (AntiAlias) to enhance sharp edges in video output when - // your input resolution is much larger than your output resolution. Default - // is enabled. - AntiAlias *string `locationName:"antiAlias" type:"string" enum:"AntiAlias"` - - // Video codec settings, (CodecSettings) under (VideoDescription), contains - // the group of settings related to video encoding. The settings in this group - // vary depending on the value you choose for Video codec (Codec). For each - // codec enum you choose, define the corresponding settings object. The following - // lists the codec enum, settings object pairs. * H_264, H264Settings * H_265, - // H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, - // FrameCaptureSettings - CodecSettings *VideoCodecSettings `locationName:"codecSettings" type:"structure"` - - // Enable Insert color metadata (ColorMetadata) to include color metadata in - // this output. This setting is enabled by default. 
- ColorMetadata *string `locationName:"colorMetadata" type:"string" enum:"ColorMetadata"` - - // Applies only if your input aspect ratio is different from your output aspect - // ratio. Use Input cropping rectangle (Crop) to specify the video area the - // service will include in the output. This will crop the input source, causing - // video pixels to be removed on encode. Do not use this setting if you have - // enabled Stretch to output (stretchToOutput) in your output settings. - Crop *Rectangle `locationName:"crop" type:"structure"` - - // Applies only to 29.97 fps outputs. When this feature is enabled, the service - // will use drop-frame timecode on outputs. If it is not possible to use drop-frame - // timecode, the system will fall back to non-drop-frame. This setting is enabled - // by default when Timecode insertion (TimecodeInsertion) is enabled. - DropFrameTimecode *string `locationName:"dropFrameTimecode" type:"string" enum:"DropFrameTimecode"` - - // Applies only if you set AFD Signaling(AfdSignaling) to Fixed (FIXED). Use - // Fixed (FixedAfd) to specify a four-bit AFD value which the service will write - // on all frames of this video output. - FixedAfd *int64 `locationName:"fixedAfd" type:"integer"` - - // Use the Height (Height) setting to define the video resolution height for - // this output. Specify in pixels. If you don't provide a value here, the service - // will use the input height. - Height *int64 `locationName:"height" type:"integer"` - - // Use Position (Position) to point to a rectangle object to define your position. - // This setting overrides any other aspect ratio. - Position *Rectangle `locationName:"position" type:"structure"` - - // Use Respond to AFD (RespondToAfd) to specify how the service changes the - // video itself in response to AFD values in the input. * Choose Respond to - // clip the input video frame according to the AFD value, input display aspect - // ratio, and output display aspect ratio. 
* Choose Passthrough to include the - // input AFD values. Do not choose this when AfdSignaling is set to (NONE). - // A preferred implementation of this workflow is to set RespondToAfd to (NONE) - // and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values - // from this output. - RespondToAfd *string `locationName:"respondToAfd" type:"string" enum:"RespondToAfd"` - - // Applies only if your input aspect ratio is different from your output aspect - // ratio. Enable Stretch to output (StretchToOutput) to have the service stretch - // your video image to fit. Leave this setting disabled to allow the service - // to letterbox your video instead. This setting overrides any positioning value - // you specify elsewhere in the job. - ScalingBehavior *string `locationName:"scalingBehavior" type:"string" enum:"ScalingBehavior"` - - // Use Sharpness (Sharpness)setting to specify the strength of anti-aliasing. - // This setting changes the width of the anti-alias filter kernel used for scaling. - // Sharpness only applies if your output resolution is different from your input - // resolution, and if you set Anti-alias (AntiAlias) to ENABLED. 0 is the softest - // setting, 100 the sharpest, and 50 recommended for most content. - Sharpness *int64 `locationName:"sharpness" type:"integer"` - - // Enable Timecode insertion to include timecode information in this output. - // Do this in the API by setting (VideoTimecodeInsertion) to (PIC_TIMING_SEI). - // To get timecodes to appear correctly in your output, also set up the timecode - // configuration for your job in the input settings. Only enable Timecode insertion - // when the input framerate is identical to output framerate. Disable this setting - // to remove the timecode from the output. Default is disabled. - TimecodeInsertion *string `locationName:"timecodeInsertion" type:"string" enum:"VideoTimecodeInsertion"` - - // Find additional transcoding features under Preprocessors (VideoPreprocessors). 
- // Enable the features at each output individually. These features are disabled - // by default. - VideoPreprocessors *VideoPreprocessor `locationName:"videoPreprocessors" type:"structure"` - - // Use Width (Width) to define the video resolution width, in pixels, for this - // output. If you don't provide a value here, the service will use the input - // width. - Width *int64 `locationName:"width" type:"integer"` -} - -// String returns the string representation -func (s VideoDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VideoDescription) GoString() string { - return s.String() -} - -// SetAfdSignaling sets the AfdSignaling field's value. -func (s *VideoDescription) SetAfdSignaling(v string) *VideoDescription { - s.AfdSignaling = &v - return s -} - -// SetAntiAlias sets the AntiAlias field's value. -func (s *VideoDescription) SetAntiAlias(v string) *VideoDescription { - s.AntiAlias = &v - return s -} - -// SetCodecSettings sets the CodecSettings field's value. -func (s *VideoDescription) SetCodecSettings(v *VideoCodecSettings) *VideoDescription { - s.CodecSettings = v - return s -} - -// SetColorMetadata sets the ColorMetadata field's value. -func (s *VideoDescription) SetColorMetadata(v string) *VideoDescription { - s.ColorMetadata = &v - return s -} - -// SetCrop sets the Crop field's value. -func (s *VideoDescription) SetCrop(v *Rectangle) *VideoDescription { - s.Crop = v - return s -} - -// SetDropFrameTimecode sets the DropFrameTimecode field's value. -func (s *VideoDescription) SetDropFrameTimecode(v string) *VideoDescription { - s.DropFrameTimecode = &v - return s -} - -// SetFixedAfd sets the FixedAfd field's value. -func (s *VideoDescription) SetFixedAfd(v int64) *VideoDescription { - s.FixedAfd = &v - return s -} - -// SetHeight sets the Height field's value. 
-func (s *VideoDescription) SetHeight(v int64) *VideoDescription { - s.Height = &v - return s -} - -// SetPosition sets the Position field's value. -func (s *VideoDescription) SetPosition(v *Rectangle) *VideoDescription { - s.Position = v - return s -} - -// SetRespondToAfd sets the RespondToAfd field's value. -func (s *VideoDescription) SetRespondToAfd(v string) *VideoDescription { - s.RespondToAfd = &v - return s -} - -// SetScalingBehavior sets the ScalingBehavior field's value. -func (s *VideoDescription) SetScalingBehavior(v string) *VideoDescription { - s.ScalingBehavior = &v - return s -} - -// SetSharpness sets the Sharpness field's value. -func (s *VideoDescription) SetSharpness(v int64) *VideoDescription { - s.Sharpness = &v - return s -} - -// SetTimecodeInsertion sets the TimecodeInsertion field's value. -func (s *VideoDescription) SetTimecodeInsertion(v string) *VideoDescription { - s.TimecodeInsertion = &v - return s -} - -// SetVideoPreprocessors sets the VideoPreprocessors field's value. -func (s *VideoDescription) SetVideoPreprocessors(v *VideoPreprocessor) *VideoDescription { - s.VideoPreprocessors = v - return s -} - -// SetWidth sets the Width field's value. -func (s *VideoDescription) SetWidth(v int64) *VideoDescription { - s.Width = &v - return s -} - -// Contains details about the output's video stream -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/VideoDetail -type VideoDetail struct { - _ struct{} `type:"structure"` - - // Height in pixels for the output - HeightInPx *int64 `locationName:"heightInPx" type:"integer"` - - // Width in pixels for the output - WidthInPx *int64 `locationName:"widthInPx" type:"integer"` -} - -// String returns the string representation -func (s VideoDetail) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VideoDetail) GoString() string { - return s.String() -} - -// SetHeightInPx sets the HeightInPx field's value. 
-func (s *VideoDetail) SetHeightInPx(v int64) *VideoDetail { - s.HeightInPx = &v - return s -} - -// SetWidthInPx sets the WidthInPx field's value. -func (s *VideoDetail) SetWidthInPx(v int64) *VideoDetail { - s.WidthInPx = &v - return s -} - -// Find additional transcoding features under Preprocessors (VideoPreprocessors). -// Enable the features at each output individually. These features are disabled -// by default. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/VideoPreprocessor -type VideoPreprocessor struct { - _ struct{} `type:"structure"` - - // Enable the Color corrector (ColorCorrector) feature if necessary. Enable - // or disable this feature for each output individually. This setting is disabled - // by default. - ColorCorrector *ColorCorrector `locationName:"colorCorrector" type:"structure"` - - // Use Deinterlacer (Deinterlacer) to produce smoother motion and a clearer - // picture. - Deinterlacer *Deinterlacer `locationName:"deinterlacer" type:"structure"` - - // Enable the Image inserter (ImageInserter) feature to include a graphic overlay - // on your video. Enable or disable this feature for each output individually. - // This setting is disabled by default. - ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"` - - // Enable the Noise reducer (NoiseReducer) feature to remove noise from your - // video output if necessary. Enable or disable this feature for each output - // individually. This setting is disabled by default. - NoiseReducer *NoiseReducer `locationName:"noiseReducer" type:"structure"` - - // Timecode burn-in (TimecodeBurnIn)--Burns the output timecode and specified - // prefix into the output. 
- TimecodeBurnin *TimecodeBurnin `locationName:"timecodeBurnin" type:"structure"` -} - -// String returns the string representation -func (s VideoPreprocessor) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VideoPreprocessor) GoString() string { - return s.String() -} - -// SetColorCorrector sets the ColorCorrector field's value. -func (s *VideoPreprocessor) SetColorCorrector(v *ColorCorrector) *VideoPreprocessor { - s.ColorCorrector = v - return s -} - -// SetDeinterlacer sets the Deinterlacer field's value. -func (s *VideoPreprocessor) SetDeinterlacer(v *Deinterlacer) *VideoPreprocessor { - s.Deinterlacer = v - return s -} - -// SetImageInserter sets the ImageInserter field's value. -func (s *VideoPreprocessor) SetImageInserter(v *ImageInserter) *VideoPreprocessor { - s.ImageInserter = v - return s -} - -// SetNoiseReducer sets the NoiseReducer field's value. -func (s *VideoPreprocessor) SetNoiseReducer(v *NoiseReducer) *VideoPreprocessor { - s.NoiseReducer = v - return s -} - -// SetTimecodeBurnin sets the TimecodeBurnin field's value. -func (s *VideoPreprocessor) SetTimecodeBurnin(v *TimecodeBurnin) *VideoPreprocessor { - s.TimecodeBurnin = v - return s -} - -// Selector for video. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/VideoSelector -type VideoSelector struct { - _ struct{} `type:"structure"` - - // Specifies the colorspace of an input. This setting works in tandem with "Color - // Corrector":#color_corrector > color_space_conversion to determine if any - // conversion will be performed. - ColorSpace *string `locationName:"colorSpace" type:"string" enum:"ColorSpace"` - - // There are two sources for color metadata, the input file and the job configuration. - // This enum controls which takes precedence. FORCE: System will use color metadata - // supplied by user, if any. If the user does not supply color metadata the - // system will use data from the source. 
FALLBACK: System will use color metadata - // from the source. If source has no color metadata, the system will use user-supplied - // color metadata values if available. - ColorSpaceUsage *string `locationName:"colorSpaceUsage" type:"string" enum:"ColorSpaceUsage"` - - // Use the HDR master display (Hdr10Metadata) settings to provide values for - // HDR color. These values vary depending on the input video and must be provided - // by a color grader. Range is 0 to 50,000, each increment represents 0.00002 - // in CIE1931 color coordinate. - Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"` - - // Use PID (Pid) to select specific video data from an input file. Specify this - // value as an integer; the system automatically converts it to the hexidecimal - // value. For example, 257 selects PID 0x101. A PID, or packet identifier, is - // an identifier for a set of data in an MPEG-2 transport stream container. - Pid *int64 `locationName:"pid" type:"integer"` - - // Selects a specific program from within a multi-program transport stream. - // Note that Quad 4K is not currently supported. - ProgramNumber *int64 `locationName:"programNumber" type:"integer"` -} - -// String returns the string representation -func (s VideoSelector) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VideoSelector) GoString() string { - return s.String() -} - -// SetColorSpace sets the ColorSpace field's value. -func (s *VideoSelector) SetColorSpace(v string) *VideoSelector { - s.ColorSpace = &v - return s -} - -// SetColorSpaceUsage sets the ColorSpaceUsage field's value. -func (s *VideoSelector) SetColorSpaceUsage(v string) *VideoSelector { - s.ColorSpaceUsage = &v - return s -} - -// SetHdr10Metadata sets the Hdr10Metadata field's value. -func (s *VideoSelector) SetHdr10Metadata(v *Hdr10Metadata) *VideoSelector { - s.Hdr10Metadata = v - return s -} - -// SetPid sets the Pid field's value. 
-func (s *VideoSelector) SetPid(v int64) *VideoSelector { - s.Pid = &v - return s -} - -// SetProgramNumber sets the ProgramNumber field's value. -func (s *VideoSelector) SetProgramNumber(v int64) *VideoSelector { - s.ProgramNumber = &v - return s -} - -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value WAV. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/WavSettings -type WavSettings struct { - _ struct{} `type:"structure"` - - // Specify Bit depth (BitDepth), in bits per sample, to choose the encoding - // quality for this audio track. - BitDepth *int64 `locationName:"bitDepth" type:"integer"` - - // Set Channels to specify the number of channels in this output audio track. - // With WAV, valid values 1, 2, 4, and 8. In the console, these values are Mono, - // Stereo, 4-Channel, and 8-Channel, respectively. - Channels *int64 `locationName:"channels" type:"integer"` - - // Sample rate in Hz. - SampleRate *int64 `locationName:"sampleRate" type:"integer"` -} - -// String returns the string representation -func (s WavSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WavSettings) GoString() string { - return s.String() -} - -// SetBitDepth sets the BitDepth field's value. -func (s *WavSettings) SetBitDepth(v int64) *WavSettings { - s.BitDepth = &v - return s -} - -// SetChannels sets the Channels field's value. -func (s *WavSettings) SetChannels(v int64) *WavSettings { - s.Channels = &v - return s -} - -// SetSampleRate sets the SampleRate field's value. -func (s *WavSettings) SetSampleRate(v int64) *WavSettings { - s.SampleRate = &v - return s -} - -// Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio -// + audio description (AD) as a stereo pair. The value for AudioType will be -// set to 3, which signals to downstream systems that this stream contains "broadcaster -// mixed AD". 
Note that the input received by the encoder must contain pre-mixed -// audio; the encoder does not perform the mixing. When you choose BROADCASTER_MIXED_AD, -// the encoder ignores any values you provide in AudioType and FollowInputAudioType. -// Choose NORMAL when the input does not contain pre-mixed audio + audio description -// (AD). In this case, the encoder will use any values you provide for AudioType -// and FollowInputAudioType. -const ( - // AacAudioDescriptionBroadcasterMixBroadcasterMixedAd is a AacAudioDescriptionBroadcasterMix enum value - AacAudioDescriptionBroadcasterMixBroadcasterMixedAd = "BROADCASTER_MIXED_AD" - - // AacAudioDescriptionBroadcasterMixNormal is a AacAudioDescriptionBroadcasterMix enum value - AacAudioDescriptionBroadcasterMixNormal = "NORMAL" -) - -// AAC Profile. -const ( - // AacCodecProfileLc is a AacCodecProfile enum value - AacCodecProfileLc = "LC" - - // AacCodecProfileHev1 is a AacCodecProfile enum value - AacCodecProfileHev1 = "HEV1" - - // AacCodecProfileHev2 is a AacCodecProfile enum value - AacCodecProfileHev2 = "HEV2" -) - -// Mono (Audio Description), Mono, Stereo, or 5.1 channel layout. Valid values -// depend on rate control mode and profile. "1.0 - Audio Description (Receiver -// Mix)" setting receives a stereo description plus control track and emits -// a mono AAC encode of the description track, with control data emitted in -// the PES header as per ETSI TS 101 154 Annex E. 
-const ( - // AacCodingModeAdReceiverMix is a AacCodingMode enum value - AacCodingModeAdReceiverMix = "AD_RECEIVER_MIX" - - // AacCodingModeCodingMode10 is a AacCodingMode enum value - AacCodingModeCodingMode10 = "CODING_MODE_1_0" - - // AacCodingModeCodingMode11 is a AacCodingMode enum value - AacCodingModeCodingMode11 = "CODING_MODE_1_1" - - // AacCodingModeCodingMode20 is a AacCodingMode enum value - AacCodingModeCodingMode20 = "CODING_MODE_2_0" - - // AacCodingModeCodingMode51 is a AacCodingMode enum value - AacCodingModeCodingMode51 = "CODING_MODE_5_1" -) - -// Rate Control Mode. -const ( - // AacRateControlModeCbr is a AacRateControlMode enum value - AacRateControlModeCbr = "CBR" - - // AacRateControlModeVbr is a AacRateControlMode enum value - AacRateControlModeVbr = "VBR" -) - -// Enables LATM/LOAS AAC output. Note that if you use LATM/LOAS AAC in an output, -// you must choose "No container" for the output container. -const ( - // AacRawFormatLatmLoas is a AacRawFormat enum value - AacRawFormatLatmLoas = "LATM_LOAS" - - // AacRawFormatNone is a AacRawFormat enum value - AacRawFormatNone = "NONE" -) - -// Use MPEG-2 AAC instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream -// containers. -const ( - // AacSpecificationMpeg2 is a AacSpecification enum value - AacSpecificationMpeg2 = "MPEG2" - - // AacSpecificationMpeg4 is a AacSpecification enum value - AacSpecificationMpeg4 = "MPEG4" -) - -// VBR Quality Level - Only used if rate_control_mode is VBR. -const ( - // AacVbrQualityLow is a AacVbrQuality enum value - AacVbrQualityLow = "LOW" - - // AacVbrQualityMediumLow is a AacVbrQuality enum value - AacVbrQualityMediumLow = "MEDIUM_LOW" - - // AacVbrQualityMediumHigh is a AacVbrQuality enum value - AacVbrQualityMediumHigh = "MEDIUM_HIGH" - - // AacVbrQualityHigh is a AacVbrQuality enum value - AacVbrQualityHigh = "HIGH" -) - -// Specifies the "Bitstream Mode" (bsmod) for the emitted AC-3 stream. See ATSC -// A/52-2012 for background on these values. 
-const ( - // Ac3BitstreamModeCompleteMain is a Ac3BitstreamMode enum value - Ac3BitstreamModeCompleteMain = "COMPLETE_MAIN" - - // Ac3BitstreamModeCommentary is a Ac3BitstreamMode enum value - Ac3BitstreamModeCommentary = "COMMENTARY" - - // Ac3BitstreamModeDialogue is a Ac3BitstreamMode enum value - Ac3BitstreamModeDialogue = "DIALOGUE" - - // Ac3BitstreamModeEmergency is a Ac3BitstreamMode enum value - Ac3BitstreamModeEmergency = "EMERGENCY" - - // Ac3BitstreamModeHearingImpaired is a Ac3BitstreamMode enum value - Ac3BitstreamModeHearingImpaired = "HEARING_IMPAIRED" - - // Ac3BitstreamModeMusicAndEffects is a Ac3BitstreamMode enum value - Ac3BitstreamModeMusicAndEffects = "MUSIC_AND_EFFECTS" - - // Ac3BitstreamModeVisuallyImpaired is a Ac3BitstreamMode enum value - Ac3BitstreamModeVisuallyImpaired = "VISUALLY_IMPAIRED" - - // Ac3BitstreamModeVoiceOver is a Ac3BitstreamMode enum value - Ac3BitstreamModeVoiceOver = "VOICE_OVER" -) - -// Dolby Digital coding mode. Determines number of channels. -const ( - // Ac3CodingModeCodingMode10 is a Ac3CodingMode enum value - Ac3CodingModeCodingMode10 = "CODING_MODE_1_0" - - // Ac3CodingModeCodingMode11 is a Ac3CodingMode enum value - Ac3CodingModeCodingMode11 = "CODING_MODE_1_1" - - // Ac3CodingModeCodingMode20 is a Ac3CodingMode enum value - Ac3CodingModeCodingMode20 = "CODING_MODE_2_0" - - // Ac3CodingModeCodingMode32Lfe is a Ac3CodingMode enum value - Ac3CodingModeCodingMode32Lfe = "CODING_MODE_3_2_LFE" -) - -// If set to FILM_STANDARD, adds dynamic range compression signaling to the -// output bitstream as defined in the Dolby Digital specification. 
-const ( - // Ac3DynamicRangeCompressionProfileFilmStandard is a Ac3DynamicRangeCompressionProfile enum value - Ac3DynamicRangeCompressionProfileFilmStandard = "FILM_STANDARD" - - // Ac3DynamicRangeCompressionProfileNone is a Ac3DynamicRangeCompressionProfile enum value - Ac3DynamicRangeCompressionProfileNone = "NONE" -) - -// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only -// valid with 3_2_LFE coding mode. -const ( - // Ac3LfeFilterEnabled is a Ac3LfeFilter enum value - Ac3LfeFilterEnabled = "ENABLED" - - // Ac3LfeFilterDisabled is a Ac3LfeFilter enum value - Ac3LfeFilterDisabled = "DISABLED" -) - -// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, -// or DolbyE decoder that supplied this audio data. If audio was not supplied -// from one of these streams, then the static metadata settings will be used. -const ( - // Ac3MetadataControlFollowInput is a Ac3MetadataControl enum value - Ac3MetadataControlFollowInput = "FOLLOW_INPUT" - - // Ac3MetadataControlUseConfigured is a Ac3MetadataControl enum value - Ac3MetadataControlUseConfigured = "USE_CONFIGURED" -) - -// This setting only applies to H.264 and MPEG2 outputs. Use Insert AFD signaling -// (AfdSignaling) to whether there are AFD values in the output video data and -// what those values are. * Choose None to remove all AFD values from this output. -// * Choose Fixed to ignore input AFD values and instead encode the value specified -// in the job. * Choose Auto to calculate output AFD values based on the input -// AFD scaler data. -const ( - // AfdSignalingNone is a AfdSignaling enum value - AfdSignalingNone = "NONE" - - // AfdSignalingAuto is a AfdSignaling enum value - AfdSignalingAuto = "AUTO" - - // AfdSignalingFixed is a AfdSignaling enum value - AfdSignalingFixed = "FIXED" -) - -// Enable Anti-alias (AntiAlias) to enhance sharp edges in video output when -// your input resolution is much larger than your output resolution. Default -// is enabled. 
-const ( - // AntiAliasDisabled is a AntiAlias enum value - AntiAliasDisabled = "DISABLED" - - // AntiAliasEnabled is a AntiAlias enum value - AntiAliasEnabled = "ENABLED" -) - -// Type of Audio codec. -const ( - // AudioCodecAac is a AudioCodec enum value - AudioCodecAac = "AAC" - - // AudioCodecMp2 is a AudioCodec enum value - AudioCodecMp2 = "MP2" - - // AudioCodecWav is a AudioCodec enum value - AudioCodecWav = "WAV" - - // AudioCodecAiff is a AudioCodec enum value - AudioCodecAiff = "AIFF" - - // AudioCodecAc3 is a AudioCodec enum value - AudioCodecAc3 = "AC3" - - // AudioCodecEac3 is a AudioCodec enum value - AudioCodecEac3 = "EAC3" - - // AudioCodecPassthrough is a AudioCodec enum value - AudioCodecPassthrough = "PASSTHROUGH" -) - -// When an "Audio Description":#audio_description specifies an AudioSelector -// or AudioSelectorGroup for which no matching source is found in the input, -// then the audio selector marked as DEFAULT will be used. If none are marked -// as default, silence will be inserted for the duration of the input. -const ( - // AudioDefaultSelectionDefault is a AudioDefaultSelection enum value - AudioDefaultSelectionDefault = "DEFAULT" - - // AudioDefaultSelectionNotDefault is a AudioDefaultSelection enum value - AudioDefaultSelectionNotDefault = "NOT_DEFAULT" -) - -// Choosing FOLLOW_INPUT will cause the ISO 639 language code of the output -// to follow the ISO 639 language code of the input. The language specified -// for languageCode' will be used when USE_CONFIGURED is selected or when FOLLOW_INPUT -// is selected but there is no ISO 639 language code specified by the input. -const ( - // AudioLanguageCodeControlFollowInput is a AudioLanguageCodeControl enum value - AudioLanguageCodeControlFollowInput = "FOLLOW_INPUT" - - // AudioLanguageCodeControlUseConfigured is a AudioLanguageCodeControl enum value - AudioLanguageCodeControlUseConfigured = "USE_CONFIGURED" -) - -// Audio normalization algorithm to use. 
1770-1 conforms to the CALM Act specification, -// 1770-2 conforms to the EBU R-128 specification. -const ( - // AudioNormalizationAlgorithmItuBs17701 is a AudioNormalizationAlgorithm enum value - AudioNormalizationAlgorithmItuBs17701 = "ITU_BS_1770_1" - - // AudioNormalizationAlgorithmItuBs17702 is a AudioNormalizationAlgorithm enum value - AudioNormalizationAlgorithmItuBs17702 = "ITU_BS_1770_2" -) - -// When enabled the output audio is corrected using the chosen algorithm. If -// disabled, the audio will be measured but not adjusted. -const ( - // AudioNormalizationAlgorithmControlCorrectAudio is a AudioNormalizationAlgorithmControl enum value - AudioNormalizationAlgorithmControlCorrectAudio = "CORRECT_AUDIO" - - // AudioNormalizationAlgorithmControlMeasureOnly is a AudioNormalizationAlgorithmControl enum value - AudioNormalizationAlgorithmControlMeasureOnly = "MEASURE_ONLY" -) - -// If set to LOG, log each output's audio track loudness to a CSV file. -const ( - // AudioNormalizationLoudnessLoggingLog is a AudioNormalizationLoudnessLogging enum value - AudioNormalizationLoudnessLoggingLog = "LOG" - - // AudioNormalizationLoudnessLoggingDontLog is a AudioNormalizationLoudnessLogging enum value - AudioNormalizationLoudnessLoggingDontLog = "DONT_LOG" -) - -// If set to TRUE_PEAK, calculate and log the TruePeak for each output's audio -// track loudness. -const ( - // AudioNormalizationPeakCalculationTruePeak is a AudioNormalizationPeakCalculation enum value - AudioNormalizationPeakCalculationTruePeak = "TRUE_PEAK" - - // AudioNormalizationPeakCalculationNone is a AudioNormalizationPeakCalculation enum value - AudioNormalizationPeakCalculationNone = "NONE" -) - -// Specifies the type of the audio selector. 
-const ( - // AudioSelectorTypePid is a AudioSelectorType enum value - AudioSelectorTypePid = "PID" - - // AudioSelectorTypeTrack is a AudioSelectorType enum value - AudioSelectorTypeTrack = "TRACK" - - // AudioSelectorTypeLanguageCode is a AudioSelectorType enum value - AudioSelectorTypeLanguageCode = "LANGUAGE_CODE" -) - -// When set to FOLLOW_INPUT, if the input contains an ISO 639 audio_type, then -// that value is passed through to the output. If the input contains no ISO -// 639 audio_type, the value in Audio Type is included in the output. Otherwise -// the value in Audio Type is included in the output. Note that this field and -// audioType are both ignored if audioDescriptionBroadcasterMix is set to BROADCASTER_MIXED_AD. -const ( - // AudioTypeControlFollowInput is a AudioTypeControl enum value - AudioTypeControlFollowInput = "FOLLOW_INPUT" - - // AudioTypeControlUseConfigured is a AudioTypeControl enum value - AudioTypeControlUseConfigured = "USE_CONFIGURED" -) - -// If no explicit x_position or y_position is provided, setting alignment to -// centered will place the captions at the bottom center of the output. Similarly, -// setting a left alignment will align captions to the bottom left of the output. -// If x and y positions are given in conjunction with the alignment parameter, -// the font will be justified (either left or centered) relative to those coordinates. -// This option is not valid for source captions that are STL, 608/embedded or -// teletext. These source settings are already pre-defined by the caption stream. -// All burn-in and DVB-Sub font settings must match. -const ( - // BurninSubtitleAlignmentCentered is a BurninSubtitleAlignment enum value - BurninSubtitleAlignmentCentered = "CENTERED" - - // BurninSubtitleAlignmentLeft is a BurninSubtitleAlignment enum value - BurninSubtitleAlignmentLeft = "LEFT" -) - -// Specifies the color of the rectangle behind the captions.All burn-in and -// DVB-Sub font settings must match. 
-const ( - // BurninSubtitleBackgroundColorNone is a BurninSubtitleBackgroundColor enum value - BurninSubtitleBackgroundColorNone = "NONE" - - // BurninSubtitleBackgroundColorBlack is a BurninSubtitleBackgroundColor enum value - BurninSubtitleBackgroundColorBlack = "BLACK" - - // BurninSubtitleBackgroundColorWhite is a BurninSubtitleBackgroundColor enum value - BurninSubtitleBackgroundColorWhite = "WHITE" -) - -// Specifies the color of the burned-in captions. This option is not valid for -// source captions that are STL, 608/embedded or teletext. These source settings -// are already pre-defined by the caption stream. All burn-in and DVB-Sub font -// settings must match. -const ( - // BurninSubtitleFontColorWhite is a BurninSubtitleFontColor enum value - BurninSubtitleFontColorWhite = "WHITE" - - // BurninSubtitleFontColorBlack is a BurninSubtitleFontColor enum value - BurninSubtitleFontColorBlack = "BLACK" - - // BurninSubtitleFontColorYellow is a BurninSubtitleFontColor enum value - BurninSubtitleFontColorYellow = "YELLOW" - - // BurninSubtitleFontColorRed is a BurninSubtitleFontColor enum value - BurninSubtitleFontColorRed = "RED" - - // BurninSubtitleFontColorGreen is a BurninSubtitleFontColor enum value - BurninSubtitleFontColorGreen = "GREEN" - - // BurninSubtitleFontColorBlue is a BurninSubtitleFontColor enum value - BurninSubtitleFontColorBlue = "BLUE" -) - -// Specifies font outline color. This option is not valid for source captions -// that are either 608/embedded or teletext. These source settings are already -// pre-defined by the caption stream. All burn-in and DVB-Sub font settings -// must match. 
-const ( - // BurninSubtitleOutlineColorBlack is a BurninSubtitleOutlineColor enum value - BurninSubtitleOutlineColorBlack = "BLACK" - - // BurninSubtitleOutlineColorWhite is a BurninSubtitleOutlineColor enum value - BurninSubtitleOutlineColorWhite = "WHITE" - - // BurninSubtitleOutlineColorYellow is a BurninSubtitleOutlineColor enum value - BurninSubtitleOutlineColorYellow = "YELLOW" - - // BurninSubtitleOutlineColorRed is a BurninSubtitleOutlineColor enum value - BurninSubtitleOutlineColorRed = "RED" - - // BurninSubtitleOutlineColorGreen is a BurninSubtitleOutlineColor enum value - BurninSubtitleOutlineColorGreen = "GREEN" - - // BurninSubtitleOutlineColorBlue is a BurninSubtitleOutlineColor enum value - BurninSubtitleOutlineColorBlue = "BLUE" -) - -// Specifies the color of the shadow cast by the captions.All burn-in and DVB-Sub -// font settings must match. -const ( - // BurninSubtitleShadowColorNone is a BurninSubtitleShadowColor enum value - BurninSubtitleShadowColorNone = "NONE" - - // BurninSubtitleShadowColorBlack is a BurninSubtitleShadowColor enum value - BurninSubtitleShadowColorBlack = "BLACK" - - // BurninSubtitleShadowColorWhite is a BurninSubtitleShadowColor enum value - BurninSubtitleShadowColorWhite = "WHITE" -) - -// Controls whether a fixed grid size or proportional font spacing will be used -// to generate the output subtitles bitmap. Only applicable for Teletext inputs -// and DVB-Sub/Burn-in outputs. 
-const ( - // BurninSubtitleTeletextSpacingFixedGrid is a BurninSubtitleTeletextSpacing enum value - BurninSubtitleTeletextSpacingFixedGrid = "FIXED_GRID" - - // BurninSubtitleTeletextSpacingProportional is a BurninSubtitleTeletextSpacing enum value - BurninSubtitleTeletextSpacingProportional = "PROPORTIONAL" -) - -const ( - // CaptionDestinationTypeBurnIn is a CaptionDestinationType enum value - CaptionDestinationTypeBurnIn = "BURN_IN" - - // CaptionDestinationTypeDvbSub is a CaptionDestinationType enum value - CaptionDestinationTypeDvbSub = "DVB_SUB" - - // CaptionDestinationTypeEmbedded is a CaptionDestinationType enum value - CaptionDestinationTypeEmbedded = "EMBEDDED" - - // CaptionDestinationTypeScc is a CaptionDestinationType enum value - CaptionDestinationTypeScc = "SCC" - - // CaptionDestinationTypeSrt is a CaptionDestinationType enum value - CaptionDestinationTypeSrt = "SRT" - - // CaptionDestinationTypeTeletext is a CaptionDestinationType enum value - CaptionDestinationTypeTeletext = "TELETEXT" - - // CaptionDestinationTypeTtml is a CaptionDestinationType enum value - CaptionDestinationTypeTtml = "TTML" - - // CaptionDestinationTypeWebvtt is a CaptionDestinationType enum value - CaptionDestinationTypeWebvtt = "WEBVTT" -) - -// Use Source (SourceType) to identify the format of your input captions. The -// service cannot auto-detect caption format. 
-const ( - // CaptionSourceTypeAncillary is a CaptionSourceType enum value - CaptionSourceTypeAncillary = "ANCILLARY" - - // CaptionSourceTypeDvbSub is a CaptionSourceType enum value - CaptionSourceTypeDvbSub = "DVB_SUB" - - // CaptionSourceTypeEmbedded is a CaptionSourceType enum value - CaptionSourceTypeEmbedded = "EMBEDDED" - - // CaptionSourceTypeScc is a CaptionSourceType enum value - CaptionSourceTypeScc = "SCC" - - // CaptionSourceTypeTtml is a CaptionSourceType enum value - CaptionSourceTypeTtml = "TTML" - - // CaptionSourceTypeStl is a CaptionSourceType enum value - CaptionSourceTypeStl = "STL" - - // CaptionSourceTypeSrt is a CaptionSourceType enum value - CaptionSourceTypeSrt = "SRT" - - // CaptionSourceTypeTeletext is a CaptionSourceType enum value - CaptionSourceTypeTeletext = "TELETEXT" - - // CaptionSourceTypeNullSource is a CaptionSourceType enum value - CaptionSourceTypeNullSource = "NULL_SOURCE" -) - -// Enable Insert color metadata (ColorMetadata) to include color metadata in -// this output. This setting is enabled by default. -const ( - // ColorMetadataIgnore is a ColorMetadata enum value - ColorMetadataIgnore = "IGNORE" - - // ColorMetadataInsert is a ColorMetadata enum value - ColorMetadataInsert = "INSERT" -) - -// Specifies the colorspace of an input. This setting works in tandem with "Color -// Corrector":#color_corrector > color_space_conversion to determine if any -// conversion will be performed. -const ( - // ColorSpaceFollow is a ColorSpace enum value - ColorSpaceFollow = "FOLLOW" - - // ColorSpaceRec601 is a ColorSpace enum value - ColorSpaceRec601 = "REC_601" - - // ColorSpaceRec709 is a ColorSpace enum value - ColorSpaceRec709 = "REC_709" - - // ColorSpaceHdr10 is a ColorSpace enum value - ColorSpaceHdr10 = "HDR10" - - // ColorSpaceHlg2020 is a ColorSpace enum value - ColorSpaceHlg2020 = "HLG_2020" -) - -// Determines if colorspace conversion will be performed. If set to _None_, -// no conversion will be performed. 
If _Force 601_ or _Force 709_ are selected, -// conversion will be performed for inputs with differing colorspaces. An input's -// colorspace can be specified explicitly in the "Video Selector":#inputs-video_selector -// if necessary. -const ( - // ColorSpaceConversionNone is a ColorSpaceConversion enum value - ColorSpaceConversionNone = "NONE" - - // ColorSpaceConversionForce601 is a ColorSpaceConversion enum value - ColorSpaceConversionForce601 = "FORCE_601" - - // ColorSpaceConversionForce709 is a ColorSpaceConversion enum value - ColorSpaceConversionForce709 = "FORCE_709" - - // ColorSpaceConversionForceHdr10 is a ColorSpaceConversion enum value - ColorSpaceConversionForceHdr10 = "FORCE_HDR10" - - // ColorSpaceConversionForceHlg2020 is a ColorSpaceConversion enum value - ColorSpaceConversionForceHlg2020 = "FORCE_HLG_2020" -) - -// There are two sources for color metadata, the input file and the job configuration. -// This enum controls which takes precedence. FORCE: System will use color metadata -// supplied by user, if any. If the user does not supply color metadata the -// system will use data from the source. FALLBACK: System will use color metadata -// from the source. If source has no color metadata, the system will use user-supplied -// color metadata values if available. -const ( - // ColorSpaceUsageForce is a ColorSpaceUsage enum value - ColorSpaceUsageForce = "FORCE" - - // ColorSpaceUsageFallback is a ColorSpaceUsage enum value - ColorSpaceUsageFallback = "FALLBACK" -) - -// Container for this output. Some containers require a container settings object. -// If not specified, the default object will be created. 
-const ( - // ContainerTypeF4v is a ContainerType enum value - ContainerTypeF4v = "F4V" - - // ContainerTypeIsmv is a ContainerType enum value - ContainerTypeIsmv = "ISMV" - - // ContainerTypeM2ts is a ContainerType enum value - ContainerTypeM2ts = "M2TS" - - // ContainerTypeM3u8 is a ContainerType enum value - ContainerTypeM3u8 = "M3U8" - - // ContainerTypeMov is a ContainerType enum value - ContainerTypeMov = "MOV" - - // ContainerTypeMp4 is a ContainerType enum value - ContainerTypeMp4 = "MP4" - - // ContainerTypeMpd is a ContainerType enum value - ContainerTypeMpd = "MPD" - - // ContainerTypeMxf is a ContainerType enum value - ContainerTypeMxf = "MXF" - - // ContainerTypeRaw is a ContainerType enum value - ContainerTypeRaw = "RAW" -) - -// Supports HbbTV specification as indicated -const ( - // DashIsoHbbtvComplianceHbbtv15 is a DashIsoHbbtvCompliance enum value - DashIsoHbbtvComplianceHbbtv15 = "HBBTV_1_5" - - // DashIsoHbbtvComplianceNone is a DashIsoHbbtvCompliance enum value - DashIsoHbbtvComplianceNone = "NONE" -) - -// When set to SINGLE_FILE, a single output file is generated, which is internally -// segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, -// separate segment files will be created. -const ( - // DashIsoSegmentControlSingleFile is a DashIsoSegmentControl enum value - DashIsoSegmentControlSingleFile = "SINGLE_FILE" - - // DashIsoSegmentControlSegmentedFiles is a DashIsoSegmentControl enum value - DashIsoSegmentControlSegmentedFiles = "SEGMENTED_FILES" -) - -// Only applies when you set Deinterlacer (DeinterlaceMode) to Deinterlace (DEINTERLACE) -// or Adaptive (ADAPTIVE). Motion adaptive interpolate (INTERPOLATE) produces -// sharper pictures, while blend (BLEND) produces smoother motion. Use (INTERPOLATE_TICKER) -// OR (BLEND_TICKER) if your source file includes a ticker, such as a scrolling -// headline at the bottom of the frame. 
-const ( - // DeinterlaceAlgorithmInterpolate is a DeinterlaceAlgorithm enum value - DeinterlaceAlgorithmInterpolate = "INTERPOLATE" - - // DeinterlaceAlgorithmInterpolateTicker is a DeinterlaceAlgorithm enum value - DeinterlaceAlgorithmInterpolateTicker = "INTERPOLATE_TICKER" - - // DeinterlaceAlgorithmBlend is a DeinterlaceAlgorithm enum value - DeinterlaceAlgorithmBlend = "BLEND" - - // DeinterlaceAlgorithmBlendTicker is a DeinterlaceAlgorithm enum value - DeinterlaceAlgorithmBlendTicker = "BLEND_TICKER" -) - -// - When set to NORMAL (default), the deinterlacer does not convert frames -// that are tagged in metadata as progressive. It will only convert those that -// are tagged as some other type. - When set to FORCE_ALL_FRAMES, the deinterlacer -// converts every frame to progressive - even those that are already tagged -// as progressive. Turn Force mode on only if there is a good chance that the -// metadata has tagged frames as progressive when they are not progressive. -// Do not turn on otherwise; processing frames that are already progressive -// into progressive will probably result in lower quality video. -const ( - // DeinterlacerControlForceAllFrames is a DeinterlacerControl enum value - DeinterlacerControlForceAllFrames = "FORCE_ALL_FRAMES" - - // DeinterlacerControlNormal is a DeinterlacerControl enum value - DeinterlacerControlNormal = "NORMAL" -) - -// Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing. -// Default is Deinterlace. - Deinterlace converts interlaced to progressive. -// - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. -// - Adaptive auto-detects and converts to progressive. 
-const ( - // DeinterlacerModeDeinterlace is a DeinterlacerMode enum value - DeinterlacerModeDeinterlace = "DEINTERLACE" - - // DeinterlacerModeInverseTelecine is a DeinterlacerMode enum value - DeinterlacerModeInverseTelecine = "INVERSE_TELECINE" - - // DeinterlacerModeAdaptive is a DeinterlacerMode enum value - DeinterlacerModeAdaptive = "ADAPTIVE" -) - -// Applies only to 29.97 fps outputs. When this feature is enabled, the service -// will use drop-frame timecode on outputs. If it is not possible to use drop-frame -// timecode, the system will fall back to non-drop-frame. This setting is enabled -// by default when Timecode insertion (TimecodeInsertion) is enabled. -const ( - // DropFrameTimecodeDisabled is a DropFrameTimecode enum value - DropFrameTimecodeDisabled = "DISABLED" - - // DropFrameTimecodeEnabled is a DropFrameTimecode enum value - DropFrameTimecodeEnabled = "ENABLED" -) - -// If no explicit x_position or y_position is provided, setting alignment to -// centered will place the captions at the bottom center of the output. Similarly, -// setting a left alignment will align captions to the bottom left of the output. -// If x and y positions are given in conjunction with the alignment parameter, -// the font will be justified (either left or centered) relative to those coordinates. -// This option is not valid for source captions that are STL, 608/embedded or -// teletext. These source settings are already pre-defined by the caption stream. -// All burn-in and DVB-Sub font settings must match. -const ( - // DvbSubtitleAlignmentCentered is a DvbSubtitleAlignment enum value - DvbSubtitleAlignmentCentered = "CENTERED" - - // DvbSubtitleAlignmentLeft is a DvbSubtitleAlignment enum value - DvbSubtitleAlignmentLeft = "LEFT" -) - -// Specifies the color of the rectangle behind the captions.All burn-in and -// DVB-Sub font settings must match. 
-const ( - // DvbSubtitleBackgroundColorNone is a DvbSubtitleBackgroundColor enum value - DvbSubtitleBackgroundColorNone = "NONE" - - // DvbSubtitleBackgroundColorBlack is a DvbSubtitleBackgroundColor enum value - DvbSubtitleBackgroundColorBlack = "BLACK" - - // DvbSubtitleBackgroundColorWhite is a DvbSubtitleBackgroundColor enum value - DvbSubtitleBackgroundColorWhite = "WHITE" -) - -// Specifies the color of the burned-in captions. This option is not valid for -// source captions that are STL, 608/embedded or teletext. These source settings -// are already pre-defined by the caption stream. All burn-in and DVB-Sub font -// settings must match. -const ( - // DvbSubtitleFontColorWhite is a DvbSubtitleFontColor enum value - DvbSubtitleFontColorWhite = "WHITE" - - // DvbSubtitleFontColorBlack is a DvbSubtitleFontColor enum value - DvbSubtitleFontColorBlack = "BLACK" - - // DvbSubtitleFontColorYellow is a DvbSubtitleFontColor enum value - DvbSubtitleFontColorYellow = "YELLOW" - - // DvbSubtitleFontColorRed is a DvbSubtitleFontColor enum value - DvbSubtitleFontColorRed = "RED" - - // DvbSubtitleFontColorGreen is a DvbSubtitleFontColor enum value - DvbSubtitleFontColorGreen = "GREEN" - - // DvbSubtitleFontColorBlue is a DvbSubtitleFontColor enum value - DvbSubtitleFontColorBlue = "BLUE" -) - -// Specifies font outline color. This option is not valid for source captions -// that are either 608/embedded or teletext. These source settings are already -// pre-defined by the caption stream. All burn-in and DVB-Sub font settings -// must match. 
-const ( - // DvbSubtitleOutlineColorBlack is a DvbSubtitleOutlineColor enum value - DvbSubtitleOutlineColorBlack = "BLACK" - - // DvbSubtitleOutlineColorWhite is a DvbSubtitleOutlineColor enum value - DvbSubtitleOutlineColorWhite = "WHITE" - - // DvbSubtitleOutlineColorYellow is a DvbSubtitleOutlineColor enum value - DvbSubtitleOutlineColorYellow = "YELLOW" - - // DvbSubtitleOutlineColorRed is a DvbSubtitleOutlineColor enum value - DvbSubtitleOutlineColorRed = "RED" - - // DvbSubtitleOutlineColorGreen is a DvbSubtitleOutlineColor enum value - DvbSubtitleOutlineColorGreen = "GREEN" - - // DvbSubtitleOutlineColorBlue is a DvbSubtitleOutlineColor enum value - DvbSubtitleOutlineColorBlue = "BLUE" -) - -// Specifies the color of the shadow cast by the captions.All burn-in and DVB-Sub -// font settings must match. -const ( - // DvbSubtitleShadowColorNone is a DvbSubtitleShadowColor enum value - DvbSubtitleShadowColorNone = "NONE" - - // DvbSubtitleShadowColorBlack is a DvbSubtitleShadowColor enum value - DvbSubtitleShadowColorBlack = "BLACK" - - // DvbSubtitleShadowColorWhite is a DvbSubtitleShadowColor enum value - DvbSubtitleShadowColorWhite = "WHITE" -) - -// Controls whether a fixed grid size or proportional font spacing will be used -// to generate the output subtitles bitmap. Only applicable for Teletext inputs -// and DVB-Sub/Burn-in outputs. -const ( - // DvbSubtitleTeletextSpacingFixedGrid is a DvbSubtitleTeletextSpacing enum value - DvbSubtitleTeletextSpacingFixedGrid = "FIXED_GRID" - - // DvbSubtitleTeletextSpacingProportional is a DvbSubtitleTeletextSpacing enum value - DvbSubtitleTeletextSpacingProportional = "PROPORTIONAL" -) - -// If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround channels. -// Only used for 3/2 coding mode. 
-const ( - // Eac3AttenuationControlAttenuate3Db is a Eac3AttenuationControl enum value - Eac3AttenuationControlAttenuate3Db = "ATTENUATE_3_DB" - - // Eac3AttenuationControlNone is a Eac3AttenuationControl enum value - Eac3AttenuationControlNone = "NONE" -) - -// Specifies the "Bitstream Mode" (bsmod) for the emitted E-AC-3 stream. See -// ATSC A/52-2012 (Annex E) for background on these values. -const ( - // Eac3BitstreamModeCompleteMain is a Eac3BitstreamMode enum value - Eac3BitstreamModeCompleteMain = "COMPLETE_MAIN" - - // Eac3BitstreamModeCommentary is a Eac3BitstreamMode enum value - Eac3BitstreamModeCommentary = "COMMENTARY" - - // Eac3BitstreamModeEmergency is a Eac3BitstreamMode enum value - Eac3BitstreamModeEmergency = "EMERGENCY" - - // Eac3BitstreamModeHearingImpaired is a Eac3BitstreamMode enum value - Eac3BitstreamModeHearingImpaired = "HEARING_IMPAIRED" - - // Eac3BitstreamModeVisuallyImpaired is a Eac3BitstreamMode enum value - Eac3BitstreamModeVisuallyImpaired = "VISUALLY_IMPAIRED" -) - -// Dolby Digital Plus coding mode. Determines number of channels. -const ( - // Eac3CodingModeCodingMode10 is a Eac3CodingMode enum value - Eac3CodingModeCodingMode10 = "CODING_MODE_1_0" - - // Eac3CodingModeCodingMode20 is a Eac3CodingMode enum value - Eac3CodingModeCodingMode20 = "CODING_MODE_2_0" - - // Eac3CodingModeCodingMode32 is a Eac3CodingMode enum value - Eac3CodingModeCodingMode32 = "CODING_MODE_3_2" -) - -// Activates a DC highpass filter for all input channels. -const ( - // Eac3DcFilterEnabled is a Eac3DcFilter enum value - Eac3DcFilterEnabled = "ENABLED" - - // Eac3DcFilterDisabled is a Eac3DcFilter enum value - Eac3DcFilterDisabled = "DISABLED" -) - -// Enables Dynamic Range Compression that restricts the absolute peak level -// for a signal. 
-const ( - // Eac3DynamicRangeCompressionLineNone is a Eac3DynamicRangeCompressionLine enum value - Eac3DynamicRangeCompressionLineNone = "NONE" - - // Eac3DynamicRangeCompressionLineFilmStandard is a Eac3DynamicRangeCompressionLine enum value - Eac3DynamicRangeCompressionLineFilmStandard = "FILM_STANDARD" - - // Eac3DynamicRangeCompressionLineFilmLight is a Eac3DynamicRangeCompressionLine enum value - Eac3DynamicRangeCompressionLineFilmLight = "FILM_LIGHT" - - // Eac3DynamicRangeCompressionLineMusicStandard is a Eac3DynamicRangeCompressionLine enum value - Eac3DynamicRangeCompressionLineMusicStandard = "MUSIC_STANDARD" - - // Eac3DynamicRangeCompressionLineMusicLight is a Eac3DynamicRangeCompressionLine enum value - Eac3DynamicRangeCompressionLineMusicLight = "MUSIC_LIGHT" - - // Eac3DynamicRangeCompressionLineSpeech is a Eac3DynamicRangeCompressionLine enum value - Eac3DynamicRangeCompressionLineSpeech = "SPEECH" -) - -// Enables Heavy Dynamic Range Compression, ensures that the instantaneous signal -// peaks do not exceed specified levels. 
-const ( - // Eac3DynamicRangeCompressionRfNone is a Eac3DynamicRangeCompressionRf enum value - Eac3DynamicRangeCompressionRfNone = "NONE" - - // Eac3DynamicRangeCompressionRfFilmStandard is a Eac3DynamicRangeCompressionRf enum value - Eac3DynamicRangeCompressionRfFilmStandard = "FILM_STANDARD" - - // Eac3DynamicRangeCompressionRfFilmLight is a Eac3DynamicRangeCompressionRf enum value - Eac3DynamicRangeCompressionRfFilmLight = "FILM_LIGHT" - - // Eac3DynamicRangeCompressionRfMusicStandard is a Eac3DynamicRangeCompressionRf enum value - Eac3DynamicRangeCompressionRfMusicStandard = "MUSIC_STANDARD" - - // Eac3DynamicRangeCompressionRfMusicLight is a Eac3DynamicRangeCompressionRf enum value - Eac3DynamicRangeCompressionRfMusicLight = "MUSIC_LIGHT" - - // Eac3DynamicRangeCompressionRfSpeech is a Eac3DynamicRangeCompressionRf enum value - Eac3DynamicRangeCompressionRfSpeech = "SPEECH" -) - -// When encoding 3/2 audio, controls whether the LFE channel is enabled -const ( - // Eac3LfeControlLfe is a Eac3LfeControl enum value - Eac3LfeControlLfe = "LFE" - - // Eac3LfeControlNoLfe is a Eac3LfeControl enum value - Eac3LfeControlNoLfe = "NO_LFE" -) - -// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only -// valid with 3_2_LFE coding mode. -const ( - // Eac3LfeFilterEnabled is a Eac3LfeFilter enum value - Eac3LfeFilterEnabled = "ENABLED" - - // Eac3LfeFilterDisabled is a Eac3LfeFilter enum value - Eac3LfeFilterDisabled = "DISABLED" -) - -// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, -// or DolbyE decoder that supplied this audio data. If audio was not supplied -// from one of these streams, then the static metadata settings will be used. 
-const ( - // Eac3MetadataControlFollowInput is a Eac3MetadataControl enum value - Eac3MetadataControlFollowInput = "FOLLOW_INPUT" - - // Eac3MetadataControlUseConfigured is a Eac3MetadataControl enum value - Eac3MetadataControlUseConfigured = "USE_CONFIGURED" -) - -// When set to WHEN_POSSIBLE, input DD+ audio will be passed through if it is -// present on the input. this detection is dynamic over the life of the transcode. -// Inputs that alternate between DD+ and non-DD+ content will have a consistent -// DD+ output as the system alternates between passthrough and encoding. -const ( - // Eac3PassthroughControlWhenPossible is a Eac3PassthroughControl enum value - Eac3PassthroughControlWhenPossible = "WHEN_POSSIBLE" - - // Eac3PassthroughControlNoPassthrough is a Eac3PassthroughControl enum value - Eac3PassthroughControlNoPassthrough = "NO_PASSTHROUGH" -) - -// Controls the amount of phase-shift applied to the surround channels. Only -// used for 3/2 coding mode. -const ( - // Eac3PhaseControlShift90Degrees is a Eac3PhaseControl enum value - Eac3PhaseControlShift90Degrees = "SHIFT_90_DEGREES" - - // Eac3PhaseControlNoShift is a Eac3PhaseControl enum value - Eac3PhaseControlNoShift = "NO_SHIFT" -) - -// Stereo downmix preference. Only used for 3/2 coding mode. -const ( - // Eac3StereoDownmixNotIndicated is a Eac3StereoDownmix enum value - Eac3StereoDownmixNotIndicated = "NOT_INDICATED" - - // Eac3StereoDownmixLoRo is a Eac3StereoDownmix enum value - Eac3StereoDownmixLoRo = "LO_RO" - - // Eac3StereoDownmixLtRt is a Eac3StereoDownmix enum value - Eac3StereoDownmixLtRt = "LT_RT" - - // Eac3StereoDownmixDpl2 is a Eac3StereoDownmix enum value - Eac3StereoDownmixDpl2 = "DPL2" -) - -// When encoding 3/2 audio, sets whether an extra center back surround channel -// is matrix encoded into the left and right surround channels. 
-const ( - // Eac3SurroundExModeNotIndicated is a Eac3SurroundExMode enum value - Eac3SurroundExModeNotIndicated = "NOT_INDICATED" - - // Eac3SurroundExModeEnabled is a Eac3SurroundExMode enum value - Eac3SurroundExModeEnabled = "ENABLED" - - // Eac3SurroundExModeDisabled is a Eac3SurroundExMode enum value - Eac3SurroundExModeDisabled = "DISABLED" -) - -// When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into -// the two channels. -const ( - // Eac3SurroundModeNotIndicated is a Eac3SurroundMode enum value - Eac3SurroundModeNotIndicated = "NOT_INDICATED" - - // Eac3SurroundModeEnabled is a Eac3SurroundMode enum value - Eac3SurroundModeEnabled = "ENABLED" - - // Eac3SurroundModeDisabled is a Eac3SurroundMode enum value - Eac3SurroundModeDisabled = "DISABLED" -) - -// When set to UPCONVERT, 608 data is both passed through via the "608 compatibility -// bytes" fields of the 708 wrapper as well as translated into 708. 708 data -// present in the source content will be discarded. -const ( - // EmbeddedConvert608To708Upconvert is a EmbeddedConvert608To708 enum value - EmbeddedConvert608To708Upconvert = "UPCONVERT" - - // EmbeddedConvert608To708Disabled is a EmbeddedConvert608To708 enum value - EmbeddedConvert608To708Disabled = "DISABLED" -) - -// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning -// of the archive as required for progressive downloading. Otherwise it is placed -// normally at the end. -const ( - // F4vMoovPlacementProgressiveDownload is a F4vMoovPlacement enum value - F4vMoovPlacementProgressiveDownload = "PROGRESSIVE_DOWNLOAD" - - // F4vMoovPlacementNormal is a F4vMoovPlacement enum value - F4vMoovPlacementNormal = "NORMAL" -) - -// If set to UPCONVERT, 608 caption data is both passed through via the "608 -// compatibility bytes" fields of the 708 wrapper as well as translated into -// 708. 708 data present in the source content will be discarded. 
-const ( - // FileSourceConvert608To708Upconvert is a FileSourceConvert608To708 enum value - FileSourceConvert608To708Upconvert = "UPCONVERT" - - // FileSourceConvert608To708Disabled is a FileSourceConvert608To708 enum value - FileSourceConvert608To708Disabled = "DISABLED" -) - -// Adaptive quantization. Allows intra-frame quantizers to vary to improve visual -// quality. -const ( - // H264AdaptiveQuantizationOff is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationOff = "OFF" - - // H264AdaptiveQuantizationLow is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationLow = "LOW" - - // H264AdaptiveQuantizationMedium is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationMedium = "MEDIUM" - - // H264AdaptiveQuantizationHigh is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationHigh = "HIGH" - - // H264AdaptiveQuantizationHigher is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationHigher = "HIGHER" - - // H264AdaptiveQuantizationMax is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationMax = "MAX" -) - -// H.264 Level. 
-const ( - // H264CodecLevelAuto is a H264CodecLevel enum value - H264CodecLevelAuto = "AUTO" - - // H264CodecLevelLevel1 is a H264CodecLevel enum value - H264CodecLevelLevel1 = "LEVEL_1" - - // H264CodecLevelLevel11 is a H264CodecLevel enum value - H264CodecLevelLevel11 = "LEVEL_1_1" - - // H264CodecLevelLevel12 is a H264CodecLevel enum value - H264CodecLevelLevel12 = "LEVEL_1_2" - - // H264CodecLevelLevel13 is a H264CodecLevel enum value - H264CodecLevelLevel13 = "LEVEL_1_3" - - // H264CodecLevelLevel2 is a H264CodecLevel enum value - H264CodecLevelLevel2 = "LEVEL_2" - - // H264CodecLevelLevel21 is a H264CodecLevel enum value - H264CodecLevelLevel21 = "LEVEL_2_1" - - // H264CodecLevelLevel22 is a H264CodecLevel enum value - H264CodecLevelLevel22 = "LEVEL_2_2" - - // H264CodecLevelLevel3 is a H264CodecLevel enum value - H264CodecLevelLevel3 = "LEVEL_3" - - // H264CodecLevelLevel31 is a H264CodecLevel enum value - H264CodecLevelLevel31 = "LEVEL_3_1" - - // H264CodecLevelLevel32 is a H264CodecLevel enum value - H264CodecLevelLevel32 = "LEVEL_3_2" - - // H264CodecLevelLevel4 is a H264CodecLevel enum value - H264CodecLevelLevel4 = "LEVEL_4" - - // H264CodecLevelLevel41 is a H264CodecLevel enum value - H264CodecLevelLevel41 = "LEVEL_4_1" - - // H264CodecLevelLevel42 is a H264CodecLevel enum value - H264CodecLevelLevel42 = "LEVEL_4_2" - - // H264CodecLevelLevel5 is a H264CodecLevel enum value - H264CodecLevelLevel5 = "LEVEL_5" - - // H264CodecLevelLevel51 is a H264CodecLevel enum value - H264CodecLevelLevel51 = "LEVEL_5_1" - - // H264CodecLevelLevel52 is a H264CodecLevel enum value - H264CodecLevelLevel52 = "LEVEL_5_2" -) - -// H.264 Profile. High 4:2:2 and 10-bit profiles are only available with the -// AVC-I License. 
-const ( - // H264CodecProfileBaseline is a H264CodecProfile enum value - H264CodecProfileBaseline = "BASELINE" - - // H264CodecProfileHigh is a H264CodecProfile enum value - H264CodecProfileHigh = "HIGH" - - // H264CodecProfileHigh10bit is a H264CodecProfile enum value - H264CodecProfileHigh10bit = "HIGH_10BIT" - - // H264CodecProfileHigh422 is a H264CodecProfile enum value - H264CodecProfileHigh422 = "HIGH_422" - - // H264CodecProfileHigh42210bit is a H264CodecProfile enum value - H264CodecProfileHigh42210bit = "HIGH_422_10BIT" - - // H264CodecProfileMain is a H264CodecProfile enum value - H264CodecProfileMain = "MAIN" -) - -// Entropy encoding mode. Use CABAC (must be in Main or High profile) or CAVLC. -const ( - // H264EntropyEncodingCabac is a H264EntropyEncoding enum value - H264EntropyEncodingCabac = "CABAC" - - // H264EntropyEncodingCavlc is a H264EntropyEncoding enum value - H264EntropyEncodingCavlc = "CAVLC" -) - -// Choosing FORCE_FIELD disables PAFF encoding for interlaced outputs. -const ( - // H264FieldEncodingPaff is a H264FieldEncoding enum value - H264FieldEncodingPaff = "PAFF" - - // H264FieldEncodingForceField is a H264FieldEncoding enum value - H264FieldEncodingForceField = "FORCE_FIELD" -) - -// Adjust quantization within each frame to reduce flicker or 'pop' on I-frames. -const ( - // H264FlickerAdaptiveQuantizationDisabled is a H264FlickerAdaptiveQuantization enum value - H264FlickerAdaptiveQuantizationDisabled = "DISABLED" - - // H264FlickerAdaptiveQuantizationEnabled is a H264FlickerAdaptiveQuantization enum value - H264FlickerAdaptiveQuantizationEnabled = "ENABLED" -) - -// Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want -// the service to use the framerate from the input. Using the console, do this -// by choosing INITIALIZE_FROM_SOURCE for Framerate. 
-const ( - // H264FramerateControlInitializeFromSource is a H264FramerateControl enum value - H264FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" - - // H264FramerateControlSpecified is a H264FramerateControl enum value - H264FramerateControlSpecified = "SPECIFIED" -) - -// When set to INTERPOLATE, produces smoother motion during framerate conversion. -const ( - // H264FramerateConversionAlgorithmDuplicateDrop is a H264FramerateConversionAlgorithm enum value - H264FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" - - // H264FramerateConversionAlgorithmInterpolate is a H264FramerateConversionAlgorithm enum value - H264FramerateConversionAlgorithmInterpolate = "INTERPOLATE" -) - -// If enable, use reference B frames for GOP structures that have B frames > -// 1. -const ( - // H264GopBReferenceDisabled is a H264GopBReference enum value - H264GopBReferenceDisabled = "DISABLED" - - // H264GopBReferenceEnabled is a H264GopBReference enum value - H264GopBReferenceEnabled = "ENABLED" -) - -// Indicates if the GOP Size in H264 is specified in frames or seconds. If seconds -// the system will convert the GOP Size into a frame count at run time. -const ( - // H264GopSizeUnitsFrames is a H264GopSizeUnits enum value - H264GopSizeUnitsFrames = "FRAMES" - - // H264GopSizeUnitsSeconds is a H264GopSizeUnits enum value - H264GopSizeUnitsSeconds = "SECONDS" -) - -// Use Interlace mode (InterlaceMode) to choose the scan line type for the output. -// * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce -// interlaced output with the entire output having the same field polarity (top -// or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default -// Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, -// behavior depends on the input scan type. - If the source is interlaced, the -// output will be interlaced with the same polarity as the source (it will follow -// the source). 
The output could therefore be a mix of "top field first" and -// "bottom field first". - If the source is progressive, the output will be -// interlaced with "top field first" or "bottom field first" polarity, depending -// on which of the Follow options you chose. -const ( - // H264InterlaceModeProgressive is a H264InterlaceMode enum value - H264InterlaceModeProgressive = "PROGRESSIVE" - - // H264InterlaceModeTopField is a H264InterlaceMode enum value - H264InterlaceModeTopField = "TOP_FIELD" - - // H264InterlaceModeBottomField is a H264InterlaceMode enum value - H264InterlaceModeBottomField = "BOTTOM_FIELD" - - // H264InterlaceModeFollowTopField is a H264InterlaceMode enum value - H264InterlaceModeFollowTopField = "FOLLOW_TOP_FIELD" - - // H264InterlaceModeFollowBottomField is a H264InterlaceMode enum value - H264InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD" -) - -// Using the API, enable ParFollowSource if you want the service to use the -// pixel aspect ratio from the input. Using the console, do this by choosing -// Follow source for Pixel aspect ratio. -const ( - // H264ParControlInitializeFromSource is a H264ParControl enum value - H264ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" - - // H264ParControlSpecified is a H264ParControl enum value - H264ParControlSpecified = "SPECIFIED" -) - -// Use Quality tuning level (H264QualityTuningLevel) to specifiy whether to -// use fast single-pass, high-quality singlepass, or high-quality multipass -// video encoding. -const ( - // H264QualityTuningLevelSinglePass is a H264QualityTuningLevel enum value - H264QualityTuningLevelSinglePass = "SINGLE_PASS" - - // H264QualityTuningLevelSinglePassHq is a H264QualityTuningLevel enum value - H264QualityTuningLevelSinglePassHq = "SINGLE_PASS_HQ" - - // H264QualityTuningLevelMultiPassHq is a H264QualityTuningLevel enum value - H264QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ" -) - -// Rate control mode. 
CQ uses constant quantizer (qp), ABR (average bitrate) -// does not write HRD parameters. -const ( - // H264RateControlModeVbr is a H264RateControlMode enum value - H264RateControlModeVbr = "VBR" - - // H264RateControlModeCbr is a H264RateControlMode enum value - H264RateControlModeCbr = "CBR" -) - -// Places a PPS header on each encoded picture, even if repeated. -const ( - // H264RepeatPpsDisabled is a H264RepeatPps enum value - H264RepeatPpsDisabled = "DISABLED" - - // H264RepeatPpsEnabled is a H264RepeatPps enum value - H264RepeatPpsEnabled = "ENABLED" -) - -// Scene change detection (inserts I-frames on scene changes). -const ( - // H264SceneChangeDetectDisabled is a H264SceneChangeDetect enum value - H264SceneChangeDetectDisabled = "DISABLED" - - // H264SceneChangeDetectEnabled is a H264SceneChangeDetect enum value - H264SceneChangeDetectEnabled = "ENABLED" -) - -// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled -// as 25fps, and audio is sped up correspondingly. -const ( - // H264SlowPalDisabled is a H264SlowPal enum value - H264SlowPalDisabled = "DISABLED" - - // H264SlowPalEnabled is a H264SlowPal enum value - H264SlowPalEnabled = "ENABLED" -) - -// Adjust quantization within each frame based on spatial variation of content -// complexity. -const ( - // H264SpatialAdaptiveQuantizationDisabled is a H264SpatialAdaptiveQuantization enum value - H264SpatialAdaptiveQuantizationDisabled = "DISABLED" - - // H264SpatialAdaptiveQuantizationEnabled is a H264SpatialAdaptiveQuantization enum value - H264SpatialAdaptiveQuantizationEnabled = "ENABLED" -) - -// Produces a bitstream compliant with SMPTE RP-2027. -const ( - // H264SyntaxDefault is a H264Syntax enum value - H264SyntaxDefault = "DEFAULT" - - // H264SyntaxRp2027 is a H264Syntax enum value - H264SyntaxRp2027 = "RP2027" -) - -// This field applies only if the Streams > Advanced > Framerate (framerate) -// field is set to 29.970. 
This field works with the Streams > Advanced > Preprocessors -// > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced -// Mode field (interlace_mode) to identify the scan type for the output: Progressive, -// Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output -// from 23.976 input. - Soft: produces 23.976; the player converts this output -// to 29.97i. -const ( - // H264TelecineNone is a H264Telecine enum value - H264TelecineNone = "NONE" - - // H264TelecineSoft is a H264Telecine enum value - H264TelecineSoft = "SOFT" - - // H264TelecineHard is a H264Telecine enum value - H264TelecineHard = "HARD" -) - -// Adjust quantization within each frame based on temporal variation of content -// complexity. -const ( - // H264TemporalAdaptiveQuantizationDisabled is a H264TemporalAdaptiveQuantization enum value - H264TemporalAdaptiveQuantizationDisabled = "DISABLED" - - // H264TemporalAdaptiveQuantizationEnabled is a H264TemporalAdaptiveQuantization enum value - H264TemporalAdaptiveQuantizationEnabled = "ENABLED" -) - -// Inserts timecode for each frame as 4 bytes of an unregistered SEI message. -const ( - // H264UnregisteredSeiTimecodeDisabled is a H264UnregisteredSeiTimecode enum value - H264UnregisteredSeiTimecodeDisabled = "DISABLED" - - // H264UnregisteredSeiTimecodeEnabled is a H264UnregisteredSeiTimecode enum value - H264UnregisteredSeiTimecodeEnabled = "ENABLED" -) - -// Adaptive quantization. Allows intra-frame quantizers to vary to improve visual -// quality. 
-const ( - // H265AdaptiveQuantizationOff is a H265AdaptiveQuantization enum value - H265AdaptiveQuantizationOff = "OFF" - - // H265AdaptiveQuantizationLow is a H265AdaptiveQuantization enum value - H265AdaptiveQuantizationLow = "LOW" - - // H265AdaptiveQuantizationMedium is a H265AdaptiveQuantization enum value - H265AdaptiveQuantizationMedium = "MEDIUM" - - // H265AdaptiveQuantizationHigh is a H265AdaptiveQuantization enum value - H265AdaptiveQuantizationHigh = "HIGH" - - // H265AdaptiveQuantizationHigher is a H265AdaptiveQuantization enum value - H265AdaptiveQuantizationHigher = "HIGHER" - - // H265AdaptiveQuantizationMax is a H265AdaptiveQuantization enum value - H265AdaptiveQuantizationMax = "MAX" -) - -// Enables Alternate Transfer Function SEI message for outputs using Hybrid -// Log Gamma (HLG) Electro-Optical Transfer Function (EOTF). -const ( - // H265AlternateTransferFunctionSeiDisabled is a H265AlternateTransferFunctionSei enum value - H265AlternateTransferFunctionSeiDisabled = "DISABLED" - - // H265AlternateTransferFunctionSeiEnabled is a H265AlternateTransferFunctionSei enum value - H265AlternateTransferFunctionSeiEnabled = "ENABLED" -) - -// H.265 Level. 
-const ( - // H265CodecLevelAuto is a H265CodecLevel enum value - H265CodecLevelAuto = "AUTO" - - // H265CodecLevelLevel1 is a H265CodecLevel enum value - H265CodecLevelLevel1 = "LEVEL_1" - - // H265CodecLevelLevel2 is a H265CodecLevel enum value - H265CodecLevelLevel2 = "LEVEL_2" - - // H265CodecLevelLevel21 is a H265CodecLevel enum value - H265CodecLevelLevel21 = "LEVEL_2_1" - - // H265CodecLevelLevel3 is a H265CodecLevel enum value - H265CodecLevelLevel3 = "LEVEL_3" - - // H265CodecLevelLevel31 is a H265CodecLevel enum value - H265CodecLevelLevel31 = "LEVEL_3_1" - - // H265CodecLevelLevel4 is a H265CodecLevel enum value - H265CodecLevelLevel4 = "LEVEL_4" - - // H265CodecLevelLevel41 is a H265CodecLevel enum value - H265CodecLevelLevel41 = "LEVEL_4_1" - - // H265CodecLevelLevel5 is a H265CodecLevel enum value - H265CodecLevelLevel5 = "LEVEL_5" - - // H265CodecLevelLevel51 is a H265CodecLevel enum value - H265CodecLevelLevel51 = "LEVEL_5_1" - - // H265CodecLevelLevel52 is a H265CodecLevel enum value - H265CodecLevelLevel52 = "LEVEL_5_2" - - // H265CodecLevelLevel6 is a H265CodecLevel enum value - H265CodecLevelLevel6 = "LEVEL_6" - - // H265CodecLevelLevel61 is a H265CodecLevel enum value - H265CodecLevelLevel61 = "LEVEL_6_1" - - // H265CodecLevelLevel62 is a H265CodecLevel enum value - H265CodecLevelLevel62 = "LEVEL_6_2" -) - -// Represents the Profile and Tier, per the HEVC (H.265) specification. Selections -// are grouped as [Profile] / [Tier], so "Main/High" represents Main Profile -// with High Tier. 4:2:2 profiles are only available with the HEVC 4:2:2 License. 
-const ( - // H265CodecProfileMainMain is a H265CodecProfile enum value - H265CodecProfileMainMain = "MAIN_MAIN" - - // H265CodecProfileMainHigh is a H265CodecProfile enum value - H265CodecProfileMainHigh = "MAIN_HIGH" - - // H265CodecProfileMain10Main is a H265CodecProfile enum value - H265CodecProfileMain10Main = "MAIN10_MAIN" - - // H265CodecProfileMain10High is a H265CodecProfile enum value - H265CodecProfileMain10High = "MAIN10_HIGH" - - // H265CodecProfileMain4228bitMain is a H265CodecProfile enum value - H265CodecProfileMain4228bitMain = "MAIN_422_8BIT_MAIN" - - // H265CodecProfileMain4228bitHigh is a H265CodecProfile enum value - H265CodecProfileMain4228bitHigh = "MAIN_422_8BIT_HIGH" - - // H265CodecProfileMain42210bitMain is a H265CodecProfile enum value - H265CodecProfileMain42210bitMain = "MAIN_422_10BIT_MAIN" - - // H265CodecProfileMain42210bitHigh is a H265CodecProfile enum value - H265CodecProfileMain42210bitHigh = "MAIN_422_10BIT_HIGH" -) - -// Adjust quantization within each frame to reduce flicker or 'pop' on I-frames. -const ( - // H265FlickerAdaptiveQuantizationDisabled is a H265FlickerAdaptiveQuantization enum value - H265FlickerAdaptiveQuantizationDisabled = "DISABLED" - - // H265FlickerAdaptiveQuantizationEnabled is a H265FlickerAdaptiveQuantization enum value - H265FlickerAdaptiveQuantizationEnabled = "ENABLED" -) - -// Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want -// the service to use the framerate from the input. Using the console, do this -// by choosing INITIALIZE_FROM_SOURCE for Framerate. -const ( - // H265FramerateControlInitializeFromSource is a H265FramerateControl enum value - H265FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" - - // H265FramerateControlSpecified is a H265FramerateControl enum value - H265FramerateControlSpecified = "SPECIFIED" -) - -// When set to INTERPOLATE, produces smoother motion during framerate conversion. 
-const ( - // H265FramerateConversionAlgorithmDuplicateDrop is a H265FramerateConversionAlgorithm enum value - H265FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" - - // H265FramerateConversionAlgorithmInterpolate is a H265FramerateConversionAlgorithm enum value - H265FramerateConversionAlgorithmInterpolate = "INTERPOLATE" -) - -// If enable, use reference B frames for GOP structures that have B frames > -// 1. -const ( - // H265GopBReferenceDisabled is a H265GopBReference enum value - H265GopBReferenceDisabled = "DISABLED" - - // H265GopBReferenceEnabled is a H265GopBReference enum value - H265GopBReferenceEnabled = "ENABLED" -) - -// Indicates if the GOP Size in H265 is specified in frames or seconds. If seconds -// the system will convert the GOP Size into a frame count at run time. -const ( - // H265GopSizeUnitsFrames is a H265GopSizeUnits enum value - H265GopSizeUnitsFrames = "FRAMES" - - // H265GopSizeUnitsSeconds is a H265GopSizeUnits enum value - H265GopSizeUnitsSeconds = "SECONDS" -) - -// Use Interlace mode (InterlaceMode) to choose the scan line type for the output. -// * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce -// interlaced output with the entire output having the same field polarity (top -// or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default -// Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, -// behavior depends on the input scan type. - If the source is interlaced, the -// output will be interlaced with the same polarity as the source (it will follow -// the source). The output could therefore be a mix of "top field first" and -// "bottom field first". - If the source is progressive, the output will be -// interlaced with "top field first" or "bottom field first" polarity, depending -// on which of the Follow options you chose. 
-const ( - // H265InterlaceModeProgressive is a H265InterlaceMode enum value - H265InterlaceModeProgressive = "PROGRESSIVE" - - // H265InterlaceModeTopField is a H265InterlaceMode enum value - H265InterlaceModeTopField = "TOP_FIELD" - - // H265InterlaceModeBottomField is a H265InterlaceMode enum value - H265InterlaceModeBottomField = "BOTTOM_FIELD" - - // H265InterlaceModeFollowTopField is a H265InterlaceMode enum value - H265InterlaceModeFollowTopField = "FOLLOW_TOP_FIELD" - - // H265InterlaceModeFollowBottomField is a H265InterlaceMode enum value - H265InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD" -) - -// Using the API, enable ParFollowSource if you want the service to use the -// pixel aspect ratio from the input. Using the console, do this by choosing -// Follow source for Pixel aspect ratio. -const ( - // H265ParControlInitializeFromSource is a H265ParControl enum value - H265ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" - - // H265ParControlSpecified is a H265ParControl enum value - H265ParControlSpecified = "SPECIFIED" -) - -// Use Quality tuning level (H265QualityTuningLevel) to specifiy whether to -// use fast single-pass, high-quality singlepass, or high-quality multipass -// video encoding. -const ( - // H265QualityTuningLevelSinglePass is a H265QualityTuningLevel enum value - H265QualityTuningLevelSinglePass = "SINGLE_PASS" - - // H265QualityTuningLevelSinglePassHq is a H265QualityTuningLevel enum value - H265QualityTuningLevelSinglePassHq = "SINGLE_PASS_HQ" - - // H265QualityTuningLevelMultiPassHq is a H265QualityTuningLevel enum value - H265QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ" -) - -// Rate control mode. CQ uses constant quantizer (qp), ABR (average bitrate) -// does not write HRD parameters. 
-const ( - // H265RateControlModeVbr is a H265RateControlMode enum value - H265RateControlModeVbr = "VBR" - - // H265RateControlModeCbr is a H265RateControlMode enum value - H265RateControlModeCbr = "CBR" -) - -// Specify Sample Adaptive Offset (SAO) filter strength. Adaptive mode dynamically -// selects best strength based on content -const ( - // H265SampleAdaptiveOffsetFilterModeDefault is a H265SampleAdaptiveOffsetFilterMode enum value - H265SampleAdaptiveOffsetFilterModeDefault = "DEFAULT" - - // H265SampleAdaptiveOffsetFilterModeAdaptive is a H265SampleAdaptiveOffsetFilterMode enum value - H265SampleAdaptiveOffsetFilterModeAdaptive = "ADAPTIVE" - - // H265SampleAdaptiveOffsetFilterModeOff is a H265SampleAdaptiveOffsetFilterMode enum value - H265SampleAdaptiveOffsetFilterModeOff = "OFF" -) - -// Scene change detection (inserts I-frames on scene changes). -const ( - // H265SceneChangeDetectDisabled is a H265SceneChangeDetect enum value - H265SceneChangeDetectDisabled = "DISABLED" - - // H265SceneChangeDetectEnabled is a H265SceneChangeDetect enum value - H265SceneChangeDetectEnabled = "ENABLED" -) - -// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled -// as 25fps, and audio is sped up correspondingly. -const ( - // H265SlowPalDisabled is a H265SlowPal enum value - H265SlowPalDisabled = "DISABLED" - - // H265SlowPalEnabled is a H265SlowPal enum value - H265SlowPalEnabled = "ENABLED" -) - -// Adjust quantization within each frame based on spatial variation of content -// complexity. -const ( - // H265SpatialAdaptiveQuantizationDisabled is a H265SpatialAdaptiveQuantization enum value - H265SpatialAdaptiveQuantizationDisabled = "DISABLED" - - // H265SpatialAdaptiveQuantizationEnabled is a H265SpatialAdaptiveQuantization enum value - H265SpatialAdaptiveQuantizationEnabled = "ENABLED" -) - -// This field applies only if the Streams > Advanced > Framerate (framerate) -// field is set to 29.970. 
This field works with the Streams > Advanced > Preprocessors -// > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced -// Mode field (interlace_mode) to identify the scan type for the output: Progressive, -// Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output -// from 23.976 input. - Soft: produces 23.976; the player converts this output -// to 29.97i. -const ( - // H265TelecineNone is a H265Telecine enum value - H265TelecineNone = "NONE" - - // H265TelecineSoft is a H265Telecine enum value - H265TelecineSoft = "SOFT" - - // H265TelecineHard is a H265Telecine enum value - H265TelecineHard = "HARD" -) - -// Adjust quantization within each frame based on temporal variation of content -// complexity. -const ( - // H265TemporalAdaptiveQuantizationDisabled is a H265TemporalAdaptiveQuantization enum value - H265TemporalAdaptiveQuantizationDisabled = "DISABLED" - - // H265TemporalAdaptiveQuantizationEnabled is a H265TemporalAdaptiveQuantization enum value - H265TemporalAdaptiveQuantizationEnabled = "ENABLED" -) - -// Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers -// are supported depending on GOP structure: I- and P-frames form one layer, -// reference B-frames can form a second layer and non-reference b-frames can -// form a third layer. Decoders can optionally decode only the lower temporal -// layers to generate a lower frame rate output. For example, given a bitstream -// with temporal IDs and with b-frames = 1 (i.e. IbPbPb display order), a decoder -// could decode all the frames for full frame rate output or only the I and -// P frames (lowest temporal layer) for a half frame rate output. 
-const ( - // H265TemporalIdsDisabled is a H265TemporalIds enum value - H265TemporalIdsDisabled = "DISABLED" - - // H265TemporalIdsEnabled is a H265TemporalIds enum value - H265TemporalIdsEnabled = "ENABLED" -) - -// Enable use of tiles, allowing horizontal as well as vertical subdivision -// of the encoded pictures. -const ( - // H265TilesDisabled is a H265Tiles enum value - H265TilesDisabled = "DISABLED" - - // H265TilesEnabled is a H265Tiles enum value - H265TilesEnabled = "ENABLED" -) - -// Inserts timecode for each frame as 4 bytes of an unregistered SEI message. -const ( - // H265UnregisteredSeiTimecodeDisabled is a H265UnregisteredSeiTimecode enum value - H265UnregisteredSeiTimecodeDisabled = "DISABLED" - - // H265UnregisteredSeiTimecodeEnabled is a H265UnregisteredSeiTimecode enum value - H265UnregisteredSeiTimecodeEnabled = "ENABLED" -) - -const ( - // HlsAdMarkersElemental is a HlsAdMarkers enum value - HlsAdMarkersElemental = "ELEMENTAL" - - // HlsAdMarkersElementalScte35 is a HlsAdMarkers enum value - HlsAdMarkersElementalScte35 = "ELEMENTAL_SCTE35" -) - -// Four types of audio-only tracks are supported: Audio-Only Variant Stream -// The client can play back this audio-only stream instead of video in low-bandwidth -// scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest. Alternate -// Audio, Auto Select, Default Alternate rendition that the client should try -// to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest -// with DEFAULT=YES, AUTOSELECT=YES Alternate Audio, Auto Select, Not Default -// Alternate rendition that the client may try to play back by default. Represented -// as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=YES Alternate -// Audio, not Auto Select Alternate rendition that the client will not try to -// play back by default. 
Represented as an EXT-X-MEDIA in the HLS manifest with -// DEFAULT=NO, AUTOSELECT=NO -const ( - // HlsAudioTrackTypeAlternateAudioAutoSelectDefault is a HlsAudioTrackType enum value - HlsAudioTrackTypeAlternateAudioAutoSelectDefault = "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT" - - // HlsAudioTrackTypeAlternateAudioAutoSelect is a HlsAudioTrackType enum value - HlsAudioTrackTypeAlternateAudioAutoSelect = "ALTERNATE_AUDIO_AUTO_SELECT" - - // HlsAudioTrackTypeAlternateAudioNotAutoSelect is a HlsAudioTrackType enum value - HlsAudioTrackTypeAlternateAudioNotAutoSelect = "ALTERNATE_AUDIO_NOT_AUTO_SELECT" - - // HlsAudioTrackTypeAudioOnlyVariantStream is a HlsAudioTrackType enum value - HlsAudioTrackTypeAudioOnlyVariantStream = "AUDIO_ONLY_VARIANT_STREAM" -) - -// Applies only to 608 Embedded output captions. Insert: Include CLOSED-CAPTIONS -// lines in the manifest. Specify at least one language in the CC1 Language -// Code field. One CLOSED-CAPTION line is added for each Language Code you specify. -// Make sure to specify the languages in the order in which they appear in the -// original source (if the source is embedded format) or the order of the caption -// selectors (if the source is other than embedded). Otherwise, languages in -// the manifest will not match up properly with the output captions. None: Include -// CLOSED-CAPTIONS=NONE line in the manifest. Omit: Omit any CLOSED-CAPTIONS -// line from the manifest. -const ( - // HlsCaptionLanguageSettingInsert is a HlsCaptionLanguageSetting enum value - HlsCaptionLanguageSettingInsert = "INSERT" - - // HlsCaptionLanguageSettingOmit is a HlsCaptionLanguageSetting enum value - HlsCaptionLanguageSettingOmit = "OMIT" - - // HlsCaptionLanguageSettingNone is a HlsCaptionLanguageSetting enum value - HlsCaptionLanguageSettingNone = "NONE" -) - -// When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client -// from saving media segments for later replay. 
-const ( - // HlsClientCacheDisabled is a HlsClientCache enum value - HlsClientCacheDisabled = "DISABLED" - - // HlsClientCacheEnabled is a HlsClientCache enum value - HlsClientCacheEnabled = "ENABLED" -) - -// Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist -// generation. -const ( - // HlsCodecSpecificationRfc6381 is a HlsCodecSpecification enum value - HlsCodecSpecificationRfc6381 = "RFC_6381" - - // HlsCodecSpecificationRfc4281 is a HlsCodecSpecification enum value - HlsCodecSpecificationRfc4281 = "RFC_4281" -) - -// Indicates whether segments should be placed in subdirectories. -const ( - // HlsDirectoryStructureSingleDirectory is a HlsDirectoryStructure enum value - HlsDirectoryStructureSingleDirectory = "SINGLE_DIRECTORY" - - // HlsDirectoryStructureSubdirectoryPerStream is a HlsDirectoryStructure enum value - HlsDirectoryStructureSubdirectoryPerStream = "SUBDIRECTORY_PER_STREAM" -) - -// Encrypts the segments with the given encryption scheme. Leave blank to disable. -// Selecting 'Disabled' in the web interface also disables encryption. -const ( - // HlsEncryptionTypeAes128 is a HlsEncryptionType enum value - HlsEncryptionTypeAes128 = "AES128" - - // HlsEncryptionTypeSampleAes is a HlsEncryptionType enum value - HlsEncryptionTypeSampleAes = "SAMPLE_AES" -) - -// When set to INCLUDE, writes I-Frame Only Manifest in addition to the HLS -// manifest -const ( - // HlsIFrameOnlyManifestInclude is a HlsIFrameOnlyManifest enum value - HlsIFrameOnlyManifestInclude = "INCLUDE" - - // HlsIFrameOnlyManifestExclude is a HlsIFrameOnlyManifest enum value - HlsIFrameOnlyManifestExclude = "EXCLUDE" -) - -// The Initialization Vector is a 128-bit number used in conjunction with the -// key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed -// in the manifest. Otherwise Initialization Vector is not in the manifest. 
-const ( - // HlsInitializationVectorInManifestInclude is a HlsInitializationVectorInManifest enum value - HlsInitializationVectorInManifestInclude = "INCLUDE" - - // HlsInitializationVectorInManifestExclude is a HlsInitializationVectorInManifest enum value - HlsInitializationVectorInManifestExclude = "EXCLUDE" -) - -// Indicates which type of key provider is used for encryption. -const ( - // HlsKeyProviderTypeSpeke is a HlsKeyProviderType enum value - HlsKeyProviderTypeSpeke = "SPEKE" - - // HlsKeyProviderTypeStaticKey is a HlsKeyProviderType enum value - HlsKeyProviderTypeStaticKey = "STATIC_KEY" -) - -// When set to GZIP, compresses HLS playlist. -const ( - // HlsManifestCompressionGzip is a HlsManifestCompression enum value - HlsManifestCompressionGzip = "GZIP" - - // HlsManifestCompressionNone is a HlsManifestCompression enum value - HlsManifestCompressionNone = "NONE" -) - -// Indicates whether the output manifest should use floating point values for -// segment duration. -const ( - // HlsManifestDurationFormatFloatingPoint is a HlsManifestDurationFormat enum value - HlsManifestDurationFormatFloatingPoint = "FLOATING_POINT" - - // HlsManifestDurationFormatInteger is a HlsManifestDurationFormat enum value - HlsManifestDurationFormatInteger = "INTEGER" -) - -// Indicates whether the .m3u8 manifest file should be generated for this HLS -// output group. -const ( - // HlsOutputSelectionManifestsAndSegments is a HlsOutputSelection enum value - HlsOutputSelectionManifestsAndSegments = "MANIFESTS_AND_SEGMENTS" - - // HlsOutputSelectionSegmentsOnly is a HlsOutputSelection enum value - HlsOutputSelectionSegmentsOnly = "SEGMENTS_ONLY" -) - -// Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. -// The value is calculated as follows: either the program date and time are -// initialized using the input timecode source, or the time is initialized using -// the input timecode source and the date is initialized using the timestamp_offset. 
-const ( - // HlsProgramDateTimeInclude is a HlsProgramDateTime enum value - HlsProgramDateTimeInclude = "INCLUDE" - - // HlsProgramDateTimeExclude is a HlsProgramDateTime enum value - HlsProgramDateTimeExclude = "EXCLUDE" -) - -// When set to SINGLE_FILE, emits program as a single media resource (.ts) file, -// uses #EXT-X-BYTERANGE tags to index segment for playback. -const ( - // HlsSegmentControlSingleFile is a HlsSegmentControl enum value - HlsSegmentControlSingleFile = "SINGLE_FILE" - - // HlsSegmentControlSegmentedFiles is a HlsSegmentControl enum value - HlsSegmentControlSegmentedFiles = "SEGMENTED_FILES" -) - -// Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag -// of variant manifest. -const ( - // HlsStreamInfResolutionInclude is a HlsStreamInfResolution enum value - HlsStreamInfResolutionInclude = "INCLUDE" - - // HlsStreamInfResolutionExclude is a HlsStreamInfResolution enum value - HlsStreamInfResolutionExclude = "EXCLUDE" -) - -// Indicates ID3 frame that has the timecode. -const ( - // HlsTimedMetadataId3FrameNone is a HlsTimedMetadataId3Frame enum value - HlsTimedMetadataId3FrameNone = "NONE" - - // HlsTimedMetadataId3FramePriv is a HlsTimedMetadataId3Frame enum value - HlsTimedMetadataId3FramePriv = "PRIV" - - // HlsTimedMetadataId3FrameTdrl is a HlsTimedMetadataId3Frame enum value - HlsTimedMetadataId3FrameTdrl = "TDRL" -) - -// Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. -// Default is disabled. Only manaully controllable for MPEG2 and uncompressed -// video inputs. -const ( - // InputDeblockFilterEnabled is a InputDeblockFilter enum value - InputDeblockFilterEnabled = "ENABLED" - - // InputDeblockFilterDisabled is a InputDeblockFilter enum value - InputDeblockFilterDisabled = "DISABLED" -) - -// Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default -// is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video -// inputs. 
-const ( - // InputDenoiseFilterEnabled is a InputDenoiseFilter enum value - InputDenoiseFilterEnabled = "ENABLED" - - // InputDenoiseFilterDisabled is a InputDenoiseFilter enum value - InputDenoiseFilterDisabled = "DISABLED" -) - -// Use Filter enable (InputFilterEnable) to specify how the transcoding service -// applies the denoise and deblock filters. You must also enable the filters -// separately, with Denoise (InputDenoiseFilter) and Deblock (InputDeblockFilter). -// * Auto - The transcoding service determines whether to apply filtering, depending -// on input type and quality. * Disable - The input is not filtered. This is -// true even if you use the API to enable them in (InputDeblockFilter) and (InputDeblockFilter). -// * Force - The in put is filtered regardless of input type. -const ( - // InputFilterEnableAuto is a InputFilterEnable enum value - InputFilterEnableAuto = "AUTO" - - // InputFilterEnableDisable is a InputFilterEnable enum value - InputFilterEnableDisable = "DISABLE" - - // InputFilterEnableForce is a InputFilterEnable enum value - InputFilterEnableForce = "FORCE" -) - -// Set PSI control (InputPsiControl) for transport stream inputs to specify -// which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio -// and video. * Use PSI - Scan only PSI data. -const ( - // InputPsiControlIgnorePsi is a InputPsiControl enum value - InputPsiControlIgnorePsi = "IGNORE_PSI" - - // InputPsiControlUsePsi is a InputPsiControl enum value - InputPsiControlUsePsi = "USE_PSI" -) - -// Use Timecode source (InputTimecodeSource) to specify how timecode information -// from your input is adjusted and encoded in all outputs for the job. Default -// is embedded. Set to Embedded (EMBEDDED) to use the timecode that is in the -// input video. If no embedded timecode is in the source, will set the timecode -// for the first frame to 00:00:00:00. Set to Start at 0 (ZEROBASED) to set -// the timecode of the initial frame to 00:00:00:00. 
Set to Specified start -// (SPECIFIEDSTART) to provide the initial timecode yourself the setting (Start). -const ( - // InputTimecodeSourceEmbedded is a InputTimecodeSource enum value - InputTimecodeSourceEmbedded = "EMBEDDED" - - // InputTimecodeSourceZerobased is a InputTimecodeSource enum value - InputTimecodeSourceZerobased = "ZEROBASED" - - // InputTimecodeSourceSpecifiedstart is a InputTimecodeSource enum value - InputTimecodeSourceSpecifiedstart = "SPECIFIEDSTART" -) - -// A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR. -const ( - // JobStatusSubmitted is a JobStatus enum value - JobStatusSubmitted = "SUBMITTED" - - // JobStatusProgressing is a JobStatus enum value - JobStatusProgressing = "PROGRESSING" - - // JobStatusComplete is a JobStatus enum value - JobStatusComplete = "COMPLETE" - - // JobStatusCanceled is a JobStatus enum value - JobStatusCanceled = "CANCELED" - - // JobStatusError is a JobStatus enum value - JobStatusError = "ERROR" -) - -// Optional. When you request a list of job templates, you can choose to list -// them alphabetically by NAME or chronologically by CREATION_DATE. If you don't -// specify, the service will list them by name. 
-const ( - // JobTemplateListByName is a JobTemplateListBy enum value - JobTemplateListByName = "NAME" - - // JobTemplateListByCreationDate is a JobTemplateListBy enum value - JobTemplateListByCreationDate = "CREATION_DATE" - - // JobTemplateListBySystem is a JobTemplateListBy enum value - JobTemplateListBySystem = "SYSTEM" -) - -// Code to specify the language, following the specification "ISO 639-2 three-digit -// code":http://www.loc.gov/standards/iso639-2/ -const ( - // LanguageCodeEng is a LanguageCode enum value - LanguageCodeEng = "ENG" - - // LanguageCodeSpa is a LanguageCode enum value - LanguageCodeSpa = "SPA" - - // LanguageCodeFra is a LanguageCode enum value - LanguageCodeFra = "FRA" - - // LanguageCodeDeu is a LanguageCode enum value - LanguageCodeDeu = "DEU" - - // LanguageCodeGer is a LanguageCode enum value - LanguageCodeGer = "GER" - - // LanguageCodeZho is a LanguageCode enum value - LanguageCodeZho = "ZHO" - - // LanguageCodeAra is a LanguageCode enum value - LanguageCodeAra = "ARA" - - // LanguageCodeHin is a LanguageCode enum value - LanguageCodeHin = "HIN" - - // LanguageCodeJpn is a LanguageCode enum value - LanguageCodeJpn = "JPN" - - // LanguageCodeRus is a LanguageCode enum value - LanguageCodeRus = "RUS" - - // LanguageCodePor is a LanguageCode enum value - LanguageCodePor = "POR" - - // LanguageCodeIta is a LanguageCode enum value - LanguageCodeIta = "ITA" - - // LanguageCodeUrd is a LanguageCode enum value - LanguageCodeUrd = "URD" - - // LanguageCodeVie is a LanguageCode enum value - LanguageCodeVie = "VIE" - - // LanguageCodeKor is a LanguageCode enum value - LanguageCodeKor = "KOR" - - // LanguageCodePan is a LanguageCode enum value - LanguageCodePan = "PAN" - - // LanguageCodeAbk is a LanguageCode enum value - LanguageCodeAbk = "ABK" - - // LanguageCodeAar is a LanguageCode enum value - LanguageCodeAar = "AAR" - - // LanguageCodeAfr is a LanguageCode enum value - LanguageCodeAfr = "AFR" - - // LanguageCodeAka is a LanguageCode enum 
value - LanguageCodeAka = "AKA" - - // LanguageCodeSqi is a LanguageCode enum value - LanguageCodeSqi = "SQI" - - // LanguageCodeAmh is a LanguageCode enum value - LanguageCodeAmh = "AMH" - - // LanguageCodeArg is a LanguageCode enum value - LanguageCodeArg = "ARG" - - // LanguageCodeHye is a LanguageCode enum value - LanguageCodeHye = "HYE" - - // LanguageCodeAsm is a LanguageCode enum value - LanguageCodeAsm = "ASM" - - // LanguageCodeAva is a LanguageCode enum value - LanguageCodeAva = "AVA" - - // LanguageCodeAve is a LanguageCode enum value - LanguageCodeAve = "AVE" - - // LanguageCodeAym is a LanguageCode enum value - LanguageCodeAym = "AYM" - - // LanguageCodeAze is a LanguageCode enum value - LanguageCodeAze = "AZE" - - // LanguageCodeBam is a LanguageCode enum value - LanguageCodeBam = "BAM" - - // LanguageCodeBak is a LanguageCode enum value - LanguageCodeBak = "BAK" - - // LanguageCodeEus is a LanguageCode enum value - LanguageCodeEus = "EUS" - - // LanguageCodeBel is a LanguageCode enum value - LanguageCodeBel = "BEL" - - // LanguageCodeBen is a LanguageCode enum value - LanguageCodeBen = "BEN" - - // LanguageCodeBih is a LanguageCode enum value - LanguageCodeBih = "BIH" - - // LanguageCodeBis is a LanguageCode enum value - LanguageCodeBis = "BIS" - - // LanguageCodeBos is a LanguageCode enum value - LanguageCodeBos = "BOS" - - // LanguageCodeBre is a LanguageCode enum value - LanguageCodeBre = "BRE" - - // LanguageCodeBul is a LanguageCode enum value - LanguageCodeBul = "BUL" - - // LanguageCodeMya is a LanguageCode enum value - LanguageCodeMya = "MYA" - - // LanguageCodeCat is a LanguageCode enum value - LanguageCodeCat = "CAT" - - // LanguageCodeKhm is a LanguageCode enum value - LanguageCodeKhm = "KHM" - - // LanguageCodeCha is a LanguageCode enum value - LanguageCodeCha = "CHA" - - // LanguageCodeChe is a LanguageCode enum value - LanguageCodeChe = "CHE" - - // LanguageCodeNya is a LanguageCode enum value - LanguageCodeNya = "NYA" - - // 
LanguageCodeChu is a LanguageCode enum value - LanguageCodeChu = "CHU" - - // LanguageCodeChv is a LanguageCode enum value - LanguageCodeChv = "CHV" - - // LanguageCodeCor is a LanguageCode enum value - LanguageCodeCor = "COR" - - // LanguageCodeCos is a LanguageCode enum value - LanguageCodeCos = "COS" - - // LanguageCodeCre is a LanguageCode enum value - LanguageCodeCre = "CRE" - - // LanguageCodeHrv is a LanguageCode enum value - LanguageCodeHrv = "HRV" - - // LanguageCodeCes is a LanguageCode enum value - LanguageCodeCes = "CES" - - // LanguageCodeDan is a LanguageCode enum value - LanguageCodeDan = "DAN" - - // LanguageCodeDiv is a LanguageCode enum value - LanguageCodeDiv = "DIV" - - // LanguageCodeNld is a LanguageCode enum value - LanguageCodeNld = "NLD" - - // LanguageCodeDzo is a LanguageCode enum value - LanguageCodeDzo = "DZO" - - // LanguageCodeEnm is a LanguageCode enum value - LanguageCodeEnm = "ENM" - - // LanguageCodeEpo is a LanguageCode enum value - LanguageCodeEpo = "EPO" - - // LanguageCodeEst is a LanguageCode enum value - LanguageCodeEst = "EST" - - // LanguageCodeEwe is a LanguageCode enum value - LanguageCodeEwe = "EWE" - - // LanguageCodeFao is a LanguageCode enum value - LanguageCodeFao = "FAO" - - // LanguageCodeFij is a LanguageCode enum value - LanguageCodeFij = "FIJ" - - // LanguageCodeFin is a LanguageCode enum value - LanguageCodeFin = "FIN" - - // LanguageCodeFrm is a LanguageCode enum value - LanguageCodeFrm = "FRM" - - // LanguageCodeFul is a LanguageCode enum value - LanguageCodeFul = "FUL" - - // LanguageCodeGla is a LanguageCode enum value - LanguageCodeGla = "GLA" - - // LanguageCodeGlg is a LanguageCode enum value - LanguageCodeGlg = "GLG" - - // LanguageCodeLug is a LanguageCode enum value - LanguageCodeLug = "LUG" - - // LanguageCodeKat is a LanguageCode enum value - LanguageCodeKat = "KAT" - - // LanguageCodeEll is a LanguageCode enum value - LanguageCodeEll = "ELL" - - // LanguageCodeGrn is a LanguageCode enum value - 
LanguageCodeGrn = "GRN" - - // LanguageCodeGuj is a LanguageCode enum value - LanguageCodeGuj = "GUJ" - - // LanguageCodeHat is a LanguageCode enum value - LanguageCodeHat = "HAT" - - // LanguageCodeHau is a LanguageCode enum value - LanguageCodeHau = "HAU" - - // LanguageCodeHeb is a LanguageCode enum value - LanguageCodeHeb = "HEB" - - // LanguageCodeHer is a LanguageCode enum value - LanguageCodeHer = "HER" - - // LanguageCodeHmo is a LanguageCode enum value - LanguageCodeHmo = "HMO" - - // LanguageCodeHun is a LanguageCode enum value - LanguageCodeHun = "HUN" - - // LanguageCodeIsl is a LanguageCode enum value - LanguageCodeIsl = "ISL" - - // LanguageCodeIdo is a LanguageCode enum value - LanguageCodeIdo = "IDO" - - // LanguageCodeIbo is a LanguageCode enum value - LanguageCodeIbo = "IBO" - - // LanguageCodeInd is a LanguageCode enum value - LanguageCodeInd = "IND" - - // LanguageCodeIna is a LanguageCode enum value - LanguageCodeIna = "INA" - - // LanguageCodeIle is a LanguageCode enum value - LanguageCodeIle = "ILE" - - // LanguageCodeIku is a LanguageCode enum value - LanguageCodeIku = "IKU" - - // LanguageCodeIpk is a LanguageCode enum value - LanguageCodeIpk = "IPK" - - // LanguageCodeGle is a LanguageCode enum value - LanguageCodeGle = "GLE" - - // LanguageCodeJav is a LanguageCode enum value - LanguageCodeJav = "JAV" - - // LanguageCodeKal is a LanguageCode enum value - LanguageCodeKal = "KAL" - - // LanguageCodeKan is a LanguageCode enum value - LanguageCodeKan = "KAN" - - // LanguageCodeKau is a LanguageCode enum value - LanguageCodeKau = "KAU" - - // LanguageCodeKas is a LanguageCode enum value - LanguageCodeKas = "KAS" - - // LanguageCodeKaz is a LanguageCode enum value - LanguageCodeKaz = "KAZ" - - // LanguageCodeKik is a LanguageCode enum value - LanguageCodeKik = "KIK" - - // LanguageCodeKin is a LanguageCode enum value - LanguageCodeKin = "KIN" - - // LanguageCodeKir is a LanguageCode enum value - LanguageCodeKir = "KIR" - - // LanguageCodeKom is 
a LanguageCode enum value - LanguageCodeKom = "KOM" - - // LanguageCodeKon is a LanguageCode enum value - LanguageCodeKon = "KON" - - // LanguageCodeKua is a LanguageCode enum value - LanguageCodeKua = "KUA" - - // LanguageCodeKur is a LanguageCode enum value - LanguageCodeKur = "KUR" - - // LanguageCodeLao is a LanguageCode enum value - LanguageCodeLao = "LAO" - - // LanguageCodeLat is a LanguageCode enum value - LanguageCodeLat = "LAT" - - // LanguageCodeLav is a LanguageCode enum value - LanguageCodeLav = "LAV" - - // LanguageCodeLim is a LanguageCode enum value - LanguageCodeLim = "LIM" - - // LanguageCodeLin is a LanguageCode enum value - LanguageCodeLin = "LIN" - - // LanguageCodeLit is a LanguageCode enum value - LanguageCodeLit = "LIT" - - // LanguageCodeLub is a LanguageCode enum value - LanguageCodeLub = "LUB" - - // LanguageCodeLtz is a LanguageCode enum value - LanguageCodeLtz = "LTZ" - - // LanguageCodeMkd is a LanguageCode enum value - LanguageCodeMkd = "MKD" - - // LanguageCodeMlg is a LanguageCode enum value - LanguageCodeMlg = "MLG" - - // LanguageCodeMsa is a LanguageCode enum value - LanguageCodeMsa = "MSA" - - // LanguageCodeMal is a LanguageCode enum value - LanguageCodeMal = "MAL" - - // LanguageCodeMlt is a LanguageCode enum value - LanguageCodeMlt = "MLT" - - // LanguageCodeGlv is a LanguageCode enum value - LanguageCodeGlv = "GLV" - - // LanguageCodeMri is a LanguageCode enum value - LanguageCodeMri = "MRI" - - // LanguageCodeMar is a LanguageCode enum value - LanguageCodeMar = "MAR" - - // LanguageCodeMah is a LanguageCode enum value - LanguageCodeMah = "MAH" - - // LanguageCodeMon is a LanguageCode enum value - LanguageCodeMon = "MON" - - // LanguageCodeNau is a LanguageCode enum value - LanguageCodeNau = "NAU" - - // LanguageCodeNav is a LanguageCode enum value - LanguageCodeNav = "NAV" - - // LanguageCodeNde is a LanguageCode enum value - LanguageCodeNde = "NDE" - - // LanguageCodeNbl is a LanguageCode enum value - LanguageCodeNbl = 
"NBL" - - // LanguageCodeNdo is a LanguageCode enum value - LanguageCodeNdo = "NDO" - - // LanguageCodeNep is a LanguageCode enum value - LanguageCodeNep = "NEP" - - // LanguageCodeSme is a LanguageCode enum value - LanguageCodeSme = "SME" - - // LanguageCodeNor is a LanguageCode enum value - LanguageCodeNor = "NOR" - - // LanguageCodeNob is a LanguageCode enum value - LanguageCodeNob = "NOB" - - // LanguageCodeNno is a LanguageCode enum value - LanguageCodeNno = "NNO" - - // LanguageCodeOci is a LanguageCode enum value - LanguageCodeOci = "OCI" - - // LanguageCodeOji is a LanguageCode enum value - LanguageCodeOji = "OJI" - - // LanguageCodeOri is a LanguageCode enum value - LanguageCodeOri = "ORI" - - // LanguageCodeOrm is a LanguageCode enum value - LanguageCodeOrm = "ORM" - - // LanguageCodeOss is a LanguageCode enum value - LanguageCodeOss = "OSS" - - // LanguageCodePli is a LanguageCode enum value - LanguageCodePli = "PLI" - - // LanguageCodeFas is a LanguageCode enum value - LanguageCodeFas = "FAS" - - // LanguageCodePol is a LanguageCode enum value - LanguageCodePol = "POL" - - // LanguageCodePus is a LanguageCode enum value - LanguageCodePus = "PUS" - - // LanguageCodeQue is a LanguageCode enum value - LanguageCodeQue = "QUE" - - // LanguageCodeQaa is a LanguageCode enum value - LanguageCodeQaa = "QAA" - - // LanguageCodeRon is a LanguageCode enum value - LanguageCodeRon = "RON" - - // LanguageCodeRoh is a LanguageCode enum value - LanguageCodeRoh = "ROH" - - // LanguageCodeRun is a LanguageCode enum value - LanguageCodeRun = "RUN" - - // LanguageCodeSmo is a LanguageCode enum value - LanguageCodeSmo = "SMO" - - // LanguageCodeSag is a LanguageCode enum value - LanguageCodeSag = "SAG" - - // LanguageCodeSan is a LanguageCode enum value - LanguageCodeSan = "SAN" - - // LanguageCodeSrd is a LanguageCode enum value - LanguageCodeSrd = "SRD" - - // LanguageCodeSrb is a LanguageCode enum value - LanguageCodeSrb = "SRB" - - // LanguageCodeSna is a LanguageCode 
enum value - LanguageCodeSna = "SNA" - - // LanguageCodeIii is a LanguageCode enum value - LanguageCodeIii = "III" - - // LanguageCodeSnd is a LanguageCode enum value - LanguageCodeSnd = "SND" - - // LanguageCodeSin is a LanguageCode enum value - LanguageCodeSin = "SIN" - - // LanguageCodeSlk is a LanguageCode enum value - LanguageCodeSlk = "SLK" - - // LanguageCodeSlv is a LanguageCode enum value - LanguageCodeSlv = "SLV" - - // LanguageCodeSom is a LanguageCode enum value - LanguageCodeSom = "SOM" - - // LanguageCodeSot is a LanguageCode enum value - LanguageCodeSot = "SOT" - - // LanguageCodeSun is a LanguageCode enum value - LanguageCodeSun = "SUN" - - // LanguageCodeSwa is a LanguageCode enum value - LanguageCodeSwa = "SWA" - - // LanguageCodeSsw is a LanguageCode enum value - LanguageCodeSsw = "SSW" - - // LanguageCodeSwe is a LanguageCode enum value - LanguageCodeSwe = "SWE" - - // LanguageCodeTgl is a LanguageCode enum value - LanguageCodeTgl = "TGL" - - // LanguageCodeTah is a LanguageCode enum value - LanguageCodeTah = "TAH" - - // LanguageCodeTgk is a LanguageCode enum value - LanguageCodeTgk = "TGK" - - // LanguageCodeTam is a LanguageCode enum value - LanguageCodeTam = "TAM" - - // LanguageCodeTat is a LanguageCode enum value - LanguageCodeTat = "TAT" - - // LanguageCodeTel is a LanguageCode enum value - LanguageCodeTel = "TEL" - - // LanguageCodeTha is a LanguageCode enum value - LanguageCodeTha = "THA" - - // LanguageCodeBod is a LanguageCode enum value - LanguageCodeBod = "BOD" - - // LanguageCodeTir is a LanguageCode enum value - LanguageCodeTir = "TIR" - - // LanguageCodeTon is a LanguageCode enum value - LanguageCodeTon = "TON" - - // LanguageCodeTso is a LanguageCode enum value - LanguageCodeTso = "TSO" - - // LanguageCodeTsn is a LanguageCode enum value - LanguageCodeTsn = "TSN" - - // LanguageCodeTur is a LanguageCode enum value - LanguageCodeTur = "TUR" - - // LanguageCodeTuk is a LanguageCode enum value - LanguageCodeTuk = "TUK" - - // 
LanguageCodeTwi is a LanguageCode enum value - LanguageCodeTwi = "TWI" - - // LanguageCodeUig is a LanguageCode enum value - LanguageCodeUig = "UIG" - - // LanguageCodeUkr is a LanguageCode enum value - LanguageCodeUkr = "UKR" - - // LanguageCodeUzb is a LanguageCode enum value - LanguageCodeUzb = "UZB" - - // LanguageCodeVen is a LanguageCode enum value - LanguageCodeVen = "VEN" - - // LanguageCodeVol is a LanguageCode enum value - LanguageCodeVol = "VOL" - - // LanguageCodeWln is a LanguageCode enum value - LanguageCodeWln = "WLN" - - // LanguageCodeCym is a LanguageCode enum value - LanguageCodeCym = "CYM" - - // LanguageCodeFry is a LanguageCode enum value - LanguageCodeFry = "FRY" - - // LanguageCodeWol is a LanguageCode enum value - LanguageCodeWol = "WOL" - - // LanguageCodeXho is a LanguageCode enum value - LanguageCodeXho = "XHO" - - // LanguageCodeYid is a LanguageCode enum value - LanguageCodeYid = "YID" - - // LanguageCodeYor is a LanguageCode enum value - LanguageCodeYor = "YOR" - - // LanguageCodeZha is a LanguageCode enum value - LanguageCodeZha = "ZHA" - - // LanguageCodeZul is a LanguageCode enum value - LanguageCodeZul = "ZUL" - - // LanguageCodeOrj is a LanguageCode enum value - LanguageCodeOrj = "ORJ" - - // LanguageCodeQpc is a LanguageCode enum value - LanguageCodeQpc = "QPC" - - // LanguageCodeTng is a LanguageCode enum value - LanguageCodeTng = "TNG" -) - -// Selects between the DVB and ATSC buffer models for Dolby Digital audio. -const ( - // M2tsAudioBufferModelDvb is a M2tsAudioBufferModel enum value - M2tsAudioBufferModelDvb = "DVB" - - // M2tsAudioBufferModelAtsc is a M2tsAudioBufferModel enum value - M2tsAudioBufferModelAtsc = "ATSC" -) - -// Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX, -// use multiplex buffer model. If set to NONE, this can lead to lower latency, -// but low-memory devices may not be able to play back the stream without interruptions. 
-const ( - // M2tsBufferModelMultiplex is a M2tsBufferModel enum value - M2tsBufferModelMultiplex = "MULTIPLEX" - - // M2tsBufferModelNone is a M2tsBufferModel enum value - M2tsBufferModelNone = "NONE" -) - -// When set to VIDEO_AND_FIXED_INTERVALS, audio EBP markers will be added to -// partitions 3 and 4. The interval between these additional markers will be -// fixed, and will be slightly shorter than the video EBP marker interval. When -// set to VIDEO_INTERVAL, these additional markers will not be inserted. Only -// applicable when EBP segmentation markers are is selected (segmentationMarkers -// is EBP or EBP_LEGACY). -const ( - // M2tsEbpAudioIntervalVideoAndFixedIntervals is a M2tsEbpAudioInterval enum value - M2tsEbpAudioIntervalVideoAndFixedIntervals = "VIDEO_AND_FIXED_INTERVALS" - - // M2tsEbpAudioIntervalVideoInterval is a M2tsEbpAudioInterval enum value - M2tsEbpAudioIntervalVideoInterval = "VIDEO_INTERVAL" -) - -// Selects which PIDs to place EBP markers on. They can either be placed only -// on the video PID, or on both the video PID and all audio PIDs. Only applicable -// when EBP segmentation markers are is selected (segmentationMarkers is EBP -// or EBP_LEGACY). -const ( - // M2tsEbpPlacementVideoAndAudioPids is a M2tsEbpPlacement enum value - M2tsEbpPlacementVideoAndAudioPids = "VIDEO_AND_AUDIO_PIDS" - - // M2tsEbpPlacementVideoPid is a M2tsEbpPlacement enum value - M2tsEbpPlacementVideoPid = "VIDEO_PID" -) - -// Controls whether to include the ES Rate field in the PES header. -const ( - // M2tsEsRateInPesInclude is a M2tsEsRateInPes enum value - M2tsEsRateInPesInclude = "INCLUDE" - - // M2tsEsRateInPesExclude is a M2tsEsRateInPes enum value - M2tsEsRateInPesExclude = "EXCLUDE" -) - -// When set to PCR_EVERY_PES_PACKET, a Program Clock Reference value is inserted -// for every Packetized Elementary Stream (PES) header. This is effective only -// when the PCR PID is the same as the video or audio elementary stream. 
-const ( - // M2tsPcrControlPcrEveryPesPacket is a M2tsPcrControl enum value - M2tsPcrControlPcrEveryPesPacket = "PCR_EVERY_PES_PACKET" - - // M2tsPcrControlConfiguredPcrPeriod is a M2tsPcrControl enum value - M2tsPcrControlConfiguredPcrPeriod = "CONFIGURED_PCR_PERIOD" -) - -// When set to CBR, inserts null packets into transport stream to fill specified -// bitrate. When set to VBR, the bitrate setting acts as the maximum bitrate, -// but the output will not be padded up to that bitrate. -const ( - // M2tsRateModeVbr is a M2tsRateMode enum value - M2tsRateModeVbr = "VBR" - - // M2tsRateModeCbr is a M2tsRateMode enum value - M2tsRateModeCbr = "CBR" -) - -// Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from -// input to output. This is only available for certain containers. -const ( - // M2tsScte35SourcePassthrough is a M2tsScte35Source enum value - M2tsScte35SourcePassthrough = "PASSTHROUGH" - - // M2tsScte35SourceNone is a M2tsScte35Source enum value - M2tsScte35SourceNone = "NONE" -) - -// Inserts segmentation markers at each segmentation_time period. rai_segstart -// sets the Random Access Indicator bit in the adaptation field. rai_adapt sets -// the RAI bit and adds the current timecode in the private data bytes. psi_segstart -// inserts PAT and PMT tables at the start of segments. ebp adds Encoder Boundary -// Point information to the adaptation field as per OpenCable specification -// OC-SP-EBP-I01-130118. ebp_legacy adds Encoder Boundary Point information -// to the adaptation field using a legacy proprietary format. 
-const ( - // M2tsSegmentationMarkersNone is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersNone = "NONE" - - // M2tsSegmentationMarkersRaiSegstart is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersRaiSegstart = "RAI_SEGSTART" - - // M2tsSegmentationMarkersRaiAdapt is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersRaiAdapt = "RAI_ADAPT" - - // M2tsSegmentationMarkersPsiSegstart is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersPsiSegstart = "PSI_SEGSTART" - - // M2tsSegmentationMarkersEbp is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersEbp = "EBP" - - // M2tsSegmentationMarkersEbpLegacy is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersEbpLegacy = "EBP_LEGACY" -) - -// The segmentation style parameter controls how segmentation markers are inserted -// into the transport stream. With avails, it is possible that segments may -// be truncated, which can influence where future segmentation markers are inserted. -// When a segmentation style of "reset_cadence" is selected and a segment is -// truncated due to an avail, we will reset the segmentation cadence. This means -// the subsequent segment will have a duration of of $segmentation_time seconds. -// When a segmentation style of "maintain_cadence" is selected and a segment -// is truncated due to an avail, we will not reset the segmentation cadence. -// This means the subsequent segment will likely be truncated as well. However, -// all segments after that will have a duration of $segmentation_time seconds. -// Note that EBP lookahead is a slight exception to this rule. 
-const ( - // M2tsSegmentationStyleMaintainCadence is a M2tsSegmentationStyle enum value - M2tsSegmentationStyleMaintainCadence = "MAINTAIN_CADENCE" - - // M2tsSegmentationStyleResetCadence is a M2tsSegmentationStyle enum value - M2tsSegmentationStyleResetCadence = "RESET_CADENCE" -) - -// When set to PCR_EVERY_PES_PACKET a Program Clock Reference value is inserted -// for every Packetized Elementary Stream (PES) header. This parameter is effective -// only when the PCR PID is the same as the video or audio elementary stream. -const ( - // M3u8PcrControlPcrEveryPesPacket is a M3u8PcrControl enum value - M3u8PcrControlPcrEveryPesPacket = "PCR_EVERY_PES_PACKET" - - // M3u8PcrControlConfiguredPcrPeriod is a M3u8PcrControl enum value - M3u8PcrControlConfiguredPcrPeriod = "CONFIGURED_PCR_PERIOD" -) - -// Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from -// input to output. This is only available for certain containers. -const ( - // M3u8Scte35SourcePassthrough is a M3u8Scte35Source enum value - M3u8Scte35SourcePassthrough = "PASSTHROUGH" - - // M3u8Scte35SourceNone is a M3u8Scte35Source enum value - M3u8Scte35SourceNone = "NONE" -) - -// When enabled, include 'clap' atom if appropriate for the video output settings. -const ( - // MovClapAtomInclude is a MovClapAtom enum value - MovClapAtomInclude = "INCLUDE" - - // MovClapAtomExclude is a MovClapAtom enum value - MovClapAtomExclude = "EXCLUDE" -) - -// When enabled, file composition times will start at zero, composition times -// in the 'ctts' (composition time to sample) box for B-frames will be negative, -// and a 'cslg' (composition shift least greatest) box will be included per -// 14496-1 amendment 1. This improves compatibility with Apple players and tools. 
-const ( - // MovCslgAtomInclude is a MovCslgAtom enum value - MovCslgAtomInclude = "INCLUDE" - - // MovCslgAtomExclude is a MovCslgAtom enum value - MovCslgAtomExclude = "EXCLUDE" -) - -// When set to XDCAM, writes MPEG2 video streams into the QuickTime file using -// XDCAM fourcc codes. This increases compatibility with Apple editors and players, -// but may decrease compatibility with other players. Only applicable when the -// video codec is MPEG2. -const ( - // MovMpeg2FourCCControlXdcam is a MovMpeg2FourCCControl enum value - MovMpeg2FourCCControlXdcam = "XDCAM" - - // MovMpeg2FourCCControlMpeg is a MovMpeg2FourCCControl enum value - MovMpeg2FourCCControlMpeg = "MPEG" -) - -// If set to OMNEON, inserts Omneon-compatible padding -const ( - // MovPaddingControlOmneon is a MovPaddingControl enum value - MovPaddingControlOmneon = "OMNEON" - - // MovPaddingControlNone is a MovPaddingControl enum value - MovPaddingControlNone = "NONE" -) - -// A value of 'external' creates separate media files and the wrapper file (.mov) -// contains references to these media files. A value of 'self_contained' creates -// only a wrapper (.mov) file and this file contains all of the media. -const ( - // MovReferenceSelfContained is a MovReference enum value - MovReferenceSelfContained = "SELF_CONTAINED" - - // MovReferenceExternal is a MovReference enum value - MovReferenceExternal = "EXTERNAL" -) - -// When enabled, file composition times will start at zero, composition times -// in the 'ctts' (composition time to sample) box for B-frames will be negative, -// and a 'cslg' (composition shift least greatest) box will be included per -// 14496-1 amendment 1. This improves compatibility with Apple players and tools. -const ( - // Mp4CslgAtomInclude is a Mp4CslgAtom enum value - Mp4CslgAtomInclude = "INCLUDE" - - // Mp4CslgAtomExclude is a Mp4CslgAtom enum value - Mp4CslgAtomExclude = "EXCLUDE" -) - -// Inserts a free-space box immediately after the moov box. 
-const ( - // Mp4FreeSpaceBoxInclude is a Mp4FreeSpaceBox enum value - Mp4FreeSpaceBoxInclude = "INCLUDE" - - // Mp4FreeSpaceBoxExclude is a Mp4FreeSpaceBox enum value - Mp4FreeSpaceBoxExclude = "EXCLUDE" -) - -// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning -// of the archive as required for progressive downloading. Otherwise it is placed -// normally at the end. -const ( - // Mp4MoovPlacementProgressiveDownload is a Mp4MoovPlacement enum value - Mp4MoovPlacementProgressiveDownload = "PROGRESSIVE_DOWNLOAD" - - // Mp4MoovPlacementNormal is a Mp4MoovPlacement enum value - Mp4MoovPlacementNormal = "NORMAL" -) - -// Adaptive quantization. Allows intra-frame quantizers to vary to improve visual -// quality. -const ( - // Mpeg2AdaptiveQuantizationOff is a Mpeg2AdaptiveQuantization enum value - Mpeg2AdaptiveQuantizationOff = "OFF" - - // Mpeg2AdaptiveQuantizationLow is a Mpeg2AdaptiveQuantization enum value - Mpeg2AdaptiveQuantizationLow = "LOW" - - // Mpeg2AdaptiveQuantizationMedium is a Mpeg2AdaptiveQuantization enum value - Mpeg2AdaptiveQuantizationMedium = "MEDIUM" - - // Mpeg2AdaptiveQuantizationHigh is a Mpeg2AdaptiveQuantization enum value - Mpeg2AdaptiveQuantizationHigh = "HIGH" -) - -// Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output. -const ( - // Mpeg2CodecLevelAuto is a Mpeg2CodecLevel enum value - Mpeg2CodecLevelAuto = "AUTO" - - // Mpeg2CodecLevelLow is a Mpeg2CodecLevel enum value - Mpeg2CodecLevelLow = "LOW" - - // Mpeg2CodecLevelMain is a Mpeg2CodecLevel enum value - Mpeg2CodecLevelMain = "MAIN" - - // Mpeg2CodecLevelHigh1440 is a Mpeg2CodecLevel enum value - Mpeg2CodecLevelHigh1440 = "HIGH1440" - - // Mpeg2CodecLevelHigh is a Mpeg2CodecLevel enum value - Mpeg2CodecLevelHigh = "HIGH" -) - -// Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output. 
-const ( - // Mpeg2CodecProfileMain is a Mpeg2CodecProfile enum value - Mpeg2CodecProfileMain = "MAIN" - - // Mpeg2CodecProfileProfile422 is a Mpeg2CodecProfile enum value - Mpeg2CodecProfileProfile422 = "PROFILE_422" -) - -// Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want -// the service to use the framerate from the input. Using the console, do this -// by choosing INITIALIZE_FROM_SOURCE for Framerate. -const ( - // Mpeg2FramerateControlInitializeFromSource is a Mpeg2FramerateControl enum value - Mpeg2FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" - - // Mpeg2FramerateControlSpecified is a Mpeg2FramerateControl enum value - Mpeg2FramerateControlSpecified = "SPECIFIED" -) - -// When set to INTERPOLATE, produces smoother motion during framerate conversion. -const ( - // Mpeg2FramerateConversionAlgorithmDuplicateDrop is a Mpeg2FramerateConversionAlgorithm enum value - Mpeg2FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" - - // Mpeg2FramerateConversionAlgorithmInterpolate is a Mpeg2FramerateConversionAlgorithm enum value - Mpeg2FramerateConversionAlgorithmInterpolate = "INTERPOLATE" -) - -// Indicates if the GOP Size in MPEG2 is specified in frames or seconds. If -// seconds the system will convert the GOP Size into a frame count at run time. -const ( - // Mpeg2GopSizeUnitsFrames is a Mpeg2GopSizeUnits enum value - Mpeg2GopSizeUnitsFrames = "FRAMES" - - // Mpeg2GopSizeUnitsSeconds is a Mpeg2GopSizeUnits enum value - Mpeg2GopSizeUnitsSeconds = "SECONDS" -) - -// Use Interlace mode (InterlaceMode) to choose the scan line type for the output. -// * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce -// interlaced output with the entire output having the same field polarity (top -// or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default -// Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, -// behavior depends on the input scan type. 
- If the source is interlaced, the -// output will be interlaced with the same polarity as the source (it will follow -// the source). The output could therefore be a mix of "top field first" and -// "bottom field first". - If the source is progressive, the output will be -// interlaced with "top field first" or "bottom field first" polarity, depending -// on which of the Follow options you chose. -const ( - // Mpeg2InterlaceModeProgressive is a Mpeg2InterlaceMode enum value - Mpeg2InterlaceModeProgressive = "PROGRESSIVE" - - // Mpeg2InterlaceModeTopField is a Mpeg2InterlaceMode enum value - Mpeg2InterlaceModeTopField = "TOP_FIELD" - - // Mpeg2InterlaceModeBottomField is a Mpeg2InterlaceMode enum value - Mpeg2InterlaceModeBottomField = "BOTTOM_FIELD" - - // Mpeg2InterlaceModeFollowTopField is a Mpeg2InterlaceMode enum value - Mpeg2InterlaceModeFollowTopField = "FOLLOW_TOP_FIELD" - - // Mpeg2InterlaceModeFollowBottomField is a Mpeg2InterlaceMode enum value - Mpeg2InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD" -) - -// Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision -// for intra-block DC coefficients. If you choose the value auto, the service -// will automatically select the precision based on the per-frame compression -// ratio. 
-const ( - // Mpeg2IntraDcPrecisionAuto is a Mpeg2IntraDcPrecision enum value - Mpeg2IntraDcPrecisionAuto = "AUTO" - - // Mpeg2IntraDcPrecisionIntraDcPrecision8 is a Mpeg2IntraDcPrecision enum value - Mpeg2IntraDcPrecisionIntraDcPrecision8 = "INTRA_DC_PRECISION_8" - - // Mpeg2IntraDcPrecisionIntraDcPrecision9 is a Mpeg2IntraDcPrecision enum value - Mpeg2IntraDcPrecisionIntraDcPrecision9 = "INTRA_DC_PRECISION_9" - - // Mpeg2IntraDcPrecisionIntraDcPrecision10 is a Mpeg2IntraDcPrecision enum value - Mpeg2IntraDcPrecisionIntraDcPrecision10 = "INTRA_DC_PRECISION_10" - - // Mpeg2IntraDcPrecisionIntraDcPrecision11 is a Mpeg2IntraDcPrecision enum value - Mpeg2IntraDcPrecisionIntraDcPrecision11 = "INTRA_DC_PRECISION_11" -) - -// Using the API, enable ParFollowSource if you want the service to use the -// pixel aspect ratio from the input. Using the console, do this by choosing -// Follow source for Pixel aspect ratio. -const ( - // Mpeg2ParControlInitializeFromSource is a Mpeg2ParControl enum value - Mpeg2ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" - - // Mpeg2ParControlSpecified is a Mpeg2ParControl enum value - Mpeg2ParControlSpecified = "SPECIFIED" -) - -// Use Quality tuning level (Mpeg2QualityTuningLevel) to specifiy whether to -// use single-pass or multipass video encoding. -const ( - // Mpeg2QualityTuningLevelSinglePass is a Mpeg2QualityTuningLevel enum value - Mpeg2QualityTuningLevelSinglePass = "SINGLE_PASS" - - // Mpeg2QualityTuningLevelMultiPass is a Mpeg2QualityTuningLevel enum value - Mpeg2QualityTuningLevelMultiPass = "MULTI_PASS" -) - -// Use Rate control mode (Mpeg2RateControlMode) to specifiy whether the bitrate -// is variable (vbr) or constant (cbr). -const ( - // Mpeg2RateControlModeVbr is a Mpeg2RateControlMode enum value - Mpeg2RateControlModeVbr = "VBR" - - // Mpeg2RateControlModeCbr is a Mpeg2RateControlMode enum value - Mpeg2RateControlModeCbr = "CBR" -) - -// Scene change detection (inserts I-frames on scene changes). 
-const ( - // Mpeg2SceneChangeDetectDisabled is a Mpeg2SceneChangeDetect enum value - Mpeg2SceneChangeDetectDisabled = "DISABLED" - - // Mpeg2SceneChangeDetectEnabled is a Mpeg2SceneChangeDetect enum value - Mpeg2SceneChangeDetectEnabled = "ENABLED" -) - -// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled -// as 25fps, and audio is sped up correspondingly. -const ( - // Mpeg2SlowPalDisabled is a Mpeg2SlowPal enum value - Mpeg2SlowPalDisabled = "DISABLED" - - // Mpeg2SlowPalEnabled is a Mpeg2SlowPal enum value - Mpeg2SlowPalEnabled = "ENABLED" -) - -// Adjust quantization within each frame based on spatial variation of content -// complexity. -const ( - // Mpeg2SpatialAdaptiveQuantizationDisabled is a Mpeg2SpatialAdaptiveQuantization enum value - Mpeg2SpatialAdaptiveQuantizationDisabled = "DISABLED" - - // Mpeg2SpatialAdaptiveQuantizationEnabled is a Mpeg2SpatialAdaptiveQuantization enum value - Mpeg2SpatialAdaptiveQuantizationEnabled = "ENABLED" -) - -// Produces a Type D-10 compatible bitstream (SMPTE 356M-2001). -const ( - // Mpeg2SyntaxDefault is a Mpeg2Syntax enum value - Mpeg2SyntaxDefault = "DEFAULT" - - // Mpeg2SyntaxD10 is a Mpeg2Syntax enum value - Mpeg2SyntaxD10 = "D_10" -) - -// Only use Telecine (Mpeg2Telecine) when you set Framerate (Framerate) to 29.970. -// Set Telecine (Mpeg2Telecine) to Hard (hard) to produce a 29.97i output from -// a 23.976 input. Set it to Soft (soft) to produce 23.976 output and leave -// converstion to the player. -const ( - // Mpeg2TelecineNone is a Mpeg2Telecine enum value - Mpeg2TelecineNone = "NONE" - - // Mpeg2TelecineSoft is a Mpeg2Telecine enum value - Mpeg2TelecineSoft = "SOFT" - - // Mpeg2TelecineHard is a Mpeg2Telecine enum value - Mpeg2TelecineHard = "HARD" -) - -// Adjust quantization within each frame based on temporal variation of content -// complexity. 
-const ( - // Mpeg2TemporalAdaptiveQuantizationDisabled is a Mpeg2TemporalAdaptiveQuantization enum value - Mpeg2TemporalAdaptiveQuantizationDisabled = "DISABLED" - - // Mpeg2TemporalAdaptiveQuantizationEnabled is a Mpeg2TemporalAdaptiveQuantization enum value - Mpeg2TemporalAdaptiveQuantizationEnabled = "ENABLED" -) - -// COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across -// a Microsoft Smooth output group into a single audio stream. -const ( - // MsSmoothAudioDeduplicationCombineDuplicateStreams is a MsSmoothAudioDeduplication enum value - MsSmoothAudioDeduplicationCombineDuplicateStreams = "COMBINE_DUPLICATE_STREAMS" - - // MsSmoothAudioDeduplicationNone is a MsSmoothAudioDeduplication enum value - MsSmoothAudioDeduplicationNone = "NONE" -) - -// Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding -// format for the server and client manifest. Valid options are utf8 and utf16. -const ( - // MsSmoothManifestEncodingUtf8 is a MsSmoothManifestEncoding enum value - MsSmoothManifestEncodingUtf8 = "UTF8" - - // MsSmoothManifestEncodingUtf16 is a MsSmoothManifestEncoding enum value - MsSmoothManifestEncodingUtf16 = "UTF16" -) - -// Use Noise reducer filter (NoiseReducerFilter) to select one of the following -// spatial image filtering functions. To use this setting, you must also enable -// Noise reducer (NoiseReducer). * Bilateral is an edge preserving noise reduction -// filter * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) are convolution -// filters * Conserve is a min/max noise reduction filter * Spatial is frequency-domain -// filter based on JND principles. 
-const ( - // NoiseReducerFilterBilateral is a NoiseReducerFilter enum value - NoiseReducerFilterBilateral = "BILATERAL" - - // NoiseReducerFilterMean is a NoiseReducerFilter enum value - NoiseReducerFilterMean = "MEAN" - - // NoiseReducerFilterGaussian is a NoiseReducerFilter enum value - NoiseReducerFilterGaussian = "GAUSSIAN" - - // NoiseReducerFilterLanczos is a NoiseReducerFilter enum value - NoiseReducerFilterLanczos = "LANCZOS" - - // NoiseReducerFilterSharpen is a NoiseReducerFilter enum value - NoiseReducerFilterSharpen = "SHARPEN" - - // NoiseReducerFilterConserve is a NoiseReducerFilter enum value - NoiseReducerFilterConserve = "CONSERVE" - - // NoiseReducerFilterSpatial is a NoiseReducerFilter enum value - NoiseReducerFilterSpatial = "SPATIAL" -) - -// When you request lists of resources, you can optionally specify whether they -// are sorted in ASCENDING or DESCENDING order. Default varies by resource. -const ( - // OrderAscending is a Order enum value - OrderAscending = "ASCENDING" - - // OrderDescending is a Order enum value - OrderDescending = "DESCENDING" -) - -const ( - // OutputGroupTypeHlsGroupSettings is a OutputGroupType enum value - OutputGroupTypeHlsGroupSettings = "HLS_GROUP_SETTINGS" - - // OutputGroupTypeDashIsoGroupSettings is a OutputGroupType enum value - OutputGroupTypeDashIsoGroupSettings = "DASH_ISO_GROUP_SETTINGS" - - // OutputGroupTypeFileGroupSettings is a OutputGroupType enum value - OutputGroupTypeFileGroupSettings = "FILE_GROUP_SETTINGS" - - // OutputGroupTypeMsSmoothGroupSettings is a OutputGroupType enum value - OutputGroupTypeMsSmoothGroupSettings = "MS_SMOOTH_GROUP_SETTINGS" -) - -// Selects method of inserting SDT information into output stream. "Follow input -// SDT" copies SDT information from input stream to output stream. "Follow input -// SDT if present" copies SDT information from input stream to output stream -// if SDT information is present in the input, otherwise it will fall back on -// the user-defined values. 
Enter "SDT Manually" means user will enter the SDT -// information. "No SDT" means output stream will not contain SDT information. -const ( - // OutputSdtSdtFollow is a OutputSdt enum value - OutputSdtSdtFollow = "SDT_FOLLOW" - - // OutputSdtSdtFollowIfPresent is a OutputSdt enum value - OutputSdtSdtFollowIfPresent = "SDT_FOLLOW_IF_PRESENT" - - // OutputSdtSdtManual is a OutputSdt enum value - OutputSdtSdtManual = "SDT_MANUAL" - - // OutputSdtSdtNone is a OutputSdt enum value - OutputSdtSdtNone = "SDT_NONE" -) - -// Optional. When you request a list of presets, you can choose to list them -// alphabetically by NAME or chronologically by CREATION_DATE. If you don't -// specify, the service will list them by name. -const ( - // PresetListByName is a PresetListBy enum value - PresetListByName = "NAME" - - // PresetListByCreationDate is a PresetListBy enum value - PresetListByCreationDate = "CREATION_DATE" - - // PresetListBySystem is a PresetListBy enum value - PresetListBySystem = "SYSTEM" -) - -// Use Profile (ProResCodecProfile) to specifiy the type of Apple ProRes codec -// to use for this output. -const ( - // ProresCodecProfileAppleProres422 is a ProresCodecProfile enum value - ProresCodecProfileAppleProres422 = "APPLE_PRORES_422" - - // ProresCodecProfileAppleProres422Hq is a ProresCodecProfile enum value - ProresCodecProfileAppleProres422Hq = "APPLE_PRORES_422_HQ" - - // ProresCodecProfileAppleProres422Lt is a ProresCodecProfile enum value - ProresCodecProfileAppleProres422Lt = "APPLE_PRORES_422_LT" - - // ProresCodecProfileAppleProres422Proxy is a ProresCodecProfile enum value - ProresCodecProfileAppleProres422Proxy = "APPLE_PRORES_422_PROXY" -) - -// Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want -// the service to use the framerate from the input. Using the console, do this -// by choosing INITIALIZE_FROM_SOURCE for Framerate. 
-const ( - // ProresFramerateControlInitializeFromSource is a ProresFramerateControl enum value - ProresFramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" - - // ProresFramerateControlSpecified is a ProresFramerateControl enum value - ProresFramerateControlSpecified = "SPECIFIED" -) - -// When set to INTERPOLATE, produces smoother motion during framerate conversion. -const ( - // ProresFramerateConversionAlgorithmDuplicateDrop is a ProresFramerateConversionAlgorithm enum value - ProresFramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" - - // ProresFramerateConversionAlgorithmInterpolate is a ProresFramerateConversionAlgorithm enum value - ProresFramerateConversionAlgorithmInterpolate = "INTERPOLATE" -) - -// Use Interlace mode (InterlaceMode) to choose the scan line type for the output. -// * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce -// interlaced output with the entire output having the same field polarity (top -// or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default -// Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, -// behavior depends on the input scan type. - If the source is interlaced, the -// output will be interlaced with the same polarity as the source (it will follow -// the source). The output could therefore be a mix of "top field first" and -// "bottom field first". - If the source is progressive, the output will be -// interlaced with "top field first" or "bottom field first" polarity, depending -// on which of the Follow options you chose. 
-const ( - // ProresInterlaceModeProgressive is a ProresInterlaceMode enum value - ProresInterlaceModeProgressive = "PROGRESSIVE" - - // ProresInterlaceModeTopField is a ProresInterlaceMode enum value - ProresInterlaceModeTopField = "TOP_FIELD" - - // ProresInterlaceModeBottomField is a ProresInterlaceMode enum value - ProresInterlaceModeBottomField = "BOTTOM_FIELD" - - // ProresInterlaceModeFollowTopField is a ProresInterlaceMode enum value - ProresInterlaceModeFollowTopField = "FOLLOW_TOP_FIELD" - - // ProresInterlaceModeFollowBottomField is a ProresInterlaceMode enum value - ProresInterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD" -) - -// Use (ProresParControl) to specify how the service determines the pixel aspect -// ratio. Set to Follow source (INITIALIZE_FROM_SOURCE) to use the pixel aspect -// ratio from the input. To specify a different pixel aspect ratio: Using the -// console, choose it from the dropdown menu. Using the API, set ProresParControl -// to (SPECIFIED) and provide for (ParNumerator) and (ParDenominator). -const ( - // ProresParControlInitializeFromSource is a ProresParControl enum value - ProresParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" - - // ProresParControlSpecified is a ProresParControl enum value - ProresParControlSpecified = "SPECIFIED" -) - -// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled -// as 25fps, and audio is sped up correspondingly. -const ( - // ProresSlowPalDisabled is a ProresSlowPal enum value - ProresSlowPalDisabled = "DISABLED" - - // ProresSlowPalEnabled is a ProresSlowPal enum value - ProresSlowPalEnabled = "ENABLED" -) - -// Only use Telecine (ProresTelecine) when you set Framerate (Framerate) to -// 29.970. Set Telecine (ProresTelecine) to Hard (hard) to produce a 29.97i -// output from a 23.976 input. Set it to Soft (soft) to produce 23.976 output -// and leave converstion to the player. 
-const ( - // ProresTelecineNone is a ProresTelecine enum value - ProresTelecineNone = "NONE" - - // ProresTelecineHard is a ProresTelecine enum value - ProresTelecineHard = "HARD" -) - -// Optional. When you request a list of queues, you can choose to list them -// alphabetically by NAME or chronologically by CREATION_DATE. If you don't -// specify, the service will list them by creation date. -const ( - // QueueListByName is a QueueListBy enum value - QueueListByName = "NAME" - - // QueueListByCreationDate is a QueueListBy enum value - QueueListByCreationDate = "CREATION_DATE" -) - -// Queues can be ACTIVE or PAUSED. If you pause a queue, jobs in that queue -// will not begin. Jobs running when a queue is paused continue to run until -// they finish or error out. -const ( - // QueueStatusActive is a QueueStatus enum value - QueueStatusActive = "ACTIVE" - - // QueueStatusPaused is a QueueStatus enum value - QueueStatusPaused = "PAUSED" -) - -// Use Respond to AFD (RespondToAfd) to specify how the service changes the -// video itself in response to AFD values in the input. * Choose Respond to -// clip the input video frame according to the AFD value, input display aspect -// ratio, and output display aspect ratio. * Choose Passthrough to include the -// input AFD values. Do not choose this when AfdSignaling is set to (NONE). -// A preferred implementation of this workflow is to set RespondToAfd to (NONE) -// and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values -// from this output. -const ( - // RespondToAfdNone is a RespondToAfd enum value - RespondToAfdNone = "NONE" - - // RespondToAfdRespond is a RespondToAfd enum value - RespondToAfdRespond = "RESPOND" - - // RespondToAfdPassthrough is a RespondToAfd enum value - RespondToAfdPassthrough = "PASSTHROUGH" -) - -// Applies only if your input aspect ratio is different from your output aspect -// ratio. 
Enable Stretch to output (StretchToOutput) to have the service stretch -// your video image to fit. Leave this setting disabled to allow the service -// to letterbox your video instead. This setting overrides any positioning value -// you specify elsewhere in the job. -const ( - // ScalingBehaviorDefault is a ScalingBehavior enum value - ScalingBehaviorDefault = "DEFAULT" - - // ScalingBehaviorStretchToOutput is a ScalingBehavior enum value - ScalingBehaviorStretchToOutput = "STRETCH_TO_OUTPUT" -) - -// Set Framerate (SccDestinationFramerate) to make sure that the captions and -// the video are synchronized in the output. Specify a framerate that matches -// the framerate of the associated video. If the video framerate is 29.97, choose -// 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has video_insertion=true -// and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe (FRAMERATE_29_97_NON_DROPFRAME). -const ( - // SccDestinationFramerateFramerate2397 is a SccDestinationFramerate enum value - SccDestinationFramerateFramerate2397 = "FRAMERATE_23_97" - - // SccDestinationFramerateFramerate24 is a SccDestinationFramerate enum value - SccDestinationFramerateFramerate24 = "FRAMERATE_24" - - // SccDestinationFramerateFramerate2997Dropframe is a SccDestinationFramerate enum value - SccDestinationFramerateFramerate2997Dropframe = "FRAMERATE_29_97_DROPFRAME" - - // SccDestinationFramerateFramerate2997NonDropframe is a SccDestinationFramerate enum value - SccDestinationFramerateFramerate2997NonDropframe = "FRAMERATE_29_97_NON_DROPFRAME" -) - -// Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to -// specify the location the burned-in timecode on output video. 
-const ( - // TimecodeBurninPositionTopCenter is a TimecodeBurninPosition enum value - TimecodeBurninPositionTopCenter = "TOP_CENTER" - - // TimecodeBurninPositionTopLeft is a TimecodeBurninPosition enum value - TimecodeBurninPositionTopLeft = "TOP_LEFT" - - // TimecodeBurninPositionTopRight is a TimecodeBurninPosition enum value - TimecodeBurninPositionTopRight = "TOP_RIGHT" - - // TimecodeBurninPositionMiddleLeft is a TimecodeBurninPosition enum value - TimecodeBurninPositionMiddleLeft = "MIDDLE_LEFT" - - // TimecodeBurninPositionMiddleCenter is a TimecodeBurninPosition enum value - TimecodeBurninPositionMiddleCenter = "MIDDLE_CENTER" - - // TimecodeBurninPositionMiddleRight is a TimecodeBurninPosition enum value - TimecodeBurninPositionMiddleRight = "MIDDLE_RIGHT" - - // TimecodeBurninPositionBottomLeft is a TimecodeBurninPosition enum value - TimecodeBurninPositionBottomLeft = "BOTTOM_LEFT" - - // TimecodeBurninPositionBottomCenter is a TimecodeBurninPosition enum value - TimecodeBurninPositionBottomCenter = "BOTTOM_CENTER" - - // TimecodeBurninPositionBottomRight is a TimecodeBurninPosition enum value - TimecodeBurninPositionBottomRight = "BOTTOM_RIGHT" -) - -// Use Timecode source (TimecodeSource) to set how timecodes are handled within -// this input. To make sure that your video, audio, captions, and markers are -// synchronized and that time-based features, such as image inserter, work correctly, -// choose the Timecode source option that matches your assets. All timecodes -// are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) -// - Use the timecode that is in the input video. If no embedded timecode is -// in the source, the service will use Start at 0 (ZEROBASED) instead. * Start -// at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00. -// * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame -// to a value other than zero. You use Start timecode (Start) to provide this -// value. 
-const ( - // TimecodeSourceEmbedded is a TimecodeSource enum value - TimecodeSourceEmbedded = "EMBEDDED" - - // TimecodeSourceZerobased is a TimecodeSource enum value - TimecodeSourceZerobased = "ZEROBASED" - - // TimecodeSourceSpecifiedstart is a TimecodeSource enum value - TimecodeSourceSpecifiedstart = "SPECIFIEDSTART" -) - -// If PASSTHROUGH, inserts ID3 timed metadata from the timed_metadata REST command -// into this output. Only available for certain containers. -const ( - // TimedMetadataPassthrough is a TimedMetadata enum value - TimedMetadataPassthrough = "PASSTHROUGH" - - // TimedMetadataNone is a TimedMetadata enum value - TimedMetadataNone = "NONE" -) - -// Pass through style and position information from a TTML-like input source -// (TTML, SMPTE-TT, CFF-TT) to the CFF-TT output or TTML output. -const ( - // TtmlStylePassthroughEnabled is a TtmlStylePassthrough enum value - TtmlStylePassthroughEnabled = "ENABLED" - - // TtmlStylePassthroughDisabled is a TtmlStylePassthrough enum value - TtmlStylePassthroughDisabled = "DISABLED" -) - -const ( - // TypeSystem is a Type enum value - TypeSystem = "SYSTEM" - - // TypeCustom is a Type enum value - TypeCustom = "CUSTOM" -) - -// Type of video codec -const ( - // VideoCodecFrameCapture is a VideoCodec enum value - VideoCodecFrameCapture = "FRAME_CAPTURE" - - // VideoCodecH264 is a VideoCodec enum value - VideoCodecH264 = "H_264" - - // VideoCodecH265 is a VideoCodec enum value - VideoCodecH265 = "H_265" - - // VideoCodecMpeg2 is a VideoCodec enum value - VideoCodecMpeg2 = "MPEG2" - - // VideoCodecProres is a VideoCodec enum value - VideoCodecProres = "PRORES" -) - -// Enable Timecode insertion to include timecode information in this output. -// Do this in the API by setting (VideoTimecodeInsertion) to (PIC_TIMING_SEI). -// To get timecodes to appear correctly in your output, also set up the timecode -// configuration for your job in the input settings. 
Only enable Timecode insertion -// when the input framerate is identical to output framerate. Disable this setting -// to remove the timecode from the output. Default is disabled. -const ( - // VideoTimecodeInsertionDisabled is a VideoTimecodeInsertion enum value - VideoTimecodeInsertionDisabled = "DISABLED" - - // VideoTimecodeInsertionPicTimingSei is a VideoTimecodeInsertion enum value - VideoTimecodeInsertionPicTimingSei = "PIC_TIMING_SEI" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/doc.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/doc.go deleted file mode 100644 index f30ea5eb708..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package mediaconvert provides the client and types for making API -// requests to AWS Elemental MediaConvert. -// -// AWS Elemental MediaConvert -// -// See https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29 for more information on this service. -// -// See mediaconvert package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/mediaconvert/ -// -// Using the Client -// -// To contact AWS Elemental MediaConvert with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Elemental MediaConvert client MediaConvert for more -// information on creating client for this service. 
-// https://docs.aws.amazon.com/sdk-for-go/api/service/mediaconvert/#New -package mediaconvert diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/errors.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/errors.go deleted file mode 100644 index 7a607419d1c..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/errors.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package mediaconvert - -const ( - - // ErrCodeBadRequestException for service response error code - // "BadRequestException". - ErrCodeBadRequestException = "BadRequestException" - - // ErrCodeConflictException for service response error code - // "ConflictException". - ErrCodeConflictException = "ConflictException" - - // ErrCodeForbiddenException for service response error code - // "ForbiddenException". - ErrCodeForbiddenException = "ForbiddenException" - - // ErrCodeInternalServerErrorException for service response error code - // "InternalServerErrorException". - ErrCodeInternalServerErrorException = "InternalServerErrorException" - - // ErrCodeNotFoundException for service response error code - // "NotFoundException". - ErrCodeNotFoundException = "NotFoundException" - - // ErrCodeTooManyRequestsException for service response error code - // "TooManyRequestsException". - ErrCodeTooManyRequestsException = "TooManyRequestsException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go deleted file mode 100644 index 57088839281..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go +++ /dev/null @@ -1,97 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package mediaconvert - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/restjson" -) - -// MediaConvert provides the API operation methods for making requests to -// AWS Elemental MediaConvert. See this package's package overview docs -// for details on the service. -// -// MediaConvert methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type MediaConvert struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "mediaconvert" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the MediaConvert client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a MediaConvert client from just a session. -// svc := mediaconvert.New(mySession) -// -// // Create a MediaConvert client with additional configuration -// svc := mediaconvert.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaConvert { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaConvert { - if len(signingName) == 0 { - signingName = "mediaconvert" - } - svc := &MediaConvert{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2017-08-29", - JSONVersion: "1.1", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a MediaConvert operation and runs any -// custom request initialization. -func (c *MediaConvert) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go b/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go deleted file mode 100644 index f2ef274c06e..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go +++ /dev/null @@ -1,10820 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package medialive - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opCreateChannel = "CreateChannel" - -// CreateChannelRequest generates a "aws/request.Request" representing the -// client's request for the CreateChannel operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateChannel for more information on using the CreateChannel -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateChannelRequest method. -// req, resp := client.CreateChannelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateChannel -func (c *MediaLive) CreateChannelRequest(input *CreateChannelInput) (req *request.Request, output *CreateChannelOutput) { - op := &request.Operation{ - Name: opCreateChannel, - HTTPMethod: "POST", - HTTPPath: "/prod/channels", - } - - if input == nil { - input = &CreateChannelInput{} - } - - output = &CreateChannelOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateChannel API operation for AWS Elemental MediaLive. -// -// Creates a new channel -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation CreateChannel for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateChannel -func (c *MediaLive) CreateChannel(input *CreateChannelInput) (*CreateChannelOutput, error) { - req, out := c.CreateChannelRequest(input) - return out, req.Send() -} - -// CreateChannelWithContext is the same as CreateChannel with the addition of -// the ability to pass a context and additional request options. -// -// See CreateChannel for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) CreateChannelWithContext(ctx aws.Context, input *CreateChannelInput, opts ...request.Option) (*CreateChannelOutput, error) { - req, out := c.CreateChannelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateInput = "CreateInput" - -// CreateInputRequest generates a "aws/request.Request" representing the -// client's request for the CreateInput operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See CreateInput for more information on using the CreateInput -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateInputRequest method. -// req, resp := client.CreateInputRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateInput -func (c *MediaLive) CreateInputRequest(input *CreateInputInput) (req *request.Request, output *CreateInputOutput) { - op := &request.Operation{ - Name: opCreateInput, - HTTPMethod: "POST", - HTTPPath: "/prod/inputs", - } - - if input == nil { - input = &CreateInputInput{} - } - - output = &CreateInputOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateInput API operation for AWS Elemental MediaLive. -// -// Create an input -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation CreateInput for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateInput -func (c *MediaLive) CreateInput(input *CreateInputInput) (*CreateInputOutput, error) { - req, out := c.CreateInputRequest(input) - return out, req.Send() -} - -// CreateInputWithContext is the same as CreateInput with the addition of -// the ability to pass a context and additional request options. -// -// See CreateInput for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) CreateInputWithContext(ctx aws.Context, input *CreateInputInput, opts ...request.Option) (*CreateInputOutput, error) { - req, out := c.CreateInputRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateInputSecurityGroup = "CreateInputSecurityGroup" - -// CreateInputSecurityGroupRequest generates a "aws/request.Request" representing the -// client's request for the CreateInputSecurityGroup operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See CreateInputSecurityGroup for more information on using the CreateInputSecurityGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateInputSecurityGroupRequest method. -// req, resp := client.CreateInputSecurityGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateInputSecurityGroup -func (c *MediaLive) CreateInputSecurityGroupRequest(input *CreateInputSecurityGroupInput) (req *request.Request, output *CreateInputSecurityGroupOutput) { - op := &request.Operation{ - Name: opCreateInputSecurityGroup, - HTTPMethod: "POST", - HTTPPath: "/prod/inputSecurityGroups", - } - - if input == nil { - input = &CreateInputSecurityGroupInput{} - } - - output = &CreateInputSecurityGroupOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateInputSecurityGroup API operation for AWS Elemental MediaLive. -// -// Creates a Input Security Group -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation CreateInputSecurityGroup for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateInputSecurityGroup -func (c *MediaLive) CreateInputSecurityGroup(input *CreateInputSecurityGroupInput) (*CreateInputSecurityGroupOutput, error) { - req, out := c.CreateInputSecurityGroupRequest(input) - return out, req.Send() -} - -// CreateInputSecurityGroupWithContext is the same as CreateInputSecurityGroup with the addition of -// the ability to pass a context and additional request options. -// -// See CreateInputSecurityGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) CreateInputSecurityGroupWithContext(ctx aws.Context, input *CreateInputSecurityGroupInput, opts ...request.Option) (*CreateInputSecurityGroupOutput, error) { - req, out := c.CreateInputSecurityGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteChannel = "DeleteChannel" - -// DeleteChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteChannel operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DeleteChannel for more information on using the DeleteChannel -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteChannelRequest method. -// req, resp := client.DeleteChannelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteChannel -func (c *MediaLive) DeleteChannelRequest(input *DeleteChannelInput) (req *request.Request, output *DeleteChannelOutput) { - op := &request.Operation{ - Name: opDeleteChannel, - HTTPMethod: "DELETE", - HTTPPath: "/prod/channels/{channelId}", - } - - if input == nil { - input = &DeleteChannelInput{} - } - - output = &DeleteChannelOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteChannel API operation for AWS Elemental MediaLive. -// -// Starts deletion of channel. The associated outputs are also deleted. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation DeleteChannel for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteChannel -func (c *MediaLive) DeleteChannel(input *DeleteChannelInput) (*DeleteChannelOutput, error) { - req, out := c.DeleteChannelRequest(input) - return out, req.Send() -} - -// DeleteChannelWithContext is the same as DeleteChannel with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteChannel for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) DeleteChannelWithContext(ctx aws.Context, input *DeleteChannelInput, opts ...request.Option) (*DeleteChannelOutput, error) { - req, out := c.DeleteChannelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteInput = "DeleteInput" - -// DeleteInputRequest generates a "aws/request.Request" representing the -// client's request for the DeleteInput operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See DeleteInput for more information on using the DeleteInput -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteInputRequest method. -// req, resp := client.DeleteInputRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteInput -func (c *MediaLive) DeleteInputRequest(input *DeleteInputInput) (req *request.Request, output *DeleteInputOutput) { - op := &request.Operation{ - Name: opDeleteInput, - HTTPMethod: "DELETE", - HTTPPath: "/prod/inputs/{inputId}", - } - - if input == nil { - input = &DeleteInputInput{} - } - - output = &DeleteInputOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteInput API operation for AWS Elemental MediaLive. -// -// Deletes the input end point -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation DeleteInput for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteInput -func (c *MediaLive) DeleteInput(input *DeleteInputInput) (*DeleteInputOutput, error) { - req, out := c.DeleteInputRequest(input) - return out, req.Send() -} - -// DeleteInputWithContext is the same as DeleteInput with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteInput for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) DeleteInputWithContext(ctx aws.Context, input *DeleteInputInput, opts ...request.Option) (*DeleteInputOutput, error) { - req, out := c.DeleteInputRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteInputSecurityGroup = "DeleteInputSecurityGroup" - -// DeleteInputSecurityGroupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteInputSecurityGroup operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DeleteInputSecurityGroup for more information on using the DeleteInputSecurityGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteInputSecurityGroupRequest method. -// req, resp := client.DeleteInputSecurityGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteInputSecurityGroup -func (c *MediaLive) DeleteInputSecurityGroupRequest(input *DeleteInputSecurityGroupInput) (req *request.Request, output *DeleteInputSecurityGroupOutput) { - op := &request.Operation{ - Name: opDeleteInputSecurityGroup, - HTTPMethod: "DELETE", - HTTPPath: "/prod/inputSecurityGroups/{inputSecurityGroupId}", - } - - if input == nil { - input = &DeleteInputSecurityGroupInput{} - } - - output = &DeleteInputSecurityGroupOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteInputSecurityGroup API operation for AWS Elemental MediaLive. -// -// Deletes an Input Security Group -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation DeleteInputSecurityGroup for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteInputSecurityGroup -func (c *MediaLive) DeleteInputSecurityGroup(input *DeleteInputSecurityGroupInput) (*DeleteInputSecurityGroupOutput, error) { - req, out := c.DeleteInputSecurityGroupRequest(input) - return out, req.Send() -} - -// DeleteInputSecurityGroupWithContext is the same as DeleteInputSecurityGroup with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteInputSecurityGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) DeleteInputSecurityGroupWithContext(ctx aws.Context, input *DeleteInputSecurityGroupInput, opts ...request.Option) (*DeleteInputSecurityGroupOutput, error) { - req, out := c.DeleteInputSecurityGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeChannel = "DescribeChannel" - -// DescribeChannelRequest generates a "aws/request.Request" representing the -// client's request for the DescribeChannel operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DescribeChannel for more information on using the DescribeChannel -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeChannelRequest method. -// req, resp := client.DescribeChannelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeChannel -func (c *MediaLive) DescribeChannelRequest(input *DescribeChannelInput) (req *request.Request, output *DescribeChannelOutput) { - op := &request.Operation{ - Name: opDescribeChannel, - HTTPMethod: "GET", - HTTPPath: "/prod/channels/{channelId}", - } - - if input == nil { - input = &DescribeChannelInput{} - } - - output = &DescribeChannelOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeChannel API operation for AWS Elemental MediaLive. -// -// Gets details about a channel -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation DescribeChannel for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeChannel -func (c *MediaLive) DescribeChannel(input *DescribeChannelInput) (*DescribeChannelOutput, error) { - req, out := c.DescribeChannelRequest(input) - return out, req.Send() -} - -// DescribeChannelWithContext is the same as DescribeChannel with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeChannel for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) DescribeChannelWithContext(ctx aws.Context, input *DescribeChannelInput, opts ...request.Option) (*DescribeChannelOutput, error) { - req, out := c.DescribeChannelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeInput = "DescribeInput" - -// DescribeInputRequest generates a "aws/request.Request" representing the -// client's request for the DescribeInput operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See DescribeInput for more information on using the DescribeInput -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeInputRequest method. -// req, resp := client.DescribeInputRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInput -func (c *MediaLive) DescribeInputRequest(input *DescribeInputInput) (req *request.Request, output *DescribeInputOutput) { - op := &request.Operation{ - Name: opDescribeInput, - HTTPMethod: "GET", - HTTPPath: "/prod/inputs/{inputId}", - } - - if input == nil { - input = &DescribeInputInput{} - } - - output = &DescribeInputOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeInput API operation for AWS Elemental MediaLive. -// -// Produces details about an input -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation DescribeInput for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInput -func (c *MediaLive) DescribeInput(input *DescribeInputInput) (*DescribeInputOutput, error) { - req, out := c.DescribeInputRequest(input) - return out, req.Send() -} - -// DescribeInputWithContext is the same as DescribeInput with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeInput for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) DescribeInputWithContext(ctx aws.Context, input *DescribeInputInput, opts ...request.Option) (*DescribeInputOutput, error) { - req, out := c.DescribeInputRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeInputSecurityGroup = "DescribeInputSecurityGroup" - -// DescribeInputSecurityGroupRequest generates a "aws/request.Request" representing the -// client's request for the DescribeInputSecurityGroup operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DescribeInputSecurityGroup for more information on using the DescribeInputSecurityGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeInputSecurityGroupRequest method. -// req, resp := client.DescribeInputSecurityGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputSecurityGroup -func (c *MediaLive) DescribeInputSecurityGroupRequest(input *DescribeInputSecurityGroupInput) (req *request.Request, output *DescribeInputSecurityGroupOutput) { - op := &request.Operation{ - Name: opDescribeInputSecurityGroup, - HTTPMethod: "GET", - HTTPPath: "/prod/inputSecurityGroups/{inputSecurityGroupId}", - } - - if input == nil { - input = &DescribeInputSecurityGroupInput{} - } - - output = &DescribeInputSecurityGroupOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeInputSecurityGroup API operation for AWS Elemental MediaLive. -// -// Produces a summary of an Input Security Group -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation DescribeInputSecurityGroup for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputSecurityGroup -func (c *MediaLive) DescribeInputSecurityGroup(input *DescribeInputSecurityGroupInput) (*DescribeInputSecurityGroupOutput, error) { - req, out := c.DescribeInputSecurityGroupRequest(input) - return out, req.Send() -} - -// DescribeInputSecurityGroupWithContext is the same as DescribeInputSecurityGroup with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeInputSecurityGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) DescribeInputSecurityGroupWithContext(ctx aws.Context, input *DescribeInputSecurityGroupInput, opts ...request.Option) (*DescribeInputSecurityGroupOutput, error) { - req, out := c.DescribeInputSecurityGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListChannels = "ListChannels" - -// ListChannelsRequest generates a "aws/request.Request" representing the -// client's request for the ListChannels operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListChannels for more information on using the ListChannels -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListChannelsRequest method. -// req, resp := client.ListChannelsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListChannels -func (c *MediaLive) ListChannelsRequest(input *ListChannelsInput) (req *request.Request, output *ListChannelsOutput) { - op := &request.Operation{ - Name: opListChannels, - HTTPMethod: "GET", - HTTPPath: "/prod/channels", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListChannelsInput{} - } - - output = &ListChannelsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListChannels API operation for AWS Elemental MediaLive. -// -// Produces list of channels that have been created -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation ListChannels for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListChannels -func (c *MediaLive) ListChannels(input *ListChannelsInput) (*ListChannelsOutput, error) { - req, out := c.ListChannelsRequest(input) - return out, req.Send() -} - -// ListChannelsWithContext is the same as ListChannels with the addition of -// the ability to pass a context and additional request options. -// -// See ListChannels for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) ListChannelsWithContext(ctx aws.Context, input *ListChannelsInput, opts ...request.Option) (*ListChannelsOutput, error) { - req, out := c.ListChannelsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListChannelsPages iterates over the pages of a ListChannels operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListChannels method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListChannels operation. 
-// pageNum := 0 -// err := client.ListChannelsPages(params, -// func(page *ListChannelsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *MediaLive) ListChannelsPages(input *ListChannelsInput, fn func(*ListChannelsOutput, bool) bool) error { - return c.ListChannelsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListChannelsPagesWithContext same as ListChannelsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) ListChannelsPagesWithContext(ctx aws.Context, input *ListChannelsInput, fn func(*ListChannelsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListChannelsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListChannelsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListChannelsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListInputSecurityGroups = "ListInputSecurityGroups" - -// ListInputSecurityGroupsRequest generates a "aws/request.Request" representing the -// client's request for the ListInputSecurityGroups operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See ListInputSecurityGroups for more information on using the ListInputSecurityGroups -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListInputSecurityGroupsRequest method. -// req, resp := client.ListInputSecurityGroupsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputSecurityGroups -func (c *MediaLive) ListInputSecurityGroupsRequest(input *ListInputSecurityGroupsInput) (req *request.Request, output *ListInputSecurityGroupsOutput) { - op := &request.Operation{ - Name: opListInputSecurityGroups, - HTTPMethod: "GET", - HTTPPath: "/prod/inputSecurityGroups", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListInputSecurityGroupsInput{} - } - - output = &ListInputSecurityGroupsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListInputSecurityGroups API operation for AWS Elemental MediaLive. -// -// Produces a list of Input Security Groups for an account -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation ListInputSecurityGroups for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputSecurityGroups -func (c *MediaLive) ListInputSecurityGroups(input *ListInputSecurityGroupsInput) (*ListInputSecurityGroupsOutput, error) { - req, out := c.ListInputSecurityGroupsRequest(input) - return out, req.Send() -} - -// ListInputSecurityGroupsWithContext is the same as ListInputSecurityGroups with the addition of -// the ability to pass a context and additional request options. -// -// See ListInputSecurityGroups for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) ListInputSecurityGroupsWithContext(ctx aws.Context, input *ListInputSecurityGroupsInput, opts ...request.Option) (*ListInputSecurityGroupsOutput, error) { - req, out := c.ListInputSecurityGroupsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListInputSecurityGroupsPages iterates over the pages of a ListInputSecurityGroups operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListInputSecurityGroups method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. 
-// -// // Example iterating over at most 3 pages of a ListInputSecurityGroups operation. -// pageNum := 0 -// err := client.ListInputSecurityGroupsPages(params, -// func(page *ListInputSecurityGroupsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *MediaLive) ListInputSecurityGroupsPages(input *ListInputSecurityGroupsInput, fn func(*ListInputSecurityGroupsOutput, bool) bool) error { - return c.ListInputSecurityGroupsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListInputSecurityGroupsPagesWithContext same as ListInputSecurityGroupsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) ListInputSecurityGroupsPagesWithContext(ctx aws.Context, input *ListInputSecurityGroupsInput, fn func(*ListInputSecurityGroupsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListInputSecurityGroupsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListInputSecurityGroupsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInputSecurityGroupsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListInputs = "ListInputs" - -// ListInputsRequest generates a "aws/request.Request" representing the -// client's request for the ListInputs operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See ListInputs for more information on using the ListInputs -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListInputsRequest method. -// req, resp := client.ListInputsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputs -func (c *MediaLive) ListInputsRequest(input *ListInputsInput) (req *request.Request, output *ListInputsOutput) { - op := &request.Operation{ - Name: opListInputs, - HTTPMethod: "GET", - HTTPPath: "/prod/inputs", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListInputsInput{} - } - - output = &ListInputsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListInputs API operation for AWS Elemental MediaLive. -// -// Produces list of inputs that have been created -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation ListInputs for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputs -func (c *MediaLive) ListInputs(input *ListInputsInput) (*ListInputsOutput, error) { - req, out := c.ListInputsRequest(input) - return out, req.Send() -} - -// ListInputsWithContext is the same as ListInputs with the addition of -// the ability to pass a context and additional request options. -// -// See ListInputs for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) ListInputsWithContext(ctx aws.Context, input *ListInputsInput, opts ...request.Option) (*ListInputsOutput, error) { - req, out := c.ListInputsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListInputsPages iterates over the pages of a ListInputs operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListInputs method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListInputs operation. 
-// pageNum := 0 -// err := client.ListInputsPages(params, -// func(page *ListInputsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *MediaLive) ListInputsPages(input *ListInputsInput, fn func(*ListInputsOutput, bool) bool) error { - return c.ListInputsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListInputsPagesWithContext same as ListInputsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) ListInputsPagesWithContext(ctx aws.Context, input *ListInputsInput, fn func(*ListInputsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListInputsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListInputsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInputsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opStartChannel = "StartChannel" - -// StartChannelRequest generates a "aws/request.Request" representing the -// client's request for the StartChannel operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartChannel for more information on using the StartChannel -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StartChannelRequest method. -// req, resp := client.StartChannelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/StartChannel -func (c *MediaLive) StartChannelRequest(input *StartChannelInput) (req *request.Request, output *StartChannelOutput) { - op := &request.Operation{ - Name: opStartChannel, - HTTPMethod: "POST", - HTTPPath: "/prod/channels/{channelId}/start", - } - - if input == nil { - input = &StartChannelInput{} - } - - output = &StartChannelOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartChannel API operation for AWS Elemental MediaLive. -// -// Starts an existing channel -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation StartChannel for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/StartChannel -func (c *MediaLive) StartChannel(input *StartChannelInput) (*StartChannelOutput, error) { - req, out := c.StartChannelRequest(input) - return out, req.Send() -} - -// StartChannelWithContext is the same as StartChannel with the addition of -// the ability to pass a context and additional request options. -// -// See StartChannel for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) StartChannelWithContext(ctx aws.Context, input *StartChannelInput, opts ...request.Option) (*StartChannelOutput, error) { - req, out := c.StartChannelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStopChannel = "StopChannel" - -// StopChannelRequest generates a "aws/request.Request" representing the -// client's request for the StopChannel operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See StopChannel for more information on using the StopChannel -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StopChannelRequest method. -// req, resp := client.StopChannelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/StopChannel -func (c *MediaLive) StopChannelRequest(input *StopChannelInput) (req *request.Request, output *StopChannelOutput) { - op := &request.Operation{ - Name: opStopChannel, - HTTPMethod: "POST", - HTTPPath: "/prod/channels/{channelId}/stop", - } - - if input == nil { - input = &StopChannelInput{} - } - - output = &StopChannelOutput{} - req = c.newRequest(op, input, output) - return -} - -// StopChannel API operation for AWS Elemental MediaLive. -// -// Stops a running channel -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaLive's -// API operation StopChannel for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeBadGatewayException "BadGatewayException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeGatewayTimeoutException "GatewayTimeoutException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// * ErrCodeConflictException "ConflictException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/StopChannel -func (c *MediaLive) StopChannel(input *StopChannelInput) (*StopChannelOutput, error) { - req, out := c.StopChannelRequest(input) - return out, req.Send() -} - -// StopChannelWithContext is the same as StopChannel with the addition of -// the ability to pass a context and additional request options. -// -// See StopChannel for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaLive) StopChannelWithContext(ctx aws.Context, input *StopChannelInput, opts ...request.Option) (*StopChannelOutput, error) { - req, out := c.StopChannelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AacSettings -type AacSettings struct { - _ struct{} `type:"structure"` - - // Average bitrate in bits/second. Valid values depend on rate control mode - // and profile. - Bitrate *float64 `locationName:"bitrate" type:"double"` - - // Mono, Stereo, or 5.1 channel layout. Valid values depend on rate control - // mode and profile. 
The adReceiverMix setting receives a stereo description - // plus control track and emits a mono AAC encode of the description track, - // with control data emitted in the PES header as per ETSI TS 101 154 Annex - // E. - CodingMode *string `locationName:"codingMode" type:"string" enum:"AacCodingMode"` - - // Set to "broadcasterMixedAd" when input contains pre-mixed main audio + AD - // (narration) as a stereo pair. The Audio Type field (audioType) will be set - // to 3, which signals to downstream systems that this stream contains "broadcaster - // mixed AD". Note that the input received by the encoder must contain pre-mixed - // audio; the encoder does not perform the mixing. The values in audioTypeControl - // and audioType (in AudioDescription) are ignored when set to broadcasterMixedAd.Leave - // set to "normal" when input does not contain pre-mixed audio + AD. - InputType *string `locationName:"inputType" type:"string" enum:"AacInputType"` - - // AAC Profile. - Profile *string `locationName:"profile" type:"string" enum:"AacProfile"` - - // Rate Control Mode. - RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"AacRateControlMode"` - - // Sets LATM / LOAS AAC output for raw containers. - RawFormat *string `locationName:"rawFormat" type:"string" enum:"AacRawFormat"` - - // Sample rate in Hz. Valid values depend on rate control mode and profile. - SampleRate *float64 `locationName:"sampleRate" type:"double"` - - // Use MPEG-2 AAC audio instead of MPEG-4 AAC audio for raw or MPEG-2 Transport - // Stream containers. - Spec *string `locationName:"spec" type:"string" enum:"AacSpec"` - - // VBR Quality Level - Only used if rateControlMode is VBR. 
- VbrQuality *string `locationName:"vbrQuality" type:"string" enum:"AacVbrQuality"` -} - -// String returns the string representation -func (s AacSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AacSettings) GoString() string { - return s.String() -} - -// SetBitrate sets the Bitrate field's value. -func (s *AacSettings) SetBitrate(v float64) *AacSettings { - s.Bitrate = &v - return s -} - -// SetCodingMode sets the CodingMode field's value. -func (s *AacSettings) SetCodingMode(v string) *AacSettings { - s.CodingMode = &v - return s -} - -// SetInputType sets the InputType field's value. -func (s *AacSettings) SetInputType(v string) *AacSettings { - s.InputType = &v - return s -} - -// SetProfile sets the Profile field's value. -func (s *AacSettings) SetProfile(v string) *AacSettings { - s.Profile = &v - return s -} - -// SetRateControlMode sets the RateControlMode field's value. -func (s *AacSettings) SetRateControlMode(v string) *AacSettings { - s.RateControlMode = &v - return s -} - -// SetRawFormat sets the RawFormat field's value. -func (s *AacSettings) SetRawFormat(v string) *AacSettings { - s.RawFormat = &v - return s -} - -// SetSampleRate sets the SampleRate field's value. -func (s *AacSettings) SetSampleRate(v float64) *AacSettings { - s.SampleRate = &v - return s -} - -// SetSpec sets the Spec field's value. -func (s *AacSettings) SetSpec(v string) *AacSettings { - s.Spec = &v - return s -} - -// SetVbrQuality sets the VbrQuality field's value. -func (s *AacSettings) SetVbrQuality(v string) *AacSettings { - s.VbrQuality = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Ac3Settings -type Ac3Settings struct { - _ struct{} `type:"structure"` - - // Average bitrate in bits/second. Valid bitrates depend on the coding mode. 
- Bitrate *float64 `locationName:"bitrate" type:"double"` - - // Specifies the bitstream mode (bsmod) for the emitted AC-3 stream. See ATSC - // A/52-2012 for background on these values. - BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Ac3BitstreamMode"` - - // Dolby Digital coding mode. Determines number of channels. - CodingMode *string `locationName:"codingMode" type:"string" enum:"Ac3CodingMode"` - - // Sets the dialnorm for the output. If excluded and input audio is Dolby Digital, - // dialnorm will be passed through. - Dialnorm *int64 `locationName:"dialnorm" type:"integer"` - - // If set to filmStandard, adds dynamic range compression signaling to the output - // bitstream as defined in the Dolby Digital specification. - DrcProfile *string `locationName:"drcProfile" type:"string" enum:"Ac3DrcProfile"` - - // When set to enabled, applies a 120Hz lowpass filter to the LFE channel prior - // to encoding. Only valid in codingMode32Lfe mode. - LfeFilter *string `locationName:"lfeFilter" type:"string" enum:"Ac3LfeFilter"` - - // When set to "followInput", encoder metadata will be sourced from the DD, - // DD+, or DolbyE decoder that supplied this audio data. If audio was not supplied - // from one of these streams, then the static metadata settings will be used. - MetadataControl *string `locationName:"metadataControl" type:"string" enum:"Ac3MetadataControl"` -} - -// String returns the string representation -func (s Ac3Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Ac3Settings) GoString() string { - return s.String() -} - -// SetBitrate sets the Bitrate field's value. -func (s *Ac3Settings) SetBitrate(v float64) *Ac3Settings { - s.Bitrate = &v - return s -} - -// SetBitstreamMode sets the BitstreamMode field's value. 
-func (s *Ac3Settings) SetBitstreamMode(v string) *Ac3Settings { - s.BitstreamMode = &v - return s -} - -// SetCodingMode sets the CodingMode field's value. -func (s *Ac3Settings) SetCodingMode(v string) *Ac3Settings { - s.CodingMode = &v - return s -} - -// SetDialnorm sets the Dialnorm field's value. -func (s *Ac3Settings) SetDialnorm(v int64) *Ac3Settings { - s.Dialnorm = &v - return s -} - -// SetDrcProfile sets the DrcProfile field's value. -func (s *Ac3Settings) SetDrcProfile(v string) *Ac3Settings { - s.DrcProfile = &v - return s -} - -// SetLfeFilter sets the LfeFilter field's value. -func (s *Ac3Settings) SetLfeFilter(v string) *Ac3Settings { - s.LfeFilter = &v - return s -} - -// SetMetadataControl sets the MetadataControl field's value. -func (s *Ac3Settings) SetMetadataControl(v string) *Ac3Settings { - s.MetadataControl = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ArchiveContainerSettings -type ArchiveContainerSettings struct { - _ struct{} `type:"structure"` - - M2tsSettings *M2tsSettings `locationName:"m2tsSettings" type:"structure"` -} - -// String returns the string representation -func (s ArchiveContainerSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ArchiveContainerSettings) GoString() string { - return s.String() -} - -// SetM2tsSettings sets the M2tsSettings field's value. -func (s *ArchiveContainerSettings) SetM2tsSettings(v *M2tsSettings) *ArchiveContainerSettings { - s.M2tsSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ArchiveGroupSettings -type ArchiveGroupSettings struct { - _ struct{} `type:"structure"` - - // A directory and base filename where archive files should be written. If the - // base filename portion of the URI is left blank, the base filename of the - // first input will be automatically inserted. 
- Destination *OutputLocationRef `locationName:"destination" type:"structure"` - - // Number of seconds to write to archive file before closing and starting a - // new one. - RolloverInterval *int64 `locationName:"rolloverInterval" type:"integer"` -} - -// String returns the string representation -func (s ArchiveGroupSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ArchiveGroupSettings) GoString() string { - return s.String() -} - -// SetDestination sets the Destination field's value. -func (s *ArchiveGroupSettings) SetDestination(v *OutputLocationRef) *ArchiveGroupSettings { - s.Destination = v - return s -} - -// SetRolloverInterval sets the RolloverInterval field's value. -func (s *ArchiveGroupSettings) SetRolloverInterval(v int64) *ArchiveGroupSettings { - s.RolloverInterval = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ArchiveOutputSettings -type ArchiveOutputSettings struct { - _ struct{} `type:"structure"` - - // Settings specific to the container type of the file. - ContainerSettings *ArchiveContainerSettings `locationName:"containerSettings" type:"structure"` - - // Output file extension. If excluded, this will be auto-selected from the container - // type. - Extension *string `locationName:"extension" type:"string"` - - // String concatenated to the end of the destination filename. Required for - // multiple outputs of the same type. - NameModifier *string `locationName:"nameModifier" type:"string"` -} - -// String returns the string representation -func (s ArchiveOutputSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ArchiveOutputSettings) GoString() string { - return s.String() -} - -// SetContainerSettings sets the ContainerSettings field's value. 
-func (s *ArchiveOutputSettings) SetContainerSettings(v *ArchiveContainerSettings) *ArchiveOutputSettings { - s.ContainerSettings = v - return s -} - -// SetExtension sets the Extension field's value. -func (s *ArchiveOutputSettings) SetExtension(v string) *ArchiveOutputSettings { - s.Extension = &v - return s -} - -// SetNameModifier sets the NameModifier field's value. -func (s *ArchiveOutputSettings) SetNameModifier(v string) *ArchiveOutputSettings { - s.NameModifier = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AribDestinationSettings -type AribDestinationSettings struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s AribDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AribDestinationSettings) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AribSourceSettings -type AribSourceSettings struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s AribSourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AribSourceSettings) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AudioChannelMapping -type AudioChannelMapping struct { - _ struct{} `type:"structure"` - - // Indices and gain values for each input channel that should be remixed into - // this output channel. - InputChannelLevels []*InputChannelLevel `locationName:"inputChannelLevels" type:"list"` - - // The index of the output channel being produced. 
- OutputChannel *int64 `locationName:"outputChannel" type:"integer"` -} - -// String returns the string representation -func (s AudioChannelMapping) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioChannelMapping) GoString() string { - return s.String() -} - -// SetInputChannelLevels sets the InputChannelLevels field's value. -func (s *AudioChannelMapping) SetInputChannelLevels(v []*InputChannelLevel) *AudioChannelMapping { - s.InputChannelLevels = v - return s -} - -// SetOutputChannel sets the OutputChannel field's value. -func (s *AudioChannelMapping) SetOutputChannel(v int64) *AudioChannelMapping { - s.OutputChannel = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AudioCodecSettings -type AudioCodecSettings struct { - _ struct{} `type:"structure"` - - AacSettings *AacSettings `locationName:"aacSettings" type:"structure"` - - Ac3Settings *Ac3Settings `locationName:"ac3Settings" type:"structure"` - - Eac3Settings *Eac3Settings `locationName:"eac3Settings" type:"structure"` - - Mp2Settings *Mp2Settings `locationName:"mp2Settings" type:"structure"` - - PassThroughSettings *PassThroughSettings `locationName:"passThroughSettings" type:"structure"` -} - -// String returns the string representation -func (s AudioCodecSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioCodecSettings) GoString() string { - return s.String() -} - -// SetAacSettings sets the AacSettings field's value. -func (s *AudioCodecSettings) SetAacSettings(v *AacSettings) *AudioCodecSettings { - s.AacSettings = v - return s -} - -// SetAc3Settings sets the Ac3Settings field's value. -func (s *AudioCodecSettings) SetAc3Settings(v *Ac3Settings) *AudioCodecSettings { - s.Ac3Settings = v - return s -} - -// SetEac3Settings sets the Eac3Settings field's value. 
-func (s *AudioCodecSettings) SetEac3Settings(v *Eac3Settings) *AudioCodecSettings { - s.Eac3Settings = v - return s -} - -// SetMp2Settings sets the Mp2Settings field's value. -func (s *AudioCodecSettings) SetMp2Settings(v *Mp2Settings) *AudioCodecSettings { - s.Mp2Settings = v - return s -} - -// SetPassThroughSettings sets the PassThroughSettings field's value. -func (s *AudioCodecSettings) SetPassThroughSettings(v *PassThroughSettings) *AudioCodecSettings { - s.PassThroughSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AudioDescription -type AudioDescription struct { - _ struct{} `type:"structure"` - - // Advanced audio normalization settings. - AudioNormalizationSettings *AudioNormalizationSettings `locationName:"audioNormalizationSettings" type:"structure"` - - // The name of the AudioSelector used as the source for this AudioDescription. - AudioSelectorName *string `locationName:"audioSelectorName" type:"string"` - - // Applies only if audioTypeControl is useConfigured. The values for audioType - // are defined in ISO-IEC 13818-1. - AudioType *string `locationName:"audioType" type:"string" enum:"AudioType"` - - // Determines how audio type is determined. followInput: If the input contains - // an ISO 639 audioType, then that value is passed through to the output. If - // the input contains no ISO 639 audioType, the value in Audio Type is included - // in the output. useConfigured: The value in Audio Type is included in the - // output.Note that this field and audioType are both ignored if inputType is - // broadcasterMixedAd. - AudioTypeControl *string `locationName:"audioTypeControl" type:"string" enum:"AudioDescriptionAudioTypeControl"` - - // Audio codec settings. - CodecSettings *AudioCodecSettings `locationName:"codecSettings" type:"structure"` - - // Indicates the language of the audio output track. 
Only used if languageControlMode - // is useConfigured, or there is no ISO 639 language code specified in the input. - LanguageCode *string `locationName:"languageCode" type:"string"` - - // Choosing followInput will cause the ISO 639 language code of the output to - // follow the ISO 639 language code of the input. The languageCode will be used - // when useConfigured is set, or when followInput is selected but there is no - // ISO 639 language code specified by the input. - LanguageCodeControl *string `locationName:"languageCodeControl" type:"string" enum:"AudioDescriptionLanguageCodeControl"` - - // The name of this AudioDescription. Outputs will use this name to uniquely - // identify this AudioDescription. Description names should be unique within - // this Live Event. - Name *string `locationName:"name" type:"string"` - - // Settings that control how input audio channels are remixed into the output - // audio channels. - RemixSettings *RemixSettings `locationName:"remixSettings" type:"structure"` - - // Used for MS Smooth and Apple HLS outputs. Indicates the name displayed by - // the player (eg. English, or Director Commentary). - StreamName *string `locationName:"streamName" type:"string"` -} - -// String returns the string representation -func (s AudioDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioDescription) GoString() string { - return s.String() -} - -// SetAudioNormalizationSettings sets the AudioNormalizationSettings field's value. -func (s *AudioDescription) SetAudioNormalizationSettings(v *AudioNormalizationSettings) *AudioDescription { - s.AudioNormalizationSettings = v - return s -} - -// SetAudioSelectorName sets the AudioSelectorName field's value. -func (s *AudioDescription) SetAudioSelectorName(v string) *AudioDescription { - s.AudioSelectorName = &v - return s -} - -// SetAudioType sets the AudioType field's value. 
-func (s *AudioDescription) SetAudioType(v string) *AudioDescription { - s.AudioType = &v - return s -} - -// SetAudioTypeControl sets the AudioTypeControl field's value. -func (s *AudioDescription) SetAudioTypeControl(v string) *AudioDescription { - s.AudioTypeControl = &v - return s -} - -// SetCodecSettings sets the CodecSettings field's value. -func (s *AudioDescription) SetCodecSettings(v *AudioCodecSettings) *AudioDescription { - s.CodecSettings = v - return s -} - -// SetLanguageCode sets the LanguageCode field's value. -func (s *AudioDescription) SetLanguageCode(v string) *AudioDescription { - s.LanguageCode = &v - return s -} - -// SetLanguageCodeControl sets the LanguageCodeControl field's value. -func (s *AudioDescription) SetLanguageCodeControl(v string) *AudioDescription { - s.LanguageCodeControl = &v - return s -} - -// SetName sets the Name field's value. -func (s *AudioDescription) SetName(v string) *AudioDescription { - s.Name = &v - return s -} - -// SetRemixSettings sets the RemixSettings field's value. -func (s *AudioDescription) SetRemixSettings(v *RemixSettings) *AudioDescription { - s.RemixSettings = v - return s -} - -// SetStreamName sets the StreamName field's value. -func (s *AudioDescription) SetStreamName(v string) *AudioDescription { - s.StreamName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AudioLanguageSelection -type AudioLanguageSelection struct { - _ struct{} `type:"structure"` - - // Selects a specific three-letter language code from within an audio source. - LanguageCode *string `locationName:"languageCode" type:"string"` - - // When set to "strict", the transport stream demux strictly identifies audio - // streams by their language descriptor. If a PMT update occurs such that an - // audio stream matching the initially selected language is no longer present - // then mute will be encoded until the language returns. 
If "loose", then on - // a PMT update the demux will choose another audio stream in the program with - // the same stream type if it can't find one with the same language. - LanguageSelectionPolicy *string `locationName:"languageSelectionPolicy" type:"string" enum:"AudioLanguageSelectionPolicy"` -} - -// String returns the string representation -func (s AudioLanguageSelection) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioLanguageSelection) GoString() string { - return s.String() -} - -// SetLanguageCode sets the LanguageCode field's value. -func (s *AudioLanguageSelection) SetLanguageCode(v string) *AudioLanguageSelection { - s.LanguageCode = &v - return s -} - -// SetLanguageSelectionPolicy sets the LanguageSelectionPolicy field's value. -func (s *AudioLanguageSelection) SetLanguageSelectionPolicy(v string) *AudioLanguageSelection { - s.LanguageSelectionPolicy = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AudioNormalizationSettings -type AudioNormalizationSettings struct { - _ struct{} `type:"structure"` - - // Audio normalization algorithm to use. itu17701 conforms to the CALM Act specification, - // itu17702 conforms to the EBU R-128 specification. - Algorithm *string `locationName:"algorithm" type:"string" enum:"AudioNormalizationAlgorithm"` - - // When set to correctAudio the output audio is corrected using the chosen algorithm. - // If set to measureOnly, the audio will be measured but not adjusted. - AlgorithmControl *string `locationName:"algorithmControl" type:"string" enum:"AudioNormalizationAlgorithmControl"` - - // Target LKFS(loudness) to adjust volume to. If no value is entered, a default - // value will be used according to the chosen algorithm. The CALM Act (1770-1) - // recommends a target of -24 LKFS. The EBU R-128 specification (1770-2) recommends - // a target of -23 LKFS. 
- TargetLkfs *float64 `locationName:"targetLkfs" type:"double"` -} - -// String returns the string representation -func (s AudioNormalizationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioNormalizationSettings) GoString() string { - return s.String() -} - -// SetAlgorithm sets the Algorithm field's value. -func (s *AudioNormalizationSettings) SetAlgorithm(v string) *AudioNormalizationSettings { - s.Algorithm = &v - return s -} - -// SetAlgorithmControl sets the AlgorithmControl field's value. -func (s *AudioNormalizationSettings) SetAlgorithmControl(v string) *AudioNormalizationSettings { - s.AlgorithmControl = &v - return s -} - -// SetTargetLkfs sets the TargetLkfs field's value. -func (s *AudioNormalizationSettings) SetTargetLkfs(v float64) *AudioNormalizationSettings { - s.TargetLkfs = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AudioOnlyHlsSettings -type AudioOnlyHlsSettings struct { - _ struct{} `type:"structure"` - - // Specifies the group to which the audio Rendition belongs. - AudioGroupId *string `locationName:"audioGroupId" type:"string"` - - // For use with an audio only Stream. Must be a .jpg or .png file. If given, - // this image will be used as the cover-art for the audio only output. Ideally, - // it should be formatted for an iPhone screen for two reasons. The iPhone does - // not resize the image, it crops a centered image on the top/bottom and left/right. - // Additionally, this image file gets saved bit-for-bit into every 10-second - // segment file, so will increase bandwidth by {image file size} * {segment - // count} * {user count.}. - AudioOnlyImage *InputLocation `locationName:"audioOnlyImage" type:"structure"` - - // Four types of audio-only tracks are supported:Audio-Only Variant StreamThe - // client can play back this audio-only stream instead of video in low-bandwidth - // scenarios. 
Represented as an EXT-X-STREAM-INF in the HLS manifest.Alternate - // Audio, Auto Select, DefaultAlternate rendition that the client should try - // to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest - // with DEFAULT=YES, AUTOSELECT=YESAlternate Audio, Auto Select, Not DefaultAlternate - // rendition that the client may try to play back by default. Represented as - // an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=YESAlternate - // Audio, not Auto SelectAlternate rendition that the client will not try to - // play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with - // DEFAULT=NO, AUTOSELECT=NO - AudioTrackType *string `locationName:"audioTrackType" type:"string" enum:"AudioOnlyHlsTrackType"` -} - -// String returns the string representation -func (s AudioOnlyHlsSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioOnlyHlsSettings) GoString() string { - return s.String() -} - -// SetAudioGroupId sets the AudioGroupId field's value. -func (s *AudioOnlyHlsSettings) SetAudioGroupId(v string) *AudioOnlyHlsSettings { - s.AudioGroupId = &v - return s -} - -// SetAudioOnlyImage sets the AudioOnlyImage field's value. -func (s *AudioOnlyHlsSettings) SetAudioOnlyImage(v *InputLocation) *AudioOnlyHlsSettings { - s.AudioOnlyImage = v - return s -} - -// SetAudioTrackType sets the AudioTrackType field's value. -func (s *AudioOnlyHlsSettings) SetAudioTrackType(v string) *AudioOnlyHlsSettings { - s.AudioTrackType = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AudioPidSelection -type AudioPidSelection struct { - _ struct{} `type:"structure"` - - // Selects a specific PID from within a source. 
- Pid *int64 `locationName:"pid" type:"integer"` -} - -// String returns the string representation -func (s AudioPidSelection) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioPidSelection) GoString() string { - return s.String() -} - -// SetPid sets the Pid field's value. -func (s *AudioPidSelection) SetPid(v int64) *AudioPidSelection { - s.Pid = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AudioSelector -type AudioSelector struct { - _ struct{} `type:"structure"` - - // The name of this AudioSelector. AudioDescriptions will use this name to uniquely - // identify this Selector. Selector names should be unique per input. - Name *string `locationName:"name" type:"string"` - - // The audio selector settings. - SelectorSettings *AudioSelectorSettings `locationName:"selectorSettings" type:"structure"` -} - -// String returns the string representation -func (s AudioSelector) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioSelector) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *AudioSelector) SetName(v string) *AudioSelector { - s.Name = &v - return s -} - -// SetSelectorSettings sets the SelectorSettings field's value. 
-func (s *AudioSelector) SetSelectorSettings(v *AudioSelectorSettings) *AudioSelector { - s.SelectorSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AudioSelectorSettings -type AudioSelectorSettings struct { - _ struct{} `type:"structure"` - - AudioLanguageSelection *AudioLanguageSelection `locationName:"audioLanguageSelection" type:"structure"` - - AudioPidSelection *AudioPidSelection `locationName:"audioPidSelection" type:"structure"` -} - -// String returns the string representation -func (s AudioSelectorSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AudioSelectorSettings) GoString() string { - return s.String() -} - -// SetAudioLanguageSelection sets the AudioLanguageSelection field's value. -func (s *AudioSelectorSettings) SetAudioLanguageSelection(v *AudioLanguageSelection) *AudioSelectorSettings { - s.AudioLanguageSelection = v - return s -} - -// SetAudioPidSelection sets the AudioPidSelection field's value. -func (s *AudioSelectorSettings) SetAudioPidSelection(v *AudioPidSelection) *AudioSelectorSettings { - s.AudioPidSelection = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AvailBlanking -type AvailBlanking struct { - _ struct{} `type:"structure"` - - // Blanking image to be used. Leave empty for solid black. Only bmp and png - // images are supported. - AvailBlankingImage *InputLocation `locationName:"availBlankingImage" type:"structure"` - - // When set to enabled, causes video, audio and captions to be blanked when - // insertion metadata is added. 
- State *string `locationName:"state" type:"string" enum:"AvailBlankingState"` -} - -// String returns the string representation -func (s AvailBlanking) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AvailBlanking) GoString() string { - return s.String() -} - -// SetAvailBlankingImage sets the AvailBlankingImage field's value. -func (s *AvailBlanking) SetAvailBlankingImage(v *InputLocation) *AvailBlanking { - s.AvailBlankingImage = v - return s -} - -// SetState sets the State field's value. -func (s *AvailBlanking) SetState(v string) *AvailBlanking { - s.State = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AvailConfiguration -type AvailConfiguration struct { - _ struct{} `type:"structure"` - - // Ad avail settings. - AvailSettings *AvailSettings `locationName:"availSettings" type:"structure"` -} - -// String returns the string representation -func (s AvailConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AvailConfiguration) GoString() string { - return s.String() -} - -// SetAvailSettings sets the AvailSettings field's value. 
-func (s *AvailConfiguration) SetAvailSettings(v *AvailSettings) *AvailConfiguration { - s.AvailSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/AvailSettings -type AvailSettings struct { - _ struct{} `type:"structure"` - - Scte35SpliceInsert *Scte35SpliceInsert `locationName:"scte35SpliceInsert" type:"structure"` - - Scte35TimeSignalApos *Scte35TimeSignalApos `locationName:"scte35TimeSignalApos" type:"structure"` -} - -// String returns the string representation -func (s AvailSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AvailSettings) GoString() string { - return s.String() -} - -// SetScte35SpliceInsert sets the Scte35SpliceInsert field's value. -func (s *AvailSettings) SetScte35SpliceInsert(v *Scte35SpliceInsert) *AvailSettings { - s.Scte35SpliceInsert = v - return s -} - -// SetScte35TimeSignalApos sets the Scte35TimeSignalApos field's value. -func (s *AvailSettings) SetScte35TimeSignalApos(v *Scte35TimeSignalApos) *AvailSettings { - s.Scte35TimeSignalApos = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/BlackoutSlate -type BlackoutSlate struct { - _ struct{} `type:"structure"` - - // Blackout slate image to be used. Leave empty for solid black. Only bmp and - // png images are supported. - BlackoutSlateImage *InputLocation `locationName:"blackoutSlateImage" type:"structure"` - - // Setting to enabled causes the encoder to blackout the video, audio, and captions, - // and raise the "Network Blackout Image" slate when an SCTE104/35 Network End - // Segmentation Descriptor is encountered. The blackout will be lifted when - // the Network Start Segmentation Descriptor is encountered. The Network End - // and Network Start descriptors must contain a network ID that matches the - // value entered in "Network ID". 
- NetworkEndBlackout *string `locationName:"networkEndBlackout" type:"string" enum:"BlackoutSlateNetworkEndBlackout"` - - // Path to local file to use as Network End Blackout image. Image will be scaled - // to fill the entire output raster. - NetworkEndBlackoutImage *InputLocation `locationName:"networkEndBlackoutImage" type:"structure"` - - // Provides Network ID that matches EIDR ID format (e.g., "10.XXXX/XXXX-XXXX-XXXX-XXXX-XXXX-C"). - NetworkId *string `locationName:"networkId" type:"string"` - - // When set to enabled, causes video, audio and captions to be blanked when - // indicated by program metadata. - State *string `locationName:"state" type:"string" enum:"BlackoutSlateState"` -} - -// String returns the string representation -func (s BlackoutSlate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BlackoutSlate) GoString() string { - return s.String() -} - -// SetBlackoutSlateImage sets the BlackoutSlateImage field's value. -func (s *BlackoutSlate) SetBlackoutSlateImage(v *InputLocation) *BlackoutSlate { - s.BlackoutSlateImage = v - return s -} - -// SetNetworkEndBlackout sets the NetworkEndBlackout field's value. -func (s *BlackoutSlate) SetNetworkEndBlackout(v string) *BlackoutSlate { - s.NetworkEndBlackout = &v - return s -} - -// SetNetworkEndBlackoutImage sets the NetworkEndBlackoutImage field's value. -func (s *BlackoutSlate) SetNetworkEndBlackoutImage(v *InputLocation) *BlackoutSlate { - s.NetworkEndBlackoutImage = v - return s -} - -// SetNetworkId sets the NetworkId field's value. -func (s *BlackoutSlate) SetNetworkId(v string) *BlackoutSlate { - s.NetworkId = &v - return s -} - -// SetState sets the State field's value. 
-func (s *BlackoutSlate) SetState(v string) *BlackoutSlate { - s.State = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/BurnInDestinationSettings -type BurnInDestinationSettings struct { - _ struct{} `type:"structure"` - - // If no explicit xPosition or yPosition is provided, setting alignment to centered - // will place the captions at the bottom center of the output. Similarly, setting - // a left alignment will align captions to the bottom left of the output. If - // x and y positions are given in conjunction with the alignment parameter, - // the font will be justified (either left or centered) relative to those coordinates. - // Selecting "smart" justification will left-justify live subtitles and center-justify - // pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. - Alignment *string `locationName:"alignment" type:"string" enum:"BurnInAlignment"` - - // Specifies the color of the rectangle behind the captions. All burn-in and - // DVB-Sub font settings must match. - BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"BurnInBackgroundColor"` - - // Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. - // Leaving this parameter out is equivalent to setting it to 0 (transparent). - // All burn-in and DVB-Sub font settings must match. - BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"` - - // External font file used for caption burn-in. File extension must be 'ttf' - // or 'tte'. Although the user can select output fonts for many different types - // of input captions, embedded, STL and teletext sources use a strict grid system. - // Using external fonts with these caption sources could cause unexpected display - // of proportional fonts. All burn-in and DVB-Sub font settings must match. - Font *InputLocation `locationName:"font" type:"structure"` - - // Specifies the color of the burned-in captions. 
This option is not valid for - // source captions that are STL, 608/embedded or teletext. These source settings - // are already pre-defined by the caption stream. All burn-in and DVB-Sub font - // settings must match. - FontColor *string `locationName:"fontColor" type:"string" enum:"BurnInFontColor"` - - // Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent. - // All burn-in and DVB-Sub font settings must match. - FontOpacity *int64 `locationName:"fontOpacity" type:"integer"` - - // Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and - // DVB-Sub font settings must match. - FontResolution *int64 `locationName:"fontResolution" type:"integer"` - - // When set to 'auto' fontSize will scale depending on the size of the output. - // Giving a positive integer will specify the exact font size in points. All - // burn-in and DVB-Sub font settings must match. - FontSize *string `locationName:"fontSize" type:"string"` - - // Specifies font outline color. This option is not valid for source captions - // that are either 608/embedded or teletext. These source settings are already - // pre-defined by the caption stream. All burn-in and DVB-Sub font settings - // must match. - OutlineColor *string `locationName:"outlineColor" type:"string" enum:"BurnInOutlineColor"` - - // Specifies font outline size in pixels. This option is not valid for source - // captions that are either 608/embedded or teletext. These source settings - // are already pre-defined by the caption stream. All burn-in and DVB-Sub font - // settings must match. - OutlineSize *int64 `locationName:"outlineSize" type:"integer"` - - // Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub - // font settings must match. - ShadowColor *string `locationName:"shadowColor" type:"string" enum:"BurnInShadowColor"` - - // Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. 
Leaving - // this parameter out is equivalent to setting it to 0 (transparent). All burn-in - // and DVB-Sub font settings must match. - ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"` - - // Specifies the horizontal offset of the shadow relative to the captions in - // pixels. A value of -2 would result in a shadow offset 2 pixels to the left. - // All burn-in and DVB-Sub font settings must match. - ShadowXOffset *int64 `locationName:"shadowXOffset" type:"integer"` - - // Specifies the vertical offset of the shadow relative to the captions in pixels. - // A value of -2 would result in a shadow offset 2 pixels above the text. All - // burn-in and DVB-Sub font settings must match. - ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"` - - // Controls whether a fixed grid size will be used to generate the output subtitles - // bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs. - TeletextGridControl *string `locationName:"teletextGridControl" type:"string" enum:"BurnInTeletextGridControl"` - - // Specifies the horizontal position of the caption relative to the left side - // of the output in pixels. A value of 10 would result in the captions starting - // 10 pixels from the left of the output. If no explicit xPosition is provided, - // the horizontal caption position will be determined by the alignment parameter. - // All burn-in and DVB-Sub font settings must match. - XPosition *int64 `locationName:"xPosition" type:"integer"` - - // Specifies the vertical position of the caption relative to the top of the - // output in pixels. A value of 10 would result in the captions starting 10 - // pixels from the top of the output. If no explicit yPosition is provided, - // the caption will be positioned towards the bottom of the output. All burn-in - // and DVB-Sub font settings must match. 
- YPosition *int64 `locationName:"yPosition" type:"integer"` -} - -// String returns the string representation -func (s BurnInDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BurnInDestinationSettings) GoString() string { - return s.String() -} - -// SetAlignment sets the Alignment field's value. -func (s *BurnInDestinationSettings) SetAlignment(v string) *BurnInDestinationSettings { - s.Alignment = &v - return s -} - -// SetBackgroundColor sets the BackgroundColor field's value. -func (s *BurnInDestinationSettings) SetBackgroundColor(v string) *BurnInDestinationSettings { - s.BackgroundColor = &v - return s -} - -// SetBackgroundOpacity sets the BackgroundOpacity field's value. -func (s *BurnInDestinationSettings) SetBackgroundOpacity(v int64) *BurnInDestinationSettings { - s.BackgroundOpacity = &v - return s -} - -// SetFont sets the Font field's value. -func (s *BurnInDestinationSettings) SetFont(v *InputLocation) *BurnInDestinationSettings { - s.Font = v - return s -} - -// SetFontColor sets the FontColor field's value. -func (s *BurnInDestinationSettings) SetFontColor(v string) *BurnInDestinationSettings { - s.FontColor = &v - return s -} - -// SetFontOpacity sets the FontOpacity field's value. -func (s *BurnInDestinationSettings) SetFontOpacity(v int64) *BurnInDestinationSettings { - s.FontOpacity = &v - return s -} - -// SetFontResolution sets the FontResolution field's value. -func (s *BurnInDestinationSettings) SetFontResolution(v int64) *BurnInDestinationSettings { - s.FontResolution = &v - return s -} - -// SetFontSize sets the FontSize field's value. -func (s *BurnInDestinationSettings) SetFontSize(v string) *BurnInDestinationSettings { - s.FontSize = &v - return s -} - -// SetOutlineColor sets the OutlineColor field's value. 
-func (s *BurnInDestinationSettings) SetOutlineColor(v string) *BurnInDestinationSettings { - s.OutlineColor = &v - return s -} - -// SetOutlineSize sets the OutlineSize field's value. -func (s *BurnInDestinationSettings) SetOutlineSize(v int64) *BurnInDestinationSettings { - s.OutlineSize = &v - return s -} - -// SetShadowColor sets the ShadowColor field's value. -func (s *BurnInDestinationSettings) SetShadowColor(v string) *BurnInDestinationSettings { - s.ShadowColor = &v - return s -} - -// SetShadowOpacity sets the ShadowOpacity field's value. -func (s *BurnInDestinationSettings) SetShadowOpacity(v int64) *BurnInDestinationSettings { - s.ShadowOpacity = &v - return s -} - -// SetShadowXOffset sets the ShadowXOffset field's value. -func (s *BurnInDestinationSettings) SetShadowXOffset(v int64) *BurnInDestinationSettings { - s.ShadowXOffset = &v - return s -} - -// SetShadowYOffset sets the ShadowYOffset field's value. -func (s *BurnInDestinationSettings) SetShadowYOffset(v int64) *BurnInDestinationSettings { - s.ShadowYOffset = &v - return s -} - -// SetTeletextGridControl sets the TeletextGridControl field's value. -func (s *BurnInDestinationSettings) SetTeletextGridControl(v string) *BurnInDestinationSettings { - s.TeletextGridControl = &v - return s -} - -// SetXPosition sets the XPosition field's value. -func (s *BurnInDestinationSettings) SetXPosition(v int64) *BurnInDestinationSettings { - s.XPosition = &v - return s -} - -// SetYPosition sets the YPosition field's value. -func (s *BurnInDestinationSettings) SetYPosition(v int64) *BurnInDestinationSettings { - s.YPosition = &v - return s -} - -// Output groups for this Live Event. Output groups contain information about -// where streams should be distributed. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CaptionDescription -type CaptionDescription struct { - _ struct{} `type:"structure"` - - // Specifies which input caption selector to use as a caption source when generating - // output captions. This field should match a captionSelector name. - CaptionSelectorName *string `locationName:"captionSelectorName" type:"string"` - - // Additional settings for captions destination that depend on the destination - // type. - DestinationSettings *CaptionDestinationSettings `locationName:"destinationSettings" type:"structure"` - - // ISO 639-2 three-digit code: http://www.loc.gov/standards/iso639-2/ - LanguageCode *string `locationName:"languageCode" type:"string"` - - // Human readable information to indicate captions available for players (eg. - // English, or Spanish). - LanguageDescription *string `locationName:"languageDescription" type:"string"` - - // Name of the caption description. Used to associate a caption description - // with an output. Names must be unique within an event. - Name *string `locationName:"name" type:"string"` -} - -// String returns the string representation -func (s CaptionDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CaptionDescription) GoString() string { - return s.String() -} - -// SetCaptionSelectorName sets the CaptionSelectorName field's value. -func (s *CaptionDescription) SetCaptionSelectorName(v string) *CaptionDescription { - s.CaptionSelectorName = &v - return s -} - -// SetDestinationSettings sets the DestinationSettings field's value. -func (s *CaptionDescription) SetDestinationSettings(v *CaptionDestinationSettings) *CaptionDescription { - s.DestinationSettings = v - return s -} - -// SetLanguageCode sets the LanguageCode field's value. 
-func (s *CaptionDescription) SetLanguageCode(v string) *CaptionDescription { - s.LanguageCode = &v - return s -} - -// SetLanguageDescription sets the LanguageDescription field's value. -func (s *CaptionDescription) SetLanguageDescription(v string) *CaptionDescription { - s.LanguageDescription = &v - return s -} - -// SetName sets the Name field's value. -func (s *CaptionDescription) SetName(v string) *CaptionDescription { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CaptionDestinationSettings -type CaptionDestinationSettings struct { - _ struct{} `type:"structure"` - - AribDestinationSettings *AribDestinationSettings `locationName:"aribDestinationSettings" type:"structure"` - - BurnInDestinationSettings *BurnInDestinationSettings `locationName:"burnInDestinationSettings" type:"structure"` - - DvbSubDestinationSettings *DvbSubDestinationSettings `locationName:"dvbSubDestinationSettings" type:"structure"` - - EmbeddedDestinationSettings *EmbeddedDestinationSettings `locationName:"embeddedDestinationSettings" type:"structure"` - - EmbeddedPlusScte20DestinationSettings *EmbeddedPlusScte20DestinationSettings `locationName:"embeddedPlusScte20DestinationSettings" type:"structure"` - - Scte20PlusEmbeddedDestinationSettings *Scte20PlusEmbeddedDestinationSettings `locationName:"scte20PlusEmbeddedDestinationSettings" type:"structure"` - - Scte27DestinationSettings *Scte27DestinationSettings `locationName:"scte27DestinationSettings" type:"structure"` - - SmpteTtDestinationSettings *SmpteTtDestinationSettings `locationName:"smpteTtDestinationSettings" type:"structure"` - - TeletextDestinationSettings *TeletextDestinationSettings `locationName:"teletextDestinationSettings" type:"structure"` - - TtmlDestinationSettings *TtmlDestinationSettings `locationName:"ttmlDestinationSettings" type:"structure"` - - WebvttDestinationSettings *WebvttDestinationSettings `locationName:"webvttDestinationSettings" type:"structure"` -} 
- -// String returns the string representation -func (s CaptionDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CaptionDestinationSettings) GoString() string { - return s.String() -} - -// SetAribDestinationSettings sets the AribDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetAribDestinationSettings(v *AribDestinationSettings) *CaptionDestinationSettings { - s.AribDestinationSettings = v - return s -} - -// SetBurnInDestinationSettings sets the BurnInDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetBurnInDestinationSettings(v *BurnInDestinationSettings) *CaptionDestinationSettings { - s.BurnInDestinationSettings = v - return s -} - -// SetDvbSubDestinationSettings sets the DvbSubDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetDvbSubDestinationSettings(v *DvbSubDestinationSettings) *CaptionDestinationSettings { - s.DvbSubDestinationSettings = v - return s -} - -// SetEmbeddedDestinationSettings sets the EmbeddedDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetEmbeddedDestinationSettings(v *EmbeddedDestinationSettings) *CaptionDestinationSettings { - s.EmbeddedDestinationSettings = v - return s -} - -// SetEmbeddedPlusScte20DestinationSettings sets the EmbeddedPlusScte20DestinationSettings field's value. -func (s *CaptionDestinationSettings) SetEmbeddedPlusScte20DestinationSettings(v *EmbeddedPlusScte20DestinationSettings) *CaptionDestinationSettings { - s.EmbeddedPlusScte20DestinationSettings = v - return s -} - -// SetScte20PlusEmbeddedDestinationSettings sets the Scte20PlusEmbeddedDestinationSettings field's value. 
-func (s *CaptionDestinationSettings) SetScte20PlusEmbeddedDestinationSettings(v *Scte20PlusEmbeddedDestinationSettings) *CaptionDestinationSettings { - s.Scte20PlusEmbeddedDestinationSettings = v - return s -} - -// SetScte27DestinationSettings sets the Scte27DestinationSettings field's value. -func (s *CaptionDestinationSettings) SetScte27DestinationSettings(v *Scte27DestinationSettings) *CaptionDestinationSettings { - s.Scte27DestinationSettings = v - return s -} - -// SetSmpteTtDestinationSettings sets the SmpteTtDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetSmpteTtDestinationSettings(v *SmpteTtDestinationSettings) *CaptionDestinationSettings { - s.SmpteTtDestinationSettings = v - return s -} - -// SetTeletextDestinationSettings sets the TeletextDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetTeletextDestinationSettings(v *TeletextDestinationSettings) *CaptionDestinationSettings { - s.TeletextDestinationSettings = v - return s -} - -// SetTtmlDestinationSettings sets the TtmlDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetTtmlDestinationSettings(v *TtmlDestinationSettings) *CaptionDestinationSettings { - s.TtmlDestinationSettings = v - return s -} - -// SetWebvttDestinationSettings sets the WebvttDestinationSettings field's value. -func (s *CaptionDestinationSettings) SetWebvttDestinationSettings(v *WebvttDestinationSettings) *CaptionDestinationSettings { - s.WebvttDestinationSettings = v - return s -} - -// Maps a caption channel to an ISO 693-2 language code (http://www.loc.gov/standards/iso639-2), -// with an optional description. -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CaptionLanguageMapping -type CaptionLanguageMapping struct { - _ struct{} `type:"structure"` - - // Channel to insert closed captions. 
Each channel mapping must have a unique - // channel number (maximum of 4) - CaptionChannel *int64 `locationName:"captionChannel" type:"integer"` - - // Three character ISO 639-2 language code (see http://www.loc.gov/standards/iso639-2) - LanguageCode *string `locationName:"languageCode" type:"string"` - - // Textual description of language - LanguageDescription *string `locationName:"languageDescription" type:"string"` -} - -// String returns the string representation -func (s CaptionLanguageMapping) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CaptionLanguageMapping) GoString() string { - return s.String() -} - -// SetCaptionChannel sets the CaptionChannel field's value. -func (s *CaptionLanguageMapping) SetCaptionChannel(v int64) *CaptionLanguageMapping { - s.CaptionChannel = &v - return s -} - -// SetLanguageCode sets the LanguageCode field's value. -func (s *CaptionLanguageMapping) SetLanguageCode(v string) *CaptionLanguageMapping { - s.LanguageCode = &v - return s -} - -// SetLanguageDescription sets the LanguageDescription field's value. -func (s *CaptionLanguageMapping) SetLanguageDescription(v string) *CaptionLanguageMapping { - s.LanguageDescription = &v - return s -} - -// Output groups for this Live Event. Output groups contain information about -// where streams should be distributed. -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CaptionSelector -type CaptionSelector struct { - _ struct{} `type:"structure"` - - // When specified this field indicates the three letter language code of the - // caption track to extract from the source. - LanguageCode *string `locationName:"languageCode" type:"string"` - - // Name identifier for a caption selector. This name is used to associate this - // caption selector with one or more caption descriptions. Names must be unique - // within an event. 
- Name *string `locationName:"name" type:"string"` - - // Caption selector settings. - SelectorSettings *CaptionSelectorSettings `locationName:"selectorSettings" type:"structure"` -} - -// String returns the string representation -func (s CaptionSelector) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CaptionSelector) GoString() string { - return s.String() -} - -// SetLanguageCode sets the LanguageCode field's value. -func (s *CaptionSelector) SetLanguageCode(v string) *CaptionSelector { - s.LanguageCode = &v - return s -} - -// SetName sets the Name field's value. -func (s *CaptionSelector) SetName(v string) *CaptionSelector { - s.Name = &v - return s -} - -// SetSelectorSettings sets the SelectorSettings field's value. -func (s *CaptionSelector) SetSelectorSettings(v *CaptionSelectorSettings) *CaptionSelector { - s.SelectorSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CaptionSelectorSettings -type CaptionSelectorSettings struct { - _ struct{} `type:"structure"` - - AribSourceSettings *AribSourceSettings `locationName:"aribSourceSettings" type:"structure"` - - DvbSubSourceSettings *DvbSubSourceSettings `locationName:"dvbSubSourceSettings" type:"structure"` - - EmbeddedSourceSettings *EmbeddedSourceSettings `locationName:"embeddedSourceSettings" type:"structure"` - - Scte20SourceSettings *Scte20SourceSettings `locationName:"scte20SourceSettings" type:"structure"` - - Scte27SourceSettings *Scte27SourceSettings `locationName:"scte27SourceSettings" type:"structure"` - - TeletextSourceSettings *TeletextSourceSettings `locationName:"teletextSourceSettings" type:"structure"` -} - -// String returns the string representation -func (s CaptionSelectorSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CaptionSelectorSettings) GoString() string { - return s.String() -} - -// 
SetAribSourceSettings sets the AribSourceSettings field's value. -func (s *CaptionSelectorSettings) SetAribSourceSettings(v *AribSourceSettings) *CaptionSelectorSettings { - s.AribSourceSettings = v - return s -} - -// SetDvbSubSourceSettings sets the DvbSubSourceSettings field's value. -func (s *CaptionSelectorSettings) SetDvbSubSourceSettings(v *DvbSubSourceSettings) *CaptionSelectorSettings { - s.DvbSubSourceSettings = v - return s -} - -// SetEmbeddedSourceSettings sets the EmbeddedSourceSettings field's value. -func (s *CaptionSelectorSettings) SetEmbeddedSourceSettings(v *EmbeddedSourceSettings) *CaptionSelectorSettings { - s.EmbeddedSourceSettings = v - return s -} - -// SetScte20SourceSettings sets the Scte20SourceSettings field's value. -func (s *CaptionSelectorSettings) SetScte20SourceSettings(v *Scte20SourceSettings) *CaptionSelectorSettings { - s.Scte20SourceSettings = v - return s -} - -// SetScte27SourceSettings sets the Scte27SourceSettings field's value. -func (s *CaptionSelectorSettings) SetScte27SourceSettings(v *Scte27SourceSettings) *CaptionSelectorSettings { - s.Scte27SourceSettings = v - return s -} - -// SetTeletextSourceSettings sets the TeletextSourceSettings field's value. -func (s *CaptionSelectorSettings) SetTeletextSourceSettings(v *TeletextSourceSettings) *CaptionSelectorSettings { - s.TeletextSourceSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Channel -type Channel struct { - _ struct{} `type:"structure"` - - // The unique arn of the channel. - Arn *string `locationName:"arn" type:"string"` - - // A list of destinations of the channel. For UDP outputs, there is onedestination - // per output. For other types (HLS, for example), there isone destination per - // packager. 
- Destinations []*OutputDestination `locationName:"destinations" type:"list"` - - // The endpoints where outgoing connections initiate from - EgressEndpoints []*ChannelEgressEndpoint `locationName:"egressEndpoints" type:"list"` - - EncoderSettings *EncoderSettings `locationName:"encoderSettings" type:"structure"` - - // The unique id of the channel. - Id *string `locationName:"id" type:"string"` - - // List of input attachments for channel. - InputAttachments []*InputAttachment `locationName:"inputAttachments" type:"list"` - - // The name of the channel. (user-mutable) - Name *string `locationName:"name" type:"string"` - - // The number of currently healthy pipelines. - PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` - - // The Amazon Resource Name (ARN) of the role assumed when running the Channel. - RoleArn *string `locationName:"roleArn" type:"string"` - - State *string `locationName:"state" type:"string" enum:"ChannelState"` -} - -// String returns the string representation -func (s Channel) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Channel) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *Channel) SetArn(v string) *Channel { - s.Arn = &v - return s -} - -// SetDestinations sets the Destinations field's value. -func (s *Channel) SetDestinations(v []*OutputDestination) *Channel { - s.Destinations = v - return s -} - -// SetEgressEndpoints sets the EgressEndpoints field's value. -func (s *Channel) SetEgressEndpoints(v []*ChannelEgressEndpoint) *Channel { - s.EgressEndpoints = v - return s -} - -// SetEncoderSettings sets the EncoderSettings field's value. -func (s *Channel) SetEncoderSettings(v *EncoderSettings) *Channel { - s.EncoderSettings = v - return s -} - -// SetId sets the Id field's value. 
-func (s *Channel) SetId(v string) *Channel { - s.Id = &v - return s -} - -// SetInputAttachments sets the InputAttachments field's value. -func (s *Channel) SetInputAttachments(v []*InputAttachment) *Channel { - s.InputAttachments = v - return s -} - -// SetName sets the Name field's value. -func (s *Channel) SetName(v string) *Channel { - s.Name = &v - return s -} - -// SetPipelinesRunningCount sets the PipelinesRunningCount field's value. -func (s *Channel) SetPipelinesRunningCount(v int64) *Channel { - s.PipelinesRunningCount = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *Channel) SetRoleArn(v string) *Channel { - s.RoleArn = &v - return s -} - -// SetState sets the State field's value. -func (s *Channel) SetState(v string) *Channel { - s.State = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ChannelEgressEndpoint -type ChannelEgressEndpoint struct { - _ struct{} `type:"structure"` - - // Public IP of where a channel's output comes from - SourceIp *string `locationName:"sourceIp" type:"string"` -} - -// String returns the string representation -func (s ChannelEgressEndpoint) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChannelEgressEndpoint) GoString() string { - return s.String() -} - -// SetSourceIp sets the SourceIp field's value. -func (s *ChannelEgressEndpoint) SetSourceIp(v string) *ChannelEgressEndpoint { - s.SourceIp = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ChannelSummary -type ChannelSummary struct { - _ struct{} `type:"structure"` - - // The unique arn of the channel. - Arn *string `locationName:"arn" type:"string"` - - // A list of destinations of the channel. For UDP outputs, there is onedestination - // per output. For other types (HLS, for example), there isone destination per - // packager. 
- Destinations []*OutputDestination `locationName:"destinations" type:"list"` - - // The endpoints where outgoing connections initiate from - EgressEndpoints []*ChannelEgressEndpoint `locationName:"egressEndpoints" type:"list"` - - // The unique id of the channel. - Id *string `locationName:"id" type:"string"` - - // List of input attachments for channel. - InputAttachments []*InputAttachment `locationName:"inputAttachments" type:"list"` - - // The name of the channel. (user-mutable) - Name *string `locationName:"name" type:"string"` - - // The number of currently healthy pipelines. - PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` - - // The Amazon Resource Name (ARN) of the role assumed when running the Channel. - RoleArn *string `locationName:"roleArn" type:"string"` - - State *string `locationName:"state" type:"string" enum:"ChannelState"` -} - -// String returns the string representation -func (s ChannelSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChannelSummary) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *ChannelSummary) SetArn(v string) *ChannelSummary { - s.Arn = &v - return s -} - -// SetDestinations sets the Destinations field's value. -func (s *ChannelSummary) SetDestinations(v []*OutputDestination) *ChannelSummary { - s.Destinations = v - return s -} - -// SetEgressEndpoints sets the EgressEndpoints field's value. -func (s *ChannelSummary) SetEgressEndpoints(v []*ChannelEgressEndpoint) *ChannelSummary { - s.EgressEndpoints = v - return s -} - -// SetId sets the Id field's value. -func (s *ChannelSummary) SetId(v string) *ChannelSummary { - s.Id = &v - return s -} - -// SetInputAttachments sets the InputAttachments field's value. -func (s *ChannelSummary) SetInputAttachments(v []*InputAttachment) *ChannelSummary { - s.InputAttachments = v - return s -} - -// SetName sets the Name field's value. 
-func (s *ChannelSummary) SetName(v string) *ChannelSummary { - s.Name = &v - return s -} - -// SetPipelinesRunningCount sets the PipelinesRunningCount field's value. -func (s *ChannelSummary) SetPipelinesRunningCount(v int64) *ChannelSummary { - s.PipelinesRunningCount = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *ChannelSummary) SetRoleArn(v string) *ChannelSummary { - s.RoleArn = &v - return s -} - -// SetState sets the State field's value. -func (s *ChannelSummary) SetState(v string) *ChannelSummary { - s.State = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateChannelRequest -type CreateChannelInput struct { - _ struct{} `type:"structure"` - - Destinations []*OutputDestination `locationName:"destinations" type:"list"` - - EncoderSettings *EncoderSettings `locationName:"encoderSettings" type:"structure"` - - InputAttachments []*InputAttachment `locationName:"inputAttachments" type:"list"` - - Name *string `locationName:"name" type:"string"` - - RequestId *string `locationName:"requestId" type:"string" idempotencyToken:"true"` - - Reserved *string `locationName:"reserved" type:"string"` - - RoleArn *string `locationName:"roleArn" type:"string"` -} - -// String returns the string representation -func (s CreateChannelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateChannelInput) GoString() string { - return s.String() -} - -// SetDestinations sets the Destinations field's value. -func (s *CreateChannelInput) SetDestinations(v []*OutputDestination) *CreateChannelInput { - s.Destinations = v - return s -} - -// SetEncoderSettings sets the EncoderSettings field's value. -func (s *CreateChannelInput) SetEncoderSettings(v *EncoderSettings) *CreateChannelInput { - s.EncoderSettings = v - return s -} - -// SetInputAttachments sets the InputAttachments field's value. 
-func (s *CreateChannelInput) SetInputAttachments(v []*InputAttachment) *CreateChannelInput { - s.InputAttachments = v - return s -} - -// SetName sets the Name field's value. -func (s *CreateChannelInput) SetName(v string) *CreateChannelInput { - s.Name = &v - return s -} - -// SetRequestId sets the RequestId field's value. -func (s *CreateChannelInput) SetRequestId(v string) *CreateChannelInput { - s.RequestId = &v - return s -} - -// SetReserved sets the Reserved field's value. -func (s *CreateChannelInput) SetReserved(v string) *CreateChannelInput { - s.Reserved = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *CreateChannelInput) SetRoleArn(v string) *CreateChannelInput { - s.RoleArn = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateChannelResponse -type CreateChannelOutput struct { - _ struct{} `type:"structure"` - - Channel *Channel `locationName:"channel" type:"structure"` -} - -// String returns the string representation -func (s CreateChannelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateChannelOutput) GoString() string { - return s.String() -} - -// SetChannel sets the Channel field's value. 
-func (s *CreateChannelOutput) SetChannel(v *Channel) *CreateChannelOutput { - s.Channel = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateInputRequest -type CreateInputInput struct { - _ struct{} `type:"structure"` - - Destinations []*InputDestinationRequest `locationName:"destinations" type:"list"` - - InputSecurityGroups []*string `locationName:"inputSecurityGroups" type:"list"` - - Name *string `locationName:"name" type:"string"` - - RequestId *string `locationName:"requestId" type:"string" idempotencyToken:"true"` - - Sources []*InputSourceRequest `locationName:"sources" type:"list"` - - Type *string `locationName:"type" type:"string" enum:"InputType"` -} - -// String returns the string representation -func (s CreateInputInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateInputInput) GoString() string { - return s.String() -} - -// SetDestinations sets the Destinations field's value. -func (s *CreateInputInput) SetDestinations(v []*InputDestinationRequest) *CreateInputInput { - s.Destinations = v - return s -} - -// SetInputSecurityGroups sets the InputSecurityGroups field's value. -func (s *CreateInputInput) SetInputSecurityGroups(v []*string) *CreateInputInput { - s.InputSecurityGroups = v - return s -} - -// SetName sets the Name field's value. -func (s *CreateInputInput) SetName(v string) *CreateInputInput { - s.Name = &v - return s -} - -// SetRequestId sets the RequestId field's value. -func (s *CreateInputInput) SetRequestId(v string) *CreateInputInput { - s.RequestId = &v - return s -} - -// SetSources sets the Sources field's value. -func (s *CreateInputInput) SetSources(v []*InputSourceRequest) *CreateInputInput { - s.Sources = v - return s -} - -// SetType sets the Type field's value. 
-func (s *CreateInputInput) SetType(v string) *CreateInputInput { - s.Type = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateInputResponse -type CreateInputOutput struct { - _ struct{} `type:"structure"` - - Input *Input `locationName:"input" type:"structure"` -} - -// String returns the string representation -func (s CreateInputOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateInputOutput) GoString() string { - return s.String() -} - -// SetInput sets the Input field's value. -func (s *CreateInputOutput) SetInput(v *Input) *CreateInputOutput { - s.Input = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateInputSecurityGroupRequest -type CreateInputSecurityGroupInput struct { - _ struct{} `type:"structure"` - - WhitelistRules []*InputWhitelistRuleCidr `locationName:"whitelistRules" type:"list"` -} - -// String returns the string representation -func (s CreateInputSecurityGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateInputSecurityGroupInput) GoString() string { - return s.String() -} - -// SetWhitelistRules sets the WhitelistRules field's value. 
-func (s *CreateInputSecurityGroupInput) SetWhitelistRules(v []*InputWhitelistRuleCidr) *CreateInputSecurityGroupInput { - s.WhitelistRules = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/CreateInputSecurityGroupResponse -type CreateInputSecurityGroupOutput struct { - _ struct{} `type:"structure"` - - // An Input Security Group - SecurityGroup *InputSecurityGroup `locationName:"securityGroup" type:"structure"` -} - -// String returns the string representation -func (s CreateInputSecurityGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateInputSecurityGroupOutput) GoString() string { - return s.String() -} - -// SetSecurityGroup sets the SecurityGroup field's value. -func (s *CreateInputSecurityGroupOutput) SetSecurityGroup(v *InputSecurityGroup) *CreateInputSecurityGroupOutput { - s.SecurityGroup = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteChannelRequest -type DeleteChannelInput struct { - _ struct{} `type:"structure"` - - // ChannelId is a required field - ChannelId *string `location:"uri" locationName:"channelId" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteChannelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteChannelInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteChannelInput"} - if s.ChannelId == nil { - invalidParams.Add(request.NewErrParamRequired("ChannelId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChannelId sets the ChannelId field's value. 
-func (s *DeleteChannelInput) SetChannelId(v string) *DeleteChannelInput { - s.ChannelId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteChannelResponse -type DeleteChannelOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - Destinations []*OutputDestination `locationName:"destinations" type:"list"` - - EgressEndpoints []*ChannelEgressEndpoint `locationName:"egressEndpoints" type:"list"` - - EncoderSettings *EncoderSettings `locationName:"encoderSettings" type:"structure"` - - Id *string `locationName:"id" type:"string"` - - InputAttachments []*InputAttachment `locationName:"inputAttachments" type:"list"` - - Name *string `locationName:"name" type:"string"` - - PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` - - RoleArn *string `locationName:"roleArn" type:"string"` - - State *string `locationName:"state" type:"string" enum:"ChannelState"` -} - -// String returns the string representation -func (s DeleteChannelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteChannelOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *DeleteChannelOutput) SetArn(v string) *DeleteChannelOutput { - s.Arn = &v - return s -} - -// SetDestinations sets the Destinations field's value. -func (s *DeleteChannelOutput) SetDestinations(v []*OutputDestination) *DeleteChannelOutput { - s.Destinations = v - return s -} - -// SetEgressEndpoints sets the EgressEndpoints field's value. -func (s *DeleteChannelOutput) SetEgressEndpoints(v []*ChannelEgressEndpoint) *DeleteChannelOutput { - s.EgressEndpoints = v - return s -} - -// SetEncoderSettings sets the EncoderSettings field's value. 
-func (s *DeleteChannelOutput) SetEncoderSettings(v *EncoderSettings) *DeleteChannelOutput { - s.EncoderSettings = v - return s -} - -// SetId sets the Id field's value. -func (s *DeleteChannelOutput) SetId(v string) *DeleteChannelOutput { - s.Id = &v - return s -} - -// SetInputAttachments sets the InputAttachments field's value. -func (s *DeleteChannelOutput) SetInputAttachments(v []*InputAttachment) *DeleteChannelOutput { - s.InputAttachments = v - return s -} - -// SetName sets the Name field's value. -func (s *DeleteChannelOutput) SetName(v string) *DeleteChannelOutput { - s.Name = &v - return s -} - -// SetPipelinesRunningCount sets the PipelinesRunningCount field's value. -func (s *DeleteChannelOutput) SetPipelinesRunningCount(v int64) *DeleteChannelOutput { - s.PipelinesRunningCount = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *DeleteChannelOutput) SetRoleArn(v string) *DeleteChannelOutput { - s.RoleArn = &v - return s -} - -// SetState sets the State field's value. -func (s *DeleteChannelOutput) SetState(v string) *DeleteChannelOutput { - s.State = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteInputRequest -type DeleteInputInput struct { - _ struct{} `type:"structure"` - - // InputId is a required field - InputId *string `location:"uri" locationName:"inputId" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteInputInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteInputInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteInputInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteInputInput"} - if s.InputId == nil { - invalidParams.Add(request.NewErrParamRequired("InputId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetInputId sets the InputId field's value. -func (s *DeleteInputInput) SetInputId(v string) *DeleteInputInput { - s.InputId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteInputResponse -type DeleteInputOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteInputOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteInputOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteInputSecurityGroupRequest -type DeleteInputSecurityGroupInput struct { - _ struct{} `type:"structure"` - - // InputSecurityGroupId is a required field - InputSecurityGroupId *string `location:"uri" locationName:"inputSecurityGroupId" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteInputSecurityGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteInputSecurityGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteInputSecurityGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteInputSecurityGroupInput"} - if s.InputSecurityGroupId == nil { - invalidParams.Add(request.NewErrParamRequired("InputSecurityGroupId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetInputSecurityGroupId sets the InputSecurityGroupId field's value. 
-func (s *DeleteInputSecurityGroupInput) SetInputSecurityGroupId(v string) *DeleteInputSecurityGroupInput { - s.InputSecurityGroupId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DeleteInputSecurityGroupResponse -type DeleteInputSecurityGroupOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteInputSecurityGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteInputSecurityGroupOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeChannelRequest -type DescribeChannelInput struct { - _ struct{} `type:"structure"` - - // ChannelId is a required field - ChannelId *string `location:"uri" locationName:"channelId" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeChannelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeChannelInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeChannelInput"} - if s.ChannelId == nil { - invalidParams.Add(request.NewErrParamRequired("ChannelId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChannelId sets the ChannelId field's value. 
-func (s *DescribeChannelInput) SetChannelId(v string) *DescribeChannelInput { - s.ChannelId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeChannelResponse -type DescribeChannelOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - Destinations []*OutputDestination `locationName:"destinations" type:"list"` - - EgressEndpoints []*ChannelEgressEndpoint `locationName:"egressEndpoints" type:"list"` - - EncoderSettings *EncoderSettings `locationName:"encoderSettings" type:"structure"` - - Id *string `locationName:"id" type:"string"` - - InputAttachments []*InputAttachment `locationName:"inputAttachments" type:"list"` - - Name *string `locationName:"name" type:"string"` - - PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` - - RoleArn *string `locationName:"roleArn" type:"string"` - - State *string `locationName:"state" type:"string" enum:"ChannelState"` -} - -// String returns the string representation -func (s DescribeChannelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeChannelOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *DescribeChannelOutput) SetArn(v string) *DescribeChannelOutput { - s.Arn = &v - return s -} - -// SetDestinations sets the Destinations field's value. -func (s *DescribeChannelOutput) SetDestinations(v []*OutputDestination) *DescribeChannelOutput { - s.Destinations = v - return s -} - -// SetEgressEndpoints sets the EgressEndpoints field's value. -func (s *DescribeChannelOutput) SetEgressEndpoints(v []*ChannelEgressEndpoint) *DescribeChannelOutput { - s.EgressEndpoints = v - return s -} - -// SetEncoderSettings sets the EncoderSettings field's value. 
-func (s *DescribeChannelOutput) SetEncoderSettings(v *EncoderSettings) *DescribeChannelOutput { - s.EncoderSettings = v - return s -} - -// SetId sets the Id field's value. -func (s *DescribeChannelOutput) SetId(v string) *DescribeChannelOutput { - s.Id = &v - return s -} - -// SetInputAttachments sets the InputAttachments field's value. -func (s *DescribeChannelOutput) SetInputAttachments(v []*InputAttachment) *DescribeChannelOutput { - s.InputAttachments = v - return s -} - -// SetName sets the Name field's value. -func (s *DescribeChannelOutput) SetName(v string) *DescribeChannelOutput { - s.Name = &v - return s -} - -// SetPipelinesRunningCount sets the PipelinesRunningCount field's value. -func (s *DescribeChannelOutput) SetPipelinesRunningCount(v int64) *DescribeChannelOutput { - s.PipelinesRunningCount = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *DescribeChannelOutput) SetRoleArn(v string) *DescribeChannelOutput { - s.RoleArn = &v - return s -} - -// SetState sets the State field's value. -func (s *DescribeChannelOutput) SetState(v string) *DescribeChannelOutput { - s.State = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputRequest -type DescribeInputInput struct { - _ struct{} `type:"structure"` - - // InputId is a required field - InputId *string `location:"uri" locationName:"inputId" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeInputInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeInputInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeInputInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeInputInput"} - if s.InputId == nil { - invalidParams.Add(request.NewErrParamRequired("InputId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetInputId sets the InputId field's value. -func (s *DescribeInputInput) SetInputId(v string) *DescribeInputInput { - s.InputId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputResponse -type DescribeInputOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - AttachedChannels []*string `locationName:"attachedChannels" type:"list"` - - Destinations []*InputDestination `locationName:"destinations" type:"list"` - - Id *string `locationName:"id" type:"string"` - - Name *string `locationName:"name" type:"string"` - - SecurityGroups []*string `locationName:"securityGroups" type:"list"` - - Sources []*InputSource `locationName:"sources" type:"list"` - - State *string `locationName:"state" type:"string" enum:"InputState"` - - Type *string `locationName:"type" type:"string" enum:"InputType"` -} - -// String returns the string representation -func (s DescribeInputOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeInputOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *DescribeInputOutput) SetArn(v string) *DescribeInputOutput { - s.Arn = &v - return s -} - -// SetAttachedChannels sets the AttachedChannels field's value. -func (s *DescribeInputOutput) SetAttachedChannels(v []*string) *DescribeInputOutput { - s.AttachedChannels = v - return s -} - -// SetDestinations sets the Destinations field's value. 
-func (s *DescribeInputOutput) SetDestinations(v []*InputDestination) *DescribeInputOutput { - s.Destinations = v - return s -} - -// SetId sets the Id field's value. -func (s *DescribeInputOutput) SetId(v string) *DescribeInputOutput { - s.Id = &v - return s -} - -// SetName sets the Name field's value. -func (s *DescribeInputOutput) SetName(v string) *DescribeInputOutput { - s.Name = &v - return s -} - -// SetSecurityGroups sets the SecurityGroups field's value. -func (s *DescribeInputOutput) SetSecurityGroups(v []*string) *DescribeInputOutput { - s.SecurityGroups = v - return s -} - -// SetSources sets the Sources field's value. -func (s *DescribeInputOutput) SetSources(v []*InputSource) *DescribeInputOutput { - s.Sources = v - return s -} - -// SetState sets the State field's value. -func (s *DescribeInputOutput) SetState(v string) *DescribeInputOutput { - s.State = &v - return s -} - -// SetType sets the Type field's value. -func (s *DescribeInputOutput) SetType(v string) *DescribeInputOutput { - s.Type = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputSecurityGroupRequest -type DescribeInputSecurityGroupInput struct { - _ struct{} `type:"structure"` - - // InputSecurityGroupId is a required field - InputSecurityGroupId *string `location:"uri" locationName:"inputSecurityGroupId" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeInputSecurityGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeInputSecurityGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeInputSecurityGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeInputSecurityGroupInput"} - if s.InputSecurityGroupId == nil { - invalidParams.Add(request.NewErrParamRequired("InputSecurityGroupId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetInputSecurityGroupId sets the InputSecurityGroupId field's value. -func (s *DescribeInputSecurityGroupInput) SetInputSecurityGroupId(v string) *DescribeInputSecurityGroupInput { - s.InputSecurityGroupId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputSecurityGroupResponse -type DescribeInputSecurityGroupOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - Id *string `locationName:"id" type:"string"` - - WhitelistRules []*InputWhitelistRule `locationName:"whitelistRules" type:"list"` -} - -// String returns the string representation -func (s DescribeInputSecurityGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeInputSecurityGroupOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *DescribeInputSecurityGroupOutput) SetArn(v string) *DescribeInputSecurityGroupOutput { - s.Arn = &v - return s -} - -// SetId sets the Id field's value. -func (s *DescribeInputSecurityGroupOutput) SetId(v string) *DescribeInputSecurityGroupOutput { - s.Id = &v - return s -} - -// SetWhitelistRules sets the WhitelistRules field's value. 
-func (s *DescribeInputSecurityGroupOutput) SetWhitelistRules(v []*InputWhitelistRule) *DescribeInputSecurityGroupOutput { - s.WhitelistRules = v - return s -} - -// DVB Network Information Table (NIT) -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DvbNitSettings -type DvbNitSettings struct { - _ struct{} `type:"structure"` - - // The numeric value placed in the Network Information Table (NIT). - NetworkId *int64 `locationName:"networkId" type:"integer"` - - // The network name text placed in the networkNameDescriptor inside the Network - // Information Table. Maximum length is 256 characters. - NetworkName *string `locationName:"networkName" type:"string"` - - // The number of milliseconds between instances of this table in the output - // transport stream. - RepInterval *int64 `locationName:"repInterval" type:"integer"` -} - -// String returns the string representation -func (s DvbNitSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DvbNitSettings) GoString() string { - return s.String() -} - -// SetNetworkId sets the NetworkId field's value. -func (s *DvbNitSettings) SetNetworkId(v int64) *DvbNitSettings { - s.NetworkId = &v - return s -} - -// SetNetworkName sets the NetworkName field's value. -func (s *DvbNitSettings) SetNetworkName(v string) *DvbNitSettings { - s.NetworkName = &v - return s -} - -// SetRepInterval sets the RepInterval field's value. -func (s *DvbNitSettings) SetRepInterval(v int64) *DvbNitSettings { - s.RepInterval = &v - return s -} - -// DVB Service Description Table (SDT) -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DvbSdtSettings -type DvbSdtSettings struct { - _ struct{} `type:"structure"` - - // Selects method of inserting SDT information into output stream. The sdtFollow - // setting copies SDT information from input stream to output stream. 
The sdtFollowIfPresent - // setting copies SDT information from input stream to output stream if SDT - // information is present in the input, otherwise it will fall back on the user-defined - // values. The sdtManual setting means user will enter the SDT information. - // The sdtNone setting means output stream will not contain SDT information. - OutputSdt *string `locationName:"outputSdt" type:"string" enum:"DvbSdtOutputSdt"` - - // The number of milliseconds between instances of this table in the output - // transport stream. - RepInterval *int64 `locationName:"repInterval" type:"integer"` - - // The service name placed in the serviceDescriptor in the Service Description - // Table. Maximum length is 256 characters. - ServiceName *string `locationName:"serviceName" type:"string"` - - // The service provider name placed in the serviceDescriptor in the Service - // Description Table. Maximum length is 256 characters. - ServiceProviderName *string `locationName:"serviceProviderName" type:"string"` -} - -// String returns the string representation -func (s DvbSdtSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DvbSdtSettings) GoString() string { - return s.String() -} - -// SetOutputSdt sets the OutputSdt field's value. -func (s *DvbSdtSettings) SetOutputSdt(v string) *DvbSdtSettings { - s.OutputSdt = &v - return s -} - -// SetRepInterval sets the RepInterval field's value. -func (s *DvbSdtSettings) SetRepInterval(v int64) *DvbSdtSettings { - s.RepInterval = &v - return s -} - -// SetServiceName sets the ServiceName field's value. -func (s *DvbSdtSettings) SetServiceName(v string) *DvbSdtSettings { - s.ServiceName = &v - return s -} - -// SetServiceProviderName sets the ServiceProviderName field's value. 
-func (s *DvbSdtSettings) SetServiceProviderName(v string) *DvbSdtSettings { - s.ServiceProviderName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DvbSubDestinationSettings -type DvbSubDestinationSettings struct { - _ struct{} `type:"structure"` - - // If no explicit xPosition or yPosition is provided, setting alignment to centered - // will place the captions at the bottom center of the output. Similarly, setting - // a left alignment will align captions to the bottom left of the output. If - // x and y positions are given in conjunction with the alignment parameter, - // the font will be justified (either left or centered) relative to those coordinates. - // Selecting "smart" justification will left-justify live subtitles and center-justify - // pre-recorded subtitles. This option is not valid for source captions that - // are STL or 608/embedded. These source settings are already pre-defined by - // the caption stream. All burn-in and DVB-Sub font settings must match. - Alignment *string `locationName:"alignment" type:"string" enum:"DvbSubDestinationAlignment"` - - // Specifies the color of the rectangle behind the captions. All burn-in and - // DVB-Sub font settings must match. - BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"DvbSubDestinationBackgroundColor"` - - // Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. - // Leaving this parameter blank is equivalent to setting it to 0 (transparent). - // All burn-in and DVB-Sub font settings must match. - BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"` - - // External font file used for caption burn-in. File extension must be 'ttf' - // or 'tte'. Although the user can select output fonts for many different types - // of input captions, embedded, STL and teletext sources use a strict grid system. 
- // Using external fonts with these caption sources could cause unexpected display - // of proportional fonts. All burn-in and DVB-Sub font settings must match. - Font *InputLocation `locationName:"font" type:"structure"` - - // Specifies the color of the burned-in captions. This option is not valid for - // source captions that are STL, 608/embedded or teletext. These source settings - // are already pre-defined by the caption stream. All burn-in and DVB-Sub font - // settings must match. - FontColor *string `locationName:"fontColor" type:"string" enum:"DvbSubDestinationFontColor"` - - // Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent. - // All burn-in and DVB-Sub font settings must match. - FontOpacity *int64 `locationName:"fontOpacity" type:"integer"` - - // Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and - // DVB-Sub font settings must match. - FontResolution *int64 `locationName:"fontResolution" type:"integer"` - - // When set to auto fontSize will scale depending on the size of the output. - // Giving a positive integer will specify the exact font size in points. All - // burn-in and DVB-Sub font settings must match. - FontSize *string `locationName:"fontSize" type:"string"` - - // Specifies font outline color. This option is not valid for source captions - // that are either 608/embedded or teletext. These source settings are already - // pre-defined by the caption stream. All burn-in and DVB-Sub font settings - // must match. - OutlineColor *string `locationName:"outlineColor" type:"string" enum:"DvbSubDestinationOutlineColor"` - - // Specifies font outline size in pixels. This option is not valid for source - // captions that are either 608/embedded or teletext. These source settings - // are already pre-defined by the caption stream. All burn-in and DVB-Sub font - // settings must match. 
- OutlineSize *int64 `locationName:"outlineSize" type:"integer"` - - // Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub - // font settings must match. - ShadowColor *string `locationName:"shadowColor" type:"string" enum:"DvbSubDestinationShadowColor"` - - // Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving - // this parameter blank is equivalent to setting it to 0 (transparent). All - // burn-in and DVB-Sub font settings must match. - ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"` - - // Specifies the horizontal offset of the shadow relative to the captions in - // pixels. A value of -2 would result in a shadow offset 2 pixels to the left. - // All burn-in and DVB-Sub font settings must match. - ShadowXOffset *int64 `locationName:"shadowXOffset" type:"integer"` - - // Specifies the vertical offset of the shadow relative to the captions in pixels. - // A value of -2 would result in a shadow offset 2 pixels above the text. All - // burn-in and DVB-Sub font settings must match. - ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"` - - // Controls whether a fixed grid size will be used to generate the output subtitles - // bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs. - TeletextGridControl *string `locationName:"teletextGridControl" type:"string" enum:"DvbSubDestinationTeletextGridControl"` - - // Specifies the horizontal position of the caption relative to the left side - // of the output in pixels. A value of 10 would result in the captions starting - // 10 pixels from the left of the output. If no explicit xPosition is provided, - // the horizontal caption position will be determined by the alignment parameter. - // This option is not valid for source captions that are STL, 608/embedded or - // teletext. These source settings are already pre-defined by the caption stream. - // All burn-in and DVB-Sub font settings must match. 
- XPosition *int64 `locationName:"xPosition" type:"integer"` - - // Specifies the vertical position of the caption relative to the top of the - // output in pixels. A value of 10 would result in the captions starting 10 - // pixels from the top of the output. If no explicit yPosition is provided, - // the caption will be positioned towards the bottom of the output. This option - // is not valid for source captions that are STL, 608/embedded or teletext. - // These source settings are already pre-defined by the caption stream. All - // burn-in and DVB-Sub font settings must match. - YPosition *int64 `locationName:"yPosition" type:"integer"` -} - -// String returns the string representation -func (s DvbSubDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DvbSubDestinationSettings) GoString() string { - return s.String() -} - -// SetAlignment sets the Alignment field's value. -func (s *DvbSubDestinationSettings) SetAlignment(v string) *DvbSubDestinationSettings { - s.Alignment = &v - return s -} - -// SetBackgroundColor sets the BackgroundColor field's value. -func (s *DvbSubDestinationSettings) SetBackgroundColor(v string) *DvbSubDestinationSettings { - s.BackgroundColor = &v - return s -} - -// SetBackgroundOpacity sets the BackgroundOpacity field's value. -func (s *DvbSubDestinationSettings) SetBackgroundOpacity(v int64) *DvbSubDestinationSettings { - s.BackgroundOpacity = &v - return s -} - -// SetFont sets the Font field's value. -func (s *DvbSubDestinationSettings) SetFont(v *InputLocation) *DvbSubDestinationSettings { - s.Font = v - return s -} - -// SetFontColor sets the FontColor field's value. -func (s *DvbSubDestinationSettings) SetFontColor(v string) *DvbSubDestinationSettings { - s.FontColor = &v - return s -} - -// SetFontOpacity sets the FontOpacity field's value. 
-func (s *DvbSubDestinationSettings) SetFontOpacity(v int64) *DvbSubDestinationSettings { - s.FontOpacity = &v - return s -} - -// SetFontResolution sets the FontResolution field's value. -func (s *DvbSubDestinationSettings) SetFontResolution(v int64) *DvbSubDestinationSettings { - s.FontResolution = &v - return s -} - -// SetFontSize sets the FontSize field's value. -func (s *DvbSubDestinationSettings) SetFontSize(v string) *DvbSubDestinationSettings { - s.FontSize = &v - return s -} - -// SetOutlineColor sets the OutlineColor field's value. -func (s *DvbSubDestinationSettings) SetOutlineColor(v string) *DvbSubDestinationSettings { - s.OutlineColor = &v - return s -} - -// SetOutlineSize sets the OutlineSize field's value. -func (s *DvbSubDestinationSettings) SetOutlineSize(v int64) *DvbSubDestinationSettings { - s.OutlineSize = &v - return s -} - -// SetShadowColor sets the ShadowColor field's value. -func (s *DvbSubDestinationSettings) SetShadowColor(v string) *DvbSubDestinationSettings { - s.ShadowColor = &v - return s -} - -// SetShadowOpacity sets the ShadowOpacity field's value. -func (s *DvbSubDestinationSettings) SetShadowOpacity(v int64) *DvbSubDestinationSettings { - s.ShadowOpacity = &v - return s -} - -// SetShadowXOffset sets the ShadowXOffset field's value. -func (s *DvbSubDestinationSettings) SetShadowXOffset(v int64) *DvbSubDestinationSettings { - s.ShadowXOffset = &v - return s -} - -// SetShadowYOffset sets the ShadowYOffset field's value. -func (s *DvbSubDestinationSettings) SetShadowYOffset(v int64) *DvbSubDestinationSettings { - s.ShadowYOffset = &v - return s -} - -// SetTeletextGridControl sets the TeletextGridControl field's value. -func (s *DvbSubDestinationSettings) SetTeletextGridControl(v string) *DvbSubDestinationSettings { - s.TeletextGridControl = &v - return s -} - -// SetXPosition sets the XPosition field's value. 
-func (s *DvbSubDestinationSettings) SetXPosition(v int64) *DvbSubDestinationSettings { - s.XPosition = &v - return s -} - -// SetYPosition sets the YPosition field's value. -func (s *DvbSubDestinationSettings) SetYPosition(v int64) *DvbSubDestinationSettings { - s.YPosition = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DvbSubSourceSettings -type DvbSubSourceSettings struct { - _ struct{} `type:"structure"` - - // When using DVB-Sub with Burn-In or SMPTE-TT, use this PID for the source - // content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through, - // regardless of selectors. - Pid *int64 `locationName:"pid" type:"integer"` -} - -// String returns the string representation -func (s DvbSubSourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DvbSubSourceSettings) GoString() string { - return s.String() -} - -// SetPid sets the Pid field's value. -func (s *DvbSubSourceSettings) SetPid(v int64) *DvbSubSourceSettings { - s.Pid = &v - return s -} - -// DVB Time and Date Table (SDT) -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DvbTdtSettings -type DvbTdtSettings struct { - _ struct{} `type:"structure"` - - // The number of milliseconds between instances of this table in the output - // transport stream. - RepInterval *int64 `locationName:"repInterval" type:"integer"` -} - -// String returns the string representation -func (s DvbTdtSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DvbTdtSettings) GoString() string { - return s.String() -} - -// SetRepInterval sets the RepInterval field's value. 
-func (s *DvbTdtSettings) SetRepInterval(v int64) *DvbTdtSettings { - s.RepInterval = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Eac3Settings -type Eac3Settings struct { - _ struct{} `type:"structure"` - - // When set to attenuate3Db, applies a 3 dB attenuation to the surround channels. - // Only used for 3/2 coding mode. - AttenuationControl *string `locationName:"attenuationControl" type:"string" enum:"Eac3AttenuationControl"` - - // Average bitrate in bits/second. Valid bitrates depend on the coding mode. - Bitrate *float64 `locationName:"bitrate" type:"double"` - - // Specifies the bitstream mode (bsmod) for the emitted E-AC-3 stream. See ATSC - // A/52-2012 (Annex E) for background on these values. - BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Eac3BitstreamMode"` - - // Dolby Digital Plus coding mode. Determines number of channels. - CodingMode *string `locationName:"codingMode" type:"string" enum:"Eac3CodingMode"` - - // When set to enabled, activates a DC highpass filter for all input channels. - DcFilter *string `locationName:"dcFilter" type:"string" enum:"Eac3DcFilter"` - - // Sets the dialnorm for the output. If blank and input audio is Dolby Digital - // Plus, dialnorm will be passed through. - Dialnorm *int64 `locationName:"dialnorm" type:"integer"` - - // Sets the Dolby dynamic range compression profile. - DrcLine *string `locationName:"drcLine" type:"string" enum:"Eac3DrcLine"` - - // Sets the profile for heavy Dolby dynamic range compression, ensures that - // the instantaneous signal peaks do not exceed specified levels. - DrcRf *string `locationName:"drcRf" type:"string" enum:"Eac3DrcRf"` - - // When encoding 3/2 audio, setting to lfe enables the LFE channel - LfeControl *string `locationName:"lfeControl" type:"string" enum:"Eac3LfeControl"` - - // When set to enabled, applies a 120Hz lowpass filter to the LFE channel prior - // to encoding. 
Only valid with codingMode32 coding mode. - LfeFilter *string `locationName:"lfeFilter" type:"string" enum:"Eac3LfeFilter"` - - // Left only/Right only center mix level. Only used for 3/2 coding mode. - LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"` - - // Left only/Right only surround mix level. Only used for 3/2 coding mode. - LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"` - - // Left total/Right total center mix level. Only used for 3/2 coding mode. - LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"` - - // Left total/Right total surround mix level. Only used for 3/2 coding mode. - LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"` - - // When set to followInput, encoder metadata will be sourced from the DD, DD+, - // or DolbyE decoder that supplied this audio data. If audio was not supplied - // from one of these streams, then the static metadata settings will be used. - MetadataControl *string `locationName:"metadataControl" type:"string" enum:"Eac3MetadataControl"` - - // When set to whenPossible, input DD+ audio will be passed through if it is - // present on the input. This detection is dynamic over the life of the transcode. - // Inputs that alternate between DD+ and non-DD+ content will have a consistent - // DD+ output as the system alternates between passthrough and encoding. - PassthroughControl *string `locationName:"passthroughControl" type:"string" enum:"Eac3PassthroughControl"` - - // When set to shift90Degrees, applies a 90-degree phase shift to the surround - // channels. Only used for 3/2 coding mode. - PhaseControl *string `locationName:"phaseControl" type:"string" enum:"Eac3PhaseControl"` - - // Stereo downmix preference. Only used for 3/2 coding mode. 
- StereoDownmix *string `locationName:"stereoDownmix" type:"string" enum:"Eac3StereoDownmix"` - - // When encoding 3/2 audio, sets whether an extra center back surround channel - // is matrix encoded into the left and right surround channels. - SurroundExMode *string `locationName:"surroundExMode" type:"string" enum:"Eac3SurroundExMode"` - - // When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into - // the two channels. - SurroundMode *string `locationName:"surroundMode" type:"string" enum:"Eac3SurroundMode"` -} - -// String returns the string representation -func (s Eac3Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Eac3Settings) GoString() string { - return s.String() -} - -// SetAttenuationControl sets the AttenuationControl field's value. -func (s *Eac3Settings) SetAttenuationControl(v string) *Eac3Settings { - s.AttenuationControl = &v - return s -} - -// SetBitrate sets the Bitrate field's value. -func (s *Eac3Settings) SetBitrate(v float64) *Eac3Settings { - s.Bitrate = &v - return s -} - -// SetBitstreamMode sets the BitstreamMode field's value. -func (s *Eac3Settings) SetBitstreamMode(v string) *Eac3Settings { - s.BitstreamMode = &v - return s -} - -// SetCodingMode sets the CodingMode field's value. -func (s *Eac3Settings) SetCodingMode(v string) *Eac3Settings { - s.CodingMode = &v - return s -} - -// SetDcFilter sets the DcFilter field's value. -func (s *Eac3Settings) SetDcFilter(v string) *Eac3Settings { - s.DcFilter = &v - return s -} - -// SetDialnorm sets the Dialnorm field's value. -func (s *Eac3Settings) SetDialnorm(v int64) *Eac3Settings { - s.Dialnorm = &v - return s -} - -// SetDrcLine sets the DrcLine field's value. -func (s *Eac3Settings) SetDrcLine(v string) *Eac3Settings { - s.DrcLine = &v - return s -} - -// SetDrcRf sets the DrcRf field's value. 
-func (s *Eac3Settings) SetDrcRf(v string) *Eac3Settings { - s.DrcRf = &v - return s -} - -// SetLfeControl sets the LfeControl field's value. -func (s *Eac3Settings) SetLfeControl(v string) *Eac3Settings { - s.LfeControl = &v - return s -} - -// SetLfeFilter sets the LfeFilter field's value. -func (s *Eac3Settings) SetLfeFilter(v string) *Eac3Settings { - s.LfeFilter = &v - return s -} - -// SetLoRoCenterMixLevel sets the LoRoCenterMixLevel field's value. -func (s *Eac3Settings) SetLoRoCenterMixLevel(v float64) *Eac3Settings { - s.LoRoCenterMixLevel = &v - return s -} - -// SetLoRoSurroundMixLevel sets the LoRoSurroundMixLevel field's value. -func (s *Eac3Settings) SetLoRoSurroundMixLevel(v float64) *Eac3Settings { - s.LoRoSurroundMixLevel = &v - return s -} - -// SetLtRtCenterMixLevel sets the LtRtCenterMixLevel field's value. -func (s *Eac3Settings) SetLtRtCenterMixLevel(v float64) *Eac3Settings { - s.LtRtCenterMixLevel = &v - return s -} - -// SetLtRtSurroundMixLevel sets the LtRtSurroundMixLevel field's value. -func (s *Eac3Settings) SetLtRtSurroundMixLevel(v float64) *Eac3Settings { - s.LtRtSurroundMixLevel = &v - return s -} - -// SetMetadataControl sets the MetadataControl field's value. -func (s *Eac3Settings) SetMetadataControl(v string) *Eac3Settings { - s.MetadataControl = &v - return s -} - -// SetPassthroughControl sets the PassthroughControl field's value. -func (s *Eac3Settings) SetPassthroughControl(v string) *Eac3Settings { - s.PassthroughControl = &v - return s -} - -// SetPhaseControl sets the PhaseControl field's value. -func (s *Eac3Settings) SetPhaseControl(v string) *Eac3Settings { - s.PhaseControl = &v - return s -} - -// SetStereoDownmix sets the StereoDownmix field's value. -func (s *Eac3Settings) SetStereoDownmix(v string) *Eac3Settings { - s.StereoDownmix = &v - return s -} - -// SetSurroundExMode sets the SurroundExMode field's value. 
-func (s *Eac3Settings) SetSurroundExMode(v string) *Eac3Settings { - s.SurroundExMode = &v - return s -} - -// SetSurroundMode sets the SurroundMode field's value. -func (s *Eac3Settings) SetSurroundMode(v string) *Eac3Settings { - s.SurroundMode = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/EmbeddedDestinationSettings -type EmbeddedDestinationSettings struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s EmbeddedDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EmbeddedDestinationSettings) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/EmbeddedPlusScte20DestinationSettings -type EmbeddedPlusScte20DestinationSettings struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s EmbeddedPlusScte20DestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EmbeddedPlusScte20DestinationSettings) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/EmbeddedSourceSettings -type EmbeddedSourceSettings struct { - _ struct{} `type:"structure"` - - // If upconvert, 608 data is both passed through via the "608 compatibility - // bytes" fields of the 708 wrapper as well as translated into 708. 708 data - // present in the source content will be discarded. - Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"EmbeddedConvert608To708"` - - // Set to "auto" to handle streams with intermittent and/or non-aligned SCTE-20 - // and Embedded captions. 
- Scte20Detection *string `locationName:"scte20Detection" type:"string" enum:"EmbeddedScte20Detection"` - - // Specifies the 608/708 channel number within the video track from which to - // extract captions. Unused for passthrough. - Source608ChannelNumber *int64 `locationName:"source608ChannelNumber" type:"integer"` - - // This field is unused and deprecated. - Source608TrackNumber *int64 `locationName:"source608TrackNumber" type:"integer"` -} - -// String returns the string representation -func (s EmbeddedSourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EmbeddedSourceSettings) GoString() string { - return s.String() -} - -// SetConvert608To708 sets the Convert608To708 field's value. -func (s *EmbeddedSourceSettings) SetConvert608To708(v string) *EmbeddedSourceSettings { - s.Convert608To708 = &v - return s -} - -// SetScte20Detection sets the Scte20Detection field's value. -func (s *EmbeddedSourceSettings) SetScte20Detection(v string) *EmbeddedSourceSettings { - s.Scte20Detection = &v - return s -} - -// SetSource608ChannelNumber sets the Source608ChannelNumber field's value. -func (s *EmbeddedSourceSettings) SetSource608ChannelNumber(v int64) *EmbeddedSourceSettings { - s.Source608ChannelNumber = &v - return s -} - -// SetSource608TrackNumber sets the Source608TrackNumber field's value. -func (s *EmbeddedSourceSettings) SetSource608TrackNumber(v int64) *EmbeddedSourceSettings { - s.Source608TrackNumber = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/EncoderSettings -type EncoderSettings struct { - _ struct{} `type:"structure"` - - AudioDescriptions []*AudioDescription `locationName:"audioDescriptions" type:"list"` - - // Settings for ad avail blanking. - AvailBlanking *AvailBlanking `locationName:"availBlanking" type:"structure"` - - // Event-wide configuration settings for ad avail insertion. 
- AvailConfiguration *AvailConfiguration `locationName:"availConfiguration" type:"structure"` - - // Settings for blackout slate. - BlackoutSlate *BlackoutSlate `locationName:"blackoutSlate" type:"structure"` - - // Settings for caption decriptions - CaptionDescriptions []*CaptionDescription `locationName:"captionDescriptions" type:"list"` - - // Configuration settings that apply to the event as a whole. - GlobalConfiguration *GlobalConfiguration `locationName:"globalConfiguration" type:"structure"` - - OutputGroups []*OutputGroup `locationName:"outputGroups" type:"list"` - - // Contains settings used to acquire and adjust timecode information from inputs. - TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"` - - VideoDescriptions []*VideoDescription `locationName:"videoDescriptions" type:"list"` -} - -// String returns the string representation -func (s EncoderSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EncoderSettings) GoString() string { - return s.String() -} - -// SetAudioDescriptions sets the AudioDescriptions field's value. -func (s *EncoderSettings) SetAudioDescriptions(v []*AudioDescription) *EncoderSettings { - s.AudioDescriptions = v - return s -} - -// SetAvailBlanking sets the AvailBlanking field's value. -func (s *EncoderSettings) SetAvailBlanking(v *AvailBlanking) *EncoderSettings { - s.AvailBlanking = v - return s -} - -// SetAvailConfiguration sets the AvailConfiguration field's value. -func (s *EncoderSettings) SetAvailConfiguration(v *AvailConfiguration) *EncoderSettings { - s.AvailConfiguration = v - return s -} - -// SetBlackoutSlate sets the BlackoutSlate field's value. -func (s *EncoderSettings) SetBlackoutSlate(v *BlackoutSlate) *EncoderSettings { - s.BlackoutSlate = v - return s -} - -// SetCaptionDescriptions sets the CaptionDescriptions field's value. 
-func (s *EncoderSettings) SetCaptionDescriptions(v []*CaptionDescription) *EncoderSettings { - s.CaptionDescriptions = v - return s -} - -// SetGlobalConfiguration sets the GlobalConfiguration field's value. -func (s *EncoderSettings) SetGlobalConfiguration(v *GlobalConfiguration) *EncoderSettings { - s.GlobalConfiguration = v - return s -} - -// SetOutputGroups sets the OutputGroups field's value. -func (s *EncoderSettings) SetOutputGroups(v []*OutputGroup) *EncoderSettings { - s.OutputGroups = v - return s -} - -// SetTimecodeConfig sets the TimecodeConfig field's value. -func (s *EncoderSettings) SetTimecodeConfig(v *TimecodeConfig) *EncoderSettings { - s.TimecodeConfig = v - return s -} - -// SetVideoDescriptions sets the VideoDescriptions field's value. -func (s *EncoderSettings) SetVideoDescriptions(v []*VideoDescription) *EncoderSettings { - s.VideoDescriptions = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/FecOutputSettings -type FecOutputSettings struct { - _ struct{} `type:"structure"` - - // Parameter D from SMPTE 2022-1. The height of the FEC protection matrix. The - // number of transport stream packets per column error correction packet. Must - // be between 4 and 20, inclusive. - ColumnDepth *int64 `locationName:"columnDepth" type:"integer"` - - // Enables column only or column and row based FEC - IncludeFec *string `locationName:"includeFec" type:"string" enum:"FecOutputIncludeFec"` - - // Parameter L from SMPTE 2022-1. The width of the FEC protection matrix. Must - // be between 1 and 20, inclusive. If only Column FEC is used, then larger values - // increase robustness. If Row FEC is used, then this is the number of transport - // stream packets per row error correction packet, and the value must be between - // 4 and 20, inclusive, if includeFec is columnAndRow. If includeFec is column, - // this value must be 1 to 20, inclusive. 
- RowLength *int64 `locationName:"rowLength" type:"integer"` -} - -// String returns the string representation -func (s FecOutputSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FecOutputSettings) GoString() string { - return s.String() -} - -// SetColumnDepth sets the ColumnDepth field's value. -func (s *FecOutputSettings) SetColumnDepth(v int64) *FecOutputSettings { - s.ColumnDepth = &v - return s -} - -// SetIncludeFec sets the IncludeFec field's value. -func (s *FecOutputSettings) SetIncludeFec(v string) *FecOutputSettings { - s.IncludeFec = &v - return s -} - -// SetRowLength sets the RowLength field's value. -func (s *FecOutputSettings) SetRowLength(v int64) *FecOutputSettings { - s.RowLength = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/GlobalConfiguration -type GlobalConfiguration struct { - _ struct{} `type:"structure"` - - // Value to set the initial audio gain for the Live Event. - InitialAudioGain *int64 `locationName:"initialAudioGain" type:"integer"` - - // Indicates the action to take when an input completes (e.g. end-of-file.) - // Options include immediately switching to the next sequential input (via "switchInput"), - // switching to the next input and looping back to the first input when last - // input ends (via "switchAndLoopInputs") or not switching inputs and instead - // transcoding black / color / slate images per the "Input Loss Behavior" configuration - // until an activateInput REST command is received (via "none"). - InputEndAction *string `locationName:"inputEndAction" type:"string" enum:"GlobalConfigurationInputEndAction"` - - // Settings for system actions when input is lost. 
- InputLossBehavior *InputLossBehavior `locationName:"inputLossBehavior" type:"structure"` - - // Indicates whether the rate of frames emitted by the Live encoder should be - // paced by its system clock (which optionally may be locked to another source - // via NTP) or should be locked to the clock of the source that is providing - // the input stream. - OutputTimingSource *string `locationName:"outputTimingSource" type:"string" enum:"GlobalConfigurationOutputTimingSource"` - - // Adjusts video input buffer for streams with very low video framerates. This - // is commonly set to enabled for music channels with less than one video frame - // per second. - SupportLowFramerateInputs *string `locationName:"supportLowFramerateInputs" type:"string" enum:"GlobalConfigurationLowFramerateInputs"` -} - -// String returns the string representation -func (s GlobalConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GlobalConfiguration) GoString() string { - return s.String() -} - -// SetInitialAudioGain sets the InitialAudioGain field's value. -func (s *GlobalConfiguration) SetInitialAudioGain(v int64) *GlobalConfiguration { - s.InitialAudioGain = &v - return s -} - -// SetInputEndAction sets the InputEndAction field's value. -func (s *GlobalConfiguration) SetInputEndAction(v string) *GlobalConfiguration { - s.InputEndAction = &v - return s -} - -// SetInputLossBehavior sets the InputLossBehavior field's value. -func (s *GlobalConfiguration) SetInputLossBehavior(v *InputLossBehavior) *GlobalConfiguration { - s.InputLossBehavior = v - return s -} - -// SetOutputTimingSource sets the OutputTimingSource field's value. -func (s *GlobalConfiguration) SetOutputTimingSource(v string) *GlobalConfiguration { - s.OutputTimingSource = &v - return s -} - -// SetSupportLowFramerateInputs sets the SupportLowFramerateInputs field's value. 
-func (s *GlobalConfiguration) SetSupportLowFramerateInputs(v string) *GlobalConfiguration { - s.SupportLowFramerateInputs = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/H264Settings -type H264Settings struct { - _ struct{} `type:"structure"` - - // Adaptive quantization. Allows intra-frame quantizers to vary to improve visual - // quality. - AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H264AdaptiveQuantization"` - - // Indicates that AFD values will be written into the output stream. If afdSignaling - // is "auto", the system will try to preserve the input AFD value (in cases - // where multiple AFD values are valid). If set to "fixed", the AFD value will - // be the value configured in the fixedAfd parameter. - AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"AfdSignaling"` - - // Average bitrate in bits/second. Required for VBR, CBR, and ABR. For MS Smooth - // outputs, bitrates must be unique when rounded down to the nearest multiple - // of 1000. - Bitrate *int64 `locationName:"bitrate" type:"integer"` - - // Percentage of the buffer that should initially be filled (HRD buffer model). - BufFillPct *int64 `locationName:"bufFillPct" type:"integer"` - - // Size of buffer (HRD buffer model) in bits/second. - BufSize *int64 `locationName:"bufSize" type:"integer"` - - // Includes colorspace metadata in the output. - ColorMetadata *string `locationName:"colorMetadata" type:"string" enum:"H264ColorMetadata"` - - // Entropy encoding mode. Use cabac (must be in Main or High profile) or cavlc. - EntropyEncoding *string `locationName:"entropyEncoding" type:"string" enum:"H264EntropyEncoding"` - - // Four bit AFD value to write on all frames of video in the output stream. - // Only valid when afdSignaling is set to 'Fixed'. 
- FixedAfd *string `locationName:"fixedAfd" type:"string" enum:"FixedAfd"` - - // If set to enabled, adjust quantization within each frame to reduce flicker - // or 'pop' on I-frames. - FlickerAq *string `locationName:"flickerAq" type:"string" enum:"H264FlickerAq"` - - // This field indicates how the output video frame rate is specified. If "specified" - // is selected then the output video frame rate is determined by framerateNumerator - // and framerateDenominator, else if "initializeFromSource" is selected then - // the output video frame rate will be set equal to the input video frame rate - // of the first input. - FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H264FramerateControl"` - - // Framerate denominator. - FramerateDenominator *int64 `locationName:"framerateDenominator" type:"integer"` - - // Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 - // fps. - FramerateNumerator *int64 `locationName:"framerateNumerator" type:"integer"` - - // If enabled, use reference B frames for GOP structures that have B frames - // > 1. - GopBReference *string `locationName:"gopBReference" type:"string" enum:"H264GopBReference"` - - // Frequency of closed GOPs. In streaming applications, it is recommended that - // this be set to 1 so a decoder joining mid-stream will receive an IDR frame - // as quickly as possible. Setting this value to 0 will break output segmenting. - GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"` - - // Number of B-frames between reference frames. - GopNumBFrames *int64 `locationName:"gopNumBFrames" type:"integer"` - - // GOP size (keyframe interval) in units of either frames or seconds per gopSizeUnits. - // Must be greater than zero. - GopSize *float64 `locationName:"gopSize" type:"double"` - - // Indicates if the gopSize is specified in frames or seconds. If seconds the - // system will convert the gopSize into a frame count at run time. 
- GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"H264GopSizeUnits"` - - // H.264 Level. - Level *string `locationName:"level" type:"string" enum:"H264Level"` - - // Amount of lookahead. A value of low can decrease latency and memory usage, - // while high can produce better quality for certain content. - LookAheadRateControl *string `locationName:"lookAheadRateControl" type:"string" enum:"H264LookAheadRateControl"` - - // Maximum bitrate in bits/second (for VBR mode only). - MaxBitrate *int64 `locationName:"maxBitrate" type:"integer"` - - // Only meaningful if sceneChangeDetect is set to enabled. Enforces separation - // between repeated (cadence) I-frames and I-frames inserted by Scene Change - // Detection. If a scene change I-frame is within I-interval frames of a cadence - // I-frame, the GOP is shrunk and/or stretched to the scene change I-frame. - // GOP stretch requires enabling lookahead as well as setting I-interval. The - // normal cadence resumes for the next GOP. Note: Maximum GOP stretch = GOP - // size + Min-I-interval - 1 - MinIInterval *int64 `locationName:"minIInterval" type:"integer"` - - // Number of reference frames to use. The encoder may use more than requested - // if using B-frames and/or interlaced encoding. - NumRefFrames *int64 `locationName:"numRefFrames" type:"integer"` - - // This field indicates how the output pixel aspect ratio is specified. If "specified" - // is selected then the output video pixel aspect ratio is determined by parNumerator - // and parDenominator, else if "initializeFromSource" is selected then the output - // pixsel aspect ratio will be set equal to the input video pixel aspect ratio - // of the first input. - ParControl *string `locationName:"parControl" type:"string" enum:"H264ParControl"` - - // Pixel Aspect Ratio denominator. - ParDenominator *int64 `locationName:"parDenominator" type:"integer"` - - // Pixel Aspect Ratio numerator. 
- ParNumerator *int64 `locationName:"parNumerator" type:"integer"` - - // H.264 Profile. - Profile *string `locationName:"profile" type:"string" enum:"H264Profile"` - - // Rate control mode. - RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"H264RateControlMode"` - - // Sets the scan type of the output to progressive or top-field-first interlaced. - ScanType *string `locationName:"scanType" type:"string" enum:"H264ScanType"` - - // Scene change detection. Inserts I-frames on scene changes when enabled. - SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H264SceneChangeDetect"` - - // Number of slices per picture. Must be less than or equal to the number of - // macroblock rows for progressive pictures, and less than or equal to half - // the number of macroblock rows for interlaced pictures.This field is optional; - // when no value is specified the encoder will choose the number of slices based - // on encode resolution. - Slices *int64 `locationName:"slices" type:"integer"` - - // Softness. Selects quantizer matrix, larger values reduce high-frequency content - // in the encoded image. - Softness *int64 `locationName:"softness" type:"integer"` - - // If set to enabled, adjust quantization within each frame based on spatial - // variation of content complexity. - SpatialAq *string `locationName:"spatialAq" type:"string" enum:"H264SpatialAq"` - - // Produces a bitstream compliant with SMPTE RP-2027. - Syntax *string `locationName:"syntax" type:"string" enum:"H264Syntax"` - - // If set to enabled, adjust quantization within each frame based on temporal - // variation of content complexity. 
- TemporalAq *string `locationName:"temporalAq" type:"string" enum:"H264TemporalAq"` - - // Determines how timecodes should be inserted into the video elementary stream.- - // 'disabled': Do not include timecodes- 'picTimingSei': Pass through picture - // timing SEI messages from the source specified in Timecode Config - TimecodeInsertion *string `locationName:"timecodeInsertion" type:"string" enum:"H264TimecodeInsertionBehavior"` -} - -// String returns the string representation -func (s H264Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s H264Settings) GoString() string { - return s.String() -} - -// SetAdaptiveQuantization sets the AdaptiveQuantization field's value. -func (s *H264Settings) SetAdaptiveQuantization(v string) *H264Settings { - s.AdaptiveQuantization = &v - return s -} - -// SetAfdSignaling sets the AfdSignaling field's value. -func (s *H264Settings) SetAfdSignaling(v string) *H264Settings { - s.AfdSignaling = &v - return s -} - -// SetBitrate sets the Bitrate field's value. -func (s *H264Settings) SetBitrate(v int64) *H264Settings { - s.Bitrate = &v - return s -} - -// SetBufFillPct sets the BufFillPct field's value. -func (s *H264Settings) SetBufFillPct(v int64) *H264Settings { - s.BufFillPct = &v - return s -} - -// SetBufSize sets the BufSize field's value. -func (s *H264Settings) SetBufSize(v int64) *H264Settings { - s.BufSize = &v - return s -} - -// SetColorMetadata sets the ColorMetadata field's value. -func (s *H264Settings) SetColorMetadata(v string) *H264Settings { - s.ColorMetadata = &v - return s -} - -// SetEntropyEncoding sets the EntropyEncoding field's value. -func (s *H264Settings) SetEntropyEncoding(v string) *H264Settings { - s.EntropyEncoding = &v - return s -} - -// SetFixedAfd sets the FixedAfd field's value. -func (s *H264Settings) SetFixedAfd(v string) *H264Settings { - s.FixedAfd = &v - return s -} - -// SetFlickerAq sets the FlickerAq field's value. 
-func (s *H264Settings) SetFlickerAq(v string) *H264Settings { - s.FlickerAq = &v - return s -} - -// SetFramerateControl sets the FramerateControl field's value. -func (s *H264Settings) SetFramerateControl(v string) *H264Settings { - s.FramerateControl = &v - return s -} - -// SetFramerateDenominator sets the FramerateDenominator field's value. -func (s *H264Settings) SetFramerateDenominator(v int64) *H264Settings { - s.FramerateDenominator = &v - return s -} - -// SetFramerateNumerator sets the FramerateNumerator field's value. -func (s *H264Settings) SetFramerateNumerator(v int64) *H264Settings { - s.FramerateNumerator = &v - return s -} - -// SetGopBReference sets the GopBReference field's value. -func (s *H264Settings) SetGopBReference(v string) *H264Settings { - s.GopBReference = &v - return s -} - -// SetGopClosedCadence sets the GopClosedCadence field's value. -func (s *H264Settings) SetGopClosedCadence(v int64) *H264Settings { - s.GopClosedCadence = &v - return s -} - -// SetGopNumBFrames sets the GopNumBFrames field's value. -func (s *H264Settings) SetGopNumBFrames(v int64) *H264Settings { - s.GopNumBFrames = &v - return s -} - -// SetGopSize sets the GopSize field's value. -func (s *H264Settings) SetGopSize(v float64) *H264Settings { - s.GopSize = &v - return s -} - -// SetGopSizeUnits sets the GopSizeUnits field's value. -func (s *H264Settings) SetGopSizeUnits(v string) *H264Settings { - s.GopSizeUnits = &v - return s -} - -// SetLevel sets the Level field's value. -func (s *H264Settings) SetLevel(v string) *H264Settings { - s.Level = &v - return s -} - -// SetLookAheadRateControl sets the LookAheadRateControl field's value. -func (s *H264Settings) SetLookAheadRateControl(v string) *H264Settings { - s.LookAheadRateControl = &v - return s -} - -// SetMaxBitrate sets the MaxBitrate field's value. -func (s *H264Settings) SetMaxBitrate(v int64) *H264Settings { - s.MaxBitrate = &v - return s -} - -// SetMinIInterval sets the MinIInterval field's value. 
-func (s *H264Settings) SetMinIInterval(v int64) *H264Settings { - s.MinIInterval = &v - return s -} - -// SetNumRefFrames sets the NumRefFrames field's value. -func (s *H264Settings) SetNumRefFrames(v int64) *H264Settings { - s.NumRefFrames = &v - return s -} - -// SetParControl sets the ParControl field's value. -func (s *H264Settings) SetParControl(v string) *H264Settings { - s.ParControl = &v - return s -} - -// SetParDenominator sets the ParDenominator field's value. -func (s *H264Settings) SetParDenominator(v int64) *H264Settings { - s.ParDenominator = &v - return s -} - -// SetParNumerator sets the ParNumerator field's value. -func (s *H264Settings) SetParNumerator(v int64) *H264Settings { - s.ParNumerator = &v - return s -} - -// SetProfile sets the Profile field's value. -func (s *H264Settings) SetProfile(v string) *H264Settings { - s.Profile = &v - return s -} - -// SetRateControlMode sets the RateControlMode field's value. -func (s *H264Settings) SetRateControlMode(v string) *H264Settings { - s.RateControlMode = &v - return s -} - -// SetScanType sets the ScanType field's value. -func (s *H264Settings) SetScanType(v string) *H264Settings { - s.ScanType = &v - return s -} - -// SetSceneChangeDetect sets the SceneChangeDetect field's value. -func (s *H264Settings) SetSceneChangeDetect(v string) *H264Settings { - s.SceneChangeDetect = &v - return s -} - -// SetSlices sets the Slices field's value. -func (s *H264Settings) SetSlices(v int64) *H264Settings { - s.Slices = &v - return s -} - -// SetSoftness sets the Softness field's value. -func (s *H264Settings) SetSoftness(v int64) *H264Settings { - s.Softness = &v - return s -} - -// SetSpatialAq sets the SpatialAq field's value. -func (s *H264Settings) SetSpatialAq(v string) *H264Settings { - s.SpatialAq = &v - return s -} - -// SetSyntax sets the Syntax field's value. 
-func (s *H264Settings) SetSyntax(v string) *H264Settings { - s.Syntax = &v - return s -} - -// SetTemporalAq sets the TemporalAq field's value. -func (s *H264Settings) SetTemporalAq(v string) *H264Settings { - s.TemporalAq = &v - return s -} - -// SetTimecodeInsertion sets the TimecodeInsertion field's value. -func (s *H264Settings) SetTimecodeInsertion(v string) *H264Settings { - s.TimecodeInsertion = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/HlsAkamaiSettings -type HlsAkamaiSettings struct { - _ struct{} `type:"structure"` - - // Number of seconds to wait before retrying connection to the CDN if the connection - // is lost. - ConnectionRetryInterval *int64 `locationName:"connectionRetryInterval" type:"integer"` - - // Size in seconds of file cache for streaming outputs. - FilecacheDuration *int64 `locationName:"filecacheDuration" type:"integer"` - - // Specify whether or not to use chunked transfer encoding to Akamai. User should - // contact Akamai to enable this feature. - HttpTransferMode *string `locationName:"httpTransferMode" type:"string" enum:"HlsAkamaiHttpTransferMode"` - - // Number of retry attempts that will be made before the Live Event is put into - // an error state. - NumRetries *int64 `locationName:"numRetries" type:"integer"` - - // If a streaming output fails, number of seconds to wait until a restart is - // initiated. A value of 0 means never restart. - RestartDelay *int64 `locationName:"restartDelay" type:"integer"` - - // Salt for authenticated Akamai. - Salt *string `locationName:"salt" type:"string"` - - // Token parameter for authenticated akamai. If not specified, _gda_ is used. 
- Token *string `locationName:"token" type:"string"` -} - -// String returns the string representation -func (s HlsAkamaiSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsAkamaiSettings) GoString() string { - return s.String() -} - -// SetConnectionRetryInterval sets the ConnectionRetryInterval field's value. -func (s *HlsAkamaiSettings) SetConnectionRetryInterval(v int64) *HlsAkamaiSettings { - s.ConnectionRetryInterval = &v - return s -} - -// SetFilecacheDuration sets the FilecacheDuration field's value. -func (s *HlsAkamaiSettings) SetFilecacheDuration(v int64) *HlsAkamaiSettings { - s.FilecacheDuration = &v - return s -} - -// SetHttpTransferMode sets the HttpTransferMode field's value. -func (s *HlsAkamaiSettings) SetHttpTransferMode(v string) *HlsAkamaiSettings { - s.HttpTransferMode = &v - return s -} - -// SetNumRetries sets the NumRetries field's value. -func (s *HlsAkamaiSettings) SetNumRetries(v int64) *HlsAkamaiSettings { - s.NumRetries = &v - return s -} - -// SetRestartDelay sets the RestartDelay field's value. -func (s *HlsAkamaiSettings) SetRestartDelay(v int64) *HlsAkamaiSettings { - s.RestartDelay = &v - return s -} - -// SetSalt sets the Salt field's value. -func (s *HlsAkamaiSettings) SetSalt(v string) *HlsAkamaiSettings { - s.Salt = &v - return s -} - -// SetToken sets the Token field's value. -func (s *HlsAkamaiSettings) SetToken(v string) *HlsAkamaiSettings { - s.Token = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/HlsBasicPutSettings -type HlsBasicPutSettings struct { - _ struct{} `type:"structure"` - - // Number of seconds to wait before retrying connection to the CDN if the connection - // is lost. - ConnectionRetryInterval *int64 `locationName:"connectionRetryInterval" type:"integer"` - - // Size in seconds of file cache for streaming outputs. 
- FilecacheDuration *int64 `locationName:"filecacheDuration" type:"integer"` - - // Number of retry attempts that will be made before the Live Event is put into - // an error state. - NumRetries *int64 `locationName:"numRetries" type:"integer"` - - // If a streaming output fails, number of seconds to wait until a restart is - // initiated. A value of 0 means never restart. - RestartDelay *int64 `locationName:"restartDelay" type:"integer"` -} - -// String returns the string representation -func (s HlsBasicPutSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsBasicPutSettings) GoString() string { - return s.String() -} - -// SetConnectionRetryInterval sets the ConnectionRetryInterval field's value. -func (s *HlsBasicPutSettings) SetConnectionRetryInterval(v int64) *HlsBasicPutSettings { - s.ConnectionRetryInterval = &v - return s -} - -// SetFilecacheDuration sets the FilecacheDuration field's value. -func (s *HlsBasicPutSettings) SetFilecacheDuration(v int64) *HlsBasicPutSettings { - s.FilecacheDuration = &v - return s -} - -// SetNumRetries sets the NumRetries field's value. -func (s *HlsBasicPutSettings) SetNumRetries(v int64) *HlsBasicPutSettings { - s.NumRetries = &v - return s -} - -// SetRestartDelay sets the RestartDelay field's value. 
-func (s *HlsBasicPutSettings) SetRestartDelay(v int64) *HlsBasicPutSettings { - s.RestartDelay = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/HlsCdnSettings -type HlsCdnSettings struct { - _ struct{} `type:"structure"` - - HlsAkamaiSettings *HlsAkamaiSettings `locationName:"hlsAkamaiSettings" type:"structure"` - - HlsBasicPutSettings *HlsBasicPutSettings `locationName:"hlsBasicPutSettings" type:"structure"` - - HlsMediaStoreSettings *HlsMediaStoreSettings `locationName:"hlsMediaStoreSettings" type:"structure"` - - HlsWebdavSettings *HlsWebdavSettings `locationName:"hlsWebdavSettings" type:"structure"` -} - -// String returns the string representation -func (s HlsCdnSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsCdnSettings) GoString() string { - return s.String() -} - -// SetHlsAkamaiSettings sets the HlsAkamaiSettings field's value. -func (s *HlsCdnSettings) SetHlsAkamaiSettings(v *HlsAkamaiSettings) *HlsCdnSettings { - s.HlsAkamaiSettings = v - return s -} - -// SetHlsBasicPutSettings sets the HlsBasicPutSettings field's value. -func (s *HlsCdnSettings) SetHlsBasicPutSettings(v *HlsBasicPutSettings) *HlsCdnSettings { - s.HlsBasicPutSettings = v - return s -} - -// SetHlsMediaStoreSettings sets the HlsMediaStoreSettings field's value. -func (s *HlsCdnSettings) SetHlsMediaStoreSettings(v *HlsMediaStoreSettings) *HlsCdnSettings { - s.HlsMediaStoreSettings = v - return s -} - -// SetHlsWebdavSettings sets the HlsWebdavSettings field's value. -func (s *HlsCdnSettings) SetHlsWebdavSettings(v *HlsWebdavSettings) *HlsCdnSettings { - s.HlsWebdavSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/HlsGroupSettings -type HlsGroupSettings struct { - _ struct{} `type:"structure"` - - // Choose one or more ad marker types to pass SCTE35 signals through to this - // group of Apple HLS outputs. 
- AdMarkers []*string `locationName:"adMarkers" type:"list"` - - // A partial URI prefix that will be prepended to each output in the media .m3u8 - // file. Can be used if base manifest is delivered from a different URL than - // the main .m3u8 file. - BaseUrlContent *string `locationName:"baseUrlContent" type:"string"` - - // A partial URI prefix that will be prepended to each output in the media .m3u8 - // file. Can be used if base manifest is delivered from a different URL than - // the main .m3u8 file. - BaseUrlManifest *string `locationName:"baseUrlManifest" type:"string"` - - // Mapping of up to 4 caption channels to caption languages. Is only meaningful - // if captionLanguageSetting is set to "insert". - CaptionLanguageMappings []*CaptionLanguageMapping `locationName:"captionLanguageMappings" type:"list"` - - // Applies only to 608 Embedded output captions.insert: Include CLOSED-CAPTIONS - // lines in the manifest. Specify at least one language in the CC1 Language - // Code field. One CLOSED-CAPTION line is added for each Language Code you specify. - // Make sure to specify the languages in the order in which they appear in the - // original source (if the source is embedded format) or the order of the caption - // selectors (if the source is other than embedded). Otherwise, languages in - // the manifest will not match up properly with the output captions.none: Include - // CLOSED-CAPTIONS=NONE line in the manifest.omit: Omit any CLOSED-CAPTIONS - // line from the manifest. - CaptionLanguageSetting *string `locationName:"captionLanguageSetting" type:"string" enum:"HlsCaptionLanguageSetting"` - - // When set to "disabled", sets the #EXT-X-ALLOW-CACHE:no tag in the manifest, - // which prevents clients from saving media segments for later replay. - ClientCache *string `locationName:"clientCache" type:"string" enum:"HlsClientCache"` - - // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist - // generation. 
- CodecSpecification *string `locationName:"codecSpecification" type:"string" enum:"HlsCodecSpecification"` - - // For use with encryptionType. This is a 128-bit, 16-byte hex value represented - // by a 32-character text string. If ivSource is set to "explicit" then this - // parameter is required and is used as the IV for encryption. - ConstantIv *string `locationName:"constantIv" type:"string"` - - // A directory or HTTP destination for the HLS segments, manifest files, and - // encryption keys (if enabled). - Destination *OutputLocationRef `locationName:"destination" type:"structure"` - - // Place segments in subdirectories. - DirectoryStructure *string `locationName:"directoryStructure" type:"string" enum:"HlsDirectoryStructure"` - - // Encrypts the segments with the given encryption scheme. Exclude this parameter - // if no encryption is desired. - EncryptionType *string `locationName:"encryptionType" type:"string" enum:"HlsEncryptionType"` - - // Parameters that control interactions with the CDN. - HlsCdnSettings *HlsCdnSettings `locationName:"hlsCdnSettings" type:"structure"` - - // Number of segments to keep in the playlist (.m3u8) file. mode must be "vod" - // for this setting to have an effect, and this number should be less than or - // equal to keepSegments. - IndexNSegments *int64 `locationName:"indexNSegments" type:"integer"` - - // Parameter that control output group behavior on input loss. - InputLossAction *string `locationName:"inputLossAction" type:"string" enum:"InputLossActionForHlsOut"` - - // For use with encryptionType. The IV (Initialization Vector) is a 128-bit - // number used in conjunction with the key for encrypting blocks. If set to - // "include", IV is listed in the manifest, otherwise the IV is not in the manifest. - IvInManifest *string `locationName:"ivInManifest" type:"string" enum:"HlsIvInManifest"` - - // For use with encryptionType. 
The IV (Initialization Vector) is a 128-bit - // number used in conjunction with the key for encrypting blocks. If this setting - // is "followsSegmentNumber", it will cause the IV to change every segment (to - // match the segment number). If this is set to "explicit", you must enter a - // constantIv value. - IvSource *string `locationName:"ivSource" type:"string" enum:"HlsIvSource"` - - // Number of segments to retain in the destination directory. mode must be "live" - // for this setting to have an effect. - KeepSegments *int64 `locationName:"keepSegments" type:"integer"` - - // The value specifies how the key is represented in the resource identified - // by the URI. If parameter is absent, an implicit value of "identity" is used. - // A reverse DNS string can also be given. - KeyFormat *string `locationName:"keyFormat" type:"string"` - - // Either a single positive integer version value or a slash delimited list - // of version values (1/2/3). - KeyFormatVersions *string `locationName:"keyFormatVersions" type:"string"` - - // The key provider settings. - KeyProviderSettings *KeyProviderSettings `locationName:"keyProviderSettings" type:"structure"` - - // When set to gzip, compresses HLS playlist. - ManifestCompression *string `locationName:"manifestCompression" type:"string" enum:"HlsManifestCompression"` - - // Indicates whether the output manifest should use floating point or integer - // values for segment duration. - ManifestDurationFormat *string `locationName:"manifestDurationFormat" type:"string" enum:"HlsManifestDurationFormat"` - - // When set, minimumSegmentLength is enforced by looking ahead and back within - // the specified range for a nearby avail and extending the segment size if - // needed. - MinSegmentLength *int64 `locationName:"minSegmentLength" type:"integer"` - - // If set to "vod", keeps and indexes all segments starting with the first segment. 
- // If set to "live" segments will age out and only the last keepSegments number - // of segments will be retained. - Mode *string `locationName:"mode" type:"string" enum:"HlsMode"` - - // Generates the .m3u8 playlist file for this HLS output group. The segmentsOnly - // option will output segments without the .m3u8 file. - OutputSelection *string `locationName:"outputSelection" type:"string" enum:"HlsOutputSelection"` - - // Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. - // The value is calculated as follows: either the program date and time are - // initialized using the input timecode source, or the time is initialized using - // the input timecode source and the date is initialized using the timestampOffset. - ProgramDateTime *string `locationName:"programDateTime" type:"string" enum:"HlsProgramDateTime"` - - // Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds. - ProgramDateTimePeriod *int64 `locationName:"programDateTimePeriod" type:"integer"` - - // Length of MPEG-2 Transport Stream segments to create (in seconds). Note that - // segments will end on the next keyframe after this number of seconds, so actual - // segment length may be longer. - SegmentLength *int64 `locationName:"segmentLength" type:"integer"` - - // When set to useInputSegmentation, the output segment or fragment points are - // set by the RAI markers from the input streams. - SegmentationMode *string `locationName:"segmentationMode" type:"string" enum:"HlsSegmentationMode"` - - // Number of segments to write to a subdirectory before starting a new one. - // directoryStructure must be subdirectoryPerStream for this setting to have - // an effect. - SegmentsPerSubdirectory *int64 `locationName:"segmentsPerSubdirectory" type:"integer"` - - // Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag - // of variant manifest. 
- StreamInfResolution *string `locationName:"streamInfResolution" type:"string" enum:"HlsStreamInfResolution"` - - // Indicates ID3 frame that has the timecode. - TimedMetadataId3Frame *string `locationName:"timedMetadataId3Frame" type:"string" enum:"HlsTimedMetadataId3Frame"` - - // Timed Metadata interval in seconds. - TimedMetadataId3Period *int64 `locationName:"timedMetadataId3Period" type:"integer"` - - // Provides an extra millisecond delta offset to fine tune the timestamps. - TimestampDeltaMilliseconds *int64 `locationName:"timestampDeltaMilliseconds" type:"integer"` - - // When set to "singleFile", emits the program as a single media resource (.ts) - // file, and uses #EXT-X-BYTERANGE tags to index segment for playback. Playback - // of VOD mode content during event is not guaranteed due to HTTP server caching. - TsFileMode *string `locationName:"tsFileMode" type:"string" enum:"HlsTsFileMode"` -} - -// String returns the string representation -func (s HlsGroupSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsGroupSettings) GoString() string { - return s.String() -} - -// SetAdMarkers sets the AdMarkers field's value. -func (s *HlsGroupSettings) SetAdMarkers(v []*string) *HlsGroupSettings { - s.AdMarkers = v - return s -} - -// SetBaseUrlContent sets the BaseUrlContent field's value. -func (s *HlsGroupSettings) SetBaseUrlContent(v string) *HlsGroupSettings { - s.BaseUrlContent = &v - return s -} - -// SetBaseUrlManifest sets the BaseUrlManifest field's value. -func (s *HlsGroupSettings) SetBaseUrlManifest(v string) *HlsGroupSettings { - s.BaseUrlManifest = &v - return s -} - -// SetCaptionLanguageMappings sets the CaptionLanguageMappings field's value. -func (s *HlsGroupSettings) SetCaptionLanguageMappings(v []*CaptionLanguageMapping) *HlsGroupSettings { - s.CaptionLanguageMappings = v - return s -} - -// SetCaptionLanguageSetting sets the CaptionLanguageSetting field's value. 
-func (s *HlsGroupSettings) SetCaptionLanguageSetting(v string) *HlsGroupSettings { - s.CaptionLanguageSetting = &v - return s -} - -// SetClientCache sets the ClientCache field's value. -func (s *HlsGroupSettings) SetClientCache(v string) *HlsGroupSettings { - s.ClientCache = &v - return s -} - -// SetCodecSpecification sets the CodecSpecification field's value. -func (s *HlsGroupSettings) SetCodecSpecification(v string) *HlsGroupSettings { - s.CodecSpecification = &v - return s -} - -// SetConstantIv sets the ConstantIv field's value. -func (s *HlsGroupSettings) SetConstantIv(v string) *HlsGroupSettings { - s.ConstantIv = &v - return s -} - -// SetDestination sets the Destination field's value. -func (s *HlsGroupSettings) SetDestination(v *OutputLocationRef) *HlsGroupSettings { - s.Destination = v - return s -} - -// SetDirectoryStructure sets the DirectoryStructure field's value. -func (s *HlsGroupSettings) SetDirectoryStructure(v string) *HlsGroupSettings { - s.DirectoryStructure = &v - return s -} - -// SetEncryptionType sets the EncryptionType field's value. -func (s *HlsGroupSettings) SetEncryptionType(v string) *HlsGroupSettings { - s.EncryptionType = &v - return s -} - -// SetHlsCdnSettings sets the HlsCdnSettings field's value. -func (s *HlsGroupSettings) SetHlsCdnSettings(v *HlsCdnSettings) *HlsGroupSettings { - s.HlsCdnSettings = v - return s -} - -// SetIndexNSegments sets the IndexNSegments field's value. -func (s *HlsGroupSettings) SetIndexNSegments(v int64) *HlsGroupSettings { - s.IndexNSegments = &v - return s -} - -// SetInputLossAction sets the InputLossAction field's value. -func (s *HlsGroupSettings) SetInputLossAction(v string) *HlsGroupSettings { - s.InputLossAction = &v - return s -} - -// SetIvInManifest sets the IvInManifest field's value. -func (s *HlsGroupSettings) SetIvInManifest(v string) *HlsGroupSettings { - s.IvInManifest = &v - return s -} - -// SetIvSource sets the IvSource field's value. 
-func (s *HlsGroupSettings) SetIvSource(v string) *HlsGroupSettings { - s.IvSource = &v - return s -} - -// SetKeepSegments sets the KeepSegments field's value. -func (s *HlsGroupSettings) SetKeepSegments(v int64) *HlsGroupSettings { - s.KeepSegments = &v - return s -} - -// SetKeyFormat sets the KeyFormat field's value. -func (s *HlsGroupSettings) SetKeyFormat(v string) *HlsGroupSettings { - s.KeyFormat = &v - return s -} - -// SetKeyFormatVersions sets the KeyFormatVersions field's value. -func (s *HlsGroupSettings) SetKeyFormatVersions(v string) *HlsGroupSettings { - s.KeyFormatVersions = &v - return s -} - -// SetKeyProviderSettings sets the KeyProviderSettings field's value. -func (s *HlsGroupSettings) SetKeyProviderSettings(v *KeyProviderSettings) *HlsGroupSettings { - s.KeyProviderSettings = v - return s -} - -// SetManifestCompression sets the ManifestCompression field's value. -func (s *HlsGroupSettings) SetManifestCompression(v string) *HlsGroupSettings { - s.ManifestCompression = &v - return s -} - -// SetManifestDurationFormat sets the ManifestDurationFormat field's value. -func (s *HlsGroupSettings) SetManifestDurationFormat(v string) *HlsGroupSettings { - s.ManifestDurationFormat = &v - return s -} - -// SetMinSegmentLength sets the MinSegmentLength field's value. -func (s *HlsGroupSettings) SetMinSegmentLength(v int64) *HlsGroupSettings { - s.MinSegmentLength = &v - return s -} - -// SetMode sets the Mode field's value. -func (s *HlsGroupSettings) SetMode(v string) *HlsGroupSettings { - s.Mode = &v - return s -} - -// SetOutputSelection sets the OutputSelection field's value. -func (s *HlsGroupSettings) SetOutputSelection(v string) *HlsGroupSettings { - s.OutputSelection = &v - return s -} - -// SetProgramDateTime sets the ProgramDateTime field's value. -func (s *HlsGroupSettings) SetProgramDateTime(v string) *HlsGroupSettings { - s.ProgramDateTime = &v - return s -} - -// SetProgramDateTimePeriod sets the ProgramDateTimePeriod field's value. 
-func (s *HlsGroupSettings) SetProgramDateTimePeriod(v int64) *HlsGroupSettings { - s.ProgramDateTimePeriod = &v - return s -} - -// SetSegmentLength sets the SegmentLength field's value. -func (s *HlsGroupSettings) SetSegmentLength(v int64) *HlsGroupSettings { - s.SegmentLength = &v - return s -} - -// SetSegmentationMode sets the SegmentationMode field's value. -func (s *HlsGroupSettings) SetSegmentationMode(v string) *HlsGroupSettings { - s.SegmentationMode = &v - return s -} - -// SetSegmentsPerSubdirectory sets the SegmentsPerSubdirectory field's value. -func (s *HlsGroupSettings) SetSegmentsPerSubdirectory(v int64) *HlsGroupSettings { - s.SegmentsPerSubdirectory = &v - return s -} - -// SetStreamInfResolution sets the StreamInfResolution field's value. -func (s *HlsGroupSettings) SetStreamInfResolution(v string) *HlsGroupSettings { - s.StreamInfResolution = &v - return s -} - -// SetTimedMetadataId3Frame sets the TimedMetadataId3Frame field's value. -func (s *HlsGroupSettings) SetTimedMetadataId3Frame(v string) *HlsGroupSettings { - s.TimedMetadataId3Frame = &v - return s -} - -// SetTimedMetadataId3Period sets the TimedMetadataId3Period field's value. -func (s *HlsGroupSettings) SetTimedMetadataId3Period(v int64) *HlsGroupSettings { - s.TimedMetadataId3Period = &v - return s -} - -// SetTimestampDeltaMilliseconds sets the TimestampDeltaMilliseconds field's value. -func (s *HlsGroupSettings) SetTimestampDeltaMilliseconds(v int64) *HlsGroupSettings { - s.TimestampDeltaMilliseconds = &v - return s -} - -// SetTsFileMode sets the TsFileMode field's value. 
-func (s *HlsGroupSettings) SetTsFileMode(v string) *HlsGroupSettings { - s.TsFileMode = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/HlsInputSettings -type HlsInputSettings struct { - _ struct{} `type:"structure"` - - // When specified the HLS stream with the m3u8 BANDWIDTH that most closely matches - // this value will be chosen, otherwise the highest bandwidth stream in the - // m3u8 will be chosen. The bitrate is specified in bits per second, as in an - // HLS manifest. - Bandwidth *int64 `locationName:"bandwidth" type:"integer"` - - // When specified, reading of the HLS input will begin this many buffer segments - // from the end (most recently written segment). When not specified, the HLS - // input will begin with the first segment specified in the m3u8. - BufferSegments *int64 `locationName:"bufferSegments" type:"integer"` - - // The number of consecutive times that attempts to read a manifest or segment - // must fail before the input is considered unavailable. - Retries *int64 `locationName:"retries" type:"integer"` - - // The number of seconds between retries when an attempt to read a manifest - // or segment fails. - RetryInterval *int64 `locationName:"retryInterval" type:"integer"` -} - -// String returns the string representation -func (s HlsInputSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsInputSettings) GoString() string { - return s.String() -} - -// SetBandwidth sets the Bandwidth field's value. -func (s *HlsInputSettings) SetBandwidth(v int64) *HlsInputSettings { - s.Bandwidth = &v - return s -} - -// SetBufferSegments sets the BufferSegments field's value. -func (s *HlsInputSettings) SetBufferSegments(v int64) *HlsInputSettings { - s.BufferSegments = &v - return s -} - -// SetRetries sets the Retries field's value. 
-func (s *HlsInputSettings) SetRetries(v int64) *HlsInputSettings { - s.Retries = &v - return s -} - -// SetRetryInterval sets the RetryInterval field's value. -func (s *HlsInputSettings) SetRetryInterval(v int64) *HlsInputSettings { - s.RetryInterval = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/HlsMediaStoreSettings -type HlsMediaStoreSettings struct { - _ struct{} `type:"structure"` - - // Number of seconds to wait before retrying connection to the CDN if the connection - // is lost. - ConnectionRetryInterval *int64 `locationName:"connectionRetryInterval" type:"integer"` - - // Size in seconds of file cache for streaming outputs. - FilecacheDuration *int64 `locationName:"filecacheDuration" type:"integer"` - - // When set to temporal, output files are stored in non-persistent memory for - // faster reading and writing. - MediaStoreStorageClass *string `locationName:"mediaStoreStorageClass" type:"string" enum:"HlsMediaStoreStorageClass"` - - // Number of retry attempts that will be made before the Live Event is put into - // an error state. - NumRetries *int64 `locationName:"numRetries" type:"integer"` - - // If a streaming output fails, number of seconds to wait until a restart is - // initiated. A value of 0 means never restart. - RestartDelay *int64 `locationName:"restartDelay" type:"integer"` -} - -// String returns the string representation -func (s HlsMediaStoreSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsMediaStoreSettings) GoString() string { - return s.String() -} - -// SetConnectionRetryInterval sets the ConnectionRetryInterval field's value. -func (s *HlsMediaStoreSettings) SetConnectionRetryInterval(v int64) *HlsMediaStoreSettings { - s.ConnectionRetryInterval = &v - return s -} - -// SetFilecacheDuration sets the FilecacheDuration field's value. 
-func (s *HlsMediaStoreSettings) SetFilecacheDuration(v int64) *HlsMediaStoreSettings { - s.FilecacheDuration = &v - return s -} - -// SetMediaStoreStorageClass sets the MediaStoreStorageClass field's value. -func (s *HlsMediaStoreSettings) SetMediaStoreStorageClass(v string) *HlsMediaStoreSettings { - s.MediaStoreStorageClass = &v - return s -} - -// SetNumRetries sets the NumRetries field's value. -func (s *HlsMediaStoreSettings) SetNumRetries(v int64) *HlsMediaStoreSettings { - s.NumRetries = &v - return s -} - -// SetRestartDelay sets the RestartDelay field's value. -func (s *HlsMediaStoreSettings) SetRestartDelay(v int64) *HlsMediaStoreSettings { - s.RestartDelay = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/HlsOutputSettings -type HlsOutputSettings struct { - _ struct{} `type:"structure"` - - // Settings regarding the underlying stream. These settings are different for - // audio-only outputs. - HlsSettings *HlsSettings `locationName:"hlsSettings" type:"structure"` - - // String concatenated to the end of the destination filename. Accepts \"Format - // Identifiers\":#formatIdentifierParameters. - NameModifier *string `locationName:"nameModifier" type:"string"` - - // String concatenated to end of segment filenames. - SegmentModifier *string `locationName:"segmentModifier" type:"string"` -} - -// String returns the string representation -func (s HlsOutputSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsOutputSettings) GoString() string { - return s.String() -} - -// SetHlsSettings sets the HlsSettings field's value. -func (s *HlsOutputSettings) SetHlsSettings(v *HlsSettings) *HlsOutputSettings { - s.HlsSettings = v - return s -} - -// SetNameModifier sets the NameModifier field's value. 
-func (s *HlsOutputSettings) SetNameModifier(v string) *HlsOutputSettings { - s.NameModifier = &v - return s -} - -// SetSegmentModifier sets the SegmentModifier field's value. -func (s *HlsOutputSettings) SetSegmentModifier(v string) *HlsOutputSettings { - s.SegmentModifier = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/HlsSettings -type HlsSettings struct { - _ struct{} `type:"structure"` - - AudioOnlyHlsSettings *AudioOnlyHlsSettings `locationName:"audioOnlyHlsSettings" type:"structure"` - - StandardHlsSettings *StandardHlsSettings `locationName:"standardHlsSettings" type:"structure"` -} - -// String returns the string representation -func (s HlsSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsSettings) GoString() string { - return s.String() -} - -// SetAudioOnlyHlsSettings sets the AudioOnlyHlsSettings field's value. -func (s *HlsSettings) SetAudioOnlyHlsSettings(v *AudioOnlyHlsSettings) *HlsSettings { - s.AudioOnlyHlsSettings = v - return s -} - -// SetStandardHlsSettings sets the StandardHlsSettings field's value. -func (s *HlsSettings) SetStandardHlsSettings(v *StandardHlsSettings) *HlsSettings { - s.StandardHlsSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/HlsWebdavSettings -type HlsWebdavSettings struct { - _ struct{} `type:"structure"` - - // Number of seconds to wait before retrying connection to the CDN if the connection - // is lost. - ConnectionRetryInterval *int64 `locationName:"connectionRetryInterval" type:"integer"` - - // Size in seconds of file cache for streaming outputs. - FilecacheDuration *int64 `locationName:"filecacheDuration" type:"integer"` - - // Specify whether or not to use chunked transfer encoding to WebDAV. 
- HttpTransferMode *string `locationName:"httpTransferMode" type:"string" enum:"HlsWebdavHttpTransferMode"` - - // Number of retry attempts that will be made before the Live Event is put into - // an error state. - NumRetries *int64 `locationName:"numRetries" type:"integer"` - - // If a streaming output fails, number of seconds to wait until a restart is - // initiated. A value of 0 means never restart. - RestartDelay *int64 `locationName:"restartDelay" type:"integer"` -} - -// String returns the string representation -func (s HlsWebdavSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsWebdavSettings) GoString() string { - return s.String() -} - -// SetConnectionRetryInterval sets the ConnectionRetryInterval field's value. -func (s *HlsWebdavSettings) SetConnectionRetryInterval(v int64) *HlsWebdavSettings { - s.ConnectionRetryInterval = &v - return s -} - -// SetFilecacheDuration sets the FilecacheDuration field's value. -func (s *HlsWebdavSettings) SetFilecacheDuration(v int64) *HlsWebdavSettings { - s.FilecacheDuration = &v - return s -} - -// SetHttpTransferMode sets the HttpTransferMode field's value. -func (s *HlsWebdavSettings) SetHttpTransferMode(v string) *HlsWebdavSettings { - s.HttpTransferMode = &v - return s -} - -// SetNumRetries sets the NumRetries field's value. -func (s *HlsWebdavSettings) SetNumRetries(v int64) *HlsWebdavSettings { - s.NumRetries = &v - return s -} - -// SetRestartDelay sets the RestartDelay field's value. 
-func (s *HlsWebdavSettings) SetRestartDelay(v int64) *HlsWebdavSettings { - s.RestartDelay = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Input -type Input struct { - _ struct{} `type:"structure"` - - // Unique ARN of input (generated, immutable) - Arn *string `locationName:"arn" type:"string"` - - // List of channel IDs that that input is attached to (currently an input can - // only be attached to one channel) - AttachedChannels []*string `locationName:"attachedChannels" type:"list"` - - // List of destinations of input (PULL-type) - Destinations []*InputDestination `locationName:"destinations" type:"list"` - - // generated ID of input (unique for user account, immutable) - Id *string `locationName:"id" type:"string"` - - // user-assigned name (mutable) - Name *string `locationName:"name" type:"string"` - - // List of IDs for all the security groups attached to the input. - SecurityGroups []*string `locationName:"securityGroups" type:"list"` - - // List of sources of input (PULL-type) - Sources []*InputSource `locationName:"sources" type:"list"` - - State *string `locationName:"state" type:"string" enum:"InputState"` - - Type *string `locationName:"type" type:"string" enum:"InputType"` -} - -// String returns the string representation -func (s Input) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Input) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *Input) SetArn(v string) *Input { - s.Arn = &v - return s -} - -// SetAttachedChannels sets the AttachedChannels field's value. -func (s *Input) SetAttachedChannels(v []*string) *Input { - s.AttachedChannels = v - return s -} - -// SetDestinations sets the Destinations field's value. -func (s *Input) SetDestinations(v []*InputDestination) *Input { - s.Destinations = v - return s -} - -// SetId sets the Id field's value. 
-func (s *Input) SetId(v string) *Input { - s.Id = &v - return s -} - -// SetName sets the Name field's value. -func (s *Input) SetName(v string) *Input { - s.Name = &v - return s -} - -// SetSecurityGroups sets the SecurityGroups field's value. -func (s *Input) SetSecurityGroups(v []*string) *Input { - s.SecurityGroups = v - return s -} - -// SetSources sets the Sources field's value. -func (s *Input) SetSources(v []*InputSource) *Input { - s.Sources = v - return s -} - -// SetState sets the State field's value. -func (s *Input) SetState(v string) *Input { - s.State = &v - return s -} - -// SetType sets the Type field's value. -func (s *Input) SetType(v string) *Input { - s.Type = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputAttachment -type InputAttachment struct { - _ struct{} `type:"structure"` - - // The ID of the input - InputId *string `locationName:"inputId" type:"string"` - - // Settings of an input (caption selector, etc.) - InputSettings *InputSettings `locationName:"inputSettings" type:"structure"` -} - -// String returns the string representation -func (s InputAttachment) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputAttachment) GoString() string { - return s.String() -} - -// SetInputId sets the InputId field's value. -func (s *InputAttachment) SetInputId(v string) *InputAttachment { - s.InputId = &v - return s -} - -// SetInputSettings sets the InputSettings field's value. -func (s *InputAttachment) SetInputSettings(v *InputSettings) *InputAttachment { - s.InputSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputChannelLevel -type InputChannelLevel struct { - _ struct{} `type:"structure"` - - // Remixing value. Units are in dB and acceptable values are within the range - // from -60 (mute) and 6 dB. 
- Gain *int64 `locationName:"gain" type:"integer"` - - // The index of the input channel used as a source. - InputChannel *int64 `locationName:"inputChannel" type:"integer"` -} - -// String returns the string representation -func (s InputChannelLevel) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputChannelLevel) GoString() string { - return s.String() -} - -// SetGain sets the Gain field's value. -func (s *InputChannelLevel) SetGain(v int64) *InputChannelLevel { - s.Gain = &v - return s -} - -// SetInputChannel sets the InputChannel field's value. -func (s *InputChannelLevel) SetInputChannel(v int64) *InputChannelLevel { - s.InputChannel = &v - return s -} - -// Settings for a PUSH type input -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputDestination -type InputDestination struct { - _ struct{} `type:"structure"` - - // system-generated static IP address of endpoint.Remains fixed for the lifetime - // of the input - Ip *string `locationName:"ip" type:"string"` - - // port for input - Port *string `locationName:"port" type:"string"` - - // This represents the endpoint that the customer stream will bepushed to. - Url *string `locationName:"url" type:"string"` -} - -// String returns the string representation -func (s InputDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputDestination) GoString() string { - return s.String() -} - -// SetIp sets the Ip field's value. -func (s *InputDestination) SetIp(v string) *InputDestination { - s.Ip = &v - return s -} - -// SetPort sets the Port field's value. -func (s *InputDestination) SetPort(v string) *InputDestination { - s.Port = &v - return s -} - -// SetUrl sets the Url field's value. 
-func (s *InputDestination) SetUrl(v string) *InputDestination { - s.Url = &v - return s -} - -// Endpoint settings for a PUSH type input -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputDestinationRequest -type InputDestinationRequest struct { - _ struct{} `type:"structure"` - - // A unique name for the location the RTMP stream is being pushedto. - StreamName *string `locationName:"streamName" type:"string"` -} - -// String returns the string representation -func (s InputDestinationRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputDestinationRequest) GoString() string { - return s.String() -} - -// SetStreamName sets the StreamName field's value. -func (s *InputDestinationRequest) SetStreamName(v string) *InputDestinationRequest { - s.StreamName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputLocation -type InputLocation struct { - _ struct{} `type:"structure"` - - // key used to extract the password from EC2 Parameter store - PasswordParam *string `locationName:"passwordParam" type:"string"` - - // Uniform Resource Identifier - This should be a path to a file accessible - // to the Live system (eg. a http:// URI) depending on the output type. For - // example, a rtmpEndpoint should have a uri simliar to: "rtmp://fmsserver/live". - Uri *string `locationName:"uri" type:"string"` - - // Username if credentials are required to access a file or publishing point. - // This can be either a plaintext username, or a reference to an AWS parameter - // store name from which the username can be retrieved. 
AWS Parameter store - // format: "ssm://" - Username *string `locationName:"username" type:"string"` -} - -// String returns the string representation -func (s InputLocation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputLocation) GoString() string { - return s.String() -} - -// SetPasswordParam sets the PasswordParam field's value. -func (s *InputLocation) SetPasswordParam(v string) *InputLocation { - s.PasswordParam = &v - return s -} - -// SetUri sets the Uri field's value. -func (s *InputLocation) SetUri(v string) *InputLocation { - s.Uri = &v - return s -} - -// SetUsername sets the Username field's value. -func (s *InputLocation) SetUsername(v string) *InputLocation { - s.Username = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputLossBehavior -type InputLossBehavior struct { - _ struct{} `type:"structure"` - - // On input loss, the number of milliseconds to substitute black into the output - // before switching to the frame specified by inputLossImageType. A value x, - // where 0 <= x <= 1,000,000 and a value of 1,000,000 will be interpreted as - // infinite. - BlackFrameMsec *int64 `locationName:"blackFrameMsec" type:"integer"` - - // When input loss image type is "color" this field specifies the color to use. - // Value: 6 hex characters representing the values of RGB. - InputLossImageColor *string `locationName:"inputLossImageColor" type:"string"` - - // When input loss image type is "slate" these fields specify the parameters - // for accessing the slate. - InputLossImageSlate *InputLocation `locationName:"inputLossImageSlate" type:"structure"` - - // Indicates whether to substitute a solid color or a slate into the output - // after input loss exceeds blackFrameMsec. 
- InputLossImageType *string `locationName:"inputLossImageType" type:"string" enum:"InputLossImageType"` - - // On input loss, the number of milliseconds to repeat the previous picture - // before substituting black into the output. A value x, where 0 <= x <= 1,000,000 - // and a value of 1,000,000 will be interpreted as infinite. - RepeatFrameMsec *int64 `locationName:"repeatFrameMsec" type:"integer"` -} - -// String returns the string representation -func (s InputLossBehavior) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputLossBehavior) GoString() string { - return s.String() -} - -// SetBlackFrameMsec sets the BlackFrameMsec field's value. -func (s *InputLossBehavior) SetBlackFrameMsec(v int64) *InputLossBehavior { - s.BlackFrameMsec = &v - return s -} - -// SetInputLossImageColor sets the InputLossImageColor field's value. -func (s *InputLossBehavior) SetInputLossImageColor(v string) *InputLossBehavior { - s.InputLossImageColor = &v - return s -} - -// SetInputLossImageSlate sets the InputLossImageSlate field's value. -func (s *InputLossBehavior) SetInputLossImageSlate(v *InputLocation) *InputLossBehavior { - s.InputLossImageSlate = v - return s -} - -// SetInputLossImageType sets the InputLossImageType field's value. -func (s *InputLossBehavior) SetInputLossImageType(v string) *InputLossBehavior { - s.InputLossImageType = &v - return s -} - -// SetRepeatFrameMsec sets the RepeatFrameMsec field's value. 
-func (s *InputLossBehavior) SetRepeatFrameMsec(v int64) *InputLossBehavior { - s.RepeatFrameMsec = &v - return s -} - -// An Input Security Group -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputSecurityGroup -type InputSecurityGroup struct { - _ struct{} `type:"structure"` - - // Unique ARN of Input Security Group - Arn *string `locationName:"arn" type:"string"` - - // The Id of the Input Security Group - Id *string `locationName:"id" type:"string"` - - // Whitelist rules and their sync status - WhitelistRules []*InputWhitelistRule `locationName:"whitelistRules" type:"list"` -} - -// String returns the string representation -func (s InputSecurityGroup) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputSecurityGroup) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *InputSecurityGroup) SetArn(v string) *InputSecurityGroup { - s.Arn = &v - return s -} - -// SetId sets the Id field's value. -func (s *InputSecurityGroup) SetId(v string) *InputSecurityGroup { - s.Id = &v - return s -} - -// SetWhitelistRules sets the WhitelistRules field's value. -func (s *InputSecurityGroup) SetWhitelistRules(v []*InputWhitelistRule) *InputSecurityGroup { - s.WhitelistRules = v - return s -} - -// Live Event input parameters. There can be multiple inputs in a single Live -// Event. -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputSettings -type InputSettings struct { - _ struct{} `type:"structure"` - - // Used to select the audio stream to decode for inputs that have multiple available. - AudioSelectors []*AudioSelector `locationName:"audioSelectors" type:"list"` - - // Used to select the caption input to use for inputs that have multiple available. - CaptionSelectors []*CaptionSelector `locationName:"captionSelectors" type:"list"` - - // Enable or disable the deblock filter when filtering. 
- DeblockFilter *string `locationName:"deblockFilter" type:"string" enum:"InputDeblockFilter"` - - // Enable or disable the denoise filter when filtering. - DenoiseFilter *string `locationName:"denoiseFilter" type:"string" enum:"InputDenoiseFilter"` - - // Adjusts the magnitude of filtering from 1 (minimal) to 5 (strongest). - FilterStrength *int64 `locationName:"filterStrength" type:"integer"` - - // Turns on the filter for this input. MPEG-2 inputs have the deblocking filter - // enabled by default.1) auto - filtering will be applied depending on input - // type/quality2) disabled - no filtering will be applied to the input3) forced - // - filtering will be applied regardless of input type - InputFilter *string `locationName:"inputFilter" type:"string" enum:"InputFilter"` - - // Input settings. - NetworkInputSettings *NetworkInputSettings `locationName:"networkInputSettings" type:"structure"` - - // Loop input if it is a file. This allows a file input to be streamed indefinitely. - SourceEndBehavior *string `locationName:"sourceEndBehavior" type:"string" enum:"InputSourceEndBehavior"` - - // Informs which video elementary stream to decode for input types that have - // multiple available. - VideoSelector *VideoSelector `locationName:"videoSelector" type:"structure"` -} - -// String returns the string representation -func (s InputSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputSettings) GoString() string { - return s.String() -} - -// SetAudioSelectors sets the AudioSelectors field's value. -func (s *InputSettings) SetAudioSelectors(v []*AudioSelector) *InputSettings { - s.AudioSelectors = v - return s -} - -// SetCaptionSelectors sets the CaptionSelectors field's value. -func (s *InputSettings) SetCaptionSelectors(v []*CaptionSelector) *InputSettings { - s.CaptionSelectors = v - return s -} - -// SetDeblockFilter sets the DeblockFilter field's value. 
-func (s *InputSettings) SetDeblockFilter(v string) *InputSettings { - s.DeblockFilter = &v - return s -} - -// SetDenoiseFilter sets the DenoiseFilter field's value. -func (s *InputSettings) SetDenoiseFilter(v string) *InputSettings { - s.DenoiseFilter = &v - return s -} - -// SetFilterStrength sets the FilterStrength field's value. -func (s *InputSettings) SetFilterStrength(v int64) *InputSettings { - s.FilterStrength = &v - return s -} - -// SetInputFilter sets the InputFilter field's value. -func (s *InputSettings) SetInputFilter(v string) *InputSettings { - s.InputFilter = &v - return s -} - -// SetNetworkInputSettings sets the NetworkInputSettings field's value. -func (s *InputSettings) SetNetworkInputSettings(v *NetworkInputSettings) *InputSettings { - s.NetworkInputSettings = v - return s -} - -// SetSourceEndBehavior sets the SourceEndBehavior field's value. -func (s *InputSettings) SetSourceEndBehavior(v string) *InputSettings { - s.SourceEndBehavior = &v - return s -} - -// SetVideoSelector sets the VideoSelector field's value. -func (s *InputSettings) SetVideoSelector(v *VideoSelector) *InputSettings { - s.VideoSelector = v - return s -} - -// Settings for a PULL type input -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputSource -type InputSource struct { - _ struct{} `type:"structure"` - - // key used to extract the password from EC2 Parameter store - PasswordParam *string `locationName:"passwordParam" type:"string"` - - // This represents the customer's source URL where stream ispulled from. - Url *string `locationName:"url" type:"string"` - - // username for input source - Username *string `locationName:"username" type:"string"` -} - -// String returns the string representation -func (s InputSource) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputSource) GoString() string { - return s.String() -} - -// SetPasswordParam sets the PasswordParam field's value. 
-func (s *InputSource) SetPasswordParam(v string) *InputSource { - s.PasswordParam = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *InputSource) SetUrl(v string) *InputSource { - s.Url = &v - return s -} - -// SetUsername sets the Username field's value. -func (s *InputSource) SetUsername(v string) *InputSource { - s.Username = &v - return s -} - -// Settings for for a PULL type input -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputSourceRequest -type InputSourceRequest struct { - _ struct{} `type:"structure"` - - // key used to extract the password from EC2 Parameter store - PasswordParam *string `locationName:"passwordParam" type:"string"` - - // This represents the customer's source URL where stream ispulled from. - Url *string `locationName:"url" type:"string"` - - // username for input source - Username *string `locationName:"username" type:"string"` -} - -// String returns the string representation -func (s InputSourceRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputSourceRequest) GoString() string { - return s.String() -} - -// SetPasswordParam sets the PasswordParam field's value. -func (s *InputSourceRequest) SetPasswordParam(v string) *InputSourceRequest { - s.PasswordParam = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *InputSourceRequest) SetUrl(v string) *InputSourceRequest { - s.Url = &v - return s -} - -// SetUsername sets the Username field's value. -func (s *InputSourceRequest) SetUsername(v string) *InputSourceRequest { - s.Username = &v - return s -} - -// Whitelist rule -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputWhitelistRule -type InputWhitelistRule struct { - _ struct{} `type:"structure"` - - // The IPv4 CIDR that's whitelisted. 
- Cidr *string `locationName:"cidr" type:"string"` -} - -// String returns the string representation -func (s InputWhitelistRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputWhitelistRule) GoString() string { - return s.String() -} - -// SetCidr sets the Cidr field's value. -func (s *InputWhitelistRule) SetCidr(v string) *InputWhitelistRule { - s.Cidr = &v - return s -} - -// An IPv4 CIDR to whitelist. -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/InputWhitelistRuleCidr -type InputWhitelistRuleCidr struct { - _ struct{} `type:"structure"` - - // The IPv4 CIDR to whitelist - Cidr *string `locationName:"cidr" type:"string"` -} - -// String returns the string representation -func (s InputWhitelistRuleCidr) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputWhitelistRuleCidr) GoString() string { - return s.String() -} - -// SetCidr sets the Cidr field's value. -func (s *InputWhitelistRuleCidr) SetCidr(v string) *InputWhitelistRuleCidr { - s.Cidr = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/KeyProviderSettings -type KeyProviderSettings struct { - _ struct{} `type:"structure"` - - StaticKeySettings *StaticKeySettings `locationName:"staticKeySettings" type:"structure"` -} - -// String returns the string representation -func (s KeyProviderSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s KeyProviderSettings) GoString() string { - return s.String() -} - -// SetStaticKeySettings sets the StaticKeySettings field's value. 
-func (s *KeyProviderSettings) SetStaticKeySettings(v *StaticKeySettings) *KeyProviderSettings { - s.StaticKeySettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListChannelsRequest -type ListChannelsInput struct { - _ struct{} `type:"structure"` - - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListChannelsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListChannelsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListChannelsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListChannelsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListChannelsInput) SetMaxResults(v int64) *ListChannelsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListChannelsInput) SetNextToken(v string) *ListChannelsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListChannelsResponse -type ListChannelsOutput struct { - _ struct{} `type:"structure"` - - Channels []*ChannelSummary `locationName:"channels" type:"list"` - - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListChannelsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListChannelsOutput) GoString() string { - return s.String() -} - -// SetChannels sets the Channels field's value. -func (s *ListChannelsOutput) SetChannels(v []*ChannelSummary) *ListChannelsOutput { - s.Channels = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListChannelsOutput) SetNextToken(v string) *ListChannelsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputSecurityGroupsRequest -type ListInputSecurityGroupsInput struct { - _ struct{} `type:"structure"` - - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListInputSecurityGroupsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListInputSecurityGroupsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListInputSecurityGroupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListInputSecurityGroupsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListInputSecurityGroupsInput) SetMaxResults(v int64) *ListInputSecurityGroupsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListInputSecurityGroupsInput) SetNextToken(v string) *ListInputSecurityGroupsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputSecurityGroupsResponse -type ListInputSecurityGroupsOutput struct { - _ struct{} `type:"structure"` - - InputSecurityGroups []*InputSecurityGroup `locationName:"inputSecurityGroups" type:"list"` - - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListInputSecurityGroupsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListInputSecurityGroupsOutput) GoString() string { - return s.String() -} - -// SetInputSecurityGroups sets the InputSecurityGroups field's value. -func (s *ListInputSecurityGroupsOutput) SetInputSecurityGroups(v []*InputSecurityGroup) *ListInputSecurityGroupsOutput { - s.InputSecurityGroups = v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListInputSecurityGroupsOutput) SetNextToken(v string) *ListInputSecurityGroupsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputsRequest -type ListInputsInput struct { - _ struct{} `type:"structure"` - - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListInputsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListInputsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListInputsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListInputsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListInputsInput) SetMaxResults(v int64) *ListInputsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListInputsInput) SetNextToken(v string) *ListInputsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputsResponse -type ListInputsOutput struct { - _ struct{} `type:"structure"` - - Inputs []*Input `locationName:"inputs" type:"list"` - - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListInputsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListInputsOutput) GoString() string { - return s.String() -} - -// SetInputs sets the Inputs field's value. 
-func (s *ListInputsOutput) SetInputs(v []*Input) *ListInputsOutput { - s.Inputs = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListInputsOutput) SetNextToken(v string) *ListInputsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/M2tsSettings -type M2tsSettings struct { - _ struct{} `type:"structure"` - - // When set to drop, output audio streams will be removed from the program if - // the selected input audio stream is removed from the input. This allows the - // output audio configuration to dynamically change based on input configuration. - // If this is set to encodeSilence, all output audio streams will output encoded - // silence when not connected to an active input stream. - AbsentInputAudioBehavior *string `locationName:"absentInputAudioBehavior" type:"string" enum:"M2tsAbsentInputAudioBehavior"` - - // When set to enabled, uses ARIB-compliant field muxing and removes video descriptor. - Arib *string `locationName:"arib" type:"string" enum:"M2tsArib"` - - // Packet Identifier (PID) for ARIB Captions in the transport stream. Can be - // entered as a decimal or hexadecimal value. Valid values are 32 (or 0x20)..8182 - // (or 0x1ff6). - AribCaptionsPid *string `locationName:"aribCaptionsPid" type:"string"` - - // If set to auto, pid number used for ARIB Captions will be auto-selected from - // unused pids. If set to useConfigured, ARIB Captions will be on the configured - // pid number. - AribCaptionsPidControl *string `locationName:"aribCaptionsPidControl" type:"string" enum:"M2tsAribCaptionsPidControl"` - - // When set to dvb, uses DVB buffer model for Dolby Digital audio. When set - // to atsc, the ATSC model is used. - AudioBufferModel *string `locationName:"audioBufferModel" type:"string" enum:"M2tsAudioBufferModel"` - - // The number of audio frames to insert for each PES packet. 
- AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"` - - // Packet Identifier (PID) of the elementary audio stream(s) in the transport - // stream. Multiple values are accepted, and can be entered in ranges and/or - // by comma separation. Can be entered as decimal or hexadecimal values. Each - // PID specified must be in the range of 32 (or 0x20)..8182 (or 0x1ff6). - AudioPids *string `locationName:"audioPids" type:"string"` - - // When set to atsc, uses stream type = 0x81 for AC3 and stream type = 0x87 - // for EAC3. When set to dvb, uses stream type = 0x06. - AudioStreamType *string `locationName:"audioStreamType" type:"string" enum:"M2tsAudioStreamType"` - - // The output bitrate of the transport stream in bits per second. Setting to - // 0 lets the muxer automatically determine the appropriate bitrate. - Bitrate *int64 `locationName:"bitrate" type:"integer"` - - // If set to multiplex, use multiplex buffer model for accurate interleaving. - // Setting to bufferModel to none can lead to lower latency, but low-memory - // devices may not be able to play back the stream without interruptions. - BufferModel *string `locationName:"bufferModel" type:"string" enum:"M2tsBufferModel"` - - // When set to enabled, generates captionServiceDescriptor in PMT. - CcDescriptor *string `locationName:"ccDescriptor" type:"string" enum:"M2tsCcDescriptor"` - - // Inserts DVB Network Information Table (NIT) at the specified table repetition - // interval. - DvbNitSettings *DvbNitSettings `locationName:"dvbNitSettings" type:"structure"` - - // Inserts DVB Service Description Table (SDT) at the specified table repetition - // interval. - DvbSdtSettings *DvbSdtSettings `locationName:"dvbSdtSettings" type:"structure"` - - // Packet Identifier (PID) for input source DVB Subtitle data to this output. - // Multiple values are accepted, and can be entered in ranges and/or by comma - // separation. Can be entered as decimal or hexadecimal values. 
Each PID specified - // must be in the range of 32 (or 0x20)..8182 (or 0x1ff6). - DvbSubPids *string `locationName:"dvbSubPids" type:"string"` - - // Inserts DVB Time and Date Table (TDT) at the specified table repetition interval. - DvbTdtSettings *DvbTdtSettings `locationName:"dvbTdtSettings" type:"structure"` - - // Packet Identifier (PID) for input source DVB Teletext data to this output. - // Can be entered as a decimal or hexadecimal value. Valid values are 32 (or - // 0x20)..8182 (or 0x1ff6). - DvbTeletextPid *string `locationName:"dvbTeletextPid" type:"string"` - - // If set to passthrough, passes any EBIF data from the input source to this - // output. - Ebif *string `locationName:"ebif" type:"string" enum:"M2tsEbifControl"` - - // When videoAndFixedIntervals is selected, audio EBP markers will be added - // to partitions 3 and 4. The interval between these additional markers will - // be fixed, and will be slightly shorter than the video EBP marker interval. - // Only available when EBP Cablelabs segmentation markers are selected. Partitions - // 1 and 2 will always follow the video interval. - EbpAudioInterval *string `locationName:"ebpAudioInterval" type:"string" enum:"M2tsAudioInterval"` - - // When set, enforces that Encoder Boundary Points do not come within the specified - // time interval of each other by looking ahead at input video. If another EBP - // is going to come in within the specified time interval, the current EBP is - // not emitted, and the segment is "stretched" to the next marker. The lookahead - // value does not add latency to the system. The Live Event must be configured - // elsewhere to create sufficient latency to make the lookahead accurate. - EbpLookaheadMs *int64 `locationName:"ebpLookaheadMs" type:"integer"` - - // Controls placement of EBP on Audio PIDs. If set to videoAndAudioPids, EBP - // markers will be placed on the video PID and all audio PIDs. 
If set to videoPid, - // EBP markers will be placed on only the video PID. - EbpPlacement *string `locationName:"ebpPlacement" type:"string" enum:"M2tsEbpPlacement"` - - // Packet Identifier (PID) for ECM in the transport stream. Only enabled when - // Simulcrypt is enabled. Can be entered as a decimal or hexadecimal value. - // Valid values are 32 (or 0x20)..8182 (or 0x1ff6). - EcmPid *string `locationName:"ecmPid" type:"string"` - - // Include or exclude the ES Rate field in the PES header. - EsRateInPes *string `locationName:"esRateInPes" type:"string" enum:"M2tsEsRateInPes"` - - // Packet Identifier (PID) for input source ETV Platform data to this output. - // Can be entered as a decimal or hexadecimal value. Valid values are 32 (or - // 0x20)..8182 (or 0x1ff6). - EtvPlatformPid *string `locationName:"etvPlatformPid" type:"string"` - - // Packet Identifier (PID) for input source ETV Signal data to this output. - // Can be entered as a decimal or hexadecimal value. Valid values are 32 (or - // 0x20)..8182 (or 0x1ff6). - EtvSignalPid *string `locationName:"etvSignalPid" type:"string"` - - // The length in seconds of each fragment. Only used with EBP markers. - FragmentTime *float64 `locationName:"fragmentTime" type:"double"` - - // If set to passthrough, passes any KLV data from the input source to this - // output. - Klv *string `locationName:"klv" type:"string" enum:"M2tsKlv"` - - // Packet Identifier (PID) for input source KLV data to this output. Multiple - // values are accepted, and can be entered in ranges and/or by comma separation. - // Can be entered as decimal or hexadecimal values. Each PID specified must - // be in the range of 32 (or 0x20)..8182 (or 0x1ff6). - KlvDataPids *string `locationName:"klvDataPids" type:"string"` - - // Value in bits per second of extra null packets to insert into the transport - // stream. This can be used if a downstream encryption system requires periodic - // null packets. 
- NullPacketBitrate *float64 `locationName:"nullPacketBitrate" type:"double"` - - // The number of milliseconds between instances of this table in the output - // transport stream. Valid values are 0, 10..1000. - PatInterval *int64 `locationName:"patInterval" type:"integer"` - - // When set to pcrEveryPesPacket, a Program Clock Reference value is inserted - // for every Packetized Elementary Stream (PES) header. This parameter is effective - // only when the PCR PID is the same as the video or audio elementary stream. - PcrControl *string `locationName:"pcrControl" type:"string" enum:"M2tsPcrControl"` - - // Maximum time in milliseconds between Program Clock Reference (PCRs) inserted - // into the transport stream. - PcrPeriod *int64 `locationName:"pcrPeriod" type:"integer"` - - // Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport - // stream. When no value is given, the encoder will assign the same value as - // the Video PID. Can be entered as a decimal or hexadecimal value. Valid values - // are 32 (or 0x20)..8182 (or 0x1ff6). - PcrPid *string `locationName:"pcrPid" type:"string"` - - // The number of milliseconds between instances of this table in the output - // transport stream. Valid values are 0, 10..1000. - PmtInterval *int64 `locationName:"pmtInterval" type:"integer"` - - // Packet Identifier (PID) for the Program Map Table (PMT) in the transport - // stream. Can be entered as a decimal or hexadecimal value. Valid values are - // 32 (or 0x20)..8182 (or 0x1ff6). - PmtPid *string `locationName:"pmtPid" type:"string"` - - // The value of the program number field in the Program Map Table. - ProgramNum *int64 `locationName:"programNum" type:"integer"` - - // When vbr, does not insert null packets into transport stream to fill specified - // bitrate. The bitrate setting acts as the maximum bitrate when vbr is set. 
- RateMode *string `locationName:"rateMode" type:"string" enum:"M2tsRateMode"` - - // Packet Identifier (PID) for input source SCTE-27 data to this output. Multiple - // values are accepted, and can be entered in ranges and/or by comma separation. - // Can be entered as decimal or hexadecimal values. Each PID specified must - // be in the range of 32 (or 0x20)..8182 (or 0x1ff6). - Scte27Pids *string `locationName:"scte27Pids" type:"string"` - - // Optionally pass SCTE-35 signals from the input source to this output. - Scte35Control *string `locationName:"scte35Control" type:"string" enum:"M2tsScte35Control"` - - // Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Can - // be entered as a decimal or hexadecimal value. Valid values are 32 (or 0x20)..8182 - // (or 0x1ff6). - Scte35Pid *string `locationName:"scte35Pid" type:"string"` - - // Inserts segmentation markers at each segmentationTime period. raiSegstart - // sets the Random Access Indicator bit in the adaptation field. raiAdapt sets - // the RAI bit and adds the current timecode in the private data bytes. psiSegstart - // inserts PAT and PMT tables at the start of segments. ebp adds Encoder Boundary - // Point information to the adaptation field as per OpenCable specification - // OC-SP-EBP-I01-130118. ebpLegacy adds Encoder Boundary Point information to - // the adaptation field using a legacy proprietary format. - SegmentationMarkers *string `locationName:"segmentationMarkers" type:"string" enum:"M2tsSegmentationMarkers"` - - // The segmentation style parameter controls how segmentation markers are inserted - // into the transport stream. With avails, it is possible that segments may - // be truncated, which can influence where future segmentation markers are inserted.When - // a segmentation style of "resetCadence" is selected and a segment is truncated - // due to an avail, we will reset the segmentation cadence. 
This means the subsequent - // segment will have a duration of $segmentationTime seconds.When a segmentation - // style of "maintainCadence" is selected and a segment is truncated due to - // an avail, we will not reset the segmentation cadence. This means the subsequent - // segment will likely be truncated as well. However, all segments after that - // will have a duration of $segmentationTime seconds. Note that EBP lookahead - // is a slight exception to this rule. - SegmentationStyle *string `locationName:"segmentationStyle" type:"string" enum:"M2tsSegmentationStyle"` - - // The length in seconds of each segment. Required unless markers is set to - // None_. - SegmentationTime *float64 `locationName:"segmentationTime" type:"double"` - - // When set to passthrough, timed metadata will be passed through from input - // to output. - TimedMetadataBehavior *string `locationName:"timedMetadataBehavior" type:"string" enum:"M2tsTimedMetadataBehavior"` - - // Packet Identifier (PID) of the timed metadata stream in the transport stream. - // Can be entered as a decimal or hexadecimal value. Valid values are 32 (or - // 0x20)..8182 (or 0x1ff6). - TimedMetadataPid *string `locationName:"timedMetadataPid" type:"string"` - - // The value of the transport stream ID field in the Program Map Table. - TransportStreamId *int64 `locationName:"transportStreamId" type:"integer"` - - // Packet Identifier (PID) of the elementary video stream in the transport stream. - // Can be entered as a decimal or hexadecimal value. Valid values are 32 (or - // 0x20)..8182 (or 0x1ff6). - VideoPid *string `locationName:"videoPid" type:"string"` -} - -// String returns the string representation -func (s M2tsSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s M2tsSettings) GoString() string { - return s.String() -} - -// SetAbsentInputAudioBehavior sets the AbsentInputAudioBehavior field's value. 
-func (s *M2tsSettings) SetAbsentInputAudioBehavior(v string) *M2tsSettings { - s.AbsentInputAudioBehavior = &v - return s -} - -// SetArib sets the Arib field's value. -func (s *M2tsSettings) SetArib(v string) *M2tsSettings { - s.Arib = &v - return s -} - -// SetAribCaptionsPid sets the AribCaptionsPid field's value. -func (s *M2tsSettings) SetAribCaptionsPid(v string) *M2tsSettings { - s.AribCaptionsPid = &v - return s -} - -// SetAribCaptionsPidControl sets the AribCaptionsPidControl field's value. -func (s *M2tsSettings) SetAribCaptionsPidControl(v string) *M2tsSettings { - s.AribCaptionsPidControl = &v - return s -} - -// SetAudioBufferModel sets the AudioBufferModel field's value. -func (s *M2tsSettings) SetAudioBufferModel(v string) *M2tsSettings { - s.AudioBufferModel = &v - return s -} - -// SetAudioFramesPerPes sets the AudioFramesPerPes field's value. -func (s *M2tsSettings) SetAudioFramesPerPes(v int64) *M2tsSettings { - s.AudioFramesPerPes = &v - return s -} - -// SetAudioPids sets the AudioPids field's value. -func (s *M2tsSettings) SetAudioPids(v string) *M2tsSettings { - s.AudioPids = &v - return s -} - -// SetAudioStreamType sets the AudioStreamType field's value. -func (s *M2tsSettings) SetAudioStreamType(v string) *M2tsSettings { - s.AudioStreamType = &v - return s -} - -// SetBitrate sets the Bitrate field's value. -func (s *M2tsSettings) SetBitrate(v int64) *M2tsSettings { - s.Bitrate = &v - return s -} - -// SetBufferModel sets the BufferModel field's value. -func (s *M2tsSettings) SetBufferModel(v string) *M2tsSettings { - s.BufferModel = &v - return s -} - -// SetCcDescriptor sets the CcDescriptor field's value. -func (s *M2tsSettings) SetCcDescriptor(v string) *M2tsSettings { - s.CcDescriptor = &v - return s -} - -// SetDvbNitSettings sets the DvbNitSettings field's value. 
-func (s *M2tsSettings) SetDvbNitSettings(v *DvbNitSettings) *M2tsSettings { - s.DvbNitSettings = v - return s -} - -// SetDvbSdtSettings sets the DvbSdtSettings field's value. -func (s *M2tsSettings) SetDvbSdtSettings(v *DvbSdtSettings) *M2tsSettings { - s.DvbSdtSettings = v - return s -} - -// SetDvbSubPids sets the DvbSubPids field's value. -func (s *M2tsSettings) SetDvbSubPids(v string) *M2tsSettings { - s.DvbSubPids = &v - return s -} - -// SetDvbTdtSettings sets the DvbTdtSettings field's value. -func (s *M2tsSettings) SetDvbTdtSettings(v *DvbTdtSettings) *M2tsSettings { - s.DvbTdtSettings = v - return s -} - -// SetDvbTeletextPid sets the DvbTeletextPid field's value. -func (s *M2tsSettings) SetDvbTeletextPid(v string) *M2tsSettings { - s.DvbTeletextPid = &v - return s -} - -// SetEbif sets the Ebif field's value. -func (s *M2tsSettings) SetEbif(v string) *M2tsSettings { - s.Ebif = &v - return s -} - -// SetEbpAudioInterval sets the EbpAudioInterval field's value. -func (s *M2tsSettings) SetEbpAudioInterval(v string) *M2tsSettings { - s.EbpAudioInterval = &v - return s -} - -// SetEbpLookaheadMs sets the EbpLookaheadMs field's value. -func (s *M2tsSettings) SetEbpLookaheadMs(v int64) *M2tsSettings { - s.EbpLookaheadMs = &v - return s -} - -// SetEbpPlacement sets the EbpPlacement field's value. -func (s *M2tsSettings) SetEbpPlacement(v string) *M2tsSettings { - s.EbpPlacement = &v - return s -} - -// SetEcmPid sets the EcmPid field's value. -func (s *M2tsSettings) SetEcmPid(v string) *M2tsSettings { - s.EcmPid = &v - return s -} - -// SetEsRateInPes sets the EsRateInPes field's value. -func (s *M2tsSettings) SetEsRateInPes(v string) *M2tsSettings { - s.EsRateInPes = &v - return s -} - -// SetEtvPlatformPid sets the EtvPlatformPid field's value. -func (s *M2tsSettings) SetEtvPlatformPid(v string) *M2tsSettings { - s.EtvPlatformPid = &v - return s -} - -// SetEtvSignalPid sets the EtvSignalPid field's value. 
-func (s *M2tsSettings) SetEtvSignalPid(v string) *M2tsSettings { - s.EtvSignalPid = &v - return s -} - -// SetFragmentTime sets the FragmentTime field's value. -func (s *M2tsSettings) SetFragmentTime(v float64) *M2tsSettings { - s.FragmentTime = &v - return s -} - -// SetKlv sets the Klv field's value. -func (s *M2tsSettings) SetKlv(v string) *M2tsSettings { - s.Klv = &v - return s -} - -// SetKlvDataPids sets the KlvDataPids field's value. -func (s *M2tsSettings) SetKlvDataPids(v string) *M2tsSettings { - s.KlvDataPids = &v - return s -} - -// SetNullPacketBitrate sets the NullPacketBitrate field's value. -func (s *M2tsSettings) SetNullPacketBitrate(v float64) *M2tsSettings { - s.NullPacketBitrate = &v - return s -} - -// SetPatInterval sets the PatInterval field's value. -func (s *M2tsSettings) SetPatInterval(v int64) *M2tsSettings { - s.PatInterval = &v - return s -} - -// SetPcrControl sets the PcrControl field's value. -func (s *M2tsSettings) SetPcrControl(v string) *M2tsSettings { - s.PcrControl = &v - return s -} - -// SetPcrPeriod sets the PcrPeriod field's value. -func (s *M2tsSettings) SetPcrPeriod(v int64) *M2tsSettings { - s.PcrPeriod = &v - return s -} - -// SetPcrPid sets the PcrPid field's value. -func (s *M2tsSettings) SetPcrPid(v string) *M2tsSettings { - s.PcrPid = &v - return s -} - -// SetPmtInterval sets the PmtInterval field's value. -func (s *M2tsSettings) SetPmtInterval(v int64) *M2tsSettings { - s.PmtInterval = &v - return s -} - -// SetPmtPid sets the PmtPid field's value. -func (s *M2tsSettings) SetPmtPid(v string) *M2tsSettings { - s.PmtPid = &v - return s -} - -// SetProgramNum sets the ProgramNum field's value. -func (s *M2tsSettings) SetProgramNum(v int64) *M2tsSettings { - s.ProgramNum = &v - return s -} - -// SetRateMode sets the RateMode field's value. -func (s *M2tsSettings) SetRateMode(v string) *M2tsSettings { - s.RateMode = &v - return s -} - -// SetScte27Pids sets the Scte27Pids field's value. 
-func (s *M2tsSettings) SetScte27Pids(v string) *M2tsSettings { - s.Scte27Pids = &v - return s -} - -// SetScte35Control sets the Scte35Control field's value. -func (s *M2tsSettings) SetScte35Control(v string) *M2tsSettings { - s.Scte35Control = &v - return s -} - -// SetScte35Pid sets the Scte35Pid field's value. -func (s *M2tsSettings) SetScte35Pid(v string) *M2tsSettings { - s.Scte35Pid = &v - return s -} - -// SetSegmentationMarkers sets the SegmentationMarkers field's value. -func (s *M2tsSettings) SetSegmentationMarkers(v string) *M2tsSettings { - s.SegmentationMarkers = &v - return s -} - -// SetSegmentationStyle sets the SegmentationStyle field's value. -func (s *M2tsSettings) SetSegmentationStyle(v string) *M2tsSettings { - s.SegmentationStyle = &v - return s -} - -// SetSegmentationTime sets the SegmentationTime field's value. -func (s *M2tsSettings) SetSegmentationTime(v float64) *M2tsSettings { - s.SegmentationTime = &v - return s -} - -// SetTimedMetadataBehavior sets the TimedMetadataBehavior field's value. -func (s *M2tsSettings) SetTimedMetadataBehavior(v string) *M2tsSettings { - s.TimedMetadataBehavior = &v - return s -} - -// SetTimedMetadataPid sets the TimedMetadataPid field's value. -func (s *M2tsSettings) SetTimedMetadataPid(v string) *M2tsSettings { - s.TimedMetadataPid = &v - return s -} - -// SetTransportStreamId sets the TransportStreamId field's value. -func (s *M2tsSettings) SetTransportStreamId(v int64) *M2tsSettings { - s.TransportStreamId = &v - return s -} - -// SetVideoPid sets the VideoPid field's value. -func (s *M2tsSettings) SetVideoPid(v string) *M2tsSettings { - s.VideoPid = &v - return s -} - -// Settings information for the .m3u8 container -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/M3u8Settings -type M3u8Settings struct { - _ struct{} `type:"structure"` - - // The number of audio frames to insert for each PES packet. 
- AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"` - - // Packet Identifier (PID) of the elementary audio stream(s) in the transport - // stream. Multiple values are accepted, and can be entered in ranges and/or - // by comma separation. Can be entered as decimal or hexadecimal values. - AudioPids *string `locationName:"audioPids" type:"string"` - - // ThePlatform-protected transport streams using 'microsoft' as Target Client - // include an ECM stream. This ECM stream contains the size, IV, and PTS of - // every sample in the transport stream. This stream PID is specified here. - // This PID has no effect on non ThePlatform-protected streams. - EcmPid *string `locationName:"ecmPid" type:"string"` - - // The number of milliseconds between instances of this table in the output - // transport stream. A value of \"0\" writes out the PMT once per segment file. - PatInterval *int64 `locationName:"patInterval" type:"integer"` - - // When set to pcrEveryPesPacket, a Program Clock Reference value is inserted - // for every Packetized Elementary Stream (PES) header. This parameter is effective - // only when the PCR PID is the same as the video or audio elementary stream. - PcrControl *string `locationName:"pcrControl" type:"string" enum:"M3u8PcrControl"` - - // Maximum time in milliseconds between Program Clock References (PCRs) inserted - // into the transport stream. - PcrPeriod *int64 `locationName:"pcrPeriod" type:"integer"` - - // Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport - // stream. When no value is given, the encoder will assign the same value as - // the Video PID. Can be entered as a decimal or hexadecimal value. - PcrPid *string `locationName:"pcrPid" type:"string"` - - // The number of milliseconds between instances of this table in the output - // transport stream. A value of \"0\" writes out the PMT once per segment file. 
- PmtInterval *int64 `locationName:"pmtInterval" type:"integer"` - - // Packet Identifier (PID) for the Program Map Table (PMT) in the transport - // stream. Can be entered as a decimal or hexadecimal value. - PmtPid *string `locationName:"pmtPid" type:"string"` - - // The value of the program number field in the Program Map Table. - ProgramNum *int64 `locationName:"programNum" type:"integer"` - - // If set to passthrough, passes any SCTE-35 signals from the input source to - // this output. - Scte35Behavior *string `locationName:"scte35Behavior" type:"string" enum:"M3u8Scte35Behavior"` - - // Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Can - // be entered as a decimal or hexadecimal value. - Scte35Pid *string `locationName:"scte35Pid" type:"string"` - - // When set to passthrough, timed metadata is passed through from input to output. - TimedMetadataBehavior *string `locationName:"timedMetadataBehavior" type:"string" enum:"M3u8TimedMetadataBehavior"` - - // The value of the transport stream ID field in the Program Map Table. - TransportStreamId *int64 `locationName:"transportStreamId" type:"integer"` - - // Packet Identifier (PID) of the elementary video stream in the transport stream. - // Can be entered as a decimal or hexadecimal value. - VideoPid *string `locationName:"videoPid" type:"string"` -} - -// String returns the string representation -func (s M3u8Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s M3u8Settings) GoString() string { - return s.String() -} - -// SetAudioFramesPerPes sets the AudioFramesPerPes field's value. -func (s *M3u8Settings) SetAudioFramesPerPes(v int64) *M3u8Settings { - s.AudioFramesPerPes = &v - return s -} - -// SetAudioPids sets the AudioPids field's value. -func (s *M3u8Settings) SetAudioPids(v string) *M3u8Settings { - s.AudioPids = &v - return s -} - -// SetEcmPid sets the EcmPid field's value. 
-func (s *M3u8Settings) SetEcmPid(v string) *M3u8Settings { - s.EcmPid = &v - return s -} - -// SetPatInterval sets the PatInterval field's value. -func (s *M3u8Settings) SetPatInterval(v int64) *M3u8Settings { - s.PatInterval = &v - return s -} - -// SetPcrControl sets the PcrControl field's value. -func (s *M3u8Settings) SetPcrControl(v string) *M3u8Settings { - s.PcrControl = &v - return s -} - -// SetPcrPeriod sets the PcrPeriod field's value. -func (s *M3u8Settings) SetPcrPeriod(v int64) *M3u8Settings { - s.PcrPeriod = &v - return s -} - -// SetPcrPid sets the PcrPid field's value. -func (s *M3u8Settings) SetPcrPid(v string) *M3u8Settings { - s.PcrPid = &v - return s -} - -// SetPmtInterval sets the PmtInterval field's value. -func (s *M3u8Settings) SetPmtInterval(v int64) *M3u8Settings { - s.PmtInterval = &v - return s -} - -// SetPmtPid sets the PmtPid field's value. -func (s *M3u8Settings) SetPmtPid(v string) *M3u8Settings { - s.PmtPid = &v - return s -} - -// SetProgramNum sets the ProgramNum field's value. -func (s *M3u8Settings) SetProgramNum(v int64) *M3u8Settings { - s.ProgramNum = &v - return s -} - -// SetScte35Behavior sets the Scte35Behavior field's value. -func (s *M3u8Settings) SetScte35Behavior(v string) *M3u8Settings { - s.Scte35Behavior = &v - return s -} - -// SetScte35Pid sets the Scte35Pid field's value. -func (s *M3u8Settings) SetScte35Pid(v string) *M3u8Settings { - s.Scte35Pid = &v - return s -} - -// SetTimedMetadataBehavior sets the TimedMetadataBehavior field's value. -func (s *M3u8Settings) SetTimedMetadataBehavior(v string) *M3u8Settings { - s.TimedMetadataBehavior = &v - return s -} - -// SetTransportStreamId sets the TransportStreamId field's value. -func (s *M3u8Settings) SetTransportStreamId(v int64) *M3u8Settings { - s.TransportStreamId = &v - return s -} - -// SetVideoPid sets the VideoPid field's value. 
-func (s *M3u8Settings) SetVideoPid(v string) *M3u8Settings { - s.VideoPid = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Mp2Settings -type Mp2Settings struct { - _ struct{} `type:"structure"` - - // Average bitrate in bits/second. - Bitrate *float64 `locationName:"bitrate" type:"double"` - - // The MPEG2 Audio coding mode. Valid values are codingMode10 (for mono) or - // codingMode20 (for stereo). - CodingMode *string `locationName:"codingMode" type:"string" enum:"Mp2CodingMode"` - - // Sample rate in Hz. - SampleRate *float64 `locationName:"sampleRate" type:"double"` -} - -// String returns the string representation -func (s Mp2Settings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Mp2Settings) GoString() string { - return s.String() -} - -// SetBitrate sets the Bitrate field's value. -func (s *Mp2Settings) SetBitrate(v float64) *Mp2Settings { - s.Bitrate = &v - return s -} - -// SetCodingMode sets the CodingMode field's value. -func (s *Mp2Settings) SetCodingMode(v string) *Mp2Settings { - s.CodingMode = &v - return s -} - -// SetSampleRate sets the SampleRate field's value. -func (s *Mp2Settings) SetSampleRate(v float64) *Mp2Settings { - s.SampleRate = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/MsSmoothGroupSettings -type MsSmoothGroupSettings struct { - _ struct{} `type:"structure"` - - // The value of the "Acquisition Point Identity" element used in each message - // placed in the sparse track. Only enabled if sparseTrackType is not "none". - AcquisitionPointId *string `locationName:"acquisitionPointId" type:"string"` - - // If set to passthrough for an audio-only MS Smooth output, the fragment absolute - // time will be set to the current timecode. This option does not write timecodes - // to the audio elementary stream. 
- AudioOnlyTimecodeControl *string `locationName:"audioOnlyTimecodeControl" type:"string" enum:"SmoothGroupAudioOnlyTimecodeControl"` - - // If set to verifyAuthenticity, verify the https certificate chain to a trusted - // Certificate Authority (CA). This will cause https outputs to self-signed - // certificates to fail unless those certificates are manually added to the - // OS trusted keystore. - CertificateMode *string `locationName:"certificateMode" type:"string" enum:"SmoothGroupCertificateMode"` - - // Number of seconds to wait before retrying connection to the IIS server if - // the connection is lost. Content will be cached during this time and the cache - // will be be delivered to the IIS server once the connection is re-established. - ConnectionRetryInterval *int64 `locationName:"connectionRetryInterval" type:"integer"` - - // Smooth Streaming publish point on an IIS server. Elemental Live acts as a - // "Push" encoder to IIS. - Destination *OutputLocationRef `locationName:"destination" type:"structure"` - - // MS Smooth event ID to be sent to the IIS server.Should only be specified - // if eventIdMode is set to useConfigured. - EventId *string `locationName:"eventId" type:"string"` - - // Specifies whether or not to send an event ID to the IIS server. If no event - // ID is sent and the same Live Event is used without changing the publishing - // point, clients might see cached video from the previous run.Options:- "useConfigured" - // - use the value provided in eventId- "useTimestamp" - generate and send an - // event ID based on the current timestamp- "noEventId" - do not send an event - // ID to the IIS server. 
- EventIdMode *string `locationName:"eventIdMode" type:"string" enum:"SmoothGroupEventIdMode"` - - // When set to sendEos, send EOS signal to IIS server when stopping the event - EventStopBehavior *string `locationName:"eventStopBehavior" type:"string" enum:"SmoothGroupEventStopBehavior"` - - // Size in seconds of file cache for streaming outputs. - FilecacheDuration *int64 `locationName:"filecacheDuration" type:"integer"` - - // Length of mp4 fragments to generate (in seconds). Fragment length must be - // compatible with GOP size and framerate. - FragmentLength *int64 `locationName:"fragmentLength" type:"integer"` - - // Parameter that control output group behavior on input loss. - InputLossAction *string `locationName:"inputLossAction" type:"string" enum:"InputLossActionForMsSmoothOut"` - - // Number of retry attempts. - NumRetries *int64 `locationName:"numRetries" type:"integer"` - - // Number of seconds before initiating a restart due to output failure, due - // to exhausting the numRetries on one segment, or exceeding filecacheDuration. - RestartDelay *int64 `locationName:"restartDelay" type:"integer"` - - // When set to useInputSegmentation, the output segment or fragment points are - // set by the RAI markers from the input streams. - SegmentationMode *string `locationName:"segmentationMode" type:"string" enum:"SmoothGroupSegmentationMode"` - - // Outputs that are "output locked" can use this delay. Assign a delay to the - // output that is "secondary". Do not assign a delay to the "primary" output. - // The delay means that the primary output will always reach the downstream - // system before the secondary, which helps ensure that the downstream system - // always uses the primary output. (If there were no delay, the downstream system - // might flip-flop between whichever output happens to arrive first.) If the - // primary fails, the downstream system will switch to the secondary output. 
- // When the primary is restarted, the downstream system will switch back to - // the primary (because once again it is always arriving first) - SendDelayMs *int64 `locationName:"sendDelayMs" type:"integer"` - - // If set to scte35, use incoming SCTE-35 messages to generate a sparse track - // in this group of MS-Smooth outputs. - SparseTrackType *string `locationName:"sparseTrackType" type:"string" enum:"SmoothGroupSparseTrackType"` - - // When set to send, send stream manifest so publishing point doesn't start - // until all streams start. - StreamManifestBehavior *string `locationName:"streamManifestBehavior" type:"string" enum:"SmoothGroupStreamManifestBehavior"` - - // Timestamp offset for the event. Only used if timestampOffsetMode is set to - // useConfiguredOffset. - TimestampOffset *string `locationName:"timestampOffset" type:"string"` - - // Type of timestamp date offset to use.- useEventStartDate: Use the date the - // event was started as the offset- useConfiguredOffset: Use an explicitly configured - // date as the offset - TimestampOffsetMode *string `locationName:"timestampOffsetMode" type:"string" enum:"SmoothGroupTimestampOffsetMode"` -} - -// String returns the string representation -func (s MsSmoothGroupSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MsSmoothGroupSettings) GoString() string { - return s.String() -} - -// SetAcquisitionPointId sets the AcquisitionPointId field's value. -func (s *MsSmoothGroupSettings) SetAcquisitionPointId(v string) *MsSmoothGroupSettings { - s.AcquisitionPointId = &v - return s -} - -// SetAudioOnlyTimecodeControl sets the AudioOnlyTimecodeControl field's value. -func (s *MsSmoothGroupSettings) SetAudioOnlyTimecodeControl(v string) *MsSmoothGroupSettings { - s.AudioOnlyTimecodeControl = &v - return s -} - -// SetCertificateMode sets the CertificateMode field's value. 
-func (s *MsSmoothGroupSettings) SetCertificateMode(v string) *MsSmoothGroupSettings { - s.CertificateMode = &v - return s -} - -// SetConnectionRetryInterval sets the ConnectionRetryInterval field's value. -func (s *MsSmoothGroupSettings) SetConnectionRetryInterval(v int64) *MsSmoothGroupSettings { - s.ConnectionRetryInterval = &v - return s -} - -// SetDestination sets the Destination field's value. -func (s *MsSmoothGroupSettings) SetDestination(v *OutputLocationRef) *MsSmoothGroupSettings { - s.Destination = v - return s -} - -// SetEventId sets the EventId field's value. -func (s *MsSmoothGroupSettings) SetEventId(v string) *MsSmoothGroupSettings { - s.EventId = &v - return s -} - -// SetEventIdMode sets the EventIdMode field's value. -func (s *MsSmoothGroupSettings) SetEventIdMode(v string) *MsSmoothGroupSettings { - s.EventIdMode = &v - return s -} - -// SetEventStopBehavior sets the EventStopBehavior field's value. -func (s *MsSmoothGroupSettings) SetEventStopBehavior(v string) *MsSmoothGroupSettings { - s.EventStopBehavior = &v - return s -} - -// SetFilecacheDuration sets the FilecacheDuration field's value. -func (s *MsSmoothGroupSettings) SetFilecacheDuration(v int64) *MsSmoothGroupSettings { - s.FilecacheDuration = &v - return s -} - -// SetFragmentLength sets the FragmentLength field's value. -func (s *MsSmoothGroupSettings) SetFragmentLength(v int64) *MsSmoothGroupSettings { - s.FragmentLength = &v - return s -} - -// SetInputLossAction sets the InputLossAction field's value. -func (s *MsSmoothGroupSettings) SetInputLossAction(v string) *MsSmoothGroupSettings { - s.InputLossAction = &v - return s -} - -// SetNumRetries sets the NumRetries field's value. -func (s *MsSmoothGroupSettings) SetNumRetries(v int64) *MsSmoothGroupSettings { - s.NumRetries = &v - return s -} - -// SetRestartDelay sets the RestartDelay field's value. 
-func (s *MsSmoothGroupSettings) SetRestartDelay(v int64) *MsSmoothGroupSettings { - s.RestartDelay = &v - return s -} - -// SetSegmentationMode sets the SegmentationMode field's value. -func (s *MsSmoothGroupSettings) SetSegmentationMode(v string) *MsSmoothGroupSettings { - s.SegmentationMode = &v - return s -} - -// SetSendDelayMs sets the SendDelayMs field's value. -func (s *MsSmoothGroupSettings) SetSendDelayMs(v int64) *MsSmoothGroupSettings { - s.SendDelayMs = &v - return s -} - -// SetSparseTrackType sets the SparseTrackType field's value. -func (s *MsSmoothGroupSettings) SetSparseTrackType(v string) *MsSmoothGroupSettings { - s.SparseTrackType = &v - return s -} - -// SetStreamManifestBehavior sets the StreamManifestBehavior field's value. -func (s *MsSmoothGroupSettings) SetStreamManifestBehavior(v string) *MsSmoothGroupSettings { - s.StreamManifestBehavior = &v - return s -} - -// SetTimestampOffset sets the TimestampOffset field's value. -func (s *MsSmoothGroupSettings) SetTimestampOffset(v string) *MsSmoothGroupSettings { - s.TimestampOffset = &v - return s -} - -// SetTimestampOffsetMode sets the TimestampOffsetMode field's value. -func (s *MsSmoothGroupSettings) SetTimestampOffsetMode(v string) *MsSmoothGroupSettings { - s.TimestampOffsetMode = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/MsSmoothOutputSettings -type MsSmoothOutputSettings struct { - _ struct{} `type:"structure"` - - // String concatenated to the end of the destination filename. Required for - // multiple outputs of the same type. - NameModifier *string `locationName:"nameModifier" type:"string"` -} - -// String returns the string representation -func (s MsSmoothOutputSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MsSmoothOutputSettings) GoString() string { - return s.String() -} - -// SetNameModifier sets the NameModifier field's value. 
-func (s *MsSmoothOutputSettings) SetNameModifier(v string) *MsSmoothOutputSettings { - s.NameModifier = &v - return s -} - -// Network source to transcode. Must be accessible to the Elemental Live node -// that is running the live event through a network connection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/NetworkInputSettings -type NetworkInputSettings struct { - _ struct{} `type:"structure"` - - // Specifies HLS input settings when the uri is for a HLS manifest. - HlsInputSettings *HlsInputSettings `locationName:"hlsInputSettings" type:"structure"` - - // Check HTTPS server certificates. When set to checkCryptographyOnly, cryptography - // in the certificate will be checked, but not the server's name. Certain subdomains - // (notably S3 buckets that use dots in the bucket name) do not strictly match - // the corresponding certificate's wildcard pattern and would otherwise cause - // the event to error. This setting is ignored for protocols that do not use - // https. - ServerValidation *string `locationName:"serverValidation" type:"string" enum:"NetworkInputServerValidation"` -} - -// String returns the string representation -func (s NetworkInputSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NetworkInputSettings) GoString() string { - return s.String() -} - -// SetHlsInputSettings sets the HlsInputSettings field's value. -func (s *NetworkInputSettings) SetHlsInputSettings(v *HlsInputSettings) *NetworkInputSettings { - s.HlsInputSettings = v - return s -} - -// SetServerValidation sets the ServerValidation field's value. -func (s *NetworkInputSettings) SetServerValidation(v string) *NetworkInputSettings { - s.ServerValidation = &v - return s -} - -// Output settings. There can be multiple outputs within a group. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Output -type Output struct { - _ struct{} `type:"structure"` - - // The names of the AudioDescriptions used as audio sources for this output. - AudioDescriptionNames []*string `locationName:"audioDescriptionNames" type:"list"` - - // The names of the CaptionDescriptions used as caption sources for this output. - CaptionDescriptionNames []*string `locationName:"captionDescriptionNames" type:"list"` - - // The name used to identify an output. - OutputName *string `locationName:"outputName" type:"string"` - - // Output type-specific settings. - OutputSettings *OutputSettings `locationName:"outputSettings" type:"structure"` - - // The name of the VideoDescription used as the source for this output. - VideoDescriptionName *string `locationName:"videoDescriptionName" type:"string"` -} - -// String returns the string representation -func (s Output) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Output) GoString() string { - return s.String() -} - -// SetAudioDescriptionNames sets the AudioDescriptionNames field's value. -func (s *Output) SetAudioDescriptionNames(v []*string) *Output { - s.AudioDescriptionNames = v - return s -} - -// SetCaptionDescriptionNames sets the CaptionDescriptionNames field's value. -func (s *Output) SetCaptionDescriptionNames(v []*string) *Output { - s.CaptionDescriptionNames = v - return s -} - -// SetOutputName sets the OutputName field's value. -func (s *Output) SetOutputName(v string) *Output { - s.OutputName = &v - return s -} - -// SetOutputSettings sets the OutputSettings field's value. -func (s *Output) SetOutputSettings(v *OutputSettings) *Output { - s.OutputSettings = v - return s -} - -// SetVideoDescriptionName sets the VideoDescriptionName field's value. 
-func (s *Output) SetVideoDescriptionName(v string) *Output { - s.VideoDescriptionName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/OutputDestination -type OutputDestination struct { - _ struct{} `type:"structure"` - - // User-specified id. This is used in an output group or an output. - Id *string `locationName:"id" type:"string"` - - // Destination settings for output; one for each redundant encoder. - Settings []*OutputDestinationSettings `locationName:"settings" type:"list"` -} - -// String returns the string representation -func (s OutputDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputDestination) GoString() string { - return s.String() -} - -// SetId sets the Id field's value. -func (s *OutputDestination) SetId(v string) *OutputDestination { - s.Id = &v - return s -} - -// SetSettings sets the Settings field's value. -func (s *OutputDestination) SetSettings(v []*OutputDestinationSettings) *OutputDestination { - s.Settings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/OutputDestinationSettings -type OutputDestinationSettings struct { - _ struct{} `type:"structure"` - - // key used to extract the password from EC2 Parameter store - PasswordParam *string `locationName:"passwordParam" type:"string"` - - // A URL specifying a destination - Url *string `locationName:"url" type:"string"` - - // username for destination - Username *string `locationName:"username" type:"string"` -} - -// String returns the string representation -func (s OutputDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputDestinationSettings) GoString() string { - return s.String() -} - -// SetPasswordParam sets the PasswordParam field's value. 
-func (s *OutputDestinationSettings) SetPasswordParam(v string) *OutputDestinationSettings { - s.PasswordParam = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *OutputDestinationSettings) SetUrl(v string) *OutputDestinationSettings { - s.Url = &v - return s -} - -// SetUsername sets the Username field's value. -func (s *OutputDestinationSettings) SetUsername(v string) *OutputDestinationSettings { - s.Username = &v - return s -} - -// Output groups for this Live Event. Output groups contain information about -// where streams should be distributed. -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/OutputGroup -type OutputGroup struct { - _ struct{} `type:"structure"` - - // Custom output group name optionally defined by the user. Only letters, numbers, - // and the underscore character allowed; only 32 characters allowed. - Name *string `locationName:"name" type:"string"` - - // Settings associated with the output group. - OutputGroupSettings *OutputGroupSettings `locationName:"outputGroupSettings" type:"structure"` - - Outputs []*Output `locationName:"outputs" type:"list"` -} - -// String returns the string representation -func (s OutputGroup) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputGroup) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *OutputGroup) SetName(v string) *OutputGroup { - s.Name = &v - return s -} - -// SetOutputGroupSettings sets the OutputGroupSettings field's value. -func (s *OutputGroup) SetOutputGroupSettings(v *OutputGroupSettings) *OutputGroup { - s.OutputGroupSettings = v - return s -} - -// SetOutputs sets the Outputs field's value. 
-func (s *OutputGroup) SetOutputs(v []*Output) *OutputGroup { - s.Outputs = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/OutputGroupSettings -type OutputGroupSettings struct { - _ struct{} `type:"structure"` - - ArchiveGroupSettings *ArchiveGroupSettings `locationName:"archiveGroupSettings" type:"structure"` - - HlsGroupSettings *HlsGroupSettings `locationName:"hlsGroupSettings" type:"structure"` - - MsSmoothGroupSettings *MsSmoothGroupSettings `locationName:"msSmoothGroupSettings" type:"structure"` - - UdpGroupSettings *UdpGroupSettings `locationName:"udpGroupSettings" type:"structure"` -} - -// String returns the string representation -func (s OutputGroupSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputGroupSettings) GoString() string { - return s.String() -} - -// SetArchiveGroupSettings sets the ArchiveGroupSettings field's value. -func (s *OutputGroupSettings) SetArchiveGroupSettings(v *ArchiveGroupSettings) *OutputGroupSettings { - s.ArchiveGroupSettings = v - return s -} - -// SetHlsGroupSettings sets the HlsGroupSettings field's value. -func (s *OutputGroupSettings) SetHlsGroupSettings(v *HlsGroupSettings) *OutputGroupSettings { - s.HlsGroupSettings = v - return s -} - -// SetMsSmoothGroupSettings sets the MsSmoothGroupSettings field's value. -func (s *OutputGroupSettings) SetMsSmoothGroupSettings(v *MsSmoothGroupSettings) *OutputGroupSettings { - s.MsSmoothGroupSettings = v - return s -} - -// SetUdpGroupSettings sets the UdpGroupSettings field's value. 
-func (s *OutputGroupSettings) SetUdpGroupSettings(v *UdpGroupSettings) *OutputGroupSettings { - s.UdpGroupSettings = v - return s -} - -// Reference to an OutputDestination ID defined in the channel -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/OutputLocationRef -type OutputLocationRef struct { - _ struct{} `type:"structure"` - - DestinationRefId *string `locationName:"destinationRefId" type:"string"` -} - -// String returns the string representation -func (s OutputLocationRef) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputLocationRef) GoString() string { - return s.String() -} - -// SetDestinationRefId sets the DestinationRefId field's value. -func (s *OutputLocationRef) SetDestinationRefId(v string) *OutputLocationRef { - s.DestinationRefId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/OutputSettings -type OutputSettings struct { - _ struct{} `type:"structure"` - - ArchiveOutputSettings *ArchiveOutputSettings `locationName:"archiveOutputSettings" type:"structure"` - - HlsOutputSettings *HlsOutputSettings `locationName:"hlsOutputSettings" type:"structure"` - - MsSmoothOutputSettings *MsSmoothOutputSettings `locationName:"msSmoothOutputSettings" type:"structure"` - - UdpOutputSettings *UdpOutputSettings `locationName:"udpOutputSettings" type:"structure"` -} - -// String returns the string representation -func (s OutputSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputSettings) GoString() string { - return s.String() -} - -// SetArchiveOutputSettings sets the ArchiveOutputSettings field's value. -func (s *OutputSettings) SetArchiveOutputSettings(v *ArchiveOutputSettings) *OutputSettings { - s.ArchiveOutputSettings = v - return s -} - -// SetHlsOutputSettings sets the HlsOutputSettings field's value. 
-func (s *OutputSettings) SetHlsOutputSettings(v *HlsOutputSettings) *OutputSettings { - s.HlsOutputSettings = v - return s -} - -// SetMsSmoothOutputSettings sets the MsSmoothOutputSettings field's value. -func (s *OutputSettings) SetMsSmoothOutputSettings(v *MsSmoothOutputSettings) *OutputSettings { - s.MsSmoothOutputSettings = v - return s -} - -// SetUdpOutputSettings sets the UdpOutputSettings field's value. -func (s *OutputSettings) SetUdpOutputSettings(v *UdpOutputSettings) *OutputSettings { - s.UdpOutputSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/PassThroughSettings -type PassThroughSettings struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PassThroughSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PassThroughSettings) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/RemixSettings -type RemixSettings struct { - _ struct{} `type:"structure"` - - // Mapping of input channels to output channels, with appropriate gain adjustments. - ChannelMappings []*AudioChannelMapping `locationName:"channelMappings" type:"list"` - - // Number of input channels to be used. - ChannelsIn *int64 `locationName:"channelsIn" type:"integer"` - - // Number of output channels to be produced.Valid values: 1, 2, 4, 6, 8 - ChannelsOut *int64 `locationName:"channelsOut" type:"integer"` -} - -// String returns the string representation -func (s RemixSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RemixSettings) GoString() string { - return s.String() -} - -// SetChannelMappings sets the ChannelMappings field's value. 
-func (s *RemixSettings) SetChannelMappings(v []*AudioChannelMapping) *RemixSettings { - s.ChannelMappings = v - return s -} - -// SetChannelsIn sets the ChannelsIn field's value. -func (s *RemixSettings) SetChannelsIn(v int64) *RemixSettings { - s.ChannelsIn = &v - return s -} - -// SetChannelsOut sets the ChannelsOut field's value. -func (s *RemixSettings) SetChannelsOut(v int64) *RemixSettings { - s.ChannelsOut = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Scte20PlusEmbeddedDestinationSettings -type Scte20PlusEmbeddedDestinationSettings struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s Scte20PlusEmbeddedDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Scte20PlusEmbeddedDestinationSettings) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Scte20SourceSettings -type Scte20SourceSettings struct { - _ struct{} `type:"structure"` - - // If upconvert, 608 data is both passed through via the "608 compatibility - // bytes" fields of the 708 wrapper as well as translated into 708. 708 data - // present in the source content will be discarded. - Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"Scte20Convert608To708"` - - // Specifies the 608/708 channel number within the video track from which to - // extract captions. Unused for passthrough. - Source608ChannelNumber *int64 `locationName:"source608ChannelNumber" type:"integer"` -} - -// String returns the string representation -func (s Scte20SourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Scte20SourceSettings) GoString() string { - return s.String() -} - -// SetConvert608To708 sets the Convert608To708 field's value. 
-func (s *Scte20SourceSettings) SetConvert608To708(v string) *Scte20SourceSettings { - s.Convert608To708 = &v - return s -} - -// SetSource608ChannelNumber sets the Source608ChannelNumber field's value. -func (s *Scte20SourceSettings) SetSource608ChannelNumber(v int64) *Scte20SourceSettings { - s.Source608ChannelNumber = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Scte27DestinationSettings -type Scte27DestinationSettings struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s Scte27DestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Scte27DestinationSettings) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Scte27SourceSettings -type Scte27SourceSettings struct { - _ struct{} `type:"structure"` - - // The pid field is used in conjunction with the caption selector languageCode - // field as follows: - Specify PID and Language: Extracts captions from that - // PID; the language is "informational". - Specify PID and omit Language: Extracts - // the specified PID. - Omit PID and specify Language: Extracts the specified - // language, whichever PID that happens to be. - Omit PID and omit Language: - // Valid only if source is DVB-Sub that is being passed through; all languages - // will be passed through. - Pid *int64 `locationName:"pid" type:"integer"` -} - -// String returns the string representation -func (s Scte27SourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Scte27SourceSettings) GoString() string { - return s.String() -} - -// SetPid sets the Pid field's value. 
-func (s *Scte27SourceSettings) SetPid(v int64) *Scte27SourceSettings { - s.Pid = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Scte35SpliceInsert -type Scte35SpliceInsert struct { - _ struct{} `type:"structure"` - - // When specified, this offset (in milliseconds) is added to the input Ad Avail - // PTS time. This only applies to embedded SCTE 104/35 messages and does not - // apply to OOB messages. - AdAvailOffset *int64 `locationName:"adAvailOffset" type:"integer"` - - // When set to ignore, Segment Descriptors with noRegionalBlackoutFlag set to - // 0 will no longer trigger blackouts or Ad Avail slates - NoRegionalBlackoutFlag *string `locationName:"noRegionalBlackoutFlag" type:"string" enum:"Scte35SpliceInsertNoRegionalBlackoutBehavior"` - - // When set to ignore, Segment Descriptors with webDeliveryAllowedFlag set to - // 0 will no longer trigger blackouts or Ad Avail slates - WebDeliveryAllowedFlag *string `locationName:"webDeliveryAllowedFlag" type:"string" enum:"Scte35SpliceInsertWebDeliveryAllowedBehavior"` -} - -// String returns the string representation -func (s Scte35SpliceInsert) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Scte35SpliceInsert) GoString() string { - return s.String() -} - -// SetAdAvailOffset sets the AdAvailOffset field's value. -func (s *Scte35SpliceInsert) SetAdAvailOffset(v int64) *Scte35SpliceInsert { - s.AdAvailOffset = &v - return s -} - -// SetNoRegionalBlackoutFlag sets the NoRegionalBlackoutFlag field's value. -func (s *Scte35SpliceInsert) SetNoRegionalBlackoutFlag(v string) *Scte35SpliceInsert { - s.NoRegionalBlackoutFlag = &v - return s -} - -// SetWebDeliveryAllowedFlag sets the WebDeliveryAllowedFlag field's value. 
-func (s *Scte35SpliceInsert) SetWebDeliveryAllowedFlag(v string) *Scte35SpliceInsert { - s.WebDeliveryAllowedFlag = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/Scte35TimeSignalApos -type Scte35TimeSignalApos struct { - _ struct{} `type:"structure"` - - // When specified, this offset (in milliseconds) is added to the input Ad Avail - // PTS time. This only applies to embedded SCTE 104/35 messages and does not - // apply to OOB messages. - AdAvailOffset *int64 `locationName:"adAvailOffset" type:"integer"` - - // When set to ignore, Segment Descriptors with noRegionalBlackoutFlag set to - // 0 will no longer trigger blackouts or Ad Avail slates - NoRegionalBlackoutFlag *string `locationName:"noRegionalBlackoutFlag" type:"string" enum:"Scte35AposNoRegionalBlackoutBehavior"` - - // When set to ignore, Segment Descriptors with webDeliveryAllowedFlag set to - // 0 will no longer trigger blackouts or Ad Avail slates - WebDeliveryAllowedFlag *string `locationName:"webDeliveryAllowedFlag" type:"string" enum:"Scte35AposWebDeliveryAllowedBehavior"` -} - -// String returns the string representation -func (s Scte35TimeSignalApos) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Scte35TimeSignalApos) GoString() string { - return s.String() -} - -// SetAdAvailOffset sets the AdAvailOffset field's value. -func (s *Scte35TimeSignalApos) SetAdAvailOffset(v int64) *Scte35TimeSignalApos { - s.AdAvailOffset = &v - return s -} - -// SetNoRegionalBlackoutFlag sets the NoRegionalBlackoutFlag field's value. -func (s *Scte35TimeSignalApos) SetNoRegionalBlackoutFlag(v string) *Scte35TimeSignalApos { - s.NoRegionalBlackoutFlag = &v - return s -} - -// SetWebDeliveryAllowedFlag sets the WebDeliveryAllowedFlag field's value. 
-func (s *Scte35TimeSignalApos) SetWebDeliveryAllowedFlag(v string) *Scte35TimeSignalApos { - s.WebDeliveryAllowedFlag = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/SmpteTtDestinationSettings -type SmpteTtDestinationSettings struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s SmpteTtDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SmpteTtDestinationSettings) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/StandardHlsSettings -type StandardHlsSettings struct { - _ struct{} `type:"structure"` - - // List all the audio groups that are used with the video output stream. Input - // all the audio GROUP-IDs that are associated to the video, separate by ','. - AudioRenditionSets *string `locationName:"audioRenditionSets" type:"string"` - - // Settings information for the .m3u8 container - M3u8Settings *M3u8Settings `locationName:"m3u8Settings" type:"structure"` -} - -// String returns the string representation -func (s StandardHlsSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StandardHlsSettings) GoString() string { - return s.String() -} - -// SetAudioRenditionSets sets the AudioRenditionSets field's value. -func (s *StandardHlsSettings) SetAudioRenditionSets(v string) *StandardHlsSettings { - s.AudioRenditionSets = &v - return s -} - -// SetM3u8Settings sets the M3u8Settings field's value. 
-func (s *StandardHlsSettings) SetM3u8Settings(v *M3u8Settings) *StandardHlsSettings { - s.M3u8Settings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/StartChannelRequest -type StartChannelInput struct { - _ struct{} `type:"structure"` - - // ChannelId is a required field - ChannelId *string `location:"uri" locationName:"channelId" type:"string" required:"true"` -} - -// String returns the string representation -func (s StartChannelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartChannelInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StartChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartChannelInput"} - if s.ChannelId == nil { - invalidParams.Add(request.NewErrParamRequired("ChannelId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChannelId sets the ChannelId field's value. 
-func (s *StartChannelInput) SetChannelId(v string) *StartChannelInput { - s.ChannelId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/StartChannelResponse -type StartChannelOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - Destinations []*OutputDestination `locationName:"destinations" type:"list"` - - EgressEndpoints []*ChannelEgressEndpoint `locationName:"egressEndpoints" type:"list"` - - EncoderSettings *EncoderSettings `locationName:"encoderSettings" type:"structure"` - - Id *string `locationName:"id" type:"string"` - - InputAttachments []*InputAttachment `locationName:"inputAttachments" type:"list"` - - Name *string `locationName:"name" type:"string"` - - PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` - - RoleArn *string `locationName:"roleArn" type:"string"` - - State *string `locationName:"state" type:"string" enum:"ChannelState"` -} - -// String returns the string representation -func (s StartChannelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartChannelOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *StartChannelOutput) SetArn(v string) *StartChannelOutput { - s.Arn = &v - return s -} - -// SetDestinations sets the Destinations field's value. -func (s *StartChannelOutput) SetDestinations(v []*OutputDestination) *StartChannelOutput { - s.Destinations = v - return s -} - -// SetEgressEndpoints sets the EgressEndpoints field's value. -func (s *StartChannelOutput) SetEgressEndpoints(v []*ChannelEgressEndpoint) *StartChannelOutput { - s.EgressEndpoints = v - return s -} - -// SetEncoderSettings sets the EncoderSettings field's value. -func (s *StartChannelOutput) SetEncoderSettings(v *EncoderSettings) *StartChannelOutput { - s.EncoderSettings = v - return s -} - -// SetId sets the Id field's value. 
-func (s *StartChannelOutput) SetId(v string) *StartChannelOutput { - s.Id = &v - return s -} - -// SetInputAttachments sets the InputAttachments field's value. -func (s *StartChannelOutput) SetInputAttachments(v []*InputAttachment) *StartChannelOutput { - s.InputAttachments = v - return s -} - -// SetName sets the Name field's value. -func (s *StartChannelOutput) SetName(v string) *StartChannelOutput { - s.Name = &v - return s -} - -// SetPipelinesRunningCount sets the PipelinesRunningCount field's value. -func (s *StartChannelOutput) SetPipelinesRunningCount(v int64) *StartChannelOutput { - s.PipelinesRunningCount = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *StartChannelOutput) SetRoleArn(v string) *StartChannelOutput { - s.RoleArn = &v - return s -} - -// SetState sets the State field's value. -func (s *StartChannelOutput) SetState(v string) *StartChannelOutput { - s.State = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/StaticKeySettings -type StaticKeySettings struct { - _ struct{} `type:"structure"` - - // The URL of the license server used for protecting content. - KeyProviderServer *InputLocation `locationName:"keyProviderServer" type:"structure"` - - // Static key value as a 32 character hexadecimal string. - StaticKeyValue *string `locationName:"staticKeyValue" type:"string"` -} - -// String returns the string representation -func (s StaticKeySettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StaticKeySettings) GoString() string { - return s.String() -} - -// SetKeyProviderServer sets the KeyProviderServer field's value. -func (s *StaticKeySettings) SetKeyProviderServer(v *InputLocation) *StaticKeySettings { - s.KeyProviderServer = v - return s -} - -// SetStaticKeyValue sets the StaticKeyValue field's value. 
-func (s *StaticKeySettings) SetStaticKeyValue(v string) *StaticKeySettings { - s.StaticKeyValue = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/StopChannelRequest -type StopChannelInput struct { - _ struct{} `type:"structure"` - - // ChannelId is a required field - ChannelId *string `location:"uri" locationName:"channelId" type:"string" required:"true"` -} - -// String returns the string representation -func (s StopChannelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopChannelInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopChannelInput"} - if s.ChannelId == nil { - invalidParams.Add(request.NewErrParamRequired("ChannelId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChannelId sets the ChannelId field's value. 
-func (s *StopChannelInput) SetChannelId(v string) *StopChannelInput { - s.ChannelId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/StopChannelResponse -type StopChannelOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - Destinations []*OutputDestination `locationName:"destinations" type:"list"` - - EgressEndpoints []*ChannelEgressEndpoint `locationName:"egressEndpoints" type:"list"` - - EncoderSettings *EncoderSettings `locationName:"encoderSettings" type:"structure"` - - Id *string `locationName:"id" type:"string"` - - InputAttachments []*InputAttachment `locationName:"inputAttachments" type:"list"` - - Name *string `locationName:"name" type:"string"` - - PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` - - RoleArn *string `locationName:"roleArn" type:"string"` - - State *string `locationName:"state" type:"string" enum:"ChannelState"` -} - -// String returns the string representation -func (s StopChannelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopChannelOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *StopChannelOutput) SetArn(v string) *StopChannelOutput { - s.Arn = &v - return s -} - -// SetDestinations sets the Destinations field's value. -func (s *StopChannelOutput) SetDestinations(v []*OutputDestination) *StopChannelOutput { - s.Destinations = v - return s -} - -// SetEgressEndpoints sets the EgressEndpoints field's value. -func (s *StopChannelOutput) SetEgressEndpoints(v []*ChannelEgressEndpoint) *StopChannelOutput { - s.EgressEndpoints = v - return s -} - -// SetEncoderSettings sets the EncoderSettings field's value. -func (s *StopChannelOutput) SetEncoderSettings(v *EncoderSettings) *StopChannelOutput { - s.EncoderSettings = v - return s -} - -// SetId sets the Id field's value. 
-func (s *StopChannelOutput) SetId(v string) *StopChannelOutput { - s.Id = &v - return s -} - -// SetInputAttachments sets the InputAttachments field's value. -func (s *StopChannelOutput) SetInputAttachments(v []*InputAttachment) *StopChannelOutput { - s.InputAttachments = v - return s -} - -// SetName sets the Name field's value. -func (s *StopChannelOutput) SetName(v string) *StopChannelOutput { - s.Name = &v - return s -} - -// SetPipelinesRunningCount sets the PipelinesRunningCount field's value. -func (s *StopChannelOutput) SetPipelinesRunningCount(v int64) *StopChannelOutput { - s.PipelinesRunningCount = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *StopChannelOutput) SetRoleArn(v string) *StopChannelOutput { - s.RoleArn = &v - return s -} - -// SetState sets the State field's value. -func (s *StopChannelOutput) SetState(v string) *StopChannelOutput { - s.State = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/TeletextDestinationSettings -type TeletextDestinationSettings struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s TeletextDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TeletextDestinationSettings) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/TeletextSourceSettings -type TeletextSourceSettings struct { - _ struct{} `type:"structure"` - - // Specifies the teletext page number within the data stream from which to extract - // captions. Range of 0x100 (256) to 0x8FF (2303). Unused for passthrough. Should - // be specified as a hexadecimal string with no "0x" prefix. 
- PageNumber *string `locationName:"pageNumber" type:"string"` -} - -// String returns the string representation -func (s TeletextSourceSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TeletextSourceSettings) GoString() string { - return s.String() -} - -// SetPageNumber sets the PageNumber field's value. -func (s *TeletextSourceSettings) SetPageNumber(v string) *TeletextSourceSettings { - s.PageNumber = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/TimecodeConfig -type TimecodeConfig struct { - _ struct{} `type:"structure"` - - // Identifies the source for the timecode that will be associated with the events - // outputs.-Embedded (embedded): Initialize the output timecode with timecode - // from the the source. If no embedded timecode is detected in the source, the - // system falls back to using "Start at 0" (zerobased).-System Clock (systemclock): - // Use the UTC time.-Start at 0 (zerobased): The time of the first frame of - // the event will be 00:00:00:00. - Source *string `locationName:"source" type:"string" enum:"TimecodeConfigSource"` - - // Threshold in frames beyond which output timecode is resynchronized to the - // input timecode. Discrepancies below this threshold are permitted to avoid - // unnecessary discontinuities in the output timecode. No timecode sync when - // this is not specified. - SyncThreshold *int64 `locationName:"syncThreshold" type:"integer"` -} - -// String returns the string representation -func (s TimecodeConfig) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TimecodeConfig) GoString() string { - return s.String() -} - -// SetSource sets the Source field's value. -func (s *TimecodeConfig) SetSource(v string) *TimecodeConfig { - s.Source = &v - return s -} - -// SetSyncThreshold sets the SyncThreshold field's value. 
-func (s *TimecodeConfig) SetSyncThreshold(v int64) *TimecodeConfig { - s.SyncThreshold = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/TtmlDestinationSettings -type TtmlDestinationSettings struct { - _ struct{} `type:"structure"` - - // When set to passthrough, passes through style and position information from - // a TTML-like input source (TTML, SMPTE-TT, CFF-TT) to the CFF-TT output or - // TTML output. - StyleControl *string `locationName:"styleControl" type:"string" enum:"TtmlDestinationStyleControl"` -} - -// String returns the string representation -func (s TtmlDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TtmlDestinationSettings) GoString() string { - return s.String() -} - -// SetStyleControl sets the StyleControl field's value. -func (s *TtmlDestinationSettings) SetStyleControl(v string) *TtmlDestinationSettings { - s.StyleControl = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/UdpContainerSettings -type UdpContainerSettings struct { - _ struct{} `type:"structure"` - - M2tsSettings *M2tsSettings `locationName:"m2tsSettings" type:"structure"` -} - -// String returns the string representation -func (s UdpContainerSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UdpContainerSettings) GoString() string { - return s.String() -} - -// SetM2tsSettings sets the M2tsSettings field's value. -func (s *UdpContainerSettings) SetM2tsSettings(v *M2tsSettings) *UdpContainerSettings { - s.M2tsSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/UdpGroupSettings -type UdpGroupSettings struct { - _ struct{} `type:"structure"` - - // Specifies behavior of last resort when input video is lost, and no more backup - // inputs are available. 
When dropTs is selected the entire transport stream - // will stop being emitted. When dropProgram is selected the program can be - // dropped from the transport stream (and replaced with null packets to meet - // the TS bitrate requirement). Or, when emitProgram is chosen the transport - // stream will continue to be produced normally with repeat frames, black frames, - // or slate frames substituted for the absent input video. - InputLossAction *string `locationName:"inputLossAction" type:"string" enum:"InputLossActionForUdpOut"` - - // Indicates ID3 frame that has the timecode. - TimedMetadataId3Frame *string `locationName:"timedMetadataId3Frame" type:"string" enum:"UdpTimedMetadataId3Frame"` - - // Timed Metadata interval in seconds. - TimedMetadataId3Period *int64 `locationName:"timedMetadataId3Period" type:"integer"` -} - -// String returns the string representation -func (s UdpGroupSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UdpGroupSettings) GoString() string { - return s.String() -} - -// SetInputLossAction sets the InputLossAction field's value. -func (s *UdpGroupSettings) SetInputLossAction(v string) *UdpGroupSettings { - s.InputLossAction = &v - return s -} - -// SetTimedMetadataId3Frame sets the TimedMetadataId3Frame field's value. -func (s *UdpGroupSettings) SetTimedMetadataId3Frame(v string) *UdpGroupSettings { - s.TimedMetadataId3Frame = &v - return s -} - -// SetTimedMetadataId3Period sets the TimedMetadataId3Period field's value. -func (s *UdpGroupSettings) SetTimedMetadataId3Period(v int64) *UdpGroupSettings { - s.TimedMetadataId3Period = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/UdpOutputSettings -type UdpOutputSettings struct { - _ struct{} `type:"structure"` - - // UDP output buffering in milliseconds. 
Larger values increase latency through - // the transcoder but simultaneously assist the transcoder in maintaining a - // constant, low-jitter UDP/RTP output while accommodating clock recovery, input - // switching, input disruptions, picture reordering, etc. - BufferMsec *int64 `locationName:"bufferMsec" type:"integer"` - - ContainerSettings *UdpContainerSettings `locationName:"containerSettings" type:"structure"` - - // Destination address and port number for RTP or UDP packets. Can be unicast - // or multicast RTP or UDP (eg. rtp://239.10.10.10:5001 or udp://10.100.100.100:5002). - Destination *OutputLocationRef `locationName:"destination" type:"structure"` - - // Settings for enabling and adjusting Forward Error Correction on UDP outputs. - FecOutputSettings *FecOutputSettings `locationName:"fecOutputSettings" type:"structure"` -} - -// String returns the string representation -func (s UdpOutputSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UdpOutputSettings) GoString() string { - return s.String() -} - -// SetBufferMsec sets the BufferMsec field's value. -func (s *UdpOutputSettings) SetBufferMsec(v int64) *UdpOutputSettings { - s.BufferMsec = &v - return s -} - -// SetContainerSettings sets the ContainerSettings field's value. -func (s *UdpOutputSettings) SetContainerSettings(v *UdpContainerSettings) *UdpOutputSettings { - s.ContainerSettings = v - return s -} - -// SetDestination sets the Destination field's value. -func (s *UdpOutputSettings) SetDestination(v *OutputLocationRef) *UdpOutputSettings { - s.Destination = v - return s -} - -// SetFecOutputSettings sets the FecOutputSettings field's value. 
-func (s *UdpOutputSettings) SetFecOutputSettings(v *FecOutputSettings) *UdpOutputSettings { - s.FecOutputSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ValidationError -type ValidationError struct { - _ struct{} `type:"structure"` - - ElementPath *string `locationName:"elementPath" type:"string"` - - ErrorMessage *string `locationName:"errorMessage" type:"string"` -} - -// String returns the string representation -func (s ValidationError) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ValidationError) GoString() string { - return s.String() -} - -// SetElementPath sets the ElementPath field's value. -func (s *ValidationError) SetElementPath(v string) *ValidationError { - s.ElementPath = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *ValidationError) SetErrorMessage(v string) *ValidationError { - s.ErrorMessage = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/VideoCodecSettings -type VideoCodecSettings struct { - _ struct{} `type:"structure"` - - H264Settings *H264Settings `locationName:"h264Settings" type:"structure"` -} - -// String returns the string representation -func (s VideoCodecSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VideoCodecSettings) GoString() string { - return s.String() -} - -// SetH264Settings sets the H264Settings field's value. -func (s *VideoCodecSettings) SetH264Settings(v *H264Settings) *VideoCodecSettings { - s.H264Settings = v - return s -} - -// Video settings for this stream. -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/VideoDescription -type VideoDescription struct { - _ struct{} `type:"structure"` - - // Video codec settings. 
- CodecSettings *VideoCodecSettings `locationName:"codecSettings" type:"structure"` - - // Output video height (in pixels). Leave blank to use source video height. - // If left blank, width must also be unspecified. - Height *int64 `locationName:"height" type:"integer"` - - // The name of this VideoDescription. Outputs will use this name to uniquely - // identify this Description. Description names should be unique within this - // Live Event. - Name *string `locationName:"name" type:"string"` - - // Indicates how to respond to the AFD values in the input stream. Setting to - // "respond" causes input video to be clipped, depending on AFD value, input - // display aspect ratio and output display aspect ratio. - RespondToAfd *string `locationName:"respondToAfd" type:"string" enum:"VideoDescriptionRespondToAfd"` - - // When set to "stretchToOutput", automatically configures the output position - // to stretch the video to the specified output resolution. This option will - // override any position value. - ScalingBehavior *string `locationName:"scalingBehavior" type:"string" enum:"VideoDescriptionScalingBehavior"` - - // Changes the width of the anti-alias filter kernel used for scaling. Only - // applies if scaling is being performed and antiAlias is set to true. 0 is - // the softest setting, 100 the sharpest, and 50 recommended for most content. - Sharpness *int64 `locationName:"sharpness" type:"integer"` - - // Output video width (in pixels). Leave out to use source video width. If left - // out, height must also be left out. Display aspect ratio is always preserved - // by letterboxing or pillarboxing when necessary. 
- Width *int64 `locationName:"width" type:"integer"` -} - -// String returns the string representation -func (s VideoDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VideoDescription) GoString() string { - return s.String() -} - -// SetCodecSettings sets the CodecSettings field's value. -func (s *VideoDescription) SetCodecSettings(v *VideoCodecSettings) *VideoDescription { - s.CodecSettings = v - return s -} - -// SetHeight sets the Height field's value. -func (s *VideoDescription) SetHeight(v int64) *VideoDescription { - s.Height = &v - return s -} - -// SetName sets the Name field's value. -func (s *VideoDescription) SetName(v string) *VideoDescription { - s.Name = &v - return s -} - -// SetRespondToAfd sets the RespondToAfd field's value. -func (s *VideoDescription) SetRespondToAfd(v string) *VideoDescription { - s.RespondToAfd = &v - return s -} - -// SetScalingBehavior sets the ScalingBehavior field's value. -func (s *VideoDescription) SetScalingBehavior(v string) *VideoDescription { - s.ScalingBehavior = &v - return s -} - -// SetSharpness sets the Sharpness field's value. -func (s *VideoDescription) SetSharpness(v int64) *VideoDescription { - s.Sharpness = &v - return s -} - -// SetWidth sets the Width field's value. -func (s *VideoDescription) SetWidth(v int64) *VideoDescription { - s.Width = &v - return s -} - -// Specifies a particular video stream within an input source. An input may -// have only a single video selector. -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/VideoSelector -type VideoSelector struct { - _ struct{} `type:"structure"` - - // Specifies the colorspace of an input. This setting works in tandem with colorSpaceConversion - // to determine if any conversion will be performed. - ColorSpace *string `locationName:"colorSpace" type:"string" enum:"VideoSelectorColorSpace"` - - // Applies only if colorSpace is a value other than follow. 
This field controls - // how the value in the colorSpace field will be used. fallback means that when - // the input does include color space data, that data will be used, but when - // the input has no color space data, the value in colorSpace will be used. - // Choose fallback if your input is sometimes missing color space data, but - // when it does have color space data, that data is correct. force means to - // always use the value in colorSpace. Choose force if your input usually has - // no color space data or might have unreliable color space data. - ColorSpaceUsage *string `locationName:"colorSpaceUsage" type:"string" enum:"VideoSelectorColorSpaceUsage"` - - // The video selector settings. - SelectorSettings *VideoSelectorSettings `locationName:"selectorSettings" type:"structure"` -} - -// String returns the string representation -func (s VideoSelector) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VideoSelector) GoString() string { - return s.String() -} - -// SetColorSpace sets the ColorSpace field's value. -func (s *VideoSelector) SetColorSpace(v string) *VideoSelector { - s.ColorSpace = &v - return s -} - -// SetColorSpaceUsage sets the ColorSpaceUsage field's value. -func (s *VideoSelector) SetColorSpaceUsage(v string) *VideoSelector { - s.ColorSpaceUsage = &v - return s -} - -// SetSelectorSettings sets the SelectorSettings field's value. -func (s *VideoSelector) SetSelectorSettings(v *VideoSelectorSettings) *VideoSelector { - s.SelectorSettings = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/VideoSelectorPid -type VideoSelectorPid struct { - _ struct{} `type:"structure"` - - // Selects a specific PID from within a video source. 
- Pid *int64 `locationName:"pid" type:"integer"` -} - -// String returns the string representation -func (s VideoSelectorPid) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VideoSelectorPid) GoString() string { - return s.String() -} - -// SetPid sets the Pid field's value. -func (s *VideoSelectorPid) SetPid(v int64) *VideoSelectorPid { - s.Pid = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/VideoSelectorProgramId -type VideoSelectorProgramId struct { - _ struct{} `type:"structure"` - - // Selects a specific program from within a multi-program transport stream. - // If the program doesn't exist, the first program within the transport stream - // will be selected by default. - ProgramId *int64 `locationName:"programId" type:"integer"` -} - -// String returns the string representation -func (s VideoSelectorProgramId) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VideoSelectorProgramId) GoString() string { - return s.String() -} - -// SetProgramId sets the ProgramId field's value. -func (s *VideoSelectorProgramId) SetProgramId(v int64) *VideoSelectorProgramId { - s.ProgramId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/VideoSelectorSettings -type VideoSelectorSettings struct { - _ struct{} `type:"structure"` - - VideoSelectorPid *VideoSelectorPid `locationName:"videoSelectorPid" type:"structure"` - - VideoSelectorProgramId *VideoSelectorProgramId `locationName:"videoSelectorProgramId" type:"structure"` -} - -// String returns the string representation -func (s VideoSelectorSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VideoSelectorSettings) GoString() string { - return s.String() -} - -// SetVideoSelectorPid sets the VideoSelectorPid field's value. 
-func (s *VideoSelectorSettings) SetVideoSelectorPid(v *VideoSelectorPid) *VideoSelectorSettings { - s.VideoSelectorPid = v - return s -} - -// SetVideoSelectorProgramId sets the VideoSelectorProgramId field's value. -func (s *VideoSelectorSettings) SetVideoSelectorProgramId(v *VideoSelectorProgramId) *VideoSelectorSettings { - s.VideoSelectorProgramId = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/WebvttDestinationSettings -type WebvttDestinationSettings struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s WebvttDestinationSettings) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WebvttDestinationSettings) GoString() string { - return s.String() -} - -const ( - // AacCodingModeAdReceiverMix is a AacCodingMode enum value - AacCodingModeAdReceiverMix = "AD_RECEIVER_MIX" - - // AacCodingModeCodingMode10 is a AacCodingMode enum value - AacCodingModeCodingMode10 = "CODING_MODE_1_0" - - // AacCodingModeCodingMode11 is a AacCodingMode enum value - AacCodingModeCodingMode11 = "CODING_MODE_1_1" - - // AacCodingModeCodingMode20 is a AacCodingMode enum value - AacCodingModeCodingMode20 = "CODING_MODE_2_0" - - // AacCodingModeCodingMode51 is a AacCodingMode enum value - AacCodingModeCodingMode51 = "CODING_MODE_5_1" -) - -const ( - // AacInputTypeBroadcasterMixedAd is a AacInputType enum value - AacInputTypeBroadcasterMixedAd = "BROADCASTER_MIXED_AD" - - // AacInputTypeNormal is a AacInputType enum value - AacInputTypeNormal = "NORMAL" -) - -const ( - // AacProfileHev1 is a AacProfile enum value - AacProfileHev1 = "HEV1" - - // AacProfileHev2 is a AacProfile enum value - AacProfileHev2 = "HEV2" - - // AacProfileLc is a AacProfile enum value - AacProfileLc = "LC" -) - -const ( - // AacRateControlModeCbr is a AacRateControlMode enum value - AacRateControlModeCbr = "CBR" - - // AacRateControlModeVbr is a AacRateControlMode enum 
value - AacRateControlModeVbr = "VBR" -) - -const ( - // AacRawFormatLatmLoas is a AacRawFormat enum value - AacRawFormatLatmLoas = "LATM_LOAS" - - // AacRawFormatNone is a AacRawFormat enum value - AacRawFormatNone = "NONE" -) - -const ( - // AacSpecMpeg2 is a AacSpec enum value - AacSpecMpeg2 = "MPEG2" - - // AacSpecMpeg4 is a AacSpec enum value - AacSpecMpeg4 = "MPEG4" -) - -const ( - // AacVbrQualityHigh is a AacVbrQuality enum value - AacVbrQualityHigh = "HIGH" - - // AacVbrQualityLow is a AacVbrQuality enum value - AacVbrQualityLow = "LOW" - - // AacVbrQualityMediumHigh is a AacVbrQuality enum value - AacVbrQualityMediumHigh = "MEDIUM_HIGH" - - // AacVbrQualityMediumLow is a AacVbrQuality enum value - AacVbrQualityMediumLow = "MEDIUM_LOW" -) - -const ( - // Ac3BitstreamModeCommentary is a Ac3BitstreamMode enum value - Ac3BitstreamModeCommentary = "COMMENTARY" - - // Ac3BitstreamModeCompleteMain is a Ac3BitstreamMode enum value - Ac3BitstreamModeCompleteMain = "COMPLETE_MAIN" - - // Ac3BitstreamModeDialogue is a Ac3BitstreamMode enum value - Ac3BitstreamModeDialogue = "DIALOGUE" - - // Ac3BitstreamModeEmergency is a Ac3BitstreamMode enum value - Ac3BitstreamModeEmergency = "EMERGENCY" - - // Ac3BitstreamModeHearingImpaired is a Ac3BitstreamMode enum value - Ac3BitstreamModeHearingImpaired = "HEARING_IMPAIRED" - - // Ac3BitstreamModeMusicAndEffects is a Ac3BitstreamMode enum value - Ac3BitstreamModeMusicAndEffects = "MUSIC_AND_EFFECTS" - - // Ac3BitstreamModeVisuallyImpaired is a Ac3BitstreamMode enum value - Ac3BitstreamModeVisuallyImpaired = "VISUALLY_IMPAIRED" - - // Ac3BitstreamModeVoiceOver is a Ac3BitstreamMode enum value - Ac3BitstreamModeVoiceOver = "VOICE_OVER" -) - -const ( - // Ac3CodingModeCodingMode10 is a Ac3CodingMode enum value - Ac3CodingModeCodingMode10 = "CODING_MODE_1_0" - - // Ac3CodingModeCodingMode11 is a Ac3CodingMode enum value - Ac3CodingModeCodingMode11 = "CODING_MODE_1_1" - - // Ac3CodingModeCodingMode20 is a Ac3CodingMode enum value 
- Ac3CodingModeCodingMode20 = "CODING_MODE_2_0" - - // Ac3CodingModeCodingMode32Lfe is a Ac3CodingMode enum value - Ac3CodingModeCodingMode32Lfe = "CODING_MODE_3_2_LFE" -) - -const ( - // Ac3DrcProfileFilmStandard is a Ac3DrcProfile enum value - Ac3DrcProfileFilmStandard = "FILM_STANDARD" - - // Ac3DrcProfileNone is a Ac3DrcProfile enum value - Ac3DrcProfileNone = "NONE" -) - -const ( - // Ac3LfeFilterDisabled is a Ac3LfeFilter enum value - Ac3LfeFilterDisabled = "DISABLED" - - // Ac3LfeFilterEnabled is a Ac3LfeFilter enum value - Ac3LfeFilterEnabled = "ENABLED" -) - -const ( - // Ac3MetadataControlFollowInput is a Ac3MetadataControl enum value - Ac3MetadataControlFollowInput = "FOLLOW_INPUT" - - // Ac3MetadataControlUseConfigured is a Ac3MetadataControl enum value - Ac3MetadataControlUseConfigured = "USE_CONFIGURED" -) - -const ( - // AfdSignalingAuto is a AfdSignaling enum value - AfdSignalingAuto = "AUTO" - - // AfdSignalingFixed is a AfdSignaling enum value - AfdSignalingFixed = "FIXED" - - // AfdSignalingNone is a AfdSignaling enum value - AfdSignalingNone = "NONE" -) - -const ( - // AudioDescriptionAudioTypeControlFollowInput is a AudioDescriptionAudioTypeControl enum value - AudioDescriptionAudioTypeControlFollowInput = "FOLLOW_INPUT" - - // AudioDescriptionAudioTypeControlUseConfigured is a AudioDescriptionAudioTypeControl enum value - AudioDescriptionAudioTypeControlUseConfigured = "USE_CONFIGURED" -) - -const ( - // AudioDescriptionLanguageCodeControlFollowInput is a AudioDescriptionLanguageCodeControl enum value - AudioDescriptionLanguageCodeControlFollowInput = "FOLLOW_INPUT" - - // AudioDescriptionLanguageCodeControlUseConfigured is a AudioDescriptionLanguageCodeControl enum value - AudioDescriptionLanguageCodeControlUseConfigured = "USE_CONFIGURED" -) - -const ( - // AudioLanguageSelectionPolicyLoose is a AudioLanguageSelectionPolicy enum value - AudioLanguageSelectionPolicyLoose = "LOOSE" - - // AudioLanguageSelectionPolicyStrict is a 
AudioLanguageSelectionPolicy enum value - AudioLanguageSelectionPolicyStrict = "STRICT" -) - -const ( - // AudioNormalizationAlgorithmItu17701 is a AudioNormalizationAlgorithm enum value - AudioNormalizationAlgorithmItu17701 = "ITU_1770_1" - - // AudioNormalizationAlgorithmItu17702 is a AudioNormalizationAlgorithm enum value - AudioNormalizationAlgorithmItu17702 = "ITU_1770_2" -) - -const ( - // AudioNormalizationAlgorithmControlCorrectAudio is a AudioNormalizationAlgorithmControl enum value - AudioNormalizationAlgorithmControlCorrectAudio = "CORRECT_AUDIO" -) - -const ( - // AudioOnlyHlsTrackTypeAlternateAudioAutoSelect is a AudioOnlyHlsTrackType enum value - AudioOnlyHlsTrackTypeAlternateAudioAutoSelect = "ALTERNATE_AUDIO_AUTO_SELECT" - - // AudioOnlyHlsTrackTypeAlternateAudioAutoSelectDefault is a AudioOnlyHlsTrackType enum value - AudioOnlyHlsTrackTypeAlternateAudioAutoSelectDefault = "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT" - - // AudioOnlyHlsTrackTypeAlternateAudioNotAutoSelect is a AudioOnlyHlsTrackType enum value - AudioOnlyHlsTrackTypeAlternateAudioNotAutoSelect = "ALTERNATE_AUDIO_NOT_AUTO_SELECT" - - // AudioOnlyHlsTrackTypeAudioOnlyVariantStream is a AudioOnlyHlsTrackType enum value - AudioOnlyHlsTrackTypeAudioOnlyVariantStream = "AUDIO_ONLY_VARIANT_STREAM" -) - -const ( - // AudioTypeCleanEffects is a AudioType enum value - AudioTypeCleanEffects = "CLEAN_EFFECTS" - - // AudioTypeHearingImpaired is a AudioType enum value - AudioTypeHearingImpaired = "HEARING_IMPAIRED" - - // AudioTypeUndefined is a AudioType enum value - AudioTypeUndefined = "UNDEFINED" - - // AudioTypeVisualImpairedCommentary is a AudioType enum value - AudioTypeVisualImpairedCommentary = "VISUAL_IMPAIRED_COMMENTARY" -) - -const ( - // AvailBlankingStateDisabled is a AvailBlankingState enum value - AvailBlankingStateDisabled = "DISABLED" - - // AvailBlankingStateEnabled is a AvailBlankingState enum value - AvailBlankingStateEnabled = "ENABLED" -) - -const ( - // 
BlackoutSlateNetworkEndBlackoutDisabled is a BlackoutSlateNetworkEndBlackout enum value - BlackoutSlateNetworkEndBlackoutDisabled = "DISABLED" - - // BlackoutSlateNetworkEndBlackoutEnabled is a BlackoutSlateNetworkEndBlackout enum value - BlackoutSlateNetworkEndBlackoutEnabled = "ENABLED" -) - -const ( - // BlackoutSlateStateDisabled is a BlackoutSlateState enum value - BlackoutSlateStateDisabled = "DISABLED" - - // BlackoutSlateStateEnabled is a BlackoutSlateState enum value - BlackoutSlateStateEnabled = "ENABLED" -) - -const ( - // BurnInAlignmentCentered is a BurnInAlignment enum value - BurnInAlignmentCentered = "CENTERED" - - // BurnInAlignmentLeft is a BurnInAlignment enum value - BurnInAlignmentLeft = "LEFT" - - // BurnInAlignmentSmart is a BurnInAlignment enum value - BurnInAlignmentSmart = "SMART" -) - -const ( - // BurnInBackgroundColorBlack is a BurnInBackgroundColor enum value - BurnInBackgroundColorBlack = "BLACK" - - // BurnInBackgroundColorNone is a BurnInBackgroundColor enum value - BurnInBackgroundColorNone = "NONE" - - // BurnInBackgroundColorWhite is a BurnInBackgroundColor enum value - BurnInBackgroundColorWhite = "WHITE" -) - -const ( - // BurnInFontColorBlack is a BurnInFontColor enum value - BurnInFontColorBlack = "BLACK" - - // BurnInFontColorBlue is a BurnInFontColor enum value - BurnInFontColorBlue = "BLUE" - - // BurnInFontColorGreen is a BurnInFontColor enum value - BurnInFontColorGreen = "GREEN" - - // BurnInFontColorRed is a BurnInFontColor enum value - BurnInFontColorRed = "RED" - - // BurnInFontColorWhite is a BurnInFontColor enum value - BurnInFontColorWhite = "WHITE" - - // BurnInFontColorYellow is a BurnInFontColor enum value - BurnInFontColorYellow = "YELLOW" -) - -const ( - // BurnInOutlineColorBlack is a BurnInOutlineColor enum value - BurnInOutlineColorBlack = "BLACK" - - // BurnInOutlineColorBlue is a BurnInOutlineColor enum value - BurnInOutlineColorBlue = "BLUE" - - // BurnInOutlineColorGreen is a BurnInOutlineColor enum 
value - BurnInOutlineColorGreen = "GREEN" - - // BurnInOutlineColorRed is a BurnInOutlineColor enum value - BurnInOutlineColorRed = "RED" - - // BurnInOutlineColorWhite is a BurnInOutlineColor enum value - BurnInOutlineColorWhite = "WHITE" - - // BurnInOutlineColorYellow is a BurnInOutlineColor enum value - BurnInOutlineColorYellow = "YELLOW" -) - -const ( - // BurnInShadowColorBlack is a BurnInShadowColor enum value - BurnInShadowColorBlack = "BLACK" - - // BurnInShadowColorNone is a BurnInShadowColor enum value - BurnInShadowColorNone = "NONE" - - // BurnInShadowColorWhite is a BurnInShadowColor enum value - BurnInShadowColorWhite = "WHITE" -) - -const ( - // BurnInTeletextGridControlFixed is a BurnInTeletextGridControl enum value - BurnInTeletextGridControlFixed = "FIXED" - - // BurnInTeletextGridControlScaled is a BurnInTeletextGridControl enum value - BurnInTeletextGridControlScaled = "SCALED" -) - -const ( - // ChannelStateCreating is a ChannelState enum value - ChannelStateCreating = "CREATING" - - // ChannelStateCreateFailed is a ChannelState enum value - ChannelStateCreateFailed = "CREATE_FAILED" - - // ChannelStateIdle is a ChannelState enum value - ChannelStateIdle = "IDLE" - - // ChannelStateStarting is a ChannelState enum value - ChannelStateStarting = "STARTING" - - // ChannelStateRunning is a ChannelState enum value - ChannelStateRunning = "RUNNING" - - // ChannelStateRecovering is a ChannelState enum value - ChannelStateRecovering = "RECOVERING" - - // ChannelStateStopping is a ChannelState enum value - ChannelStateStopping = "STOPPING" - - // ChannelStateDeleting is a ChannelState enum value - ChannelStateDeleting = "DELETING" - - // ChannelStateDeleted is a ChannelState enum value - ChannelStateDeleted = "DELETED" -) - -const ( - // DvbSdtOutputSdtSdtFollow is a DvbSdtOutputSdt enum value - DvbSdtOutputSdtSdtFollow = "SDT_FOLLOW" - - // DvbSdtOutputSdtSdtFollowIfPresent is a DvbSdtOutputSdt enum value - DvbSdtOutputSdtSdtFollowIfPresent = 
"SDT_FOLLOW_IF_PRESENT" - - // DvbSdtOutputSdtSdtManual is a DvbSdtOutputSdt enum value - DvbSdtOutputSdtSdtManual = "SDT_MANUAL" - - // DvbSdtOutputSdtSdtNone is a DvbSdtOutputSdt enum value - DvbSdtOutputSdtSdtNone = "SDT_NONE" -) - -const ( - // DvbSubDestinationAlignmentCentered is a DvbSubDestinationAlignment enum value - DvbSubDestinationAlignmentCentered = "CENTERED" - - // DvbSubDestinationAlignmentLeft is a DvbSubDestinationAlignment enum value - DvbSubDestinationAlignmentLeft = "LEFT" - - // DvbSubDestinationAlignmentSmart is a DvbSubDestinationAlignment enum value - DvbSubDestinationAlignmentSmart = "SMART" -) - -const ( - // DvbSubDestinationBackgroundColorBlack is a DvbSubDestinationBackgroundColor enum value - DvbSubDestinationBackgroundColorBlack = "BLACK" - - // DvbSubDestinationBackgroundColorNone is a DvbSubDestinationBackgroundColor enum value - DvbSubDestinationBackgroundColorNone = "NONE" - - // DvbSubDestinationBackgroundColorWhite is a DvbSubDestinationBackgroundColor enum value - DvbSubDestinationBackgroundColorWhite = "WHITE" -) - -const ( - // DvbSubDestinationFontColorBlack is a DvbSubDestinationFontColor enum value - DvbSubDestinationFontColorBlack = "BLACK" - - // DvbSubDestinationFontColorBlue is a DvbSubDestinationFontColor enum value - DvbSubDestinationFontColorBlue = "BLUE" - - // DvbSubDestinationFontColorGreen is a DvbSubDestinationFontColor enum value - DvbSubDestinationFontColorGreen = "GREEN" - - // DvbSubDestinationFontColorRed is a DvbSubDestinationFontColor enum value - DvbSubDestinationFontColorRed = "RED" - - // DvbSubDestinationFontColorWhite is a DvbSubDestinationFontColor enum value - DvbSubDestinationFontColorWhite = "WHITE" - - // DvbSubDestinationFontColorYellow is a DvbSubDestinationFontColor enum value - DvbSubDestinationFontColorYellow = "YELLOW" -) - -const ( - // DvbSubDestinationOutlineColorBlack is a DvbSubDestinationOutlineColor enum value - DvbSubDestinationOutlineColorBlack = "BLACK" - - // 
DvbSubDestinationOutlineColorBlue is a DvbSubDestinationOutlineColor enum value - DvbSubDestinationOutlineColorBlue = "BLUE" - - // DvbSubDestinationOutlineColorGreen is a DvbSubDestinationOutlineColor enum value - DvbSubDestinationOutlineColorGreen = "GREEN" - - // DvbSubDestinationOutlineColorRed is a DvbSubDestinationOutlineColor enum value - DvbSubDestinationOutlineColorRed = "RED" - - // DvbSubDestinationOutlineColorWhite is a DvbSubDestinationOutlineColor enum value - DvbSubDestinationOutlineColorWhite = "WHITE" - - // DvbSubDestinationOutlineColorYellow is a DvbSubDestinationOutlineColor enum value - DvbSubDestinationOutlineColorYellow = "YELLOW" -) - -const ( - // DvbSubDestinationShadowColorBlack is a DvbSubDestinationShadowColor enum value - DvbSubDestinationShadowColorBlack = "BLACK" - - // DvbSubDestinationShadowColorNone is a DvbSubDestinationShadowColor enum value - DvbSubDestinationShadowColorNone = "NONE" - - // DvbSubDestinationShadowColorWhite is a DvbSubDestinationShadowColor enum value - DvbSubDestinationShadowColorWhite = "WHITE" -) - -const ( - // DvbSubDestinationTeletextGridControlFixed is a DvbSubDestinationTeletextGridControl enum value - DvbSubDestinationTeletextGridControlFixed = "FIXED" - - // DvbSubDestinationTeletextGridControlScaled is a DvbSubDestinationTeletextGridControl enum value - DvbSubDestinationTeletextGridControlScaled = "SCALED" -) - -const ( - // Eac3AttenuationControlAttenuate3Db is a Eac3AttenuationControl enum value - Eac3AttenuationControlAttenuate3Db = "ATTENUATE_3_DB" - - // Eac3AttenuationControlNone is a Eac3AttenuationControl enum value - Eac3AttenuationControlNone = "NONE" -) - -const ( - // Eac3BitstreamModeCommentary is a Eac3BitstreamMode enum value - Eac3BitstreamModeCommentary = "COMMENTARY" - - // Eac3BitstreamModeCompleteMain is a Eac3BitstreamMode enum value - Eac3BitstreamModeCompleteMain = "COMPLETE_MAIN" - - // Eac3BitstreamModeEmergency is a Eac3BitstreamMode enum value - Eac3BitstreamModeEmergency = 
"EMERGENCY" - - // Eac3BitstreamModeHearingImpaired is a Eac3BitstreamMode enum value - Eac3BitstreamModeHearingImpaired = "HEARING_IMPAIRED" - - // Eac3BitstreamModeVisuallyImpaired is a Eac3BitstreamMode enum value - Eac3BitstreamModeVisuallyImpaired = "VISUALLY_IMPAIRED" -) - -const ( - // Eac3CodingModeCodingMode10 is a Eac3CodingMode enum value - Eac3CodingModeCodingMode10 = "CODING_MODE_1_0" - - // Eac3CodingModeCodingMode20 is a Eac3CodingMode enum value - Eac3CodingModeCodingMode20 = "CODING_MODE_2_0" - - // Eac3CodingModeCodingMode32 is a Eac3CodingMode enum value - Eac3CodingModeCodingMode32 = "CODING_MODE_3_2" -) - -const ( - // Eac3DcFilterDisabled is a Eac3DcFilter enum value - Eac3DcFilterDisabled = "DISABLED" - - // Eac3DcFilterEnabled is a Eac3DcFilter enum value - Eac3DcFilterEnabled = "ENABLED" -) - -const ( - // Eac3DrcLineFilmLight is a Eac3DrcLine enum value - Eac3DrcLineFilmLight = "FILM_LIGHT" - - // Eac3DrcLineFilmStandard is a Eac3DrcLine enum value - Eac3DrcLineFilmStandard = "FILM_STANDARD" - - // Eac3DrcLineMusicLight is a Eac3DrcLine enum value - Eac3DrcLineMusicLight = "MUSIC_LIGHT" - - // Eac3DrcLineMusicStandard is a Eac3DrcLine enum value - Eac3DrcLineMusicStandard = "MUSIC_STANDARD" - - // Eac3DrcLineNone is a Eac3DrcLine enum value - Eac3DrcLineNone = "NONE" - - // Eac3DrcLineSpeech is a Eac3DrcLine enum value - Eac3DrcLineSpeech = "SPEECH" -) - -const ( - // Eac3DrcRfFilmLight is a Eac3DrcRf enum value - Eac3DrcRfFilmLight = "FILM_LIGHT" - - // Eac3DrcRfFilmStandard is a Eac3DrcRf enum value - Eac3DrcRfFilmStandard = "FILM_STANDARD" - - // Eac3DrcRfMusicLight is a Eac3DrcRf enum value - Eac3DrcRfMusicLight = "MUSIC_LIGHT" - - // Eac3DrcRfMusicStandard is a Eac3DrcRf enum value - Eac3DrcRfMusicStandard = "MUSIC_STANDARD" - - // Eac3DrcRfNone is a Eac3DrcRf enum value - Eac3DrcRfNone = "NONE" - - // Eac3DrcRfSpeech is a Eac3DrcRf enum value - Eac3DrcRfSpeech = "SPEECH" -) - -const ( - // Eac3LfeControlLfe is a Eac3LfeControl enum 
value - Eac3LfeControlLfe = "LFE" - - // Eac3LfeControlNoLfe is a Eac3LfeControl enum value - Eac3LfeControlNoLfe = "NO_LFE" -) - -const ( - // Eac3LfeFilterDisabled is a Eac3LfeFilter enum value - Eac3LfeFilterDisabled = "DISABLED" - - // Eac3LfeFilterEnabled is a Eac3LfeFilter enum value - Eac3LfeFilterEnabled = "ENABLED" -) - -const ( - // Eac3MetadataControlFollowInput is a Eac3MetadataControl enum value - Eac3MetadataControlFollowInput = "FOLLOW_INPUT" - - // Eac3MetadataControlUseConfigured is a Eac3MetadataControl enum value - Eac3MetadataControlUseConfigured = "USE_CONFIGURED" -) - -const ( - // Eac3PassthroughControlNoPassthrough is a Eac3PassthroughControl enum value - Eac3PassthroughControlNoPassthrough = "NO_PASSTHROUGH" - - // Eac3PassthroughControlWhenPossible is a Eac3PassthroughControl enum value - Eac3PassthroughControlWhenPossible = "WHEN_POSSIBLE" -) - -const ( - // Eac3PhaseControlNoShift is a Eac3PhaseControl enum value - Eac3PhaseControlNoShift = "NO_SHIFT" - - // Eac3PhaseControlShift90Degrees is a Eac3PhaseControl enum value - Eac3PhaseControlShift90Degrees = "SHIFT_90_DEGREES" -) - -const ( - // Eac3StereoDownmixDpl2 is a Eac3StereoDownmix enum value - Eac3StereoDownmixDpl2 = "DPL2" - - // Eac3StereoDownmixLoRo is a Eac3StereoDownmix enum value - Eac3StereoDownmixLoRo = "LO_RO" - - // Eac3StereoDownmixLtRt is a Eac3StereoDownmix enum value - Eac3StereoDownmixLtRt = "LT_RT" - - // Eac3StereoDownmixNotIndicated is a Eac3StereoDownmix enum value - Eac3StereoDownmixNotIndicated = "NOT_INDICATED" -) - -const ( - // Eac3SurroundExModeDisabled is a Eac3SurroundExMode enum value - Eac3SurroundExModeDisabled = "DISABLED" - - // Eac3SurroundExModeEnabled is a Eac3SurroundExMode enum value - Eac3SurroundExModeEnabled = "ENABLED" - - // Eac3SurroundExModeNotIndicated is a Eac3SurroundExMode enum value - Eac3SurroundExModeNotIndicated = "NOT_INDICATED" -) - -const ( - // Eac3SurroundModeDisabled is a Eac3SurroundMode enum value - 
Eac3SurroundModeDisabled = "DISABLED" - - // Eac3SurroundModeEnabled is a Eac3SurroundMode enum value - Eac3SurroundModeEnabled = "ENABLED" - - // Eac3SurroundModeNotIndicated is a Eac3SurroundMode enum value - Eac3SurroundModeNotIndicated = "NOT_INDICATED" -) - -const ( - // EmbeddedConvert608To708Disabled is a EmbeddedConvert608To708 enum value - EmbeddedConvert608To708Disabled = "DISABLED" - - // EmbeddedConvert608To708Upconvert is a EmbeddedConvert608To708 enum value - EmbeddedConvert608To708Upconvert = "UPCONVERT" -) - -const ( - // EmbeddedScte20DetectionAuto is a EmbeddedScte20Detection enum value - EmbeddedScte20DetectionAuto = "AUTO" - - // EmbeddedScte20DetectionOff is a EmbeddedScte20Detection enum value - EmbeddedScte20DetectionOff = "OFF" -) - -const ( - // FecOutputIncludeFecColumn is a FecOutputIncludeFec enum value - FecOutputIncludeFecColumn = "COLUMN" - - // FecOutputIncludeFecColumnAndRow is a FecOutputIncludeFec enum value - FecOutputIncludeFecColumnAndRow = "COLUMN_AND_ROW" -) - -const ( - // FixedAfdAfd0000 is a FixedAfd enum value - FixedAfdAfd0000 = "AFD_0000" - - // FixedAfdAfd0010 is a FixedAfd enum value - FixedAfdAfd0010 = "AFD_0010" - - // FixedAfdAfd0011 is a FixedAfd enum value - FixedAfdAfd0011 = "AFD_0011" - - // FixedAfdAfd0100 is a FixedAfd enum value - FixedAfdAfd0100 = "AFD_0100" - - // FixedAfdAfd1000 is a FixedAfd enum value - FixedAfdAfd1000 = "AFD_1000" - - // FixedAfdAfd1001 is a FixedAfd enum value - FixedAfdAfd1001 = "AFD_1001" - - // FixedAfdAfd1010 is a FixedAfd enum value - FixedAfdAfd1010 = "AFD_1010" - - // FixedAfdAfd1011 is a FixedAfd enum value - FixedAfdAfd1011 = "AFD_1011" - - // FixedAfdAfd1101 is a FixedAfd enum value - FixedAfdAfd1101 = "AFD_1101" - - // FixedAfdAfd1110 is a FixedAfd enum value - FixedAfdAfd1110 = "AFD_1110" - - // FixedAfdAfd1111 is a FixedAfd enum value - FixedAfdAfd1111 = "AFD_1111" -) - -const ( - // GlobalConfigurationInputEndActionNone is a GlobalConfigurationInputEndAction enum value - 
GlobalConfigurationInputEndActionNone = "NONE" - - // GlobalConfigurationInputEndActionSwitchAndLoopInputs is a GlobalConfigurationInputEndAction enum value - GlobalConfigurationInputEndActionSwitchAndLoopInputs = "SWITCH_AND_LOOP_INPUTS" -) - -const ( - // GlobalConfigurationLowFramerateInputsDisabled is a GlobalConfigurationLowFramerateInputs enum value - GlobalConfigurationLowFramerateInputsDisabled = "DISABLED" - - // GlobalConfigurationLowFramerateInputsEnabled is a GlobalConfigurationLowFramerateInputs enum value - GlobalConfigurationLowFramerateInputsEnabled = "ENABLED" -) - -const ( - // GlobalConfigurationOutputTimingSourceInputClock is a GlobalConfigurationOutputTimingSource enum value - GlobalConfigurationOutputTimingSourceInputClock = "INPUT_CLOCK" - - // GlobalConfigurationOutputTimingSourceSystemClock is a GlobalConfigurationOutputTimingSource enum value - GlobalConfigurationOutputTimingSourceSystemClock = "SYSTEM_CLOCK" -) - -const ( - // H264AdaptiveQuantizationHigh is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationHigh = "HIGH" - - // H264AdaptiveQuantizationHigher is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationHigher = "HIGHER" - - // H264AdaptiveQuantizationLow is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationLow = "LOW" - - // H264AdaptiveQuantizationMax is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationMax = "MAX" - - // H264AdaptiveQuantizationMedium is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationMedium = "MEDIUM" - - // H264AdaptiveQuantizationOff is a H264AdaptiveQuantization enum value - H264AdaptiveQuantizationOff = "OFF" -) - -const ( - // H264ColorMetadataIgnore is a H264ColorMetadata enum value - H264ColorMetadataIgnore = "IGNORE" - - // H264ColorMetadataInsert is a H264ColorMetadata enum value - H264ColorMetadataInsert = "INSERT" -) - -const ( - // H264EntropyEncodingCabac is a H264EntropyEncoding enum value - H264EntropyEncodingCabac = "CABAC" 
- - // H264EntropyEncodingCavlc is a H264EntropyEncoding enum value - H264EntropyEncodingCavlc = "CAVLC" -) - -const ( - // H264FlickerAqDisabled is a H264FlickerAq enum value - H264FlickerAqDisabled = "DISABLED" - - // H264FlickerAqEnabled is a H264FlickerAq enum value - H264FlickerAqEnabled = "ENABLED" -) - -const ( - // H264FramerateControlInitializeFromSource is a H264FramerateControl enum value - H264FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" - - // H264FramerateControlSpecified is a H264FramerateControl enum value - H264FramerateControlSpecified = "SPECIFIED" -) - -const ( - // H264GopBReferenceDisabled is a H264GopBReference enum value - H264GopBReferenceDisabled = "DISABLED" - - // H264GopBReferenceEnabled is a H264GopBReference enum value - H264GopBReferenceEnabled = "ENABLED" -) - -const ( - // H264GopSizeUnitsFrames is a H264GopSizeUnits enum value - H264GopSizeUnitsFrames = "FRAMES" - - // H264GopSizeUnitsSeconds is a H264GopSizeUnits enum value - H264GopSizeUnitsSeconds = "SECONDS" -) - -const ( - // H264LevelH264Level1 is a H264Level enum value - H264LevelH264Level1 = "H264_LEVEL_1" - - // H264LevelH264Level11 is a H264Level enum value - H264LevelH264Level11 = "H264_LEVEL_1_1" - - // H264LevelH264Level12 is a H264Level enum value - H264LevelH264Level12 = "H264_LEVEL_1_2" - - // H264LevelH264Level13 is a H264Level enum value - H264LevelH264Level13 = "H264_LEVEL_1_3" - - // H264LevelH264Level2 is a H264Level enum value - H264LevelH264Level2 = "H264_LEVEL_2" - - // H264LevelH264Level21 is a H264Level enum value - H264LevelH264Level21 = "H264_LEVEL_2_1" - - // H264LevelH264Level22 is a H264Level enum value - H264LevelH264Level22 = "H264_LEVEL_2_2" - - // H264LevelH264Level3 is a H264Level enum value - H264LevelH264Level3 = "H264_LEVEL_3" - - // H264LevelH264Level31 is a H264Level enum value - H264LevelH264Level31 = "H264_LEVEL_3_1" - - // H264LevelH264Level32 is a H264Level enum value - H264LevelH264Level32 = "H264_LEVEL_3_2" - - // 
H264LevelH264Level4 is a H264Level enum value - H264LevelH264Level4 = "H264_LEVEL_4" - - // H264LevelH264Level41 is a H264Level enum value - H264LevelH264Level41 = "H264_LEVEL_4_1" - - // H264LevelH264Level42 is a H264Level enum value - H264LevelH264Level42 = "H264_LEVEL_4_2" - - // H264LevelH264Level5 is a H264Level enum value - H264LevelH264Level5 = "H264_LEVEL_5" - - // H264LevelH264Level51 is a H264Level enum value - H264LevelH264Level51 = "H264_LEVEL_5_1" - - // H264LevelH264Level52 is a H264Level enum value - H264LevelH264Level52 = "H264_LEVEL_5_2" - - // H264LevelH264LevelAuto is a H264Level enum value - H264LevelH264LevelAuto = "H264_LEVEL_AUTO" -) - -const ( - // H264LookAheadRateControlHigh is a H264LookAheadRateControl enum value - H264LookAheadRateControlHigh = "HIGH" - - // H264LookAheadRateControlLow is a H264LookAheadRateControl enum value - H264LookAheadRateControlLow = "LOW" - - // H264LookAheadRateControlMedium is a H264LookAheadRateControl enum value - H264LookAheadRateControlMedium = "MEDIUM" -) - -const ( - // H264ParControlInitializeFromSource is a H264ParControl enum value - H264ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" - - // H264ParControlSpecified is a H264ParControl enum value - H264ParControlSpecified = "SPECIFIED" -) - -const ( - // H264ProfileBaseline is a H264Profile enum value - H264ProfileBaseline = "BASELINE" - - // H264ProfileHigh is a H264Profile enum value - H264ProfileHigh = "HIGH" - - // H264ProfileHigh10bit is a H264Profile enum value - H264ProfileHigh10bit = "HIGH_10BIT" - - // H264ProfileHigh422 is a H264Profile enum value - H264ProfileHigh422 = "HIGH_422" - - // H264ProfileHigh42210bit is a H264Profile enum value - H264ProfileHigh42210bit = "HIGH_422_10BIT" - - // H264ProfileMain is a H264Profile enum value - H264ProfileMain = "MAIN" -) - -const ( - // H264RateControlModeCbr is a H264RateControlMode enum value - H264RateControlModeCbr = "CBR" - - // H264RateControlModeVbr is a H264RateControlMode enum value 
- H264RateControlModeVbr = "VBR" -) - -const ( - // H264ScanTypeInterlaced is a H264ScanType enum value - H264ScanTypeInterlaced = "INTERLACED" - - // H264ScanTypeProgressive is a H264ScanType enum value - H264ScanTypeProgressive = "PROGRESSIVE" -) - -const ( - // H264SceneChangeDetectDisabled is a H264SceneChangeDetect enum value - H264SceneChangeDetectDisabled = "DISABLED" - - // H264SceneChangeDetectEnabled is a H264SceneChangeDetect enum value - H264SceneChangeDetectEnabled = "ENABLED" -) - -const ( - // H264SpatialAqDisabled is a H264SpatialAq enum value - H264SpatialAqDisabled = "DISABLED" - - // H264SpatialAqEnabled is a H264SpatialAq enum value - H264SpatialAqEnabled = "ENABLED" -) - -const ( - // H264SyntaxDefault is a H264Syntax enum value - H264SyntaxDefault = "DEFAULT" - - // H264SyntaxRp2027 is a H264Syntax enum value - H264SyntaxRp2027 = "RP2027" -) - -const ( - // H264TemporalAqDisabled is a H264TemporalAq enum value - H264TemporalAqDisabled = "DISABLED" - - // H264TemporalAqEnabled is a H264TemporalAq enum value - H264TemporalAqEnabled = "ENABLED" -) - -const ( - // H264TimecodeInsertionBehaviorDisabled is a H264TimecodeInsertionBehavior enum value - H264TimecodeInsertionBehaviorDisabled = "DISABLED" - - // H264TimecodeInsertionBehaviorPicTimingSei is a H264TimecodeInsertionBehavior enum value - H264TimecodeInsertionBehaviorPicTimingSei = "PIC_TIMING_SEI" -) - -const ( - // HlsAdMarkersAdobe is a HlsAdMarkers enum value - HlsAdMarkersAdobe = "ADOBE" - - // HlsAdMarkersElemental is a HlsAdMarkers enum value - HlsAdMarkersElemental = "ELEMENTAL" - - // HlsAdMarkersElementalScte35 is a HlsAdMarkers enum value - HlsAdMarkersElementalScte35 = "ELEMENTAL_SCTE35" -) - -const ( - // HlsAkamaiHttpTransferModeChunked is a HlsAkamaiHttpTransferMode enum value - HlsAkamaiHttpTransferModeChunked = "CHUNKED" - - // HlsAkamaiHttpTransferModeNonChunked is a HlsAkamaiHttpTransferMode enum value - HlsAkamaiHttpTransferModeNonChunked = "NON_CHUNKED" -) - -const ( - // 
HlsCaptionLanguageSettingInsert is a HlsCaptionLanguageSetting enum value - HlsCaptionLanguageSettingInsert = "INSERT" - - // HlsCaptionLanguageSettingNone is a HlsCaptionLanguageSetting enum value - HlsCaptionLanguageSettingNone = "NONE" - - // HlsCaptionLanguageSettingOmit is a HlsCaptionLanguageSetting enum value - HlsCaptionLanguageSettingOmit = "OMIT" -) - -const ( - // HlsClientCacheDisabled is a HlsClientCache enum value - HlsClientCacheDisabled = "DISABLED" - - // HlsClientCacheEnabled is a HlsClientCache enum value - HlsClientCacheEnabled = "ENABLED" -) - -const ( - // HlsCodecSpecificationRfc4281 is a HlsCodecSpecification enum value - HlsCodecSpecificationRfc4281 = "RFC_4281" - - // HlsCodecSpecificationRfc6381 is a HlsCodecSpecification enum value - HlsCodecSpecificationRfc6381 = "RFC_6381" -) - -const ( - // HlsDirectoryStructureSingleDirectory is a HlsDirectoryStructure enum value - HlsDirectoryStructureSingleDirectory = "SINGLE_DIRECTORY" - - // HlsDirectoryStructureSubdirectoryPerStream is a HlsDirectoryStructure enum value - HlsDirectoryStructureSubdirectoryPerStream = "SUBDIRECTORY_PER_STREAM" -) - -const ( - // HlsEncryptionTypeAes128 is a HlsEncryptionType enum value - HlsEncryptionTypeAes128 = "AES128" - - // HlsEncryptionTypeSampleAes is a HlsEncryptionType enum value - HlsEncryptionTypeSampleAes = "SAMPLE_AES" -) - -const ( - // HlsIvInManifestExclude is a HlsIvInManifest enum value - HlsIvInManifestExclude = "EXCLUDE" - - // HlsIvInManifestInclude is a HlsIvInManifest enum value - HlsIvInManifestInclude = "INCLUDE" -) - -const ( - // HlsIvSourceExplicit is a HlsIvSource enum value - HlsIvSourceExplicit = "EXPLICIT" - - // HlsIvSourceFollowsSegmentNumber is a HlsIvSource enum value - HlsIvSourceFollowsSegmentNumber = "FOLLOWS_SEGMENT_NUMBER" -) - -const ( - // HlsManifestCompressionGzip is a HlsManifestCompression enum value - HlsManifestCompressionGzip = "GZIP" - - // HlsManifestCompressionNone is a HlsManifestCompression enum value - 
HlsManifestCompressionNone = "NONE" -) - -const ( - // HlsManifestDurationFormatFloatingPoint is a HlsManifestDurationFormat enum value - HlsManifestDurationFormatFloatingPoint = "FLOATING_POINT" - - // HlsManifestDurationFormatInteger is a HlsManifestDurationFormat enum value - HlsManifestDurationFormatInteger = "INTEGER" -) - -const ( - // HlsMediaStoreStorageClassTemporal is a HlsMediaStoreStorageClass enum value - HlsMediaStoreStorageClassTemporal = "TEMPORAL" -) - -const ( - // HlsModeLive is a HlsMode enum value - HlsModeLive = "LIVE" - - // HlsModeVod is a HlsMode enum value - HlsModeVod = "VOD" -) - -const ( - // HlsOutputSelectionManifestsAndSegments is a HlsOutputSelection enum value - HlsOutputSelectionManifestsAndSegments = "MANIFESTS_AND_SEGMENTS" - - // HlsOutputSelectionSegmentsOnly is a HlsOutputSelection enum value - HlsOutputSelectionSegmentsOnly = "SEGMENTS_ONLY" -) - -const ( - // HlsProgramDateTimeExclude is a HlsProgramDateTime enum value - HlsProgramDateTimeExclude = "EXCLUDE" - - // HlsProgramDateTimeInclude is a HlsProgramDateTime enum value - HlsProgramDateTimeInclude = "INCLUDE" -) - -const ( - // HlsSegmentationModeUseInputSegmentation is a HlsSegmentationMode enum value - HlsSegmentationModeUseInputSegmentation = "USE_INPUT_SEGMENTATION" - - // HlsSegmentationModeUseSegmentDuration is a HlsSegmentationMode enum value - HlsSegmentationModeUseSegmentDuration = "USE_SEGMENT_DURATION" -) - -const ( - // HlsStreamInfResolutionExclude is a HlsStreamInfResolution enum value - HlsStreamInfResolutionExclude = "EXCLUDE" - - // HlsStreamInfResolutionInclude is a HlsStreamInfResolution enum value - HlsStreamInfResolutionInclude = "INCLUDE" -) - -const ( - // HlsTimedMetadataId3FrameNone is a HlsTimedMetadataId3Frame enum value - HlsTimedMetadataId3FrameNone = "NONE" - - // HlsTimedMetadataId3FramePriv is a HlsTimedMetadataId3Frame enum value - HlsTimedMetadataId3FramePriv = "PRIV" - - // HlsTimedMetadataId3FrameTdrl is a HlsTimedMetadataId3Frame 
enum value - HlsTimedMetadataId3FrameTdrl = "TDRL" -) - -const ( - // HlsTsFileModeSegmentedFiles is a HlsTsFileMode enum value - HlsTsFileModeSegmentedFiles = "SEGMENTED_FILES" - - // HlsTsFileModeSingleFile is a HlsTsFileMode enum value - HlsTsFileModeSingleFile = "SINGLE_FILE" -) - -const ( - // HlsWebdavHttpTransferModeChunked is a HlsWebdavHttpTransferMode enum value - HlsWebdavHttpTransferModeChunked = "CHUNKED" - - // HlsWebdavHttpTransferModeNonChunked is a HlsWebdavHttpTransferMode enum value - HlsWebdavHttpTransferModeNonChunked = "NON_CHUNKED" -) - -const ( - // InputDeblockFilterDisabled is a InputDeblockFilter enum value - InputDeblockFilterDisabled = "DISABLED" - - // InputDeblockFilterEnabled is a InputDeblockFilter enum value - InputDeblockFilterEnabled = "ENABLED" -) - -const ( - // InputDenoiseFilterDisabled is a InputDenoiseFilter enum value - InputDenoiseFilterDisabled = "DISABLED" - - // InputDenoiseFilterEnabled is a InputDenoiseFilter enum value - InputDenoiseFilterEnabled = "ENABLED" -) - -const ( - // InputFilterAuto is a InputFilter enum value - InputFilterAuto = "AUTO" - - // InputFilterDisabled is a InputFilter enum value - InputFilterDisabled = "DISABLED" - - // InputFilterForced is a InputFilter enum value - InputFilterForced = "FORCED" -) - -const ( - // InputLossActionForHlsOutEmitOutput is a InputLossActionForHlsOut enum value - InputLossActionForHlsOutEmitOutput = "EMIT_OUTPUT" - - // InputLossActionForHlsOutPauseOutput is a InputLossActionForHlsOut enum value - InputLossActionForHlsOutPauseOutput = "PAUSE_OUTPUT" -) - -const ( - // InputLossActionForMsSmoothOutEmitOutput is a InputLossActionForMsSmoothOut enum value - InputLossActionForMsSmoothOutEmitOutput = "EMIT_OUTPUT" - - // InputLossActionForMsSmoothOutPauseOutput is a InputLossActionForMsSmoothOut enum value - InputLossActionForMsSmoothOutPauseOutput = "PAUSE_OUTPUT" -) - -const ( - // InputLossActionForUdpOutDropProgram is a InputLossActionForUdpOut enum value - 
InputLossActionForUdpOutDropProgram = "DROP_PROGRAM" - - // InputLossActionForUdpOutDropTs is a InputLossActionForUdpOut enum value - InputLossActionForUdpOutDropTs = "DROP_TS" - - // InputLossActionForUdpOutEmitProgram is a InputLossActionForUdpOut enum value - InputLossActionForUdpOutEmitProgram = "EMIT_PROGRAM" -) - -const ( - // InputLossImageTypeColor is a InputLossImageType enum value - InputLossImageTypeColor = "COLOR" - - // InputLossImageTypeSlate is a InputLossImageType enum value - InputLossImageTypeSlate = "SLATE" -) - -const ( - // InputSourceEndBehaviorContinue is a InputSourceEndBehavior enum value - InputSourceEndBehaviorContinue = "CONTINUE" - - // InputSourceEndBehaviorLoop is a InputSourceEndBehavior enum value - InputSourceEndBehaviorLoop = "LOOP" -) - -const ( - // InputStateCreating is a InputState enum value - InputStateCreating = "CREATING" - - // InputStateDetached is a InputState enum value - InputStateDetached = "DETACHED" - - // InputStateAttached is a InputState enum value - InputStateAttached = "ATTACHED" - - // InputStateDeleting is a InputState enum value - InputStateDeleting = "DELETING" - - // InputStateDeleted is a InputState enum value - InputStateDeleted = "DELETED" -) - -const ( - // InputTypeUdpPush is a InputType enum value - InputTypeUdpPush = "UDP_PUSH" - - // InputTypeRtpPush is a InputType enum value - InputTypeRtpPush = "RTP_PUSH" - - // InputTypeRtmpPush is a InputType enum value - InputTypeRtmpPush = "RTMP_PUSH" - - // InputTypeRtmpPull is a InputType enum value - InputTypeRtmpPull = "RTMP_PULL" - - // InputTypeUrlPull is a InputType enum value - InputTypeUrlPull = "URL_PULL" -) - -const ( - // M2tsAbsentInputAudioBehaviorDrop is a M2tsAbsentInputAudioBehavior enum value - M2tsAbsentInputAudioBehaviorDrop = "DROP" - - // M2tsAbsentInputAudioBehaviorEncodeSilence is a M2tsAbsentInputAudioBehavior enum value - M2tsAbsentInputAudioBehaviorEncodeSilence = "ENCODE_SILENCE" -) - -const ( - // M2tsAribDisabled is a M2tsArib 
enum value - M2tsAribDisabled = "DISABLED" - - // M2tsAribEnabled is a M2tsArib enum value - M2tsAribEnabled = "ENABLED" -) - -const ( - // M2tsAribCaptionsPidControlAuto is a M2tsAribCaptionsPidControl enum value - M2tsAribCaptionsPidControlAuto = "AUTO" - - // M2tsAribCaptionsPidControlUseConfigured is a M2tsAribCaptionsPidControl enum value - M2tsAribCaptionsPidControlUseConfigured = "USE_CONFIGURED" -) - -const ( - // M2tsAudioBufferModelAtsc is a M2tsAudioBufferModel enum value - M2tsAudioBufferModelAtsc = "ATSC" - - // M2tsAudioBufferModelDvb is a M2tsAudioBufferModel enum value - M2tsAudioBufferModelDvb = "DVB" -) - -const ( - // M2tsAudioIntervalVideoAndFixedIntervals is a M2tsAudioInterval enum value - M2tsAudioIntervalVideoAndFixedIntervals = "VIDEO_AND_FIXED_INTERVALS" - - // M2tsAudioIntervalVideoInterval is a M2tsAudioInterval enum value - M2tsAudioIntervalVideoInterval = "VIDEO_INTERVAL" -) - -const ( - // M2tsAudioStreamTypeAtsc is a M2tsAudioStreamType enum value - M2tsAudioStreamTypeAtsc = "ATSC" - - // M2tsAudioStreamTypeDvb is a M2tsAudioStreamType enum value - M2tsAudioStreamTypeDvb = "DVB" -) - -const ( - // M2tsBufferModelMultiplex is a M2tsBufferModel enum value - M2tsBufferModelMultiplex = "MULTIPLEX" - - // M2tsBufferModelNone is a M2tsBufferModel enum value - M2tsBufferModelNone = "NONE" -) - -const ( - // M2tsCcDescriptorDisabled is a M2tsCcDescriptor enum value - M2tsCcDescriptorDisabled = "DISABLED" - - // M2tsCcDescriptorEnabled is a M2tsCcDescriptor enum value - M2tsCcDescriptorEnabled = "ENABLED" -) - -const ( - // M2tsEbifControlNone is a M2tsEbifControl enum value - M2tsEbifControlNone = "NONE" - - // M2tsEbifControlPassthrough is a M2tsEbifControl enum value - M2tsEbifControlPassthrough = "PASSTHROUGH" -) - -const ( - // M2tsEbpPlacementVideoAndAudioPids is a M2tsEbpPlacement enum value - M2tsEbpPlacementVideoAndAudioPids = "VIDEO_AND_AUDIO_PIDS" - - // M2tsEbpPlacementVideoPid is a M2tsEbpPlacement enum value - 
M2tsEbpPlacementVideoPid = "VIDEO_PID" -) - -const ( - // M2tsEsRateInPesExclude is a M2tsEsRateInPes enum value - M2tsEsRateInPesExclude = "EXCLUDE" - - // M2tsEsRateInPesInclude is a M2tsEsRateInPes enum value - M2tsEsRateInPesInclude = "INCLUDE" -) - -const ( - // M2tsKlvNone is a M2tsKlv enum value - M2tsKlvNone = "NONE" - - // M2tsKlvPassthrough is a M2tsKlv enum value - M2tsKlvPassthrough = "PASSTHROUGH" -) - -const ( - // M2tsPcrControlConfiguredPcrPeriod is a M2tsPcrControl enum value - M2tsPcrControlConfiguredPcrPeriod = "CONFIGURED_PCR_PERIOD" - - // M2tsPcrControlPcrEveryPesPacket is a M2tsPcrControl enum value - M2tsPcrControlPcrEveryPesPacket = "PCR_EVERY_PES_PACKET" -) - -const ( - // M2tsRateModeCbr is a M2tsRateMode enum value - M2tsRateModeCbr = "CBR" - - // M2tsRateModeVbr is a M2tsRateMode enum value - M2tsRateModeVbr = "VBR" -) - -const ( - // M2tsScte35ControlNone is a M2tsScte35Control enum value - M2tsScte35ControlNone = "NONE" - - // M2tsScte35ControlPassthrough is a M2tsScte35Control enum value - M2tsScte35ControlPassthrough = "PASSTHROUGH" -) - -const ( - // M2tsSegmentationMarkersEbp is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersEbp = "EBP" - - // M2tsSegmentationMarkersEbpLegacy is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersEbpLegacy = "EBP_LEGACY" - - // M2tsSegmentationMarkersNone is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersNone = "NONE" - - // M2tsSegmentationMarkersPsiSegstart is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersPsiSegstart = "PSI_SEGSTART" - - // M2tsSegmentationMarkersRaiAdapt is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersRaiAdapt = "RAI_ADAPT" - - // M2tsSegmentationMarkersRaiSegstart is a M2tsSegmentationMarkers enum value - M2tsSegmentationMarkersRaiSegstart = "RAI_SEGSTART" -) - -const ( - // M2tsSegmentationStyleMaintainCadence is a M2tsSegmentationStyle enum value - M2tsSegmentationStyleMaintainCadence = 
"MAINTAIN_CADENCE" - - // M2tsSegmentationStyleResetCadence is a M2tsSegmentationStyle enum value - M2tsSegmentationStyleResetCadence = "RESET_CADENCE" -) - -const ( - // M2tsTimedMetadataBehaviorNoPassthrough is a M2tsTimedMetadataBehavior enum value - M2tsTimedMetadataBehaviorNoPassthrough = "NO_PASSTHROUGH" - - // M2tsTimedMetadataBehaviorPassthrough is a M2tsTimedMetadataBehavior enum value - M2tsTimedMetadataBehaviorPassthrough = "PASSTHROUGH" -) - -const ( - // M3u8PcrControlConfiguredPcrPeriod is a M3u8PcrControl enum value - M3u8PcrControlConfiguredPcrPeriod = "CONFIGURED_PCR_PERIOD" - - // M3u8PcrControlPcrEveryPesPacket is a M3u8PcrControl enum value - M3u8PcrControlPcrEveryPesPacket = "PCR_EVERY_PES_PACKET" -) - -const ( - // M3u8Scte35BehaviorNoPassthrough is a M3u8Scte35Behavior enum value - M3u8Scte35BehaviorNoPassthrough = "NO_PASSTHROUGH" - - // M3u8Scte35BehaviorPassthrough is a M3u8Scte35Behavior enum value - M3u8Scte35BehaviorPassthrough = "PASSTHROUGH" -) - -const ( - // M3u8TimedMetadataBehaviorNoPassthrough is a M3u8TimedMetadataBehavior enum value - M3u8TimedMetadataBehaviorNoPassthrough = "NO_PASSTHROUGH" - - // M3u8TimedMetadataBehaviorPassthrough is a M3u8TimedMetadataBehavior enum value - M3u8TimedMetadataBehaviorPassthrough = "PASSTHROUGH" -) - -const ( - // Mp2CodingModeCodingMode10 is a Mp2CodingMode enum value - Mp2CodingModeCodingMode10 = "CODING_MODE_1_0" - - // Mp2CodingModeCodingMode20 is a Mp2CodingMode enum value - Mp2CodingModeCodingMode20 = "CODING_MODE_2_0" -) - -const ( - // NetworkInputServerValidationCheckCryptographyAndValidateName is a NetworkInputServerValidation enum value - NetworkInputServerValidationCheckCryptographyAndValidateName = "CHECK_CRYPTOGRAPHY_AND_VALIDATE_NAME" - - // NetworkInputServerValidationCheckCryptographyOnly is a NetworkInputServerValidation enum value - NetworkInputServerValidationCheckCryptographyOnly = "CHECK_CRYPTOGRAPHY_ONLY" -) - -const ( - // Scte20Convert608To708Disabled is a 
Scte20Convert608To708 enum value - Scte20Convert608To708Disabled = "DISABLED" - - // Scte20Convert608To708Upconvert is a Scte20Convert608To708 enum value - Scte20Convert608To708Upconvert = "UPCONVERT" -) - -const ( - // Scte35AposNoRegionalBlackoutBehaviorFollow is a Scte35AposNoRegionalBlackoutBehavior enum value - Scte35AposNoRegionalBlackoutBehaviorFollow = "FOLLOW" - - // Scte35AposNoRegionalBlackoutBehaviorIgnore is a Scte35AposNoRegionalBlackoutBehavior enum value - Scte35AposNoRegionalBlackoutBehaviorIgnore = "IGNORE" -) - -const ( - // Scte35AposWebDeliveryAllowedBehaviorFollow is a Scte35AposWebDeliveryAllowedBehavior enum value - Scte35AposWebDeliveryAllowedBehaviorFollow = "FOLLOW" - - // Scte35AposWebDeliveryAllowedBehaviorIgnore is a Scte35AposWebDeliveryAllowedBehavior enum value - Scte35AposWebDeliveryAllowedBehaviorIgnore = "IGNORE" -) - -const ( - // Scte35SpliceInsertNoRegionalBlackoutBehaviorFollow is a Scte35SpliceInsertNoRegionalBlackoutBehavior enum value - Scte35SpliceInsertNoRegionalBlackoutBehaviorFollow = "FOLLOW" - - // Scte35SpliceInsertNoRegionalBlackoutBehaviorIgnore is a Scte35SpliceInsertNoRegionalBlackoutBehavior enum value - Scte35SpliceInsertNoRegionalBlackoutBehaviorIgnore = "IGNORE" -) - -const ( - // Scte35SpliceInsertWebDeliveryAllowedBehaviorFollow is a Scte35SpliceInsertWebDeliveryAllowedBehavior enum value - Scte35SpliceInsertWebDeliveryAllowedBehaviorFollow = "FOLLOW" - - // Scte35SpliceInsertWebDeliveryAllowedBehaviorIgnore is a Scte35SpliceInsertWebDeliveryAllowedBehavior enum value - Scte35SpliceInsertWebDeliveryAllowedBehaviorIgnore = "IGNORE" -) - -const ( - // SmoothGroupAudioOnlyTimecodeControlPassthrough is a SmoothGroupAudioOnlyTimecodeControl enum value - SmoothGroupAudioOnlyTimecodeControlPassthrough = "PASSTHROUGH" - - // SmoothGroupAudioOnlyTimecodeControlUseConfiguredClock is a SmoothGroupAudioOnlyTimecodeControl enum value - SmoothGroupAudioOnlyTimecodeControlUseConfiguredClock = "USE_CONFIGURED_CLOCK" -) - 
-const ( - // SmoothGroupCertificateModeSelfSigned is a SmoothGroupCertificateMode enum value - SmoothGroupCertificateModeSelfSigned = "SELF_SIGNED" - - // SmoothGroupCertificateModeVerifyAuthenticity is a SmoothGroupCertificateMode enum value - SmoothGroupCertificateModeVerifyAuthenticity = "VERIFY_AUTHENTICITY" -) - -const ( - // SmoothGroupEventIdModeNoEventId is a SmoothGroupEventIdMode enum value - SmoothGroupEventIdModeNoEventId = "NO_EVENT_ID" - - // SmoothGroupEventIdModeUseConfigured is a SmoothGroupEventIdMode enum value - SmoothGroupEventIdModeUseConfigured = "USE_CONFIGURED" - - // SmoothGroupEventIdModeUseTimestamp is a SmoothGroupEventIdMode enum value - SmoothGroupEventIdModeUseTimestamp = "USE_TIMESTAMP" -) - -const ( - // SmoothGroupEventStopBehaviorNone is a SmoothGroupEventStopBehavior enum value - SmoothGroupEventStopBehaviorNone = "NONE" - - // SmoothGroupEventStopBehaviorSendEos is a SmoothGroupEventStopBehavior enum value - SmoothGroupEventStopBehaviorSendEos = "SEND_EOS" -) - -const ( - // SmoothGroupSegmentationModeUseInputSegmentation is a SmoothGroupSegmentationMode enum value - SmoothGroupSegmentationModeUseInputSegmentation = "USE_INPUT_SEGMENTATION" - - // SmoothGroupSegmentationModeUseSegmentDuration is a SmoothGroupSegmentationMode enum value - SmoothGroupSegmentationModeUseSegmentDuration = "USE_SEGMENT_DURATION" -) - -const ( - // SmoothGroupSparseTrackTypeNone is a SmoothGroupSparseTrackType enum value - SmoothGroupSparseTrackTypeNone = "NONE" - - // SmoothGroupSparseTrackTypeScte35 is a SmoothGroupSparseTrackType enum value - SmoothGroupSparseTrackTypeScte35 = "SCTE_35" -) - -const ( - // SmoothGroupStreamManifestBehaviorDoNotSend is a SmoothGroupStreamManifestBehavior enum value - SmoothGroupStreamManifestBehaviorDoNotSend = "DO_NOT_SEND" - - // SmoothGroupStreamManifestBehaviorSend is a SmoothGroupStreamManifestBehavior enum value - SmoothGroupStreamManifestBehaviorSend = "SEND" -) - -const ( - // 
SmoothGroupTimestampOffsetModeUseConfiguredOffset is a SmoothGroupTimestampOffsetMode enum value - SmoothGroupTimestampOffsetModeUseConfiguredOffset = "USE_CONFIGURED_OFFSET" - - // SmoothGroupTimestampOffsetModeUseEventStartDate is a SmoothGroupTimestampOffsetMode enum value - SmoothGroupTimestampOffsetModeUseEventStartDate = "USE_EVENT_START_DATE" -) - -const ( - // TimecodeConfigSourceEmbedded is a TimecodeConfigSource enum value - TimecodeConfigSourceEmbedded = "EMBEDDED" - - // TimecodeConfigSourceSystemclock is a TimecodeConfigSource enum value - TimecodeConfigSourceSystemclock = "SYSTEMCLOCK" - - // TimecodeConfigSourceZerobased is a TimecodeConfigSource enum value - TimecodeConfigSourceZerobased = "ZEROBASED" -) - -const ( - // TtmlDestinationStyleControlPassthrough is a TtmlDestinationStyleControl enum value - TtmlDestinationStyleControlPassthrough = "PASSTHROUGH" - - // TtmlDestinationStyleControlUseConfigured is a TtmlDestinationStyleControl enum value - TtmlDestinationStyleControlUseConfigured = "USE_CONFIGURED" -) - -const ( - // UdpTimedMetadataId3FrameNone is a UdpTimedMetadataId3Frame enum value - UdpTimedMetadataId3FrameNone = "NONE" - - // UdpTimedMetadataId3FramePriv is a UdpTimedMetadataId3Frame enum value - UdpTimedMetadataId3FramePriv = "PRIV" - - // UdpTimedMetadataId3FrameTdrl is a UdpTimedMetadataId3Frame enum value - UdpTimedMetadataId3FrameTdrl = "TDRL" -) - -const ( - // VideoDescriptionRespondToAfdNone is a VideoDescriptionRespondToAfd enum value - VideoDescriptionRespondToAfdNone = "NONE" - - // VideoDescriptionRespondToAfdPassthrough is a VideoDescriptionRespondToAfd enum value - VideoDescriptionRespondToAfdPassthrough = "PASSTHROUGH" - - // VideoDescriptionRespondToAfdRespond is a VideoDescriptionRespondToAfd enum value - VideoDescriptionRespondToAfdRespond = "RESPOND" -) - -const ( - // VideoDescriptionScalingBehaviorDefault is a VideoDescriptionScalingBehavior enum value - VideoDescriptionScalingBehaviorDefault = "DEFAULT" - - // 
VideoDescriptionScalingBehaviorStretchToOutput is a VideoDescriptionScalingBehavior enum value - VideoDescriptionScalingBehaviorStretchToOutput = "STRETCH_TO_OUTPUT" -) - -const ( - // VideoSelectorColorSpaceFollow is a VideoSelectorColorSpace enum value - VideoSelectorColorSpaceFollow = "FOLLOW" - - // VideoSelectorColorSpaceRec601 is a VideoSelectorColorSpace enum value - VideoSelectorColorSpaceRec601 = "REC_601" - - // VideoSelectorColorSpaceRec709 is a VideoSelectorColorSpace enum value - VideoSelectorColorSpaceRec709 = "REC_709" -) - -const ( - // VideoSelectorColorSpaceUsageFallback is a VideoSelectorColorSpaceUsage enum value - VideoSelectorColorSpaceUsageFallback = "FALLBACK" - - // VideoSelectorColorSpaceUsageForce is a VideoSelectorColorSpaceUsage enum value - VideoSelectorColorSpaceUsageForce = "FORCE" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/medialive/doc.go b/vendor/github.com/aws/aws-sdk-go/service/medialive/doc.go deleted file mode 100644 index 22c90987203..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/medialive/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package medialive provides the client and types for making API -// requests to AWS Elemental MediaLive. -// -// API for AWS Elemental MediaLive -// -// See https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14 for more information on this service. -// -// See medialive package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/medialive/ -// -// Using the Client -// -// To contact AWS Elemental MediaLive with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. 
-// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Elemental MediaLive client MediaLive for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/medialive/#New -package medialive diff --git a/vendor/github.com/aws/aws-sdk-go/service/medialive/errors.go b/vendor/github.com/aws/aws-sdk-go/service/medialive/errors.go deleted file mode 100644 index 247fb52c70c..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/medialive/errors.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package medialive - -const ( - - // ErrCodeBadGatewayException for service response error code - // "BadGatewayException". - ErrCodeBadGatewayException = "BadGatewayException" - - // ErrCodeBadRequestException for service response error code - // "BadRequestException". - ErrCodeBadRequestException = "BadRequestException" - - // ErrCodeConflictException for service response error code - // "ConflictException". - ErrCodeConflictException = "ConflictException" - - // ErrCodeForbiddenException for service response error code - // "ForbiddenException". - ErrCodeForbiddenException = "ForbiddenException" - - // ErrCodeGatewayTimeoutException for service response error code - // "GatewayTimeoutException". - ErrCodeGatewayTimeoutException = "GatewayTimeoutException" - - // ErrCodeInternalServerErrorException for service response error code - // "InternalServerErrorException". - ErrCodeInternalServerErrorException = "InternalServerErrorException" - - // ErrCodeNotFoundException for service response error code - // "NotFoundException". - ErrCodeNotFoundException = "NotFoundException" - - // ErrCodeTooManyRequestsException for service response error code - // "TooManyRequestsException". 
- ErrCodeTooManyRequestsException = "TooManyRequestsException" - - // ErrCodeUnprocessableEntityException for service response error code - // "UnprocessableEntityException". - ErrCodeUnprocessableEntityException = "UnprocessableEntityException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go b/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go deleted file mode 100644 index 96e7506c17f..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go +++ /dev/null @@ -1,97 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package medialive - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/restjson" -) - -// MediaLive provides the API operation methods for making requests to -// AWS Elemental MediaLive. See this package's package overview docs -// for details on the service. -// -// MediaLive methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type MediaLive struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "medialive" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the MediaLive client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a MediaLive client from just a session. 
-// svc := medialive.New(mySession) -// -// // Create a MediaLive client with additional configuration -// svc := medialive.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaLive { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaLive { - if len(signingName) == 0 { - signingName = "medialive" - } - svc := &MediaLive{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2017-10-14", - JSONVersion: "1.1", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a MediaLive operation and runs any -// custom request initialization. 
-func (c *MediaLive) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go deleted file mode 100644 index 6f605fe78be..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go +++ /dev/null @@ -1,3242 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package mediapackage - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opCreateChannel = "CreateChannel" - -// CreateChannelRequest generates a "aws/request.Request" representing the -// client's request for the CreateChannel operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateChannel for more information on using the CreateChannel -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateChannelRequest method. 
-// req, resp := client.CreateChannelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/CreateChannel -func (c *MediaPackage) CreateChannelRequest(input *CreateChannelInput) (req *request.Request, output *CreateChannelOutput) { - op := &request.Operation{ - Name: opCreateChannel, - HTTPMethod: "POST", - HTTPPath: "/channels", - } - - if input == nil { - input = &CreateChannelInput{} - } - - output = &CreateChannelOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateChannel API operation for AWS Elemental MediaPackage. -// -// Creates a new Channel. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaPackage's -// API operation CreateChannel for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/CreateChannel -func (c *MediaPackage) CreateChannel(input *CreateChannelInput) (*CreateChannelOutput, error) { - req, out := c.CreateChannelRequest(input) - return out, req.Send() -} - -// CreateChannelWithContext is the same as CreateChannel with the addition of -// the ability to pass a context and additional request options. -// -// See CreateChannel for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) CreateChannelWithContext(ctx aws.Context, input *CreateChannelInput, opts ...request.Option) (*CreateChannelOutput, error) { - req, out := c.CreateChannelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateOriginEndpoint = "CreateOriginEndpoint" - -// CreateOriginEndpointRequest generates a "aws/request.Request" representing the -// client's request for the CreateOriginEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateOriginEndpoint for more information on using the CreateOriginEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateOriginEndpointRequest method. 
-// req, resp := client.CreateOriginEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/CreateOriginEndpoint -func (c *MediaPackage) CreateOriginEndpointRequest(input *CreateOriginEndpointInput) (req *request.Request, output *CreateOriginEndpointOutput) { - op := &request.Operation{ - Name: opCreateOriginEndpoint, - HTTPMethod: "POST", - HTTPPath: "/origin_endpoints", - } - - if input == nil { - input = &CreateOriginEndpointInput{} - } - - output = &CreateOriginEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateOriginEndpoint API operation for AWS Elemental MediaPackage. -// -// Creates a new OriginEndpoint record. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaPackage's -// API operation CreateOriginEndpoint for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/CreateOriginEndpoint -func (c *MediaPackage) CreateOriginEndpoint(input *CreateOriginEndpointInput) (*CreateOriginEndpointOutput, error) { - req, out := c.CreateOriginEndpointRequest(input) - return out, req.Send() -} - -// CreateOriginEndpointWithContext is the same as CreateOriginEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See CreateOriginEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) CreateOriginEndpointWithContext(ctx aws.Context, input *CreateOriginEndpointInput, opts ...request.Option) (*CreateOriginEndpointOutput, error) { - req, out := c.CreateOriginEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteChannel = "DeleteChannel" - -// DeleteChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteChannel operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DeleteChannel for more information on using the DeleteChannel -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteChannelRequest method. -// req, resp := client.DeleteChannelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DeleteChannel -func (c *MediaPackage) DeleteChannelRequest(input *DeleteChannelInput) (req *request.Request, output *DeleteChannelOutput) { - op := &request.Operation{ - Name: opDeleteChannel, - HTTPMethod: "DELETE", - HTTPPath: "/channels/{id}", - } - - if input == nil { - input = &DeleteChannelInput{} - } - - output = &DeleteChannelOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteChannel API operation for AWS Elemental MediaPackage. -// -// Deletes an existing Channel. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaPackage's -// API operation DeleteChannel for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DeleteChannel -func (c *MediaPackage) DeleteChannel(input *DeleteChannelInput) (*DeleteChannelOutput, error) { - req, out := c.DeleteChannelRequest(input) - return out, req.Send() -} - -// DeleteChannelWithContext is the same as DeleteChannel with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteChannel for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) DeleteChannelWithContext(ctx aws.Context, input *DeleteChannelInput, opts ...request.Option) (*DeleteChannelOutput, error) { - req, out := c.DeleteChannelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteOriginEndpoint = "DeleteOriginEndpoint" - -// DeleteOriginEndpointRequest generates a "aws/request.Request" representing the -// client's request for the DeleteOriginEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See DeleteOriginEndpoint for more information on using the DeleteOriginEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteOriginEndpointRequest method. -// req, resp := client.DeleteOriginEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DeleteOriginEndpoint -func (c *MediaPackage) DeleteOriginEndpointRequest(input *DeleteOriginEndpointInput) (req *request.Request, output *DeleteOriginEndpointOutput) { - op := &request.Operation{ - Name: opDeleteOriginEndpoint, - HTTPMethod: "DELETE", - HTTPPath: "/origin_endpoints/{id}", - } - - if input == nil { - input = &DeleteOriginEndpointInput{} - } - - output = &DeleteOriginEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteOriginEndpoint API operation for AWS Elemental MediaPackage. -// -// Deletes an existing OriginEndpoint. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaPackage's -// API operation DeleteOriginEndpoint for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DeleteOriginEndpoint -func (c *MediaPackage) DeleteOriginEndpoint(input *DeleteOriginEndpointInput) (*DeleteOriginEndpointOutput, error) { - req, out := c.DeleteOriginEndpointRequest(input) - return out, req.Send() -} - -// DeleteOriginEndpointWithContext is the same as DeleteOriginEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteOriginEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) DeleteOriginEndpointWithContext(ctx aws.Context, input *DeleteOriginEndpointInput, opts ...request.Option) (*DeleteOriginEndpointOutput, error) { - req, out := c.DeleteOriginEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeChannel = "DescribeChannel" - -// DescribeChannelRequest generates a "aws/request.Request" representing the -// client's request for the DescribeChannel operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DescribeChannel for more information on using the DescribeChannel -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeChannelRequest method. -// req, resp := client.DescribeChannelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DescribeChannel -func (c *MediaPackage) DescribeChannelRequest(input *DescribeChannelInput) (req *request.Request, output *DescribeChannelOutput) { - op := &request.Operation{ - Name: opDescribeChannel, - HTTPMethod: "GET", - HTTPPath: "/channels/{id}", - } - - if input == nil { - input = &DescribeChannelInput{} - } - - output = &DescribeChannelOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeChannel API operation for AWS Elemental MediaPackage. -// -// Gets details about a Channel. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaPackage's -// API operation DescribeChannel for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DescribeChannel -func (c *MediaPackage) DescribeChannel(input *DescribeChannelInput) (*DescribeChannelOutput, error) { - req, out := c.DescribeChannelRequest(input) - return out, req.Send() -} - -// DescribeChannelWithContext is the same as DescribeChannel with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeChannel for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) DescribeChannelWithContext(ctx aws.Context, input *DescribeChannelInput, opts ...request.Option) (*DescribeChannelOutput, error) { - req, out := c.DescribeChannelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeOriginEndpoint = "DescribeOriginEndpoint" - -// DescribeOriginEndpointRequest generates a "aws/request.Request" representing the -// client's request for the DescribeOriginEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See DescribeOriginEndpoint for more information on using the DescribeOriginEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeOriginEndpointRequest method. -// req, resp := client.DescribeOriginEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DescribeOriginEndpoint -func (c *MediaPackage) DescribeOriginEndpointRequest(input *DescribeOriginEndpointInput) (req *request.Request, output *DescribeOriginEndpointOutput) { - op := &request.Operation{ - Name: opDescribeOriginEndpoint, - HTTPMethod: "GET", - HTTPPath: "/origin_endpoints/{id}", - } - - if input == nil { - input = &DescribeOriginEndpointInput{} - } - - output = &DescribeOriginEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeOriginEndpoint API operation for AWS Elemental MediaPackage. -// -// Gets details about an existing OriginEndpoint. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaPackage's -// API operation DescribeOriginEndpoint for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DescribeOriginEndpoint -func (c *MediaPackage) DescribeOriginEndpoint(input *DescribeOriginEndpointInput) (*DescribeOriginEndpointOutput, error) { - req, out := c.DescribeOriginEndpointRequest(input) - return out, req.Send() -} - -// DescribeOriginEndpointWithContext is the same as DescribeOriginEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeOriginEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) DescribeOriginEndpointWithContext(ctx aws.Context, input *DescribeOriginEndpointInput, opts ...request.Option) (*DescribeOriginEndpointOutput, error) { - req, out := c.DescribeOriginEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListChannels = "ListChannels" - -// ListChannelsRequest generates a "aws/request.Request" representing the -// client's request for the ListChannels operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See ListChannels for more information on using the ListChannels -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListChannelsRequest method. -// req, resp := client.ListChannelsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ListChannels -func (c *MediaPackage) ListChannelsRequest(input *ListChannelsInput) (req *request.Request, output *ListChannelsOutput) { - op := &request.Operation{ - Name: opListChannels, - HTTPMethod: "GET", - HTTPPath: "/channels", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListChannelsInput{} - } - - output = &ListChannelsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListChannels API operation for AWS Elemental MediaPackage. -// -// Returns a collection of Channels. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaPackage's -// API operation ListChannels for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ListChannels -func (c *MediaPackage) ListChannels(input *ListChannelsInput) (*ListChannelsOutput, error) { - req, out := c.ListChannelsRequest(input) - return out, req.Send() -} - -// ListChannelsWithContext is the same as ListChannels with the addition of -// the ability to pass a context and additional request options. -// -// See ListChannels for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) ListChannelsWithContext(ctx aws.Context, input *ListChannelsInput, opts ...request.Option) (*ListChannelsOutput, error) { - req, out := c.ListChannelsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListChannelsPages iterates over the pages of a ListChannels operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListChannels method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListChannels operation. 
-// pageNum := 0 -// err := client.ListChannelsPages(params, -// func(page *ListChannelsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *MediaPackage) ListChannelsPages(input *ListChannelsInput, fn func(*ListChannelsOutput, bool) bool) error { - return c.ListChannelsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListChannelsPagesWithContext same as ListChannelsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) ListChannelsPagesWithContext(ctx aws.Context, input *ListChannelsInput, fn func(*ListChannelsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListChannelsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListChannelsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListChannelsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListOriginEndpoints = "ListOriginEndpoints" - -// ListOriginEndpointsRequest generates a "aws/request.Request" representing the -// client's request for the ListOriginEndpoints operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListOriginEndpoints for more information on using the ListOriginEndpoints -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListOriginEndpointsRequest method. -// req, resp := client.ListOriginEndpointsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ListOriginEndpoints -func (c *MediaPackage) ListOriginEndpointsRequest(input *ListOriginEndpointsInput) (req *request.Request, output *ListOriginEndpointsOutput) { - op := &request.Operation{ - Name: opListOriginEndpoints, - HTTPMethod: "GET", - HTTPPath: "/origin_endpoints", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListOriginEndpointsInput{} - } - - output = &ListOriginEndpointsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListOriginEndpoints API operation for AWS Elemental MediaPackage. -// -// Returns a collection of OriginEndpoint records. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaPackage's -// API operation ListOriginEndpoints for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ListOriginEndpoints -func (c *MediaPackage) ListOriginEndpoints(input *ListOriginEndpointsInput) (*ListOriginEndpointsOutput, error) { - req, out := c.ListOriginEndpointsRequest(input) - return out, req.Send() -} - -// ListOriginEndpointsWithContext is the same as ListOriginEndpoints with the addition of -// the ability to pass a context and additional request options. -// -// See ListOriginEndpoints for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) ListOriginEndpointsWithContext(ctx aws.Context, input *ListOriginEndpointsInput, opts ...request.Option) (*ListOriginEndpointsOutput, error) { - req, out := c.ListOriginEndpointsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListOriginEndpointsPages iterates over the pages of a ListOriginEndpoints operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListOriginEndpoints method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. 
-// -// // Example iterating over at most 3 pages of a ListOriginEndpoints operation. -// pageNum := 0 -// err := client.ListOriginEndpointsPages(params, -// func(page *ListOriginEndpointsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *MediaPackage) ListOriginEndpointsPages(input *ListOriginEndpointsInput, fn func(*ListOriginEndpointsOutput, bool) bool) error { - return c.ListOriginEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListOriginEndpointsPagesWithContext same as ListOriginEndpointsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) ListOriginEndpointsPagesWithContext(ctx aws.Context, input *ListOriginEndpointsInput, fn func(*ListOriginEndpointsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListOriginEndpointsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListOriginEndpointsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListOriginEndpointsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opRotateChannelCredentials = "RotateChannelCredentials" - -// RotateChannelCredentialsRequest generates a "aws/request.Request" representing the -// client's request for the RotateChannelCredentials operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See RotateChannelCredentials for more information on using the RotateChannelCredentials -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RotateChannelCredentialsRequest method. -// req, resp := client.RotateChannelCredentialsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/RotateChannelCredentials -func (c *MediaPackage) RotateChannelCredentialsRequest(input *RotateChannelCredentialsInput) (req *request.Request, output *RotateChannelCredentialsOutput) { - op := &request.Operation{ - Name: opRotateChannelCredentials, - HTTPMethod: "PUT", - HTTPPath: "/channels/{id}/credentials", - } - - if input == nil { - input = &RotateChannelCredentialsInput{} - } - - output = &RotateChannelCredentialsOutput{} - req = c.newRequest(op, input, output) - return -} - -// RotateChannelCredentials API operation for AWS Elemental MediaPackage. -// -// Changes the Channel ingest username and password. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaPackage's -// API operation RotateChannelCredentials for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/RotateChannelCredentials -func (c *MediaPackage) RotateChannelCredentials(input *RotateChannelCredentialsInput) (*RotateChannelCredentialsOutput, error) { - req, out := c.RotateChannelCredentialsRequest(input) - return out, req.Send() -} - -// RotateChannelCredentialsWithContext is the same as RotateChannelCredentials with the addition of -// the ability to pass a context and additional request options. -// -// See RotateChannelCredentials for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) RotateChannelCredentialsWithContext(ctx aws.Context, input *RotateChannelCredentialsInput, opts ...request.Option) (*RotateChannelCredentialsOutput, error) { - req, out := c.RotateChannelCredentialsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateChannel = "UpdateChannel" - -// UpdateChannelRequest generates a "aws/request.Request" representing the -// client's request for the UpdateChannel operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See UpdateChannel for more information on using the UpdateChannel -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateChannelRequest method. -// req, resp := client.UpdateChannelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/UpdateChannel -func (c *MediaPackage) UpdateChannelRequest(input *UpdateChannelInput) (req *request.Request, output *UpdateChannelOutput) { - op := &request.Operation{ - Name: opUpdateChannel, - HTTPMethod: "PUT", - HTTPPath: "/channels/{id}", - } - - if input == nil { - input = &UpdateChannelInput{} - } - - output = &UpdateChannelOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateChannel API operation for AWS Elemental MediaPackage. -// -// Updates an existing Channel. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaPackage's -// API operation UpdateChannel for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/UpdateChannel -func (c *MediaPackage) UpdateChannel(input *UpdateChannelInput) (*UpdateChannelOutput, error) { - req, out := c.UpdateChannelRequest(input) - return out, req.Send() -} - -// UpdateChannelWithContext is the same as UpdateChannel with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateChannel for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) UpdateChannelWithContext(ctx aws.Context, input *UpdateChannelInput, opts ...request.Option) (*UpdateChannelOutput, error) { - req, out := c.UpdateChannelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateOriginEndpoint = "UpdateOriginEndpoint" - -// UpdateOriginEndpointRequest generates a "aws/request.Request" representing the -// client's request for the UpdateOriginEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See UpdateOriginEndpoint for more information on using the UpdateOriginEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateOriginEndpointRequest method. -// req, resp := client.UpdateOriginEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/UpdateOriginEndpoint -func (c *MediaPackage) UpdateOriginEndpointRequest(input *UpdateOriginEndpointInput) (req *request.Request, output *UpdateOriginEndpointOutput) { - op := &request.Operation{ - Name: opUpdateOriginEndpoint, - HTTPMethod: "PUT", - HTTPPath: "/origin_endpoints/{id}", - } - - if input == nil { - input = &UpdateOriginEndpointInput{} - } - - output = &UpdateOriginEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateOriginEndpoint API operation for AWS Elemental MediaPackage. -// -// Updates an existing OriginEndpoint. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaPackage's -// API operation UpdateOriginEndpoint for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// -// * ErrCodeForbiddenException "ForbiddenException" -// -// * ErrCodeNotFoundException "NotFoundException" -// -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/UpdateOriginEndpoint -func (c *MediaPackage) UpdateOriginEndpoint(input *UpdateOriginEndpointInput) (*UpdateOriginEndpointOutput, error) { - req, out := c.UpdateOriginEndpointRequest(input) - return out, req.Send() -} - -// UpdateOriginEndpointWithContext is the same as UpdateOriginEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateOriginEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaPackage) UpdateOriginEndpointWithContext(ctx aws.Context, input *UpdateOriginEndpointInput, opts ...request.Option) (*UpdateOriginEndpointOutput, error) { - req, out := c.UpdateOriginEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// A Channel resource configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/Channel -type Channel struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) assigned to the Channel. - Arn *string `locationName:"arn" type:"string"` - - // A short text description of the Channel. 
- Description *string `locationName:"description" type:"string"` - - // An HTTP Live Streaming (HLS) ingest resource configuration. - HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` - - // The ID of the Channel. - Id *string `locationName:"id" type:"string"` -} - -// String returns the string representation -func (s Channel) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Channel) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *Channel) SetArn(v string) *Channel { - s.Arn = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *Channel) SetDescription(v string) *Channel { - s.Description = &v - return s -} - -// SetHlsIngest sets the HlsIngest field's value. -func (s *Channel) SetHlsIngest(v *HlsIngest) *Channel { - s.HlsIngest = v - return s -} - -// SetId sets the Id field's value. -func (s *Channel) SetId(v string) *Channel { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/CreateChannelRequest -type CreateChannelInput struct { - _ struct{} `type:"structure"` - - Description *string `locationName:"description" type:"string"` - - // Id is a required field - Id *string `locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateChannelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateChannelInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateChannelInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *CreateChannelInput) SetDescription(v string) *CreateChannelInput { - s.Description = &v - return s -} - -// SetId sets the Id field's value. -func (s *CreateChannelInput) SetId(v string) *CreateChannelInput { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/CreateChannelResponse -type CreateChannelOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - Description *string `locationName:"description" type:"string"` - - // An HTTP Live Streaming (HLS) ingest resource configuration. - HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` - - Id *string `locationName:"id" type:"string"` -} - -// String returns the string representation -func (s CreateChannelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateChannelOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *CreateChannelOutput) SetArn(v string) *CreateChannelOutput { - s.Arn = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateChannelOutput) SetDescription(v string) *CreateChannelOutput { - s.Description = &v - return s -} - -// SetHlsIngest sets the HlsIngest field's value. -func (s *CreateChannelOutput) SetHlsIngest(v *HlsIngest) *CreateChannelOutput { - s.HlsIngest = v - return s -} - -// SetId sets the Id field's value. 
-func (s *CreateChannelOutput) SetId(v string) *CreateChannelOutput { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/CreateOriginEndpointRequest -type CreateOriginEndpointInput struct { - _ struct{} `type:"structure"` - - // ChannelId is a required field - ChannelId *string `locationName:"channelId" type:"string" required:"true"` - - // A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration. - DashPackage *DashPackage `locationName:"dashPackage" type:"structure"` - - Description *string `locationName:"description" type:"string"` - - // An HTTP Live Streaming (HLS) packaging configuration. - HlsPackage *HlsPackage `locationName:"hlsPackage" type:"structure"` - - // Id is a required field - Id *string `locationName:"id" type:"string" required:"true"` - - ManifestName *string `locationName:"manifestName" type:"string"` - - // A Microsoft Smooth Streaming (MSS) packaging configuration. - MssPackage *MssPackage `locationName:"mssPackage" type:"structure"` - - StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"` - - TimeDelaySeconds *int64 `locationName:"timeDelaySeconds" type:"integer"` - - Whitelist []*string `locationName:"whitelist" type:"list"` -} - -// String returns the string representation -func (s CreateOriginEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateOriginEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateOriginEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateOriginEndpointInput"} - if s.ChannelId == nil { - invalidParams.Add(request.NewErrParamRequired("ChannelId")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.DashPackage != nil { - if err := s.DashPackage.Validate(); err != nil { - invalidParams.AddNested("DashPackage", err.(request.ErrInvalidParams)) - } - } - if s.HlsPackage != nil { - if err := s.HlsPackage.Validate(); err != nil { - invalidParams.AddNested("HlsPackage", err.(request.ErrInvalidParams)) - } - } - if s.MssPackage != nil { - if err := s.MssPackage.Validate(); err != nil { - invalidParams.AddNested("MssPackage", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChannelId sets the ChannelId field's value. -func (s *CreateOriginEndpointInput) SetChannelId(v string) *CreateOriginEndpointInput { - s.ChannelId = &v - return s -} - -// SetDashPackage sets the DashPackage field's value. -func (s *CreateOriginEndpointInput) SetDashPackage(v *DashPackage) *CreateOriginEndpointInput { - s.DashPackage = v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateOriginEndpointInput) SetDescription(v string) *CreateOriginEndpointInput { - s.Description = &v - return s -} - -// SetHlsPackage sets the HlsPackage field's value. -func (s *CreateOriginEndpointInput) SetHlsPackage(v *HlsPackage) *CreateOriginEndpointInput { - s.HlsPackage = v - return s -} - -// SetId sets the Id field's value. -func (s *CreateOriginEndpointInput) SetId(v string) *CreateOriginEndpointInput { - s.Id = &v - return s -} - -// SetManifestName sets the ManifestName field's value. -func (s *CreateOriginEndpointInput) SetManifestName(v string) *CreateOriginEndpointInput { - s.ManifestName = &v - return s -} - -// SetMssPackage sets the MssPackage field's value. 
-func (s *CreateOriginEndpointInput) SetMssPackage(v *MssPackage) *CreateOriginEndpointInput { - s.MssPackage = v - return s -} - -// SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value. -func (s *CreateOriginEndpointInput) SetStartoverWindowSeconds(v int64) *CreateOriginEndpointInput { - s.StartoverWindowSeconds = &v - return s -} - -// SetTimeDelaySeconds sets the TimeDelaySeconds field's value. -func (s *CreateOriginEndpointInput) SetTimeDelaySeconds(v int64) *CreateOriginEndpointInput { - s.TimeDelaySeconds = &v - return s -} - -// SetWhitelist sets the Whitelist field's value. -func (s *CreateOriginEndpointInput) SetWhitelist(v []*string) *CreateOriginEndpointInput { - s.Whitelist = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/CreateOriginEndpointResponse -type CreateOriginEndpointOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - ChannelId *string `locationName:"channelId" type:"string"` - - // A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration. - DashPackage *DashPackage `locationName:"dashPackage" type:"structure"` - - Description *string `locationName:"description" type:"string"` - - // An HTTP Live Streaming (HLS) packaging configuration. - HlsPackage *HlsPackage `locationName:"hlsPackage" type:"structure"` - - Id *string `locationName:"id" type:"string"` - - ManifestName *string `locationName:"manifestName" type:"string"` - - // A Microsoft Smooth Streaming (MSS) packaging configuration. 
- MssPackage *MssPackage `locationName:"mssPackage" type:"structure"` - - StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"` - - TimeDelaySeconds *int64 `locationName:"timeDelaySeconds" type:"integer"` - - Url *string `locationName:"url" type:"string"` - - Whitelist []*string `locationName:"whitelist" type:"list"` -} - -// String returns the string representation -func (s CreateOriginEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateOriginEndpointOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *CreateOriginEndpointOutput) SetArn(v string) *CreateOriginEndpointOutput { - s.Arn = &v - return s -} - -// SetChannelId sets the ChannelId field's value. -func (s *CreateOriginEndpointOutput) SetChannelId(v string) *CreateOriginEndpointOutput { - s.ChannelId = &v - return s -} - -// SetDashPackage sets the DashPackage field's value. -func (s *CreateOriginEndpointOutput) SetDashPackage(v *DashPackage) *CreateOriginEndpointOutput { - s.DashPackage = v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateOriginEndpointOutput) SetDescription(v string) *CreateOriginEndpointOutput { - s.Description = &v - return s -} - -// SetHlsPackage sets the HlsPackage field's value. -func (s *CreateOriginEndpointOutput) SetHlsPackage(v *HlsPackage) *CreateOriginEndpointOutput { - s.HlsPackage = v - return s -} - -// SetId sets the Id field's value. -func (s *CreateOriginEndpointOutput) SetId(v string) *CreateOriginEndpointOutput { - s.Id = &v - return s -} - -// SetManifestName sets the ManifestName field's value. -func (s *CreateOriginEndpointOutput) SetManifestName(v string) *CreateOriginEndpointOutput { - s.ManifestName = &v - return s -} - -// SetMssPackage sets the MssPackage field's value. 
-func (s *CreateOriginEndpointOutput) SetMssPackage(v *MssPackage) *CreateOriginEndpointOutput { - s.MssPackage = v - return s -} - -// SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value. -func (s *CreateOriginEndpointOutput) SetStartoverWindowSeconds(v int64) *CreateOriginEndpointOutput { - s.StartoverWindowSeconds = &v - return s -} - -// SetTimeDelaySeconds sets the TimeDelaySeconds field's value. -func (s *CreateOriginEndpointOutput) SetTimeDelaySeconds(v int64) *CreateOriginEndpointOutput { - s.TimeDelaySeconds = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *CreateOriginEndpointOutput) SetUrl(v string) *CreateOriginEndpointOutput { - s.Url = &v - return s -} - -// SetWhitelist sets the Whitelist field's value. -func (s *CreateOriginEndpointOutput) SetWhitelist(v []*string) *CreateOriginEndpointOutput { - s.Whitelist = v - return s -} - -// A Dynamic Adaptive Streaming over HTTP (DASH) encryption configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DashEncryption -type DashEncryption struct { - _ struct{} `type:"structure"` - - // Time (in seconds) between each encryption key rotation. - KeyRotationIntervalSeconds *int64 `locationName:"keyRotationIntervalSeconds" type:"integer"` - - // A configuration for accessing an external Secure Packager and Encoder Key - // Exchange (SPEKE) service that will provide encryption keys. - // - // SpekeKeyProvider is a required field - SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure" required:"true"` -} - -// String returns the string representation -func (s DashEncryption) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DashEncryption) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DashEncryption) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DashEncryption"} - if s.SpekeKeyProvider == nil { - invalidParams.Add(request.NewErrParamRequired("SpekeKeyProvider")) - } - if s.SpekeKeyProvider != nil { - if err := s.SpekeKeyProvider.Validate(); err != nil { - invalidParams.AddNested("SpekeKeyProvider", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyRotationIntervalSeconds sets the KeyRotationIntervalSeconds field's value. -func (s *DashEncryption) SetKeyRotationIntervalSeconds(v int64) *DashEncryption { - s.KeyRotationIntervalSeconds = &v - return s -} - -// SetSpekeKeyProvider sets the SpekeKeyProvider field's value. -func (s *DashEncryption) SetSpekeKeyProvider(v *SpekeKeyProvider) *DashEncryption { - s.SpekeKeyProvider = v - return s -} - -// A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DashPackage -type DashPackage struct { - _ struct{} `type:"structure"` - - // A Dynamic Adaptive Streaming over HTTP (DASH) encryption configuration. - Encryption *DashEncryption `locationName:"encryption" type:"structure"` - - // Time window (in seconds) contained in each manifest. - ManifestWindowSeconds *int64 `locationName:"manifestWindowSeconds" type:"integer"` - - // Minimum duration (in seconds) that a player will buffer media before starting - // the presentation. - MinBufferTimeSeconds *int64 `locationName:"minBufferTimeSeconds" type:"integer"` - - // Minimum duration (in seconds) between potential changes to the Dynamic Adaptive - // Streaming over HTTP (DASH) Media Presentation Description (MPD). - MinUpdatePeriodSeconds *int64 `locationName:"minUpdatePeriodSeconds" type:"integer"` - - // The Dynamic Adaptive Streaming over HTTP (DASH) profile type. When set to - // "HBBTV_1_5", HbbTV 1.5 compliant output is enabled. 
- Profile *string `locationName:"profile" type:"string" enum:"Profile"` - - // Duration (in seconds) of each segment. Actual segments will berounded to - // the nearest multiple of the source segment duration. - SegmentDurationSeconds *int64 `locationName:"segmentDurationSeconds" type:"integer"` - - // A StreamSelection configuration. - StreamSelection *StreamSelection `locationName:"streamSelection" type:"structure"` - - // Duration (in seconds) to delay live content before presentation. - SuggestedPresentationDelaySeconds *int64 `locationName:"suggestedPresentationDelaySeconds" type:"integer"` -} - -// String returns the string representation -func (s DashPackage) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DashPackage) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DashPackage) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DashPackage"} - if s.Encryption != nil { - if err := s.Encryption.Validate(); err != nil { - invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryption sets the Encryption field's value. -func (s *DashPackage) SetEncryption(v *DashEncryption) *DashPackage { - s.Encryption = v - return s -} - -// SetManifestWindowSeconds sets the ManifestWindowSeconds field's value. -func (s *DashPackage) SetManifestWindowSeconds(v int64) *DashPackage { - s.ManifestWindowSeconds = &v - return s -} - -// SetMinBufferTimeSeconds sets the MinBufferTimeSeconds field's value. -func (s *DashPackage) SetMinBufferTimeSeconds(v int64) *DashPackage { - s.MinBufferTimeSeconds = &v - return s -} - -// SetMinUpdatePeriodSeconds sets the MinUpdatePeriodSeconds field's value. 
-func (s *DashPackage) SetMinUpdatePeriodSeconds(v int64) *DashPackage { - s.MinUpdatePeriodSeconds = &v - return s -} - -// SetProfile sets the Profile field's value. -func (s *DashPackage) SetProfile(v string) *DashPackage { - s.Profile = &v - return s -} - -// SetSegmentDurationSeconds sets the SegmentDurationSeconds field's value. -func (s *DashPackage) SetSegmentDurationSeconds(v int64) *DashPackage { - s.SegmentDurationSeconds = &v - return s -} - -// SetStreamSelection sets the StreamSelection field's value. -func (s *DashPackage) SetStreamSelection(v *StreamSelection) *DashPackage { - s.StreamSelection = v - return s -} - -// SetSuggestedPresentationDelaySeconds sets the SuggestedPresentationDelaySeconds field's value. -func (s *DashPackage) SetSuggestedPresentationDelaySeconds(v int64) *DashPackage { - s.SuggestedPresentationDelaySeconds = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DeleteChannelRequest -type DeleteChannelInput struct { - _ struct{} `type:"structure"` - - // Id is a required field - Id *string `location:"uri" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteChannelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteChannelInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteChannelInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetId sets the Id field's value. 
-func (s *DeleteChannelInput) SetId(v string) *DeleteChannelInput { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DeleteChannelResponse -type DeleteChannelOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteChannelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteChannelOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DeleteOriginEndpointRequest -type DeleteOriginEndpointInput struct { - _ struct{} `type:"structure"` - - // Id is a required field - Id *string `location:"uri" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteOriginEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteOriginEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteOriginEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteOriginEndpointInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetId sets the Id field's value. 
-func (s *DeleteOriginEndpointInput) SetId(v string) *DeleteOriginEndpointInput { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DeleteOriginEndpointResponse -type DeleteOriginEndpointOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteOriginEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteOriginEndpointOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DescribeChannelRequest -type DescribeChannelInput struct { - _ struct{} `type:"structure"` - - // Id is a required field - Id *string `location:"uri" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeChannelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeChannelInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeChannelInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetId sets the Id field's value. -func (s *DescribeChannelInput) SetId(v string) *DescribeChannelInput { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DescribeChannelResponse -type DescribeChannelOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - Description *string `locationName:"description" type:"string"` - - // An HTTP Live Streaming (HLS) ingest resource configuration. 
- HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` - - Id *string `locationName:"id" type:"string"` -} - -// String returns the string representation -func (s DescribeChannelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeChannelOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *DescribeChannelOutput) SetArn(v string) *DescribeChannelOutput { - s.Arn = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *DescribeChannelOutput) SetDescription(v string) *DescribeChannelOutput { - s.Description = &v - return s -} - -// SetHlsIngest sets the HlsIngest field's value. -func (s *DescribeChannelOutput) SetHlsIngest(v *HlsIngest) *DescribeChannelOutput { - s.HlsIngest = v - return s -} - -// SetId sets the Id field's value. -func (s *DescribeChannelOutput) SetId(v string) *DescribeChannelOutput { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DescribeOriginEndpointRequest -type DescribeOriginEndpointInput struct { - _ struct{} `type:"structure"` - - // Id is a required field - Id *string `location:"uri" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeOriginEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeOriginEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeOriginEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeOriginEndpointInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetId sets the Id field's value. 
-func (s *DescribeOriginEndpointInput) SetId(v string) *DescribeOriginEndpointInput { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DescribeOriginEndpointResponse -type DescribeOriginEndpointOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - ChannelId *string `locationName:"channelId" type:"string"` - - // A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration. - DashPackage *DashPackage `locationName:"dashPackage" type:"structure"` - - Description *string `locationName:"description" type:"string"` - - // An HTTP Live Streaming (HLS) packaging configuration. - HlsPackage *HlsPackage `locationName:"hlsPackage" type:"structure"` - - Id *string `locationName:"id" type:"string"` - - ManifestName *string `locationName:"manifestName" type:"string"` - - // A Microsoft Smooth Streaming (MSS) packaging configuration. - MssPackage *MssPackage `locationName:"mssPackage" type:"structure"` - - StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"` - - TimeDelaySeconds *int64 `locationName:"timeDelaySeconds" type:"integer"` - - Url *string `locationName:"url" type:"string"` - - Whitelist []*string `locationName:"whitelist" type:"list"` -} - -// String returns the string representation -func (s DescribeOriginEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeOriginEndpointOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *DescribeOriginEndpointOutput) SetArn(v string) *DescribeOriginEndpointOutput { - s.Arn = &v - return s -} - -// SetChannelId sets the ChannelId field's value. -func (s *DescribeOriginEndpointOutput) SetChannelId(v string) *DescribeOriginEndpointOutput { - s.ChannelId = &v - return s -} - -// SetDashPackage sets the DashPackage field's value. 
-func (s *DescribeOriginEndpointOutput) SetDashPackage(v *DashPackage) *DescribeOriginEndpointOutput { - s.DashPackage = v - return s -} - -// SetDescription sets the Description field's value. -func (s *DescribeOriginEndpointOutput) SetDescription(v string) *DescribeOriginEndpointOutput { - s.Description = &v - return s -} - -// SetHlsPackage sets the HlsPackage field's value. -func (s *DescribeOriginEndpointOutput) SetHlsPackage(v *HlsPackage) *DescribeOriginEndpointOutput { - s.HlsPackage = v - return s -} - -// SetId sets the Id field's value. -func (s *DescribeOriginEndpointOutput) SetId(v string) *DescribeOriginEndpointOutput { - s.Id = &v - return s -} - -// SetManifestName sets the ManifestName field's value. -func (s *DescribeOriginEndpointOutput) SetManifestName(v string) *DescribeOriginEndpointOutput { - s.ManifestName = &v - return s -} - -// SetMssPackage sets the MssPackage field's value. -func (s *DescribeOriginEndpointOutput) SetMssPackage(v *MssPackage) *DescribeOriginEndpointOutput { - s.MssPackage = v - return s -} - -// SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value. -func (s *DescribeOriginEndpointOutput) SetStartoverWindowSeconds(v int64) *DescribeOriginEndpointOutput { - s.StartoverWindowSeconds = &v - return s -} - -// SetTimeDelaySeconds sets the TimeDelaySeconds field's value. -func (s *DescribeOriginEndpointOutput) SetTimeDelaySeconds(v int64) *DescribeOriginEndpointOutput { - s.TimeDelaySeconds = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *DescribeOriginEndpointOutput) SetUrl(v string) *DescribeOriginEndpointOutput { - s.Url = &v - return s -} - -// SetWhitelist sets the Whitelist field's value. -func (s *DescribeOriginEndpointOutput) SetWhitelist(v []*string) *DescribeOriginEndpointOutput { - s.Whitelist = v - return s -} - -// An HTTP Live Streaming (HLS) encryption configuration. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/HlsEncryption -type HlsEncryption struct { - _ struct{} `type:"structure"` - - // A constant initialization vector for encryption (optional).When not specified - // the initialization vector will be periodically rotated. - ConstantInitializationVector *string `locationName:"constantInitializationVector" type:"string"` - - // The encryption method to use. - EncryptionMethod *string `locationName:"encryptionMethod" type:"string" enum:"EncryptionMethod"` - - // Interval (in seconds) between each encryption key rotation. - KeyRotationIntervalSeconds *int64 `locationName:"keyRotationIntervalSeconds" type:"integer"` - - // When enabled, the EXT-X-KEY tag will be repeated in output manifests. - RepeatExtXKey *bool `locationName:"repeatExtXKey" type:"boolean"` - - // A configuration for accessing an external Secure Packager and Encoder Key - // Exchange (SPEKE) service that will provide encryption keys. - // - // SpekeKeyProvider is a required field - SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure" required:"true"` -} - -// String returns the string representation -func (s HlsEncryption) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsEncryption) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *HlsEncryption) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HlsEncryption"} - if s.SpekeKeyProvider == nil { - invalidParams.Add(request.NewErrParamRequired("SpekeKeyProvider")) - } - if s.SpekeKeyProvider != nil { - if err := s.SpekeKeyProvider.Validate(); err != nil { - invalidParams.AddNested("SpekeKeyProvider", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConstantInitializationVector sets the ConstantInitializationVector field's value. -func (s *HlsEncryption) SetConstantInitializationVector(v string) *HlsEncryption { - s.ConstantInitializationVector = &v - return s -} - -// SetEncryptionMethod sets the EncryptionMethod field's value. -func (s *HlsEncryption) SetEncryptionMethod(v string) *HlsEncryption { - s.EncryptionMethod = &v - return s -} - -// SetKeyRotationIntervalSeconds sets the KeyRotationIntervalSeconds field's value. -func (s *HlsEncryption) SetKeyRotationIntervalSeconds(v int64) *HlsEncryption { - s.KeyRotationIntervalSeconds = &v - return s -} - -// SetRepeatExtXKey sets the RepeatExtXKey field's value. -func (s *HlsEncryption) SetRepeatExtXKey(v bool) *HlsEncryption { - s.RepeatExtXKey = &v - return s -} - -// SetSpekeKeyProvider sets the SpekeKeyProvider field's value. -func (s *HlsEncryption) SetSpekeKeyProvider(v *SpekeKeyProvider) *HlsEncryption { - s.SpekeKeyProvider = v - return s -} - -// An HTTP Live Streaming (HLS) ingest resource configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/HlsIngest -type HlsIngest struct { - _ struct{} `type:"structure"` - - // A list of endpoints to which the source stream should be sent. 
- IngestEndpoints []*IngestEndpoint `locationName:"ingestEndpoints" type:"list"` -} - -// String returns the string representation -func (s HlsIngest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsIngest) GoString() string { - return s.String() -} - -// SetIngestEndpoints sets the IngestEndpoints field's value. -func (s *HlsIngest) SetIngestEndpoints(v []*IngestEndpoint) *HlsIngest { - s.IngestEndpoints = v - return s -} - -// An HTTP Live Streaming (HLS) packaging configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/HlsPackage -type HlsPackage struct { - _ struct{} `type:"structure"` - - // This setting controls how ad markers are included in the packaged OriginEndpoint."NONE" - // will omit all SCTE-35 ad markers from the output."PASSTHROUGH" causes the - // manifest to contain a copy of the SCTE-35 admarkers (comments) taken directly - // from the input HTTP Live Streaming (HLS) manifest."SCTE35_ENHANCED" generates - // ad markers and blackout tags based on SCTE-35messages in the input source. - AdMarkers *string `locationName:"adMarkers" type:"string" enum:"AdMarkers"` - - // An HTTP Live Streaming (HLS) encryption configuration. - Encryption *HlsEncryption `locationName:"encryption" type:"structure"` - - // When enabled, an I-Frame only stream will be included in the output. - IncludeIframeOnlyStream *bool `locationName:"includeIframeOnlyStream" type:"boolean"` - - // The HTTP Live Streaming (HLS) playlist type.When either "EVENT" or "VOD" - // is specified, a corresponding EXT-X-PLAYLIST-TYPEentry will be included in - // the media playlist. - PlaylistType *string `locationName:"playlistType" type:"string" enum:"PlaylistType"` - - // Time window (in seconds) contained in each parent manifest. 
- PlaylistWindowSeconds *int64 `locationName:"playlistWindowSeconds" type:"integer"` - - // The interval (in seconds) between each EXT-X-PROGRAM-DATE-TIME taginserted - // into manifests. Additionally, when an interval is specifiedID3Timed Metadata - // messages will be generated every 5 seconds using the ingest time of the content.If - // the interval is not specified, or set to 0, thenno EXT-X-PROGRAM-DATE-TIME - // tags will be inserted into manifests and noID3Timed Metadata messages will - // be generated. Note that irrespectiveof this parameter, if any ID3 Timed Metadata - // is found in HTTP Live Streaming (HLS) input,it will be passed through to - // HLS output. - ProgramDateTimeIntervalSeconds *int64 `locationName:"programDateTimeIntervalSeconds" type:"integer"` - - // Duration (in seconds) of each fragment. Actual fragments will berounded to - // the nearest multiple of the source fragment duration. - SegmentDurationSeconds *int64 `locationName:"segmentDurationSeconds" type:"integer"` - - // A StreamSelection configuration. - StreamSelection *StreamSelection `locationName:"streamSelection" type:"structure"` - - // When enabled, audio streams will be placed in rendition groups in the output. - UseAudioRenditionGroup *bool `locationName:"useAudioRenditionGroup" type:"boolean"` -} - -// String returns the string representation -func (s HlsPackage) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HlsPackage) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *HlsPackage) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HlsPackage"} - if s.Encryption != nil { - if err := s.Encryption.Validate(); err != nil { - invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAdMarkers sets the AdMarkers field's value. -func (s *HlsPackage) SetAdMarkers(v string) *HlsPackage { - s.AdMarkers = &v - return s -} - -// SetEncryption sets the Encryption field's value. -func (s *HlsPackage) SetEncryption(v *HlsEncryption) *HlsPackage { - s.Encryption = v - return s -} - -// SetIncludeIframeOnlyStream sets the IncludeIframeOnlyStream field's value. -func (s *HlsPackage) SetIncludeIframeOnlyStream(v bool) *HlsPackage { - s.IncludeIframeOnlyStream = &v - return s -} - -// SetPlaylistType sets the PlaylistType field's value. -func (s *HlsPackage) SetPlaylistType(v string) *HlsPackage { - s.PlaylistType = &v - return s -} - -// SetPlaylistWindowSeconds sets the PlaylistWindowSeconds field's value. -func (s *HlsPackage) SetPlaylistWindowSeconds(v int64) *HlsPackage { - s.PlaylistWindowSeconds = &v - return s -} - -// SetProgramDateTimeIntervalSeconds sets the ProgramDateTimeIntervalSeconds field's value. -func (s *HlsPackage) SetProgramDateTimeIntervalSeconds(v int64) *HlsPackage { - s.ProgramDateTimeIntervalSeconds = &v - return s -} - -// SetSegmentDurationSeconds sets the SegmentDurationSeconds field's value. -func (s *HlsPackage) SetSegmentDurationSeconds(v int64) *HlsPackage { - s.SegmentDurationSeconds = &v - return s -} - -// SetStreamSelection sets the StreamSelection field's value. -func (s *HlsPackage) SetStreamSelection(v *StreamSelection) *HlsPackage { - s.StreamSelection = v - return s -} - -// SetUseAudioRenditionGroup sets the UseAudioRenditionGroup field's value. 
-func (s *HlsPackage) SetUseAudioRenditionGroup(v bool) *HlsPackage { - s.UseAudioRenditionGroup = &v - return s -} - -// An endpoint for ingesting source content for a Channel. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/IngestEndpoint -type IngestEndpoint struct { - _ struct{} `type:"structure"` - - // The system generated password for ingest authentication. - Password *string `locationName:"password" type:"string"` - - // The ingest URL to which the source stream should be sent. - Url *string `locationName:"url" type:"string"` - - // The system generated username for ingest authentication. - Username *string `locationName:"username" type:"string"` -} - -// String returns the string representation -func (s IngestEndpoint) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s IngestEndpoint) GoString() string { - return s.String() -} - -// SetPassword sets the Password field's value. -func (s *IngestEndpoint) SetPassword(v string) *IngestEndpoint { - s.Password = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *IngestEndpoint) SetUrl(v string) *IngestEndpoint { - s.Url = &v - return s -} - -// SetUsername sets the Username field's value. 
-func (s *IngestEndpoint) SetUsername(v string) *IngestEndpoint { - s.Username = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ListChannelsRequest -type ListChannelsInput struct { - _ struct{} `type:"structure"` - - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListChannelsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListChannelsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListChannelsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListChannelsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListChannelsInput) SetMaxResults(v int64) *ListChannelsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListChannelsInput) SetNextToken(v string) *ListChannelsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ListChannelsResponse -type ListChannelsOutput struct { - _ struct{} `type:"structure"` - - Channels []*Channel `locationName:"channels" type:"list"` - - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListChannelsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListChannelsOutput) GoString() string { - return s.String() -} - -// SetChannels sets the Channels field's value. -func (s *ListChannelsOutput) SetChannels(v []*Channel) *ListChannelsOutput { - s.Channels = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListChannelsOutput) SetNextToken(v string) *ListChannelsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ListOriginEndpointsRequest -type ListOriginEndpointsInput struct { - _ struct{} `type:"structure"` - - ChannelId *string `location:"querystring" locationName:"channelId" type:"string"` - - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` -} - -// String returns the string representation -func (s ListOriginEndpointsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListOriginEndpointsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListOriginEndpointsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListOriginEndpointsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChannelId sets the ChannelId field's value. -func (s *ListOriginEndpointsInput) SetChannelId(v string) *ListOriginEndpointsInput { - s.ChannelId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListOriginEndpointsInput) SetMaxResults(v int64) *ListOriginEndpointsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListOriginEndpointsInput) SetNextToken(v string) *ListOriginEndpointsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ListOriginEndpointsResponse -type ListOriginEndpointsOutput struct { - _ struct{} `type:"structure"` - - NextToken *string `locationName:"nextToken" type:"string"` - - OriginEndpoints []*OriginEndpoint `locationName:"originEndpoints" type:"list"` -} - -// String returns the string representation -func (s ListOriginEndpointsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListOriginEndpointsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListOriginEndpointsOutput) SetNextToken(v string) *ListOriginEndpointsOutput { - s.NextToken = &v - return s -} - -// SetOriginEndpoints sets the OriginEndpoints field's value. -func (s *ListOriginEndpointsOutput) SetOriginEndpoints(v []*OriginEndpoint) *ListOriginEndpointsOutput { - s.OriginEndpoints = v - return s -} - -// A Microsoft Smooth Streaming (MSS) encryption configuration. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/MssEncryption -type MssEncryption struct { - _ struct{} `type:"structure"` - - // A configuration for accessing an external Secure Packager and Encoder Key - // Exchange (SPEKE) service that will provide encryption keys. - // - // SpekeKeyProvider is a required field - SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure" required:"true"` -} - -// String returns the string representation -func (s MssEncryption) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MssEncryption) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MssEncryption) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MssEncryption"} - if s.SpekeKeyProvider == nil { - invalidParams.Add(request.NewErrParamRequired("SpekeKeyProvider")) - } - if s.SpekeKeyProvider != nil { - if err := s.SpekeKeyProvider.Validate(); err != nil { - invalidParams.AddNested("SpekeKeyProvider", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSpekeKeyProvider sets the SpekeKeyProvider field's value. -func (s *MssEncryption) SetSpekeKeyProvider(v *SpekeKeyProvider) *MssEncryption { - s.SpekeKeyProvider = v - return s -} - -// A Microsoft Smooth Streaming (MSS) packaging configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/MssPackage -type MssPackage struct { - _ struct{} `type:"structure"` - - // A Microsoft Smooth Streaming (MSS) encryption configuration. - Encryption *MssEncryption `locationName:"encryption" type:"structure"` - - // The time window (in seconds) contained in each manifest. - ManifestWindowSeconds *int64 `locationName:"manifestWindowSeconds" type:"integer"` - - // The duration (in seconds) of each segment. 
- SegmentDurationSeconds *int64 `locationName:"segmentDurationSeconds" type:"integer"` - - // A StreamSelection configuration. - StreamSelection *StreamSelection `locationName:"streamSelection" type:"structure"` -} - -// String returns the string representation -func (s MssPackage) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MssPackage) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MssPackage) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MssPackage"} - if s.Encryption != nil { - if err := s.Encryption.Validate(); err != nil { - invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryption sets the Encryption field's value. -func (s *MssPackage) SetEncryption(v *MssEncryption) *MssPackage { - s.Encryption = v - return s -} - -// SetManifestWindowSeconds sets the ManifestWindowSeconds field's value. -func (s *MssPackage) SetManifestWindowSeconds(v int64) *MssPackage { - s.ManifestWindowSeconds = &v - return s -} - -// SetSegmentDurationSeconds sets the SegmentDurationSeconds field's value. -func (s *MssPackage) SetSegmentDurationSeconds(v int64) *MssPackage { - s.SegmentDurationSeconds = &v - return s -} - -// SetStreamSelection sets the StreamSelection field's value. -func (s *MssPackage) SetStreamSelection(v *StreamSelection) *MssPackage { - s.StreamSelection = v - return s -} - -// An OriginEndpoint resource configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/OriginEndpoint -type OriginEndpoint struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) assigned to the OriginEndpoint. - Arn *string `locationName:"arn" type:"string"` - - // The ID of the Channel the OriginEndpoint is associated with. 
- ChannelId *string `locationName:"channelId" type:"string"` - - // A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration. - DashPackage *DashPackage `locationName:"dashPackage" type:"structure"` - - // A short text description of the OriginEndpoint. - Description *string `locationName:"description" type:"string"` - - // An HTTP Live Streaming (HLS) packaging configuration. - HlsPackage *HlsPackage `locationName:"hlsPackage" type:"structure"` - - // The ID of the OriginEndpoint. - Id *string `locationName:"id" type:"string"` - - // A short string appended to the end of the OriginEndpoint URL. - ManifestName *string `locationName:"manifestName" type:"string"` - - // A Microsoft Smooth Streaming (MSS) packaging configuration. - MssPackage *MssPackage `locationName:"mssPackage" type:"structure"` - - // Maximum duration (seconds) of content to retain for startover playback.If - // not specified, startover playback will be disabled for the OriginEndpoint. - StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"` - - // Amount of delay (seconds) to enforce on the playback of live content.If not - // specified, there will be no time delay in effect for the OriginEndpoint. - TimeDelaySeconds *int64 `locationName:"timeDelaySeconds" type:"integer"` - - // The URL of the packaged OriginEndpoint for consumption. - Url *string `locationName:"url" type:"string"` - - // A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint. - Whitelist []*string `locationName:"whitelist" type:"list"` -} - -// String returns the string representation -func (s OriginEndpoint) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OriginEndpoint) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *OriginEndpoint) SetArn(v string) *OriginEndpoint { - s.Arn = &v - return s -} - -// SetChannelId sets the ChannelId field's value. 
-func (s *OriginEndpoint) SetChannelId(v string) *OriginEndpoint { - s.ChannelId = &v - return s -} - -// SetDashPackage sets the DashPackage field's value. -func (s *OriginEndpoint) SetDashPackage(v *DashPackage) *OriginEndpoint { - s.DashPackage = v - return s -} - -// SetDescription sets the Description field's value. -func (s *OriginEndpoint) SetDescription(v string) *OriginEndpoint { - s.Description = &v - return s -} - -// SetHlsPackage sets the HlsPackage field's value. -func (s *OriginEndpoint) SetHlsPackage(v *HlsPackage) *OriginEndpoint { - s.HlsPackage = v - return s -} - -// SetId sets the Id field's value. -func (s *OriginEndpoint) SetId(v string) *OriginEndpoint { - s.Id = &v - return s -} - -// SetManifestName sets the ManifestName field's value. -func (s *OriginEndpoint) SetManifestName(v string) *OriginEndpoint { - s.ManifestName = &v - return s -} - -// SetMssPackage sets the MssPackage field's value. -func (s *OriginEndpoint) SetMssPackage(v *MssPackage) *OriginEndpoint { - s.MssPackage = v - return s -} - -// SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value. -func (s *OriginEndpoint) SetStartoverWindowSeconds(v int64) *OriginEndpoint { - s.StartoverWindowSeconds = &v - return s -} - -// SetTimeDelaySeconds sets the TimeDelaySeconds field's value. -func (s *OriginEndpoint) SetTimeDelaySeconds(v int64) *OriginEndpoint { - s.TimeDelaySeconds = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *OriginEndpoint) SetUrl(v string) *OriginEndpoint { - s.Url = &v - return s -} - -// SetWhitelist sets the Whitelist field's value. 
-func (s *OriginEndpoint) SetWhitelist(v []*string) *OriginEndpoint { - s.Whitelist = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/RotateChannelCredentialsRequest -type RotateChannelCredentialsInput struct { - _ struct{} `type:"structure"` - - // Id is a required field - Id *string `location:"uri" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s RotateChannelCredentialsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RotateChannelCredentialsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RotateChannelCredentialsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RotateChannelCredentialsInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetId sets the Id field's value. -func (s *RotateChannelCredentialsInput) SetId(v string) *RotateChannelCredentialsInput { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/RotateChannelCredentialsResponse -type RotateChannelCredentialsOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - Description *string `locationName:"description" type:"string"` - - // An HTTP Live Streaming (HLS) ingest resource configuration. 
- HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` - - Id *string `locationName:"id" type:"string"` -} - -// String returns the string representation -func (s RotateChannelCredentialsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RotateChannelCredentialsOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *RotateChannelCredentialsOutput) SetArn(v string) *RotateChannelCredentialsOutput { - s.Arn = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *RotateChannelCredentialsOutput) SetDescription(v string) *RotateChannelCredentialsOutput { - s.Description = &v - return s -} - -// SetHlsIngest sets the HlsIngest field's value. -func (s *RotateChannelCredentialsOutput) SetHlsIngest(v *HlsIngest) *RotateChannelCredentialsOutput { - s.HlsIngest = v - return s -} - -// SetId sets the Id field's value. -func (s *RotateChannelCredentialsOutput) SetId(v string) *RotateChannelCredentialsOutput { - s.Id = &v - return s -} - -// A configuration for accessing an external Secure Packager and Encoder Key -// Exchange (SPEKE) service that will provide encryption keys. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/SpekeKeyProvider -type SpekeKeyProvider struct { - _ struct{} `type:"structure"` - - // The resource ID to include in key requests. - // - // ResourceId is a required field - ResourceId *string `locationName:"resourceId" type:"string" required:"true"` - - // An Amazon Resource Name (ARN) of an IAM role that AWS ElementalMediaPackage - // will assume when accessing the key provider service. - // - // RoleArn is a required field - RoleArn *string `locationName:"roleArn" type:"string" required:"true"` - - // The system IDs to include in key requests. 
- // - // SystemIds is a required field - SystemIds []*string `locationName:"systemIds" type:"list" required:"true"` - - // The URL of the external key provider service. - // - // Url is a required field - Url *string `locationName:"url" type:"string" required:"true"` -} - -// String returns the string representation -func (s SpekeKeyProvider) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SpekeKeyProvider) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SpekeKeyProvider) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SpekeKeyProvider"} - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.SystemIds == nil { - invalidParams.Add(request.NewErrParamRequired("SystemIds")) - } - if s.Url == nil { - invalidParams.Add(request.NewErrParamRequired("Url")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceId sets the ResourceId field's value. -func (s *SpekeKeyProvider) SetResourceId(v string) *SpekeKeyProvider { - s.ResourceId = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *SpekeKeyProvider) SetRoleArn(v string) *SpekeKeyProvider { - s.RoleArn = &v - return s -} - -// SetSystemIds sets the SystemIds field's value. -func (s *SpekeKeyProvider) SetSystemIds(v []*string) *SpekeKeyProvider { - s.SystemIds = v - return s -} - -// SetUrl sets the Url field's value. -func (s *SpekeKeyProvider) SetUrl(v string) *SpekeKeyProvider { - s.Url = &v - return s -} - -// A StreamSelection configuration. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/StreamSelection -type StreamSelection struct { - _ struct{} `type:"structure"` - - // The maximum video bitrate (bps) to include in output. - MaxVideoBitsPerSecond *int64 `locationName:"maxVideoBitsPerSecond" type:"integer"` - - // The minimum video bitrate (bps) to include in output. - MinVideoBitsPerSecond *int64 `locationName:"minVideoBitsPerSecond" type:"integer"` - - // A directive that determines the order of streams in the output. - StreamOrder *string `locationName:"streamOrder" type:"string" enum:"StreamOrder"` -} - -// String returns the string representation -func (s StreamSelection) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StreamSelection) GoString() string { - return s.String() -} - -// SetMaxVideoBitsPerSecond sets the MaxVideoBitsPerSecond field's value. -func (s *StreamSelection) SetMaxVideoBitsPerSecond(v int64) *StreamSelection { - s.MaxVideoBitsPerSecond = &v - return s -} - -// SetMinVideoBitsPerSecond sets the MinVideoBitsPerSecond field's value. -func (s *StreamSelection) SetMinVideoBitsPerSecond(v int64) *StreamSelection { - s.MinVideoBitsPerSecond = &v - return s -} - -// SetStreamOrder sets the StreamOrder field's value. 
-func (s *StreamSelection) SetStreamOrder(v string) *StreamSelection { - s.StreamOrder = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/UpdateChannelRequest -type UpdateChannelInput struct { - _ struct{} `type:"structure"` - - Description *string `locationName:"description" type:"string"` - - // Id is a required field - Id *string `location:"uri" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateChannelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateChannelInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateChannelInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *UpdateChannelInput) SetDescription(v string) *UpdateChannelInput { - s.Description = &v - return s -} - -// SetId sets the Id field's value. -func (s *UpdateChannelInput) SetId(v string) *UpdateChannelInput { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/UpdateChannelResponse -type UpdateChannelOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - Description *string `locationName:"description" type:"string"` - - // An HTTP Live Streaming (HLS) ingest resource configuration. 
- HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` - - Id *string `locationName:"id" type:"string"` -} - -// String returns the string representation -func (s UpdateChannelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateChannelOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *UpdateChannelOutput) SetArn(v string) *UpdateChannelOutput { - s.Arn = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *UpdateChannelOutput) SetDescription(v string) *UpdateChannelOutput { - s.Description = &v - return s -} - -// SetHlsIngest sets the HlsIngest field's value. -func (s *UpdateChannelOutput) SetHlsIngest(v *HlsIngest) *UpdateChannelOutput { - s.HlsIngest = v - return s -} - -// SetId sets the Id field's value. -func (s *UpdateChannelOutput) SetId(v string) *UpdateChannelOutput { - s.Id = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/UpdateOriginEndpointRequest -type UpdateOriginEndpointInput struct { - _ struct{} `type:"structure"` - - // A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration. - DashPackage *DashPackage `locationName:"dashPackage" type:"structure"` - - Description *string `locationName:"description" type:"string"` - - // An HTTP Live Streaming (HLS) packaging configuration. - HlsPackage *HlsPackage `locationName:"hlsPackage" type:"structure"` - - // Id is a required field - Id *string `location:"uri" locationName:"id" type:"string" required:"true"` - - ManifestName *string `locationName:"manifestName" type:"string"` - - // A Microsoft Smooth Streaming (MSS) packaging configuration. 
- MssPackage *MssPackage `locationName:"mssPackage" type:"structure"` - - StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"` - - TimeDelaySeconds *int64 `locationName:"timeDelaySeconds" type:"integer"` - - Whitelist []*string `locationName:"whitelist" type:"list"` -} - -// String returns the string representation -func (s UpdateOriginEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateOriginEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateOriginEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateOriginEndpointInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.DashPackage != nil { - if err := s.DashPackage.Validate(); err != nil { - invalidParams.AddNested("DashPackage", err.(request.ErrInvalidParams)) - } - } - if s.HlsPackage != nil { - if err := s.HlsPackage.Validate(); err != nil { - invalidParams.AddNested("HlsPackage", err.(request.ErrInvalidParams)) - } - } - if s.MssPackage != nil { - if err := s.MssPackage.Validate(); err != nil { - invalidParams.AddNested("MssPackage", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDashPackage sets the DashPackage field's value. -func (s *UpdateOriginEndpointInput) SetDashPackage(v *DashPackage) *UpdateOriginEndpointInput { - s.DashPackage = v - return s -} - -// SetDescription sets the Description field's value. -func (s *UpdateOriginEndpointInput) SetDescription(v string) *UpdateOriginEndpointInput { - s.Description = &v - return s -} - -// SetHlsPackage sets the HlsPackage field's value. 
-func (s *UpdateOriginEndpointInput) SetHlsPackage(v *HlsPackage) *UpdateOriginEndpointInput { - s.HlsPackage = v - return s -} - -// SetId sets the Id field's value. -func (s *UpdateOriginEndpointInput) SetId(v string) *UpdateOriginEndpointInput { - s.Id = &v - return s -} - -// SetManifestName sets the ManifestName field's value. -func (s *UpdateOriginEndpointInput) SetManifestName(v string) *UpdateOriginEndpointInput { - s.ManifestName = &v - return s -} - -// SetMssPackage sets the MssPackage field's value. -func (s *UpdateOriginEndpointInput) SetMssPackage(v *MssPackage) *UpdateOriginEndpointInput { - s.MssPackage = v - return s -} - -// SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value. -func (s *UpdateOriginEndpointInput) SetStartoverWindowSeconds(v int64) *UpdateOriginEndpointInput { - s.StartoverWindowSeconds = &v - return s -} - -// SetTimeDelaySeconds sets the TimeDelaySeconds field's value. -func (s *UpdateOriginEndpointInput) SetTimeDelaySeconds(v int64) *UpdateOriginEndpointInput { - s.TimeDelaySeconds = &v - return s -} - -// SetWhitelist sets the Whitelist field's value. -func (s *UpdateOriginEndpointInput) SetWhitelist(v []*string) *UpdateOriginEndpointInput { - s.Whitelist = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/UpdateOriginEndpointResponse -type UpdateOriginEndpointOutput struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - ChannelId *string `locationName:"channelId" type:"string"` - - // A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration. - DashPackage *DashPackage `locationName:"dashPackage" type:"structure"` - - Description *string `locationName:"description" type:"string"` - - // An HTTP Live Streaming (HLS) packaging configuration. 
- HlsPackage *HlsPackage `locationName:"hlsPackage" type:"structure"` - - Id *string `locationName:"id" type:"string"` - - ManifestName *string `locationName:"manifestName" type:"string"` - - // A Microsoft Smooth Streaming (MSS) packaging configuration. - MssPackage *MssPackage `locationName:"mssPackage" type:"structure"` - - StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"` - - TimeDelaySeconds *int64 `locationName:"timeDelaySeconds" type:"integer"` - - Url *string `locationName:"url" type:"string"` - - Whitelist []*string `locationName:"whitelist" type:"list"` -} - -// String returns the string representation -func (s UpdateOriginEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateOriginEndpointOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *UpdateOriginEndpointOutput) SetArn(v string) *UpdateOriginEndpointOutput { - s.Arn = &v - return s -} - -// SetChannelId sets the ChannelId field's value. -func (s *UpdateOriginEndpointOutput) SetChannelId(v string) *UpdateOriginEndpointOutput { - s.ChannelId = &v - return s -} - -// SetDashPackage sets the DashPackage field's value. -func (s *UpdateOriginEndpointOutput) SetDashPackage(v *DashPackage) *UpdateOriginEndpointOutput { - s.DashPackage = v - return s -} - -// SetDescription sets the Description field's value. -func (s *UpdateOriginEndpointOutput) SetDescription(v string) *UpdateOriginEndpointOutput { - s.Description = &v - return s -} - -// SetHlsPackage sets the HlsPackage field's value. -func (s *UpdateOriginEndpointOutput) SetHlsPackage(v *HlsPackage) *UpdateOriginEndpointOutput { - s.HlsPackage = v - return s -} - -// SetId sets the Id field's value. -func (s *UpdateOriginEndpointOutput) SetId(v string) *UpdateOriginEndpointOutput { - s.Id = &v - return s -} - -// SetManifestName sets the ManifestName field's value. 
-func (s *UpdateOriginEndpointOutput) SetManifestName(v string) *UpdateOriginEndpointOutput { - s.ManifestName = &v - return s -} - -// SetMssPackage sets the MssPackage field's value. -func (s *UpdateOriginEndpointOutput) SetMssPackage(v *MssPackage) *UpdateOriginEndpointOutput { - s.MssPackage = v - return s -} - -// SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value. -func (s *UpdateOriginEndpointOutput) SetStartoverWindowSeconds(v int64) *UpdateOriginEndpointOutput { - s.StartoverWindowSeconds = &v - return s -} - -// SetTimeDelaySeconds sets the TimeDelaySeconds field's value. -func (s *UpdateOriginEndpointOutput) SetTimeDelaySeconds(v int64) *UpdateOriginEndpointOutput { - s.TimeDelaySeconds = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *UpdateOriginEndpointOutput) SetUrl(v string) *UpdateOriginEndpointOutput { - s.Url = &v - return s -} - -// SetWhitelist sets the Whitelist field's value. -func (s *UpdateOriginEndpointOutput) SetWhitelist(v []*string) *UpdateOriginEndpointOutput { - s.Whitelist = v - return s -} - -const ( - // AdMarkersNone is a AdMarkers enum value - AdMarkersNone = "NONE" - - // AdMarkersScte35Enhanced is a AdMarkers enum value - AdMarkersScte35Enhanced = "SCTE35_ENHANCED" - - // AdMarkersPassthrough is a AdMarkers enum value - AdMarkersPassthrough = "PASSTHROUGH" -) - -const ( - // EncryptionMethodAes128 is a EncryptionMethod enum value - EncryptionMethodAes128 = "AES_128" - - // EncryptionMethodSampleAes is a EncryptionMethod enum value - EncryptionMethodSampleAes = "SAMPLE_AES" -) - -const ( - // PlaylistTypeNone is a PlaylistType enum value - PlaylistTypeNone = "NONE" - - // PlaylistTypeEvent is a PlaylistType enum value - PlaylistTypeEvent = "EVENT" - - // PlaylistTypeVod is a PlaylistType enum value - PlaylistTypeVod = "VOD" -) - -const ( - // ProfileNone is a Profile enum value - ProfileNone = "NONE" - - // ProfileHbbtv15 is a Profile enum value - ProfileHbbtv15 = "HBBTV_1_5" -) - -const 
( - // StreamOrderOriginal is a StreamOrder enum value - StreamOrderOriginal = "ORIGINAL" - - // StreamOrderVideoBitrateAscending is a StreamOrder enum value - StreamOrderVideoBitrateAscending = "VIDEO_BITRATE_ASCENDING" - - // StreamOrderVideoBitrateDescending is a StreamOrder enum value - StreamOrderVideoBitrateDescending = "VIDEO_BITRATE_DESCENDING" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/doc.go b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/doc.go deleted file mode 100644 index 03c86834991..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package mediapackage provides the client and types for making API -// requests to AWS Elemental MediaPackage. -// -// AWS Elemental MediaPackage -// -// See https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12 for more information on this service. -// -// See mediapackage package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/mediapackage/ -// -// Using the Client -// -// To contact AWS Elemental MediaPackage with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Elemental MediaPackage client MediaPackage for more -// information on creating client for this service. 
-// https://docs.aws.amazon.com/sdk-for-go/api/service/mediapackage/#New -package mediapackage diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/errors.go b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/errors.go deleted file mode 100644 index c6ba3649e5c..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/errors.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package mediapackage - -const ( - - // ErrCodeForbiddenException for service response error code - // "ForbiddenException". - ErrCodeForbiddenException = "ForbiddenException" - - // ErrCodeInternalServerErrorException for service response error code - // "InternalServerErrorException". - ErrCodeInternalServerErrorException = "InternalServerErrorException" - - // ErrCodeNotFoundException for service response error code - // "NotFoundException". - ErrCodeNotFoundException = "NotFoundException" - - // ErrCodeServiceUnavailableException for service response error code - // "ServiceUnavailableException". - ErrCodeServiceUnavailableException = "ServiceUnavailableException" - - // ErrCodeTooManyRequestsException for service response error code - // "TooManyRequestsException". - ErrCodeTooManyRequestsException = "TooManyRequestsException" - - // ErrCodeUnprocessableEntityException for service response error code - // "UnprocessableEntityException". - ErrCodeUnprocessableEntityException = "UnprocessableEntityException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go deleted file mode 100644 index 2447a2092bf..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go +++ /dev/null @@ -1,97 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package mediapackage - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/restjson" -) - -// MediaPackage provides the API operation methods for making requests to -// AWS Elemental MediaPackage. See this package's package overview docs -// for details on the service. -// -// MediaPackage methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type MediaPackage struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "mediapackage" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the MediaPackage client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a MediaPackage client from just a session. -// svc := mediapackage.New(mySession) -// -// // Create a MediaPackage client with additional configuration -// svc := mediapackage.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaPackage { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaPackage { - if len(signingName) == 0 { - signingName = "mediapackage" - } - svc := &MediaPackage{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2017-10-12", - JSONVersion: "1.1", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a MediaPackage operation and runs any -// custom request initialization. -func (c *MediaPackage) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go deleted file mode 100644 index 601f06ce87d..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go +++ /dev/null @@ -1,1103 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package mediastoredata - -import ( - "io" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" -) - -const opDeleteObject = "DeleteObject" - -// DeleteObjectRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObject operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteObject for more information on using the DeleteObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteObjectRequest method. -// req, resp := client.DeleteObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/DeleteObject -func (c *MediaStoreData) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { - op := &request.Operation{ - Name: opDeleteObject, - HTTPMethod: "DELETE", - HTTPPath: "/{Path+}", - } - - if input == nil { - input = &DeleteObjectInput{} - } - - output = &DeleteObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteObject API operation for AWS Elemental MediaStore Data Plane. -// -// Deletes an object at the specified path. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for AWS Elemental MediaStore Data Plane's -// API operation DeleteObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeContainerNotFoundException "ContainerNotFoundException" -// The specified container was not found for the specified account. -// -// * ErrCodeObjectNotFoundException "ObjectNotFoundException" -// Could not perform an operation on an object that does not exist. -// -// * ErrCodeInternalServerError "InternalServerError" -// The service is temporarily unavailable. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/DeleteObject -func (c *MediaStoreData) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { - req, out := c.DeleteObjectRequest(input) - return out, req.Send() -} - -// DeleteObjectWithContext is the same as DeleteObject with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaStoreData) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { - req, out := c.DeleteObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeObject = "DescribeObject" - -// DescribeObjectRequest generates a "aws/request.Request" representing the -// client's request for the DescribeObject operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DescribeObject for more information on using the DescribeObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeObjectRequest method. -// req, resp := client.DescribeObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/DescribeObject -func (c *MediaStoreData) DescribeObjectRequest(input *DescribeObjectInput) (req *request.Request, output *DescribeObjectOutput) { - op := &request.Operation{ - Name: opDescribeObject, - HTTPMethod: "HEAD", - HTTPPath: "/{Path+}", - } - - if input == nil { - input = &DescribeObjectInput{} - } - - output = &DescribeObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeObject API operation for AWS Elemental MediaStore Data Plane. -// -// Gets the headers for an object at the specified path. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaStore Data Plane's -// API operation DescribeObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeContainerNotFoundException "ContainerNotFoundException" -// The specified container was not found for the specified account. -// -// * ErrCodeObjectNotFoundException "ObjectNotFoundException" -// Could not perform an operation on an object that does not exist. -// -// * ErrCodeInternalServerError "InternalServerError" -// The service is temporarily unavailable. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/DescribeObject -func (c *MediaStoreData) DescribeObject(input *DescribeObjectInput) (*DescribeObjectOutput, error) { - req, out := c.DescribeObjectRequest(input) - return out, req.Send() -} - -// DescribeObjectWithContext is the same as DescribeObject with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaStoreData) DescribeObjectWithContext(ctx aws.Context, input *DescribeObjectInput, opts ...request.Option) (*DescribeObjectOutput, error) { - req, out := c.DescribeObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObject = "GetObject" - -// GetObjectRequest generates a "aws/request.Request" representing the -// client's request for the GetObject operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObject for more information on using the GetObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetObjectRequest method. 
-// req, resp := client.GetObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/GetObject -func (c *MediaStoreData) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { - op := &request.Operation{ - Name: opGetObject, - HTTPMethod: "GET", - HTTPPath: "/{Path+}", - } - - if input == nil { - input = &GetObjectInput{} - } - - output = &GetObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObject API operation for AWS Elemental MediaStore Data Plane. -// -// Downloads the object at the specified path. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaStore Data Plane's -// API operation GetObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeContainerNotFoundException "ContainerNotFoundException" -// The specified container was not found for the specified account. -// -// * ErrCodeObjectNotFoundException "ObjectNotFoundException" -// Could not perform an operation on an object that does not exist. -// -// * ErrCodeRequestedRangeNotSatisfiableException "RequestedRangeNotSatisfiableException" -// The requested content range is not valid. -// -// * ErrCodeInternalServerError "InternalServerError" -// The service is temporarily unavailable. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/GetObject -func (c *MediaStoreData) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { - req, out := c.GetObjectRequest(input) - return out, req.Send() -} - -// GetObjectWithContext is the same as GetObject with the addition of -// the ability to pass a context and additional request options. 
-// -// See GetObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaStoreData) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { - req, out := c.GetObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListItems = "ListItems" - -// ListItemsRequest generates a "aws/request.Request" representing the -// client's request for the ListItems operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListItems for more information on using the ListItems -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListItemsRequest method. 
-// req, resp := client.ListItemsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/ListItems -func (c *MediaStoreData) ListItemsRequest(input *ListItemsInput) (req *request.Request, output *ListItemsOutput) { - op := &request.Operation{ - Name: opListItems, - HTTPMethod: "GET", - HTTPPath: "/", - } - - if input == nil { - input = &ListItemsInput{} - } - - output = &ListItemsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListItems API operation for AWS Elemental MediaStore Data Plane. -// -// Provides a list of metadata entries about folders and objects in the specified -// folder. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaStore Data Plane's -// API operation ListItems for usage and error information. -// -// Returned Error Codes: -// * ErrCodeContainerNotFoundException "ContainerNotFoundException" -// The specified container was not found for the specified account. -// -// * ErrCodeInternalServerError "InternalServerError" -// The service is temporarily unavailable. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/ListItems -func (c *MediaStoreData) ListItems(input *ListItemsInput) (*ListItemsOutput, error) { - req, out := c.ListItemsRequest(input) - return out, req.Send() -} - -// ListItemsWithContext is the same as ListItems with the addition of -// the ability to pass a context and additional request options. -// -// See ListItems for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaStoreData) ListItemsWithContext(ctx aws.Context, input *ListItemsInput, opts ...request.Option) (*ListItemsOutput, error) { - req, out := c.ListItemsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObject = "PutObject" - -// PutObjectRequest generates a "aws/request.Request" representing the -// client's request for the PutObject operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutObject for more information on using the PutObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutObjectRequest method. 
-// req, resp := client.PutObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/PutObject -func (c *MediaStoreData) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { - op := &request.Operation{ - Name: opPutObject, - HTTPMethod: "PUT", - HTTPPath: "/{Path+}", - } - - if input == nil { - input = &PutObjectInput{} - } - - output = &PutObjectOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Sign.Remove(v4.SignRequestHandler) - handler := v4.BuildNamedHandler("v4.CustomSignerHandler", v4.WithUnsignedPayload) - req.Handlers.Sign.PushFrontNamed(handler) - return -} - -// PutObject API operation for AWS Elemental MediaStore Data Plane. -// -// Uploads an object to the specified path. Object sizes are limited to 10 MB. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Elemental MediaStore Data Plane's -// API operation PutObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeContainerNotFoundException "ContainerNotFoundException" -// The specified container was not found for the specified account. -// -// * ErrCodeInternalServerError "InternalServerError" -// The service is temporarily unavailable. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/PutObject -func (c *MediaStoreData) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) - return out, req.Send() -} - -// PutObjectWithContext is the same as PutObject with the addition of -// the ability to pass a context and additional request options. -// -// See PutObject for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *MediaStoreData) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/DeleteObjectRequest -type DeleteObjectInput struct { - _ struct{} `type:"structure"` - - // The path (including the file name) where the object is stored in the container. - // Format: // - // - // Path is a required field - Path *string `location:"uri" locationName:"Path" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} - if s.Path == nil { - invalidParams.Add(request.NewErrParamRequired("Path")) - } - if s.Path != nil && len(*s.Path) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Path", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPath sets the Path field's value. 
-func (s *DeleteObjectInput) SetPath(v string) *DeleteObjectInput { - s.Path = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/DeleteObjectResponse -type DeleteObjectOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/DescribeObjectRequest -type DescribeObjectInput struct { - _ struct{} `type:"structure"` - - // The path (including the file name) where the object is stored in the container. - // Format: // - // - // Path is a required field - Path *string `location:"uri" locationName:"Path" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeObjectInput"} - if s.Path == nil { - invalidParams.Add(request.NewErrParamRequired("Path")) - } - if s.Path != nil && len(*s.Path) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Path", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPath sets the Path field's value. 
-func (s *DescribeObjectInput) SetPath(v string) *DescribeObjectInput { - s.Path = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/DescribeObjectResponse -type DescribeObjectOutput struct { - _ struct{} `type:"structure"` - - // An optional CacheControl header that allows the caller to control the object's - // cache behavior. Headers can be passed in as specified in the HTTP at https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 - // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). - // - // Headers with a custom user-defined value are also accepted. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // The length of the object in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // The content type of the object. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The ETag that represents a unique instance of the object. - ETag *string `location:"header" locationName:"ETag" min:"1" type:"string"` - - // The date and time that the object was last modified. - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` -} - -// String returns the string representation -func (s DescribeObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeObjectOutput) GoString() string { - return s.String() -} - -// SetCacheControl sets the CacheControl field's value. -func (s *DescribeObjectOutput) SetCacheControl(v string) *DescribeObjectOutput { - s.CacheControl = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *DescribeObjectOutput) SetContentLength(v int64) *DescribeObjectOutput { - s.ContentLength = &v - return s -} - -// SetContentType sets the ContentType field's value. 
-func (s *DescribeObjectOutput) SetContentType(v string) *DescribeObjectOutput { - s.ContentType = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *DescribeObjectOutput) SetETag(v string) *DescribeObjectOutput { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *DescribeObjectOutput) SetLastModified(v time.Time) *DescribeObjectOutput { - s.LastModified = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/GetObjectRequest -type GetObjectInput struct { - _ struct{} `type:"structure"` - - // The path (including the file name) where the object is stored in the container. - // Format: // - // - // For example, to upload the file mlaw.avi to the folder path premium\canada - // in the container movies, enter the path premium/canada/mlaw.avi. - // - // Do not include the container name in this path. - // - // If the path includes any folders that don't exist yet, the service creates - // them. For example, suppose you have an existing premium/usa subfolder. If - // you specify premium/canada, the service creates a canada subfolder in the - // premium folder. You then have two subfolders, usa and canada, in the premium - // folder. - // - // There is no correlation between the path to the source and the path (folders) - // in the container in AWS Elemental MediaStore. - // - // For more information about folders and how they exist in a container, see - // the AWS Elemental MediaStore User Guide (http://docs.aws.amazon.com/mediastore/latest/ug/). - // - // The file name is the name that is assigned to the file that you upload. The - // file can have the same name inside and outside of AWS Elemental MediaStore, - // or it can have the same name. The file name can include or omit an extension. 
- // - // Path is a required field - Path *string `location:"uri" locationName:"Path" min:"1" type:"string" required:"true"` - - // The range bytes of an object to retrieve. For more information about the - // Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). - Range *string `location:"header" locationName:"Range" type:"string"` -} - -// String returns the string representation -func (s GetObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} - if s.Path == nil { - invalidParams.Add(request.NewErrParamRequired("Path")) - } - if s.Path != nil && len(*s.Path) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Path", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPath sets the Path field's value. -func (s *GetObjectInput) SetPath(v string) *GetObjectInput { - s.Path = &v - return s -} - -// SetRange sets the Range field's value. -func (s *GetObjectInput) SetRange(v string) *GetObjectInput { - s.Range = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/GetObjectResponse -type GetObjectOutput struct { - _ struct{} `type:"structure" payload:"Body"` - - // The bytes of the object. - Body io.ReadCloser `type:"blob"` - - // An optional CacheControl header that allows the caller to control the object's - // cache behavior. Headers can be passed in as specified in the HTTP spec at - // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). 
- // - // Headers with a custom user-defined value are also accepted. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // The length of the object in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // The range of bytes to retrieve. - ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` - - // The content type of the object. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The ETag that represents a unique instance of the object. - ETag *string `location:"header" locationName:"ETag" min:"1" type:"string"` - - // The date and time that the object was last modified. - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` - - // The HTML status code of the request. Status codes ranging from 200 to 299 - // indicate success. All other status codes indicate the type of error that - // occurred. - // - // StatusCode is a required field - StatusCode *int64 `location:"statusCode" type:"integer" required:"true"` -} - -// String returns the string representation -func (s GetObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectOutput) GoString() string { - return s.String() -} - -// SetBody sets the Body field's value. -func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { - s.Body = v - return s -} - -// SetCacheControl sets the CacheControl field's value. -func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { - s.CacheControl = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { - s.ContentLength = &v - return s -} - -// SetContentRange sets the ContentRange field's value. 
-func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { - s.ContentRange = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { - s.ContentType = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { - s.LastModified = &v - return s -} - -// SetStatusCode sets the StatusCode field's value. -func (s *GetObjectOutput) SetStatusCode(v int64) *GetObjectOutput { - s.StatusCode = &v - return s -} - -// A metadata entry for a folder or object. -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/Item -type Item struct { - _ struct{} `type:"structure"` - - // The length of the item in bytes. - ContentLength *int64 `type:"long"` - - // The content type of the item. - ContentType *string `type:"string"` - - // The ETag that represents a unique instance of the item. - ETag *string `min:"1" type:"string"` - - // The date and time that the item was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The name of the item. - Name *string `type:"string"` - - // The item type (folder or object). - Type *string `type:"string" enum:"ItemType"` -} - -// String returns the string representation -func (s Item) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Item) GoString() string { - return s.String() -} - -// SetContentLength sets the ContentLength field's value. -func (s *Item) SetContentLength(v int64) *Item { - s.ContentLength = &v - return s -} - -// SetContentType sets the ContentType field's value. 
-func (s *Item) SetContentType(v string) *Item { - s.ContentType = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *Item) SetETag(v string) *Item { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *Item) SetLastModified(v time.Time) *Item { - s.LastModified = &v - return s -} - -// SetName sets the Name field's value. -func (s *Item) SetName(v string) *Item { - s.Name = &v - return s -} - -// SetType sets the Type field's value. -func (s *Item) SetType(v string) *Item { - s.Type = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/ListItemsRequest -type ListItemsInput struct { - _ struct{} `type:"structure"` - - // The maximum results to return. The service might return fewer results. - MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` - - // The NextToken received in the ListItemsResponse for the same container and - // path. Tokens expire after 15 minutes. - NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` - - // The path in the container from which to retrieve items. Format: // - Path *string `location:"querystring" locationName:"Path" type:"string"` -} - -// String returns the string representation -func (s ListItemsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListItemsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListItemsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListItemsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. 
-func (s *ListItemsInput) SetMaxResults(v int64) *ListItemsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListItemsInput) SetNextToken(v string) *ListItemsInput { - s.NextToken = &v - return s -} - -// SetPath sets the Path field's value. -func (s *ListItemsInput) SetPath(v string) *ListItemsInput { - s.Path = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/ListItemsResponse -type ListItemsOutput struct { - _ struct{} `type:"structure"` - - // Metadata entries for the folders and objects at the requested path. - Items []*Item `type:"list"` - - // The NextToken used to request the next page of results using ListItems. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s ListItemsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListItemsOutput) GoString() string { - return s.String() -} - -// SetItems sets the Items field's value. -func (s *ListItemsOutput) SetItems(v []*Item) *ListItemsOutput { - s.Items = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListItemsOutput) SetNextToken(v string) *ListItemsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/PutObjectRequest -type PutObjectInput struct { - _ struct{} `type:"structure" payload:"Body"` - - // The bytes to be stored. - // - // Body is a required field - Body io.ReadSeeker `type:"blob" required:"true"` - - // An optional CacheControl header that allows the caller to control the object's - // cache behavior. Headers can be passed in as specified in the HTTP at https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 - // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). - // - // Headers with a custom user-defined value are also accepted. 
- CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // The content type of the object. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The path (including the file name) where the object is stored in the container. - // Format: // - // - // For example, to upload the file mlaw.avi to the folder path premium\canada - // in the container movies, enter the path premium/canada/mlaw.avi. - // - // Do not include the container name in this path. - // - // If the path includes any folders that don't exist yet, the service creates - // them. For example, suppose you have an existing premium/usa subfolder. If - // you specify premium/canada, the service creates a canada subfolder in the - // premium folder. You then have two subfolders, usa and canada, in the premium - // folder. - // - // There is no correlation between the path to the source and the path (folders) - // in the container in AWS Elemental MediaStore. - // - // For more information about folders and how they exist in a container, see - // the AWS Elemental MediaStore User Guide (http://docs.aws.amazon.com/mediastore/latest/ug/). - // - // The file name is the name that is assigned to the file that you upload. The - // file can have the same name inside and outside of AWS Elemental MediaStore, - // or it can have the same name. The file name can include or omit an extension. - // - // Path is a required field - Path *string `location:"uri" locationName:"Path" min:"1" type:"string" required:"true"` - - // Indicates the storage class of a Put request. Defaults to high-performance - // temporal storage class, and objects are persisted into durable storage shortly - // after being received. 
- StorageClass *string `location:"header" locationName:"x-amz-storage-class" min:"1" type:"string" enum:"StorageClass"` -} - -// String returns the string representation -func (s PutObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} - if s.Body == nil { - invalidParams.Add(request.NewErrParamRequired("Body")) - } - if s.Path == nil { - invalidParams.Add(request.NewErrParamRequired("Path")) - } - if s.Path != nil && len(*s.Path) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Path", 1)) - } - if s.StorageClass != nil && len(*s.StorageClass) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StorageClass", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBody sets the Body field's value. -func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { - s.Body = v - return s -} - -// SetCacheControl sets the CacheControl field's value. -func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { - s.CacheControl = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { - s.ContentType = &v - return s -} - -// SetPath sets the Path field's value. -func (s *PutObjectInput) SetPath(v string) *PutObjectInput { - s.Path = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. 
-func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { - s.StorageClass = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01/PutObjectResponse -type PutObjectOutput struct { - _ struct{} `type:"structure"` - - // The SHA256 digest of the object that is persisted. - ContentSHA256 *string `min:"64" type:"string"` - - // Unique identifier of the object in the container. - ETag *string `min:"1" type:"string"` - - // The storage class where the object was persisted. Should be “Temporal”. - StorageClass *string `min:"1" type:"string" enum:"StorageClass"` -} - -// String returns the string representation -func (s PutObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectOutput) GoString() string { - return s.String() -} - -// SetContentSHA256 sets the ContentSHA256 field's value. -func (s *PutObjectOutput) SetContentSHA256(v string) *PutObjectOutput { - s.ContentSHA256 = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { - s.ETag = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *PutObjectOutput) SetStorageClass(v string) *PutObjectOutput { - s.StorageClass = &v - return s -} - -const ( - // ItemTypeObject is a ItemType enum value - ItemTypeObject = "OBJECT" - - // ItemTypeFolder is a ItemType enum value - ItemTypeFolder = "FOLDER" -) - -const ( - // StorageClassTemporal is a StorageClass enum value - StorageClassTemporal = "TEMPORAL" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/doc.go b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/doc.go deleted file mode 100644 index 2d0c4fbeade..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/doc.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -// Package mediastoredata provides the client and types for making API -// requests to AWS Elemental MediaStore Data Plane. -// -// An AWS Elemental MediaStore asset is an object, similar to an object in the -// Amazon S3 service. Objects are the fundamental entities that are stored in -// AWS Elemental MediaStore. -// -// See https://docs.aws.amazon.com/goto/WebAPI/mediastore-data-2017-09-01 for more information on this service. -// -// See mediastoredata package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/mediastoredata/ -// -// Using the Client -// -// To contact AWS Elemental MediaStore Data Plane with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Elemental MediaStore Data Plane client MediaStoreData for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/mediastoredata/#New -package mediastoredata diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/errors.go b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/errors.go deleted file mode 100644 index 95f0acdbd0f..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/errors.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package mediastoredata - -const ( - - // ErrCodeContainerNotFoundException for service response error code - // "ContainerNotFoundException". - // - // The specified container was not found for the specified account. 
- ErrCodeContainerNotFoundException = "ContainerNotFoundException" - - // ErrCodeInternalServerError for service response error code - // "InternalServerError". - // - // The service is temporarily unavailable. - ErrCodeInternalServerError = "InternalServerError" - - // ErrCodeObjectNotFoundException for service response error code - // "ObjectNotFoundException". - // - // Could not perform an operation on an object that does not exist. - ErrCodeObjectNotFoundException = "ObjectNotFoundException" - - // ErrCodeRequestedRangeNotSatisfiableException for service response error code - // "RequestedRangeNotSatisfiableException". - // - // The requested content range is not valid. - ErrCodeRequestedRangeNotSatisfiableException = "RequestedRangeNotSatisfiableException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go deleted file mode 100644 index 8947fd8ce45..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go +++ /dev/null @@ -1,96 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package mediastoredata - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/restjson" -) - -// MediaStoreData provides the API operation methods for making requests to -// AWS Elemental MediaStore Data Plane. See this package's package overview docs -// for details on the service. -// -// MediaStoreData methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. 
-type MediaStoreData struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "data.mediastore" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the MediaStoreData client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a MediaStoreData client from just a session. -// svc := mediastoredata.New(mySession) -// -// // Create a MediaStoreData client with additional configuration -// svc := mediastoredata.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaStoreData { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaStoreData { - if len(signingName) == 0 { - signingName = "mediastore" - } - svc := &MediaStoreData{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2017-09-01", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a MediaStoreData operation and runs any -// custom request initialization. -func (c *MediaStoreData) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go deleted file mode 100644 index 96a70c949d8..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go +++ /dev/null @@ -1,7569 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package sagemaker - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -const opAddTags = "AddTags" - -// AddTagsRequest generates a "aws/request.Request" representing the -// client's request for the AddTags operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AddTags for more information on using the AddTags -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AddTagsRequest method. -// req, resp := client.AddTagsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AddTags -func (c *SageMaker) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { - op := &request.Operation{ - Name: opAddTags, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AddTagsInput{} - } - - output = &AddTagsOutput{} - req = c.newRequest(op, input, output) - return -} - -// AddTags API operation for Amazon SageMaker Service. -// -// Adds or overwrites one or more tags for the specified Amazon SageMaker resource. -// You can add tags to notebook instances, training jobs, models, endpoint configurations, -// and endpoints. -// -// Each tag consists of a key and an optional value. Tag keys must be unique -// per resource. 
For more information about tags, see Using Cost Allocation -// Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) -// in the AWS Billing and Cost Management User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation AddTags for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AddTags -func (c *SageMaker) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { - req, out := c.AddTagsRequest(input) - return out, req.Send() -} - -// AddTagsWithContext is the same as AddTags with the addition of -// the ability to pass a context and additional request options. -// -// See AddTags for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) AddTagsWithContext(ctx aws.Context, input *AddTagsInput, opts ...request.Option) (*AddTagsOutput, error) { - req, out := c.AddTagsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateEndpoint = "CreateEndpoint" - -// CreateEndpointRequest generates a "aws/request.Request" representing the -// client's request for the CreateEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See CreateEndpoint for more information on using the CreateEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateEndpointRequest method. -// req, resp := client.CreateEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpoint -func (c *SageMaker) CreateEndpointRequest(input *CreateEndpointInput) (req *request.Request, output *CreateEndpointOutput) { - op := &request.Operation{ - Name: opCreateEndpoint, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateEndpointInput{} - } - - output = &CreateEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateEndpoint API operation for Amazon SageMaker Service. -// -// Creates an endpoint using the endpoint configuration specified in the request. -// Amazon SageMaker uses the endpoint to provision resources and deploy models. -// You create the endpoint configuration with the CreateEndpointConfig (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html) -// API. -// -// Use this API only for hosting models using Amazon SageMaker hosting services. -// -// The endpoint name must be unique within an AWS Region in your AWS account. -// -// When it receives the request, Amazon SageMaker creates the endpoint, launches -// the resources (ML compute instances), and deploys the model(s) on them. -// -// When Amazon SageMaker receives the request, it sets the endpoint status to -// Creating. After it creates the endpoint, it sets the status to InService. -// Amazon SageMaker can then process incoming requests for inferences. 
To check -// the status of an endpoint, use the DescribeEndpoint (http://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html) -// API. -// -// For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon -// SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/ex1.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation CreateEndpoint for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" -// You have exceeded an Amazon SageMaker resource limit. For example, you might -// have too many training jobs created. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpoint -func (c *SageMaker) CreateEndpoint(input *CreateEndpointInput) (*CreateEndpointOutput, error) { - req, out := c.CreateEndpointRequest(input) - return out, req.Send() -} - -// CreateEndpointWithContext is the same as CreateEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See CreateEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) CreateEndpointWithContext(ctx aws.Context, input *CreateEndpointInput, opts ...request.Option) (*CreateEndpointOutput, error) { - req, out := c.CreateEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opCreateEndpointConfig = "CreateEndpointConfig" - -// CreateEndpointConfigRequest generates a "aws/request.Request" representing the -// client's request for the CreateEndpointConfig operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateEndpointConfig for more information on using the CreateEndpointConfig -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateEndpointConfigRequest method. -// req, resp := client.CreateEndpointConfigRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointConfig -func (c *SageMaker) CreateEndpointConfigRequest(input *CreateEndpointConfigInput) (req *request.Request, output *CreateEndpointConfigOutput) { - op := &request.Operation{ - Name: opCreateEndpointConfig, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateEndpointConfigInput{} - } - - output = &CreateEndpointConfigOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateEndpointConfig API operation for Amazon SageMaker Service. -// -// Creates an endpoint configuration that Amazon SageMaker hosting services -// uses to deploy models. In the configuration, you identify one or more models, -// created using the CreateModel API, to deploy and the resources that you want -// Amazon SageMaker to provision. 
Then you call the CreateEndpoint (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html) -// API. -// -// Use this API only if you want to use Amazon SageMaker hosting services to -// deploy models into production. -// -// In the request, you define one or more ProductionVariants, each of which -// identifies a model. Each ProductionVariant parameter also describes the resources -// that you want Amazon SageMaker to provision. This includes the number and -// type of ML compute instances to deploy. -// -// If you are hosting multiple models, you also assign a VariantWeight to specify -// how much traffic you want to allocate to each model. For example, suppose -// that you want to host two models, A and B, and you assign traffic weight -// 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds -// of the traffic to Model A, and one-third to model B. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation CreateEndpointConfig for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" -// You have exceeded an Amazon SageMaker resource limit. For example, you might -// have too many training jobs created. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointConfig -func (c *SageMaker) CreateEndpointConfig(input *CreateEndpointConfigInput) (*CreateEndpointConfigOutput, error) { - req, out := c.CreateEndpointConfigRequest(input) - return out, req.Send() -} - -// CreateEndpointConfigWithContext is the same as CreateEndpointConfig with the addition of -// the ability to pass a context and additional request options. -// -// See CreateEndpointConfig for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) CreateEndpointConfigWithContext(ctx aws.Context, input *CreateEndpointConfigInput, opts ...request.Option) (*CreateEndpointConfigOutput, error) { - req, out := c.CreateEndpointConfigRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateModel = "CreateModel" - -// CreateModelRequest generates a "aws/request.Request" representing the -// client's request for the CreateModel operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateModel for more information on using the CreateModel -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateModelRequest method. 
-// req, resp := client.CreateModelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateModel -func (c *SageMaker) CreateModelRequest(input *CreateModelInput) (req *request.Request, output *CreateModelOutput) { - op := &request.Operation{ - Name: opCreateModel, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateModelInput{} - } - - output = &CreateModelOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateModel API operation for Amazon SageMaker Service. -// -// Creates a model in Amazon SageMaker. In the request, you name the model and -// describe one or more containers. For each container, you specify the docker -// image containing inference code, artifacts (from prior training), and custom -// environment map that the inference code uses when you deploy the model into -// production. -// -// Use this API to create a model only if you want to use Amazon SageMaker hosting -// services. To host your model, you create an endpoint configuration with the -// CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint -// API. -// -// Amazon SageMaker then deploys all of the containers that you defined for -// the model in the hosting environment. -// -// In the CreateModel request, you must define a container with the PrimaryContainer -// parameter. -// -// In the request, you also provide an IAM role that Amazon SageMaker can assume -// to access model artifacts and docker image for deployment on ML compute hosting -// instances. In addition, you also use the IAM role to manage permissions the -// inference code needs. For example, if the inference code access any other -// AWS resources, you grant necessary permissions via this role. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation CreateModel for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" -// You have exceeded an Amazon SageMaker resource limit. For example, you might -// have too many training jobs created. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateModel -func (c *SageMaker) CreateModel(input *CreateModelInput) (*CreateModelOutput, error) { - req, out := c.CreateModelRequest(input) - return out, req.Send() -} - -// CreateModelWithContext is the same as CreateModel with the addition of -// the ability to pass a context and additional request options. -// -// See CreateModel for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) CreateModelWithContext(ctx aws.Context, input *CreateModelInput, opts ...request.Option) (*CreateModelOutput, error) { - req, out := c.CreateModelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateNotebookInstance = "CreateNotebookInstance" - -// CreateNotebookInstanceRequest generates a "aws/request.Request" representing the -// client's request for the CreateNotebookInstance operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See CreateNotebookInstance for more information on using the CreateNotebookInstance -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateNotebookInstanceRequest method. -// req, resp := client.CreateNotebookInstanceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateNotebookInstance -func (c *SageMaker) CreateNotebookInstanceRequest(input *CreateNotebookInstanceInput) (req *request.Request, output *CreateNotebookInstanceOutput) { - op := &request.Operation{ - Name: opCreateNotebookInstance, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateNotebookInstanceInput{} - } - - output = &CreateNotebookInstanceOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateNotebookInstance API operation for Amazon SageMaker Service. -// -// Creates an Amazon SageMaker notebook instance. A notebook instance is an -// ML compute instance running on a Jupyter notebook. -// -// In a CreateNotebookInstance request, you specify the type of ML compute instance -// that you want to run. Amazon SageMaker launches the instance, installs common -// libraries that you can use to explore datasets for model training, and attaches -// an ML storage volume to the notebook instance. -// -// Amazon SageMaker also provides a set of example notebooks. Each notebook -// demonstrates how to use Amazon SageMaker with a specific an algorithm or -// with a machine learning framework. -// -// After receiving the request, Amazon SageMaker does the following: -// -// Creates a network interface in the Amazon SageMaker VPC. 
-// -// (Option) If you specified SubnetId, creates a network interface in your own -// VPC, which is inferred from the subnet ID that you provide in the input. -// When creating this network interface, Amazon SageMaker attaches the security -// group that you specified in the request to the network interface that it -// creates in your VPC. -// -// Launches an EC2 instance of the type specified in the request in the Amazon -// SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies -// both network interfaces when launching this instance. This enables inbound -// traffic from your own VPC to the notebook instance, assuming that the security -// groups allow it. -// -// After creating the notebook instance, Amazon SageMaker returns its Amazon -// Resource Name (ARN). -// -// After Amazon SageMaker creates the notebook instance, you can connect to -// the Jupyter server and work in Jupyter notebooks. For example, you can write -// code to explore a dataset that you can use for model training, train a model, -// host models by creating Amazon SageMaker endpoints, and validate hosted models. -// -// For more information, see How It Works (http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation CreateNotebookInstance for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" -// You have exceeded an Amazon SageMaker resource limit. For example, you might -// have too many training jobs created. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateNotebookInstance -func (c *SageMaker) CreateNotebookInstance(input *CreateNotebookInstanceInput) (*CreateNotebookInstanceOutput, error) { - req, out := c.CreateNotebookInstanceRequest(input) - return out, req.Send() -} - -// CreateNotebookInstanceWithContext is the same as CreateNotebookInstance with the addition of -// the ability to pass a context and additional request options. -// -// See CreateNotebookInstance for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) CreateNotebookInstanceWithContext(ctx aws.Context, input *CreateNotebookInstanceInput, opts ...request.Option) (*CreateNotebookInstanceOutput, error) { - req, out := c.CreateNotebookInstanceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreatePresignedNotebookInstanceUrl = "CreatePresignedNotebookInstanceUrl" - -// CreatePresignedNotebookInstanceUrlRequest generates a "aws/request.Request" representing the -// client's request for the CreatePresignedNotebookInstanceUrl operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreatePresignedNotebookInstanceUrl for more information on using the CreatePresignedNotebookInstanceUrl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the CreatePresignedNotebookInstanceUrlRequest method. -// req, resp := client.CreatePresignedNotebookInstanceUrlRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreatePresignedNotebookInstanceUrl -func (c *SageMaker) CreatePresignedNotebookInstanceUrlRequest(input *CreatePresignedNotebookInstanceUrlInput) (req *request.Request, output *CreatePresignedNotebookInstanceUrlOutput) { - op := &request.Operation{ - Name: opCreatePresignedNotebookInstanceUrl, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreatePresignedNotebookInstanceUrlInput{} - } - - output = &CreatePresignedNotebookInstanceUrlOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreatePresignedNotebookInstanceUrl API operation for Amazon SageMaker Service. -// -// Returns a URL that you can use to connect to the Juypter server from a notebook -// instance. In the Amazon SageMaker console, when you choose Open next to a -// notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server -// home page from the notebook instance. The console uses this API to get the -// URL and show the page. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation CreatePresignedNotebookInstanceUrl for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreatePresignedNotebookInstanceUrl -func (c *SageMaker) CreatePresignedNotebookInstanceUrl(input *CreatePresignedNotebookInstanceUrlInput) (*CreatePresignedNotebookInstanceUrlOutput, error) { - req, out := c.CreatePresignedNotebookInstanceUrlRequest(input) - return out, req.Send() -} - -// CreatePresignedNotebookInstanceUrlWithContext is the same as CreatePresignedNotebookInstanceUrl with the addition of -// the ability to pass a context and additional request options. -// -// See CreatePresignedNotebookInstanceUrl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) CreatePresignedNotebookInstanceUrlWithContext(ctx aws.Context, input *CreatePresignedNotebookInstanceUrlInput, opts ...request.Option) (*CreatePresignedNotebookInstanceUrlOutput, error) { - req, out := c.CreatePresignedNotebookInstanceUrlRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateTrainingJob = "CreateTrainingJob" - -// CreateTrainingJobRequest generates a "aws/request.Request" representing the -// client's request for the CreateTrainingJob operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateTrainingJob for more information on using the CreateTrainingJob -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateTrainingJobRequest method. -// req, resp := client.CreateTrainingJobRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateTrainingJob -func (c *SageMaker) CreateTrainingJobRequest(input *CreateTrainingJobInput) (req *request.Request, output *CreateTrainingJobOutput) { - op := &request.Operation{ - Name: opCreateTrainingJob, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateTrainingJobInput{} - } - - output = &CreateTrainingJobOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateTrainingJob API operation for Amazon SageMaker Service. -// -// Starts a model training job. After training completes, Amazon SageMaker saves -// the resulting model artifacts to an Amazon S3 location that you specify. -// -// If you choose to host your model using Amazon SageMaker hosting services, -// you can use the resulting model artifacts as part of the model. You can also -// use the artifacts in a deep learning service other than Amazon SageMaker, -// provided that you know how to use them for inferences. -// -// In the request body, you provide the following: -// -// * AlgorithmSpecification - Identifies the training algorithm to use. -// -// * HyperParameters - Specify these algorithm-specific parameters to influence -// the quality of the final model. For a list of hyperparameters for each -// training algorithm provided by Amazon SageMaker, see Algorithms (http://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). -// -// -// * InputDataConfig - Describes the training dataset and the Amazon S3 location -// where it is stored. -// -// * OutputDataConfig - Identifies the Amazon S3 location where you want -// Amazon SageMaker to save the results of model training. 
-// -// * ResourceConfig - Identifies the resources, ML compute instances, and -// ML storage volumes to deploy for model training. In distributed training, -// you specify more than one instance. -// -// * RoleARN - The Amazon Resource Number (ARN) that Amazon SageMaker assumes -// to perform tasks on your behalf during model training. You must grant -// this role the necessary permissions so that Amazon SageMaker can successfully -// complete model training. -// -// * StoppingCondition - Sets a duration for training. Use this parameter -// to cap model training costs. -// -// For more information about Amazon SageMaker, see How It Works (http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation CreateTrainingJob for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceInUse "ResourceInUse" -// Resource being accessed is in use. -// -// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" -// You have exceeded an Amazon SageMaker resource limit. For example, you might -// have too many training jobs created. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateTrainingJob -func (c *SageMaker) CreateTrainingJob(input *CreateTrainingJobInput) (*CreateTrainingJobOutput, error) { - req, out := c.CreateTrainingJobRequest(input) - return out, req.Send() -} - -// CreateTrainingJobWithContext is the same as CreateTrainingJob with the addition of -// the ability to pass a context and additional request options. -// -// See CreateTrainingJob for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) CreateTrainingJobWithContext(ctx aws.Context, input *CreateTrainingJobInput, opts ...request.Option) (*CreateTrainingJobOutput, error) { - req, out := c.CreateTrainingJobRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteEndpoint = "DeleteEndpoint" - -// DeleteEndpointRequest generates a "aws/request.Request" representing the -// client's request for the DeleteEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteEndpoint for more information on using the DeleteEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteEndpointRequest method. 
-// req, resp := client.DeleteEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpoint -func (c *SageMaker) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) { - op := &request.Operation{ - Name: opDeleteEndpoint, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteEndpointInput{} - } - - output = &DeleteEndpointOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteEndpoint API operation for Amazon SageMaker Service. -// -// Deletes an endpoint. Amazon SageMaker frees up all of the resources that -// were deployed when the endpoint was created. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation DeleteEndpoint for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpoint -func (c *SageMaker) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) { - req, out := c.DeleteEndpointRequest(input) - return out, req.Send() -} - -// DeleteEndpointWithContext is the same as DeleteEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) DeleteEndpointWithContext(ctx aws.Context, input *DeleteEndpointInput, opts ...request.Option) (*DeleteEndpointOutput, error) { - req, out := c.DeleteEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteEndpointConfig = "DeleteEndpointConfig" - -// DeleteEndpointConfigRequest generates a "aws/request.Request" representing the -// client's request for the DeleteEndpointConfig operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteEndpointConfig for more information on using the DeleteEndpointConfig -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteEndpointConfigRequest method. 
-// req, resp := client.DeleteEndpointConfigRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointConfig -func (c *SageMaker) DeleteEndpointConfigRequest(input *DeleteEndpointConfigInput) (req *request.Request, output *DeleteEndpointConfigOutput) { - op := &request.Operation{ - Name: opDeleteEndpointConfig, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteEndpointConfigInput{} - } - - output = &DeleteEndpointConfigOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteEndpointConfig API operation for Amazon SageMaker Service. -// -// Deletes an endpoint configuration. The DeleteEndpoingConfig API deletes only -// the specified configuration. It does not delete endpoints created using the -// configuration. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation DeleteEndpointConfig for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointConfig -func (c *SageMaker) DeleteEndpointConfig(input *DeleteEndpointConfigInput) (*DeleteEndpointConfigOutput, error) { - req, out := c.DeleteEndpointConfigRequest(input) - return out, req.Send() -} - -// DeleteEndpointConfigWithContext is the same as DeleteEndpointConfig with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteEndpointConfig for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) DeleteEndpointConfigWithContext(ctx aws.Context, input *DeleteEndpointConfigInput, opts ...request.Option) (*DeleteEndpointConfigOutput, error) { - req, out := c.DeleteEndpointConfigRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteModel = "DeleteModel" - -// DeleteModelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteModel operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteModel for more information on using the DeleteModel -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteModelRequest method. 
-// req, resp := client.DeleteModelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteModel -func (c *SageMaker) DeleteModelRequest(input *DeleteModelInput) (req *request.Request, output *DeleteModelOutput) { - op := &request.Operation{ - Name: opDeleteModel, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteModelInput{} - } - - output = &DeleteModelOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteModel API operation for Amazon SageMaker Service. -// -// Deletes a model. The DeleteModel API deletes only the model entry that was -// created in Amazon SageMaker when you called the CreateModel (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateModel.html) -// API. It does not delete model artifacts, inference code, or the IAM role -// that you specified when creating the model. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation DeleteModel for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteModel -func (c *SageMaker) DeleteModel(input *DeleteModelInput) (*DeleteModelOutput, error) { - req, out := c.DeleteModelRequest(input) - return out, req.Send() -} - -// DeleteModelWithContext is the same as DeleteModel with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteModel for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) DeleteModelWithContext(ctx aws.Context, input *DeleteModelInput, opts ...request.Option) (*DeleteModelOutput, error) { - req, out := c.DeleteModelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteNotebookInstance = "DeleteNotebookInstance" - -// DeleteNotebookInstanceRequest generates a "aws/request.Request" representing the -// client's request for the DeleteNotebookInstance operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteNotebookInstance for more information on using the DeleteNotebookInstance -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteNotebookInstanceRequest method. 
-// req, resp := client.DeleteNotebookInstanceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteNotebookInstance -func (c *SageMaker) DeleteNotebookInstanceRequest(input *DeleteNotebookInstanceInput) (req *request.Request, output *DeleteNotebookInstanceOutput) { - op := &request.Operation{ - Name: opDeleteNotebookInstance, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteNotebookInstanceInput{} - } - - output = &DeleteNotebookInstanceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteNotebookInstance API operation for Amazon SageMaker Service. -// -// Deletes an Amazon SageMaker notebook instance. Before you can delete a notebook -// instance, you must call the StopNotebookInstance API. -// -// When you delete a notebook instance, you lose all of your data. Amazon SageMaker -// removes the ML compute instance, and deletes the ML storage volume and the -// network interface associated with the notebook instance. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation DeleteNotebookInstance for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteNotebookInstance -func (c *SageMaker) DeleteNotebookInstance(input *DeleteNotebookInstanceInput) (*DeleteNotebookInstanceOutput, error) { - req, out := c.DeleteNotebookInstanceRequest(input) - return out, req.Send() -} - -// DeleteNotebookInstanceWithContext is the same as DeleteNotebookInstance with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteNotebookInstance for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) DeleteNotebookInstanceWithContext(ctx aws.Context, input *DeleteNotebookInstanceInput, opts ...request.Option) (*DeleteNotebookInstanceOutput, error) { - req, out := c.DeleteNotebookInstanceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteTags = "DeleteTags" - -// DeleteTagsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteTags operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteTags for more information on using the DeleteTags -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteTagsRequest method. 
-// req, resp := client.DeleteTagsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteTags -func (c *SageMaker) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { - op := &request.Operation{ - Name: opDeleteTags, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteTagsInput{} - } - - output = &DeleteTagsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteTags API operation for Amazon SageMaker Service. -// -// Deletes the specified tags from an Amazon SageMaker resource. -// -// To list a resource's tags, use the ListTags API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation DeleteTags for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteTags -func (c *SageMaker) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { - req, out := c.DeleteTagsRequest(input) - return out, req.Send() -} - -// DeleteTagsWithContext is the same as DeleteTags with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteTags for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SageMaker) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsInput, opts ...request.Option) (*DeleteTagsOutput, error) { - req, out := c.DeleteTagsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeEndpoint = "DescribeEndpoint" - -// DescribeEndpointRequest generates a "aws/request.Request" representing the -// client's request for the DescribeEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeEndpoint for more information on using the DescribeEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeEndpointRequest method. -// req, resp := client.DescribeEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpoint -func (c *SageMaker) DescribeEndpointRequest(input *DescribeEndpointInput) (req *request.Request, output *DescribeEndpointOutput) { - op := &request.Operation{ - Name: opDescribeEndpoint, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeEndpointInput{} - } - - output = &DescribeEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeEndpoint API operation for Amazon SageMaker Service. -// -// Returns the description of an endpoint. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation DescribeEndpoint for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpoint -func (c *SageMaker) DescribeEndpoint(input *DescribeEndpointInput) (*DescribeEndpointOutput, error) { - req, out := c.DescribeEndpointRequest(input) - return out, req.Send() -} - -// DescribeEndpointWithContext is the same as DescribeEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) DescribeEndpointWithContext(ctx aws.Context, input *DescribeEndpointInput, opts ...request.Option) (*DescribeEndpointOutput, error) { - req, out := c.DescribeEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeEndpointConfig = "DescribeEndpointConfig" - -// DescribeEndpointConfigRequest generates a "aws/request.Request" representing the -// client's request for the DescribeEndpointConfig operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeEndpointConfig for more information on using the DescribeEndpointConfig -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeEndpointConfigRequest method. -// req, resp := client.DescribeEndpointConfigRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointConfig -func (c *SageMaker) DescribeEndpointConfigRequest(input *DescribeEndpointConfigInput) (req *request.Request, output *DescribeEndpointConfigOutput) { - op := &request.Operation{ - Name: opDescribeEndpointConfig, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeEndpointConfigInput{} - } - - output = &DescribeEndpointConfigOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeEndpointConfig API operation for Amazon SageMaker Service. -// -// Returns the description of an endpoint configuration created using the CreateEndpointConfig -// API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation DescribeEndpointConfig for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointConfig -func (c *SageMaker) DescribeEndpointConfig(input *DescribeEndpointConfigInput) (*DescribeEndpointConfigOutput, error) { - req, out := c.DescribeEndpointConfigRequest(input) - return out, req.Send() -} - -// DescribeEndpointConfigWithContext is the same as DescribeEndpointConfig with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeEndpointConfig for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) DescribeEndpointConfigWithContext(ctx aws.Context, input *DescribeEndpointConfigInput, opts ...request.Option) (*DescribeEndpointConfigOutput, error) { - req, out := c.DescribeEndpointConfigRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeModel = "DescribeModel" - -// DescribeModelRequest generates a "aws/request.Request" representing the -// client's request for the DescribeModel operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeModel for more information on using the DescribeModel -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeModelRequest method. 
-// req, resp := client.DescribeModelRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeModel -func (c *SageMaker) DescribeModelRequest(input *DescribeModelInput) (req *request.Request, output *DescribeModelOutput) { - op := &request.Operation{ - Name: opDescribeModel, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeModelInput{} - } - - output = &DescribeModelOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeModel API operation for Amazon SageMaker Service. -// -// Describes a model that you created using the CreateModel API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation DescribeModel for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeModel -func (c *SageMaker) DescribeModel(input *DescribeModelInput) (*DescribeModelOutput, error) { - req, out := c.DescribeModelRequest(input) - return out, req.Send() -} - -// DescribeModelWithContext is the same as DescribeModel with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeModel for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SageMaker) DescribeModelWithContext(ctx aws.Context, input *DescribeModelInput, opts ...request.Option) (*DescribeModelOutput, error) { - req, out := c.DescribeModelRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeNotebookInstance = "DescribeNotebookInstance" - -// DescribeNotebookInstanceRequest generates a "aws/request.Request" representing the -// client's request for the DescribeNotebookInstance operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeNotebookInstance for more information on using the DescribeNotebookInstance -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeNotebookInstanceRequest method. -// req, resp := client.DescribeNotebookInstanceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeNotebookInstance -func (c *SageMaker) DescribeNotebookInstanceRequest(input *DescribeNotebookInstanceInput) (req *request.Request, output *DescribeNotebookInstanceOutput) { - op := &request.Operation{ - Name: opDescribeNotebookInstance, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeNotebookInstanceInput{} - } - - output = &DescribeNotebookInstanceOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeNotebookInstance API operation for Amazon SageMaker Service. -// -// Returns information about a notebook instance. 
-// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation DescribeNotebookInstance for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeNotebookInstance -func (c *SageMaker) DescribeNotebookInstance(input *DescribeNotebookInstanceInput) (*DescribeNotebookInstanceOutput, error) { - req, out := c.DescribeNotebookInstanceRequest(input) - return out, req.Send() -} - -// DescribeNotebookInstanceWithContext is the same as DescribeNotebookInstance with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeNotebookInstance for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) DescribeNotebookInstanceWithContext(ctx aws.Context, input *DescribeNotebookInstanceInput, opts ...request.Option) (*DescribeNotebookInstanceOutput, error) { - req, out := c.DescribeNotebookInstanceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeTrainingJob = "DescribeTrainingJob" - -// DescribeTrainingJobRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTrainingJob operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See DescribeTrainingJob for more information on using the DescribeTrainingJob -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeTrainingJobRequest method. -// req, resp := client.DescribeTrainingJobRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeTrainingJob -func (c *SageMaker) DescribeTrainingJobRequest(input *DescribeTrainingJobInput) (req *request.Request, output *DescribeTrainingJobOutput) { - op := &request.Operation{ - Name: opDescribeTrainingJob, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeTrainingJobInput{} - } - - output = &DescribeTrainingJobOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeTrainingJob API operation for Amazon SageMaker Service. -// -// Returns information about a training job. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation DescribeTrainingJob for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceNotFound "ResourceNotFound" -// Resource being access is not found. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeTrainingJob -func (c *SageMaker) DescribeTrainingJob(input *DescribeTrainingJobInput) (*DescribeTrainingJobOutput, error) { - req, out := c.DescribeTrainingJobRequest(input) - return out, req.Send() -} - -// DescribeTrainingJobWithContext is the same as DescribeTrainingJob with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeTrainingJob for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) DescribeTrainingJobWithContext(ctx aws.Context, input *DescribeTrainingJobInput, opts ...request.Option) (*DescribeTrainingJobOutput, error) { - req, out := c.DescribeTrainingJobRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListEndpointConfigs = "ListEndpointConfigs" - -// ListEndpointConfigsRequest generates a "aws/request.Request" representing the -// client's request for the ListEndpointConfigs operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListEndpointConfigs for more information on using the ListEndpointConfigs -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListEndpointConfigsRequest method. 
-// req, resp := client.ListEndpointConfigsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointConfigs -func (c *SageMaker) ListEndpointConfigsRequest(input *ListEndpointConfigsInput) (req *request.Request, output *ListEndpointConfigsOutput) { - op := &request.Operation{ - Name: opListEndpointConfigs, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListEndpointConfigsInput{} - } - - output = &ListEndpointConfigsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListEndpointConfigs API operation for Amazon SageMaker Service. -// -// Lists endpoint configurations. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation ListEndpointConfigs for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointConfigs -func (c *SageMaker) ListEndpointConfigs(input *ListEndpointConfigsInput) (*ListEndpointConfigsOutput, error) { - req, out := c.ListEndpointConfigsRequest(input) - return out, req.Send() -} - -// ListEndpointConfigsWithContext is the same as ListEndpointConfigs with the addition of -// the ability to pass a context and additional request options. -// -// See ListEndpointConfigs for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) ListEndpointConfigsWithContext(ctx aws.Context, input *ListEndpointConfigsInput, opts ...request.Option) (*ListEndpointConfigsOutput, error) { - req, out := c.ListEndpointConfigsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListEndpointConfigsPages iterates over the pages of a ListEndpointConfigs operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListEndpointConfigs method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListEndpointConfigs operation. -// pageNum := 0 -// err := client.ListEndpointConfigsPages(params, -// func(page *ListEndpointConfigsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SageMaker) ListEndpointConfigsPages(input *ListEndpointConfigsInput, fn func(*ListEndpointConfigsOutput, bool) bool) error { - return c.ListEndpointConfigsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListEndpointConfigsPagesWithContext same as ListEndpointConfigsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SageMaker) ListEndpointConfigsPagesWithContext(ctx aws.Context, input *ListEndpointConfigsInput, fn func(*ListEndpointConfigsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListEndpointConfigsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListEndpointConfigsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListEndpointConfigsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListEndpoints = "ListEndpoints" - -// ListEndpointsRequest generates a "aws/request.Request" representing the -// client's request for the ListEndpoints operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListEndpoints for more information on using the ListEndpoints -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListEndpointsRequest method. 
-// req, resp := client.ListEndpointsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpoints -func (c *SageMaker) ListEndpointsRequest(input *ListEndpointsInput) (req *request.Request, output *ListEndpointsOutput) { - op := &request.Operation{ - Name: opListEndpoints, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListEndpointsInput{} - } - - output = &ListEndpointsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListEndpoints API operation for Amazon SageMaker Service. -// -// Lists endpoints. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation ListEndpoints for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpoints -func (c *SageMaker) ListEndpoints(input *ListEndpointsInput) (*ListEndpointsOutput, error) { - req, out := c.ListEndpointsRequest(input) - return out, req.Send() -} - -// ListEndpointsWithContext is the same as ListEndpoints with the addition of -// the ability to pass a context and additional request options. -// -// See ListEndpoints for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SageMaker) ListEndpointsWithContext(ctx aws.Context, input *ListEndpointsInput, opts ...request.Option) (*ListEndpointsOutput, error) { - req, out := c.ListEndpointsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListEndpointsPages iterates over the pages of a ListEndpoints operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListEndpoints method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListEndpoints operation. -// pageNum := 0 -// err := client.ListEndpointsPages(params, -// func(page *ListEndpointsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SageMaker) ListEndpointsPages(input *ListEndpointsInput, fn func(*ListEndpointsOutput, bool) bool) error { - return c.ListEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListEndpointsPagesWithContext same as ListEndpointsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) ListEndpointsPagesWithContext(ctx aws.Context, input *ListEndpointsInput, fn func(*ListEndpointsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListEndpointsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListEndpointsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListEndpointsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListModels = "ListModels" - -// ListModelsRequest generates a "aws/request.Request" representing the -// client's request for the ListModels operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListModels for more information on using the ListModels -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListModelsRequest method. -// req, resp := client.ListModelsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListModels -func (c *SageMaker) ListModelsRequest(input *ListModelsInput) (req *request.Request, output *ListModelsOutput) { - op := &request.Operation{ - Name: opListModels, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListModelsInput{} - } - - output = &ListModelsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListModels API operation for Amazon SageMaker Service. -// -// Lists models created with the CreateModel (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateModel.html) -// API. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation ListModels for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListModels -func (c *SageMaker) ListModels(input *ListModelsInput) (*ListModelsOutput, error) { - req, out := c.ListModelsRequest(input) - return out, req.Send() -} - -// ListModelsWithContext is the same as ListModels with the addition of -// the ability to pass a context and additional request options. -// -// See ListModels for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) ListModelsWithContext(ctx aws.Context, input *ListModelsInput, opts ...request.Option) (*ListModelsOutput, error) { - req, out := c.ListModelsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListModelsPages iterates over the pages of a ListModels operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListModels method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListModels operation. 
-// pageNum := 0 -// err := client.ListModelsPages(params, -// func(page *ListModelsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SageMaker) ListModelsPages(input *ListModelsInput, fn func(*ListModelsOutput, bool) bool) error { - return c.ListModelsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListModelsPagesWithContext same as ListModelsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) ListModelsPagesWithContext(ctx aws.Context, input *ListModelsInput, fn func(*ListModelsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListModelsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListModelsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListModelsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListNotebookInstances = "ListNotebookInstances" - -// ListNotebookInstancesRequest generates a "aws/request.Request" representing the -// client's request for the ListNotebookInstances operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListNotebookInstances for more information on using the ListNotebookInstances -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListNotebookInstancesRequest method. -// req, resp := client.ListNotebookInstancesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListNotebookInstances -func (c *SageMaker) ListNotebookInstancesRequest(input *ListNotebookInstancesInput) (req *request.Request, output *ListNotebookInstancesOutput) { - op := &request.Operation{ - Name: opListNotebookInstances, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListNotebookInstancesInput{} - } - - output = &ListNotebookInstancesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListNotebookInstances API operation for Amazon SageMaker Service. -// -// Returns a list of the Amazon SageMaker notebook instances in the requester's -// account in an AWS Region. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation ListNotebookInstances for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListNotebookInstances -func (c *SageMaker) ListNotebookInstances(input *ListNotebookInstancesInput) (*ListNotebookInstancesOutput, error) { - req, out := c.ListNotebookInstancesRequest(input) - return out, req.Send() -} - -// ListNotebookInstancesWithContext is the same as ListNotebookInstances with the addition of -// the ability to pass a context and additional request options. -// -// See ListNotebookInstances for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) ListNotebookInstancesWithContext(ctx aws.Context, input *ListNotebookInstancesInput, opts ...request.Option) (*ListNotebookInstancesOutput, error) { - req, out := c.ListNotebookInstancesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListNotebookInstancesPages iterates over the pages of a ListNotebookInstances operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListNotebookInstances method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListNotebookInstances operation. 
-// pageNum := 0 -// err := client.ListNotebookInstancesPages(params, -// func(page *ListNotebookInstancesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SageMaker) ListNotebookInstancesPages(input *ListNotebookInstancesInput, fn func(*ListNotebookInstancesOutput, bool) bool) error { - return c.ListNotebookInstancesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListNotebookInstancesPagesWithContext same as ListNotebookInstancesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) ListNotebookInstancesPagesWithContext(ctx aws.Context, input *ListNotebookInstancesInput, fn func(*ListNotebookInstancesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListNotebookInstancesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListNotebookInstancesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListNotebookInstancesOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListTags = "ListTags" - -// ListTagsRequest generates a "aws/request.Request" representing the -// client's request for the ListTags operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See ListTags for more information on using the ListTags -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListTagsRequest method. -// req, resp := client.ListTagsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTags -func (c *SageMaker) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { - op := &request.Operation{ - Name: opListTags, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListTagsInput{} - } - - output = &ListTagsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListTags API operation for Amazon SageMaker Service. -// -// Returns the tags for the specified Amazon SageMaker resource. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation ListTags for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTags -func (c *SageMaker) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { - req, out := c.ListTagsRequest(input) - return out, req.Send() -} - -// ListTagsWithContext is the same as ListTags with the addition of -// the ability to pass a context and additional request options. -// -// See ListTags for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, opts ...request.Option) (*ListTagsOutput, error) { - req, out := c.ListTagsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListTagsPages iterates over the pages of a ListTags operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListTags method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListTags operation. -// pageNum := 0 -// err := client.ListTagsPages(params, -// func(page *ListTagsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SageMaker) ListTagsPages(input *ListTagsInput, fn func(*ListTagsOutput, bool) bool) error { - return c.ListTagsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListTagsPagesWithContext same as ListTagsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SageMaker) ListTagsPagesWithContext(ctx aws.Context, input *ListTagsInput, fn func(*ListTagsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListTagsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListTagsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListTrainingJobs = "ListTrainingJobs" - -// ListTrainingJobsRequest generates a "aws/request.Request" representing the -// client's request for the ListTrainingJobs operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTrainingJobs for more information on using the ListTrainingJobs -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListTrainingJobsRequest method. 
-// req, resp := client.ListTrainingJobsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTrainingJobs -func (c *SageMaker) ListTrainingJobsRequest(input *ListTrainingJobsInput) (req *request.Request, output *ListTrainingJobsOutput) { - op := &request.Operation{ - Name: opListTrainingJobs, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListTrainingJobsInput{} - } - - output = &ListTrainingJobsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListTrainingJobs API operation for Amazon SageMaker Service. -// -// Lists training jobs. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation ListTrainingJobs for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTrainingJobs -func (c *SageMaker) ListTrainingJobs(input *ListTrainingJobsInput) (*ListTrainingJobsOutput, error) { - req, out := c.ListTrainingJobsRequest(input) - return out, req.Send() -} - -// ListTrainingJobsWithContext is the same as ListTrainingJobs with the addition of -// the ability to pass a context and additional request options. -// -// See ListTrainingJobs for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SageMaker) ListTrainingJobsWithContext(ctx aws.Context, input *ListTrainingJobsInput, opts ...request.Option) (*ListTrainingJobsOutput, error) { - req, out := c.ListTrainingJobsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListTrainingJobsPages iterates over the pages of a ListTrainingJobs operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListTrainingJobs method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListTrainingJobs operation. -// pageNum := 0 -// err := client.ListTrainingJobsPages(params, -// func(page *ListTrainingJobsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SageMaker) ListTrainingJobsPages(input *ListTrainingJobsInput, fn func(*ListTrainingJobsOutput, bool) bool) error { - return c.ListTrainingJobsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListTrainingJobsPagesWithContext same as ListTrainingJobsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SageMaker) ListTrainingJobsPagesWithContext(ctx aws.Context, input *ListTrainingJobsInput, fn func(*ListTrainingJobsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListTrainingJobsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListTrainingJobsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTrainingJobsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opStartNotebookInstance = "StartNotebookInstance" - -// StartNotebookInstanceRequest generates a "aws/request.Request" representing the -// client's request for the StartNotebookInstance operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartNotebookInstance for more information on using the StartNotebookInstance -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StartNotebookInstanceRequest method. 
-// req, resp := client.StartNotebookInstanceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StartNotebookInstance -func (c *SageMaker) StartNotebookInstanceRequest(input *StartNotebookInstanceInput) (req *request.Request, output *StartNotebookInstanceOutput) { - op := &request.Operation{ - Name: opStartNotebookInstance, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartNotebookInstanceInput{} - } - - output = &StartNotebookInstanceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// StartNotebookInstance API operation for Amazon SageMaker Service. -// -// Launches an ML compute instance with the latest version of the libraries -// and attaches your ML storage volume. After configuring the notebook instance, -// Amazon SageMaker sets the notebook instance status to InService. A notebook -// instance's status must be InService before you can connect to your Jupyter -// notebook. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation StartNotebookInstance for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" -// You have exceeded an Amazon SageMaker resource limit. For example, you might -// have too many training jobs created. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StartNotebookInstance -func (c *SageMaker) StartNotebookInstance(input *StartNotebookInstanceInput) (*StartNotebookInstanceOutput, error) { - req, out := c.StartNotebookInstanceRequest(input) - return out, req.Send() -} - -// StartNotebookInstanceWithContext is the same as StartNotebookInstance with the addition of -// the ability to pass a context and additional request options. -// -// See StartNotebookInstance for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) StartNotebookInstanceWithContext(ctx aws.Context, input *StartNotebookInstanceInput, opts ...request.Option) (*StartNotebookInstanceOutput, error) { - req, out := c.StartNotebookInstanceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStopNotebookInstance = "StopNotebookInstance" - -// StopNotebookInstanceRequest generates a "aws/request.Request" representing the -// client's request for the StopNotebookInstance operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StopNotebookInstance for more information on using the StopNotebookInstance -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StopNotebookInstanceRequest method. 
-// req, resp := client.StopNotebookInstanceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopNotebookInstance -func (c *SageMaker) StopNotebookInstanceRequest(input *StopNotebookInstanceInput) (req *request.Request, output *StopNotebookInstanceOutput) { - op := &request.Operation{ - Name: opStopNotebookInstance, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StopNotebookInstanceInput{} - } - - output = &StopNotebookInstanceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// StopNotebookInstance API operation for Amazon SageMaker Service. -// -// Terminates the ML compute instance. Before terminating the instance, Amazon -// SageMaker disconnects the ML storage volume from it. Amazon SageMaker preserves -// the ML storage volume. -// -// To access data on the ML storage volume for a notebook instance that has -// been terminated, call the StartNotebookInstance API. StartNotebookInstance -// launches another ML compute instance, configures it, and attaches the preserved -// ML storage volume so you can continue your work. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation StopNotebookInstance for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopNotebookInstance -func (c *SageMaker) StopNotebookInstance(input *StopNotebookInstanceInput) (*StopNotebookInstanceOutput, error) { - req, out := c.StopNotebookInstanceRequest(input) - return out, req.Send() -} - -// StopNotebookInstanceWithContext is the same as StopNotebookInstance with the addition of -// the ability to pass a context and additional request options. -// -// See StopNotebookInstance for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) StopNotebookInstanceWithContext(ctx aws.Context, input *StopNotebookInstanceInput, opts ...request.Option) (*StopNotebookInstanceOutput, error) { - req, out := c.StopNotebookInstanceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStopTrainingJob = "StopTrainingJob" - -// StopTrainingJobRequest generates a "aws/request.Request" representing the -// client's request for the StopTrainingJob operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StopTrainingJob for more information on using the StopTrainingJob -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StopTrainingJobRequest method. 
-// req, resp := client.StopTrainingJobRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopTrainingJob -func (c *SageMaker) StopTrainingJobRequest(input *StopTrainingJobInput) (req *request.Request, output *StopTrainingJobOutput) { - op := &request.Operation{ - Name: opStopTrainingJob, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StopTrainingJobInput{} - } - - output = &StopTrainingJobOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// StopTrainingJob API operation for Amazon SageMaker Service. -// -// Stops a training job. To stop a job, Amazon SageMaker sends the algorithm -// the SIGTERM signal, which delays job termination for 120 seconds. Algorithms -// might use this 120-second window to save the model artifacts, so the results -// of the training is not lost. -// -// Training algorithms provided by Amazon SageMaker save the intermediate results -// of a model training job. This intermediate data is a valid model artifact. -// You can use the model artifacts that are saved when Amazon SageMaker stops -// a training job to create a model. -// -// When it receives a StopTrainingJob request, Amazon SageMaker changes the -// status of the job to Stopping. After Amazon SageMaker stops the job, it sets -// the status to Stopped. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation StopTrainingJob for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeResourceNotFound "ResourceNotFound" -// Resource being access is not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopTrainingJob -func (c *SageMaker) StopTrainingJob(input *StopTrainingJobInput) (*StopTrainingJobOutput, error) { - req, out := c.StopTrainingJobRequest(input) - return out, req.Send() -} - -// StopTrainingJobWithContext is the same as StopTrainingJob with the addition of -// the ability to pass a context and additional request options. -// -// See StopTrainingJob for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) StopTrainingJobWithContext(ctx aws.Context, input *StopTrainingJobInput, opts ...request.Option) (*StopTrainingJobOutput, error) { - req, out := c.StopTrainingJobRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateEndpoint = "UpdateEndpoint" - -// UpdateEndpointRequest generates a "aws/request.Request" representing the -// client's request for the UpdateEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateEndpoint for more information on using the UpdateEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateEndpointRequest method. 
-// req, resp := client.UpdateEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpoint -func (c *SageMaker) UpdateEndpointRequest(input *UpdateEndpointInput) (req *request.Request, output *UpdateEndpointOutput) { - op := &request.Operation{ - Name: opUpdateEndpoint, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateEndpointInput{} - } - - output = &UpdateEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateEndpoint API operation for Amazon SageMaker Service. -// -// Deploys the new EndpointConfig specified in the request, switches to using -// newly created endpoint, and then deletes resources provisioned for the endpoint -// using the previous EndpointConfig (there is no availability loss). -// -// When Amazon SageMaker receives the request, it sets the endpoint status to -// Updating. After updating the endpoint, it sets the status to InService. To -// check the status of an endpoint, use the DescribeEndpoint (http://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html) -// API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation UpdateEndpoint for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" -// You have exceeded an Amazon SageMaker resource limit. For example, you might -// have too many training jobs created. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpoint -func (c *SageMaker) UpdateEndpoint(input *UpdateEndpointInput) (*UpdateEndpointOutput, error) { - req, out := c.UpdateEndpointRequest(input) - return out, req.Send() -} - -// UpdateEndpointWithContext is the same as UpdateEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) UpdateEndpointWithContext(ctx aws.Context, input *UpdateEndpointInput, opts ...request.Option) (*UpdateEndpointOutput, error) { - req, out := c.UpdateEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateEndpointWeightsAndCapacities = "UpdateEndpointWeightsAndCapacities" - -// UpdateEndpointWeightsAndCapacitiesRequest generates a "aws/request.Request" representing the -// client's request for the UpdateEndpointWeightsAndCapacities operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateEndpointWeightsAndCapacities for more information on using the UpdateEndpointWeightsAndCapacities -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateEndpointWeightsAndCapacitiesRequest method. 
-// req, resp := client.UpdateEndpointWeightsAndCapacitiesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointWeightsAndCapacities -func (c *SageMaker) UpdateEndpointWeightsAndCapacitiesRequest(input *UpdateEndpointWeightsAndCapacitiesInput) (req *request.Request, output *UpdateEndpointWeightsAndCapacitiesOutput) { - op := &request.Operation{ - Name: opUpdateEndpointWeightsAndCapacities, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateEndpointWeightsAndCapacitiesInput{} - } - - output = &UpdateEndpointWeightsAndCapacitiesOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateEndpointWeightsAndCapacities API operation for Amazon SageMaker Service. -// -// Updates variant weight, capacity, or both of one or more variants associated -// with an endpoint. This operation updates weight, capacity, or both for the -// previously provisioned endpoint. When it receives the request, Amazon SageMaker -// sets the endpoint status to Updating. After updating the endpoint, it sets -// the status to InService. To check the status of an endpoint, use the DescribeEndpoint -// (http://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html) -// API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation UpdateEndpointWeightsAndCapacities for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" -// You have exceeded an Amazon SageMaker resource limit. For example, you might -// have too many training jobs created. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointWeightsAndCapacities -func (c *SageMaker) UpdateEndpointWeightsAndCapacities(input *UpdateEndpointWeightsAndCapacitiesInput) (*UpdateEndpointWeightsAndCapacitiesOutput, error) { - req, out := c.UpdateEndpointWeightsAndCapacitiesRequest(input) - return out, req.Send() -} - -// UpdateEndpointWeightsAndCapacitiesWithContext is the same as UpdateEndpointWeightsAndCapacities with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateEndpointWeightsAndCapacities for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) UpdateEndpointWeightsAndCapacitiesWithContext(ctx aws.Context, input *UpdateEndpointWeightsAndCapacitiesInput, opts ...request.Option) (*UpdateEndpointWeightsAndCapacitiesOutput, error) { - req, out := c.UpdateEndpointWeightsAndCapacitiesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateNotebookInstance = "UpdateNotebookInstance" - -// UpdateNotebookInstanceRequest generates a "aws/request.Request" representing the -// client's request for the UpdateNotebookInstance operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateNotebookInstance for more information on using the UpdateNotebookInstance -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateNotebookInstanceRequest method. -// req, resp := client.UpdateNotebookInstanceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateNotebookInstance -func (c *SageMaker) UpdateNotebookInstanceRequest(input *UpdateNotebookInstanceInput) (req *request.Request, output *UpdateNotebookInstanceOutput) { - op := &request.Operation{ - Name: opUpdateNotebookInstance, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateNotebookInstanceInput{} - } - - output = &UpdateNotebookInstanceOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateNotebookInstance API operation for Amazon SageMaker Service. -// -// Updates a notebook instance. NotebookInstance updates include upgrading or -// downgrading the ML compute instance used for your notebook instance to accommodate -// changes in your workload requirements. You can also update the VPC security -// groups. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon SageMaker Service's -// API operation UpdateNotebookInstance for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceLimitExceeded "ResourceLimitExceeded" -// You have exceeded an Amazon SageMaker resource limit. For example, you might -// have too many training jobs created. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateNotebookInstance -func (c *SageMaker) UpdateNotebookInstance(input *UpdateNotebookInstanceInput) (*UpdateNotebookInstanceOutput, error) { - req, out := c.UpdateNotebookInstanceRequest(input) - return out, req.Send() -} - -// UpdateNotebookInstanceWithContext is the same as UpdateNotebookInstance with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateNotebookInstance for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) UpdateNotebookInstanceWithContext(ctx aws.Context, input *UpdateNotebookInstanceInput, opts ...request.Option) (*UpdateNotebookInstanceOutput, error) { - req, out := c.UpdateNotebookInstanceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AddTagsInput -type AddTagsInput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the resource that you want to tag. - // - // ResourceArn is a required field - ResourceArn *string `type:"string" required:"true"` - - // An array of Tag objects. Each tag is a key-value pair. Only the key parameter - // is required. If you don't specify a value, Amazon SageMaker sets the value - // to an empty string. 
- // - // Tags is a required field - Tags []*Tag `type:"list" required:"true"` -} - -// String returns the string representation -func (s AddTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AddTagsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AddTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceArn sets the ResourceArn field's value. -func (s *AddTagsInput) SetResourceArn(v string) *AddTagsInput { - s.ResourceArn = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *AddTagsInput) SetTags(v []*Tag) *AddTagsInput { - s.Tags = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AddTagsOutput -type AddTagsOutput struct { - _ struct{} `type:"structure"` - - // A list of tags associated with the Amazon SageMaker resource. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s AddTagsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AddTagsOutput) GoString() string { - return s.String() -} - -// SetTags sets the Tags field's value. 
-func (s *AddTagsOutput) SetTags(v []*Tag) *AddTagsOutput { - s.Tags = v - return s -} - -// Specifies the training algorithm to use in a CreateTrainingJob (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateTrainingJob.html) -// request. -// -// For more information about algorithms provided by Amazon SageMaker, see Algorithms -// (http://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). For information -// about using your own algorithms, see Bring Your Own Algorithms (http://docs.aws.amazon.com/sagemaker/latest/dg/adv-topics-own-algo.html). -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AlgorithmSpecification -type AlgorithmSpecification struct { - _ struct{} `type:"structure"` - - // The registry path of the Docker image that contains the training algorithm. - // For information about using your own algorithms, see Docker Registry Paths - // for Algorithms Provided by Amazon SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/algos-docker-registry-paths.html). - // - // TrainingImage is a required field - TrainingImage *string `type:"string" required:"true"` - - // The input mode that the algorithm supports. For the input modes that Amazon - // SageMaker algorithms support, see Algorithms (http://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). - // If an algorithm supports the File input mode, Amazon SageMaker downloads - // the training data from S3 to the provisioned ML storage Volume, and mounts - // the directory to docker volume for training container. If an algorithm supports - // the Pipe input mode, Amazon SageMaker streams data directly from S3 to the - // container. - // - // In File mode, make sure you provision ML storage volume with sufficient capacity - // to accomodate the data download from S3. In addition to the training data, - // the ML storage volume also stores the output model. The algorithm container - // use ML storage volume to also store intermediate information, if any. 
- // - // For distributed algorithms using File mode, training data is distributed - // uniformly, and your training duration is predictable if the input data objects - // size is approximately same. Amazon SageMaker does not split the files any - // further for model training. If the object sizes are skewed, training won't - // be optimal as the data distribution is also skewed where one host in a training - // cluster is overloaded, thus becoming bottleneck in training. - // - // TrainingInputMode is a required field - TrainingInputMode *string `type:"string" required:"true" enum:"TrainingInputMode"` -} - -// String returns the string representation -func (s AlgorithmSpecification) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AlgorithmSpecification) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AlgorithmSpecification) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AlgorithmSpecification"} - if s.TrainingImage == nil { - invalidParams.Add(request.NewErrParamRequired("TrainingImage")) - } - if s.TrainingInputMode == nil { - invalidParams.Add(request.NewErrParamRequired("TrainingInputMode")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTrainingImage sets the TrainingImage field's value. -func (s *AlgorithmSpecification) SetTrainingImage(v string) *AlgorithmSpecification { - s.TrainingImage = &v - return s -} - -// SetTrainingInputMode sets the TrainingInputMode field's value. -func (s *AlgorithmSpecification) SetTrainingInputMode(v string) *AlgorithmSpecification { - s.TrainingInputMode = &v - return s -} - -// A channel is a named input source that training algorithms can consume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/Channel -type Channel struct { - _ struct{} `type:"structure"` - - // The name of the channel. 
- // - // ChannelName is a required field - ChannelName *string `min:"1" type:"string" required:"true"` - - // If training data is compressed, the compression type. The default value is - // None. CompressionType is used only in PIPE input mode. In FILE mode, leave - // this field unset or set it to None. - CompressionType *string `type:"string" enum:"CompressionType"` - - // The MIME type of the data. - ContentType *string `type:"string"` - - // The location of the channel data. - // - // DataSource is a required field - DataSource *DataSource `type:"structure" required:"true"` - - // Specify RecordIO as the value when input data is in raw format but the training - // algorithm requires the RecordIO format, in which caseAmazon SageMaker wraps - // each individual S3 object in a RecordIO record. If the input data is already - // in RecordIO format, you don't need to set this attribute. For more information, - // see Create a Dataset Using RecordIO (https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) - RecordWrapperType *string `type:"string" enum:"RecordWrapper"` -} - -// String returns the string representation -func (s Channel) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Channel) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *Channel) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Channel"} - if s.ChannelName == nil { - invalidParams.Add(request.NewErrParamRequired("ChannelName")) - } - if s.ChannelName != nil && len(*s.ChannelName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1)) - } - if s.DataSource == nil { - invalidParams.Add(request.NewErrParamRequired("DataSource")) - } - if s.DataSource != nil { - if err := s.DataSource.Validate(); err != nil { - invalidParams.AddNested("DataSource", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChannelName sets the ChannelName field's value. -func (s *Channel) SetChannelName(v string) *Channel { - s.ChannelName = &v - return s -} - -// SetCompressionType sets the CompressionType field's value. -func (s *Channel) SetCompressionType(v string) *Channel { - s.CompressionType = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *Channel) SetContentType(v string) *Channel { - s.ContentType = &v - return s -} - -// SetDataSource sets the DataSource field's value. -func (s *Channel) SetDataSource(v *DataSource) *Channel { - s.DataSource = v - return s -} - -// SetRecordWrapperType sets the RecordWrapperType field's value. -func (s *Channel) SetRecordWrapperType(v string) *Channel { - s.RecordWrapperType = &v - return s -} - -// Describes the container, as part of model definition. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ContainerDefinition -type ContainerDefinition struct { - _ struct{} `type:"structure"` - - // The DNS host name for the container after Amazon SageMaker deploys it. - ContainerHostname *string `type:"string"` - - // The environment variables to set in the Docker container. Each key and value - // in the Environment string to string map can have length of up to 1024. We - // support up to 16 entries in the map. 
- Environment map[string]*string `type:"map"` - - // The Amazon EC2 Container Registry (Amazon ECR) path where inference code - // is stored. If you are using your own custom algorithm instead of an algorithm - // provided by Amazon SageMaker, the inference code must meet Amazon SageMaker - // requirements. For more information, see Using Your Own Algorithms with Amazon - // SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html) - // - // Image is a required field - Image *string `type:"string" required:"true"` - - // The S3 path where the model artifacts, which result from model training, - // are stored. This path must point to a single gzip compressed tar archive - // (.tar.gz suffix). - ModelDataUrl *string `type:"string"` -} - -// String returns the string representation -func (s ContainerDefinition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ContainerDefinition) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ContainerDefinition) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ContainerDefinition"} - if s.Image == nil { - invalidParams.Add(request.NewErrParamRequired("Image")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetContainerHostname sets the ContainerHostname field's value. -func (s *ContainerDefinition) SetContainerHostname(v string) *ContainerDefinition { - s.ContainerHostname = &v - return s -} - -// SetEnvironment sets the Environment field's value. -func (s *ContainerDefinition) SetEnvironment(v map[string]*string) *ContainerDefinition { - s.Environment = v - return s -} - -// SetImage sets the Image field's value. -func (s *ContainerDefinition) SetImage(v string) *ContainerDefinition { - s.Image = &v - return s -} - -// SetModelDataUrl sets the ModelDataUrl field's value. 
-func (s *ContainerDefinition) SetModelDataUrl(v string) *ContainerDefinition { - s.ModelDataUrl = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointConfigInput -type CreateEndpointConfigInput struct { - _ struct{} `type:"structure"` - - // The name of the endpoint configuration. You specify this name in a CreateEndpoint - // (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html) - // request. - // - // EndpointConfigName is a required field - EndpointConfigName *string `type:"string" required:"true"` - - // An array of ProductionVariant objects, one for each model that you want to - // host at this endpoint. - // - // ProductionVariants is a required field - ProductionVariants []*ProductionVariant `min:"1" type:"list" required:"true"` - - // An array of key-value pairs. For more information, see Using Cost Allocation - // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) - // in the AWS Billing and Cost Management User Guide. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s CreateEndpointConfigInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateEndpointConfigInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateEndpointConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateEndpointConfigInput"} - if s.EndpointConfigName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointConfigName")) - } - if s.ProductionVariants == nil { - invalidParams.Add(request.NewErrParamRequired("ProductionVariants")) - } - if s.ProductionVariants != nil && len(s.ProductionVariants) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ProductionVariants", 1)) - } - if s.ProductionVariants != nil { - for i, v := range s.ProductionVariants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProductionVariants", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointConfigName sets the EndpointConfigName field's value. -func (s *CreateEndpointConfigInput) SetEndpointConfigName(v string) *CreateEndpointConfigInput { - s.EndpointConfigName = &v - return s -} - -// SetProductionVariants sets the ProductionVariants field's value. -func (s *CreateEndpointConfigInput) SetProductionVariants(v []*ProductionVariant) *CreateEndpointConfigInput { - s.ProductionVariants = v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateEndpointConfigInput) SetTags(v []*Tag) *CreateEndpointConfigInput { - s.Tags = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointConfigOutput -type CreateEndpointConfigOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the endpoint configuration. 
- // - // EndpointConfigArn is a required field - EndpointConfigArn *string `min:"20" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateEndpointConfigOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateEndpointConfigOutput) GoString() string { - return s.String() -} - -// SetEndpointConfigArn sets the EndpointConfigArn field's value. -func (s *CreateEndpointConfigOutput) SetEndpointConfigArn(v string) *CreateEndpointConfigOutput { - s.EndpointConfigArn = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointInput -type CreateEndpointInput struct { - _ struct{} `type:"structure"` - - // The name of an endpoint configuration. For more information, see CreateEndpointConfig - // (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html). - // - // EndpointConfigName is a required field - EndpointConfigName *string `type:"string" required:"true"` - - // The name of the endpoint. The name must be unique within an AWS Region in - // your AWS account. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` - - // An array of key-value pairs. For more information, see Using Cost Allocation - // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what)in - // the AWS Billing and Cost Management User Guide. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s CreateEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateEndpointInput"} - if s.EndpointConfigName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointConfigName")) - } - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointConfigName sets the EndpointConfigName field's value. -func (s *CreateEndpointInput) SetEndpointConfigName(v string) *CreateEndpointInput { - s.EndpointConfigName = &v - return s -} - -// SetEndpointName sets the EndpointName field's value. -func (s *CreateEndpointInput) SetEndpointName(v string) *CreateEndpointInput { - s.EndpointName = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateEndpointInput) SetTags(v []*Tag) *CreateEndpointInput { - s.Tags = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateEndpointOutput -type CreateEndpointOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the endpoint. - // - // EndpointArn is a required field - EndpointArn *string `min:"20" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateEndpointOutput) GoString() string { - return s.String() -} - -// SetEndpointArn sets the EndpointArn field's value. 
-func (s *CreateEndpointOutput) SetEndpointArn(v string) *CreateEndpointOutput { - s.EndpointArn = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateModelInput -type CreateModelInput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can - // assume to access model artifacts and docker image for deployment on ML compute - // instances. Deploying on ML compute instances is part of model hosting. For - // more information, see Amazon SageMaker Roles (http://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). - // - // ExecutionRoleArn is a required field - ExecutionRoleArn *string `min:"20" type:"string" required:"true"` - - // The name of the new model. - // - // ModelName is a required field - ModelName *string `type:"string" required:"true"` - - // The location of the primary docker image containing inference code, associated - // artifacts, and custom environment map that the inference code uses when the - // model is deployed into production. - // - // PrimaryContainer is a required field - PrimaryContainer *ContainerDefinition `type:"structure" required:"true"` - - // An array of key-value pairs. For more information, see Using Cost Allocation - // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) - // in the AWS Billing and Cost Management User Guide. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s CreateModelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateModelInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateModelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateModelInput"} - if s.ExecutionRoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("ExecutionRoleArn")) - } - if s.ExecutionRoleArn != nil && len(*s.ExecutionRoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("ExecutionRoleArn", 20)) - } - if s.ModelName == nil { - invalidParams.Add(request.NewErrParamRequired("ModelName")) - } - if s.PrimaryContainer == nil { - invalidParams.Add(request.NewErrParamRequired("PrimaryContainer")) - } - if s.PrimaryContainer != nil { - if err := s.PrimaryContainer.Validate(); err != nil { - invalidParams.AddNested("PrimaryContainer", err.(request.ErrInvalidParams)) - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetExecutionRoleArn sets the ExecutionRoleArn field's value. -func (s *CreateModelInput) SetExecutionRoleArn(v string) *CreateModelInput { - s.ExecutionRoleArn = &v - return s -} - -// SetModelName sets the ModelName field's value. -func (s *CreateModelInput) SetModelName(v string) *CreateModelInput { - s.ModelName = &v - return s -} - -// SetPrimaryContainer sets the PrimaryContainer field's value. -func (s *CreateModelInput) SetPrimaryContainer(v *ContainerDefinition) *CreateModelInput { - s.PrimaryContainer = v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateModelInput) SetTags(v []*Tag) *CreateModelInput { - s.Tags = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateModelOutput -type CreateModelOutput struct { - _ struct{} `type:"structure"` - - // The ARN of the model created in Amazon SageMaker. 
- // - // ModelArn is a required field - ModelArn *string `min:"20" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateModelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateModelOutput) GoString() string { - return s.String() -} - -// SetModelArn sets the ModelArn field's value. -func (s *CreateModelOutput) SetModelArn(v string) *CreateModelOutput { - s.ModelArn = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateNotebookInstanceInput -type CreateNotebookInstanceInput struct { - _ struct{} `type:"structure"` - - // The type of ML compute instance to launch for the notebook instance. - // - // InstanceType is a required field - InstanceType *string `type:"string" required:"true" enum:"InstanceType"` - - // If you provide a AWS KMS key ID, Amazon SageMaker uses it to encrypt data - // at rest on the ML storage volume that is attached to your notebook instance. - KmsKeyId *string `type:"string"` - - // The name of the new notebook instance. - // - // NotebookInstanceName is a required field - NotebookInstanceName *string `type:"string" required:"true"` - - // When you send any requests to AWS resources from the notebook instance, Amazon - // SageMaker assumes this role to perform tasks on your behalf. You must grant - // this role necessary permissions so Amazon SageMaker can perform these tasks. - // The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) - // permissions to assume this role. For more information, see Amazon SageMaker - // Roles (http://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // The VPC security group IDs, in the form sg-xxxxxxxx. The security groups - // must be for the same VPC as specified in the subnet. 
- SecurityGroupIds []*string `type:"list"` - - // The ID of the subnet in a VPC to which you would like to have a connectivity - // from your ML compute instance. - SubnetId *string `type:"string"` - - // A list of tags to associate with the notebook instance. You can add tags - // later by using the CreateTags API. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s CreateNotebookInstanceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateNotebookInstanceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateNotebookInstanceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateNotebookInstanceInput"} - if s.InstanceType == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceType")) - } - if s.NotebookInstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetInstanceType sets the InstanceType field's value. -func (s *CreateNotebookInstanceInput) SetInstanceType(v string) *CreateNotebookInstanceInput { - s.InstanceType = &v - return s -} - -// SetKmsKeyId sets the KmsKeyId field's value. -func (s *CreateNotebookInstanceInput) SetKmsKeyId(v string) *CreateNotebookInstanceInput { - s.KmsKeyId = &v - return s -} - -// SetNotebookInstanceName sets the NotebookInstanceName field's value. 
-func (s *CreateNotebookInstanceInput) SetNotebookInstanceName(v string) *CreateNotebookInstanceInput { - s.NotebookInstanceName = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *CreateNotebookInstanceInput) SetRoleArn(v string) *CreateNotebookInstanceInput { - s.RoleArn = &v - return s -} - -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *CreateNotebookInstanceInput) SetSecurityGroupIds(v []*string) *CreateNotebookInstanceInput { - s.SecurityGroupIds = v - return s -} - -// SetSubnetId sets the SubnetId field's value. -func (s *CreateNotebookInstanceInput) SetSubnetId(v string) *CreateNotebookInstanceInput { - s.SubnetId = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateNotebookInstanceInput) SetTags(v []*Tag) *CreateNotebookInstanceInput { - s.Tags = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateNotebookInstanceOutput -type CreateNotebookInstanceOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the notebook instance. - NotebookInstanceArn *string `type:"string"` -} - -// String returns the string representation -func (s CreateNotebookInstanceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateNotebookInstanceOutput) GoString() string { - return s.String() -} - -// SetNotebookInstanceArn sets the NotebookInstanceArn field's value. -func (s *CreateNotebookInstanceOutput) SetNotebookInstanceArn(v string) *CreateNotebookInstanceOutput { - s.NotebookInstanceArn = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreatePresignedNotebookInstanceUrlInput -type CreatePresignedNotebookInstanceUrlInput struct { - _ struct{} `type:"structure"` - - // The name of the notebook instance. 
- // - // NotebookInstanceName is a required field - NotebookInstanceName *string `type:"string" required:"true"` - - // The duration of the session, in seconds. The default is 12 hours. - SessionExpirationDurationInSeconds *int64 `min:"1800" type:"integer"` -} - -// String returns the string representation -func (s CreatePresignedNotebookInstanceUrlInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreatePresignedNotebookInstanceUrlInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreatePresignedNotebookInstanceUrlInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreatePresignedNotebookInstanceUrlInput"} - if s.NotebookInstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) - } - if s.SessionExpirationDurationInSeconds != nil && *s.SessionExpirationDurationInSeconds < 1800 { - invalidParams.Add(request.NewErrParamMinValue("SessionExpirationDurationInSeconds", 1800)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNotebookInstanceName sets the NotebookInstanceName field's value. -func (s *CreatePresignedNotebookInstanceUrlInput) SetNotebookInstanceName(v string) *CreatePresignedNotebookInstanceUrlInput { - s.NotebookInstanceName = &v - return s -} - -// SetSessionExpirationDurationInSeconds sets the SessionExpirationDurationInSeconds field's value. -func (s *CreatePresignedNotebookInstanceUrlInput) SetSessionExpirationDurationInSeconds(v int64) *CreatePresignedNotebookInstanceUrlInput { - s.SessionExpirationDurationInSeconds = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreatePresignedNotebookInstanceUrlOutput -type CreatePresignedNotebookInstanceUrlOutput struct { - _ struct{} `type:"structure"` - - // A JSON object that contains the URL string. 
- AuthorizedUrl *string `type:"string"` -} - -// String returns the string representation -func (s CreatePresignedNotebookInstanceUrlOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreatePresignedNotebookInstanceUrlOutput) GoString() string { - return s.String() -} - -// SetAuthorizedUrl sets the AuthorizedUrl field's value. -func (s *CreatePresignedNotebookInstanceUrlOutput) SetAuthorizedUrl(v string) *CreatePresignedNotebookInstanceUrlOutput { - s.AuthorizedUrl = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateTrainingJobRequest -type CreateTrainingJobInput struct { - _ struct{} `type:"structure"` - - // The registry path of the Docker image that contains the training algorithm - // and algorithm-specific metadata, including the input mode. For more information - // about algorithms provided by Amazon SageMaker, see Algorithms (http://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). - // For information about providing your own algorithms, see Bring Your Own Algorithms - // (http://docs.aws.amazon.com/sagemaker/latest/dg/adv-topics-own-algo.html). - // - // AlgorithmSpecification is a required field - AlgorithmSpecification *AlgorithmSpecification `type:"structure" required:"true"` - - // Algorithm-specific parameters. You set hyperparameters before you start the - // learning process. Hyperparameters influence the quality of the model. For - // a list of hyperparameters for each training algorithm provided by Amazon - // SageMaker, see Algorithms (http://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). - // - // You can specify a maximum of 100 hyperparameters. Each hyperparameter is - // a key-value pair. Each key and value is limited to 256 characters, as specified - // by the Length Constraint. - HyperParameters map[string]*string `type:"map"` - - // An array of Channel objects. Each channel is a named input source. 
InputDataConfig - // describes the input data and its location. - // - // Algorithms can accept input data from one or more channels. For example, - // an algorithm might have two channels of input data, training_data and validation_data. - // The configuration for each channel provides the S3 location where the input - // data is stored. It also provides information about the stored data: the MIME - // type, compression method, and whether the data is wrapped in RecordIO format. - // - // Depending on the input mode that the algorithm supports, Amazon SageMaker - // either copies input data files from an S3 bucket to a local directory in - // the Docker container, or makes it available as input streams. - // - // InputDataConfig is a required field - InputDataConfig []*Channel `min:"1" type:"list" required:"true"` - - // Specifies the path to the S3 bucket where you want to store model artifacts. - // Amazon SageMaker creates subfolders for the artifacts. - // - // OutputDataConfig is a required field - OutputDataConfig *OutputDataConfig `type:"structure" required:"true"` - - // The resources, including the ML compute instances and ML storage volumes, - // to use for model training. - // - // ML storage volumes store model artifacts and incremental states. Training - // algorithms might also use ML storage volumes for scratch space. If you want - // Amazon SageMaker to use the ML storage volume to store the training data, - // choose File as the TrainingInputMode in the algorithm specification. For - // distributed training algorithms, specify an instance count greater than 1. - // - // ResourceConfig is a required field - ResourceConfig *ResourceConfig `type:"structure" required:"true"` - - // The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume - // to perform tasks on your behalf. 
- // - // During model training, Amazon SageMaker needs your permission to read input - // data from an S3 bucket, download a Docker image that contains training code, - // write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, - // and publish metrics to Amazon CloudWatch. You grant permissions for all of - // these tasks to an IAM role. For more information, see Amazon SageMaker Roles - // (http://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // Sets a duration for training. Use this parameter to cap model training costs. - // To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which - // delays job termination for 120 seconds. Algorithms might use this 120-second - // window to save the model artifacts. - // - // When Amazon SageMaker terminates a job because the stopping condition has - // been met, training algorithms provided by Amazon SageMaker save the intermediate - // results of the job. This intermediate data is a valid model artifact. You - // can use it to create a model using the CreateModel API. - // - // StoppingCondition is a required field - StoppingCondition *StoppingCondition `type:"structure" required:"true"` - - // An array of key-value pairs. For more information, see Using Cost Allocation - // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) - // in the AWS Billing and Cost Management User Guide. - Tags []*Tag `type:"list"` - - // The name of the training job. The name must be unique within an AWS Region - // in an AWS account. It appears in the Amazon SageMaker console. 
- // - // TrainingJobName is a required field - TrainingJobName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateTrainingJobInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateTrainingJobInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateTrainingJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateTrainingJobInput"} - if s.AlgorithmSpecification == nil { - invalidParams.Add(request.NewErrParamRequired("AlgorithmSpecification")) - } - if s.InputDataConfig == nil { - invalidParams.Add(request.NewErrParamRequired("InputDataConfig")) - } - if s.InputDataConfig != nil && len(s.InputDataConfig) < 1 { - invalidParams.Add(request.NewErrParamMinLen("InputDataConfig", 1)) - } - if s.OutputDataConfig == nil { - invalidParams.Add(request.NewErrParamRequired("OutputDataConfig")) - } - if s.ResourceConfig == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceConfig")) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.StoppingCondition == nil { - invalidParams.Add(request.NewErrParamRequired("StoppingCondition")) - } - if s.TrainingJobName == nil { - invalidParams.Add(request.NewErrParamRequired("TrainingJobName")) - } - if s.TrainingJobName != nil && len(*s.TrainingJobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TrainingJobName", 1)) - } - if s.AlgorithmSpecification != nil { - if err := s.AlgorithmSpecification.Validate(); err != nil { - invalidParams.AddNested("AlgorithmSpecification", err.(request.ErrInvalidParams)) - } - } - if s.InputDataConfig != nil { - for i, v := range s.InputDataConfig { - if v == nil { - continue - } - if 
err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputDataConfig", i), err.(request.ErrInvalidParams)) - } - } - } - if s.OutputDataConfig != nil { - if err := s.OutputDataConfig.Validate(); err != nil { - invalidParams.AddNested("OutputDataConfig", err.(request.ErrInvalidParams)) - } - } - if s.ResourceConfig != nil { - if err := s.ResourceConfig.Validate(); err != nil { - invalidParams.AddNested("ResourceConfig", err.(request.ErrInvalidParams)) - } - } - if s.StoppingCondition != nil { - if err := s.StoppingCondition.Validate(); err != nil { - invalidParams.AddNested("StoppingCondition", err.(request.ErrInvalidParams)) - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAlgorithmSpecification sets the AlgorithmSpecification field's value. -func (s *CreateTrainingJobInput) SetAlgorithmSpecification(v *AlgorithmSpecification) *CreateTrainingJobInput { - s.AlgorithmSpecification = v - return s -} - -// SetHyperParameters sets the HyperParameters field's value. -func (s *CreateTrainingJobInput) SetHyperParameters(v map[string]*string) *CreateTrainingJobInput { - s.HyperParameters = v - return s -} - -// SetInputDataConfig sets the InputDataConfig field's value. -func (s *CreateTrainingJobInput) SetInputDataConfig(v []*Channel) *CreateTrainingJobInput { - s.InputDataConfig = v - return s -} - -// SetOutputDataConfig sets the OutputDataConfig field's value. -func (s *CreateTrainingJobInput) SetOutputDataConfig(v *OutputDataConfig) *CreateTrainingJobInput { - s.OutputDataConfig = v - return s -} - -// SetResourceConfig sets the ResourceConfig field's value. 
-func (s *CreateTrainingJobInput) SetResourceConfig(v *ResourceConfig) *CreateTrainingJobInput { - s.ResourceConfig = v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *CreateTrainingJobInput) SetRoleArn(v string) *CreateTrainingJobInput { - s.RoleArn = &v - return s -} - -// SetStoppingCondition sets the StoppingCondition field's value. -func (s *CreateTrainingJobInput) SetStoppingCondition(v *StoppingCondition) *CreateTrainingJobInput { - s.StoppingCondition = v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateTrainingJobInput) SetTags(v []*Tag) *CreateTrainingJobInput { - s.Tags = v - return s -} - -// SetTrainingJobName sets the TrainingJobName field's value. -func (s *CreateTrainingJobInput) SetTrainingJobName(v string) *CreateTrainingJobInput { - s.TrainingJobName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateTrainingJobResponse -type CreateTrainingJobOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the training job. - // - // TrainingJobArn is a required field - TrainingJobArn *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateTrainingJobOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateTrainingJobOutput) GoString() string { - return s.String() -} - -// SetTrainingJobArn sets the TrainingJobArn field's value. -func (s *CreateTrainingJobOutput) SetTrainingJobArn(v string) *CreateTrainingJobOutput { - s.TrainingJobArn = &v - return s -} - -// Describes the location of the channel data. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DataSource -type DataSource struct { - _ struct{} `type:"structure"` - - // The S3 location of the data source that is associated with a channel. 
- // - // S3DataSource is a required field - S3DataSource *S3DataSource `type:"structure" required:"true"` -} - -// String returns the string representation -func (s DataSource) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DataSource) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DataSource) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DataSource"} - if s.S3DataSource == nil { - invalidParams.Add(request.NewErrParamRequired("S3DataSource")) - } - if s.S3DataSource != nil { - if err := s.S3DataSource.Validate(); err != nil { - invalidParams.AddNested("S3DataSource", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetS3DataSource sets the S3DataSource field's value. -func (s *DataSource) SetS3DataSource(v *S3DataSource) *DataSource { - s.S3DataSource = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointConfigInput -type DeleteEndpointConfigInput struct { - _ struct{} `type:"structure"` - - // The name of the endpoint configuration that you want to delete. - // - // EndpointConfigName is a required field - EndpointConfigName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteEndpointConfigInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteEndpointConfigInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteEndpointConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteEndpointConfigInput"} - if s.EndpointConfigName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointConfigName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointConfigName sets the EndpointConfigName field's value. -func (s *DeleteEndpointConfigInput) SetEndpointConfigName(v string) *DeleteEndpointConfigInput { - s.EndpointConfigName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointConfigOutput -type DeleteEndpointConfigOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteEndpointConfigOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteEndpointConfigOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointInput -type DeleteEndpointInput struct { - _ struct{} `type:"structure"` - - // The name of the endpoint that you want to delete. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteEndpointInput"} - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointName sets the EndpointName field's value. 
-func (s *DeleteEndpointInput) SetEndpointName(v string) *DeleteEndpointInput { - s.EndpointName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteEndpointOutput -type DeleteEndpointOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteEndpointOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteModelInput -type DeleteModelInput struct { - _ struct{} `type:"structure"` - - // The name of the model to delete. - // - // ModelName is a required field - ModelName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteModelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteModelInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteModelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteModelInput"} - if s.ModelName == nil { - invalidParams.Add(request.NewErrParamRequired("ModelName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetModelName sets the ModelName field's value. 
-func (s *DeleteModelInput) SetModelName(v string) *DeleteModelInput { - s.ModelName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteModelOutput -type DeleteModelOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteModelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteModelOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteNotebookInstanceInput -type DeleteNotebookInstanceInput struct { - _ struct{} `type:"structure"` - - // The name of the Amazon SageMaker notebook instance to delete. - // - // NotebookInstanceName is a required field - NotebookInstanceName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteNotebookInstanceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteNotebookInstanceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteNotebookInstanceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteNotebookInstanceInput"} - if s.NotebookInstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNotebookInstanceName sets the NotebookInstanceName field's value. 
-func (s *DeleteNotebookInstanceInput) SetNotebookInstanceName(v string) *DeleteNotebookInstanceInput { - s.NotebookInstanceName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteNotebookInstanceOutput -type DeleteNotebookInstanceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteNotebookInstanceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteNotebookInstanceOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteTagsInput -type DeleteTagsInput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the resource whose tags you want to delete. - // - // ResourceArn is a required field - ResourceArn *string `type:"string" required:"true"` - - // An array or one or more tag keys to delete. - // - // TagKeys is a required field - TagKeys []*string `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s DeleteTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteTagsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) - } - if s.TagKeys != nil && len(s.TagKeys) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceArn sets the ResourceArn field's value. 
-func (s *DeleteTagsInput) SetResourceArn(v string) *DeleteTagsInput { - s.ResourceArn = &v - return s -} - -// SetTagKeys sets the TagKeys field's value. -func (s *DeleteTagsInput) SetTagKeys(v []*string) *DeleteTagsInput { - s.TagKeys = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteTagsOutput -type DeleteTagsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteTagsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteTagsOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointConfigInput -type DescribeEndpointConfigInput struct { - _ struct{} `type:"structure"` - - // The name of the endpoint configuration. - // - // EndpointConfigName is a required field - EndpointConfigName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeEndpointConfigInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeEndpointConfigInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeEndpointConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeEndpointConfigInput"} - if s.EndpointConfigName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointConfigName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointConfigName sets the EndpointConfigName field's value. 
-func (s *DescribeEndpointConfigInput) SetEndpointConfigName(v string) *DescribeEndpointConfigInput { - s.EndpointConfigName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointConfigOutput -type DescribeEndpointConfigOutput struct { - _ struct{} `type:"structure"` - - // A timestamp that shows when the endpoint configuration was created. - // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - - // The Amazon Resource Name (ARN) of the endpoint configuration. - // - // EndpointConfigArn is a required field - EndpointConfigArn *string `min:"20" type:"string" required:"true"` - - // Name of the Amazon SageMaker endpoint configuration. - // - // EndpointConfigName is a required field - EndpointConfigName *string `type:"string" required:"true"` - - // An array of ProductionVariant objects, one for each model that you want to - // host at this endpoint. - // - // ProductionVariants is a required field - ProductionVariants []*ProductionVariant `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s DescribeEndpointConfigOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeEndpointConfigOutput) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *DescribeEndpointConfigOutput) SetCreationTime(v time.Time) *DescribeEndpointConfigOutput { - s.CreationTime = &v - return s -} - -// SetEndpointConfigArn sets the EndpointConfigArn field's value. -func (s *DescribeEndpointConfigOutput) SetEndpointConfigArn(v string) *DescribeEndpointConfigOutput { - s.EndpointConfigArn = &v - return s -} - -// SetEndpointConfigName sets the EndpointConfigName field's value. 
-func (s *DescribeEndpointConfigOutput) SetEndpointConfigName(v string) *DescribeEndpointConfigOutput { - s.EndpointConfigName = &v - return s -} - -// SetProductionVariants sets the ProductionVariants field's value. -func (s *DescribeEndpointConfigOutput) SetProductionVariants(v []*ProductionVariant) *DescribeEndpointConfigOutput { - s.ProductionVariants = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointInput -type DescribeEndpointInput struct { - _ struct{} `type:"structure"` - - // The name of the endpoint. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeEndpointInput"} - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointName sets the EndpointName field's value. -func (s *DescribeEndpointInput) SetEndpointName(v string) *DescribeEndpointInput { - s.EndpointName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeEndpointOutput -type DescribeEndpointOutput struct { - _ struct{} `type:"structure"` - - // A timestamp that shows when the endpoint was created. - // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - - // The Amazon Resource Name (ARN) of the endpoint. 
- // - // EndpointArn is a required field - EndpointArn *string `min:"20" type:"string" required:"true"` - - // The name of the endpoint configuration associated with this endpoint. - // - // EndpointConfigName is a required field - EndpointConfigName *string `type:"string" required:"true"` - - // Name of the endpoint. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` - - // The status of the endpoint. - // - // EndpointStatus is a required field - EndpointStatus *string `type:"string" required:"true" enum:"EndpointStatus"` - - // If the status of the endpoint is Failed, the reason why it failed. - FailureReason *string `type:"string"` - - // A timestamp that shows when the endpoint was last modified. - // - // LastModifiedTime is a required field - LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - - // An array of ProductionVariant objects, one for each model hosted behind this - // endpoint. - ProductionVariants []*ProductionVariantSummary `min:"1" type:"list"` -} - -// String returns the string representation -func (s DescribeEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeEndpointOutput) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *DescribeEndpointOutput) SetCreationTime(v time.Time) *DescribeEndpointOutput { - s.CreationTime = &v - return s -} - -// SetEndpointArn sets the EndpointArn field's value. -func (s *DescribeEndpointOutput) SetEndpointArn(v string) *DescribeEndpointOutput { - s.EndpointArn = &v - return s -} - -// SetEndpointConfigName sets the EndpointConfigName field's value. -func (s *DescribeEndpointOutput) SetEndpointConfigName(v string) *DescribeEndpointOutput { - s.EndpointConfigName = &v - return s -} - -// SetEndpointName sets the EndpointName field's value. 
-func (s *DescribeEndpointOutput) SetEndpointName(v string) *DescribeEndpointOutput { - s.EndpointName = &v - return s -} - -// SetEndpointStatus sets the EndpointStatus field's value. -func (s *DescribeEndpointOutput) SetEndpointStatus(v string) *DescribeEndpointOutput { - s.EndpointStatus = &v - return s -} - -// SetFailureReason sets the FailureReason field's value. -func (s *DescribeEndpointOutput) SetFailureReason(v string) *DescribeEndpointOutput { - s.FailureReason = &v - return s -} - -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *DescribeEndpointOutput) SetLastModifiedTime(v time.Time) *DescribeEndpointOutput { - s.LastModifiedTime = &v - return s -} - -// SetProductionVariants sets the ProductionVariants field's value. -func (s *DescribeEndpointOutput) SetProductionVariants(v []*ProductionVariantSummary) *DescribeEndpointOutput { - s.ProductionVariants = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeModelInput -type DescribeModelInput struct { - _ struct{} `type:"structure"` - - // The name of the model. - // - // ModelName is a required field - ModelName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeModelInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeModelInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeModelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeModelInput"} - if s.ModelName == nil { - invalidParams.Add(request.NewErrParamRequired("ModelName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetModelName sets the ModelName field's value. 
-func (s *DescribeModelInput) SetModelName(v string) *DescribeModelInput { - s.ModelName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeModelOutput -type DescribeModelOutput struct { - _ struct{} `type:"structure"` - - // A timestamp that shows when the model was created. - // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - - // The Amazon Resource Name (ARN) of the IAM role that you specified for the - // model. - // - // ExecutionRoleArn is a required field - ExecutionRoleArn *string `min:"20" type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the model. - // - // ModelArn is a required field - ModelArn *string `min:"20" type:"string" required:"true"` - - // Name of the Amazon SageMaker model. - // - // ModelName is a required field - ModelName *string `type:"string" required:"true"` - - // The location of the primary inference code, associated artifacts, and custom - // environment map that the inference code uses when it is deployed in production. - // - // PrimaryContainer is a required field - PrimaryContainer *ContainerDefinition `type:"structure" required:"true"` -} - -// String returns the string representation -func (s DescribeModelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeModelOutput) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *DescribeModelOutput) SetCreationTime(v time.Time) *DescribeModelOutput { - s.CreationTime = &v - return s -} - -// SetExecutionRoleArn sets the ExecutionRoleArn field's value. -func (s *DescribeModelOutput) SetExecutionRoleArn(v string) *DescribeModelOutput { - s.ExecutionRoleArn = &v - return s -} - -// SetModelArn sets the ModelArn field's value. 
-func (s *DescribeModelOutput) SetModelArn(v string) *DescribeModelOutput { - s.ModelArn = &v - return s -} - -// SetModelName sets the ModelName field's value. -func (s *DescribeModelOutput) SetModelName(v string) *DescribeModelOutput { - s.ModelName = &v - return s -} - -// SetPrimaryContainer sets the PrimaryContainer field's value. -func (s *DescribeModelOutput) SetPrimaryContainer(v *ContainerDefinition) *DescribeModelOutput { - s.PrimaryContainer = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeNotebookInstanceInput -type DescribeNotebookInstanceInput struct { - _ struct{} `type:"structure"` - - // The name of the notebook instance that you want information about. - // - // NotebookInstanceName is a required field - NotebookInstanceName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeNotebookInstanceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeNotebookInstanceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeNotebookInstanceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeNotebookInstanceInput"} - if s.NotebookInstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNotebookInstanceName sets the NotebookInstanceName field's value. -func (s *DescribeNotebookInstanceInput) SetNotebookInstanceName(v string) *DescribeNotebookInstanceInput { - s.NotebookInstanceName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeNotebookInstanceOutput -type DescribeNotebookInstanceOutput struct { - _ struct{} `type:"structure"` - - // A timestamp. 
Use this parameter to return the time when the notebook instance - // was created - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // If staus is failed, the reason it failed. - FailureReason *string `type:"string"` - - // The type of ML compute instance running on the notebook instance. - InstanceType *string `type:"string" enum:"InstanceType"` - - // AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the - // ML storage volume attached to the instance. - KmsKeyId *string `type:"string"` - - // A timestamp. Use this parameter to retrieve the time when the notebook instance - // was last modified. - LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Network interface IDs that Amazon SageMaker created at the time of creating - // the instance. - NetworkInterfaceId *string `type:"string"` - - // The Amazon Resource Name (ARN) of the notebook instance. - NotebookInstanceArn *string `type:"string"` - - // Name of the Amazon SageMaker notebook instance. - NotebookInstanceName *string `type:"string"` - - // The status of the notebook instance. - NotebookInstanceStatus *string `type:"string" enum:"NotebookInstanceStatus"` - - // Amazon Resource Name (ARN) of the IAM role associated with the instance. - RoleArn *string `min:"20" type:"string"` - - // The IDs of the VPC security groups. - SecurityGroups []*string `type:"list"` - - // The ID of the VPC subnet. - SubnetId *string `type:"string"` - - // The URL that you use to connect to the Jupyter notebook that is running in - // your notebook instance. - Url *string `type:"string"` -} - -// String returns the string representation -func (s DescribeNotebookInstanceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeNotebookInstanceOutput) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. 
-func (s *DescribeNotebookInstanceOutput) SetCreationTime(v time.Time) *DescribeNotebookInstanceOutput { - s.CreationTime = &v - return s -} - -// SetFailureReason sets the FailureReason field's value. -func (s *DescribeNotebookInstanceOutput) SetFailureReason(v string) *DescribeNotebookInstanceOutput { - s.FailureReason = &v - return s -} - -// SetInstanceType sets the InstanceType field's value. -func (s *DescribeNotebookInstanceOutput) SetInstanceType(v string) *DescribeNotebookInstanceOutput { - s.InstanceType = &v - return s -} - -// SetKmsKeyId sets the KmsKeyId field's value. -func (s *DescribeNotebookInstanceOutput) SetKmsKeyId(v string) *DescribeNotebookInstanceOutput { - s.KmsKeyId = &v - return s -} - -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *DescribeNotebookInstanceOutput) SetLastModifiedTime(v time.Time) *DescribeNotebookInstanceOutput { - s.LastModifiedTime = &v - return s -} - -// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. -func (s *DescribeNotebookInstanceOutput) SetNetworkInterfaceId(v string) *DescribeNotebookInstanceOutput { - s.NetworkInterfaceId = &v - return s -} - -// SetNotebookInstanceArn sets the NotebookInstanceArn field's value. -func (s *DescribeNotebookInstanceOutput) SetNotebookInstanceArn(v string) *DescribeNotebookInstanceOutput { - s.NotebookInstanceArn = &v - return s -} - -// SetNotebookInstanceName sets the NotebookInstanceName field's value. -func (s *DescribeNotebookInstanceOutput) SetNotebookInstanceName(v string) *DescribeNotebookInstanceOutput { - s.NotebookInstanceName = &v - return s -} - -// SetNotebookInstanceStatus sets the NotebookInstanceStatus field's value. -func (s *DescribeNotebookInstanceOutput) SetNotebookInstanceStatus(v string) *DescribeNotebookInstanceOutput { - s.NotebookInstanceStatus = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. 
-func (s *DescribeNotebookInstanceOutput) SetRoleArn(v string) *DescribeNotebookInstanceOutput { - s.RoleArn = &v - return s -} - -// SetSecurityGroups sets the SecurityGroups field's value. -func (s *DescribeNotebookInstanceOutput) SetSecurityGroups(v []*string) *DescribeNotebookInstanceOutput { - s.SecurityGroups = v - return s -} - -// SetSubnetId sets the SubnetId field's value. -func (s *DescribeNotebookInstanceOutput) SetSubnetId(v string) *DescribeNotebookInstanceOutput { - s.SubnetId = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *DescribeNotebookInstanceOutput) SetUrl(v string) *DescribeNotebookInstanceOutput { - s.Url = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeTrainingJobRequest -type DescribeTrainingJobInput struct { - _ struct{} `type:"structure"` - - // The name of the training job. - // - // TrainingJobName is a required field - TrainingJobName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeTrainingJobInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeTrainingJobInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeTrainingJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTrainingJobInput"} - if s.TrainingJobName == nil { - invalidParams.Add(request.NewErrParamRequired("TrainingJobName")) - } - if s.TrainingJobName != nil && len(*s.TrainingJobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TrainingJobName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTrainingJobName sets the TrainingJobName field's value. 
-func (s *DescribeTrainingJobInput) SetTrainingJobName(v string) *DescribeTrainingJobInput { - s.TrainingJobName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeTrainingJobResponse -type DescribeTrainingJobOutput struct { - _ struct{} `type:"structure"` - - // Information about the algorithm used for training, and algorithm metadata. - // - // AlgorithmSpecification is a required field - AlgorithmSpecification *AlgorithmSpecification `type:"structure" required:"true"` - - // A timestamp that indicates when the training job was created. - // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - - // If the training job failed, the reason it failed. - FailureReason *string `type:"string"` - - // Algorithm-specific parameters. - HyperParameters map[string]*string `type:"map"` - - // An array of Channel objects that describes each data input channel. - // - // InputDataConfig is a required field - InputDataConfig []*Channel `min:"1" type:"list" required:"true"` - - // A timestamp that indicates when the status of the training job was last modified. - LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // Information about the Amazon S3 location that is configured for storing model - // artifacts. - // - // ModelArtifacts is a required field - ModelArtifacts *ModelArtifacts `type:"structure" required:"true"` - - // The S3 path where model artifacts that you configured when creating the job - // are stored. Amazon SageMaker creates subfolders for model artifacts. - OutputDataConfig *OutputDataConfig `type:"structure"` - - // Resources, including ML compute instances and ML storage volumes, that are - // configured for model training. 
- // - // ResourceConfig is a required field - ResourceConfig *ResourceConfig `type:"structure" required:"true"` - - // The AWS Identity and Access Management (IAM) role configured for the training - // job. - RoleArn *string `min:"20" type:"string"` - - // Provides granular information about the system state. For more information, - // see TrainingJobStatus. - // - // SecondaryStatus is a required field - SecondaryStatus *string `type:"string" required:"true" enum:"SecondaryStatus"` - - // The condition under which to stop the training job. - // - // StoppingCondition is a required field - StoppingCondition *StoppingCondition `type:"structure" required:"true"` - - // A timestamp that indicates when model training ended. - TrainingEndTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The Amazon Resource Name (ARN) of the training job. - // - // TrainingJobArn is a required field - TrainingJobArn *string `type:"string" required:"true"` - - // Name of the model training job. - // - // TrainingJobName is a required field - TrainingJobName *string `min:"1" type:"string" required:"true"` - - // The status of the training job. - // - // For the InProgress status, Amazon SageMaker can return these secondary statuses: - // - // * Starting - Preparing for training. - // - // * Downloading - Optional stage for algorithms that support File training - // input mode. It indicates data is being downloaded to ML storage volumes. - // - // * Training - Training is in progress. - // - // * Uploading - Training is complete and model upload is in progress. - // - // For the Stopped training status, Amazon SageMaker can return these secondary - // statuses: - // - // * MaxRuntimeExceeded - Job stopped as a result of maximum allowed runtime - // exceeded. - // - // TrainingJobStatus is a required field - TrainingJobStatus *string `type:"string" required:"true" enum:"TrainingJobStatus"` - - // A timestamp that indicates when training started. 
- TrainingStartTime *time.Time `type:"timestamp" timestampFormat:"unix"` -} - -// String returns the string representation -func (s DescribeTrainingJobOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeTrainingJobOutput) GoString() string { - return s.String() -} - -// SetAlgorithmSpecification sets the AlgorithmSpecification field's value. -func (s *DescribeTrainingJobOutput) SetAlgorithmSpecification(v *AlgorithmSpecification) *DescribeTrainingJobOutput { - s.AlgorithmSpecification = v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *DescribeTrainingJobOutput) SetCreationTime(v time.Time) *DescribeTrainingJobOutput { - s.CreationTime = &v - return s -} - -// SetFailureReason sets the FailureReason field's value. -func (s *DescribeTrainingJobOutput) SetFailureReason(v string) *DescribeTrainingJobOutput { - s.FailureReason = &v - return s -} - -// SetHyperParameters sets the HyperParameters field's value. -func (s *DescribeTrainingJobOutput) SetHyperParameters(v map[string]*string) *DescribeTrainingJobOutput { - s.HyperParameters = v - return s -} - -// SetInputDataConfig sets the InputDataConfig field's value. -func (s *DescribeTrainingJobOutput) SetInputDataConfig(v []*Channel) *DescribeTrainingJobOutput { - s.InputDataConfig = v - return s -} - -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *DescribeTrainingJobOutput) SetLastModifiedTime(v time.Time) *DescribeTrainingJobOutput { - s.LastModifiedTime = &v - return s -} - -// SetModelArtifacts sets the ModelArtifacts field's value. -func (s *DescribeTrainingJobOutput) SetModelArtifacts(v *ModelArtifacts) *DescribeTrainingJobOutput { - s.ModelArtifacts = v - return s -} - -// SetOutputDataConfig sets the OutputDataConfig field's value. 
-func (s *DescribeTrainingJobOutput) SetOutputDataConfig(v *OutputDataConfig) *DescribeTrainingJobOutput { - s.OutputDataConfig = v - return s -} - -// SetResourceConfig sets the ResourceConfig field's value. -func (s *DescribeTrainingJobOutput) SetResourceConfig(v *ResourceConfig) *DescribeTrainingJobOutput { - s.ResourceConfig = v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *DescribeTrainingJobOutput) SetRoleArn(v string) *DescribeTrainingJobOutput { - s.RoleArn = &v - return s -} - -// SetSecondaryStatus sets the SecondaryStatus field's value. -func (s *DescribeTrainingJobOutput) SetSecondaryStatus(v string) *DescribeTrainingJobOutput { - s.SecondaryStatus = &v - return s -} - -// SetStoppingCondition sets the StoppingCondition field's value. -func (s *DescribeTrainingJobOutput) SetStoppingCondition(v *StoppingCondition) *DescribeTrainingJobOutput { - s.StoppingCondition = v - return s -} - -// SetTrainingEndTime sets the TrainingEndTime field's value. -func (s *DescribeTrainingJobOutput) SetTrainingEndTime(v time.Time) *DescribeTrainingJobOutput { - s.TrainingEndTime = &v - return s -} - -// SetTrainingJobArn sets the TrainingJobArn field's value. -func (s *DescribeTrainingJobOutput) SetTrainingJobArn(v string) *DescribeTrainingJobOutput { - s.TrainingJobArn = &v - return s -} - -// SetTrainingJobName sets the TrainingJobName field's value. -func (s *DescribeTrainingJobOutput) SetTrainingJobName(v string) *DescribeTrainingJobOutput { - s.TrainingJobName = &v - return s -} - -// SetTrainingJobStatus sets the TrainingJobStatus field's value. -func (s *DescribeTrainingJobOutput) SetTrainingJobStatus(v string) *DescribeTrainingJobOutput { - s.TrainingJobStatus = &v - return s -} - -// SetTrainingStartTime sets the TrainingStartTime field's value. 
-func (s *DescribeTrainingJobOutput) SetTrainingStartTime(v time.Time) *DescribeTrainingJobOutput { - s.TrainingStartTime = &v - return s -} - -// Specifies weight and capacity values for a production variant. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DesiredWeightAndCapacity -type DesiredWeightAndCapacity struct { - _ struct{} `type:"structure"` - - // The variant's capacity. - DesiredInstanceCount *int64 `min:"1" type:"integer"` - - // The variant's weight. - DesiredWeight *float64 `type:"float"` - - // The name of the variant to update. - // - // VariantName is a required field - VariantName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DesiredWeightAndCapacity) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DesiredWeightAndCapacity) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DesiredWeightAndCapacity) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DesiredWeightAndCapacity"} - if s.DesiredInstanceCount != nil && *s.DesiredInstanceCount < 1 { - invalidParams.Add(request.NewErrParamMinValue("DesiredInstanceCount", 1)) - } - if s.VariantName == nil { - invalidParams.Add(request.NewErrParamRequired("VariantName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDesiredInstanceCount sets the DesiredInstanceCount field's value. -func (s *DesiredWeightAndCapacity) SetDesiredInstanceCount(v int64) *DesiredWeightAndCapacity { - s.DesiredInstanceCount = &v - return s -} - -// SetDesiredWeight sets the DesiredWeight field's value. -func (s *DesiredWeightAndCapacity) SetDesiredWeight(v float64) *DesiredWeightAndCapacity { - s.DesiredWeight = &v - return s -} - -// SetVariantName sets the VariantName field's value. 
-func (s *DesiredWeightAndCapacity) SetVariantName(v string) *DesiredWeightAndCapacity { - s.VariantName = &v - return s -} - -// Provides summary information for an endpoint configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/EndpointConfigSummary -type EndpointConfigSummary struct { - _ struct{} `type:"structure"` - - // A timestamp that shows when the endpoint configuration was created. - // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - - // The Amazon Resource Name (ARN) of the endpoint configuration. - // - // EndpointConfigArn is a required field - EndpointConfigArn *string `min:"20" type:"string" required:"true"` - - // The name of the endpoint configuration. - // - // EndpointConfigName is a required field - EndpointConfigName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s EndpointConfigSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EndpointConfigSummary) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *EndpointConfigSummary) SetCreationTime(v time.Time) *EndpointConfigSummary { - s.CreationTime = &v - return s -} - -// SetEndpointConfigArn sets the EndpointConfigArn field's value. -func (s *EndpointConfigSummary) SetEndpointConfigArn(v string) *EndpointConfigSummary { - s.EndpointConfigArn = &v - return s -} - -// SetEndpointConfigName sets the EndpointConfigName field's value. -func (s *EndpointConfigSummary) SetEndpointConfigName(v string) *EndpointConfigSummary { - s.EndpointConfigName = &v - return s -} - -// Provides summary information for an endpoint. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/EndpointSummary -type EndpointSummary struct { - _ struct{} `type:"structure"` - - // A timestamp that shows when the endpoint was created. - // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - - // The Amazon Resource Name (ARN) of the endpoint. - // - // EndpointArn is a required field - EndpointArn *string `min:"20" type:"string" required:"true"` - - // The name of the endpoint. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` - - // The status of the endpoint. - // - // EndpointStatus is a required field - EndpointStatus *string `type:"string" required:"true" enum:"EndpointStatus"` - - // A timestamp that shows when the endpoint was last modified. - // - // LastModifiedTime is a required field - LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` -} - -// String returns the string representation -func (s EndpointSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EndpointSummary) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *EndpointSummary) SetCreationTime(v time.Time) *EndpointSummary { - s.CreationTime = &v - return s -} - -// SetEndpointArn sets the EndpointArn field's value. -func (s *EndpointSummary) SetEndpointArn(v string) *EndpointSummary { - s.EndpointArn = &v - return s -} - -// SetEndpointName sets the EndpointName field's value. -func (s *EndpointSummary) SetEndpointName(v string) *EndpointSummary { - s.EndpointName = &v - return s -} - -// SetEndpointStatus sets the EndpointStatus field's value. -func (s *EndpointSummary) SetEndpointStatus(v string) *EndpointSummary { - s.EndpointStatus = &v - return s -} - -// SetLastModifiedTime sets the LastModifiedTime field's value. 
-func (s *EndpointSummary) SetLastModifiedTime(v time.Time) *EndpointSummary { - s.LastModifiedTime = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointConfigsInput -type ListEndpointConfigsInput struct { - _ struct{} `type:"structure"` - - // A filter that returns only endpoint configurations created after the specified - // time (timestamp). - CreationTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A filter that returns only endpoint configurations created before the specified - // time (timestamp). - CreationTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The maximum number of training jobs to return in the response. - MaxResults *int64 `min:"1" type:"integer"` - - // A string in the endpoint configuration name. This filter returns only endpoint - // configurations whose name contains the specified string. - NameContains *string `type:"string"` - - // If the result of the previous ListEndpointConfig request was truncated, the - // response includes a NextToken. To retrieve the next set of endpoint configurations, - // use the token in the next request. - NextToken *string `type:"string"` - - // The field to sort results by. The default is CreationTime. - SortBy *string `type:"string" enum:"EndpointConfigSortKey"` - - // The sort order for results. The default is Ascending. - SortOrder *string `type:"string" enum:"OrderKey"` -} - -// String returns the string representation -func (s ListEndpointConfigsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListEndpointConfigsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListEndpointConfigsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListEndpointConfigsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCreationTimeAfter sets the CreationTimeAfter field's value. -func (s *ListEndpointConfigsInput) SetCreationTimeAfter(v time.Time) *ListEndpointConfigsInput { - s.CreationTimeAfter = &v - return s -} - -// SetCreationTimeBefore sets the CreationTimeBefore field's value. -func (s *ListEndpointConfigsInput) SetCreationTimeBefore(v time.Time) *ListEndpointConfigsInput { - s.CreationTimeBefore = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListEndpointConfigsInput) SetMaxResults(v int64) *ListEndpointConfigsInput { - s.MaxResults = &v - return s -} - -// SetNameContains sets the NameContains field's value. -func (s *ListEndpointConfigsInput) SetNameContains(v string) *ListEndpointConfigsInput { - s.NameContains = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListEndpointConfigsInput) SetNextToken(v string) *ListEndpointConfigsInput { - s.NextToken = &v - return s -} - -// SetSortBy sets the SortBy field's value. -func (s *ListEndpointConfigsInput) SetSortBy(v string) *ListEndpointConfigsInput { - s.SortBy = &v - return s -} - -// SetSortOrder sets the SortOrder field's value. -func (s *ListEndpointConfigsInput) SetSortOrder(v string) *ListEndpointConfigsInput { - s.SortOrder = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointConfigsOutput -type ListEndpointConfigsOutput struct { - _ struct{} `type:"structure"` - - // An array of endpoint configurations. 
- // - // EndpointConfigs is a required field - EndpointConfigs []*EndpointConfigSummary `type:"list" required:"true"` - - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of endpoint configurations, use it in the subsequent request - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s ListEndpointConfigsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListEndpointConfigsOutput) GoString() string { - return s.String() -} - -// SetEndpointConfigs sets the EndpointConfigs field's value. -func (s *ListEndpointConfigsOutput) SetEndpointConfigs(v []*EndpointConfigSummary) *ListEndpointConfigsOutput { - s.EndpointConfigs = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListEndpointConfigsOutput) SetNextToken(v string) *ListEndpointConfigsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointsInput -type ListEndpointsInput struct { - _ struct{} `type:"structure"` - - // A filter that returns only endpoints that were created after the specified - // time (timestamp). - CreationTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A filter that returns only endpoints that were created before the specified - // time (timestamp). - CreationTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A filter that returns only endpoints that were modified after the specified - // timestamp. - LastModifiedTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A filter that returns only endpoints that were modified before the specified - // timestamp. - LastModifiedTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The maximum number of endpoints to return in the response. - MaxResults *int64 `min:"1" type:"integer"` - - // A string in endpoint names. 
This filter returns only endpoints whose name - // contains the specified string. - NameContains *string `type:"string"` - - // If the result of a ListEndpoints request was truncated, the response includes - // a NextToken. To retrieve the next set of endpoints, use the token in the - // next request. - NextToken *string `type:"string"` - - // Sorts the list of results. The default is CreationTime. - SortBy *string `type:"string" enum:"EndpointSortKey"` - - // The sort order for results. The default is Ascending. - SortOrder *string `type:"string" enum:"OrderKey"` - - // A filter that returns only endpoints with the specified status. - StatusEquals *string `type:"string" enum:"EndpointStatus"` -} - -// String returns the string representation -func (s ListEndpointsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListEndpointsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListEndpointsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListEndpointsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCreationTimeAfter sets the CreationTimeAfter field's value. -func (s *ListEndpointsInput) SetCreationTimeAfter(v time.Time) *ListEndpointsInput { - s.CreationTimeAfter = &v - return s -} - -// SetCreationTimeBefore sets the CreationTimeBefore field's value. -func (s *ListEndpointsInput) SetCreationTimeBefore(v time.Time) *ListEndpointsInput { - s.CreationTimeBefore = &v - return s -} - -// SetLastModifiedTimeAfter sets the LastModifiedTimeAfter field's value. 
-func (s *ListEndpointsInput) SetLastModifiedTimeAfter(v time.Time) *ListEndpointsInput { - s.LastModifiedTimeAfter = &v - return s -} - -// SetLastModifiedTimeBefore sets the LastModifiedTimeBefore field's value. -func (s *ListEndpointsInput) SetLastModifiedTimeBefore(v time.Time) *ListEndpointsInput { - s.LastModifiedTimeBefore = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListEndpointsInput) SetMaxResults(v int64) *ListEndpointsInput { - s.MaxResults = &v - return s -} - -// SetNameContains sets the NameContains field's value. -func (s *ListEndpointsInput) SetNameContains(v string) *ListEndpointsInput { - s.NameContains = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListEndpointsInput) SetNextToken(v string) *ListEndpointsInput { - s.NextToken = &v - return s -} - -// SetSortBy sets the SortBy field's value. -func (s *ListEndpointsInput) SetSortBy(v string) *ListEndpointsInput { - s.SortBy = &v - return s -} - -// SetSortOrder sets the SortOrder field's value. -func (s *ListEndpointsInput) SetSortOrder(v string) *ListEndpointsInput { - s.SortOrder = &v - return s -} - -// SetStatusEquals sets the StatusEquals field's value. -func (s *ListEndpointsInput) SetStatusEquals(v string) *ListEndpointsInput { - s.StatusEquals = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListEndpointsOutput -type ListEndpointsOutput struct { - _ struct{} `type:"structure"` - - // An array or endpoint objects. - // - // Endpoints is a required field - Endpoints []*EndpointSummary `type:"list" required:"true"` - - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of training jobs, use it in the subsequent request. 
- NextToken *string `type:"string"` -} - -// String returns the string representation -func (s ListEndpointsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListEndpointsOutput) GoString() string { - return s.String() -} - -// SetEndpoints sets the Endpoints field's value. -func (s *ListEndpointsOutput) SetEndpoints(v []*EndpointSummary) *ListEndpointsOutput { - s.Endpoints = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListEndpointsOutput) SetNextToken(v string) *ListEndpointsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListModelsInput -type ListModelsInput struct { - _ struct{} `type:"structure"` - - // A filter that returns only models created after the specified time (timestamp). - CreationTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A filter that returns only models created before the specified time (timestamp). - CreationTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The maximum number of models to return in the response. - MaxResults *int64 `min:"1" type:"integer"` - - // A string in the training job name. This filter returns only models in the - // training job whose name contains the specified string. - NameContains *string `type:"string"` - - // If the response to a previous ListModels request was truncated, the response - // includes a NextToken. To retrieve the next set of models, use the token in - // the next request. - NextToken *string `type:"string"` - - // Sorts the list of results. The default is CreationTime. - SortBy *string `type:"string" enum:"ModelSortKey"` - - // The sort order for results. The default is Ascending. 
- SortOrder *string `type:"string" enum:"OrderKey"` -} - -// String returns the string representation -func (s ListModelsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListModelsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListModelsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListModelsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCreationTimeAfter sets the CreationTimeAfter field's value. -func (s *ListModelsInput) SetCreationTimeAfter(v time.Time) *ListModelsInput { - s.CreationTimeAfter = &v - return s -} - -// SetCreationTimeBefore sets the CreationTimeBefore field's value. -func (s *ListModelsInput) SetCreationTimeBefore(v time.Time) *ListModelsInput { - s.CreationTimeBefore = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListModelsInput) SetMaxResults(v int64) *ListModelsInput { - s.MaxResults = &v - return s -} - -// SetNameContains sets the NameContains field's value. -func (s *ListModelsInput) SetNameContains(v string) *ListModelsInput { - s.NameContains = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListModelsInput) SetNextToken(v string) *ListModelsInput { - s.NextToken = &v - return s -} - -// SetSortBy sets the SortBy field's value. -func (s *ListModelsInput) SetSortBy(v string) *ListModelsInput { - s.SortBy = &v - return s -} - -// SetSortOrder sets the SortOrder field's value. 
-func (s *ListModelsInput) SetSortOrder(v string) *ListModelsInput { - s.SortOrder = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListModelsOutput -type ListModelsOutput struct { - _ struct{} `type:"structure"` - - // An array of ModelSummary objects, each of which lists a model. - // - // Models is a required field - Models []*ModelSummary `type:"list" required:"true"` - - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of models, use it in the subsequent request. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s ListModelsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListModelsOutput) GoString() string { - return s.String() -} - -// SetModels sets the Models field's value. -func (s *ListModelsOutput) SetModels(v []*ModelSummary) *ListModelsOutput { - s.Models = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListModelsOutput) SetNextToken(v string) *ListModelsOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListNotebookInstancesInput -type ListNotebookInstancesInput struct { - _ struct{} `type:"structure"` - - // A filter that returns only notebook instances that were created after the - // specified time (timestamp). - CreationTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A filter that returns only notebook instances that were created before the - // specified time (timestamp). - CreationTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A filter that returns only notebook instances that were modified after the - // specified time (timestamp). 
- LastModifiedTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A filter that returns only notebook instances that were modified before the - // specified time (timestamp). - LastModifiedTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The maximum number of notebook instances to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A string in the notebook instances' name. This filter returns only notebook - // instances whose name contains the specified string. - NameContains *string `type:"string"` - - // If the previous call to the ListNotebookInstances is truncated, the response - // includes a NextToken. You can use this token in your subsequent ListNotebookInstances - // request to fetch the next set of notebook instances. - // - // You might specify a filter or a sort order in your request. When response - // is truncated, you must use the same values for the filer and sort order in - // the next request. - NextToken *string `type:"string"` - - // The field to sort results by. The default is Name. - SortBy *string `type:"string" enum:"NotebookInstanceSortKey"` - - // The sort order for results. - SortOrder *string `type:"string" enum:"NotebookInstanceSortOrder"` - - // A filter that returns only notebook instances with the specified status. - StatusEquals *string `type:"string" enum:"NotebookInstanceStatus"` -} - -// String returns the string representation -func (s ListNotebookInstancesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListNotebookInstancesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListNotebookInstancesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListNotebookInstancesInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCreationTimeAfter sets the CreationTimeAfter field's value. -func (s *ListNotebookInstancesInput) SetCreationTimeAfter(v time.Time) *ListNotebookInstancesInput { - s.CreationTimeAfter = &v - return s -} - -// SetCreationTimeBefore sets the CreationTimeBefore field's value. -func (s *ListNotebookInstancesInput) SetCreationTimeBefore(v time.Time) *ListNotebookInstancesInput { - s.CreationTimeBefore = &v - return s -} - -// SetLastModifiedTimeAfter sets the LastModifiedTimeAfter field's value. -func (s *ListNotebookInstancesInput) SetLastModifiedTimeAfter(v time.Time) *ListNotebookInstancesInput { - s.LastModifiedTimeAfter = &v - return s -} - -// SetLastModifiedTimeBefore sets the LastModifiedTimeBefore field's value. -func (s *ListNotebookInstancesInput) SetLastModifiedTimeBefore(v time.Time) *ListNotebookInstancesInput { - s.LastModifiedTimeBefore = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListNotebookInstancesInput) SetMaxResults(v int64) *ListNotebookInstancesInput { - s.MaxResults = &v - return s -} - -// SetNameContains sets the NameContains field's value. -func (s *ListNotebookInstancesInput) SetNameContains(v string) *ListNotebookInstancesInput { - s.NameContains = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListNotebookInstancesInput) SetNextToken(v string) *ListNotebookInstancesInput { - s.NextToken = &v - return s -} - -// SetSortBy sets the SortBy field's value. -func (s *ListNotebookInstancesInput) SetSortBy(v string) *ListNotebookInstancesInput { - s.SortBy = &v - return s -} - -// SetSortOrder sets the SortOrder field's value. 
-func (s *ListNotebookInstancesInput) SetSortOrder(v string) *ListNotebookInstancesInput { - s.SortOrder = &v - return s -} - -// SetStatusEquals sets the StatusEquals field's value. -func (s *ListNotebookInstancesInput) SetStatusEquals(v string) *ListNotebookInstancesInput { - s.StatusEquals = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListNotebookInstancesOutput -type ListNotebookInstancesOutput struct { - _ struct{} `type:"structure"` - - // If the response to the previous ListNotebookInstances request was truncated, - // Amazon SageMaker returns this token. To retrieve the next set of notebook - // instances, use the token in the next request. - NextToken *string `type:"string"` - - // An array of NotebookInstanceSummary objects, one for each notebook instance. - NotebookInstances []*NotebookInstanceSummary `type:"list"` -} - -// String returns the string representation -func (s ListNotebookInstancesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListNotebookInstancesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListNotebookInstancesOutput) SetNextToken(v string) *ListNotebookInstancesOutput { - s.NextToken = &v - return s -} - -// SetNotebookInstances sets the NotebookInstances field's value. -func (s *ListNotebookInstancesOutput) SetNotebookInstances(v []*NotebookInstanceSummary) *ListNotebookInstancesOutput { - s.NotebookInstances = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTagsInput -type ListTagsInput struct { - _ struct{} `type:"structure"` - - // Maximum number of tags to return. - MaxResults *int64 `min:"50" type:"integer"` - - // If the response to the previous ListTags request is truncated, Amazon SageMaker - // returns this token. To retrieve the next set of tags, use it in the subsequent - // request. 
- NextToken *string `type:"string"` - - // The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve. - // - // ResourceArn is a required field - ResourceArn *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ListTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTagsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTagsInput"} - if s.MaxResults != nil && *s.MaxResults < 50 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 50)) - } - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListTagsInput) SetMaxResults(v int64) *ListTagsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTagsInput) SetNextToken(v string) *ListTagsInput { - s.NextToken = &v - return s -} - -// SetResourceArn sets the ResourceArn field's value. -func (s *ListTagsInput) SetResourceArn(v string) *ListTagsInput { - s.ResourceArn = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTagsOutput -type ListTagsOutput struct { - _ struct{} `type:"structure"` - - // If response is truncated, Amazon SageMaker includes a token in the response. - // You can use this token in your subsequent request to fetch next set of tokens. - NextToken *string `type:"string"` - - // An array of Tag objects, each with a tag key and a value. 
- Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s ListTagsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTagsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTagsOutput) SetNextToken(v string) *ListTagsOutput { - s.NextToken = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *ListTagsOutput) SetTags(v []*Tag) *ListTagsOutput { - s.Tags = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTrainingJobsRequest -type ListTrainingJobsInput struct { - _ struct{} `type:"structure"` - - // A filter that only training jobs created after the specified time (timestamp). - CreationTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A filter that returns only training jobs created before the specified time - // (timestamp). - CreationTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A filter that returns only training jobs modified after the specified time - // (timestamp). - LastModifiedTimeAfter *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A filter that returns only training jobs modified before the specified time - // (timestamp). - LastModifiedTimeBefore *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The maximum number of training jobs to return in the response. - MaxResults *int64 `min:"1" type:"integer"` - - // A string in the training job name. This filter returns only models whose - // name contains the specified string. - NameContains *string `type:"string"` - - // If the result of the previous ListTrainingJobs request was truncated, the - // response includes a NextToken. To retrieve the next set of training jobs, - // use the token in the next request. - NextToken *string `type:"string"` - - // The field to sort results by. The default is CreationTime. 
- SortBy *string `type:"string" enum:"SortBy"` - - // The sort order for results. The default is Ascending. - SortOrder *string `type:"string" enum:"SortOrder"` - - // A filter that retrieves only training jobs with a specific status. - StatusEquals *string `type:"string" enum:"TrainingJobStatus"` -} - -// String returns the string representation -func (s ListTrainingJobsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTrainingJobsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListTrainingJobsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTrainingJobsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCreationTimeAfter sets the CreationTimeAfter field's value. -func (s *ListTrainingJobsInput) SetCreationTimeAfter(v time.Time) *ListTrainingJobsInput { - s.CreationTimeAfter = &v - return s -} - -// SetCreationTimeBefore sets the CreationTimeBefore field's value. -func (s *ListTrainingJobsInput) SetCreationTimeBefore(v time.Time) *ListTrainingJobsInput { - s.CreationTimeBefore = &v - return s -} - -// SetLastModifiedTimeAfter sets the LastModifiedTimeAfter field's value. -func (s *ListTrainingJobsInput) SetLastModifiedTimeAfter(v time.Time) *ListTrainingJobsInput { - s.LastModifiedTimeAfter = &v - return s -} - -// SetLastModifiedTimeBefore sets the LastModifiedTimeBefore field's value. -func (s *ListTrainingJobsInput) SetLastModifiedTimeBefore(v time.Time) *ListTrainingJobsInput { - s.LastModifiedTimeBefore = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. 
-func (s *ListTrainingJobsInput) SetMaxResults(v int64) *ListTrainingJobsInput { - s.MaxResults = &v - return s -} - -// SetNameContains sets the NameContains field's value. -func (s *ListTrainingJobsInput) SetNameContains(v string) *ListTrainingJobsInput { - s.NameContains = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTrainingJobsInput) SetNextToken(v string) *ListTrainingJobsInput { - s.NextToken = &v - return s -} - -// SetSortBy sets the SortBy field's value. -func (s *ListTrainingJobsInput) SetSortBy(v string) *ListTrainingJobsInput { - s.SortBy = &v - return s -} - -// SetSortOrder sets the SortOrder field's value. -func (s *ListTrainingJobsInput) SetSortOrder(v string) *ListTrainingJobsInput { - s.SortOrder = &v - return s -} - -// SetStatusEquals sets the StatusEquals field's value. -func (s *ListTrainingJobsInput) SetStatusEquals(v string) *ListTrainingJobsInput { - s.StatusEquals = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListTrainingJobsResponse -type ListTrainingJobsOutput struct { - _ struct{} `type:"structure"` - - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of training jobs, use it in the subsequent request. - NextToken *string `type:"string"` - - // An array of TrainingJobSummary objects, each listing a training job. - // - // TrainingJobSummaries is a required field - TrainingJobSummaries []*TrainingJobSummary `type:"list" required:"true"` -} - -// String returns the string representation -func (s ListTrainingJobsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTrainingJobsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListTrainingJobsOutput) SetNextToken(v string) *ListTrainingJobsOutput { - s.NextToken = &v - return s -} - -// SetTrainingJobSummaries sets the TrainingJobSummaries field's value. -func (s *ListTrainingJobsOutput) SetTrainingJobSummaries(v []*TrainingJobSummary) *ListTrainingJobsOutput { - s.TrainingJobSummaries = v - return s -} - -// Provides information about the location that is configured for storing model -// artifacts. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ModelArtifacts -type ModelArtifacts struct { - _ struct{} `type:"structure"` - - // The path of the S3 object that contains the model artifacts. For example, - // s3://bucket-name/keynameprefix/model.tar.gz. - // - // S3ModelArtifacts is a required field - S3ModelArtifacts *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ModelArtifacts) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ModelArtifacts) GoString() string { - return s.String() -} - -// SetS3ModelArtifacts sets the S3ModelArtifacts field's value. -func (s *ModelArtifacts) SetS3ModelArtifacts(v string) *ModelArtifacts { - s.S3ModelArtifacts = &v - return s -} - -// Provides summary information about a model. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ModelSummary -type ModelSummary struct { - _ struct{} `type:"structure"` - - // A timestamp that indicates when the model was created. - // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - - // The Amazon Resource Name (ARN) of the model. - // - // ModelArn is a required field - ModelArn *string `min:"20" type:"string" required:"true"` - - // The name of the model that you want a summary for. 
- // - // ModelName is a required field - ModelName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ModelSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ModelSummary) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *ModelSummary) SetCreationTime(v time.Time) *ModelSummary { - s.CreationTime = &v - return s -} - -// SetModelArn sets the ModelArn field's value. -func (s *ModelSummary) SetModelArn(v string) *ModelSummary { - s.ModelArn = &v - return s -} - -// SetModelName sets the ModelName field's value. -func (s *ModelSummary) SetModelName(v string) *ModelSummary { - s.ModelName = &v - return s -} - -// Provides summary information for an Amazon SageMaker notebook instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/NotebookInstanceSummary -type NotebookInstanceSummary struct { - _ struct{} `type:"structure"` - - // A timestamp that shows when the notebook instance was created. - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The type of ML compute instance that the notebook instance is running on. - InstanceType *string `type:"string" enum:"InstanceType"` - - // A timestamp that shows when the notebook instance was last modified. - LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The Amazon Resource Name (ARN) of the notebook instance. - // - // NotebookInstanceArn is a required field - NotebookInstanceArn *string `type:"string" required:"true"` - - // The name of the notebook instance that you want a summary for. - // - // NotebookInstanceName is a required field - NotebookInstanceName *string `type:"string" required:"true"` - - // The status of the notebook instance. 
- NotebookInstanceStatus *string `type:"string" enum:"NotebookInstanceStatus"` - - // The URL that you use to connect to the Jupyter instance running in your notebook - // instance. - Url *string `type:"string"` -} - -// String returns the string representation -func (s NotebookInstanceSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotebookInstanceSummary) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *NotebookInstanceSummary) SetCreationTime(v time.Time) *NotebookInstanceSummary { - s.CreationTime = &v - return s -} - -// SetInstanceType sets the InstanceType field's value. -func (s *NotebookInstanceSummary) SetInstanceType(v string) *NotebookInstanceSummary { - s.InstanceType = &v - return s -} - -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *NotebookInstanceSummary) SetLastModifiedTime(v time.Time) *NotebookInstanceSummary { - s.LastModifiedTime = &v - return s -} - -// SetNotebookInstanceArn sets the NotebookInstanceArn field's value. -func (s *NotebookInstanceSummary) SetNotebookInstanceArn(v string) *NotebookInstanceSummary { - s.NotebookInstanceArn = &v - return s -} - -// SetNotebookInstanceName sets the NotebookInstanceName field's value. -func (s *NotebookInstanceSummary) SetNotebookInstanceName(v string) *NotebookInstanceSummary { - s.NotebookInstanceName = &v - return s -} - -// SetNotebookInstanceStatus sets the NotebookInstanceStatus field's value. -func (s *NotebookInstanceSummary) SetNotebookInstanceStatus(v string) *NotebookInstanceSummary { - s.NotebookInstanceStatus = &v - return s -} - -// SetUrl sets the Url field's value. -func (s *NotebookInstanceSummary) SetUrl(v string) *NotebookInstanceSummary { - s.Url = &v - return s -} - -// Provides information about how to store model training results (model artifacts). 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/OutputDataConfig -type OutputDataConfig struct { - _ struct{} `type:"structure"` - - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt the model artifacts at rest using Amazon S3 server-side encryption. - // - // If the configuration of the output S3 bucket requires server-side encryption - // for objects, and you don't provide the KMS key ID, Amazon SageMaker uses - // the default service key. For more information, see KMS-Managed Encryption - // Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) - // in Amazon Simple Storage Service developer guide. - // - // The KMS key policy must grant permission to the IAM role you specify in your - // CreateTrainingJob request. Using Key Policies in AWS KMS (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) - // in the AWS Key Management Service Developer Guide. - KmsKeyId *string `type:"string"` - - // Identifies the S3 path where you want Amazon SageMaker to store the model - // artifacts. For example, s3://bucket-name/key-name-prefix. - // - // S3OutputPath is a required field - S3OutputPath *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s OutputDataConfig) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputDataConfig) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *OutputDataConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OutputDataConfig"} - if s.S3OutputPath == nil { - invalidParams.Add(request.NewErrParamRequired("S3OutputPath")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKmsKeyId sets the KmsKeyId field's value. 
-func (s *OutputDataConfig) SetKmsKeyId(v string) *OutputDataConfig { - s.KmsKeyId = &v - return s -} - -// SetS3OutputPath sets the S3OutputPath field's value. -func (s *OutputDataConfig) SetS3OutputPath(v string) *OutputDataConfig { - s.S3OutputPath = &v - return s -} - -// Identifies a model that you want to host and the resources to deploy for -// hosting it. If you are deploying multiple models, tell Amazon SageMaker how -// to distribute traffic among the models by specifying variant weights. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ProductionVariant -type ProductionVariant struct { - _ struct{} `type:"structure"` - - // Number of instances to launch initially. - // - // InitialInstanceCount is a required field - InitialInstanceCount *int64 `min:"1" type:"integer" required:"true"` - - // Determines initial traffic distribution among all of the models that you - // specify in the endpoint configuration. The traffic to a production variant - // is determined by the ratio of the VariantWeight to the sum of all VariantWeight - // values across all ProductionVariants. If unspecified, it defaults to 1.0. - InitialVariantWeight *float64 `type:"float"` - - // The ML compute instance type. - // - // InstanceType is a required field - InstanceType *string `type:"string" required:"true" enum:"ProductionVariantInstanceType"` - - // The name of the model that you want to host. This is the name that you specified - // when creating the model. - // - // ModelName is a required field - ModelName *string `type:"string" required:"true"` - - // The name of the production variant. 
- // - // VariantName is a required field - VariantName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ProductionVariant) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ProductionVariant) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ProductionVariant) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ProductionVariant"} - if s.InitialInstanceCount == nil { - invalidParams.Add(request.NewErrParamRequired("InitialInstanceCount")) - } - if s.InitialInstanceCount != nil && *s.InitialInstanceCount < 1 { - invalidParams.Add(request.NewErrParamMinValue("InitialInstanceCount", 1)) - } - if s.InstanceType == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceType")) - } - if s.ModelName == nil { - invalidParams.Add(request.NewErrParamRequired("ModelName")) - } - if s.VariantName == nil { - invalidParams.Add(request.NewErrParamRequired("VariantName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetInitialInstanceCount sets the InitialInstanceCount field's value. -func (s *ProductionVariant) SetInitialInstanceCount(v int64) *ProductionVariant { - s.InitialInstanceCount = &v - return s -} - -// SetInitialVariantWeight sets the InitialVariantWeight field's value. -func (s *ProductionVariant) SetInitialVariantWeight(v float64) *ProductionVariant { - s.InitialVariantWeight = &v - return s -} - -// SetInstanceType sets the InstanceType field's value. -func (s *ProductionVariant) SetInstanceType(v string) *ProductionVariant { - s.InstanceType = &v - return s -} - -// SetModelName sets the ModelName field's value. -func (s *ProductionVariant) SetModelName(v string) *ProductionVariant { - s.ModelName = &v - return s -} - -// SetVariantName sets the VariantName field's value. 
-func (s *ProductionVariant) SetVariantName(v string) *ProductionVariant { - s.VariantName = &v - return s -} - -// Describes weight and capacities for a production variant associated with -// an endpoint. If you sent a request to the UpdateWeightAndCapacities API and -// the endpoint status is Updating, you get different desired and current values. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ProductionVariantSummary -type ProductionVariantSummary struct { - _ struct{} `type:"structure"` - - // The number of instances associated with the variant. - CurrentInstanceCount *int64 `min:"1" type:"integer"` - - // The weight associated with the variant. - CurrentWeight *float64 `type:"float"` - - // The number of instances requested in the UpdateWeightAndCapacities request. - DesiredInstanceCount *int64 `min:"1" type:"integer"` - - // The requested weight, as specified in the UpdateWeightAndCapacities request. - DesiredWeight *float64 `type:"float"` - - // The name of the variant. - // - // VariantName is a required field - VariantName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ProductionVariantSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ProductionVariantSummary) GoString() string { - return s.String() -} - -// SetCurrentInstanceCount sets the CurrentInstanceCount field's value. -func (s *ProductionVariantSummary) SetCurrentInstanceCount(v int64) *ProductionVariantSummary { - s.CurrentInstanceCount = &v - return s -} - -// SetCurrentWeight sets the CurrentWeight field's value. -func (s *ProductionVariantSummary) SetCurrentWeight(v float64) *ProductionVariantSummary { - s.CurrentWeight = &v - return s -} - -// SetDesiredInstanceCount sets the DesiredInstanceCount field's value. 
-func (s *ProductionVariantSummary) SetDesiredInstanceCount(v int64) *ProductionVariantSummary { - s.DesiredInstanceCount = &v - return s -} - -// SetDesiredWeight sets the DesiredWeight field's value. -func (s *ProductionVariantSummary) SetDesiredWeight(v float64) *ProductionVariantSummary { - s.DesiredWeight = &v - return s -} - -// SetVariantName sets the VariantName field's value. -func (s *ProductionVariantSummary) SetVariantName(v string) *ProductionVariantSummary { - s.VariantName = &v - return s -} - -// Describes the resources, including ML compute instances and ML storage volumes, -// to use for model training. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ResourceConfig -type ResourceConfig struct { - _ struct{} `type:"structure"` - - // The number of ML compute instances to use. For distributed training, provide - // a value greater than 1. - // - // InstanceCount is a required field - InstanceCount *int64 `min:"1" type:"integer" required:"true"` - - // The ML compute instance type. - // - // InstanceType is a required field - InstanceType *string `type:"string" required:"true" enum:"TrainingInstanceType"` - - // The size of the ML storage volume that you want to provision. - // - // ML storage volumes store model artifacts and incremental states. Training - // algorithms might also use the ML storage volume for scratch space. If you - // want to store the training data in the ML storage volume, choose File as - // the TrainingInputMode in the algorithm specification. - // - // You must specify sufficient ML storage for your scenario. - // - // Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume - // type. 
- // - // VolumeSizeInGB is a required field - VolumeSizeInGB *int64 `min:"1" type:"integer" required:"true"` -} - -// String returns the string representation -func (s ResourceConfig) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ResourceConfig) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ResourceConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResourceConfig"} - if s.InstanceCount == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceCount")) - } - if s.InstanceCount != nil && *s.InstanceCount < 1 { - invalidParams.Add(request.NewErrParamMinValue("InstanceCount", 1)) - } - if s.InstanceType == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceType")) - } - if s.VolumeSizeInGB == nil { - invalidParams.Add(request.NewErrParamRequired("VolumeSizeInGB")) - } - if s.VolumeSizeInGB != nil && *s.VolumeSizeInGB < 1 { - invalidParams.Add(request.NewErrParamMinValue("VolumeSizeInGB", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetInstanceCount sets the InstanceCount field's value. -func (s *ResourceConfig) SetInstanceCount(v int64) *ResourceConfig { - s.InstanceCount = &v - return s -} - -// SetInstanceType sets the InstanceType field's value. -func (s *ResourceConfig) SetInstanceType(v string) *ResourceConfig { - s.InstanceType = &v - return s -} - -// SetVolumeSizeInGB sets the VolumeSizeInGB field's value. -func (s *ResourceConfig) SetVolumeSizeInGB(v int64) *ResourceConfig { - s.VolumeSizeInGB = &v - return s -} - -// Describes the S3 data source. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/S3DataSource -type S3DataSource struct { - _ struct{} `type:"structure"` - - // If you want Amazon SageMaker to replicate the entire dataset on each ML compute - // instance that is launched for model training, specify FullyReplicated. - // - // If you want Amazon SageMaker to replicate a subset of data on each ML compute - // instance that is launched for model training, specify ShardedByS3Key. If - // there are n ML compute instances launched for a training job, each instance - // gets approximately 1/n of the number of S3 objects. In this case, model training - // on each machine uses only the subset of training data. - // - // Don't choose more ML compute instances for training than available S3 objects. - // If you do, some nodes won't get any data and you will pay for nodes that - // aren't getting any training data. This applies in both FILE and PIPE modes. - // Keep this in mind when developing algorithms. - // - // In distributed training, where you use multiple ML compute EC2 instances, - // you might choose ShardedByS3Key. If the algorithm requires copying training - // data to the ML storage volume (when TrainingInputMode is set to File), this - // copies 1/n of the number of objects. - S3DataDistributionType *string `type:"string" enum:"S3DataDistribution"` - - // If you choose S3Prefix, S3Uri identifies a key name prefix. Amazon SageMaker - // uses all objects with the specified key name prefix for model training. - // - // If you choose ManifestFile, S3Uri identifies an object that is a manifest - // file containing a list of object keys that you want Amazon SageMaker to use - // for model training. - // - // S3DataType is a required field - S3DataType *string `type:"string" required:"true" enum:"S3DataType"` - - // Depending on the value specified for the S3DataType, identifies either a - // key name prefix or a manifest. 
For example: - // - // * A key name prefix might look like this: s3://bucketname/exampleprefix. - // - // - // * A manifest might look like this: s3://bucketname/example.manifest - // - // The manifest is an S3 object which is a JSON file with the following format: - // - // - // [ - // - // {"prefix": "s3://customer_bucket/some/prefix/"}, - // - // "relative/path/to/custdata-1", - // - // "relative/path/custdata-2", - // - // ... - // - // ] - // - // The preceding JSON matches the following s3Uris: - // - // s3://customer_bucket/some/prefix/relative/path/to/custdata-1 - // - // s3://customer_bucket/some/prefix/relative/path/custdata-1 - // - // ... - // - // The complete set of s3uris in this manifest constitutes the input data for - // the channel for this datasource. The object that each s3uris points to - // must readable by the IAM role that Amazon SageMaker uses to perform tasks - // on your behalf. - // - // S3Uri is a required field - S3Uri *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s S3DataSource) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s S3DataSource) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *S3DataSource) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "S3DataSource"} - if s.S3DataType == nil { - invalidParams.Add(request.NewErrParamRequired("S3DataType")) - } - if s.S3Uri == nil { - invalidParams.Add(request.NewErrParamRequired("S3Uri")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetS3DataDistributionType sets the S3DataDistributionType field's value. -func (s *S3DataSource) SetS3DataDistributionType(v string) *S3DataSource { - s.S3DataDistributionType = &v - return s -} - -// SetS3DataType sets the S3DataType field's value. 
-func (s *S3DataSource) SetS3DataType(v string) *S3DataSource { - s.S3DataType = &v - return s -} - -// SetS3Uri sets the S3Uri field's value. -func (s *S3DataSource) SetS3Uri(v string) *S3DataSource { - s.S3Uri = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StartNotebookInstanceInput -type StartNotebookInstanceInput struct { - _ struct{} `type:"structure"` - - // The name of the notebook instance to start. - // - // NotebookInstanceName is a required field - NotebookInstanceName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s StartNotebookInstanceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartNotebookInstanceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StartNotebookInstanceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartNotebookInstanceInput"} - if s.NotebookInstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNotebookInstanceName sets the NotebookInstanceName field's value. 
-func (s *StartNotebookInstanceInput) SetNotebookInstanceName(v string) *StartNotebookInstanceInput { - s.NotebookInstanceName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StartNotebookInstanceOutput -type StartNotebookInstanceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s StartNotebookInstanceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartNotebookInstanceOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopNotebookInstanceInput -type StopNotebookInstanceInput struct { - _ struct{} `type:"structure"` - - // The name of the notebook instance to terminate. - // - // NotebookInstanceName is a required field - NotebookInstanceName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s StopNotebookInstanceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopNotebookInstanceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopNotebookInstanceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopNotebookInstanceInput"} - if s.NotebookInstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNotebookInstanceName sets the NotebookInstanceName field's value. 
-func (s *StopNotebookInstanceInput) SetNotebookInstanceName(v string) *StopNotebookInstanceInput { - s.NotebookInstanceName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopNotebookInstanceOutput -type StopNotebookInstanceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s StopNotebookInstanceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopNotebookInstanceOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopTrainingJobRequest -type StopTrainingJobInput struct { - _ struct{} `type:"structure"` - - // The name of the training job to stop. - // - // TrainingJobName is a required field - TrainingJobName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s StopTrainingJobInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopTrainingJobInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopTrainingJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopTrainingJobInput"} - if s.TrainingJobName == nil { - invalidParams.Add(request.NewErrParamRequired("TrainingJobName")) - } - if s.TrainingJobName != nil && len(*s.TrainingJobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TrainingJobName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTrainingJobName sets the TrainingJobName field's value. 
-func (s *StopTrainingJobInput) SetTrainingJobName(v string) *StopTrainingJobInput { - s.TrainingJobName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopTrainingJobOutput -type StopTrainingJobOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s StopTrainingJobOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopTrainingJobOutput) GoString() string { - return s.String() -} - -// Specifies how long model training can run. When model training reaches the -// limit, Amazon SageMaker ends the training job. Use this API to cap model -// training cost. -// -// To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which -// delays job termination for120 seconds. Algorithms might use this 120-second -// window to save the model artifacts, so the results of training is not lost. -// -// Training algorithms provided by Amazon SageMaker automatically saves the -// intermediate results of a model training job (it is best effort case, as -// model might not be ready to save as some stages, for example training just -// started). This intermediate data is a valid model artifact. You can use it -// to create a model (CreateModel). -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StoppingCondition -type StoppingCondition struct { - _ struct{} `type:"structure"` - - // The maximum length of time, in seconds, that the training job can run. If - // model training does not complete during this time, Amazon SageMaker ends - // the job. If value is not specified, default value is 1 day. Maximum value - // is 5 days. 
- MaxRuntimeInSeconds *int64 `min:"1" type:"integer"` -} - -// String returns the string representation -func (s StoppingCondition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StoppingCondition) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StoppingCondition) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StoppingCondition"} - if s.MaxRuntimeInSeconds != nil && *s.MaxRuntimeInSeconds < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxRuntimeInSeconds", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxRuntimeInSeconds sets the MaxRuntimeInSeconds field's value. -func (s *StoppingCondition) SetMaxRuntimeInSeconds(v int64) *StoppingCondition { - s.MaxRuntimeInSeconds = &v - return s -} - -// Describes a tag. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/Tag -type Tag struct { - _ struct{} `type:"structure"` - - // The tag key. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // The tag value. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} - -// Provides summary information about a training job. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/TrainingJobSummary -type TrainingJobSummary struct { - _ struct{} `type:"structure"` - - // A timestamp that shows when the training job was created. - // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - - // Timestamp when the training job was last modified. - LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A timestamp that shows when the training job ended. This field is set only - // if the training job has one of the terminal statuses (Completed, Failed, - // or Stopped). - TrainingEndTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The Amazon Resource Name (ARN) of the training job. - // - // TrainingJobArn is a required field - TrainingJobArn *string `type:"string" required:"true"` - - // The name of the training job that you want a summary for. - // - // TrainingJobName is a required field - TrainingJobName *string `min:"1" type:"string" required:"true"` - - // The status of the training job. 
- // - // TrainingJobStatus is a required field - TrainingJobStatus *string `type:"string" required:"true" enum:"TrainingJobStatus"` -} - -// String returns the string representation -func (s TrainingJobSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TrainingJobSummary) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *TrainingJobSummary) SetCreationTime(v time.Time) *TrainingJobSummary { - s.CreationTime = &v - return s -} - -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *TrainingJobSummary) SetLastModifiedTime(v time.Time) *TrainingJobSummary { - s.LastModifiedTime = &v - return s -} - -// SetTrainingEndTime sets the TrainingEndTime field's value. -func (s *TrainingJobSummary) SetTrainingEndTime(v time.Time) *TrainingJobSummary { - s.TrainingEndTime = &v - return s -} - -// SetTrainingJobArn sets the TrainingJobArn field's value. -func (s *TrainingJobSummary) SetTrainingJobArn(v string) *TrainingJobSummary { - s.TrainingJobArn = &v - return s -} - -// SetTrainingJobName sets the TrainingJobName field's value. -func (s *TrainingJobSummary) SetTrainingJobName(v string) *TrainingJobSummary { - s.TrainingJobName = &v - return s -} - -// SetTrainingJobStatus sets the TrainingJobStatus field's value. -func (s *TrainingJobSummary) SetTrainingJobStatus(v string) *TrainingJobSummary { - s.TrainingJobStatus = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointInput -type UpdateEndpointInput struct { - _ struct{} `type:"structure"` - - // The name of the new endpoint configuration. - // - // EndpointConfigName is a required field - EndpointConfigName *string `type:"string" required:"true"` - - // The name of the endpoint whose configuration you want to update. 
- // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateEndpointInput"} - if s.EndpointConfigName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointConfigName")) - } - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointConfigName sets the EndpointConfigName field's value. -func (s *UpdateEndpointInput) SetEndpointConfigName(v string) *UpdateEndpointInput { - s.EndpointConfigName = &v - return s -} - -// SetEndpointName sets the EndpointName field's value. -func (s *UpdateEndpointInput) SetEndpointName(v string) *UpdateEndpointInput { - s.EndpointName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointOutput -type UpdateEndpointOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the endpoint. - // - // EndpointArn is a required field - EndpointArn *string `min:"20" type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateEndpointOutput) GoString() string { - return s.String() -} - -// SetEndpointArn sets the EndpointArn field's value. 
-func (s *UpdateEndpointOutput) SetEndpointArn(v string) *UpdateEndpointOutput { - s.EndpointArn = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointWeightsAndCapacitiesInput -type UpdateEndpointWeightsAndCapacitiesInput struct { - _ struct{} `type:"structure"` - - // An object that provides new capacity and weight values for a variant. - // - // DesiredWeightsAndCapacities is a required field - DesiredWeightsAndCapacities []*DesiredWeightAndCapacity `min:"1" type:"list" required:"true"` - - // The name of an existing Amazon SageMaker endpoint. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateEndpointWeightsAndCapacitiesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateEndpointWeightsAndCapacitiesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateEndpointWeightsAndCapacitiesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateEndpointWeightsAndCapacitiesInput"} - if s.DesiredWeightsAndCapacities == nil { - invalidParams.Add(request.NewErrParamRequired("DesiredWeightsAndCapacities")) - } - if s.DesiredWeightsAndCapacities != nil && len(s.DesiredWeightsAndCapacities) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DesiredWeightsAndCapacities", 1)) - } - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) - } - if s.DesiredWeightsAndCapacities != nil { - for i, v := range s.DesiredWeightsAndCapacities { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DesiredWeightsAndCapacities", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDesiredWeightsAndCapacities sets the DesiredWeightsAndCapacities field's value. -func (s *UpdateEndpointWeightsAndCapacitiesInput) SetDesiredWeightsAndCapacities(v []*DesiredWeightAndCapacity) *UpdateEndpointWeightsAndCapacitiesInput { - s.DesiredWeightsAndCapacities = v - return s -} - -// SetEndpointName sets the EndpointName field's value. -func (s *UpdateEndpointWeightsAndCapacitiesInput) SetEndpointName(v string) *UpdateEndpointWeightsAndCapacitiesInput { - s.EndpointName = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateEndpointWeightsAndCapacitiesOutput -type UpdateEndpointWeightsAndCapacitiesOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the updated endpoint. 
- // - // EndpointArn is a required field - EndpointArn *string `min:"20" type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateEndpointWeightsAndCapacitiesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateEndpointWeightsAndCapacitiesOutput) GoString() string { - return s.String() -} - -// SetEndpointArn sets the EndpointArn field's value. -func (s *UpdateEndpointWeightsAndCapacitiesOutput) SetEndpointArn(v string) *UpdateEndpointWeightsAndCapacitiesOutput { - s.EndpointArn = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateNotebookInstanceInput -type UpdateNotebookInstanceInput struct { - _ struct{} `type:"structure"` - - // The Amazon ML compute instance type. - InstanceType *string `type:"string" enum:"InstanceType"` - - // The name of the notebook instance to update. - // - // NotebookInstanceName is a required field - NotebookInstanceName *string `type:"string" required:"true"` - - // Amazon Resource Name (ARN) of the IAM role to associate with the instance. - RoleArn *string `min:"20" type:"string"` -} - -// String returns the string representation -func (s UpdateNotebookInstanceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateNotebookInstanceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateNotebookInstanceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateNotebookInstanceInput"} - if s.NotebookInstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookInstanceName")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetInstanceType sets the InstanceType field's value. -func (s *UpdateNotebookInstanceInput) SetInstanceType(v string) *UpdateNotebookInstanceInput { - s.InstanceType = &v - return s -} - -// SetNotebookInstanceName sets the NotebookInstanceName field's value. -func (s *UpdateNotebookInstanceInput) SetNotebookInstanceName(v string) *UpdateNotebookInstanceInput { - s.NotebookInstanceName = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *UpdateNotebookInstanceInput) SetRoleArn(v string) *UpdateNotebookInstanceInput { - s.RoleArn = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/UpdateNotebookInstanceOutput -type UpdateNotebookInstanceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UpdateNotebookInstanceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateNotebookInstanceOutput) GoString() string { - return s.String() -} - -const ( - // CompressionTypeNone is a CompressionType enum value - CompressionTypeNone = "None" - - // CompressionTypeGzip is a CompressionType enum value - CompressionTypeGzip = "Gzip" -) - -const ( - // EndpointConfigSortKeyName is a EndpointConfigSortKey enum value - EndpointConfigSortKeyName = "Name" - - // EndpointConfigSortKeyCreationTime is a EndpointConfigSortKey enum value - EndpointConfigSortKeyCreationTime = "CreationTime" -) - -const ( - // EndpointSortKeyName is a EndpointSortKey enum value - 
EndpointSortKeyName = "Name" - - // EndpointSortKeyCreationTime is a EndpointSortKey enum value - EndpointSortKeyCreationTime = "CreationTime" - - // EndpointSortKeyStatus is a EndpointSortKey enum value - EndpointSortKeyStatus = "Status" -) - -const ( - // EndpointStatusOutOfService is a EndpointStatus enum value - EndpointStatusOutOfService = "OutOfService" - - // EndpointStatusCreating is a EndpointStatus enum value - EndpointStatusCreating = "Creating" - - // EndpointStatusUpdating is a EndpointStatus enum value - EndpointStatusUpdating = "Updating" - - // EndpointStatusRollingBack is a EndpointStatus enum value - EndpointStatusRollingBack = "RollingBack" - - // EndpointStatusInService is a EndpointStatus enum value - EndpointStatusInService = "InService" - - // EndpointStatusDeleting is a EndpointStatus enum value - EndpointStatusDeleting = "Deleting" - - // EndpointStatusFailed is a EndpointStatus enum value - EndpointStatusFailed = "Failed" -) - -const ( - // InstanceTypeMlT2Medium is a InstanceType enum value - InstanceTypeMlT2Medium = "ml.t2.medium" - - // InstanceTypeMlM4Xlarge is a InstanceType enum value - InstanceTypeMlM4Xlarge = "ml.m4.xlarge" - - // InstanceTypeMlP2Xlarge is a InstanceType enum value - InstanceTypeMlP2Xlarge = "ml.p2.xlarge" -) - -const ( - // ModelSortKeyName is a ModelSortKey enum value - ModelSortKeyName = "Name" - - // ModelSortKeyCreationTime is a ModelSortKey enum value - ModelSortKeyCreationTime = "CreationTime" -) - -const ( - // NotebookInstanceSortKeyName is a NotebookInstanceSortKey enum value - NotebookInstanceSortKeyName = "Name" - - // NotebookInstanceSortKeyCreationTime is a NotebookInstanceSortKey enum value - NotebookInstanceSortKeyCreationTime = "CreationTime" - - // NotebookInstanceSortKeyStatus is a NotebookInstanceSortKey enum value - NotebookInstanceSortKeyStatus = "Status" -) - -const ( - // NotebookInstanceSortOrderAscending is a NotebookInstanceSortOrder enum value - NotebookInstanceSortOrderAscending = 
"Ascending" - - // NotebookInstanceSortOrderDescending is a NotebookInstanceSortOrder enum value - NotebookInstanceSortOrderDescending = "Descending" -) - -const ( - // NotebookInstanceStatusPending is a NotebookInstanceStatus enum value - NotebookInstanceStatusPending = "Pending" - - // NotebookInstanceStatusInService is a NotebookInstanceStatus enum value - NotebookInstanceStatusInService = "InService" - - // NotebookInstanceStatusStopping is a NotebookInstanceStatus enum value - NotebookInstanceStatusStopping = "Stopping" - - // NotebookInstanceStatusStopped is a NotebookInstanceStatus enum value - NotebookInstanceStatusStopped = "Stopped" - - // NotebookInstanceStatusFailed is a NotebookInstanceStatus enum value - NotebookInstanceStatusFailed = "Failed" - - // NotebookInstanceStatusDeleting is a NotebookInstanceStatus enum value - NotebookInstanceStatusDeleting = "Deleting" -) - -const ( - // OrderKeyAscending is a OrderKey enum value - OrderKeyAscending = "Ascending" - - // OrderKeyDescending is a OrderKey enum value - OrderKeyDescending = "Descending" -) - -const ( - // ProductionVariantInstanceTypeMlC42xlarge is a ProductionVariantInstanceType enum value - ProductionVariantInstanceTypeMlC42xlarge = "ml.c4.2xlarge" - - // ProductionVariantInstanceTypeMlC48xlarge is a ProductionVariantInstanceType enum value - ProductionVariantInstanceTypeMlC48xlarge = "ml.c4.8xlarge" - - // ProductionVariantInstanceTypeMlC4Xlarge is a ProductionVariantInstanceType enum value - ProductionVariantInstanceTypeMlC4Xlarge = "ml.c4.xlarge" - - // ProductionVariantInstanceTypeMlC52xlarge is a ProductionVariantInstanceType enum value - ProductionVariantInstanceTypeMlC52xlarge = "ml.c5.2xlarge" - - // ProductionVariantInstanceTypeMlC59xlarge is a ProductionVariantInstanceType enum value - ProductionVariantInstanceTypeMlC59xlarge = "ml.c5.9xlarge" - - // ProductionVariantInstanceTypeMlC5Xlarge is a ProductionVariantInstanceType enum value - ProductionVariantInstanceTypeMlC5Xlarge = 
"ml.c5.xlarge" - - // ProductionVariantInstanceTypeMlM4Xlarge is a ProductionVariantInstanceType enum value - ProductionVariantInstanceTypeMlM4Xlarge = "ml.m4.xlarge" - - // ProductionVariantInstanceTypeMlP2Xlarge is a ProductionVariantInstanceType enum value - ProductionVariantInstanceTypeMlP2Xlarge = "ml.p2.xlarge" - - // ProductionVariantInstanceTypeMlP32xlarge is a ProductionVariantInstanceType enum value - ProductionVariantInstanceTypeMlP32xlarge = "ml.p3.2xlarge" - - // ProductionVariantInstanceTypeMlT2Medium is a ProductionVariantInstanceType enum value - ProductionVariantInstanceTypeMlT2Medium = "ml.t2.medium" -) - -const ( - // RecordWrapperNone is a RecordWrapper enum value - RecordWrapperNone = "None" - - // RecordWrapperRecordIo is a RecordWrapper enum value - RecordWrapperRecordIo = "RecordIO" -) - -const ( - // S3DataDistributionFullyReplicated is a S3DataDistribution enum value - S3DataDistributionFullyReplicated = "FullyReplicated" - - // S3DataDistributionShardedByS3key is a S3DataDistribution enum value - S3DataDistributionShardedByS3key = "ShardedByS3Key" -) - -const ( - // S3DataTypeManifestFile is a S3DataType enum value - S3DataTypeManifestFile = "ManifestFile" - - // S3DataTypeS3prefix is a S3DataType enum value - S3DataTypeS3prefix = "S3Prefix" -) - -const ( - // SecondaryStatusStarting is a SecondaryStatus enum value - SecondaryStatusStarting = "Starting" - - // SecondaryStatusDownloading is a SecondaryStatus enum value - SecondaryStatusDownloading = "Downloading" - - // SecondaryStatusTraining is a SecondaryStatus enum value - SecondaryStatusTraining = "Training" - - // SecondaryStatusUploading is a SecondaryStatus enum value - SecondaryStatusUploading = "Uploading" - - // SecondaryStatusStopping is a SecondaryStatus enum value - SecondaryStatusStopping = "Stopping" - - // SecondaryStatusStopped is a SecondaryStatus enum value - SecondaryStatusStopped = "Stopped" - - // SecondaryStatusMaxRuntimeExceeded is a SecondaryStatus enum value - 
SecondaryStatusMaxRuntimeExceeded = "MaxRuntimeExceeded" - - // SecondaryStatusCompleted is a SecondaryStatus enum value - SecondaryStatusCompleted = "Completed" - - // SecondaryStatusFailed is a SecondaryStatus enum value - SecondaryStatusFailed = "Failed" -) - -const ( - // SortByName is a SortBy enum value - SortByName = "Name" - - // SortByCreationTime is a SortBy enum value - SortByCreationTime = "CreationTime" - - // SortByStatus is a SortBy enum value - SortByStatus = "Status" -) - -const ( - // SortOrderAscending is a SortOrder enum value - SortOrderAscending = "Ascending" - - // SortOrderDescending is a SortOrder enum value - SortOrderDescending = "Descending" -) - -const ( - // TrainingInputModePipe is a TrainingInputMode enum value - TrainingInputModePipe = "Pipe" - - // TrainingInputModeFile is a TrainingInputMode enum value - TrainingInputModeFile = "File" -) - -const ( - // TrainingInstanceTypeMlM4Xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlM4Xlarge = "ml.m4.xlarge" - - // TrainingInstanceTypeMlM44xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlM44xlarge = "ml.m4.4xlarge" - - // TrainingInstanceTypeMlM410xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlM410xlarge = "ml.m4.10xlarge" - - // TrainingInstanceTypeMlC4Xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlC4Xlarge = "ml.c4.xlarge" - - // TrainingInstanceTypeMlC42xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlC42xlarge = "ml.c4.2xlarge" - - // TrainingInstanceTypeMlC48xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlC48xlarge = "ml.c4.8xlarge" - - // TrainingInstanceTypeMlP2Xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlP2Xlarge = "ml.p2.xlarge" - - // TrainingInstanceTypeMlP28xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlP28xlarge = "ml.p2.8xlarge" - - // TrainingInstanceTypeMlP216xlarge is a TrainingInstanceType enum value - 
TrainingInstanceTypeMlP216xlarge = "ml.p2.16xlarge" - - // TrainingInstanceTypeMlP32xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlP32xlarge = "ml.p3.2xlarge" - - // TrainingInstanceTypeMlP38xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlP38xlarge = "ml.p3.8xlarge" - - // TrainingInstanceTypeMlP316xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlP316xlarge = "ml.p3.16xlarge" - - // TrainingInstanceTypeMlC5Xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlC5Xlarge = "ml.c5.xlarge" - - // TrainingInstanceTypeMlC52xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlC52xlarge = "ml.c5.2xlarge" - - // TrainingInstanceTypeMlC54xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlC54xlarge = "ml.c5.4xlarge" - - // TrainingInstanceTypeMlC59xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlC59xlarge = "ml.c5.9xlarge" - - // TrainingInstanceTypeMlC518xlarge is a TrainingInstanceType enum value - TrainingInstanceTypeMlC518xlarge = "ml.c5.18xlarge" -) - -const ( - // TrainingJobStatusInProgress is a TrainingJobStatus enum value - TrainingJobStatusInProgress = "InProgress" - - // TrainingJobStatusCompleted is a TrainingJobStatus enum value - TrainingJobStatusCompleted = "Completed" - - // TrainingJobStatusFailed is a TrainingJobStatus enum value - TrainingJobStatusFailed = "Failed" - - // TrainingJobStatusStopping is a TrainingJobStatus enum value - TrainingJobStatusStopping = "Stopping" - - // TrainingJobStatusStopped is a TrainingJobStatus enum value - TrainingJobStatusStopped = "Stopped" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go deleted file mode 100644 index d74f40297cc..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -// Package sagemaker provides the client and types for making API -// requests to Amazon SageMaker Service. -// -// Definition of the public APIs exposed by SageMaker -// -// See https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24 for more information on this service. -// -// See sagemaker package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sagemaker/ -// -// Using the Client -// -// To contact Amazon SageMaker Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon SageMaker Service client SageMaker for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sagemaker/#New -package sagemaker diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/errors.go deleted file mode 100644 index d7fd2c45754..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/errors.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sagemaker - -const ( - - // ErrCodeResourceInUse for service response error code - // "ResourceInUse". - // - // Resource being accessed is in use. - ErrCodeResourceInUse = "ResourceInUse" - - // ErrCodeResourceLimitExceeded for service response error code - // "ResourceLimitExceeded". - // - // You have exceeded an Amazon SageMaker resource limit. For example, you might - // have too many training jobs created. 
- ErrCodeResourceLimitExceeded = "ResourceLimitExceeded" - - // ErrCodeResourceNotFound for service response error code - // "ResourceNotFound". - // - // Resource being access is not found. - ErrCodeResourceNotFound = "ResourceNotFound" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go deleted file mode 100644 index fac6d92bae3..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go +++ /dev/null @@ -1,98 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sagemaker - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// SageMaker provides the API operation methods for making requests to -// Amazon SageMaker Service. See this package's package overview docs -// for details on the service. -// -// SageMaker methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type SageMaker struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "sagemaker" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the SageMaker client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a SageMaker client from just a session. 
-// svc := sagemaker.New(mySession) -// -// // Create a SageMaker client with additional configuration -// svc := sagemaker.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *SageMaker { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SageMaker { - if len(signingName) == 0 { - signingName = "sagemaker" - } - svc := &SageMaker{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2017-07-24", - JSONVersion: "1.1", - TargetPrefix: "SageMaker", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a SageMaker operation and runs any -// custom request initialization. 
-func (c *SageMaker) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/waiters.go deleted file mode 100644 index c8515cc633d..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/waiters.go +++ /dev/null @@ -1,331 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sagemaker - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// WaitUntilEndpointDeleted uses the SageMaker API operation -// DescribeEndpoint to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *SageMaker) WaitUntilEndpointDeleted(input *DescribeEndpointInput) error { - return c.WaitUntilEndpointDeletedWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilEndpointDeletedWithContext is an extended version of WaitUntilEndpointDeleted. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SageMaker) WaitUntilEndpointDeletedWithContext(ctx aws.Context, input *DescribeEndpointInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilEndpointDeleted", - MaxAttempts: 60, - Delay: request.ConstantWaiterDelay(30 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.ErrorWaiterMatch, - Expected: "ValidationException", - }, - { - State: request.FailureWaiterState, - Matcher: request.PathWaiterMatch, Argument: "EndpointStatus", - Expected: "Failed", - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *DescribeEndpointInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeEndpointRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} - -// WaitUntilEndpointInService uses the SageMaker API operation -// DescribeEndpoint to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *SageMaker) WaitUntilEndpointInService(input *DescribeEndpointInput) error { - return c.WaitUntilEndpointInServiceWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilEndpointInServiceWithContext is an extended version of WaitUntilEndpointInService. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SageMaker) WaitUntilEndpointInServiceWithContext(ctx aws.Context, input *DescribeEndpointInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilEndpointInService", - MaxAttempts: 120, - Delay: request.ConstantWaiterDelay(30 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.PathWaiterMatch, Argument: "EndpointStatus", - Expected: "InService", - }, - { - State: request.FailureWaiterState, - Matcher: request.PathWaiterMatch, Argument: "EndpointStatus", - Expected: "Failed", - }, - { - State: request.FailureWaiterState, - Matcher: request.ErrorWaiterMatch, - Expected: "ValidationException", - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *DescribeEndpointInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeEndpointRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} - -// WaitUntilNotebookInstanceDeleted uses the SageMaker API operation -// DescribeNotebookInstance to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *SageMaker) WaitUntilNotebookInstanceDeleted(input *DescribeNotebookInstanceInput) error { - return c.WaitUntilNotebookInstanceDeletedWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilNotebookInstanceDeletedWithContext is an extended version of WaitUntilNotebookInstanceDeleted. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) WaitUntilNotebookInstanceDeletedWithContext(ctx aws.Context, input *DescribeNotebookInstanceInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilNotebookInstanceDeleted", - MaxAttempts: 60, - Delay: request.ConstantWaiterDelay(30 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.ErrorWaiterMatch, - Expected: "ValidationException", - }, - { - State: request.FailureWaiterState, - Matcher: request.PathWaiterMatch, Argument: "NotebookInstanceStatus", - Expected: "Failed", - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *DescribeNotebookInstanceInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeNotebookInstanceRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} - -// WaitUntilNotebookInstanceInService uses the SageMaker API operation -// DescribeNotebookInstance to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *SageMaker) WaitUntilNotebookInstanceInService(input *DescribeNotebookInstanceInput) error { - return c.WaitUntilNotebookInstanceInServiceWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilNotebookInstanceInServiceWithContext is an extended version of WaitUntilNotebookInstanceInService. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) WaitUntilNotebookInstanceInServiceWithContext(ctx aws.Context, input *DescribeNotebookInstanceInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilNotebookInstanceInService", - MaxAttempts: 60, - Delay: request.ConstantWaiterDelay(30 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.PathWaiterMatch, Argument: "NotebookInstanceStatus", - Expected: "InService", - }, - { - State: request.FailureWaiterState, - Matcher: request.PathWaiterMatch, Argument: "NotebookInstanceStatus", - Expected: "Failed", - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *DescribeNotebookInstanceInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeNotebookInstanceRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} - -// WaitUntilNotebookInstanceStopped uses the SageMaker API operation -// DescribeNotebookInstance to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *SageMaker) WaitUntilNotebookInstanceStopped(input *DescribeNotebookInstanceInput) error { - return c.WaitUntilNotebookInstanceStoppedWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilNotebookInstanceStoppedWithContext is an extended version of WaitUntilNotebookInstanceStopped. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) WaitUntilNotebookInstanceStoppedWithContext(ctx aws.Context, input *DescribeNotebookInstanceInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilNotebookInstanceStopped", - MaxAttempts: 60, - Delay: request.ConstantWaiterDelay(30 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.PathWaiterMatch, Argument: "NotebookInstanceStatus", - Expected: "Stopped", - }, - { - State: request.FailureWaiterState, - Matcher: request.PathWaiterMatch, Argument: "NotebookInstanceStatus", - Expected: "Failed", - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *DescribeNotebookInstanceInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeNotebookInstanceRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} - -// WaitUntilTrainingJobCompletedOrStopped uses the SageMaker API operation -// DescribeTrainingJob to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *SageMaker) WaitUntilTrainingJobCompletedOrStopped(input *DescribeTrainingJobInput) error { - return c.WaitUntilTrainingJobCompletedOrStoppedWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilTrainingJobCompletedOrStoppedWithContext is an extended version of WaitUntilTrainingJobCompletedOrStopped. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SageMaker) WaitUntilTrainingJobCompletedOrStoppedWithContext(ctx aws.Context, input *DescribeTrainingJobInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilTrainingJobCompletedOrStopped", - MaxAttempts: 180, - Delay: request.ConstantWaiterDelay(120 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.PathWaiterMatch, Argument: "TrainingJobStatus", - Expected: "Completed", - }, - { - State: request.SuccessWaiterState, - Matcher: request.PathWaiterMatch, Argument: "TrainingJobStatus", - Expected: "Stopped", - }, - { - State: request.FailureWaiterState, - Matcher: request.PathWaiterMatch, Argument: "TrainingJobStatus", - Expected: "Failed", - }, - { - State: request.FailureWaiterState, - Matcher: request.ErrorWaiterMatch, - Expected: "ValidationException", - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *DescribeTrainingJobInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeTrainingJobRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/api.go b/vendor/github.com/aws/aws-sdk-go/service/shield/api.go deleted file mode 100644 index 7b1b19e8631..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/shield/api.go +++ /dev/null @@ -1,2171 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package shield - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opCreateProtection = "CreateProtection" - -// CreateProtectionRequest generates a "aws/request.Request" representing the -// client's request for the CreateProtection operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateProtection for more information on using the CreateProtection -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateProtectionRequest method. -// req, resp := client.CreateProtectionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateProtection -func (c *Shield) CreateProtectionRequest(input *CreateProtectionInput) (req *request.Request, output *CreateProtectionOutput) { - op := &request.Operation{ - Name: opCreateProtection, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateProtectionInput{} - } - - output = &CreateProtectionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateProtection API operation for AWS Shield. -// -// Enables AWS Shield Advanced for a specific AWS resource. The resource can -// be an Amazon CloudFront distribution, Elastic Load Balancing load balancer, -// Elastic IP Address, or an Amazon Route 53 hosted zone. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Shield's -// API operation CreateProtection for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// Exception that indicates that a problem occurred with the service infrastructure. -// You can retry the request. -// -// * ErrCodeInvalidResourceException "InvalidResourceException" -// Exception that indicates that the resource is invalid. You might not have -// access to the resource, or the resource might not exist. -// -// * ErrCodeInvalidOperationException "InvalidOperationException" -// Exception that indicates that the operation would not cause any change to -// occur. -// -// * ErrCodeLimitsExceededException "LimitsExceededException" -// Exception that indicates that the operation would exceed a limit. -// -// Type is the type of limit that would be exceeded. -// -// Limit is the threshold that would be exceeded. -// -// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" -// Exception indicating the specified resource already exists. -// -// * ErrCodeOptimisticLockException "OptimisticLockException" -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. -// -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// Exception indicating the specified resource does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateProtection -func (c *Shield) CreateProtection(input *CreateProtectionInput) (*CreateProtectionOutput, error) { - req, out := c.CreateProtectionRequest(input) - return out, req.Send() -} - -// CreateProtectionWithContext is the same as CreateProtection with the addition of -// the ability to pass a context and additional request options. 
-// -// See CreateProtection for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Shield) CreateProtectionWithContext(ctx aws.Context, input *CreateProtectionInput, opts ...request.Option) (*CreateProtectionOutput, error) { - req, out := c.CreateProtectionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateSubscription = "CreateSubscription" - -// CreateSubscriptionRequest generates a "aws/request.Request" representing the -// client's request for the CreateSubscription operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateSubscription for more information on using the CreateSubscription -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateSubscriptionRequest method. 
-// req, resp := client.CreateSubscriptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateSubscription -func (c *Shield) CreateSubscriptionRequest(input *CreateSubscriptionInput) (req *request.Request, output *CreateSubscriptionOutput) { - op := &request.Operation{ - Name: opCreateSubscription, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateSubscriptionInput{} - } - - output = &CreateSubscriptionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateSubscription API operation for AWS Shield. -// -// Activates AWS Shield Advanced for an account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Shield's -// API operation CreateSubscription for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// Exception that indicates that a problem occurred with the service infrastructure. -// You can retry the request. -// -// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" -// Exception indicating the specified resource already exists. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateSubscription -func (c *Shield) CreateSubscription(input *CreateSubscriptionInput) (*CreateSubscriptionOutput, error) { - req, out := c.CreateSubscriptionRequest(input) - return out, req.Send() -} - -// CreateSubscriptionWithContext is the same as CreateSubscription with the addition of -// the ability to pass a context and additional request options. -// -// See CreateSubscription for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Shield) CreateSubscriptionWithContext(ctx aws.Context, input *CreateSubscriptionInput, opts ...request.Option) (*CreateSubscriptionOutput, error) { - req, out := c.CreateSubscriptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteProtection = "DeleteProtection" - -// DeleteProtectionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteProtection operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteProtection for more information on using the DeleteProtection -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteProtectionRequest method. 
-// req, resp := client.DeleteProtectionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteProtection -func (c *Shield) DeleteProtectionRequest(input *DeleteProtectionInput) (req *request.Request, output *DeleteProtectionOutput) { - op := &request.Operation{ - Name: opDeleteProtection, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteProtectionInput{} - } - - output = &DeleteProtectionOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteProtection API operation for AWS Shield. -// -// Deletes an AWS Shield Advanced Protection. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Shield's -// API operation DeleteProtection for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// Exception that indicates that a problem occurred with the service infrastructure. -// You can retry the request. -// -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// Exception indicating the specified resource does not exist. -// -// * ErrCodeOptimisticLockException "OptimisticLockException" -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteProtection -func (c *Shield) DeleteProtection(input *DeleteProtectionInput) (*DeleteProtectionOutput, error) { - req, out := c.DeleteProtectionRequest(input) - return out, req.Send() -} - -// DeleteProtectionWithContext is the same as DeleteProtection with the addition of -// the ability to pass a context and additional request options. 
-// -// See DeleteProtection for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Shield) DeleteProtectionWithContext(ctx aws.Context, input *DeleteProtectionInput, opts ...request.Option) (*DeleteProtectionOutput, error) { - req, out := c.DeleteProtectionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteSubscription = "DeleteSubscription" - -// DeleteSubscriptionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSubscription operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteSubscription for more information on using the DeleteSubscription -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteSubscriptionRequest method. 
-// req, resp := client.DeleteSubscriptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteSubscription -func (c *Shield) DeleteSubscriptionRequest(input *DeleteSubscriptionInput) (req *request.Request, output *DeleteSubscriptionOutput) { - op := &request.Operation{ - Name: opDeleteSubscription, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteSubscriptionInput{} - } - - output = &DeleteSubscriptionOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteSubscription API operation for AWS Shield. -// -// Removes AWS Shield Advanced from an account. AWS Shield Advanced requires -// a 1-year subscription commitment. You cannot delete a subscription prior -// to the completion of that commitment. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Shield's -// API operation DeleteSubscription for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// Exception that indicates that a problem occurred with the service infrastructure. -// You can retry the request. -// -// * ErrCodeLockedSubscriptionException "LockedSubscriptionException" -// Exception that indicates that the subscription you are trying to delete has -// not yet completed the 1-year commitment. You cannot delete this subscription. -// -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// Exception indicating the specified resource does not exist. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteSubscription -func (c *Shield) DeleteSubscription(input *DeleteSubscriptionInput) (*DeleteSubscriptionOutput, error) { - req, out := c.DeleteSubscriptionRequest(input) - return out, req.Send() -} - -// DeleteSubscriptionWithContext is the same as DeleteSubscription with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteSubscription for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Shield) DeleteSubscriptionWithContext(ctx aws.Context, input *DeleteSubscriptionInput, opts ...request.Option) (*DeleteSubscriptionOutput, error) { - req, out := c.DeleteSubscriptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeAttack = "DescribeAttack" - -// DescribeAttackRequest generates a "aws/request.Request" representing the -// client's request for the DescribeAttack operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeAttack for more information on using the DescribeAttack -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeAttackRequest method. 
-// req, resp := client.DescribeAttackRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeAttack -func (c *Shield) DescribeAttackRequest(input *DescribeAttackInput) (req *request.Request, output *DescribeAttackOutput) { - op := &request.Operation{ - Name: opDescribeAttack, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeAttackInput{} - } - - output = &DescribeAttackOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeAttack API operation for AWS Shield. -// -// Describes the details of a DDoS attack. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Shield's -// API operation DescribeAttack for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// Exception that indicates that a problem occurred with the service infrastructure. -// You can retry the request. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// Exception that indicates that the parameters passed to the API are invalid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeAttack -func (c *Shield) DescribeAttack(input *DescribeAttackInput) (*DescribeAttackOutput, error) { - req, out := c.DescribeAttackRequest(input) - return out, req.Send() -} - -// DescribeAttackWithContext is the same as DescribeAttack with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeAttack for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Shield) DescribeAttackWithContext(ctx aws.Context, input *DescribeAttackInput, opts ...request.Option) (*DescribeAttackOutput, error) { - req, out := c.DescribeAttackRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeProtection = "DescribeProtection" - -// DescribeProtectionRequest generates a "aws/request.Request" representing the -// client's request for the DescribeProtection operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeProtection for more information on using the DescribeProtection -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeProtectionRequest method. -// req, resp := client.DescribeProtectionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeProtection -func (c *Shield) DescribeProtectionRequest(input *DescribeProtectionInput) (req *request.Request, output *DescribeProtectionOutput) { - op := &request.Operation{ - Name: opDescribeProtection, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeProtectionInput{} - } - - output = &DescribeProtectionOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeProtection API operation for AWS Shield. 
-// -// Lists the details of a Protection object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Shield's -// API operation DescribeProtection for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// Exception that indicates that a problem occurred with the service infrastructure. -// You can retry the request. -// -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// Exception indicating the specified resource does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeProtection -func (c *Shield) DescribeProtection(input *DescribeProtectionInput) (*DescribeProtectionOutput, error) { - req, out := c.DescribeProtectionRequest(input) - return out, req.Send() -} - -// DescribeProtectionWithContext is the same as DescribeProtection with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeProtection for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Shield) DescribeProtectionWithContext(ctx aws.Context, input *DescribeProtectionInput, opts ...request.Option) (*DescribeProtectionOutput, error) { - req, out := c.DescribeProtectionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeSubscription = "DescribeSubscription" - -// DescribeSubscriptionRequest generates a "aws/request.Request" representing the -// client's request for the DescribeSubscription operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeSubscription for more information on using the DescribeSubscription -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeSubscriptionRequest method. -// req, resp := client.DescribeSubscriptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeSubscription -func (c *Shield) DescribeSubscriptionRequest(input *DescribeSubscriptionInput) (req *request.Request, output *DescribeSubscriptionOutput) { - op := &request.Operation{ - Name: opDescribeSubscription, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeSubscriptionInput{} - } - - output = &DescribeSubscriptionOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeSubscription API operation for AWS Shield. -// -// Provides details about the AWS Shield Advanced subscription for an account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Shield's -// API operation DescribeSubscription for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// Exception that indicates that a problem occurred with the service infrastructure. -// You can retry the request. 
-// -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// Exception indicating the specified resource does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeSubscription -func (c *Shield) DescribeSubscription(input *DescribeSubscriptionInput) (*DescribeSubscriptionOutput, error) { - req, out := c.DescribeSubscriptionRequest(input) - return out, req.Send() -} - -// DescribeSubscriptionWithContext is the same as DescribeSubscription with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeSubscription for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Shield) DescribeSubscriptionWithContext(ctx aws.Context, input *DescribeSubscriptionInput, opts ...request.Option) (*DescribeSubscriptionOutput, error) { - req, out := c.DescribeSubscriptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSubscriptionState = "GetSubscriptionState" - -// GetSubscriptionStateRequest generates a "aws/request.Request" representing the -// client's request for the GetSubscriptionState operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSubscriptionState for more information on using the GetSubscriptionState -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSubscriptionStateRequest method. -// req, resp := client.GetSubscriptionStateRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/GetSubscriptionState -func (c *Shield) GetSubscriptionStateRequest(input *GetSubscriptionStateInput) (req *request.Request, output *GetSubscriptionStateOutput) { - op := &request.Operation{ - Name: opGetSubscriptionState, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSubscriptionStateInput{} - } - - output = &GetSubscriptionStateOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSubscriptionState API operation for AWS Shield. -// -// Returns the SubscriptionState, either Active or Inactive. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Shield's -// API operation GetSubscriptionState for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// Exception that indicates that a problem occurred with the service infrastructure. -// You can retry the request. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/GetSubscriptionState -func (c *Shield) GetSubscriptionState(input *GetSubscriptionStateInput) (*GetSubscriptionStateOutput, error) { - req, out := c.GetSubscriptionStateRequest(input) - return out, req.Send() -} - -// GetSubscriptionStateWithContext is the same as GetSubscriptionState with the addition of -// the ability to pass a context and additional request options. -// -// See GetSubscriptionState for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Shield) GetSubscriptionStateWithContext(ctx aws.Context, input *GetSubscriptionStateInput, opts ...request.Option) (*GetSubscriptionStateOutput, error) { - req, out := c.GetSubscriptionStateRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListAttacks = "ListAttacks" - -// ListAttacksRequest generates a "aws/request.Request" representing the -// client's request for the ListAttacks operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListAttacks for more information on using the ListAttacks -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListAttacksRequest method. -// req, resp := client.ListAttacksRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListAttacks -func (c *Shield) ListAttacksRequest(input *ListAttacksInput) (req *request.Request, output *ListAttacksOutput) { - op := &request.Operation{ - Name: opListAttacks, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListAttacksInput{} - } - - output = &ListAttacksOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListAttacks API operation for AWS Shield. 
-// -// Returns all ongoing DDoS attacks or all DDoS attacks during a specified time -// period. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Shield's -// API operation ListAttacks for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// Exception that indicates that a problem occurred with the service infrastructure. -// You can retry the request. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// Exception that indicates that the parameters passed to the API are invalid. -// -// * ErrCodeInvalidOperationException "InvalidOperationException" -// Exception that indicates that the operation would not cause any change to -// occur. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListAttacks -func (c *Shield) ListAttacks(input *ListAttacksInput) (*ListAttacksOutput, error) { - req, out := c.ListAttacksRequest(input) - return out, req.Send() -} - -// ListAttacksWithContext is the same as ListAttacks with the addition of -// the ability to pass a context and additional request options. -// -// See ListAttacks for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Shield) ListAttacksWithContext(ctx aws.Context, input *ListAttacksInput, opts ...request.Option) (*ListAttacksOutput, error) { - req, out := c.ListAttacksRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opListProtections = "ListProtections" - -// ListProtectionsRequest generates a "aws/request.Request" representing the -// client's request for the ListProtections operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListProtections for more information on using the ListProtections -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListProtectionsRequest method. -// req, resp := client.ListProtectionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListProtections -func (c *Shield) ListProtectionsRequest(input *ListProtectionsInput) (req *request.Request, output *ListProtectionsOutput) { - op := &request.Operation{ - Name: opListProtections, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListProtectionsInput{} - } - - output = &ListProtectionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListProtections API operation for AWS Shield. -// -// Lists all Protection objects for the account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Shield's -// API operation ListProtections for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalErrorException" -// Exception that indicates that a problem occurred with the service infrastructure. -// You can retry the request. -// -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// Exception indicating the specified resource does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListProtections -func (c *Shield) ListProtections(input *ListProtectionsInput) (*ListProtectionsOutput, error) { - req, out := c.ListProtectionsRequest(input) - return out, req.Send() -} - -// ListProtectionsWithContext is the same as ListProtections with the addition of -// the ability to pass a context and additional request options. -// -// See ListProtections for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Shield) ListProtectionsWithContext(ctx aws.Context, input *ListProtectionsInput, opts ...request.Option) (*ListProtectionsOutput, error) { - req, out := c.ListProtectionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// The details of a DDoS attack. -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/AttackDetail -type AttackDetail struct { - _ struct{} `type:"structure"` - - // List of counters that describe the attack for the specified time period. - AttackCounters []*SummarizedCounter `type:"list"` - - // The unique identifier (ID) of the attack. - AttackId *string `min:"1" type:"string"` - - // The array of AttackProperty objects. - AttackProperties []*AttackProperty `type:"list"` - - // The time the attack ended, in Unix time in seconds. 
For more information - // see timestamp (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types). - EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // List of mitigation actions taken for the attack. - Mitigations []*Mitigation `type:"list"` - - // The ARN (Amazon Resource Name) of the resource that was attacked. - ResourceArn *string `min:"1" type:"string"` - - // The time the attack started, in Unix time in seconds. For more information - // see timestamp (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types). - StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // If applicable, additional detail about the resource being attacked, for example, - // IP address or URL. - SubResources []*SubResourceSummary `type:"list"` -} - -// String returns the string representation -func (s AttackDetail) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AttackDetail) GoString() string { - return s.String() -} - -// SetAttackCounters sets the AttackCounters field's value. -func (s *AttackDetail) SetAttackCounters(v []*SummarizedCounter) *AttackDetail { - s.AttackCounters = v - return s -} - -// SetAttackId sets the AttackId field's value. -func (s *AttackDetail) SetAttackId(v string) *AttackDetail { - s.AttackId = &v - return s -} - -// SetAttackProperties sets the AttackProperties field's value. -func (s *AttackDetail) SetAttackProperties(v []*AttackProperty) *AttackDetail { - s.AttackProperties = v - return s -} - -// SetEndTime sets the EndTime field's value. -func (s *AttackDetail) SetEndTime(v time.Time) *AttackDetail { - s.EndTime = &v - return s -} - -// SetMitigations sets the Mitigations field's value. -func (s *AttackDetail) SetMitigations(v []*Mitigation) *AttackDetail { - s.Mitigations = v - return s -} - -// SetResourceArn sets the ResourceArn field's value. 
-func (s *AttackDetail) SetResourceArn(v string) *AttackDetail { - s.ResourceArn = &v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *AttackDetail) SetStartTime(v time.Time) *AttackDetail { - s.StartTime = &v - return s -} - -// SetSubResources sets the SubResources field's value. -func (s *AttackDetail) SetSubResources(v []*SubResourceSummary) *AttackDetail { - s.SubResources = v - return s -} - -// Details of the described attack. -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/AttackProperty -type AttackProperty struct { - _ struct{} `type:"structure"` - - // The type of DDoS event that was observed. NETWORK indicates layer 3 and layer - // 4 events and APPLICATION indicates layer 7 events. - AttackLayer *string `type:"string" enum:"AttackLayer"` - - // Defines the DDoS attack property information that is provided. - AttackPropertyIdentifier *string `type:"string" enum:"AttackPropertyIdentifier"` - - // The array of Contributor objects that includes the top five contributors - // to an attack. - TopContributors []*Contributor `type:"list"` - - // The total contributions made to this attack by all contributors, not just - // the five listed in the TopContributors list. - Total *int64 `type:"long"` - - // The unit of the Value of the contributions. - Unit *string `type:"string" enum:"Unit"` -} - -// String returns the string representation -func (s AttackProperty) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AttackProperty) GoString() string { - return s.String() -} - -// SetAttackLayer sets the AttackLayer field's value. -func (s *AttackProperty) SetAttackLayer(v string) *AttackProperty { - s.AttackLayer = &v - return s -} - -// SetAttackPropertyIdentifier sets the AttackPropertyIdentifier field's value. 
-func (s *AttackProperty) SetAttackPropertyIdentifier(v string) *AttackProperty { - s.AttackPropertyIdentifier = &v - return s -} - -// SetTopContributors sets the TopContributors field's value. -func (s *AttackProperty) SetTopContributors(v []*Contributor) *AttackProperty { - s.TopContributors = v - return s -} - -// SetTotal sets the Total field's value. -func (s *AttackProperty) SetTotal(v int64) *AttackProperty { - s.Total = &v - return s -} - -// SetUnit sets the Unit field's value. -func (s *AttackProperty) SetUnit(v string) *AttackProperty { - s.Unit = &v - return s -} - -// Summarizes all DDoS attacks for a specified time period. -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/AttackSummary -type AttackSummary struct { - _ struct{} `type:"structure"` - - // The unique identifier (ID) of the attack. - AttackId *string `type:"string"` - - // The list of attacks for a specified time period. - AttackVectors []*AttackVectorDescription `type:"list"` - - // The end time of the attack, in Unix time in seconds. For more information - // see timestamp (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types). - EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The ARN (Amazon Resource Name) of the resource that was attacked. - ResourceArn *string `type:"string"` - - // The start time of the attack, in Unix time in seconds. For more information - // see timestamp (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types). - StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` -} - -// String returns the string representation -func (s AttackSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AttackSummary) GoString() string { - return s.String() -} - -// SetAttackId sets the AttackId field's value. 
-func (s *AttackSummary) SetAttackId(v string) *AttackSummary { - s.AttackId = &v - return s -} - -// SetAttackVectors sets the AttackVectors field's value. -func (s *AttackSummary) SetAttackVectors(v []*AttackVectorDescription) *AttackSummary { - s.AttackVectors = v - return s -} - -// SetEndTime sets the EndTime field's value. -func (s *AttackSummary) SetEndTime(v time.Time) *AttackSummary { - s.EndTime = &v - return s -} - -// SetResourceArn sets the ResourceArn field's value. -func (s *AttackSummary) SetResourceArn(v string) *AttackSummary { - s.ResourceArn = &v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *AttackSummary) SetStartTime(v time.Time) *AttackSummary { - s.StartTime = &v - return s -} - -// Describes the attack. -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/AttackVectorDescription -type AttackVectorDescription struct { - _ struct{} `type:"structure"` - - // The attack type. Valid values: - // - // * UDP_TRAFFIC - // - // * UDP_FRAGMENT - // - // * GENERIC_UDP_REFLECTION - // - // * DNS_REFLECTION - // - // * NTP_REFLECTION - // - // * CHARGEN_REFLECTION - // - // * SSDP_REFLECTION - // - // * PORT_MAPPER - // - // * RIP_REFLECTION - // - // * SNMP_REFLECTION - // - // * MSSQL_REFLECTION - // - // * NET_BIOS_REFLECTION - // - // * SYN_FLOOD - // - // * ACK_FLOOD - // - // * REQUEST_FLOOD - // - // VectorType is a required field - VectorType *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s AttackVectorDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AttackVectorDescription) GoString() string { - return s.String() -} - -// SetVectorType sets the VectorType field's value. -func (s *AttackVectorDescription) SetVectorType(v string) *AttackVectorDescription { - s.VectorType = &v - return s -} - -// A contributor to the attack and their contribution. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/Contributor -type Contributor struct { - _ struct{} `type:"structure"` - - // The name of the contributor. This is dependent on the AttackPropertyIdentifier. - // For example, if the AttackPropertyIdentifier is SOURCE_COUNTRY, the Name - // could be United States. - Name *string `type:"string"` - - // The contribution of this contributor expressed in Protection units. For example - // 10,000. - Value *int64 `type:"long"` -} - -// String returns the string representation -func (s Contributor) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Contributor) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *Contributor) SetName(v string) *Contributor { - s.Name = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Contributor) SetValue(v int64) *Contributor { - s.Value = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateProtectionRequest -type CreateProtectionInput struct { - _ struct{} `type:"structure"` - - // Friendly name for the Protection you are creating. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The ARN (Amazon Resource Name) of the resource to be protected. 
- // - // The ARN should be in one of the following formats: - // - // * For an Application Load Balancer: arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id - // - // * For an Elastic Load Balancer (Classic Load Balancer): arn:aws:elasticloadbalancing:region:account-id:loadbalancer/load-balancer-name - // - // * For AWS CloudFront distribution: arn:aws:cloudfront::account-id:distribution/distribution-id - // - // * For Amazon Route 53: arn:aws:route53::account-id:hostedzone/hosted-zone-id - // - // * For an Elastic IP address: arn:aws:ec2:region:account-id:eip-allocation/allocation-id - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateProtectionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateProtectionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateProtectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateProtectionInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *CreateProtectionInput) SetName(v string) *CreateProtectionInput { - s.Name = &v - return s -} - -// SetResourceArn sets the ResourceArn field's value. 
-func (s *CreateProtectionInput) SetResourceArn(v string) *CreateProtectionInput { - s.ResourceArn = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateProtectionResponse -type CreateProtectionOutput struct { - _ struct{} `type:"structure"` - - // The unique identifier (ID) for the Protection object that is created. - ProtectionId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s CreateProtectionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateProtectionOutput) GoString() string { - return s.String() -} - -// SetProtectionId sets the ProtectionId field's value. -func (s *CreateProtectionOutput) SetProtectionId(v string) *CreateProtectionOutput { - s.ProtectionId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateSubscriptionRequest -type CreateSubscriptionInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateSubscriptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateSubscriptionInput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateSubscriptionResponse -type CreateSubscriptionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateSubscriptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateSubscriptionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteProtectionRequest -type DeleteProtectionInput struct { - _ struct{} `type:"structure"` - - // The unique identifier (ID) for the Protection object to be deleted. 
- // - // ProtectionId is a required field - ProtectionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteProtectionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteProtectionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteProtectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteProtectionInput"} - if s.ProtectionId == nil { - invalidParams.Add(request.NewErrParamRequired("ProtectionId")) - } - if s.ProtectionId != nil && len(*s.ProtectionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ProtectionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetProtectionId sets the ProtectionId field's value. -func (s *DeleteProtectionInput) SetProtectionId(v string) *DeleteProtectionInput { - s.ProtectionId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteProtectionResponse -type DeleteProtectionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteProtectionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteProtectionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteSubscriptionRequest -type DeleteSubscriptionInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteSubscriptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSubscriptionInput) GoString() string { - return s.String() -} - -// See also, 
https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteSubscriptionResponse -type DeleteSubscriptionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteSubscriptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSubscriptionOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeAttackRequest -type DescribeAttackInput struct { - _ struct{} `type:"structure"` - - // The unique identifier (ID) for the attack that to be described. - // - // AttackId is a required field - AttackId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeAttackInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeAttackInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeAttackInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeAttackInput"} - if s.AttackId == nil { - invalidParams.Add(request.NewErrParamRequired("AttackId")) - } - if s.AttackId != nil && len(*s.AttackId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AttackId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttackId sets the AttackId field's value. -func (s *DescribeAttackInput) SetAttackId(v string) *DescribeAttackInput { - s.AttackId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeAttackResponse -type DescribeAttackOutput struct { - _ struct{} `type:"structure"` - - // The attack that is described. 
- Attack *AttackDetail `type:"structure"` -} - -// String returns the string representation -func (s DescribeAttackOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeAttackOutput) GoString() string { - return s.String() -} - -// SetAttack sets the Attack field's value. -func (s *DescribeAttackOutput) SetAttack(v *AttackDetail) *DescribeAttackOutput { - s.Attack = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeProtectionRequest -type DescribeProtectionInput struct { - _ struct{} `type:"structure"` - - // The unique identifier (ID) for the Protection object that is described. - // - // ProtectionId is a required field - ProtectionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeProtectionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeProtectionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeProtectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeProtectionInput"} - if s.ProtectionId == nil { - invalidParams.Add(request.NewErrParamRequired("ProtectionId")) - } - if s.ProtectionId != nil && len(*s.ProtectionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ProtectionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetProtectionId sets the ProtectionId field's value. -func (s *DescribeProtectionInput) SetProtectionId(v string) *DescribeProtectionInput { - s.ProtectionId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeProtectionResponse -type DescribeProtectionOutput struct { - _ struct{} `type:"structure"` - - // The Protection object that is described. 
- Protection *Protection `type:"structure"` -} - -// String returns the string representation -func (s DescribeProtectionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeProtectionOutput) GoString() string { - return s.String() -} - -// SetProtection sets the Protection field's value. -func (s *DescribeProtectionOutput) SetProtection(v *Protection) *DescribeProtectionOutput { - s.Protection = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeSubscriptionRequest -type DescribeSubscriptionInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DescribeSubscriptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeSubscriptionInput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeSubscriptionResponse -type DescribeSubscriptionOutput struct { - _ struct{} `type:"structure"` - - // The AWS Shield Advanced subscription details for an account. - Subscription *Subscription `type:"structure"` -} - -// String returns the string representation -func (s DescribeSubscriptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeSubscriptionOutput) GoString() string { - return s.String() -} - -// SetSubscription sets the Subscription field's value. 
-func (s *DescribeSubscriptionOutput) SetSubscription(v *Subscription) *DescribeSubscriptionOutput { - s.Subscription = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/GetSubscriptionStateRequest -type GetSubscriptionStateInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s GetSubscriptionStateInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSubscriptionStateInput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/GetSubscriptionStateResponse -type GetSubscriptionStateOutput struct { - _ struct{} `type:"structure"` - - // The status of the subscription. - // - // SubscriptionState is a required field - SubscriptionState *string `type:"string" required:"true" enum:"SubscriptionState"` -} - -// String returns the string representation -func (s GetSubscriptionStateOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSubscriptionStateOutput) GoString() string { - return s.String() -} - -// SetSubscriptionState sets the SubscriptionState field's value. -func (s *GetSubscriptionStateOutput) SetSubscriptionState(v string) *GetSubscriptionStateOutput { - s.SubscriptionState = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListAttacksRequest -type ListAttacksInput struct { - _ struct{} `type:"structure"` - - // The end of the time period for the attacks. This is a timestamp type. The - // sample request above indicates a number type because the default used by - // WAF is Unix time in seconds. However any valid timestamp format (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types) - // is allowed. - EndTime *TimeRange `type:"structure"` - - // The maximum number of AttackSummary objects to be returned. 
If this is left - // blank, the first 20 results will be returned. - MaxResults *int64 `type:"integer"` - - // The ListAttacksRequest.NextMarker value from a previous call to ListAttacksRequest. - // Pass null if this is the first call. - NextToken *string `min:"1" type:"string"` - - // The ARN (Amazon Resource Name) of the resource that was attacked. If this - // is left blank, all applicable resources for this account will be included. - ResourceArns []*string `type:"list"` - - // The start of the time period for the attacks. This is a timestamp type. The - // sample request above indicates a number type because the default used by - // WAF is Unix time in seconds. However any valid timestamp format (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types) - // is allowed. - StartTime *TimeRange `type:"structure"` -} - -// String returns the string representation -func (s ListAttacksInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListAttacksInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListAttacksInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListAttacksInput"} - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndTime sets the EndTime field's value. -func (s *ListAttacksInput) SetEndTime(v *TimeRange) *ListAttacksInput { - s.EndTime = v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListAttacksInput) SetMaxResults(v int64) *ListAttacksInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListAttacksInput) SetNextToken(v string) *ListAttacksInput { - s.NextToken = &v - return s -} - -// SetResourceArns sets the ResourceArns field's value. -func (s *ListAttacksInput) SetResourceArns(v []*string) *ListAttacksInput { - s.ResourceArns = v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *ListAttacksInput) SetStartTime(v *TimeRange) *ListAttacksInput { - s.StartTime = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListAttacksResponse -type ListAttacksOutput struct { - _ struct{} `type:"structure"` - - // The attack information for the specified time range. - AttackSummaries []*AttackSummary `type:"list"` - - // The token returned by a previous call to indicate that there is more data - // available. If not null, more results are available. Pass this value for the - // NextMarker parameter in a subsequent call to ListAttacks to retrieve the - // next set of items. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s ListAttacksOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListAttacksOutput) GoString() string { - return s.String() -} - -// SetAttackSummaries sets the AttackSummaries field's value. -func (s *ListAttacksOutput) SetAttackSummaries(v []*AttackSummary) *ListAttacksOutput { - s.AttackSummaries = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListAttacksOutput) SetNextToken(v string) *ListAttacksOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListProtectionsRequest -type ListProtectionsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of Protection objects to be returned. If this is left - // blank the first 20 results will be returned. 
- MaxResults *int64 `type:"integer"` - - // The ListProtectionsRequest.NextToken value from a previous call to ListProtections. - // Pass null if this is the first call. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s ListProtectionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListProtectionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListProtectionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListProtectionsInput"} - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListProtectionsInput) SetMaxResults(v int64) *ListProtectionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListProtectionsInput) SetNextToken(v string) *ListProtectionsInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListProtectionsResponse -type ListProtectionsOutput struct { - _ struct{} `type:"structure"` - - // If you specify a value for MaxResults and you have more Protections than - // the value of MaxResults, AWS Shield Advanced returns a NextToken value in - // the response that allows you to list another group of Protections. For the - // second and subsequent ListProtections requests, specify the value of NextToken - // from the previous response to get information about another batch of Protections. - NextToken *string `min:"1" type:"string"` - - // The array of enabled Protection objects. 
- Protections []*Protection `type:"list"` -} - -// String returns the string representation -func (s ListProtectionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListProtectionsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListProtectionsOutput) SetNextToken(v string) *ListProtectionsOutput { - s.NextToken = &v - return s -} - -// SetProtections sets the Protections field's value. -func (s *ListProtectionsOutput) SetProtections(v []*Protection) *ListProtectionsOutput { - s.Protections = v - return s -} - -// The mitigation applied to a DDoS attack. -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/Mitigation -type Mitigation struct { - _ struct{} `type:"structure"` - - // The name of the mitigation taken for this attack. - MitigationName *string `type:"string"` -} - -// String returns the string representation -func (s Mitigation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Mitigation) GoString() string { - return s.String() -} - -// SetMitigationName sets the MitigationName field's value. -func (s *Mitigation) SetMitigationName(v string) *Mitigation { - s.MitigationName = &v - return s -} - -// An object that represents a resource that is under DDoS protection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/Protection -type Protection struct { - _ struct{} `type:"structure"` - - // The unique identifier (ID) of the protection. - Id *string `min:"1" type:"string"` - - // The friendly name of the protection. For example, My CloudFront distributions. - Name *string `min:"1" type:"string"` - - // The ARN (Amazon Resource Name) of the AWS resource that is protected. 
- ResourceArn *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s Protection) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Protection) GoString() string { - return s.String() -} - -// SetId sets the Id field's value. -func (s *Protection) SetId(v string) *Protection { - s.Id = &v - return s -} - -// SetName sets the Name field's value. -func (s *Protection) SetName(v string) *Protection { - s.Name = &v - return s -} - -// SetResourceArn sets the ResourceArn field's value. -func (s *Protection) SetResourceArn(v string) *Protection { - s.ResourceArn = &v - return s -} - -// The attack information for the specified SubResource. -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/SubResourceSummary -type SubResourceSummary struct { - _ struct{} `type:"structure"` - - // The list of attack types and associated counters. - AttackVectors []*SummarizedAttackVector `type:"list"` - - // The counters that describe the details of the attack. - Counters []*SummarizedCounter `type:"list"` - - // The unique identifier (ID) of the SubResource. - Id *string `type:"string"` - - // The SubResource type. - Type *string `type:"string" enum:"SubResourceType"` -} - -// String returns the string representation -func (s SubResourceSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SubResourceSummary) GoString() string { - return s.String() -} - -// SetAttackVectors sets the AttackVectors field's value. -func (s *SubResourceSummary) SetAttackVectors(v []*SummarizedAttackVector) *SubResourceSummary { - s.AttackVectors = v - return s -} - -// SetCounters sets the Counters field's value. -func (s *SubResourceSummary) SetCounters(v []*SummarizedCounter) *SubResourceSummary { - s.Counters = v - return s -} - -// SetId sets the Id field's value. 
-func (s *SubResourceSummary) SetId(v string) *SubResourceSummary { - s.Id = &v - return s -} - -// SetType sets the Type field's value. -func (s *SubResourceSummary) SetType(v string) *SubResourceSummary { - s.Type = &v - return s -} - -// Information about the AWS Shield Advanced subscription for an account. -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/Subscription -type Subscription struct { - _ struct{} `type:"structure"` - - // The start time of the subscription, in Unix time in seconds. For more information - // see timestamp (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types). - StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The length, in seconds, of the AWS Shield Advanced subscription for the account. - TimeCommitmentInSeconds *int64 `type:"long"` -} - -// String returns the string representation -func (s Subscription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Subscription) GoString() string { - return s.String() -} - -// SetStartTime sets the StartTime field's value. -func (s *Subscription) SetStartTime(v time.Time) *Subscription { - s.StartTime = &v - return s -} - -// SetTimeCommitmentInSeconds sets the TimeCommitmentInSeconds field's value. -func (s *Subscription) SetTimeCommitmentInSeconds(v int64) *Subscription { - s.TimeCommitmentInSeconds = &v - return s -} - -// A summary of information about the attack. -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/SummarizedAttackVector -type SummarizedAttackVector struct { - _ struct{} `type:"structure"` - - // The list of counters that describe the details of the attack. - VectorCounters []*SummarizedCounter `type:"list"` - - // The attack type, for example, SNMP reflection or SYN flood. 
- // - // VectorType is a required field - VectorType *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s SummarizedAttackVector) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SummarizedAttackVector) GoString() string { - return s.String() -} - -// SetVectorCounters sets the VectorCounters field's value. -func (s *SummarizedAttackVector) SetVectorCounters(v []*SummarizedCounter) *SummarizedAttackVector { - s.VectorCounters = v - return s -} - -// SetVectorType sets the VectorType field's value. -func (s *SummarizedAttackVector) SetVectorType(v string) *SummarizedAttackVector { - s.VectorType = &v - return s -} - -// The counter that describes a DDoS attack. -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/SummarizedCounter -type SummarizedCounter struct { - _ struct{} `type:"structure"` - - // The average value of the counter for a specified time period. - Average *float64 `type:"double"` - - // The maximum value of the counter for a specified time period. - Max *float64 `type:"double"` - - // The number of counters for a specified time period. - N *int64 `type:"integer"` - - // The counter name. - Name *string `type:"string"` - - // The total of counter values for a specified time period. - Sum *float64 `type:"double"` - - // The unit of the counters. - Unit *string `type:"string"` -} - -// String returns the string representation -func (s SummarizedCounter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SummarizedCounter) GoString() string { - return s.String() -} - -// SetAverage sets the Average field's value. -func (s *SummarizedCounter) SetAverage(v float64) *SummarizedCounter { - s.Average = &v - return s -} - -// SetMax sets the Max field's value. 
-func (s *SummarizedCounter) SetMax(v float64) *SummarizedCounter { - s.Max = &v - return s -} - -// SetN sets the N field's value. -func (s *SummarizedCounter) SetN(v int64) *SummarizedCounter { - s.N = &v - return s -} - -// SetName sets the Name field's value. -func (s *SummarizedCounter) SetName(v string) *SummarizedCounter { - s.Name = &v - return s -} - -// SetSum sets the Sum field's value. -func (s *SummarizedCounter) SetSum(v float64) *SummarizedCounter { - s.Sum = &v - return s -} - -// SetUnit sets the Unit field's value. -func (s *SummarizedCounter) SetUnit(v string) *SummarizedCounter { - s.Unit = &v - return s -} - -// The time range. -// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/TimeRange -type TimeRange struct { - _ struct{} `type:"structure"` - - // The start time, in Unix time in seconds. For more information see timestamp - // (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types). - FromInclusive *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The end time, in Unix time in seconds. For more information see timestamp - // (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types). - ToExclusive *time.Time `type:"timestamp" timestampFormat:"unix"` -} - -// String returns the string representation -func (s TimeRange) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TimeRange) GoString() string { - return s.String() -} - -// SetFromInclusive sets the FromInclusive field's value. -func (s *TimeRange) SetFromInclusive(v time.Time) *TimeRange { - s.FromInclusive = &v - return s -} - -// SetToExclusive sets the ToExclusive field's value. 
-func (s *TimeRange) SetToExclusive(v time.Time) *TimeRange { - s.ToExclusive = &v - return s -} - -const ( - // AttackLayerNetwork is a AttackLayer enum value - AttackLayerNetwork = "NETWORK" - - // AttackLayerApplication is a AttackLayer enum value - AttackLayerApplication = "APPLICATION" -) - -const ( - // AttackPropertyIdentifierDestinationUrl is a AttackPropertyIdentifier enum value - AttackPropertyIdentifierDestinationUrl = "DESTINATION_URL" - - // AttackPropertyIdentifierReferrer is a AttackPropertyIdentifier enum value - AttackPropertyIdentifierReferrer = "REFERRER" - - // AttackPropertyIdentifierSourceAsn is a AttackPropertyIdentifier enum value - AttackPropertyIdentifierSourceAsn = "SOURCE_ASN" - - // AttackPropertyIdentifierSourceCountry is a AttackPropertyIdentifier enum value - AttackPropertyIdentifierSourceCountry = "SOURCE_COUNTRY" - - // AttackPropertyIdentifierSourceIpAddress is a AttackPropertyIdentifier enum value - AttackPropertyIdentifierSourceIpAddress = "SOURCE_IP_ADDRESS" - - // AttackPropertyIdentifierSourceUserAgent is a AttackPropertyIdentifier enum value - AttackPropertyIdentifierSourceUserAgent = "SOURCE_USER_AGENT" -) - -const ( - // SubResourceTypeIp is a SubResourceType enum value - SubResourceTypeIp = "IP" - - // SubResourceTypeUrl is a SubResourceType enum value - SubResourceTypeUrl = "URL" -) - -const ( - // SubscriptionStateActive is a SubscriptionState enum value - SubscriptionStateActive = "ACTIVE" - - // SubscriptionStateInactive is a SubscriptionState enum value - SubscriptionStateInactive = "INACTIVE" -) - -const ( - // UnitBits is a Unit enum value - UnitBits = "BITS" - - // UnitBytes is a Unit enum value - UnitBytes = "BYTES" - - // UnitPackets is a Unit enum value - UnitPackets = "PACKETS" - - // UnitRequests is a Unit enum value - UnitRequests = "REQUESTS" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/doc.go b/vendor/github.com/aws/aws-sdk-go/service/shield/doc.go deleted file mode 100644 index 
6926ced46a3..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/shield/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package shield provides the client and types for making API -// requests to AWS Shield. -// -// This is the AWS Shield Advanced API Reference. This guide is for developers -// who need detailed information about the AWS Shield Advanced API actions, -// data types, and errors. For detailed information about AWS WAF and AWS Shield -// Advanced features and an overview of how to use the AWS WAF and AWS Shield -// Advanced APIs, see the AWS WAF and AWS Shield Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). -// -// See https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02 for more information on this service. -// -// See shield package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/shield/ -// -// Using the Client -// -// To contact AWS Shield with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Shield client Shield for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/shield/#New -package shield diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/errors.go b/vendor/github.com/aws/aws-sdk-go/service/shield/errors.go deleted file mode 100644 index 3410e89cfea..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/shield/errors.go +++ /dev/null @@ -1,69 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. 
DO NOT EDIT. - -package shield - -const ( - - // ErrCodeInternalErrorException for service response error code - // "InternalErrorException". - // - // Exception that indicates that a problem occurred with the service infrastructure. - // You can retry the request. - ErrCodeInternalErrorException = "InternalErrorException" - - // ErrCodeInvalidOperationException for service response error code - // "InvalidOperationException". - // - // Exception that indicates that the operation would not cause any change to - // occur. - ErrCodeInvalidOperationException = "InvalidOperationException" - - // ErrCodeInvalidParameterException for service response error code - // "InvalidParameterException". - // - // Exception that indicates that the parameters passed to the API are invalid. - ErrCodeInvalidParameterException = "InvalidParameterException" - - // ErrCodeInvalidResourceException for service response error code - // "InvalidResourceException". - // - // Exception that indicates that the resource is invalid. You might not have - // access to the resource, or the resource might not exist. - ErrCodeInvalidResourceException = "InvalidResourceException" - - // ErrCodeLimitsExceededException for service response error code - // "LimitsExceededException". - // - // Exception that indicates that the operation would exceed a limit. - // - // Type is the type of limit that would be exceeded. - // - // Limit is the threshold that would be exceeded. - ErrCodeLimitsExceededException = "LimitsExceededException" - - // ErrCodeLockedSubscriptionException for service response error code - // "LockedSubscriptionException". - // - // Exception that indicates that the subscription you are trying to delete has - // not yet completed the 1-year commitment. You cannot delete this subscription. - ErrCodeLockedSubscriptionException = "LockedSubscriptionException" - - // ErrCodeOptimisticLockException for service response error code - // "OptimisticLockException". 
- // - // Exception that indicates that the protection state has been modified by another - // client. You can retry the request. - ErrCodeOptimisticLockException = "OptimisticLockException" - - // ErrCodeResourceAlreadyExistsException for service response error code - // "ResourceAlreadyExistsException". - // - // Exception indicating the specified resource already exists. - ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException" - - // ErrCodeResourceNotFoundException for service response error code - // "ResourceNotFoundException". - // - // Exception indicating the specified resource does not exist. - ErrCodeResourceNotFoundException = "ResourceNotFoundException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/service.go b/vendor/github.com/aws/aws-sdk-go/service/shield/service.go deleted file mode 100644 index 31680396c5a..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/shield/service.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package shield - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// Shield provides the API operation methods for making requests to -// AWS Shield. See this package's package overview docs -// for details on the service. -// -// Shield methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type Shield struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "shield" // Service endpoint prefix API calls made to. 
- EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the Shield client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a Shield client from just a session. -// svc := shield.New(mySession) -// -// // Create a Shield client with additional configuration -// svc := shield.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *Shield { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Shield { - svc := &Shield{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2016-06-02", - JSONVersion: "1.1", - TargetPrefix: "AWSShield_20160616", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a Shield operation and runs any -// custom request initialization. 
-func (c *Shield) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/api.go b/vendor/github.com/aws/aws-sdk-go/service/swf/api.go deleted file mode 100644 index a69f7b2887c..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/swf/api.go +++ /dev/null @@ -1,15446 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package swf - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -const opCountClosedWorkflowExecutions = "CountClosedWorkflowExecutions" - -// CountClosedWorkflowExecutionsRequest generates a "aws/request.Request" representing the -// client's request for the CountClosedWorkflowExecutions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CountClosedWorkflowExecutions for more information on using the CountClosedWorkflowExecutions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CountClosedWorkflowExecutionsRequest method. 
-// req, resp := client.CountClosedWorkflowExecutionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) CountClosedWorkflowExecutionsRequest(input *CountClosedWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionCount) { - op := &request.Operation{ - Name: opCountClosedWorkflowExecutions, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CountClosedWorkflowExecutionsInput{} - } - - output = &WorkflowExecutionCount{} - req = c.newRequest(op, input, output) - return -} - -// CountClosedWorkflowExecutions API operation for Amazon Simple Workflow Service. -// -// Returns the number of closed workflow executions within the given domain -// that meet the specified filtering criteria. -// -// This operation is eventually consistent. The results are best effort and -// may not exactly reflect recent updates and changes. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagFilter.tag: String constraint. The key is swf:tagFilter.tag. -// -// typeFilter.name: String constraint. The key is swf:typeFilter.name. -// -// typeFilter.version: String constraint. The key is swf:typeFilter.version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
-// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation CountClosedWorkflowExecutions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) CountClosedWorkflowExecutions(input *CountClosedWorkflowExecutionsInput) (*WorkflowExecutionCount, error) { - req, out := c.CountClosedWorkflowExecutionsRequest(input) - return out, req.Send() -} - -// CountClosedWorkflowExecutionsWithContext is the same as CountClosedWorkflowExecutions with the addition of -// the ability to pass a context and additional request options. -// -// See CountClosedWorkflowExecutions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SWF) CountClosedWorkflowExecutionsWithContext(ctx aws.Context, input *CountClosedWorkflowExecutionsInput, opts ...request.Option) (*WorkflowExecutionCount, error) { - req, out := c.CountClosedWorkflowExecutionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCountOpenWorkflowExecutions = "CountOpenWorkflowExecutions" - -// CountOpenWorkflowExecutionsRequest generates a "aws/request.Request" representing the -// client's request for the CountOpenWorkflowExecutions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CountOpenWorkflowExecutions for more information on using the CountOpenWorkflowExecutions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CountOpenWorkflowExecutionsRequest method. -// req, resp := client.CountOpenWorkflowExecutionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) CountOpenWorkflowExecutionsRequest(input *CountOpenWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionCount) { - op := &request.Operation{ - Name: opCountOpenWorkflowExecutions, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CountOpenWorkflowExecutionsInput{} - } - - output = &WorkflowExecutionCount{} - req = c.newRequest(op, input, output) - return -} - -// CountOpenWorkflowExecutions API operation for Amazon Simple Workflow Service. 
-// -// Returns the number of open workflow executions within the given domain that -// meet the specified filtering criteria. -// -// This operation is eventually consistent. The results are best effort and -// may not exactly reflect recent updates and changes. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagFilter.tag: String constraint. The key is swf:tagFilter.tag. -// -// typeFilter.name: String constraint. The key is swf:typeFilter.name. -// -// typeFilter.version: String constraint. The key is swf:typeFilter.version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation CountOpenWorkflowExecutions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). 
This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) CountOpenWorkflowExecutions(input *CountOpenWorkflowExecutionsInput) (*WorkflowExecutionCount, error) { - req, out := c.CountOpenWorkflowExecutionsRequest(input) - return out, req.Send() -} - -// CountOpenWorkflowExecutionsWithContext is the same as CountOpenWorkflowExecutions with the addition of -// the ability to pass a context and additional request options. -// -// See CountOpenWorkflowExecutions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) CountOpenWorkflowExecutionsWithContext(ctx aws.Context, input *CountOpenWorkflowExecutionsInput, opts ...request.Option) (*WorkflowExecutionCount, error) { - req, out := c.CountOpenWorkflowExecutionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCountPendingActivityTasks = "CountPendingActivityTasks" - -// CountPendingActivityTasksRequest generates a "aws/request.Request" representing the -// client's request for the CountPendingActivityTasks operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CountPendingActivityTasks for more information on using the CountPendingActivityTasks -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CountPendingActivityTasksRequest method. -// req, resp := client.CountPendingActivityTasksRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) CountPendingActivityTasksRequest(input *CountPendingActivityTasksInput) (req *request.Request, output *PendingTaskCount) { - op := &request.Operation{ - Name: opCountPendingActivityTasks, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CountPendingActivityTasksInput{} - } - - output = &PendingTaskCount{} - req = c.newRequest(op, input, output) - return -} - -// CountPendingActivityTasks API operation for Amazon Simple Workflow Service. -// -// Returns the estimated number of activity tasks in the specified task list. -// The count returned is an approximation and isn't guaranteed to be exact. -// If you specify a task list that no activity task was ever scheduled in then -// 0 is returned. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the taskList.name parameter by using a Condition element with -// the swf:taskList.name key to allow the action to access only certain task -// lists. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
-// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation CountPendingActivityTasks for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) CountPendingActivityTasks(input *CountPendingActivityTasksInput) (*PendingTaskCount, error) { - req, out := c.CountPendingActivityTasksRequest(input) - return out, req.Send() -} - -// CountPendingActivityTasksWithContext is the same as CountPendingActivityTasks with the addition of -// the ability to pass a context and additional request options. -// -// See CountPendingActivityTasks for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SWF) CountPendingActivityTasksWithContext(ctx aws.Context, input *CountPendingActivityTasksInput, opts ...request.Option) (*PendingTaskCount, error) { - req, out := c.CountPendingActivityTasksRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCountPendingDecisionTasks = "CountPendingDecisionTasks" - -// CountPendingDecisionTasksRequest generates a "aws/request.Request" representing the -// client's request for the CountPendingDecisionTasks operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CountPendingDecisionTasks for more information on using the CountPendingDecisionTasks -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CountPendingDecisionTasksRequest method. -// req, resp := client.CountPendingDecisionTasksRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) CountPendingDecisionTasksRequest(input *CountPendingDecisionTasksInput) (req *request.Request, output *PendingTaskCount) { - op := &request.Operation{ - Name: opCountPendingDecisionTasks, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CountPendingDecisionTasksInput{} - } - - output = &PendingTaskCount{} - req = c.newRequest(op, input, output) - return -} - -// CountPendingDecisionTasks API operation for Amazon Simple Workflow Service. -// -// Returns the estimated number of decision tasks in the specified task list. -// The count returned is an approximation and isn't guaranteed to be exact. 
-// If you specify a task list that no decision task was ever scheduled in then -// 0 is returned. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the taskList.name parameter by using a Condition element with -// the swf:taskList.name key to allow the action to access only certain task -// lists. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation CountPendingDecisionTasks for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. 
-// -func (c *SWF) CountPendingDecisionTasks(input *CountPendingDecisionTasksInput) (*PendingTaskCount, error) { - req, out := c.CountPendingDecisionTasksRequest(input) - return out, req.Send() -} - -// CountPendingDecisionTasksWithContext is the same as CountPendingDecisionTasks with the addition of -// the ability to pass a context and additional request options. -// -// See CountPendingDecisionTasks for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) CountPendingDecisionTasksWithContext(ctx aws.Context, input *CountPendingDecisionTasksInput, opts ...request.Option) (*PendingTaskCount, error) { - req, out := c.CountPendingDecisionTasksRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeprecateActivityType = "DeprecateActivityType" - -// DeprecateActivityTypeRequest generates a "aws/request.Request" representing the -// client's request for the DeprecateActivityType operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeprecateActivityType for more information on using the DeprecateActivityType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeprecateActivityTypeRequest method. 
-// req, resp := client.DeprecateActivityTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) DeprecateActivityTypeRequest(input *DeprecateActivityTypeInput) (req *request.Request, output *DeprecateActivityTypeOutput) { - op := &request.Operation{ - Name: opDeprecateActivityType, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeprecateActivityTypeInput{} - } - - output = &DeprecateActivityTypeOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeprecateActivityType API operation for Amazon Simple Workflow Service. -// -// Deprecates the specified activity type. After an activity type has been deprecated, -// you cannot create new tasks of that activity type. Tasks of this type that -// were scheduled before the type was deprecated continue to run. -// -// This operation is eventually consistent. The results are best effort and -// may not exactly reflect recent updates and changes. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// activityType.name: String constraint. The key is swf:activityType.name. -// -// activityType.version: String constraint. The key is swf:activityType.version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
-// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation DeprecateActivityType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeTypeDeprecatedFault "TypeDeprecatedFault" -// Returned when the specified activity or workflow type was already deprecated. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) DeprecateActivityType(input *DeprecateActivityTypeInput) (*DeprecateActivityTypeOutput, error) { - req, out := c.DeprecateActivityTypeRequest(input) - return out, req.Send() -} - -// DeprecateActivityTypeWithContext is the same as DeprecateActivityType with the addition of -// the ability to pass a context and additional request options. -// -// See DeprecateActivityType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SWF) DeprecateActivityTypeWithContext(ctx aws.Context, input *DeprecateActivityTypeInput, opts ...request.Option) (*DeprecateActivityTypeOutput, error) { - req, out := c.DeprecateActivityTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeprecateDomain = "DeprecateDomain" - -// DeprecateDomainRequest generates a "aws/request.Request" representing the -// client's request for the DeprecateDomain operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeprecateDomain for more information on using the DeprecateDomain -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeprecateDomainRequest method. -// req, resp := client.DeprecateDomainRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) DeprecateDomainRequest(input *DeprecateDomainInput) (req *request.Request, output *DeprecateDomainOutput) { - op := &request.Operation{ - Name: opDeprecateDomain, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeprecateDomainInput{} - } - - output = &DeprecateDomainOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeprecateDomain API operation for Amazon Simple Workflow Service. -// -// Deprecates the specified domain. 
After a domain has been deprecated it cannot -// be used to create new workflow executions or register new types. However, -// you can still use visibility actions on this domain. Deprecating a domain -// also deprecates all activity and workflow types registered in the domain. -// Executions that were started before the domain was deprecated continues to -// run. -// -// This operation is eventually consistent. The results are best effort and -// may not exactly reflect recent updates and changes. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation DeprecateDomain for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). 
This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeDomainDeprecatedFault "DomainDeprecatedFault" -// Returned when the specified domain has been deprecated. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) DeprecateDomain(input *DeprecateDomainInput) (*DeprecateDomainOutput, error) { - req, out := c.DeprecateDomainRequest(input) - return out, req.Send() -} - -// DeprecateDomainWithContext is the same as DeprecateDomain with the addition of -// the ability to pass a context and additional request options. -// -// See DeprecateDomain for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) DeprecateDomainWithContext(ctx aws.Context, input *DeprecateDomainInput, opts ...request.Option) (*DeprecateDomainOutput, error) { - req, out := c.DeprecateDomainRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeprecateWorkflowType = "DeprecateWorkflowType" - -// DeprecateWorkflowTypeRequest generates a "aws/request.Request" representing the -// client's request for the DeprecateWorkflowType operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeprecateWorkflowType for more information on using the DeprecateWorkflowType -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeprecateWorkflowTypeRequest method. -// req, resp := client.DeprecateWorkflowTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) DeprecateWorkflowTypeRequest(input *DeprecateWorkflowTypeInput) (req *request.Request, output *DeprecateWorkflowTypeOutput) { - op := &request.Operation{ - Name: opDeprecateWorkflowType, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeprecateWorkflowTypeInput{} - } - - output = &DeprecateWorkflowTypeOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeprecateWorkflowType API operation for Amazon Simple Workflow Service. -// -// Deprecates the specified workflow type. After a workflow type has been deprecated, -// you cannot create new executions of that type. Executions that were started -// before the type was deprecated continues to run. A deprecated workflow type -// may still be used when calling visibility actions. -// -// This operation is eventually consistent. The results are best effort and -// may not exactly reflect recent updates and changes. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// workflowType.name: String constraint. The key is swf:workflowType.name. 
-// -// workflowType.version: String constraint. The key is swf:workflowType.version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation DeprecateWorkflowType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeTypeDeprecatedFault "TypeDeprecatedFault" -// Returned when the specified activity or workflow type was already deprecated. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) DeprecateWorkflowType(input *DeprecateWorkflowTypeInput) (*DeprecateWorkflowTypeOutput, error) { - req, out := c.DeprecateWorkflowTypeRequest(input) - return out, req.Send() -} - -// DeprecateWorkflowTypeWithContext is the same as DeprecateWorkflowType with the addition of -// the ability to pass a context and additional request options. -// -// See DeprecateWorkflowType for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) DeprecateWorkflowTypeWithContext(ctx aws.Context, input *DeprecateWorkflowTypeInput, opts ...request.Option) (*DeprecateWorkflowTypeOutput, error) { - req, out := c.DeprecateWorkflowTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeActivityType = "DescribeActivityType" - -// DescribeActivityTypeRequest generates a "aws/request.Request" representing the -// client's request for the DescribeActivityType operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeActivityType for more information on using the DescribeActivityType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeActivityTypeRequest method. 
-// req, resp := client.DescribeActivityTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) DescribeActivityTypeRequest(input *DescribeActivityTypeInput) (req *request.Request, output *DescribeActivityTypeOutput) { - op := &request.Operation{ - Name: opDescribeActivityType, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeActivityTypeInput{} - } - - output = &DescribeActivityTypeOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeActivityType API operation for Amazon Simple Workflow Service. -// -// Returns information about the specified activity type. This includes configuration -// settings provided when the type was registered and other general information -// about the type. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// activityType.name: String constraint. The key is swf:activityType.name. -// -// activityType.version: String constraint. The key is swf:activityType.version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation DescribeActivityType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) DescribeActivityType(input *DescribeActivityTypeInput) (*DescribeActivityTypeOutput, error) { - req, out := c.DescribeActivityTypeRequest(input) - return out, req.Send() -} - -// DescribeActivityTypeWithContext is the same as DescribeActivityType with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeActivityType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) DescribeActivityTypeWithContext(ctx aws.Context, input *DescribeActivityTypeInput, opts ...request.Option) (*DescribeActivityTypeOutput, error) { - req, out := c.DescribeActivityTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeDomain = "DescribeDomain" - -// DescribeDomainRequest generates a "aws/request.Request" representing the -// client's request for the DescribeDomain operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeDomain for more information on using the DescribeDomain -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeDomainRequest method. -// req, resp := client.DescribeDomainRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) DescribeDomainRequest(input *DescribeDomainInput) (req *request.Request, output *DescribeDomainOutput) { - op := &request.Operation{ - Name: opDescribeDomain, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeDomainInput{} - } - - output = &DescribeDomainOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeDomain API operation for Amazon Simple Workflow Service. -// -// Returns information about the specified domain, including description and -// status. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
-// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation DescribeDomain for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) DescribeDomain(input *DescribeDomainInput) (*DescribeDomainOutput, error) { - req, out := c.DescribeDomainRequest(input) - return out, req.Send() -} - -// DescribeDomainWithContext is the same as DescribeDomain with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeDomain for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) DescribeDomainWithContext(ctx aws.Context, input *DescribeDomainInput, opts ...request.Option) (*DescribeDomainOutput, error) { - req, out := c.DescribeDomainRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDescribeWorkflowExecution = "DescribeWorkflowExecution" - -// DescribeWorkflowExecutionRequest generates a "aws/request.Request" representing the -// client's request for the DescribeWorkflowExecution operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeWorkflowExecution for more information on using the DescribeWorkflowExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeWorkflowExecutionRequest method. -// req, resp := client.DescribeWorkflowExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) DescribeWorkflowExecutionRequest(input *DescribeWorkflowExecutionInput) (req *request.Request, output *DescribeWorkflowExecutionOutput) { - op := &request.Operation{ - Name: opDescribeWorkflowExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeWorkflowExecutionInput{} - } - - output = &DescribeWorkflowExecutionOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeWorkflowExecution API operation for Amazon Simple Workflow Service. -// -// Returns information about the specified workflow execution including its -// type and some statistics. -// -// This operation is eventually consistent. The results are best effort and -// may not exactly reflect recent updates and changes. 
-// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation DescribeWorkflowExecution for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. 
-// -func (c *SWF) DescribeWorkflowExecution(input *DescribeWorkflowExecutionInput) (*DescribeWorkflowExecutionOutput, error) { - req, out := c.DescribeWorkflowExecutionRequest(input) - return out, req.Send() -} - -// DescribeWorkflowExecutionWithContext is the same as DescribeWorkflowExecution with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeWorkflowExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) DescribeWorkflowExecutionWithContext(ctx aws.Context, input *DescribeWorkflowExecutionInput, opts ...request.Option) (*DescribeWorkflowExecutionOutput, error) { - req, out := c.DescribeWorkflowExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeWorkflowType = "DescribeWorkflowType" - -// DescribeWorkflowTypeRequest generates a "aws/request.Request" representing the -// client's request for the DescribeWorkflowType operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeWorkflowType for more information on using the DescribeWorkflowType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeWorkflowTypeRequest method. 
-// req, resp := client.DescribeWorkflowTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) DescribeWorkflowTypeRequest(input *DescribeWorkflowTypeInput) (req *request.Request, output *DescribeWorkflowTypeOutput) { - op := &request.Operation{ - Name: opDescribeWorkflowType, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeWorkflowTypeInput{} - } - - output = &DescribeWorkflowTypeOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeWorkflowType API operation for Amazon Simple Workflow Service. -// -// Returns information about the specified workflow type. This includes configuration -// settings specified when the type was registered and other information such -// as creation date, current status, etc. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// workflowType.name: String constraint. The key is swf:workflowType.name. -// -// workflowType.version: String constraint. The key is swf:workflowType.version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation DescribeWorkflowType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) DescribeWorkflowType(input *DescribeWorkflowTypeInput) (*DescribeWorkflowTypeOutput, error) { - req, out := c.DescribeWorkflowTypeRequest(input) - return out, req.Send() -} - -// DescribeWorkflowTypeWithContext is the same as DescribeWorkflowType with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeWorkflowType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) DescribeWorkflowTypeWithContext(ctx aws.Context, input *DescribeWorkflowTypeInput, opts ...request.Option) (*DescribeWorkflowTypeOutput, error) { - req, out := c.DescribeWorkflowTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetWorkflowExecutionHistory = "GetWorkflowExecutionHistory" - -// GetWorkflowExecutionHistoryRequest generates a "aws/request.Request" representing the -// client's request for the GetWorkflowExecutionHistory operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetWorkflowExecutionHistory for more information on using the GetWorkflowExecutionHistory -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetWorkflowExecutionHistoryRequest method. -// req, resp := client.GetWorkflowExecutionHistoryRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) GetWorkflowExecutionHistoryRequest(input *GetWorkflowExecutionHistoryInput) (req *request.Request, output *GetWorkflowExecutionHistoryOutput) { - op := &request.Operation{ - Name: opGetWorkflowExecutionHistory, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextPageToken"}, - OutputTokens: []string{"nextPageToken"}, - LimitToken: "maximumPageSize", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetWorkflowExecutionHistoryInput{} - } - - output = &GetWorkflowExecutionHistoryOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetWorkflowExecutionHistory API operation for Amazon Simple Workflow Service. -// -// Returns the history of the specified workflow execution. The results may -// be split into multiple pages. To retrieve subsequent pages, make the call -// again using the nextPageToken returned by the initial call. -// -// This operation is eventually consistent. The results are best effort and -// may not exactly reflect recent updates and changes. 
-// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation GetWorkflowExecutionHistory for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. 
-// -func (c *SWF) GetWorkflowExecutionHistory(input *GetWorkflowExecutionHistoryInput) (*GetWorkflowExecutionHistoryOutput, error) { - req, out := c.GetWorkflowExecutionHistoryRequest(input) - return out, req.Send() -} - -// GetWorkflowExecutionHistoryWithContext is the same as GetWorkflowExecutionHistory with the addition of -// the ability to pass a context and additional request options. -// -// See GetWorkflowExecutionHistory for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) GetWorkflowExecutionHistoryWithContext(ctx aws.Context, input *GetWorkflowExecutionHistoryInput, opts ...request.Option) (*GetWorkflowExecutionHistoryOutput, error) { - req, out := c.GetWorkflowExecutionHistoryRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetWorkflowExecutionHistoryPages iterates over the pages of a GetWorkflowExecutionHistory operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetWorkflowExecutionHistory method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetWorkflowExecutionHistory operation. 
-// pageNum := 0 -// err := client.GetWorkflowExecutionHistoryPages(params, -// func(page *GetWorkflowExecutionHistoryOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SWF) GetWorkflowExecutionHistoryPages(input *GetWorkflowExecutionHistoryInput, fn func(*GetWorkflowExecutionHistoryOutput, bool) bool) error { - return c.GetWorkflowExecutionHistoryPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetWorkflowExecutionHistoryPagesWithContext same as GetWorkflowExecutionHistoryPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) GetWorkflowExecutionHistoryPagesWithContext(ctx aws.Context, input *GetWorkflowExecutionHistoryInput, fn func(*GetWorkflowExecutionHistoryOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetWorkflowExecutionHistoryInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetWorkflowExecutionHistoryRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetWorkflowExecutionHistoryOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListActivityTypes = "ListActivityTypes" - -// ListActivityTypesRequest generates a "aws/request.Request" representing the -// client's request for the ListActivityTypes operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See ListActivityTypes for more information on using the ListActivityTypes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListActivityTypesRequest method. -// req, resp := client.ListActivityTypesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) ListActivityTypesRequest(input *ListActivityTypesInput) (req *request.Request, output *ListActivityTypesOutput) { - op := &request.Operation{ - Name: opListActivityTypes, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextPageToken"}, - OutputTokens: []string{"nextPageToken"}, - LimitToken: "maximumPageSize", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListActivityTypesInput{} - } - - output = &ListActivityTypesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListActivityTypes API operation for Amazon Simple Workflow Service. -// -// Returns information about all activities registered in the specified domain -// that match the specified name and registration status. The result includes -// information like creation date, current status of the activity, etc. The -// results may be split into multiple pages. To retrieve subsequent pages, make -// the call again using the nextPageToken returned by the initial call. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. 
-// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation ListActivityTypes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -func (c *SWF) ListActivityTypes(input *ListActivityTypesInput) (*ListActivityTypesOutput, error) { - req, out := c.ListActivityTypesRequest(input) - return out, req.Send() -} - -// ListActivityTypesWithContext is the same as ListActivityTypes with the addition of -// the ability to pass a context and additional request options. -// -// See ListActivityTypes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) ListActivityTypesWithContext(ctx aws.Context, input *ListActivityTypesInput, opts ...request.Option) (*ListActivityTypesOutput, error) { - req, out := c.ListActivityTypesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListActivityTypesPages iterates over the pages of a ListActivityTypes operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListActivityTypes method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListActivityTypes operation. -// pageNum := 0 -// err := client.ListActivityTypesPages(params, -// func(page *ListActivityTypesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SWF) ListActivityTypesPages(input *ListActivityTypesInput, fn func(*ListActivityTypesOutput, bool) bool) error { - return c.ListActivityTypesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListActivityTypesPagesWithContext same as ListActivityTypesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SWF) ListActivityTypesPagesWithContext(ctx aws.Context, input *ListActivityTypesInput, fn func(*ListActivityTypesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListActivityTypesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListActivityTypesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListActivityTypesOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListClosedWorkflowExecutions = "ListClosedWorkflowExecutions" - -// ListClosedWorkflowExecutionsRequest generates a "aws/request.Request" representing the -// client's request for the ListClosedWorkflowExecutions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListClosedWorkflowExecutions for more information on using the ListClosedWorkflowExecutions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListClosedWorkflowExecutionsRequest method. 
-// req, resp := client.ListClosedWorkflowExecutionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) ListClosedWorkflowExecutionsRequest(input *ListClosedWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionInfos) { - op := &request.Operation{ - Name: opListClosedWorkflowExecutions, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextPageToken"}, - OutputTokens: []string{"nextPageToken"}, - LimitToken: "maximumPageSize", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListClosedWorkflowExecutionsInput{} - } - - output = &WorkflowExecutionInfos{} - req = c.newRequest(op, input, output) - return -} - -// ListClosedWorkflowExecutions API operation for Amazon Simple Workflow Service. -// -// Returns a list of closed workflow executions in the specified domain that -// meet the filtering criteria. The results may be split into multiple pages. -// To retrieve subsequent pages, make the call again using the nextPageToken -// returned by the initial call. -// -// This operation is eventually consistent. The results are best effort and -// may not exactly reflect recent updates and changes. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagFilter.tag: String constraint. The key is swf:tagFilter.tag. -// -// typeFilter.name: String constraint. The key is swf:typeFilter.name. -// -// typeFilter.version: String constraint. The key is swf:typeFilter.version. 
-// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation ListClosedWorkflowExecutions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) ListClosedWorkflowExecutions(input *ListClosedWorkflowExecutionsInput) (*WorkflowExecutionInfos, error) { - req, out := c.ListClosedWorkflowExecutionsRequest(input) - return out, req.Send() -} - -// ListClosedWorkflowExecutionsWithContext is the same as ListClosedWorkflowExecutions with the addition of -// the ability to pass a context and additional request options. -// -// See ListClosedWorkflowExecutions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) ListClosedWorkflowExecutionsWithContext(ctx aws.Context, input *ListClosedWorkflowExecutionsInput, opts ...request.Option) (*WorkflowExecutionInfos, error) { - req, out := c.ListClosedWorkflowExecutionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListClosedWorkflowExecutionsPages iterates over the pages of a ListClosedWorkflowExecutions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListClosedWorkflowExecutions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListClosedWorkflowExecutions operation. -// pageNum := 0 -// err := client.ListClosedWorkflowExecutionsPages(params, -// func(page *WorkflowExecutionInfos, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SWF) ListClosedWorkflowExecutionsPages(input *ListClosedWorkflowExecutionsInput, fn func(*WorkflowExecutionInfos, bool) bool) error { - return c.ListClosedWorkflowExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListClosedWorkflowExecutionsPagesWithContext same as ListClosedWorkflowExecutionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SWF) ListClosedWorkflowExecutionsPagesWithContext(ctx aws.Context, input *ListClosedWorkflowExecutionsInput, fn func(*WorkflowExecutionInfos, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListClosedWorkflowExecutionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListClosedWorkflowExecutionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*WorkflowExecutionInfos), !p.HasNextPage()) - } - return p.Err() -} - -const opListDomains = "ListDomains" - -// ListDomainsRequest generates a "aws/request.Request" representing the -// client's request for the ListDomains operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListDomains for more information on using the ListDomains -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListDomainsRequest method. 
-// req, resp := client.ListDomainsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) ListDomainsRequest(input *ListDomainsInput) (req *request.Request, output *ListDomainsOutput) { - op := &request.Operation{ - Name: opListDomains, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextPageToken"}, - OutputTokens: []string{"nextPageToken"}, - LimitToken: "maximumPageSize", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListDomainsInput{} - } - - output = &ListDomainsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListDomains API operation for Amazon Simple Workflow Service. -// -// Returns the list of domains registered in the account. The results may be -// split into multiple pages. To retrieve subsequent pages, make the call again -// using the nextPageToken returned by the initial call. -// -// This operation is eventually consistent. The results are best effort and -// may not exactly reflect recent updates and changes. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. The element must be set to arn:aws:swf::AccountID:domain/*, -// where AccountID is the account ID, with no dashes. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
-// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation ListDomains for usage and error information. -// -// Returned Error Codes: -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) ListDomains(input *ListDomainsInput) (*ListDomainsOutput, error) { - req, out := c.ListDomainsRequest(input) - return out, req.Send() -} - -// ListDomainsWithContext is the same as ListDomains with the addition of -// the ability to pass a context and additional request options. -// -// See ListDomains for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) ListDomainsWithContext(ctx aws.Context, input *ListDomainsInput, opts ...request.Option) (*ListDomainsOutput, error) { - req, out := c.ListDomainsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListDomainsPages iterates over the pages of a ListDomains operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListDomains method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. 
-// -// // Example iterating over at most 3 pages of a ListDomains operation. -// pageNum := 0 -// err := client.ListDomainsPages(params, -// func(page *ListDomainsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SWF) ListDomainsPages(input *ListDomainsInput, fn func(*ListDomainsOutput, bool) bool) error { - return c.ListDomainsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListDomainsPagesWithContext same as ListDomainsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) ListDomainsPagesWithContext(ctx aws.Context, input *ListDomainsInput, fn func(*ListDomainsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListDomainsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListDomainsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDomainsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListOpenWorkflowExecutions = "ListOpenWorkflowExecutions" - -// ListOpenWorkflowExecutionsRequest generates a "aws/request.Request" representing the -// client's request for the ListOpenWorkflowExecutions operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See ListOpenWorkflowExecutions for more information on using the ListOpenWorkflowExecutions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListOpenWorkflowExecutionsRequest method. -// req, resp := client.ListOpenWorkflowExecutionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) ListOpenWorkflowExecutionsRequest(input *ListOpenWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionInfos) { - op := &request.Operation{ - Name: opListOpenWorkflowExecutions, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextPageToken"}, - OutputTokens: []string{"nextPageToken"}, - LimitToken: "maximumPageSize", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListOpenWorkflowExecutionsInput{} - } - - output = &WorkflowExecutionInfos{} - req = c.newRequest(op, input, output) - return -} - -// ListOpenWorkflowExecutions API operation for Amazon Simple Workflow Service. -// -// Returns a list of open workflow executions in the specified domain that meet -// the filtering criteria. The results may be split into multiple pages. To -// retrieve subsequent pages, make the call again using the nextPageToken returned -// by the initial call. -// -// This operation is eventually consistent. The results are best effort and -// may not exactly reflect recent updates and changes. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. 
-// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagFilter.tag: String constraint. The key is swf:tagFilter.tag. -// -// typeFilter.name: String constraint. The key is swf:typeFilter.name. -// -// typeFilter.version: String constraint. The key is swf:typeFilter.version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation ListOpenWorkflowExecutions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) ListOpenWorkflowExecutions(input *ListOpenWorkflowExecutionsInput) (*WorkflowExecutionInfos, error) { - req, out := c.ListOpenWorkflowExecutionsRequest(input) - return out, req.Send() -} - -// ListOpenWorkflowExecutionsWithContext is the same as ListOpenWorkflowExecutions with the addition of -// the ability to pass a context and additional request options. 
-// -// See ListOpenWorkflowExecutions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) ListOpenWorkflowExecutionsWithContext(ctx aws.Context, input *ListOpenWorkflowExecutionsInput, opts ...request.Option) (*WorkflowExecutionInfos, error) { - req, out := c.ListOpenWorkflowExecutionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListOpenWorkflowExecutionsPages iterates over the pages of a ListOpenWorkflowExecutions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListOpenWorkflowExecutions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListOpenWorkflowExecutions operation. -// pageNum := 0 -// err := client.ListOpenWorkflowExecutionsPages(params, -// func(page *WorkflowExecutionInfos, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SWF) ListOpenWorkflowExecutionsPages(input *ListOpenWorkflowExecutionsInput, fn func(*WorkflowExecutionInfos, bool) bool) error { - return c.ListOpenWorkflowExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListOpenWorkflowExecutionsPagesWithContext same as ListOpenWorkflowExecutionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) ListOpenWorkflowExecutionsPagesWithContext(ctx aws.Context, input *ListOpenWorkflowExecutionsInput, fn func(*WorkflowExecutionInfos, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListOpenWorkflowExecutionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListOpenWorkflowExecutionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*WorkflowExecutionInfos), !p.HasNextPage()) - } - return p.Err() -} - -const opListWorkflowTypes = "ListWorkflowTypes" - -// ListWorkflowTypesRequest generates a "aws/request.Request" representing the -// client's request for the ListWorkflowTypes operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListWorkflowTypes for more information on using the ListWorkflowTypes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListWorkflowTypesRequest method. 
-// req, resp := client.ListWorkflowTypesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) ListWorkflowTypesRequest(input *ListWorkflowTypesInput) (req *request.Request, output *ListWorkflowTypesOutput) { - op := &request.Operation{ - Name: opListWorkflowTypes, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextPageToken"}, - OutputTokens: []string{"nextPageToken"}, - LimitToken: "maximumPageSize", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListWorkflowTypesInput{} - } - - output = &ListWorkflowTypesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListWorkflowTypes API operation for Amazon Simple Workflow Service. -// -// Returns information about workflow types in the specified domain. The results -// may be split into multiple pages that can be retrieved by making the call -// repeatedly. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation ListWorkflowTypes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -func (c *SWF) ListWorkflowTypes(input *ListWorkflowTypesInput) (*ListWorkflowTypesOutput, error) { - req, out := c.ListWorkflowTypesRequest(input) - return out, req.Send() -} - -// ListWorkflowTypesWithContext is the same as ListWorkflowTypes with the addition of -// the ability to pass a context and additional request options. -// -// See ListWorkflowTypes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) ListWorkflowTypesWithContext(ctx aws.Context, input *ListWorkflowTypesInput, opts ...request.Option) (*ListWorkflowTypesOutput, error) { - req, out := c.ListWorkflowTypesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListWorkflowTypesPages iterates over the pages of a ListWorkflowTypes operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. 
-// -// See ListWorkflowTypes method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListWorkflowTypes operation. -// pageNum := 0 -// err := client.ListWorkflowTypesPages(params, -// func(page *ListWorkflowTypesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SWF) ListWorkflowTypesPages(input *ListWorkflowTypesInput, fn func(*ListWorkflowTypesOutput, bool) bool) error { - return c.ListWorkflowTypesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListWorkflowTypesPagesWithContext same as ListWorkflowTypesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) ListWorkflowTypesPagesWithContext(ctx aws.Context, input *ListWorkflowTypesInput, fn func(*ListWorkflowTypesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListWorkflowTypesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListWorkflowTypesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListWorkflowTypesOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opPollForActivityTask = "PollForActivityTask" - -// PollForActivityTaskRequest generates a "aws/request.Request" representing the -// client's request for the PollForActivityTask operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PollForActivityTask for more information on using the PollForActivityTask -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PollForActivityTaskRequest method. -// req, resp := client.PollForActivityTaskRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) PollForActivityTaskRequest(input *PollForActivityTaskInput) (req *request.Request, output *PollForActivityTaskOutput) { - op := &request.Operation{ - Name: opPollForActivityTask, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PollForActivityTaskInput{} - } - - output = &PollForActivityTaskOutput{} - req = c.newRequest(op, input, output) - return -} - -// PollForActivityTask API operation for Amazon Simple Workflow Service. -// -// Used by workers to get an ActivityTask from the specified activity taskList. -// This initiates a long poll, where the service holds the HTTP connection open -// and responds as soon as a task becomes available. The maximum time the service -// holds on to the request before responding is 60 seconds. If no task is available -// within 60 seconds, the poll returns an empty result. An empty result, in -// this context, means that an ActivityTask is returned, but that the value -// of taskToken is an empty string. If a task is returned, the worker should -// use its type to identify and process it correctly. -// -// Workers should set their client side socket timeout to at least 70 seconds -// (10 seconds higher than the maximum time service may hold the poll request). 
-// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the taskList.name parameter by using a Condition element with -// the swf:taskList.name key to allow the action to access only certain task -// lists. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation PollForActivityTask for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -// * ErrCodeLimitExceededFault "LimitExceededFault" -// Returned by any operation if a system imposed limitation has been reached. 
-// To address this fault you should either clean up unused resources or increase -// the limit by contacting AWS. -// -func (c *SWF) PollForActivityTask(input *PollForActivityTaskInput) (*PollForActivityTaskOutput, error) { - req, out := c.PollForActivityTaskRequest(input) - return out, req.Send() -} - -// PollForActivityTaskWithContext is the same as PollForActivityTask with the addition of -// the ability to pass a context and additional request options. -// -// See PollForActivityTask for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) PollForActivityTaskWithContext(ctx aws.Context, input *PollForActivityTaskInput, opts ...request.Option) (*PollForActivityTaskOutput, error) { - req, out := c.PollForActivityTaskRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPollForDecisionTask = "PollForDecisionTask" - -// PollForDecisionTaskRequest generates a "aws/request.Request" representing the -// client's request for the PollForDecisionTask operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PollForDecisionTask for more information on using the PollForDecisionTask -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PollForDecisionTaskRequest method. 
-// req, resp := client.PollForDecisionTaskRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) PollForDecisionTaskRequest(input *PollForDecisionTaskInput) (req *request.Request, output *PollForDecisionTaskOutput) { - op := &request.Operation{ - Name: opPollForDecisionTask, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextPageToken"}, - OutputTokens: []string{"nextPageToken"}, - LimitToken: "maximumPageSize", - TruncationToken: "", - }, - } - - if input == nil { - input = &PollForDecisionTaskInput{} - } - - output = &PollForDecisionTaskOutput{} - req = c.newRequest(op, input, output) - return -} - -// PollForDecisionTask API operation for Amazon Simple Workflow Service. -// -// Used by deciders to get a DecisionTask from the specified decision taskList. -// A decision task may be returned for any open workflow execution that is using -// the specified task list. The task includes a paginated view of the history -// of the workflow execution. The decider should use the workflow type and the -// history to determine how to properly handle the task. -// -// This action initiates a long poll, where the service holds the HTTP connection -// open and responds as soon a task becomes available. If no decision task is -// available in the specified task list before the timeout of 60 seconds expires, -// an empty result is returned. An empty result, in this context, means that -// a DecisionTask is returned, but that the value of taskToken is an empty string. -// -// Deciders should set their client side socket timeout to at least 70 seconds -// (10 seconds higher than the timeout). -// -// Because the number of workflow history events for a single workflow execution -// might be very large, the result returned might be split up across a number -// of pages. 
To retrieve subsequent pages, make additional calls to PollForDecisionTask -// using the nextPageToken returned by the initial call. Note that you do not -// call GetWorkflowExecutionHistory with this nextPageToken. Instead, call PollForDecisionTask -// again. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the taskList.name parameter by using a Condition element with -// the swf:taskList.name key to allow the action to access only certain task -// lists. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation PollForDecisionTask for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. 
-// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -// * ErrCodeLimitExceededFault "LimitExceededFault" -// Returned by any operation if a system imposed limitation has been reached. -// To address this fault you should either clean up unused resources or increase -// the limit by contacting AWS. -// -func (c *SWF) PollForDecisionTask(input *PollForDecisionTaskInput) (*PollForDecisionTaskOutput, error) { - req, out := c.PollForDecisionTaskRequest(input) - return out, req.Send() -} - -// PollForDecisionTaskWithContext is the same as PollForDecisionTask with the addition of -// the ability to pass a context and additional request options. -// -// See PollForDecisionTask for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) PollForDecisionTaskWithContext(ctx aws.Context, input *PollForDecisionTaskInput, opts ...request.Option) (*PollForDecisionTaskOutput, error) { - req, out := c.PollForDecisionTaskRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// PollForDecisionTaskPages iterates over the pages of a PollForDecisionTask operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See PollForDecisionTask method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a PollForDecisionTask operation. 
-// pageNum := 0 -// err := client.PollForDecisionTaskPages(params, -// func(page *PollForDecisionTaskOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SWF) PollForDecisionTaskPages(input *PollForDecisionTaskInput, fn func(*PollForDecisionTaskOutput, bool) bool) error { - return c.PollForDecisionTaskPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// PollForDecisionTaskPagesWithContext same as PollForDecisionTaskPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) PollForDecisionTaskPagesWithContext(ctx aws.Context, input *PollForDecisionTaskInput, fn func(*PollForDecisionTaskOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *PollForDecisionTaskInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.PollForDecisionTaskRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*PollForDecisionTaskOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opRecordActivityTaskHeartbeat = "RecordActivityTaskHeartbeat" - -// RecordActivityTaskHeartbeatRequest generates a "aws/request.Request" representing the -// client's request for the RecordActivityTaskHeartbeat operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See RecordActivityTaskHeartbeat for more information on using the RecordActivityTaskHeartbeat -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RecordActivityTaskHeartbeatRequest method. -// req, resp := client.RecordActivityTaskHeartbeatRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) RecordActivityTaskHeartbeatRequest(input *RecordActivityTaskHeartbeatInput) (req *request.Request, output *RecordActivityTaskHeartbeatOutput) { - op := &request.Operation{ - Name: opRecordActivityTaskHeartbeat, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RecordActivityTaskHeartbeatInput{} - } - - output = &RecordActivityTaskHeartbeatOutput{} - req = c.newRequest(op, input, output) - return -} - -// RecordActivityTaskHeartbeat API operation for Amazon Simple Workflow Service. -// -// Used by activity workers to report to the service that the ActivityTask represented -// by the specified taskToken is still making progress. The worker can also -// specify details of the progress, for example percent complete, using the -// details parameter. This action can also be used by the worker as a mechanism -// to check if cancellation is being requested for the activity task. If a cancellation -// is being attempted for the specified task, then the boolean cancelRequested -// flag returned by the service is set to true. -// -// This action resets the taskHeartbeatTimeout clock. The taskHeartbeatTimeout -// is specified in RegisterActivityType. -// -// This action doesn't in itself create an event in the workflow execution history. 
-// However, if the task times out, the workflow execution history contains a -// ActivityTaskTimedOut event that contains the information from the last heartbeat -// generated by the activity worker. -// -// The taskStartToCloseTimeout of an activity type is the maximum duration of -// an activity task, regardless of the number of RecordActivityTaskHeartbeat -// requests received. The taskStartToCloseTimeout is also specified in RegisterActivityType. -// -// This operation is only useful for long-lived activities to report liveliness -// of the task and to determine if a cancellation is being attempted. -// -// If the cancelRequested flag returns true, a cancellation is being attempted. -// If the worker can cancel the activity, it should respond with RespondActivityTaskCanceled. -// Otherwise, it should ignore the cancellation request. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation RecordActivityTaskHeartbeat for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) RecordActivityTaskHeartbeat(input *RecordActivityTaskHeartbeatInput) (*RecordActivityTaskHeartbeatOutput, error) { - req, out := c.RecordActivityTaskHeartbeatRequest(input) - return out, req.Send() -} - -// RecordActivityTaskHeartbeatWithContext is the same as RecordActivityTaskHeartbeat with the addition of -// the ability to pass a context and additional request options. -// -// See RecordActivityTaskHeartbeat for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) RecordActivityTaskHeartbeatWithContext(ctx aws.Context, input *RecordActivityTaskHeartbeatInput, opts ...request.Option) (*RecordActivityTaskHeartbeatOutput, error) { - req, out := c.RecordActivityTaskHeartbeatRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRegisterActivityType = "RegisterActivityType" - -// RegisterActivityTypeRequest generates a "aws/request.Request" representing the -// client's request for the RegisterActivityType operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RegisterActivityType for more information on using the RegisterActivityType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RegisterActivityTypeRequest method. -// req, resp := client.RegisterActivityTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) RegisterActivityTypeRequest(input *RegisterActivityTypeInput) (req *request.Request, output *RegisterActivityTypeOutput) { - op := &request.Operation{ - Name: opRegisterActivityType, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RegisterActivityTypeInput{} - } - - output = &RegisterActivityTypeOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// RegisterActivityType API operation for Amazon Simple Workflow Service. -// -// Registers a new activity type along with its configuration settings in the -// specified domain. -// -// A TypeAlreadyExists fault is returned if the type already exists in the domain. -// You cannot change any configuration settings of the type after its registration, -// and it must be registered as a new version. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. 
-// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name. -// -// name: String constraint. The key is swf:name. -// -// version: String constraint. The key is swf:version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation RegisterActivityType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeTypeAlreadyExistsFault "TypeAlreadyExistsFault" -// Returned if the type already exists in the specified domain. You get this -// fault even if the existing type is in deprecated status. You can specify -// another version if the intent is to create a new distinct version of the -// type. -// -// * ErrCodeLimitExceededFault "LimitExceededFault" -// Returned by any operation if a system imposed limitation has been reached. -// To address this fault you should either clean up unused resources or increase -// the limit by contacting AWS. -// -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). 
This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) RegisterActivityType(input *RegisterActivityTypeInput) (*RegisterActivityTypeOutput, error) { - req, out := c.RegisterActivityTypeRequest(input) - return out, req.Send() -} - -// RegisterActivityTypeWithContext is the same as RegisterActivityType with the addition of -// the ability to pass a context and additional request options. -// -// See RegisterActivityType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) RegisterActivityTypeWithContext(ctx aws.Context, input *RegisterActivityTypeInput, opts ...request.Option) (*RegisterActivityTypeOutput, error) { - req, out := c.RegisterActivityTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRegisterDomain = "RegisterDomain" - -// RegisterDomainRequest generates a "aws/request.Request" representing the -// client's request for the RegisterDomain operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RegisterDomain for more information on using the RegisterDomain -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RegisterDomainRequest method. -// req, resp := client.RegisterDomainRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) RegisterDomainRequest(input *RegisterDomainInput) (req *request.Request, output *RegisterDomainOutput) { - op := &request.Operation{ - Name: opRegisterDomain, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RegisterDomainInput{} - } - - output = &RegisterDomainOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// RegisterDomain API operation for Amazon Simple Workflow Service. -// -// Registers a new domain. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * You cannot use an IAM policy to control domain access for this action. -// The name of the domain being registered is available as the resource of -// this action. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation RegisterDomain for usage and error information. -// -// Returned Error Codes: -// * ErrCodeDomainAlreadyExistsFault "DomainAlreadyExistsFault" -// Returned if the specified domain already exists. You get this fault even -// if the existing domain is in deprecated status. -// -// * ErrCodeLimitExceededFault "LimitExceededFault" -// Returned by any operation if a system imposed limitation has been reached. -// To address this fault you should either clean up unused resources or increase -// the limit by contacting AWS. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) RegisterDomain(input *RegisterDomainInput) (*RegisterDomainOutput, error) { - req, out := c.RegisterDomainRequest(input) - return out, req.Send() -} - -// RegisterDomainWithContext is the same as RegisterDomain with the addition of -// the ability to pass a context and additional request options. -// -// See RegisterDomain for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) RegisterDomainWithContext(ctx aws.Context, input *RegisterDomainInput, opts ...request.Option) (*RegisterDomainOutput, error) { - req, out := c.RegisterDomainRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRegisterWorkflowType = "RegisterWorkflowType" - -// RegisterWorkflowTypeRequest generates a "aws/request.Request" representing the -// client's request for the RegisterWorkflowType operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RegisterWorkflowType for more information on using the RegisterWorkflowType -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RegisterWorkflowTypeRequest method. -// req, resp := client.RegisterWorkflowTypeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) RegisterWorkflowTypeRequest(input *RegisterWorkflowTypeInput) (req *request.Request, output *RegisterWorkflowTypeOutput) { - op := &request.Operation{ - Name: opRegisterWorkflowType, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RegisterWorkflowTypeInput{} - } - - output = &RegisterWorkflowTypeOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// RegisterWorkflowType API operation for Amazon Simple Workflow Service. -// -// Registers a new workflow type and its configuration settings in the specified -// domain. -// -// The retention period for the workflow history is set by the RegisterDomain -// action. -// -// If the type already exists, then a TypeAlreadyExists fault is returned. You -// cannot change the configuration settings of a workflow type once it is registered -// and it must be registered as a new version. 
-// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name. -// -// name: String constraint. The key is swf:name. -// -// version: String constraint. The key is swf:version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation RegisterWorkflowType for usage and error information. -// -// Returned Error Codes: -// * ErrCodeTypeAlreadyExistsFault "TypeAlreadyExistsFault" -// Returned if the type already exists in the specified domain. You get this -// fault even if the existing type is in deprecated status. You can specify -// another version if the intent is to create a new distinct version of the -// type. -// -// * ErrCodeLimitExceededFault "LimitExceededFault" -// Returned by any operation if a system imposed limitation has been reached. 
-// To address this fault you should either clean up unused resources or increase -// the limit by contacting AWS. -// -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) RegisterWorkflowType(input *RegisterWorkflowTypeInput) (*RegisterWorkflowTypeOutput, error) { - req, out := c.RegisterWorkflowTypeRequest(input) - return out, req.Send() -} - -// RegisterWorkflowTypeWithContext is the same as RegisterWorkflowType with the addition of -// the ability to pass a context and additional request options. -// -// See RegisterWorkflowType for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) RegisterWorkflowTypeWithContext(ctx aws.Context, input *RegisterWorkflowTypeInput, opts ...request.Option) (*RegisterWorkflowTypeOutput, error) { - req, out := c.RegisterWorkflowTypeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRequestCancelWorkflowExecution = "RequestCancelWorkflowExecution" - -// RequestCancelWorkflowExecutionRequest generates a "aws/request.Request" representing the -// client's request for the RequestCancelWorkflowExecution operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RequestCancelWorkflowExecution for more information on using the RequestCancelWorkflowExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RequestCancelWorkflowExecutionRequest method. -// req, resp := client.RequestCancelWorkflowExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) RequestCancelWorkflowExecutionRequest(input *RequestCancelWorkflowExecutionInput) (req *request.Request, output *RequestCancelWorkflowExecutionOutput) { - op := &request.Operation{ - Name: opRequestCancelWorkflowExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RequestCancelWorkflowExecutionInput{} - } - - output = &RequestCancelWorkflowExecutionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// RequestCancelWorkflowExecution API operation for Amazon Simple Workflow Service. -// -// Records a WorkflowExecutionCancelRequested event in the currently running -// workflow execution identified by the given domain, workflowId, and runId. -// This logically requests the cancellation of the workflow execution as a whole. -// It is up to the decider to take appropriate actions when it receives an execution -// history with this event. -// -// If the runId isn't specified, the WorkflowExecutionCancelRequested event -// is recorded in the history of the current open workflow execution with the -// specified workflowId in the domain. 
-// -// Because this action allows the workflow to properly clean up and gracefully -// close, it should be used instead of TerminateWorkflowExecution when possible. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation RequestCancelWorkflowExecution for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. 
-// -func (c *SWF) RequestCancelWorkflowExecution(input *RequestCancelWorkflowExecutionInput) (*RequestCancelWorkflowExecutionOutput, error) { - req, out := c.RequestCancelWorkflowExecutionRequest(input) - return out, req.Send() -} - -// RequestCancelWorkflowExecutionWithContext is the same as RequestCancelWorkflowExecution with the addition of -// the ability to pass a context and additional request options. -// -// See RequestCancelWorkflowExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) RequestCancelWorkflowExecutionWithContext(ctx aws.Context, input *RequestCancelWorkflowExecutionInput, opts ...request.Option) (*RequestCancelWorkflowExecutionOutput, error) { - req, out := c.RequestCancelWorkflowExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRespondActivityTaskCanceled = "RespondActivityTaskCanceled" - -// RespondActivityTaskCanceledRequest generates a "aws/request.Request" representing the -// client's request for the RespondActivityTaskCanceled operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RespondActivityTaskCanceled for more information on using the RespondActivityTaskCanceled -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the RespondActivityTaskCanceledRequest method. -// req, resp := client.RespondActivityTaskCanceledRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) RespondActivityTaskCanceledRequest(input *RespondActivityTaskCanceledInput) (req *request.Request, output *RespondActivityTaskCanceledOutput) { - op := &request.Operation{ - Name: opRespondActivityTaskCanceled, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RespondActivityTaskCanceledInput{} - } - - output = &RespondActivityTaskCanceledOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// RespondActivityTaskCanceled API operation for Amazon Simple Workflow Service. -// -// Used by workers to tell the service that the ActivityTask identified by the -// taskToken was successfully canceled. Additional details can be provided using -// the details argument. -// -// These details (if provided) appear in the ActivityTaskCanceled event added -// to the workflow history. -// -// Only use this operation if the canceled flag of a RecordActivityTaskHeartbeat -// request returns true and if the activity can be safely undone or abandoned. -// -// A task is considered open from the time that it is scheduled until it is -// closed. Therefore a task is reported as open while a worker is processing -// it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, -// RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed -// out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). 
-// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation RespondActivityTaskCanceled for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. 
-// -func (c *SWF) RespondActivityTaskCanceled(input *RespondActivityTaskCanceledInput) (*RespondActivityTaskCanceledOutput, error) { - req, out := c.RespondActivityTaskCanceledRequest(input) - return out, req.Send() -} - -// RespondActivityTaskCanceledWithContext is the same as RespondActivityTaskCanceled with the addition of -// the ability to pass a context and additional request options. -// -// See RespondActivityTaskCanceled for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) RespondActivityTaskCanceledWithContext(ctx aws.Context, input *RespondActivityTaskCanceledInput, opts ...request.Option) (*RespondActivityTaskCanceledOutput, error) { - req, out := c.RespondActivityTaskCanceledRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRespondActivityTaskCompleted = "RespondActivityTaskCompleted" - -// RespondActivityTaskCompletedRequest generates a "aws/request.Request" representing the -// client's request for the RespondActivityTaskCompleted operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RespondActivityTaskCompleted for more information on using the RespondActivityTaskCompleted -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RespondActivityTaskCompletedRequest method. 
-// req, resp := client.RespondActivityTaskCompletedRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) RespondActivityTaskCompletedRequest(input *RespondActivityTaskCompletedInput) (req *request.Request, output *RespondActivityTaskCompletedOutput) { - op := &request.Operation{ - Name: opRespondActivityTaskCompleted, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RespondActivityTaskCompletedInput{} - } - - output = &RespondActivityTaskCompletedOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// RespondActivityTaskCompleted API operation for Amazon Simple Workflow Service. -// -// Used by workers to tell the service that the ActivityTask identified by the -// taskToken completed successfully with a result (if provided). The result -// appears in the ActivityTaskCompleted event in the workflow history. -// -// If the requested task doesn't complete successfully, use RespondActivityTaskFailed -// instead. If the worker finds that the task is canceled through the canceled -// flag returned by RecordActivityTaskHeartbeat, it should cancel the task, -// clean up and then call RespondActivityTaskCanceled. -// -// A task is considered open from the time that it is scheduled until it is -// closed. Therefore a task is reported as open while a worker is processing -// it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, -// RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed -// out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). 
-// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation RespondActivityTaskCompleted for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. 
-// -func (c *SWF) RespondActivityTaskCompleted(input *RespondActivityTaskCompletedInput) (*RespondActivityTaskCompletedOutput, error) { - req, out := c.RespondActivityTaskCompletedRequest(input) - return out, req.Send() -} - -// RespondActivityTaskCompletedWithContext is the same as RespondActivityTaskCompleted with the addition of -// the ability to pass a context and additional request options. -// -// See RespondActivityTaskCompleted for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) RespondActivityTaskCompletedWithContext(ctx aws.Context, input *RespondActivityTaskCompletedInput, opts ...request.Option) (*RespondActivityTaskCompletedOutput, error) { - req, out := c.RespondActivityTaskCompletedRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRespondActivityTaskFailed = "RespondActivityTaskFailed" - -// RespondActivityTaskFailedRequest generates a "aws/request.Request" representing the -// client's request for the RespondActivityTaskFailed operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RespondActivityTaskFailed for more information on using the RespondActivityTaskFailed -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RespondActivityTaskFailedRequest method. 
-// req, resp := client.RespondActivityTaskFailedRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) RespondActivityTaskFailedRequest(input *RespondActivityTaskFailedInput) (req *request.Request, output *RespondActivityTaskFailedOutput) { - op := &request.Operation{ - Name: opRespondActivityTaskFailed, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RespondActivityTaskFailedInput{} - } - - output = &RespondActivityTaskFailedOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// RespondActivityTaskFailed API operation for Amazon Simple Workflow Service. -// -// Used by workers to tell the service that the ActivityTask identified by the -// taskToken has failed with reason (if specified). The reason and details appear -// in the ActivityTaskFailed event added to the workflow history. -// -// A task is considered open from the time that it is scheduled until it is -// closed. Therefore a task is reported as open while a worker is processing -// it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, -// RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed -// out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. 
-// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation RespondActivityTaskFailed for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) RespondActivityTaskFailed(input *RespondActivityTaskFailedInput) (*RespondActivityTaskFailedOutput, error) { - req, out := c.RespondActivityTaskFailedRequest(input) - return out, req.Send() -} - -// RespondActivityTaskFailedWithContext is the same as RespondActivityTaskFailed with the addition of -// the ability to pass a context and additional request options. -// -// See RespondActivityTaskFailed for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) RespondActivityTaskFailedWithContext(ctx aws.Context, input *RespondActivityTaskFailedInput, opts ...request.Option) (*RespondActivityTaskFailedOutput, error) { - req, out := c.RespondActivityTaskFailedRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRespondDecisionTaskCompleted = "RespondDecisionTaskCompleted" - -// RespondDecisionTaskCompletedRequest generates a "aws/request.Request" representing the -// client's request for the RespondDecisionTaskCompleted operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RespondDecisionTaskCompleted for more information on using the RespondDecisionTaskCompleted -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RespondDecisionTaskCompletedRequest method. 
-// req, resp := client.RespondDecisionTaskCompletedRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) RespondDecisionTaskCompletedRequest(input *RespondDecisionTaskCompletedInput) (req *request.Request, output *RespondDecisionTaskCompletedOutput) { - op := &request.Operation{ - Name: opRespondDecisionTaskCompleted, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RespondDecisionTaskCompletedInput{} - } - - output = &RespondDecisionTaskCompletedOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// RespondDecisionTaskCompleted API operation for Amazon Simple Workflow Service. -// -// Used by deciders to tell the service that the DecisionTask identified by -// the taskToken has successfully completed. The decisions argument specifies -// the list of decisions made while processing the task. -// -// A DecisionTaskCompleted event is added to the workflow history. The executionContext -// specified is attached to the event in the workflow execution history. -// -// Access Control -// -// If an IAM policy grants permission to use RespondDecisionTaskCompleted, it -// can express permissions for the list of decisions in the decisions parameter. -// Each of the decisions has one or more parameters, much like a regular API -// call. To allow for policies to be as readable as possible, you can express -// permissions on decisions as if they were actual API calls, including applying -// conditions to some parameters. For more information, see Using IAM to Manage -// Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation RespondDecisionTaskCompleted for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -func (c *SWF) RespondDecisionTaskCompleted(input *RespondDecisionTaskCompletedInput) (*RespondDecisionTaskCompletedOutput, error) { - req, out := c.RespondDecisionTaskCompletedRequest(input) - return out, req.Send() -} - -// RespondDecisionTaskCompletedWithContext is the same as RespondDecisionTaskCompleted with the addition of -// the ability to pass a context and additional request options. -// -// See RespondDecisionTaskCompleted for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) RespondDecisionTaskCompletedWithContext(ctx aws.Context, input *RespondDecisionTaskCompletedInput, opts ...request.Option) (*RespondDecisionTaskCompletedOutput, error) { - req, out := c.RespondDecisionTaskCompletedRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opSignalWorkflowExecution = "SignalWorkflowExecution" - -// SignalWorkflowExecutionRequest generates a "aws/request.Request" representing the -// client's request for the SignalWorkflowExecution operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See SignalWorkflowExecution for more information on using the SignalWorkflowExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SignalWorkflowExecutionRequest method. -// req, resp := client.SignalWorkflowExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) SignalWorkflowExecutionRequest(input *SignalWorkflowExecutionInput) (req *request.Request, output *SignalWorkflowExecutionOutput) { - op := &request.Operation{ - Name: opSignalWorkflowExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SignalWorkflowExecutionInput{} - } - - output = &SignalWorkflowExecutionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// SignalWorkflowExecution API operation for Amazon Simple Workflow Service. -// -// Records a WorkflowExecutionSignaled event in the workflow execution history -// and creates a decision task for the workflow execution identified by the -// given domain, workflowId and runId. The event is recorded with the specified -// user defined signalName and input (if provided). 
-// -// If a runId isn't specified, then the WorkflowExecutionSignaled event is recorded -// in the history of the current open workflow with the matching workflowId -// in the domain. -// -// If the specified workflow execution isn't open, this method fails with UnknownResource. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation SignalWorkflowExecution for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. 
-// -func (c *SWF) SignalWorkflowExecution(input *SignalWorkflowExecutionInput) (*SignalWorkflowExecutionOutput, error) { - req, out := c.SignalWorkflowExecutionRequest(input) - return out, req.Send() -} - -// SignalWorkflowExecutionWithContext is the same as SignalWorkflowExecution with the addition of -// the ability to pass a context and additional request options. -// -// See SignalWorkflowExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) SignalWorkflowExecutionWithContext(ctx aws.Context, input *SignalWorkflowExecutionInput, opts ...request.Option) (*SignalWorkflowExecutionOutput, error) { - req, out := c.SignalWorkflowExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStartWorkflowExecution = "StartWorkflowExecution" - -// StartWorkflowExecutionRequest generates a "aws/request.Request" representing the -// client's request for the StartWorkflowExecution operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartWorkflowExecution for more information on using the StartWorkflowExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StartWorkflowExecutionRequest method. 
-// req, resp := client.StartWorkflowExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) StartWorkflowExecutionRequest(input *StartWorkflowExecutionInput) (req *request.Request, output *StartWorkflowExecutionOutput) { - op := &request.Operation{ - Name: opStartWorkflowExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartWorkflowExecutionInput{} - } - - output = &StartWorkflowExecutionOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartWorkflowExecution API operation for Amazon Simple Workflow Service. -// -// Starts an execution of the workflow type in the specified domain using the -// provided workflowId and input data. -// -// This action returns the newly started workflow execution. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagList.member.0: The key is swf:tagList.member.0. -// -// tagList.member.1: The key is swf:tagList.member.1. -// -// tagList.member.2: The key is swf:tagList.member.2. -// -// tagList.member.3: The key is swf:tagList.member.3. -// -// tagList.member.4: The key is swf:tagList.member.4. -// -// taskList: String constraint. The key is swf:taskList.name. -// -// workflowType.name: String constraint. The key is swf:workflowType.name. -// -// workflowType.version: String constraint. The key is swf:workflowType.version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. 
-// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation StartWorkflowExecution for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeTypeDeprecatedFault "TypeDeprecatedFault" -// Returned when the specified activity or workflow type was already deprecated. -// -// * ErrCodeWorkflowExecutionAlreadyStartedFault "WorkflowExecutionAlreadyStartedFault" -// Returned by StartWorkflowExecution when an open execution with the same workflowId -// is already running in the specified domain. -// -// * ErrCodeLimitExceededFault "LimitExceededFault" -// Returned by any operation if a system imposed limitation has been reached. -// To address this fault you should either clean up unused resources or increase -// the limit by contacting AWS. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. -// -// * ErrCodeDefaultUndefinedFault "DefaultUndefinedFault" -// The StartWorkflowExecution API action was called without the required parameters -// set. 
-// -// Some workflow execution parameters, such as the decision taskList, must be -// set to start the execution. However, these parameters might have been set -// as defaults when the workflow type was registered. In this case, you can -// omit these parameters from the StartWorkflowExecution call and Amazon SWF -// uses the values defined in the workflow type. -// -// If these parameters aren't set and no default parameters were defined in -// the workflow type, this error is displayed. -// -func (c *SWF) StartWorkflowExecution(input *StartWorkflowExecutionInput) (*StartWorkflowExecutionOutput, error) { - req, out := c.StartWorkflowExecutionRequest(input) - return out, req.Send() -} - -// StartWorkflowExecutionWithContext is the same as StartWorkflowExecution with the addition of -// the ability to pass a context and additional request options. -// -// See StartWorkflowExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) StartWorkflowExecutionWithContext(ctx aws.Context, input *StartWorkflowExecutionInput, opts ...request.Option) (*StartWorkflowExecutionOutput, error) { - req, out := c.StartWorkflowExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opTerminateWorkflowExecution = "TerminateWorkflowExecution" - -// TerminateWorkflowExecutionRequest generates a "aws/request.Request" representing the -// client's request for the TerminateWorkflowExecution operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See TerminateWorkflowExecution for more information on using the TerminateWorkflowExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the TerminateWorkflowExecutionRequest method. -// req, resp := client.TerminateWorkflowExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -func (c *SWF) TerminateWorkflowExecutionRequest(input *TerminateWorkflowExecutionInput) (req *request.Request, output *TerminateWorkflowExecutionOutput) { - op := &request.Operation{ - Name: opTerminateWorkflowExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TerminateWorkflowExecutionInput{} - } - - output = &TerminateWorkflowExecutionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// TerminateWorkflowExecution API operation for Amazon Simple Workflow Service. -// -// Records a WorkflowExecutionTerminated event and forces closure of the workflow -// execution identified by the given domain, runId, and workflowId. The child -// policy, registered with the workflow type or specified when starting this -// execution, is applied to any open child workflow executions of this workflow -// execution. -// -// If the identified workflow execution was in progress, it is terminated immediately. -// -// If a runId isn't specified, then the WorkflowExecutionTerminated event is -// recorded in the history of the current open workflow with the matching workflowId -// in the domain. 
-// -// You should consider using RequestCancelWorkflowExecution action instead because -// it allows the workflow to gracefully close while TerminateWorkflowExecution -// doesn't. -// -// Access Control -// -// You can use IAM policies to control this action's access to Amazon SWF resources -// as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Workflow Service's -// API operation TerminateWorkflowExecution for usage and error information. -// -// Returned Error Codes: -// * ErrCodeUnknownResourceFault "UnknownResourceFault" -// Returned when the named resource cannot be found with in the scope of this -// operation (region or domain). This could happen if the named resource was -// never created or is no longer available for this operation. -// -// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" -// Returned when the caller doesn't have sufficient permissions to invoke the -// action. 
-// -func (c *SWF) TerminateWorkflowExecution(input *TerminateWorkflowExecutionInput) (*TerminateWorkflowExecutionOutput, error) { - req, out := c.TerminateWorkflowExecutionRequest(input) - return out, req.Send() -} - -// TerminateWorkflowExecutionWithContext is the same as TerminateWorkflowExecution with the addition of -// the ability to pass a context and additional request options. -// -// See TerminateWorkflowExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SWF) TerminateWorkflowExecutionWithContext(ctx aws.Context, input *TerminateWorkflowExecutionInput, opts ...request.Option) (*TerminateWorkflowExecutionOutput, error) { - req, out := c.TerminateWorkflowExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Provides the details of the ActivityTaskCancelRequested event. -type ActivityTaskCancelRequestedEventAttributes struct { - _ struct{} `type:"structure"` - - // The unique ID of the task. - // - // ActivityId is a required field - ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the RequestCancelActivityTask decision for this cancellation - // request. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. 
- // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s ActivityTaskCancelRequestedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ActivityTaskCancelRequestedEventAttributes) GoString() string { - return s.String() -} - -// SetActivityId sets the ActivityId field's value. -func (s *ActivityTaskCancelRequestedEventAttributes) SetActivityId(v string) *ActivityTaskCancelRequestedEventAttributes { - s.ActivityId = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *ActivityTaskCancelRequestedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *ActivityTaskCancelRequestedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// Provides the details of the ActivityTaskCanceled event. -type ActivityTaskCanceledEventAttributes struct { - _ struct{} `type:"structure"` - - // Details of the cancellation. - Details *string `locationName:"details" type:"string"` - - // If set, contains the ID of the last ActivityTaskCancelRequested event recorded - // for this activity task. This information can be useful for diagnosing problems - // by tracing back the chain of events leading up to this event. - LatestCancelRequestedEventId *int64 `locationName:"latestCancelRequestedEventId" type:"long"` - - // The ID of the ActivityTaskScheduled event that was recorded when this activity - // task was scheduled. This information can be useful for diagnosing problems - // by tracing back the chain of events leading up to this event. 
- // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` - - // The ID of the ActivityTaskStarted event recorded when this activity task - // was started. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s ActivityTaskCanceledEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ActivityTaskCanceledEventAttributes) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. -func (s *ActivityTaskCanceledEventAttributes) SetDetails(v string) *ActivityTaskCanceledEventAttributes { - s.Details = &v - return s -} - -// SetLatestCancelRequestedEventId sets the LatestCancelRequestedEventId field's value. -func (s *ActivityTaskCanceledEventAttributes) SetLatestCancelRequestedEventId(v int64) *ActivityTaskCanceledEventAttributes { - s.LatestCancelRequestedEventId = &v - return s -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *ActivityTaskCanceledEventAttributes) SetScheduledEventId(v int64) *ActivityTaskCanceledEventAttributes { - s.ScheduledEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *ActivityTaskCanceledEventAttributes) SetStartedEventId(v int64) *ActivityTaskCanceledEventAttributes { - s.StartedEventId = &v - return s -} - -// Provides the details of the ActivityTaskCompleted event. -type ActivityTaskCompletedEventAttributes struct { - _ struct{} `type:"structure"` - - // The results of the activity task. 
- Result *string `locationName:"result" type:"string"` - - // The ID of the ActivityTaskScheduled event that was recorded when this activity - // task was scheduled. This information can be useful for diagnosing problems - // by tracing back the chain of events leading up to this event. - // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` - - // The ID of the ActivityTaskStarted event recorded when this activity task - // was started. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s ActivityTaskCompletedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ActivityTaskCompletedEventAttributes) GoString() string { - return s.String() -} - -// SetResult sets the Result field's value. -func (s *ActivityTaskCompletedEventAttributes) SetResult(v string) *ActivityTaskCompletedEventAttributes { - s.Result = &v - return s -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *ActivityTaskCompletedEventAttributes) SetScheduledEventId(v int64) *ActivityTaskCompletedEventAttributes { - s.ScheduledEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *ActivityTaskCompletedEventAttributes) SetStartedEventId(v int64) *ActivityTaskCompletedEventAttributes { - s.StartedEventId = &v - return s -} - -// Provides the details of the ActivityTaskFailed event. -type ActivityTaskFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The details of the failure. - Details *string `locationName:"details" type:"string"` - - // The reason provided for the failure. 
- Reason *string `locationName:"reason" type:"string"` - - // The ID of the ActivityTaskScheduled event that was recorded when this activity - // task was scheduled. This information can be useful for diagnosing problems - // by tracing back the chain of events leading up to this event. - // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` - - // The ID of the ActivityTaskStarted event recorded when this activity task - // was started. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s ActivityTaskFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ActivityTaskFailedEventAttributes) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. -func (s *ActivityTaskFailedEventAttributes) SetDetails(v string) *ActivityTaskFailedEventAttributes { - s.Details = &v - return s -} - -// SetReason sets the Reason field's value. -func (s *ActivityTaskFailedEventAttributes) SetReason(v string) *ActivityTaskFailedEventAttributes { - s.Reason = &v - return s -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *ActivityTaskFailedEventAttributes) SetScheduledEventId(v int64) *ActivityTaskFailedEventAttributes { - s.ScheduledEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *ActivityTaskFailedEventAttributes) SetStartedEventId(v int64) *ActivityTaskFailedEventAttributes { - s.StartedEventId = &v - return s -} - -// Provides the details of the ActivityTaskScheduled event. 
-type ActivityTaskScheduledEventAttributes struct { - _ struct{} `type:"structure"` - - // The unique ID of the activity task. - // - // ActivityId is a required field - ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` - - // The type of the activity task. - // - // ActivityType is a required field - ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` - - // Data attached to the event that can be used by the decider in subsequent - // workflow tasks. This data isn't sent to the activity. - Control *string `locationName:"control" type:"string"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision that - // resulted in the scheduling of this activity task. This information can be - // useful for diagnosing problems by tracing back the chain of events leading - // up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The maximum time before which the worker processing this task must report - // progress by calling RecordActivityTaskHeartbeat. If the timeout is exceeded, - // the activity task is automatically timed out. If the worker subsequently - // attempts to record a heartbeat or return a result, it is ignored. - HeartbeatTimeout *string `locationName:"heartbeatTimeout" type:"string"` - - // The input provided to the activity task. - Input *string `locationName:"input" type:"string"` - - // The maximum amount of time for this activity task. - ScheduleToCloseTimeout *string `locationName:"scheduleToCloseTimeout" type:"string"` - - // The maximum amount of time the activity task can wait to be assigned to a - // worker. - ScheduleToStartTimeout *string `locationName:"scheduleToStartTimeout" type:"string"` - - // The maximum amount of time a worker may take to process the activity task. 
- StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"` - - // The task list in which the activity task has been scheduled. - // - // TaskList is a required field - TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` - - // The priority to assign to the scheduled activity task. If set, this overrides - // any default priority value that was assigned when the activity type was registered. - // - // Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) - // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. - // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the Amazon SWF Developer Guide. - TaskPriority *string `locationName:"taskPriority" type:"string"` -} - -// String returns the string representation -func (s ActivityTaskScheduledEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ActivityTaskScheduledEventAttributes) GoString() string { - return s.String() -} - -// SetActivityId sets the ActivityId field's value. -func (s *ActivityTaskScheduledEventAttributes) SetActivityId(v string) *ActivityTaskScheduledEventAttributes { - s.ActivityId = &v - return s -} - -// SetActivityType sets the ActivityType field's value. -func (s *ActivityTaskScheduledEventAttributes) SetActivityType(v *ActivityType) *ActivityTaskScheduledEventAttributes { - s.ActivityType = v - return s -} - -// SetControl sets the Control field's value. -func (s *ActivityTaskScheduledEventAttributes) SetControl(v string) *ActivityTaskScheduledEventAttributes { - s.Control = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. 
-func (s *ActivityTaskScheduledEventAttributes) SetDecisionTaskCompletedEventId(v int64) *ActivityTaskScheduledEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetHeartbeatTimeout sets the HeartbeatTimeout field's value. -func (s *ActivityTaskScheduledEventAttributes) SetHeartbeatTimeout(v string) *ActivityTaskScheduledEventAttributes { - s.HeartbeatTimeout = &v - return s -} - -// SetInput sets the Input field's value. -func (s *ActivityTaskScheduledEventAttributes) SetInput(v string) *ActivityTaskScheduledEventAttributes { - s.Input = &v - return s -} - -// SetScheduleToCloseTimeout sets the ScheduleToCloseTimeout field's value. -func (s *ActivityTaskScheduledEventAttributes) SetScheduleToCloseTimeout(v string) *ActivityTaskScheduledEventAttributes { - s.ScheduleToCloseTimeout = &v - return s -} - -// SetScheduleToStartTimeout sets the ScheduleToStartTimeout field's value. -func (s *ActivityTaskScheduledEventAttributes) SetScheduleToStartTimeout(v string) *ActivityTaskScheduledEventAttributes { - s.ScheduleToStartTimeout = &v - return s -} - -// SetStartToCloseTimeout sets the StartToCloseTimeout field's value. -func (s *ActivityTaskScheduledEventAttributes) SetStartToCloseTimeout(v string) *ActivityTaskScheduledEventAttributes { - s.StartToCloseTimeout = &v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *ActivityTaskScheduledEventAttributes) SetTaskList(v *TaskList) *ActivityTaskScheduledEventAttributes { - s.TaskList = v - return s -} - -// SetTaskPriority sets the TaskPriority field's value. -func (s *ActivityTaskScheduledEventAttributes) SetTaskPriority(v string) *ActivityTaskScheduledEventAttributes { - s.TaskPriority = &v - return s -} - -// Provides the details of the ActivityTaskStarted event. -type ActivityTaskStartedEventAttributes struct { - _ struct{} `type:"structure"` - - // Identity of the worker that was assigned this task. This aids diagnostics - // when problems arise. 
The form of this identity is user defined. - Identity *string `locationName:"identity" type:"string"` - - // The ID of the ActivityTaskScheduled event that was recorded when this activity - // task was scheduled. This information can be useful for diagnosing problems - // by tracing back the chain of events leading up to this event. - // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s ActivityTaskStartedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ActivityTaskStartedEventAttributes) GoString() string { - return s.String() -} - -// SetIdentity sets the Identity field's value. -func (s *ActivityTaskStartedEventAttributes) SetIdentity(v string) *ActivityTaskStartedEventAttributes { - s.Identity = &v - return s -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *ActivityTaskStartedEventAttributes) SetScheduledEventId(v int64) *ActivityTaskStartedEventAttributes { - s.ScheduledEventId = &v - return s -} - -// Provides the details of the ActivityTaskTimedOut event. -type ActivityTaskTimedOutEventAttributes struct { - _ struct{} `type:"structure"` - - // Contains the content of the details parameter for the last call made by the - // activity to RecordActivityTaskHeartbeat. - Details *string `locationName:"details" type:"string"` - - // The ID of the ActivityTaskScheduled event that was recorded when this activity - // task was scheduled. This information can be useful for diagnosing problems - // by tracing back the chain of events leading up to this event. - // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` - - // The ID of the ActivityTaskStarted event recorded when this activity task - // was started. 
This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The type of the timeout that caused this event. - // - // TimeoutType is a required field - TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"ActivityTaskTimeoutType"` -} - -// String returns the string representation -func (s ActivityTaskTimedOutEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ActivityTaskTimedOutEventAttributes) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. -func (s *ActivityTaskTimedOutEventAttributes) SetDetails(v string) *ActivityTaskTimedOutEventAttributes { - s.Details = &v - return s -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *ActivityTaskTimedOutEventAttributes) SetScheduledEventId(v int64) *ActivityTaskTimedOutEventAttributes { - s.ScheduledEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *ActivityTaskTimedOutEventAttributes) SetStartedEventId(v int64) *ActivityTaskTimedOutEventAttributes { - s.StartedEventId = &v - return s -} - -// SetTimeoutType sets the TimeoutType field's value. -func (s *ActivityTaskTimedOutEventAttributes) SetTimeoutType(v string) *ActivityTaskTimedOutEventAttributes { - s.TimeoutType = &v - return s -} - -// Represents an activity type. -type ActivityType struct { - _ struct{} `type:"structure"` - - // The name of this activity. - // - // The combination of activity type name and version must be unique within a - // domain. - // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` - - // The version of this activity. 
- // - // The combination of activity type name and version must be unique with in - // a domain. - // - // Version is a required field - Version *string `locationName:"version" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s ActivityType) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ActivityType) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ActivityType) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ActivityType"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *ActivityType) SetName(v string) *ActivityType { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *ActivityType) SetVersion(v string) *ActivityType { - s.Version = &v - return s -} - -// Configuration settings registered with the activity type. -type ActivityTypeConfiguration struct { - _ struct{} `type:"structure"` - - // The default maximum time, in seconds, before which a worker processing a - // task must report progress by calling RecordActivityTaskHeartbeat. - // - // You can specify this value only when registering an activity type. The registered - // default value can be overridden when you schedule a task through the ScheduleActivityTaskDecision. 
- // If the activity worker subsequently attempts to record a heartbeat or returns - // a result, the activity worker receives an UnknownResource fault. In this - // case, Amazon SWF no longer considers the activity task to be valid; the activity - // worker should clean up the activity task. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - DefaultTaskHeartbeatTimeout *string `locationName:"defaultTaskHeartbeatTimeout" type:"string"` - - // The default task list specified for this activity type at registration. This - // default is used if a task list isn't provided when a task is scheduled through - // the ScheduleActivityTaskDecision. You can override the default registered - // task list when scheduling a task through the ScheduleActivityTaskDecision. - DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` - - // The default task priority for tasks of this activity type, specified at registration. - // If not set, then 0 is used as the default priority. This default can be overridden - // when scheduling an activity task. - // - // Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) - // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. - // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the Amazon SWF Developer Guide. - DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` - - // The default maximum duration, specified when registering the activity type, - // for tasks of this activity type. You can override this default when scheduling - // a task through the ScheduleActivityTaskDecision. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. 
- DefaultTaskScheduleToCloseTimeout *string `locationName:"defaultTaskScheduleToCloseTimeout" type:"string"` - - // The default maximum duration, specified when registering the activity type, - // that a task of an activity type can wait before being assigned to a worker. - // You can override this default when scheduling a task through the ScheduleActivityTaskDecision. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - DefaultTaskScheduleToStartTimeout *string `locationName:"defaultTaskScheduleToStartTimeout" type:"string"` - - // The default maximum duration for tasks of an activity type specified when - // registering the activity type. You can override this default when scheduling - // a task through the ScheduleActivityTaskDecision. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"` -} - -// String returns the string representation -func (s ActivityTypeConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ActivityTypeConfiguration) GoString() string { - return s.String() -} - -// SetDefaultTaskHeartbeatTimeout sets the DefaultTaskHeartbeatTimeout field's value. -func (s *ActivityTypeConfiguration) SetDefaultTaskHeartbeatTimeout(v string) *ActivityTypeConfiguration { - s.DefaultTaskHeartbeatTimeout = &v - return s -} - -// SetDefaultTaskList sets the DefaultTaskList field's value. -func (s *ActivityTypeConfiguration) SetDefaultTaskList(v *TaskList) *ActivityTypeConfiguration { - s.DefaultTaskList = v - return s -} - -// SetDefaultTaskPriority sets the DefaultTaskPriority field's value. 
-func (s *ActivityTypeConfiguration) SetDefaultTaskPriority(v string) *ActivityTypeConfiguration { - s.DefaultTaskPriority = &v - return s -} - -// SetDefaultTaskScheduleToCloseTimeout sets the DefaultTaskScheduleToCloseTimeout field's value. -func (s *ActivityTypeConfiguration) SetDefaultTaskScheduleToCloseTimeout(v string) *ActivityTypeConfiguration { - s.DefaultTaskScheduleToCloseTimeout = &v - return s -} - -// SetDefaultTaskScheduleToStartTimeout sets the DefaultTaskScheduleToStartTimeout field's value. -func (s *ActivityTypeConfiguration) SetDefaultTaskScheduleToStartTimeout(v string) *ActivityTypeConfiguration { - s.DefaultTaskScheduleToStartTimeout = &v - return s -} - -// SetDefaultTaskStartToCloseTimeout sets the DefaultTaskStartToCloseTimeout field's value. -func (s *ActivityTypeConfiguration) SetDefaultTaskStartToCloseTimeout(v string) *ActivityTypeConfiguration { - s.DefaultTaskStartToCloseTimeout = &v - return s -} - -// Detailed information about an activity type. -type ActivityTypeInfo struct { - _ struct{} `type:"structure"` - - // The ActivityType type structure representing the activity type. - // - // ActivityType is a required field - ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` - - // The date and time this activity type was created through RegisterActivityType. - // - // CreationDate is a required field - CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix" required:"true"` - - // If DEPRECATED, the date and time DeprecateActivityType was called. - DeprecationDate *time.Time `locationName:"deprecationDate" type:"timestamp" timestampFormat:"unix"` - - // The description of the activity type provided in RegisterActivityType. - Description *string `locationName:"description" type:"string"` - - // The current status of the activity type. 
- // - // Status is a required field - Status *string `locationName:"status" type:"string" required:"true" enum:"RegistrationStatus"` -} - -// String returns the string representation -func (s ActivityTypeInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ActivityTypeInfo) GoString() string { - return s.String() -} - -// SetActivityType sets the ActivityType field's value. -func (s *ActivityTypeInfo) SetActivityType(v *ActivityType) *ActivityTypeInfo { - s.ActivityType = v - return s -} - -// SetCreationDate sets the CreationDate field's value. -func (s *ActivityTypeInfo) SetCreationDate(v time.Time) *ActivityTypeInfo { - s.CreationDate = &v - return s -} - -// SetDeprecationDate sets the DeprecationDate field's value. -func (s *ActivityTypeInfo) SetDeprecationDate(v time.Time) *ActivityTypeInfo { - s.DeprecationDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *ActivityTypeInfo) SetDescription(v string) *ActivityTypeInfo { - s.Description = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *ActivityTypeInfo) SetStatus(v string) *ActivityTypeInfo { - s.Status = &v - return s -} - -// Provides the details of the CancelTimer decision. -// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
-// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -type CancelTimerDecisionAttributes struct { - _ struct{} `type:"structure"` - - // The unique ID of the timer to cancel. - // - // TimerId is a required field - TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CancelTimerDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CancelTimerDecisionAttributes) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CancelTimerDecisionAttributes) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CancelTimerDecisionAttributes"} - if s.TimerId == nil { - invalidParams.Add(request.NewErrParamRequired("TimerId")) - } - if s.TimerId != nil && len(*s.TimerId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TimerId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTimerId sets the TimerId field's value. -func (s *CancelTimerDecisionAttributes) SetTimerId(v string) *CancelTimerDecisionAttributes { - s.TimerId = &v - return s -} - -// Provides the details of the CancelTimerFailed event. -type CancelTimerFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. - // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. 
For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"CancelTimerFailedCause"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the CancelTimer decision to cancel this timer. This information - // can be useful for diagnosing problems by tracing back the chain of events - // leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The timerId provided in the CancelTimer decision that failed. - // - // TimerId is a required field - TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CancelTimerFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CancelTimerFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *CancelTimerFailedEventAttributes) SetCause(v string) *CancelTimerFailedEventAttributes { - s.Cause = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *CancelTimerFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *CancelTimerFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetTimerId sets the TimerId field's value. -func (s *CancelTimerFailedEventAttributes) SetTimerId(v string) *CancelTimerFailedEventAttributes { - s.TimerId = &v - return s -} - -// Provides the details of the CancelWorkflowExecution decision. 
-// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -type CancelWorkflowExecutionDecisionAttributes struct { - _ struct{} `type:"structure"` - - // Details of the cancellation. - Details *string `locationName:"details" type:"string"` -} - -// String returns the string representation -func (s CancelWorkflowExecutionDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CancelWorkflowExecutionDecisionAttributes) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. -func (s *CancelWorkflowExecutionDecisionAttributes) SetDetails(v string) *CancelWorkflowExecutionDecisionAttributes { - s.Details = &v - return s -} - -// Provides the details of the CancelWorkflowExecutionFailed event. -type CancelWorkflowExecutionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. - // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. 
For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"CancelWorkflowExecutionFailedCause"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the CancelWorkflowExecution decision for this cancellation - // request. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s CancelWorkflowExecutionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CancelWorkflowExecutionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *CancelWorkflowExecutionFailedEventAttributes) SetCause(v string) *CancelWorkflowExecutionFailedEventAttributes { - s.Cause = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *CancelWorkflowExecutionFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *CancelWorkflowExecutionFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// Provide details of the ChildWorkflowExecutionCanceled event. -type ChildWorkflowExecutionCanceledEventAttributes struct { - _ struct{} `type:"structure"` - - // Details of the cancellation (if provided). 
- Details *string `locationName:"details" type:"string"` - - // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. - // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // InitiatedEventId is a required field - InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` - - // The ID of the ChildWorkflowExecutionStarted event recorded when this child - // workflow execution was started. This information can be useful for diagnosing - // problems by tracing back the chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The child workflow execution that was canceled. - // - // WorkflowExecution is a required field - WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` - - // The type of the child workflow execution. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s ChildWorkflowExecutionCanceledEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChildWorkflowExecutionCanceledEventAttributes) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. -func (s *ChildWorkflowExecutionCanceledEventAttributes) SetDetails(v string) *ChildWorkflowExecutionCanceledEventAttributes { - s.Details = &v - return s -} - -// SetInitiatedEventId sets the InitiatedEventId field's value. 
-func (s *ChildWorkflowExecutionCanceledEventAttributes) SetInitiatedEventId(v int64) *ChildWorkflowExecutionCanceledEventAttributes { - s.InitiatedEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *ChildWorkflowExecutionCanceledEventAttributes) SetStartedEventId(v int64) *ChildWorkflowExecutionCanceledEventAttributes { - s.StartedEventId = &v - return s -} - -// SetWorkflowExecution sets the WorkflowExecution field's value. -func (s *ChildWorkflowExecutionCanceledEventAttributes) SetWorkflowExecution(v *WorkflowExecution) *ChildWorkflowExecutionCanceledEventAttributes { - s.WorkflowExecution = v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *ChildWorkflowExecutionCanceledEventAttributes) SetWorkflowType(v *WorkflowType) *ChildWorkflowExecutionCanceledEventAttributes { - s.WorkflowType = v - return s -} - -// Provides the details of the ChildWorkflowExecutionCompleted event. -type ChildWorkflowExecutionCompletedEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. - // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // InitiatedEventId is a required field - InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` - - // The result of the child workflow execution. - Result *string `locationName:"result" type:"string"` - - // The ID of the ChildWorkflowExecutionStarted event recorded when this child - // workflow execution was started. This information can be useful for diagnosing - // problems by tracing back the chain of events leading up to this event. 
- // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The child workflow execution that was completed. - // - // WorkflowExecution is a required field - WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` - - // The type of the child workflow execution. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s ChildWorkflowExecutionCompletedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChildWorkflowExecutionCompletedEventAttributes) GoString() string { - return s.String() -} - -// SetInitiatedEventId sets the InitiatedEventId field's value. -func (s *ChildWorkflowExecutionCompletedEventAttributes) SetInitiatedEventId(v int64) *ChildWorkflowExecutionCompletedEventAttributes { - s.InitiatedEventId = &v - return s -} - -// SetResult sets the Result field's value. -func (s *ChildWorkflowExecutionCompletedEventAttributes) SetResult(v string) *ChildWorkflowExecutionCompletedEventAttributes { - s.Result = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *ChildWorkflowExecutionCompletedEventAttributes) SetStartedEventId(v int64) *ChildWorkflowExecutionCompletedEventAttributes { - s.StartedEventId = &v - return s -} - -// SetWorkflowExecution sets the WorkflowExecution field's value. -func (s *ChildWorkflowExecutionCompletedEventAttributes) SetWorkflowExecution(v *WorkflowExecution) *ChildWorkflowExecutionCompletedEventAttributes { - s.WorkflowExecution = v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. 
-func (s *ChildWorkflowExecutionCompletedEventAttributes) SetWorkflowType(v *WorkflowType) *ChildWorkflowExecutionCompletedEventAttributes { - s.WorkflowType = v - return s -} - -// Provides the details of the ChildWorkflowExecutionFailed event. -type ChildWorkflowExecutionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The details of the failure (if provided). - Details *string `locationName:"details" type:"string"` - - // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. - // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // InitiatedEventId is a required field - InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` - - // The reason for the failure (if provided). - Reason *string `locationName:"reason" type:"string"` - - // The ID of the ChildWorkflowExecutionStarted event recorded when this child - // workflow execution was started. This information can be useful for diagnosing - // problems by tracing back the chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The child workflow execution that failed. - // - // WorkflowExecution is a required field - WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` - - // The type of the child workflow execution. 
- // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s ChildWorkflowExecutionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChildWorkflowExecutionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. -func (s *ChildWorkflowExecutionFailedEventAttributes) SetDetails(v string) *ChildWorkflowExecutionFailedEventAttributes { - s.Details = &v - return s -} - -// SetInitiatedEventId sets the InitiatedEventId field's value. -func (s *ChildWorkflowExecutionFailedEventAttributes) SetInitiatedEventId(v int64) *ChildWorkflowExecutionFailedEventAttributes { - s.InitiatedEventId = &v - return s -} - -// SetReason sets the Reason field's value. -func (s *ChildWorkflowExecutionFailedEventAttributes) SetReason(v string) *ChildWorkflowExecutionFailedEventAttributes { - s.Reason = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *ChildWorkflowExecutionFailedEventAttributes) SetStartedEventId(v int64) *ChildWorkflowExecutionFailedEventAttributes { - s.StartedEventId = &v - return s -} - -// SetWorkflowExecution sets the WorkflowExecution field's value. -func (s *ChildWorkflowExecutionFailedEventAttributes) SetWorkflowExecution(v *WorkflowExecution) *ChildWorkflowExecutionFailedEventAttributes { - s.WorkflowExecution = v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *ChildWorkflowExecutionFailedEventAttributes) SetWorkflowType(v *WorkflowType) *ChildWorkflowExecutionFailedEventAttributes { - s.WorkflowType = v - return s -} - -// Provides the details of the ChildWorkflowExecutionStarted event. 
-type ChildWorkflowExecutionStartedEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. - // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // InitiatedEventId is a required field - InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` - - // The child workflow execution that was started. - // - // WorkflowExecution is a required field - WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` - - // The type of the child workflow execution. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s ChildWorkflowExecutionStartedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChildWorkflowExecutionStartedEventAttributes) GoString() string { - return s.String() -} - -// SetInitiatedEventId sets the InitiatedEventId field's value. -func (s *ChildWorkflowExecutionStartedEventAttributes) SetInitiatedEventId(v int64) *ChildWorkflowExecutionStartedEventAttributes { - s.InitiatedEventId = &v - return s -} - -// SetWorkflowExecution sets the WorkflowExecution field's value. -func (s *ChildWorkflowExecutionStartedEventAttributes) SetWorkflowExecution(v *WorkflowExecution) *ChildWorkflowExecutionStartedEventAttributes { - s.WorkflowExecution = v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. 
-func (s *ChildWorkflowExecutionStartedEventAttributes) SetWorkflowType(v *WorkflowType) *ChildWorkflowExecutionStartedEventAttributes { - s.WorkflowType = v - return s -} - -// Provides the details of the ChildWorkflowExecutionTerminated event. -type ChildWorkflowExecutionTerminatedEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. - // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // InitiatedEventId is a required field - InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` - - // The ID of the ChildWorkflowExecutionStarted event recorded when this child - // workflow execution was started. This information can be useful for diagnosing - // problems by tracing back the chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The child workflow execution that was terminated. - // - // WorkflowExecution is a required field - WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` - - // The type of the child workflow execution. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s ChildWorkflowExecutionTerminatedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChildWorkflowExecutionTerminatedEventAttributes) GoString() string { - return s.String() -} - -// SetInitiatedEventId sets the InitiatedEventId field's value. 
-func (s *ChildWorkflowExecutionTerminatedEventAttributes) SetInitiatedEventId(v int64) *ChildWorkflowExecutionTerminatedEventAttributes { - s.InitiatedEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *ChildWorkflowExecutionTerminatedEventAttributes) SetStartedEventId(v int64) *ChildWorkflowExecutionTerminatedEventAttributes { - s.StartedEventId = &v - return s -} - -// SetWorkflowExecution sets the WorkflowExecution field's value. -func (s *ChildWorkflowExecutionTerminatedEventAttributes) SetWorkflowExecution(v *WorkflowExecution) *ChildWorkflowExecutionTerminatedEventAttributes { - s.WorkflowExecution = v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *ChildWorkflowExecutionTerminatedEventAttributes) SetWorkflowType(v *WorkflowType) *ChildWorkflowExecutionTerminatedEventAttributes { - s.WorkflowType = v - return s -} - -// Provides the details of the ChildWorkflowExecutionTimedOut event. -type ChildWorkflowExecutionTimedOutEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. - // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // InitiatedEventId is a required field - InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` - - // The ID of the ChildWorkflowExecutionStarted event recorded when this child - // workflow execution was started. This information can be useful for diagnosing - // problems by tracing back the chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The type of the timeout that caused the child workflow execution to time - // out. 
- // - // TimeoutType is a required field - TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"WorkflowExecutionTimeoutType"` - - // The child workflow execution that timed out. - // - // WorkflowExecution is a required field - WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` - - // The type of the child workflow execution. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s ChildWorkflowExecutionTimedOutEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChildWorkflowExecutionTimedOutEventAttributes) GoString() string { - return s.String() -} - -// SetInitiatedEventId sets the InitiatedEventId field's value. -func (s *ChildWorkflowExecutionTimedOutEventAttributes) SetInitiatedEventId(v int64) *ChildWorkflowExecutionTimedOutEventAttributes { - s.InitiatedEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *ChildWorkflowExecutionTimedOutEventAttributes) SetStartedEventId(v int64) *ChildWorkflowExecutionTimedOutEventAttributes { - s.StartedEventId = &v - return s -} - -// SetTimeoutType sets the TimeoutType field's value. -func (s *ChildWorkflowExecutionTimedOutEventAttributes) SetTimeoutType(v string) *ChildWorkflowExecutionTimedOutEventAttributes { - s.TimeoutType = &v - return s -} - -// SetWorkflowExecution sets the WorkflowExecution field's value. -func (s *ChildWorkflowExecutionTimedOutEventAttributes) SetWorkflowExecution(v *WorkflowExecution) *ChildWorkflowExecutionTimedOutEventAttributes { - s.WorkflowExecution = v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. 
-func (s *ChildWorkflowExecutionTimedOutEventAttributes) SetWorkflowType(v *WorkflowType) *ChildWorkflowExecutionTimedOutEventAttributes { - s.WorkflowType = v - return s -} - -// Used to filter the closed workflow executions in visibility APIs by their -// close status. -type CloseStatusFilter struct { - _ struct{} `type:"structure"` - - // The close status that must match the close status of an execution for it - // to meet the criteria of this filter. - // - // Status is a required field - Status *string `locationName:"status" type:"string" required:"true" enum:"CloseStatus"` -} - -// String returns the string representation -func (s CloseStatusFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CloseStatusFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CloseStatusFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CloseStatusFilter"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStatus sets the Status field's value. -func (s *CloseStatusFilter) SetStatus(v string) *CloseStatusFilter { - s.Status = &v - return s -} - -// Provides the details of the CompleteWorkflowExecution decision. -// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. 
-// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -type CompleteWorkflowExecutionDecisionAttributes struct { - _ struct{} `type:"structure"` - - // The result of the workflow execution. The form of the result is implementation - // defined. - Result *string `locationName:"result" type:"string"` -} - -// String returns the string representation -func (s CompleteWorkflowExecutionDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompleteWorkflowExecutionDecisionAttributes) GoString() string { - return s.String() -} - -// SetResult sets the Result field's value. -func (s *CompleteWorkflowExecutionDecisionAttributes) SetResult(v string) *CompleteWorkflowExecutionDecisionAttributes { - s.Result = &v - return s -} - -// Provides the details of the CompleteWorkflowExecutionFailed event. -type CompleteWorkflowExecutionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. - // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. 
- // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"CompleteWorkflowExecutionFailedCause"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the CompleteWorkflowExecution decision to complete this - // execution. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s CompleteWorkflowExecutionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompleteWorkflowExecutionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *CompleteWorkflowExecutionFailedEventAttributes) SetCause(v string) *CompleteWorkflowExecutionFailedEventAttributes { - s.Cause = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *CompleteWorkflowExecutionFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *CompleteWorkflowExecutionFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// Provides the details of the ContinueAsNewWorkflowExecution decision. -// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. 
-// -// tag – A tag used to identify the workflow execution -// -// taskList – String constraint. The key is swf:taskList.name. -// -// workflowType.version – String constraint. The key is swf:workflowType.version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -type ContinueAsNewWorkflowExecutionDecisionAttributes struct { - _ struct{} `type:"structure"` - - // If set, specifies the policy to use for the child workflow executions of - // the new execution if it is terminated by calling the TerminateWorkflowExecution - // action explicitly or due to an expired timeout. This policy overrides the - // default child policy specified when registering the workflow type using RegisterWorkflowType. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. - // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. The child executions continue to run. - // - // A child policy for this workflow execution must be specified either as a - // default for the workflow type or through this parameter. If neither this - // parameter is set nor a default child policy was specified at registration - // time then a fault is returned. 
- ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"` - - // If set, specifies the total duration for this workflow execution. This overrides - // the defaultExecutionStartToCloseTimeout specified when registering the workflow - // type. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - // - // An execution start-to-close timeout for this workflow execution must be specified - // either as a default for the workflow type or through this field. If neither - // this field is set nor a default execution start-to-close timeout was specified - // at registration time then a fault is returned. - ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` - - // The input provided to the new workflow execution. - Input *string `locationName:"input" type:"string"` - - // The IAM role to attach to the new (continued) execution. - LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` - - // The list of tags to associate with the new workflow execution. A maximum - // of 5 tags can be specified. You can list workflow executions with a specific - // tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions - // and specifying a TagFilter. - TagList []*string `locationName:"tagList" type:"list"` - - // The task list to use for the decisions of the new (continued) workflow execution. - TaskList *TaskList `locationName:"taskList" type:"structure"` - - // The task priority that, if set, specifies the priority for the decision tasks - // for this workflow execution. This overrides the defaultTaskPriority specified - // when registering the workflow type. Valid values are integers that range - // from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). - // Higher numbers indicate higher priority. 
- // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the Amazon SWF Developer Guide. - TaskPriority *string `locationName:"taskPriority" type:"string"` - - // Specifies the maximum duration of decision tasks for the new workflow execution. - // This parameter overrides the defaultTaskStartToCloseTimout specified when - // registering the workflow type using RegisterWorkflowType. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - // - // A task start-to-close timeout for the new workflow execution must be specified - // either as a default for the workflow type or through this parameter. If neither - // this parameter is set nor a default task start-to-close timeout was specified - // at registration time then a fault is returned. - TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` - - // The version of the workflow to start. - WorkflowTypeVersion *string `locationName:"workflowTypeVersion" min:"1" type:"string"` -} - -// String returns the string representation -func (s ContinueAsNewWorkflowExecutionDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ContinueAsNewWorkflowExecutionDecisionAttributes) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ContinueAsNewWorkflowExecutionDecisionAttributes) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ContinueAsNewWorkflowExecutionDecisionAttributes"} - if s.LambdaRole != nil && len(*s.LambdaRole) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LambdaRole", 1)) - } - if s.WorkflowTypeVersion != nil && len(*s.WorkflowTypeVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowTypeVersion", 1)) - } - if s.TaskList != nil { - if err := s.TaskList.Validate(); err != nil { - invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChildPolicy sets the ChildPolicy field's value. -func (s *ContinueAsNewWorkflowExecutionDecisionAttributes) SetChildPolicy(v string) *ContinueAsNewWorkflowExecutionDecisionAttributes { - s.ChildPolicy = &v - return s -} - -// SetExecutionStartToCloseTimeout sets the ExecutionStartToCloseTimeout field's value. -func (s *ContinueAsNewWorkflowExecutionDecisionAttributes) SetExecutionStartToCloseTimeout(v string) *ContinueAsNewWorkflowExecutionDecisionAttributes { - s.ExecutionStartToCloseTimeout = &v - return s -} - -// SetInput sets the Input field's value. -func (s *ContinueAsNewWorkflowExecutionDecisionAttributes) SetInput(v string) *ContinueAsNewWorkflowExecutionDecisionAttributes { - s.Input = &v - return s -} - -// SetLambdaRole sets the LambdaRole field's value. -func (s *ContinueAsNewWorkflowExecutionDecisionAttributes) SetLambdaRole(v string) *ContinueAsNewWorkflowExecutionDecisionAttributes { - s.LambdaRole = &v - return s -} - -// SetTagList sets the TagList field's value. -func (s *ContinueAsNewWorkflowExecutionDecisionAttributes) SetTagList(v []*string) *ContinueAsNewWorkflowExecutionDecisionAttributes { - s.TagList = v - return s -} - -// SetTaskList sets the TaskList field's value. 
-func (s *ContinueAsNewWorkflowExecutionDecisionAttributes) SetTaskList(v *TaskList) *ContinueAsNewWorkflowExecutionDecisionAttributes { - s.TaskList = v - return s -} - -// SetTaskPriority sets the TaskPriority field's value. -func (s *ContinueAsNewWorkflowExecutionDecisionAttributes) SetTaskPriority(v string) *ContinueAsNewWorkflowExecutionDecisionAttributes { - s.TaskPriority = &v - return s -} - -// SetTaskStartToCloseTimeout sets the TaskStartToCloseTimeout field's value. -func (s *ContinueAsNewWorkflowExecutionDecisionAttributes) SetTaskStartToCloseTimeout(v string) *ContinueAsNewWorkflowExecutionDecisionAttributes { - s.TaskStartToCloseTimeout = &v - return s -} - -// SetWorkflowTypeVersion sets the WorkflowTypeVersion field's value. -func (s *ContinueAsNewWorkflowExecutionDecisionAttributes) SetWorkflowTypeVersion(v string) *ContinueAsNewWorkflowExecutionDecisionAttributes { - s.WorkflowTypeVersion = &v - return s -} - -// Provides the details of the ContinueAsNewWorkflowExecutionFailed event. -type ContinueAsNewWorkflowExecutionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. - // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"ContinueAsNewWorkflowExecutionFailedCause"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the ContinueAsNewWorkflowExecution decision that started - // this execution. 
This information can be useful for diagnosing problems by - // tracing back the chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s ContinueAsNewWorkflowExecutionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ContinueAsNewWorkflowExecutionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *ContinueAsNewWorkflowExecutionFailedEventAttributes) SetCause(v string) *ContinueAsNewWorkflowExecutionFailedEventAttributes { - s.Cause = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *ContinueAsNewWorkflowExecutionFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *ContinueAsNewWorkflowExecutionFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -type CountClosedWorkflowExecutionsInput struct { - _ struct{} `type:"structure"` - - // If specified, only workflow executions that match this close status are counted. - // This filter has an affect only if executionStatus is specified as CLOSED. - // - // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually - // exclusive. You can specify at most one of these in a request. - CloseStatusFilter *CloseStatusFilter `locationName:"closeStatusFilter" type:"structure"` - - // If specified, only workflow executions that meet the close time criteria - // of the filter are counted. - // - // startTimeFilter and closeTimeFilter are mutually exclusive. You must specify - // one of these in a request but not both. 
- CloseTimeFilter *ExecutionTimeFilter `locationName:"closeTimeFilter" type:"structure"` - - // The name of the domain containing the workflow executions to count. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // If specified, only workflow executions matching the WorkflowId in the filter - // are counted. - // - // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually - // exclusive. You can specify at most one of these in a request. - ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` - - // If specified, only workflow executions that meet the start time criteria - // of the filter are counted. - // - // startTimeFilter and closeTimeFilter are mutually exclusive. You must specify - // one of these in a request but not both. - StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure"` - - // If specified, only executions that have a tag that matches the filter are - // counted. - // - // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually - // exclusive. You can specify at most one of these in a request. - TagFilter *TagFilter `locationName:"tagFilter" type:"structure"` - - // If specified, indicates the type of the workflow executions to be counted. - // - // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually - // exclusive. You can specify at most one of these in a request. - TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"` -} - -// String returns the string representation -func (s CountClosedWorkflowExecutionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CountClosedWorkflowExecutionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CountClosedWorkflowExecutionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CountClosedWorkflowExecutionsInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.CloseStatusFilter != nil { - if err := s.CloseStatusFilter.Validate(); err != nil { - invalidParams.AddNested("CloseStatusFilter", err.(request.ErrInvalidParams)) - } - } - if s.CloseTimeFilter != nil { - if err := s.CloseTimeFilter.Validate(); err != nil { - invalidParams.AddNested("CloseTimeFilter", err.(request.ErrInvalidParams)) - } - } - if s.ExecutionFilter != nil { - if err := s.ExecutionFilter.Validate(); err != nil { - invalidParams.AddNested("ExecutionFilter", err.(request.ErrInvalidParams)) - } - } - if s.StartTimeFilter != nil { - if err := s.StartTimeFilter.Validate(); err != nil { - invalidParams.AddNested("StartTimeFilter", err.(request.ErrInvalidParams)) - } - } - if s.TagFilter != nil { - if err := s.TagFilter.Validate(); err != nil { - invalidParams.AddNested("TagFilter", err.(request.ErrInvalidParams)) - } - } - if s.TypeFilter != nil { - if err := s.TypeFilter.Validate(); err != nil { - invalidParams.AddNested("TypeFilter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCloseStatusFilter sets the CloseStatusFilter field's value. -func (s *CountClosedWorkflowExecutionsInput) SetCloseStatusFilter(v *CloseStatusFilter) *CountClosedWorkflowExecutionsInput { - s.CloseStatusFilter = v - return s -} - -// SetCloseTimeFilter sets the CloseTimeFilter field's value. -func (s *CountClosedWorkflowExecutionsInput) SetCloseTimeFilter(v *ExecutionTimeFilter) *CountClosedWorkflowExecutionsInput { - s.CloseTimeFilter = v - return s -} - -// SetDomain sets the Domain field's value. 
-func (s *CountClosedWorkflowExecutionsInput) SetDomain(v string) *CountClosedWorkflowExecutionsInput { - s.Domain = &v - return s -} - -// SetExecutionFilter sets the ExecutionFilter field's value. -func (s *CountClosedWorkflowExecutionsInput) SetExecutionFilter(v *WorkflowExecutionFilter) *CountClosedWorkflowExecutionsInput { - s.ExecutionFilter = v - return s -} - -// SetStartTimeFilter sets the StartTimeFilter field's value. -func (s *CountClosedWorkflowExecutionsInput) SetStartTimeFilter(v *ExecutionTimeFilter) *CountClosedWorkflowExecutionsInput { - s.StartTimeFilter = v - return s -} - -// SetTagFilter sets the TagFilter field's value. -func (s *CountClosedWorkflowExecutionsInput) SetTagFilter(v *TagFilter) *CountClosedWorkflowExecutionsInput { - s.TagFilter = v - return s -} - -// SetTypeFilter sets the TypeFilter field's value. -func (s *CountClosedWorkflowExecutionsInput) SetTypeFilter(v *WorkflowTypeFilter) *CountClosedWorkflowExecutionsInput { - s.TypeFilter = v - return s -} - -type CountOpenWorkflowExecutionsInput struct { - _ struct{} `type:"structure"` - - // The name of the domain containing the workflow executions to count. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // If specified, only workflow executions matching the WorkflowId in the filter - // are counted. - // - // executionFilter, typeFilter and tagFilter are mutually exclusive. You can - // specify at most one of these in a request. - ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` - - // Specifies the start time criteria that workflow executions must meet in order - // to be counted. - // - // StartTimeFilter is a required field - StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure" required:"true"` - - // If specified, only executions that have a tag that matches the filter are - // counted. 
- // - // executionFilter, typeFilter and tagFilter are mutually exclusive. You can - // specify at most one of these in a request. - TagFilter *TagFilter `locationName:"tagFilter" type:"structure"` - - // Specifies the type of the workflow executions to be counted. - // - // executionFilter, typeFilter and tagFilter are mutually exclusive. You can - // specify at most one of these in a request. - TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"` -} - -// String returns the string representation -func (s CountOpenWorkflowExecutionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CountOpenWorkflowExecutionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CountOpenWorkflowExecutionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CountOpenWorkflowExecutionsInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.StartTimeFilter == nil { - invalidParams.Add(request.NewErrParamRequired("StartTimeFilter")) - } - if s.ExecutionFilter != nil { - if err := s.ExecutionFilter.Validate(); err != nil { - invalidParams.AddNested("ExecutionFilter", err.(request.ErrInvalidParams)) - } - } - if s.StartTimeFilter != nil { - if err := s.StartTimeFilter.Validate(); err != nil { - invalidParams.AddNested("StartTimeFilter", err.(request.ErrInvalidParams)) - } - } - if s.TagFilter != nil { - if err := s.TagFilter.Validate(); err != nil { - invalidParams.AddNested("TagFilter", err.(request.ErrInvalidParams)) - } - } - if s.TypeFilter != nil { - if err := s.TypeFilter.Validate(); err != nil { - invalidParams.AddNested("TypeFilter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } 
- return nil -} - -// SetDomain sets the Domain field's value. -func (s *CountOpenWorkflowExecutionsInput) SetDomain(v string) *CountOpenWorkflowExecutionsInput { - s.Domain = &v - return s -} - -// SetExecutionFilter sets the ExecutionFilter field's value. -func (s *CountOpenWorkflowExecutionsInput) SetExecutionFilter(v *WorkflowExecutionFilter) *CountOpenWorkflowExecutionsInput { - s.ExecutionFilter = v - return s -} - -// SetStartTimeFilter sets the StartTimeFilter field's value. -func (s *CountOpenWorkflowExecutionsInput) SetStartTimeFilter(v *ExecutionTimeFilter) *CountOpenWorkflowExecutionsInput { - s.StartTimeFilter = v - return s -} - -// SetTagFilter sets the TagFilter field's value. -func (s *CountOpenWorkflowExecutionsInput) SetTagFilter(v *TagFilter) *CountOpenWorkflowExecutionsInput { - s.TagFilter = v - return s -} - -// SetTypeFilter sets the TypeFilter field's value. -func (s *CountOpenWorkflowExecutionsInput) SetTypeFilter(v *WorkflowTypeFilter) *CountOpenWorkflowExecutionsInput { - s.TypeFilter = v - return s -} - -type CountPendingActivityTasksInput struct { - _ struct{} `type:"structure"` - - // The name of the domain that contains the task list. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // The name of the task list. - // - // TaskList is a required field - TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` -} - -// String returns the string representation -func (s CountPendingActivityTasksInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CountPendingActivityTasksInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CountPendingActivityTasksInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CountPendingActivityTasksInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.TaskList == nil { - invalidParams.Add(request.NewErrParamRequired("TaskList")) - } - if s.TaskList != nil { - if err := s.TaskList.Validate(); err != nil { - invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *CountPendingActivityTasksInput) SetDomain(v string) *CountPendingActivityTasksInput { - s.Domain = &v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *CountPendingActivityTasksInput) SetTaskList(v *TaskList) *CountPendingActivityTasksInput { - s.TaskList = v - return s -} - -type CountPendingDecisionTasksInput struct { - _ struct{} `type:"structure"` - - // The name of the domain that contains the task list. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // The name of the task list. - // - // TaskList is a required field - TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` -} - -// String returns the string representation -func (s CountPendingDecisionTasksInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CountPendingDecisionTasksInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CountPendingDecisionTasksInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CountPendingDecisionTasksInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.TaskList == nil { - invalidParams.Add(request.NewErrParamRequired("TaskList")) - } - if s.TaskList != nil { - if err := s.TaskList.Validate(); err != nil { - invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *CountPendingDecisionTasksInput) SetDomain(v string) *CountPendingDecisionTasksInput { - s.Domain = &v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *CountPendingDecisionTasksInput) SetTaskList(v *TaskList) *CountPendingDecisionTasksInput { - s.TaskList = v - return s -} - -// Specifies a decision made by the decider. A decision can be one of these -// types: -// -// * CancelTimer – Cancels a previously started timer and records a TimerCanceled -// event in the history. -// -// * CancelWorkflowExecution – Closes the workflow execution and records -// a WorkflowExecutionCanceled event in the history. -// -// * CompleteWorkflowExecution – Closes the workflow execution and records -// a WorkflowExecutionCompleted event in the history . -// -// * ContinueAsNewWorkflowExecution – Closes the workflow execution and starts -// a new workflow execution of the same type using the same workflow ID and -// a unique run Id. A WorkflowExecutionContinuedAsNew event is recorded in -// the history. -// -// * FailWorkflowExecution – Closes the workflow execution and records a -// WorkflowExecutionFailed event in the history. -// -// * RecordMarker – Records a MarkerRecorded event in the history. 
Markers -// can be used for adding custom information in the history for instance -// to let deciders know that they don't need to look at the history beyond -// the marker event. -// -// * RequestCancelActivityTask – Attempts to cancel a previously scheduled -// activity task. If the activity task was scheduled but has not been assigned -// to a worker, then it is canceled. If the activity task was already assigned -// to a worker, then the worker is informed that cancellation has been requested -// in the response to RecordActivityTaskHeartbeat. -// -// * RequestCancelExternalWorkflowExecution – Requests that a request be -// made to cancel the specified external workflow execution and records a -// RequestCancelExternalWorkflowExecutionInitiated event in the history. -// -// * ScheduleActivityTask – Schedules an activity task. -// -// * SignalExternalWorkflowExecution – Requests a signal to be delivered -// to the specified external workflow execution and records a SignalExternalWorkflowExecutionInitiated -// event in the history. -// -// * StartChildWorkflowExecution – Requests that a child workflow execution -// be started and records a StartChildWorkflowExecutionInitiated event in -// the history. The child workflow execution is a separate workflow execution -// with its own history. -// -// * StartTimer – Starts a timer for this workflow execution and records -// a TimerStarted event in the history. This timer fires after the specified -// delay and record a TimerFired event. -// -// Access Control -// -// If you grant permission to use RespondDecisionTaskCompleted, you can use -// IAM policies to express permissions for the list of decisions returned by -// this action as if they were members of the API. Treating decisions as a pseudo -// API maintains a uniform conceptual model and helps keep policies readable. 
-// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// Decision Failure -// -// Decisions can fail for several reasons -// -// * The ordering of decisions should follow a logical flow. Some decisions -// might not make sense in the current context of the workflow execution -// and therefore fails. -// -// * A limit on your account was reached. -// -// * The decision lacks sufficient permissions. -// -// One of the following events might be added to the history to indicate an -// error. The event attribute's cause parameter indicates the cause. If cause -// is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked -// sufficient permissions. For details and example IAM policies, see Using IAM -// to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -// -// * ScheduleActivityTaskFailed – A ScheduleActivityTask decision failed. -// This could happen if the activity type specified in the decision isn't -// registered, is in a deprecated state, or the decision isn't properly configured. -// -// * RequestCancelActivityTaskFailed – A RequestCancelActivityTask decision -// failed. This could happen if there is no open activity task with the specified -// activityId. -// -// * StartTimerFailed – A StartTimer decision failed. This could happen if -// there is another open timer with the same timerId. -// -// * CancelTimerFailed – A CancelTimer decision failed. This could happen -// if there is no open timer with the specified timerId. -// -// * StartChildWorkflowExecutionFailed – A StartChildWorkflowExecution decision -// failed. This could happen if the workflow type specified isn't registered, -// is deprecated, or the decision isn't properly configured. 
-// -// * SignalExternalWorkflowExecutionFailed – A SignalExternalWorkflowExecution -// decision failed. This could happen if the workflowID specified in the -// decision was incorrect. -// -// * RequestCancelExternalWorkflowExecutionFailed – A RequestCancelExternalWorkflowExecution -// decision failed. This could happen if the workflowID specified in the -// decision was incorrect. -// -// * CancelWorkflowExecutionFailed – A CancelWorkflowExecution decision failed. -// This could happen if there is an unhandled decision task pending in the -// workflow execution. -// -// * CompleteWorkflowExecutionFailed – A CompleteWorkflowExecution decision -// failed. This could happen if there is an unhandled decision task pending -// in the workflow execution. -// -// * ContinueAsNewWorkflowExecutionFailed – A ContinueAsNewWorkflowExecution -// decision failed. This could happen if there is an unhandled decision task -// pending in the workflow execution or the ContinueAsNewWorkflowExecution -// decision was not configured correctly. -// -// * FailWorkflowExecutionFailed – A FailWorkflowExecution decision failed. -// This could happen if there is an unhandled decision task pending in the -// workflow execution. -// -// The preceding error events might occur due to an error in the decider logic, -// which might put the workflow execution in an unstable state The cause field -// in the event structure for the error event indicates the cause of the error. -// -// A workflow execution may be closed by the decider by returning one of the -// following decisions when completing a decision task: CompleteWorkflowExecution, -// FailWorkflowExecution, CancelWorkflowExecution and ContinueAsNewWorkflowExecution. -// An UnhandledDecision fault is returned if a workflow closing decision is -// specified and a signal or activity event had been added to the history while -// the decision task was being performed by the decider. 
Unlike the above situations -// which are logic issues, this fault is always possible because of race conditions -// in a distributed system. The right action here is to call RespondDecisionTaskCompleted -// without any decisions. This would result in another decision task with these -// new events included in the history. The decider should handle the new events -// and may decide to close the workflow execution. -// -// How to Code a Decision -// -// You code a decision by first setting the decision type field to one of the -// above decision values, and then set the corresponding attributes field shown -// below: -// -// * ScheduleActivityTaskDecisionAttributes -// -// * RequestCancelActivityTaskDecisionAttributes -// -// * CompleteWorkflowExecutionDecisionAttributes -// -// * FailWorkflowExecutionDecisionAttributes -// -// * CancelWorkflowExecutionDecisionAttributes -// -// * ContinueAsNewWorkflowExecutionDecisionAttributes -// -// * RecordMarkerDecisionAttributes -// -// * StartTimerDecisionAttributes -// -// * CancelTimerDecisionAttributes -// -// * SignalExternalWorkflowExecutionDecisionAttributes -// -// * RequestCancelExternalWorkflowExecutionDecisionAttributes -// -// * StartChildWorkflowExecutionDecisionAttributes -type Decision struct { - _ struct{} `type:"structure"` - - // Provides the details of the CancelTimer decision. It isn't set for other - // decision types. - CancelTimerDecisionAttributes *CancelTimerDecisionAttributes `locationName:"cancelTimerDecisionAttributes" type:"structure"` - - // Provides the details of the CancelWorkflowExecution decision. It isn't set - // for other decision types. - CancelWorkflowExecutionDecisionAttributes *CancelWorkflowExecutionDecisionAttributes `locationName:"cancelWorkflowExecutionDecisionAttributes" type:"structure"` - - // Provides the details of the CompleteWorkflowExecution decision. It isn't - // set for other decision types. 
- CompleteWorkflowExecutionDecisionAttributes *CompleteWorkflowExecutionDecisionAttributes `locationName:"completeWorkflowExecutionDecisionAttributes" type:"structure"` - - // Provides the details of the ContinueAsNewWorkflowExecution decision. It isn't - // set for other decision types. - ContinueAsNewWorkflowExecutionDecisionAttributes *ContinueAsNewWorkflowExecutionDecisionAttributes `locationName:"continueAsNewWorkflowExecutionDecisionAttributes" type:"structure"` - - // Specifies the type of the decision. - // - // DecisionType is a required field - DecisionType *string `locationName:"decisionType" type:"string" required:"true" enum:"DecisionType"` - - // Provides the details of the FailWorkflowExecution decision. It isn't set - // for other decision types. - FailWorkflowExecutionDecisionAttributes *FailWorkflowExecutionDecisionAttributes `locationName:"failWorkflowExecutionDecisionAttributes" type:"structure"` - - // Provides the details of the RecordMarker decision. It isn't set for other - // decision types. - RecordMarkerDecisionAttributes *RecordMarkerDecisionAttributes `locationName:"recordMarkerDecisionAttributes" type:"structure"` - - // Provides the details of the RequestCancelActivityTask decision. It isn't - // set for other decision types. - RequestCancelActivityTaskDecisionAttributes *RequestCancelActivityTaskDecisionAttributes `locationName:"requestCancelActivityTaskDecisionAttributes" type:"structure"` - - // Provides the details of the RequestCancelExternalWorkflowExecution decision. - // It isn't set for other decision types. - RequestCancelExternalWorkflowExecutionDecisionAttributes *RequestCancelExternalWorkflowExecutionDecisionAttributes `locationName:"requestCancelExternalWorkflowExecutionDecisionAttributes" type:"structure"` - - // Provides the details of the ScheduleActivityTask decision. It isn't set for - // other decision types. 
- ScheduleActivityTaskDecisionAttributes *ScheduleActivityTaskDecisionAttributes `locationName:"scheduleActivityTaskDecisionAttributes" type:"structure"` - - // Provides the details of the ScheduleLambdaFunction decision. It isn't set - // for other decision types. - ScheduleLambdaFunctionDecisionAttributes *ScheduleLambdaFunctionDecisionAttributes `locationName:"scheduleLambdaFunctionDecisionAttributes" type:"structure"` - - // Provides the details of the SignalExternalWorkflowExecution decision. It - // isn't set for other decision types. - SignalExternalWorkflowExecutionDecisionAttributes *SignalExternalWorkflowExecutionDecisionAttributes `locationName:"signalExternalWorkflowExecutionDecisionAttributes" type:"structure"` - - // Provides the details of the StartChildWorkflowExecution decision. It isn't - // set for other decision types. - StartChildWorkflowExecutionDecisionAttributes *StartChildWorkflowExecutionDecisionAttributes `locationName:"startChildWorkflowExecutionDecisionAttributes" type:"structure"` - - // Provides the details of the StartTimer decision. It isn't set for other decision - // types. - StartTimerDecisionAttributes *StartTimerDecisionAttributes `locationName:"startTimerDecisionAttributes" type:"structure"` -} - -// String returns the string representation -func (s Decision) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Decision) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *Decision) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Decision"} - if s.DecisionType == nil { - invalidParams.Add(request.NewErrParamRequired("DecisionType")) - } - if s.CancelTimerDecisionAttributes != nil { - if err := s.CancelTimerDecisionAttributes.Validate(); err != nil { - invalidParams.AddNested("CancelTimerDecisionAttributes", err.(request.ErrInvalidParams)) - } - } - if s.ContinueAsNewWorkflowExecutionDecisionAttributes != nil { - if err := s.ContinueAsNewWorkflowExecutionDecisionAttributes.Validate(); err != nil { - invalidParams.AddNested("ContinueAsNewWorkflowExecutionDecisionAttributes", err.(request.ErrInvalidParams)) - } - } - if s.RecordMarkerDecisionAttributes != nil { - if err := s.RecordMarkerDecisionAttributes.Validate(); err != nil { - invalidParams.AddNested("RecordMarkerDecisionAttributes", err.(request.ErrInvalidParams)) - } - } - if s.RequestCancelActivityTaskDecisionAttributes != nil { - if err := s.RequestCancelActivityTaskDecisionAttributes.Validate(); err != nil { - invalidParams.AddNested("RequestCancelActivityTaskDecisionAttributes", err.(request.ErrInvalidParams)) - } - } - if s.RequestCancelExternalWorkflowExecutionDecisionAttributes != nil { - if err := s.RequestCancelExternalWorkflowExecutionDecisionAttributes.Validate(); err != nil { - invalidParams.AddNested("RequestCancelExternalWorkflowExecutionDecisionAttributes", err.(request.ErrInvalidParams)) - } - } - if s.ScheduleActivityTaskDecisionAttributes != nil { - if err := s.ScheduleActivityTaskDecisionAttributes.Validate(); err != nil { - invalidParams.AddNested("ScheduleActivityTaskDecisionAttributes", err.(request.ErrInvalidParams)) - } - } - if s.ScheduleLambdaFunctionDecisionAttributes != nil { - if err := s.ScheduleLambdaFunctionDecisionAttributes.Validate(); err != nil { - invalidParams.AddNested("ScheduleLambdaFunctionDecisionAttributes", err.(request.ErrInvalidParams)) - } - } - if s.SignalExternalWorkflowExecutionDecisionAttributes 
!= nil { - if err := s.SignalExternalWorkflowExecutionDecisionAttributes.Validate(); err != nil { - invalidParams.AddNested("SignalExternalWorkflowExecutionDecisionAttributes", err.(request.ErrInvalidParams)) - } - } - if s.StartChildWorkflowExecutionDecisionAttributes != nil { - if err := s.StartChildWorkflowExecutionDecisionAttributes.Validate(); err != nil { - invalidParams.AddNested("StartChildWorkflowExecutionDecisionAttributes", err.(request.ErrInvalidParams)) - } - } - if s.StartTimerDecisionAttributes != nil { - if err := s.StartTimerDecisionAttributes.Validate(); err != nil { - invalidParams.AddNested("StartTimerDecisionAttributes", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCancelTimerDecisionAttributes sets the CancelTimerDecisionAttributes field's value. -func (s *Decision) SetCancelTimerDecisionAttributes(v *CancelTimerDecisionAttributes) *Decision { - s.CancelTimerDecisionAttributes = v - return s -} - -// SetCancelWorkflowExecutionDecisionAttributes sets the CancelWorkflowExecutionDecisionAttributes field's value. -func (s *Decision) SetCancelWorkflowExecutionDecisionAttributes(v *CancelWorkflowExecutionDecisionAttributes) *Decision { - s.CancelWorkflowExecutionDecisionAttributes = v - return s -} - -// SetCompleteWorkflowExecutionDecisionAttributes sets the CompleteWorkflowExecutionDecisionAttributes field's value. -func (s *Decision) SetCompleteWorkflowExecutionDecisionAttributes(v *CompleteWorkflowExecutionDecisionAttributes) *Decision { - s.CompleteWorkflowExecutionDecisionAttributes = v - return s -} - -// SetContinueAsNewWorkflowExecutionDecisionAttributes sets the ContinueAsNewWorkflowExecutionDecisionAttributes field's value. 
-func (s *Decision) SetContinueAsNewWorkflowExecutionDecisionAttributes(v *ContinueAsNewWorkflowExecutionDecisionAttributes) *Decision { - s.ContinueAsNewWorkflowExecutionDecisionAttributes = v - return s -} - -// SetDecisionType sets the DecisionType field's value. -func (s *Decision) SetDecisionType(v string) *Decision { - s.DecisionType = &v - return s -} - -// SetFailWorkflowExecutionDecisionAttributes sets the FailWorkflowExecutionDecisionAttributes field's value. -func (s *Decision) SetFailWorkflowExecutionDecisionAttributes(v *FailWorkflowExecutionDecisionAttributes) *Decision { - s.FailWorkflowExecutionDecisionAttributes = v - return s -} - -// SetRecordMarkerDecisionAttributes sets the RecordMarkerDecisionAttributes field's value. -func (s *Decision) SetRecordMarkerDecisionAttributes(v *RecordMarkerDecisionAttributes) *Decision { - s.RecordMarkerDecisionAttributes = v - return s -} - -// SetRequestCancelActivityTaskDecisionAttributes sets the RequestCancelActivityTaskDecisionAttributes field's value. -func (s *Decision) SetRequestCancelActivityTaskDecisionAttributes(v *RequestCancelActivityTaskDecisionAttributes) *Decision { - s.RequestCancelActivityTaskDecisionAttributes = v - return s -} - -// SetRequestCancelExternalWorkflowExecutionDecisionAttributes sets the RequestCancelExternalWorkflowExecutionDecisionAttributes field's value. -func (s *Decision) SetRequestCancelExternalWorkflowExecutionDecisionAttributes(v *RequestCancelExternalWorkflowExecutionDecisionAttributes) *Decision { - s.RequestCancelExternalWorkflowExecutionDecisionAttributes = v - return s -} - -// SetScheduleActivityTaskDecisionAttributes sets the ScheduleActivityTaskDecisionAttributes field's value. 
-func (s *Decision) SetScheduleActivityTaskDecisionAttributes(v *ScheduleActivityTaskDecisionAttributes) *Decision { - s.ScheduleActivityTaskDecisionAttributes = v - return s -} - -// SetScheduleLambdaFunctionDecisionAttributes sets the ScheduleLambdaFunctionDecisionAttributes field's value. -func (s *Decision) SetScheduleLambdaFunctionDecisionAttributes(v *ScheduleLambdaFunctionDecisionAttributes) *Decision { - s.ScheduleLambdaFunctionDecisionAttributes = v - return s -} - -// SetSignalExternalWorkflowExecutionDecisionAttributes sets the SignalExternalWorkflowExecutionDecisionAttributes field's value. -func (s *Decision) SetSignalExternalWorkflowExecutionDecisionAttributes(v *SignalExternalWorkflowExecutionDecisionAttributes) *Decision { - s.SignalExternalWorkflowExecutionDecisionAttributes = v - return s -} - -// SetStartChildWorkflowExecutionDecisionAttributes sets the StartChildWorkflowExecutionDecisionAttributes field's value. -func (s *Decision) SetStartChildWorkflowExecutionDecisionAttributes(v *StartChildWorkflowExecutionDecisionAttributes) *Decision { - s.StartChildWorkflowExecutionDecisionAttributes = v - return s -} - -// SetStartTimerDecisionAttributes sets the StartTimerDecisionAttributes field's value. -func (s *Decision) SetStartTimerDecisionAttributes(v *StartTimerDecisionAttributes) *Decision { - s.StartTimerDecisionAttributes = v - return s -} - -// Provides the details of the DecisionTaskCompleted event. -type DecisionTaskCompletedEventAttributes struct { - _ struct{} `type:"structure"` - - // User defined context for the workflow execution. - ExecutionContext *string `locationName:"executionContext" type:"string"` - - // The ID of the DecisionTaskScheduled event that was recorded when this decision - // task was scheduled. This information can be useful for diagnosing problems - // by tracing back the chain of events leading up to this event. 
- // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` - - // The ID of the DecisionTaskStarted event recorded when this decision task - // was started. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s DecisionTaskCompletedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecisionTaskCompletedEventAttributes) GoString() string { - return s.String() -} - -// SetExecutionContext sets the ExecutionContext field's value. -func (s *DecisionTaskCompletedEventAttributes) SetExecutionContext(v string) *DecisionTaskCompletedEventAttributes { - s.ExecutionContext = &v - return s -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *DecisionTaskCompletedEventAttributes) SetScheduledEventId(v int64) *DecisionTaskCompletedEventAttributes { - s.ScheduledEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *DecisionTaskCompletedEventAttributes) SetStartedEventId(v int64) *DecisionTaskCompletedEventAttributes { - s.StartedEventId = &v - return s -} - -// Provides details about the DecisionTaskScheduled event. -type DecisionTaskScheduledEventAttributes struct { - _ struct{} `type:"structure"` - - // The maximum duration for this decision task. The task is considered timed - // out if it doesn't completed within this duration. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. 
- StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"` - - // The name of the task list in which the decision task was scheduled. - // - // TaskList is a required field - TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` - - // A task priority that, if set, specifies the priority for this decision task. - // Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) - // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. - // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the Amazon SWF Developer Guide. - TaskPriority *string `locationName:"taskPriority" type:"string"` -} - -// String returns the string representation -func (s DecisionTaskScheduledEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecisionTaskScheduledEventAttributes) GoString() string { - return s.String() -} - -// SetStartToCloseTimeout sets the StartToCloseTimeout field's value. -func (s *DecisionTaskScheduledEventAttributes) SetStartToCloseTimeout(v string) *DecisionTaskScheduledEventAttributes { - s.StartToCloseTimeout = &v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *DecisionTaskScheduledEventAttributes) SetTaskList(v *TaskList) *DecisionTaskScheduledEventAttributes { - s.TaskList = v - return s -} - -// SetTaskPriority sets the TaskPriority field's value. -func (s *DecisionTaskScheduledEventAttributes) SetTaskPriority(v string) *DecisionTaskScheduledEventAttributes { - s.TaskPriority = &v - return s -} - -// Provides the details of the DecisionTaskStarted event. -type DecisionTaskStartedEventAttributes struct { - _ struct{} `type:"structure"` - - // Identity of the decider making the request. This enables diagnostic tracing - // when problems arise. 
The form of this identity is user defined. - Identity *string `locationName:"identity" type:"string"` - - // The ID of the DecisionTaskScheduled event that was recorded when this decision - // task was scheduled. This information can be useful for diagnosing problems - // by tracing back the chain of events leading up to this event. - // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s DecisionTaskStartedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecisionTaskStartedEventAttributes) GoString() string { - return s.String() -} - -// SetIdentity sets the Identity field's value. -func (s *DecisionTaskStartedEventAttributes) SetIdentity(v string) *DecisionTaskStartedEventAttributes { - s.Identity = &v - return s -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *DecisionTaskStartedEventAttributes) SetScheduledEventId(v int64) *DecisionTaskStartedEventAttributes { - s.ScheduledEventId = &v - return s -} - -// Provides the details of the DecisionTaskTimedOut event. -type DecisionTaskTimedOutEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the DecisionTaskScheduled event that was recorded when this decision - // task was scheduled. This information can be useful for diagnosing problems - // by tracing back the chain of events leading up to this event. - // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` - - // The ID of the DecisionTaskStarted event recorded when this decision task - // was started. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. 
- // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The type of timeout that expired before the decision task could be completed. - // - // TimeoutType is a required field - TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"DecisionTaskTimeoutType"` -} - -// String returns the string representation -func (s DecisionTaskTimedOutEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecisionTaskTimedOutEventAttributes) GoString() string { - return s.String() -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *DecisionTaskTimedOutEventAttributes) SetScheduledEventId(v int64) *DecisionTaskTimedOutEventAttributes { - s.ScheduledEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *DecisionTaskTimedOutEventAttributes) SetStartedEventId(v int64) *DecisionTaskTimedOutEventAttributes { - s.StartedEventId = &v - return s -} - -// SetTimeoutType sets the TimeoutType field's value. -func (s *DecisionTaskTimedOutEventAttributes) SetTimeoutType(v string) *DecisionTaskTimedOutEventAttributes { - s.TimeoutType = &v - return s -} - -type DeprecateActivityTypeInput struct { - _ struct{} `type:"structure"` - - // The activity type to deprecate. - // - // ActivityType is a required field - ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` - - // The name of the domain in which the activity type is registered. 
- // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeprecateActivityTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeprecateActivityTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeprecateActivityTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeprecateActivityTypeInput"} - if s.ActivityType == nil { - invalidParams.Add(request.NewErrParamRequired("ActivityType")) - } - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.ActivityType != nil { - if err := s.ActivityType.Validate(); err != nil { - invalidParams.AddNested("ActivityType", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetActivityType sets the ActivityType field's value. -func (s *DeprecateActivityTypeInput) SetActivityType(v *ActivityType) *DeprecateActivityTypeInput { - s.ActivityType = v - return s -} - -// SetDomain sets the Domain field's value. -func (s *DeprecateActivityTypeInput) SetDomain(v string) *DeprecateActivityTypeInput { - s.Domain = &v - return s -} - -type DeprecateActivityTypeOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeprecateActivityTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeprecateActivityTypeOutput) GoString() string { - return s.String() -} - -type DeprecateDomainInput struct { - _ struct{} `type:"structure"` - - // The name of the domain to deprecate. 
- // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeprecateDomainInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeprecateDomainInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeprecateDomainInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeprecateDomainInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeprecateDomainInput) SetName(v string) *DeprecateDomainInput { - s.Name = &v - return s -} - -type DeprecateDomainOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeprecateDomainOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeprecateDomainOutput) GoString() string { - return s.String() -} - -type DeprecateWorkflowTypeInput struct { - _ struct{} `type:"structure"` - - // The name of the domain in which the workflow type is registered. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // The workflow type to deprecate. 
- // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s DeprecateWorkflowTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeprecateWorkflowTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeprecateWorkflowTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeprecateWorkflowTypeInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.WorkflowType == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowType")) - } - if s.WorkflowType != nil { - if err := s.WorkflowType.Validate(); err != nil { - invalidParams.AddNested("WorkflowType", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *DeprecateWorkflowTypeInput) SetDomain(v string) *DeprecateWorkflowTypeInput { - s.Domain = &v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *DeprecateWorkflowTypeInput) SetWorkflowType(v *WorkflowType) *DeprecateWorkflowTypeInput { - s.WorkflowType = v - return s -} - -type DeprecateWorkflowTypeOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeprecateWorkflowTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeprecateWorkflowTypeOutput) GoString() string { - return s.String() -} - -type DescribeActivityTypeInput struct { - _ struct{} `type:"structure"` - - // The activity type to get information about. 
Activity types are identified - // by the name and version that were supplied when the activity was registered. - // - // ActivityType is a required field - ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` - - // The name of the domain in which the activity type is registered. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeActivityTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeActivityTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeActivityTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeActivityTypeInput"} - if s.ActivityType == nil { - invalidParams.Add(request.NewErrParamRequired("ActivityType")) - } - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.ActivityType != nil { - if err := s.ActivityType.Validate(); err != nil { - invalidParams.AddNested("ActivityType", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetActivityType sets the ActivityType field's value. -func (s *DescribeActivityTypeInput) SetActivityType(v *ActivityType) *DescribeActivityTypeInput { - s.ActivityType = v - return s -} - -// SetDomain sets the Domain field's value. -func (s *DescribeActivityTypeInput) SetDomain(v string) *DescribeActivityTypeInput { - s.Domain = &v - return s -} - -// Detailed information about an activity type. -type DescribeActivityTypeOutput struct { - _ struct{} `type:"structure"` - - // The configuration settings registered with the activity type. 
- // - // Configuration is a required field - Configuration *ActivityTypeConfiguration `locationName:"configuration" type:"structure" required:"true"` - - // General information about the activity type. - // - // The status of activity type (returned in the ActivityTypeInfo structure) - // can be one of the following. - // - // * REGISTERED – The type is registered and available. Workers supporting - // this type should be running. - // - // * DEPRECATED – The type was deprecated using DeprecateActivityType, but - // is still in use. You should keep workers supporting this type running. - // You cannot create new tasks of this type. - // - // TypeInfo is a required field - TypeInfo *ActivityTypeInfo `locationName:"typeInfo" type:"structure" required:"true"` -} - -// String returns the string representation -func (s DescribeActivityTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeActivityTypeOutput) GoString() string { - return s.String() -} - -// SetConfiguration sets the Configuration field's value. -func (s *DescribeActivityTypeOutput) SetConfiguration(v *ActivityTypeConfiguration) *DescribeActivityTypeOutput { - s.Configuration = v - return s -} - -// SetTypeInfo sets the TypeInfo field's value. -func (s *DescribeActivityTypeOutput) SetTypeInfo(v *ActivityTypeInfo) *DescribeActivityTypeOutput { - s.TypeInfo = v - return s -} - -type DescribeDomainInput struct { - _ struct{} `type:"structure"` - - // The name of the domain to describe. - // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeDomainInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeDomainInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeDomainInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeDomainInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DescribeDomainInput) SetName(v string) *DescribeDomainInput { - s.Name = &v - return s -} - -// Contains details of a domain. -type DescribeDomainOutput struct { - _ struct{} `type:"structure"` - - // The domain configuration. Currently, this includes only the domain's retention - // period. - // - // Configuration is a required field - Configuration *DomainConfiguration `locationName:"configuration" type:"structure" required:"true"` - - // The basic information about a domain, such as its name, status, and description. - // - // DomainInfo is a required field - DomainInfo *DomainInfo `locationName:"domainInfo" type:"structure" required:"true"` -} - -// String returns the string representation -func (s DescribeDomainOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeDomainOutput) GoString() string { - return s.String() -} - -// SetConfiguration sets the Configuration field's value. -func (s *DescribeDomainOutput) SetConfiguration(v *DomainConfiguration) *DescribeDomainOutput { - s.Configuration = v - return s -} - -// SetDomainInfo sets the DomainInfo field's value. -func (s *DescribeDomainOutput) SetDomainInfo(v *DomainInfo) *DescribeDomainOutput { - s.DomainInfo = v - return s -} - -type DescribeWorkflowExecutionInput struct { - _ struct{} `type:"structure"` - - // The name of the domain containing the workflow execution. 
- // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // The workflow execution to describe. - // - // Execution is a required field - Execution *WorkflowExecution `locationName:"execution" type:"structure" required:"true"` -} - -// String returns the string representation -func (s DescribeWorkflowExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkflowExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeWorkflowExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeWorkflowExecutionInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.Execution == nil { - invalidParams.Add(request.NewErrParamRequired("Execution")) - } - if s.Execution != nil { - if err := s.Execution.Validate(); err != nil { - invalidParams.AddNested("Execution", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *DescribeWorkflowExecutionInput) SetDomain(v string) *DescribeWorkflowExecutionInput { - s.Domain = &v - return s -} - -// SetExecution sets the Execution field's value. -func (s *DescribeWorkflowExecutionInput) SetExecution(v *WorkflowExecution) *DescribeWorkflowExecutionInput { - s.Execution = v - return s -} - -// Contains details about a workflow execution. -type DescribeWorkflowExecutionOutput struct { - _ struct{} `type:"structure"` - - // The configuration settings for this workflow execution including timeout - // values, tasklist etc. 
- // - // ExecutionConfiguration is a required field - ExecutionConfiguration *WorkflowExecutionConfiguration `locationName:"executionConfiguration" type:"structure" required:"true"` - - // Information about the workflow execution. - // - // ExecutionInfo is a required field - ExecutionInfo *WorkflowExecutionInfo `locationName:"executionInfo" type:"structure" required:"true"` - - // The time when the last activity task was scheduled for this workflow execution. - // You can use this information to determine if the workflow has not made progress - // for an unusually long period of time and might require a corrective action. - LatestActivityTaskTimestamp *time.Time `locationName:"latestActivityTaskTimestamp" type:"timestamp" timestampFormat:"unix"` - - // The latest executionContext provided by the decider for this workflow execution. - // A decider can provide an executionContext (a free-form string) when closing - // a decision task using RespondDecisionTaskCompleted. - LatestExecutionContext *string `locationName:"latestExecutionContext" type:"string"` - - // The number of tasks for this workflow execution. This includes open and closed - // tasks of all types. - // - // OpenCounts is a required field - OpenCounts *WorkflowExecutionOpenCounts `locationName:"openCounts" type:"structure" required:"true"` -} - -// String returns the string representation -func (s DescribeWorkflowExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkflowExecutionOutput) GoString() string { - return s.String() -} - -// SetExecutionConfiguration sets the ExecutionConfiguration field's value. -func (s *DescribeWorkflowExecutionOutput) SetExecutionConfiguration(v *WorkflowExecutionConfiguration) *DescribeWorkflowExecutionOutput { - s.ExecutionConfiguration = v - return s -} - -// SetExecutionInfo sets the ExecutionInfo field's value. 
-func (s *DescribeWorkflowExecutionOutput) SetExecutionInfo(v *WorkflowExecutionInfo) *DescribeWorkflowExecutionOutput { - s.ExecutionInfo = v - return s -} - -// SetLatestActivityTaskTimestamp sets the LatestActivityTaskTimestamp field's value. -func (s *DescribeWorkflowExecutionOutput) SetLatestActivityTaskTimestamp(v time.Time) *DescribeWorkflowExecutionOutput { - s.LatestActivityTaskTimestamp = &v - return s -} - -// SetLatestExecutionContext sets the LatestExecutionContext field's value. -func (s *DescribeWorkflowExecutionOutput) SetLatestExecutionContext(v string) *DescribeWorkflowExecutionOutput { - s.LatestExecutionContext = &v - return s -} - -// SetOpenCounts sets the OpenCounts field's value. -func (s *DescribeWorkflowExecutionOutput) SetOpenCounts(v *WorkflowExecutionOpenCounts) *DescribeWorkflowExecutionOutput { - s.OpenCounts = v - return s -} - -type DescribeWorkflowTypeInput struct { - _ struct{} `type:"structure"` - - // The name of the domain in which this workflow type is registered. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // The workflow type to describe. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s DescribeWorkflowTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkflowTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeWorkflowTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeWorkflowTypeInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.WorkflowType == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowType")) - } - if s.WorkflowType != nil { - if err := s.WorkflowType.Validate(); err != nil { - invalidParams.AddNested("WorkflowType", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *DescribeWorkflowTypeInput) SetDomain(v string) *DescribeWorkflowTypeInput { - s.Domain = &v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *DescribeWorkflowTypeInput) SetWorkflowType(v *WorkflowType) *DescribeWorkflowTypeInput { - s.WorkflowType = v - return s -} - -// Contains details about a workflow type. -type DescribeWorkflowTypeOutput struct { - _ struct{} `type:"structure"` - - // Configuration settings of the workflow type registered through RegisterWorkflowType - // - // Configuration is a required field - Configuration *WorkflowTypeConfiguration `locationName:"configuration" type:"structure" required:"true"` - - // General information about the workflow type. - // - // The status of the workflow type (returned in the WorkflowTypeInfo structure) - // can be one of the following. - // - // * REGISTERED – The type is registered and available. Workers supporting - // this type should be running. - // - // * DEPRECATED – The type was deprecated using DeprecateWorkflowType, but - // is still in use. You should keep workers supporting this type running. - // You cannot create new workflow executions of this type. 
- // - // TypeInfo is a required field - TypeInfo *WorkflowTypeInfo `locationName:"typeInfo" type:"structure" required:"true"` -} - -// String returns the string representation -func (s DescribeWorkflowTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkflowTypeOutput) GoString() string { - return s.String() -} - -// SetConfiguration sets the Configuration field's value. -func (s *DescribeWorkflowTypeOutput) SetConfiguration(v *WorkflowTypeConfiguration) *DescribeWorkflowTypeOutput { - s.Configuration = v - return s -} - -// SetTypeInfo sets the TypeInfo field's value. -func (s *DescribeWorkflowTypeOutput) SetTypeInfo(v *WorkflowTypeInfo) *DescribeWorkflowTypeOutput { - s.TypeInfo = v - return s -} - -// Contains the configuration settings of a domain. -type DomainConfiguration struct { - _ struct{} `type:"structure"` - - // The retention period for workflow executions in this domain. - // - // WorkflowExecutionRetentionPeriodInDays is a required field - WorkflowExecutionRetentionPeriodInDays *string `locationName:"workflowExecutionRetentionPeriodInDays" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DomainConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DomainConfiguration) GoString() string { - return s.String() -} - -// SetWorkflowExecutionRetentionPeriodInDays sets the WorkflowExecutionRetentionPeriodInDays field's value. -func (s *DomainConfiguration) SetWorkflowExecutionRetentionPeriodInDays(v string) *DomainConfiguration { - s.WorkflowExecutionRetentionPeriodInDays = &v - return s -} - -// Contains general information about a domain. -type DomainInfo struct { - _ struct{} `type:"structure"` - - // The description of the domain provided through RegisterDomain. - Description *string `locationName:"description" type:"string"` - - // The name of the domain. 
This name is unique within the account. - // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` - - // The status of the domain: - // - // * REGISTERED – The domain is properly registered and available. You can - // use this domain for registering types and creating new workflow executions. - // - // - // * DEPRECATED – The domain was deprecated using DeprecateDomain, but is - // still in use. You should not create new workflow executions in this domain. - // - // Status is a required field - Status *string `locationName:"status" type:"string" required:"true" enum:"RegistrationStatus"` -} - -// String returns the string representation -func (s DomainInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DomainInfo) GoString() string { - return s.String() -} - -// SetDescription sets the Description field's value. -func (s *DomainInfo) SetDescription(v string) *DomainInfo { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *DomainInfo) SetName(v string) *DomainInfo { - s.Name = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *DomainInfo) SetStatus(v string) *DomainInfo { - s.Status = &v - return s -} - -// Used to filter the workflow executions in visibility APIs by various time-based -// rules. Each parameter, if specified, defines a rule that must be satisfied -// by each returned query result. The parameter values are in the Unix Time -// format (https://en.wikipedia.org/wiki/Unix_time). For example: "oldestDate": -// 1325376070. -type ExecutionTimeFilter struct { - _ struct{} `type:"structure"` - - // Specifies the latest start or close date and time to return. - LatestDate *time.Time `locationName:"latestDate" type:"timestamp" timestampFormat:"unix"` - - // Specifies the oldest start or close date and time to return. 
- // - // OldestDate is a required field - OldestDate *time.Time `locationName:"oldestDate" type:"timestamp" timestampFormat:"unix" required:"true"` -} - -// String returns the string representation -func (s ExecutionTimeFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ExecutionTimeFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ExecutionTimeFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ExecutionTimeFilter"} - if s.OldestDate == nil { - invalidParams.Add(request.NewErrParamRequired("OldestDate")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLatestDate sets the LatestDate field's value. -func (s *ExecutionTimeFilter) SetLatestDate(v time.Time) *ExecutionTimeFilter { - s.LatestDate = &v - return s -} - -// SetOldestDate sets the OldestDate field's value. -func (s *ExecutionTimeFilter) SetOldestDate(v time.Time) *ExecutionTimeFilter { - s.OldestDate = &v - return s -} - -// Provides the details of the ExternalWorkflowExecutionCancelRequested event. -type ExternalWorkflowExecutionCancelRequestedEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding - // to the RequestCancelExternalWorkflowExecution decision to cancel this external - // workflow execution. This information can be useful for diagnosing problems - // by tracing back the chain of events leading up to this event. - // - // InitiatedEventId is a required field - InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` - - // The external workflow execution to which the cancellation request was delivered. 
- // - // WorkflowExecution is a required field - WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` -} - -// String returns the string representation -func (s ExternalWorkflowExecutionCancelRequestedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ExternalWorkflowExecutionCancelRequestedEventAttributes) GoString() string { - return s.String() -} - -// SetInitiatedEventId sets the InitiatedEventId field's value. -func (s *ExternalWorkflowExecutionCancelRequestedEventAttributes) SetInitiatedEventId(v int64) *ExternalWorkflowExecutionCancelRequestedEventAttributes { - s.InitiatedEventId = &v - return s -} - -// SetWorkflowExecution sets the WorkflowExecution field's value. -func (s *ExternalWorkflowExecutionCancelRequestedEventAttributes) SetWorkflowExecution(v *WorkflowExecution) *ExternalWorkflowExecutionCancelRequestedEventAttributes { - s.WorkflowExecution = v - return s -} - -// Provides the details of the ExternalWorkflowExecutionSignaled event. -type ExternalWorkflowExecutionSignaledEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the SignalExternalWorkflowExecutionInitiated event corresponding - // to the SignalExternalWorkflowExecution decision to request this signal. This - // information can be useful for diagnosing problems by tracing back the chain - // of events leading up to this event. - // - // InitiatedEventId is a required field - InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` - - // The external workflow execution that the signal was delivered to. 
- // - // WorkflowExecution is a required field - WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` -} - -// String returns the string representation -func (s ExternalWorkflowExecutionSignaledEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ExternalWorkflowExecutionSignaledEventAttributes) GoString() string { - return s.String() -} - -// SetInitiatedEventId sets the InitiatedEventId field's value. -func (s *ExternalWorkflowExecutionSignaledEventAttributes) SetInitiatedEventId(v int64) *ExternalWorkflowExecutionSignaledEventAttributes { - s.InitiatedEventId = &v - return s -} - -// SetWorkflowExecution sets the WorkflowExecution field's value. -func (s *ExternalWorkflowExecutionSignaledEventAttributes) SetWorkflowExecution(v *WorkflowExecution) *ExternalWorkflowExecutionSignaledEventAttributes { - s.WorkflowExecution = v - return s -} - -// Provides the details of the FailWorkflowExecution decision. -// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. 
-type FailWorkflowExecutionDecisionAttributes struct { - _ struct{} `type:"structure"` - - // Details of the failure. - Details *string `locationName:"details" type:"string"` - - // A descriptive reason for the failure that may help in diagnostics. - Reason *string `locationName:"reason" type:"string"` -} - -// String returns the string representation -func (s FailWorkflowExecutionDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FailWorkflowExecutionDecisionAttributes) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. -func (s *FailWorkflowExecutionDecisionAttributes) SetDetails(v string) *FailWorkflowExecutionDecisionAttributes { - s.Details = &v - return s -} - -// SetReason sets the Reason field's value. -func (s *FailWorkflowExecutionDecisionAttributes) SetReason(v string) *FailWorkflowExecutionDecisionAttributes { - s.Reason = &v - return s -} - -// Provides the details of the FailWorkflowExecutionFailed event. -type FailWorkflowExecutionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. - // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"FailWorkflowExecutionFailedCause"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the FailWorkflowExecution decision to fail this execution. 
- // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s FailWorkflowExecutionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FailWorkflowExecutionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *FailWorkflowExecutionFailedEventAttributes) SetCause(v string) *FailWorkflowExecutionFailedEventAttributes { - s.Cause = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *FailWorkflowExecutionFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *FailWorkflowExecutionFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -type GetWorkflowExecutionHistoryInput struct { - _ struct{} `type:"structure"` - - // The name of the domain containing the workflow execution. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // Specifies the workflow execution for which to return the history. - // - // Execution is a required field - Execution *WorkflowExecution `locationName:"execution" type:"structure" required:"true"` - - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. 
- MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - NextPageToken *string `locationName:"nextPageToken" type:"string"` - - // When set to true, returns the events in reverse order. By default the results - // are returned in ascending order of the eventTimeStamp of the events. - ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` -} - -// String returns the string representation -func (s GetWorkflowExecutionHistoryInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetWorkflowExecutionHistoryInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetWorkflowExecutionHistoryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetWorkflowExecutionHistoryInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.Execution == nil { - invalidParams.Add(request.NewErrParamRequired("Execution")) - } - if s.Execution != nil { - if err := s.Execution.Validate(); err != nil { - invalidParams.AddNested("Execution", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *GetWorkflowExecutionHistoryInput) SetDomain(v string) *GetWorkflowExecutionHistoryInput { - s.Domain = &v - return s -} - -// SetExecution sets the Execution field's value. 
-func (s *GetWorkflowExecutionHistoryInput) SetExecution(v *WorkflowExecution) *GetWorkflowExecutionHistoryInput { - s.Execution = v - return s -} - -// SetMaximumPageSize sets the MaximumPageSize field's value. -func (s *GetWorkflowExecutionHistoryInput) SetMaximumPageSize(v int64) *GetWorkflowExecutionHistoryInput { - s.MaximumPageSize = &v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *GetWorkflowExecutionHistoryInput) SetNextPageToken(v string) *GetWorkflowExecutionHistoryInput { - s.NextPageToken = &v - return s -} - -// SetReverseOrder sets the ReverseOrder field's value. -func (s *GetWorkflowExecutionHistoryInput) SetReverseOrder(v bool) *GetWorkflowExecutionHistoryInput { - s.ReverseOrder = &v - return s -} - -// Paginated representation of a workflow history for a workflow execution. -// This is the up to date, complete and authoritative record of the events related -// to all tasks and events in the life of the workflow execution. -type GetWorkflowExecutionHistoryOutput struct { - _ struct{} `type:"structure"` - - // The list of history events. - // - // Events is a required field - Events []*HistoryEvent `locationName:"events" type:"list" required:"true"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - NextPageToken *string `locationName:"nextPageToken" type:"string"` -} - -// String returns the string representation -func (s GetWorkflowExecutionHistoryOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetWorkflowExecutionHistoryOutput) GoString() string { - return s.String() -} - -// SetEvents sets the Events field's value. 
-func (s *GetWorkflowExecutionHistoryOutput) SetEvents(v []*HistoryEvent) *GetWorkflowExecutionHistoryOutput { - s.Events = v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *GetWorkflowExecutionHistoryOutput) SetNextPageToken(v string) *GetWorkflowExecutionHistoryOutput { - s.NextPageToken = &v - return s -} - -// Event within a workflow execution. A history event can be one of these types: -// -// * ActivityTaskCancelRequested – A RequestCancelActivityTask decision was -// received by the system. -// -// * ActivityTaskCanceled – The activity task was successfully canceled. -// -// * ActivityTaskCompleted – An activity worker successfully completed an -// activity task by calling RespondActivityTaskCompleted. -// -// * ActivityTaskFailed – An activity worker failed an activity task by calling -// RespondActivityTaskFailed. -// -// * ActivityTaskScheduled – An activity task was scheduled for execution. -// -// * ActivityTaskStarted – The scheduled activity task was dispatched to -// a worker. -// -// * ActivityTaskTimedOut – The activity task timed out. -// -// * CancelTimerFailed – Failed to process CancelTimer decision. This happens -// when the decision isn't configured properly, for example no timer exists -// with the specified timer Id. -// -// * CancelWorkflowExecutionFailed – A request to cancel a workflow execution -// failed. -// -// * ChildWorkflowExecutionCanceled – A child workflow execution, started -// by this workflow execution, was canceled and closed. -// -// * ChildWorkflowExecutionCompleted – A child workflow execution, started -// by this workflow execution, completed successfully and was closed. -// -// * ChildWorkflowExecutionFailed – A child workflow execution, started by -// this workflow execution, failed to complete successfully and was closed. -// -// * ChildWorkflowExecutionStarted – A child workflow execution was successfully -// started. 
-// -// * ChildWorkflowExecutionTerminated – A child workflow execution, started -// by this workflow execution, was terminated. -// -// * ChildWorkflowExecutionTimedOut – A child workflow execution, started -// by this workflow execution, timed out and was closed. -// -// * CompleteWorkflowExecutionFailed – The workflow execution failed to complete. -// -// * ContinueAsNewWorkflowExecutionFailed – The workflow execution failed -// to complete after being continued as a new workflow execution. -// -// * DecisionTaskCompleted – The decider successfully completed a decision -// task by calling RespondDecisionTaskCompleted. -// -// * DecisionTaskScheduled – A decision task was scheduled for the workflow -// execution. -// -// * DecisionTaskStarted – The decision task was dispatched to a decider. -// -// * DecisionTaskTimedOut – The decision task timed out. -// -// * ExternalWorkflowExecutionCancelRequested – Request to cancel an external -// workflow execution was successfully delivered to the target execution. -// -// * ExternalWorkflowExecutionSignaled – A signal, requested by this workflow -// execution, was successfully delivered to the target external workflow -// execution. -// -// * FailWorkflowExecutionFailed – A request to mark a workflow execution -// as failed, itself failed. -// -// * MarkerRecorded – A marker was recorded in the workflow history as the -// result of a RecordMarker decision. -// -// * RecordMarkerFailed – A RecordMarker decision was returned as failed. -// -// * RequestCancelActivityTaskFailed – Failed to process RequestCancelActivityTask -// decision. This happens when the decision isn't configured properly. -// -// * RequestCancelExternalWorkflowExecutionFailed – Request to cancel an -// external workflow execution failed. -// -// * RequestCancelExternalWorkflowExecutionInitiated – A request was made -// to request the cancellation of an external workflow execution. 
-// -// * ScheduleActivityTaskFailed – Failed to process ScheduleActivityTask -// decision. This happens when the decision isn't configured properly, for -// example the activity type specified isn't registered. -// -// * SignalExternalWorkflowExecutionFailed – The request to signal an external -// workflow execution failed. -// -// * SignalExternalWorkflowExecutionInitiated – A request to signal an external -// workflow was made. -// -// * StartActivityTaskFailed – A scheduled activity task failed to start. -// -// * StartChildWorkflowExecutionFailed – Failed to process StartChildWorkflowExecution -// decision. This happens when the decision isn't configured properly, for -// example the workflow type specified isn't registered. -// -// * StartChildWorkflowExecutionInitiated – A request was made to start a -// child workflow execution. -// -// * StartTimerFailed – Failed to process StartTimer decision. This happens -// when the decision isn't configured properly, for example a timer already -// exists with the specified timer Id. -// -// * TimerCanceled – A timer, previously started for this workflow execution, -// was successfully canceled. -// -// * TimerFired – A timer, previously started for this workflow execution, -// fired. -// -// * TimerStarted – A timer was started for the workflow execution due to -// a StartTimer decision. -// -// * WorkflowExecutionCancelRequested – A request to cancel this workflow -// execution was made. -// -// * WorkflowExecutionCanceled – The workflow execution was successfully -// canceled and closed. -// -// * WorkflowExecutionCompleted – The workflow execution was closed due to -// successful completion. -// -// * WorkflowExecutionContinuedAsNew – The workflow execution was closed -// and a new execution of the same type was created with the same workflowId. -// -// * WorkflowExecutionFailed – The workflow execution closed due to a failure. 
-// -// * WorkflowExecutionSignaled – An external signal was received for the -// workflow execution. -// -// * WorkflowExecutionStarted – The workflow execution was started. -// -// * WorkflowExecutionTerminated – The workflow execution was terminated. -// -// * WorkflowExecutionTimedOut – The workflow execution was closed because -// a time out was exceeded. -type HistoryEvent struct { - _ struct{} `type:"structure"` - - // If the event is of type ActivityTaskcancelRequested then this member is set - // and provides detailed information about the event. It isn't set for other - // event types. - ActivityTaskCancelRequestedEventAttributes *ActivityTaskCancelRequestedEventAttributes `locationName:"activityTaskCancelRequestedEventAttributes" type:"structure"` - - // If the event is of type ActivityTaskCanceled then this member is set and - // provides detailed information about the event. It isn't set for other event - // types. - ActivityTaskCanceledEventAttributes *ActivityTaskCanceledEventAttributes `locationName:"activityTaskCanceledEventAttributes" type:"structure"` - - // If the event is of type ActivityTaskCompleted then this member is set and - // provides detailed information about the event. It isn't set for other event - // types. - ActivityTaskCompletedEventAttributes *ActivityTaskCompletedEventAttributes `locationName:"activityTaskCompletedEventAttributes" type:"structure"` - - // If the event is of type ActivityTaskFailed then this member is set and provides - // detailed information about the event. It isn't set for other event types. - ActivityTaskFailedEventAttributes *ActivityTaskFailedEventAttributes `locationName:"activityTaskFailedEventAttributes" type:"structure"` - - // If the event is of type ActivityTaskScheduled then this member is set and - // provides detailed information about the event. It isn't set for other event - // types. 
- ActivityTaskScheduledEventAttributes *ActivityTaskScheduledEventAttributes `locationName:"activityTaskScheduledEventAttributes" type:"structure"` - - // If the event is of type ActivityTaskStarted then this member is set and provides - // detailed information about the event. It isn't set for other event types. - ActivityTaskStartedEventAttributes *ActivityTaskStartedEventAttributes `locationName:"activityTaskStartedEventAttributes" type:"structure"` - - // If the event is of type ActivityTaskTimedOut then this member is set and - // provides detailed information about the event. It isn't set for other event - // types. - ActivityTaskTimedOutEventAttributes *ActivityTaskTimedOutEventAttributes `locationName:"activityTaskTimedOutEventAttributes" type:"structure"` - - // If the event is of type CancelTimerFailed then this member is set and provides - // detailed information about the event. It isn't set for other event types. - CancelTimerFailedEventAttributes *CancelTimerFailedEventAttributes `locationName:"cancelTimerFailedEventAttributes" type:"structure"` - - // If the event is of type CancelWorkflowExecutionFailed then this member is - // set and provides detailed information about the event. It isn't set for other - // event types. - CancelWorkflowExecutionFailedEventAttributes *CancelWorkflowExecutionFailedEventAttributes `locationName:"cancelWorkflowExecutionFailedEventAttributes" type:"structure"` - - // If the event is of type ChildWorkflowExecutionCanceled then this member is - // set and provides detailed information about the event. It isn't set for other - // event types. - ChildWorkflowExecutionCanceledEventAttributes *ChildWorkflowExecutionCanceledEventAttributes `locationName:"childWorkflowExecutionCanceledEventAttributes" type:"structure"` - - // If the event is of type ChildWorkflowExecutionCompleted then this member - // is set and provides detailed information about the event. It isn't set for - // other event types. 
- ChildWorkflowExecutionCompletedEventAttributes *ChildWorkflowExecutionCompletedEventAttributes `locationName:"childWorkflowExecutionCompletedEventAttributes" type:"structure"` - - // If the event is of type ChildWorkflowExecutionFailed then this member is - // set and provides detailed information about the event. It isn't set for other - // event types. - ChildWorkflowExecutionFailedEventAttributes *ChildWorkflowExecutionFailedEventAttributes `locationName:"childWorkflowExecutionFailedEventAttributes" type:"structure"` - - // If the event is of type ChildWorkflowExecutionStarted then this member is - // set and provides detailed information about the event. It isn't set for other - // event types. - ChildWorkflowExecutionStartedEventAttributes *ChildWorkflowExecutionStartedEventAttributes `locationName:"childWorkflowExecutionStartedEventAttributes" type:"structure"` - - // If the event is of type ChildWorkflowExecutionTerminated then this member - // is set and provides detailed information about the event. It isn't set for - // other event types. - ChildWorkflowExecutionTerminatedEventAttributes *ChildWorkflowExecutionTerminatedEventAttributes `locationName:"childWorkflowExecutionTerminatedEventAttributes" type:"structure"` - - // If the event is of type ChildWorkflowExecutionTimedOut then this member is - // set and provides detailed information about the event. It isn't set for other - // event types. - ChildWorkflowExecutionTimedOutEventAttributes *ChildWorkflowExecutionTimedOutEventAttributes `locationName:"childWorkflowExecutionTimedOutEventAttributes" type:"structure"` - - // If the event is of type CompleteWorkflowExecutionFailed then this member - // is set and provides detailed information about the event. It isn't set for - // other event types. 
- CompleteWorkflowExecutionFailedEventAttributes *CompleteWorkflowExecutionFailedEventAttributes `locationName:"completeWorkflowExecutionFailedEventAttributes" type:"structure"` - - // If the event is of type ContinueAsNewWorkflowExecutionFailed then this member - // is set and provides detailed information about the event. It isn't set for - // other event types. - ContinueAsNewWorkflowExecutionFailedEventAttributes *ContinueAsNewWorkflowExecutionFailedEventAttributes `locationName:"continueAsNewWorkflowExecutionFailedEventAttributes" type:"structure"` - - // If the event is of type DecisionTaskCompleted then this member is set and - // provides detailed information about the event. It isn't set for other event - // types. - DecisionTaskCompletedEventAttributes *DecisionTaskCompletedEventAttributes `locationName:"decisionTaskCompletedEventAttributes" type:"structure"` - - // If the event is of type DecisionTaskScheduled then this member is set and - // provides detailed information about the event. It isn't set for other event - // types. - DecisionTaskScheduledEventAttributes *DecisionTaskScheduledEventAttributes `locationName:"decisionTaskScheduledEventAttributes" type:"structure"` - - // If the event is of type DecisionTaskStarted then this member is set and provides - // detailed information about the event. It isn't set for other event types. - DecisionTaskStartedEventAttributes *DecisionTaskStartedEventAttributes `locationName:"decisionTaskStartedEventAttributes" type:"structure"` - - // If the event is of type DecisionTaskTimedOut then this member is set and - // provides detailed information about the event. It isn't set for other event - // types. - DecisionTaskTimedOutEventAttributes *DecisionTaskTimedOutEventAttributes `locationName:"decisionTaskTimedOutEventAttributes" type:"structure"` - - // The system generated ID of the event. This ID uniquely identifies the event - // with in the workflow execution history. 
- // - // EventId is a required field - EventId *int64 `locationName:"eventId" type:"long" required:"true"` - - // The date and time when the event occurred. - // - // EventTimestamp is a required field - EventTimestamp *time.Time `locationName:"eventTimestamp" type:"timestamp" timestampFormat:"unix" required:"true"` - - // The type of the history event. - // - // EventType is a required field - EventType *string `locationName:"eventType" type:"string" required:"true" enum:"EventType"` - - // If the event is of type ExternalWorkflowExecutionCancelRequested then this - // member is set and provides detailed information about the event. It isn't - // set for other event types. - ExternalWorkflowExecutionCancelRequestedEventAttributes *ExternalWorkflowExecutionCancelRequestedEventAttributes `locationName:"externalWorkflowExecutionCancelRequestedEventAttributes" type:"structure"` - - // If the event is of type ExternalWorkflowExecutionSignaled then this member - // is set and provides detailed information about the event. It isn't set for - // other event types. - ExternalWorkflowExecutionSignaledEventAttributes *ExternalWorkflowExecutionSignaledEventAttributes `locationName:"externalWorkflowExecutionSignaledEventAttributes" type:"structure"` - - // If the event is of type FailWorkflowExecutionFailed then this member is set - // and provides detailed information about the event. It isn't set for other - // event types. - FailWorkflowExecutionFailedEventAttributes *FailWorkflowExecutionFailedEventAttributes `locationName:"failWorkflowExecutionFailedEventAttributes" type:"structure"` - - // Provides the details of the LambdaFunctionCompleted event. It isn't set for - // other event types. - LambdaFunctionCompletedEventAttributes *LambdaFunctionCompletedEventAttributes `locationName:"lambdaFunctionCompletedEventAttributes" type:"structure"` - - // Provides the details of the LambdaFunctionFailed event. It isn't set for - // other event types. 
- LambdaFunctionFailedEventAttributes *LambdaFunctionFailedEventAttributes `locationName:"lambdaFunctionFailedEventAttributes" type:"structure"` - - // Provides the details of the LambdaFunctionScheduled event. It isn't set for - // other event types. - LambdaFunctionScheduledEventAttributes *LambdaFunctionScheduledEventAttributes `locationName:"lambdaFunctionScheduledEventAttributes" type:"structure"` - - // Provides the details of the LambdaFunctionStarted event. It isn't set for - // other event types. - LambdaFunctionStartedEventAttributes *LambdaFunctionStartedEventAttributes `locationName:"lambdaFunctionStartedEventAttributes" type:"structure"` - - // Provides the details of the LambdaFunctionTimedOut event. It isn't set for - // other event types. - LambdaFunctionTimedOutEventAttributes *LambdaFunctionTimedOutEventAttributes `locationName:"lambdaFunctionTimedOutEventAttributes" type:"structure"` - - // If the event is of type MarkerRecorded then this member is set and provides - // detailed information about the event. It isn't set for other event types. - MarkerRecordedEventAttributes *MarkerRecordedEventAttributes `locationName:"markerRecordedEventAttributes" type:"structure"` - - // If the event is of type DecisionTaskFailed then this member is set and provides - // detailed information about the event. It isn't set for other event types. - RecordMarkerFailedEventAttributes *RecordMarkerFailedEventAttributes `locationName:"recordMarkerFailedEventAttributes" type:"structure"` - - // If the event is of type RequestCancelActivityTaskFailed then this member - // is set and provides detailed information about the event. It isn't set for - // other event types. 
- RequestCancelActivityTaskFailedEventAttributes *RequestCancelActivityTaskFailedEventAttributes `locationName:"requestCancelActivityTaskFailedEventAttributes" type:"structure"` - - // If the event is of type RequestCancelExternalWorkflowExecutionFailed then - // this member is set and provides detailed information about the event. It - // isn't set for other event types. - RequestCancelExternalWorkflowExecutionFailedEventAttributes *RequestCancelExternalWorkflowExecutionFailedEventAttributes `locationName:"requestCancelExternalWorkflowExecutionFailedEventAttributes" type:"structure"` - - // If the event is of type RequestCancelExternalWorkflowExecutionInitiated then - // this member is set and provides detailed information about the event. It - // isn't set for other event types. - RequestCancelExternalWorkflowExecutionInitiatedEventAttributes *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes `locationName:"requestCancelExternalWorkflowExecutionInitiatedEventAttributes" type:"structure"` - - // If the event is of type ScheduleActivityTaskFailed then this member is set - // and provides detailed information about the event. It isn't set for other - // event types. - ScheduleActivityTaskFailedEventAttributes *ScheduleActivityTaskFailedEventAttributes `locationName:"scheduleActivityTaskFailedEventAttributes" type:"structure"` - - // Provides the details of the ScheduleLambdaFunctionFailed event. It isn't - // set for other event types. - ScheduleLambdaFunctionFailedEventAttributes *ScheduleLambdaFunctionFailedEventAttributes `locationName:"scheduleLambdaFunctionFailedEventAttributes" type:"structure"` - - // If the event is of type SignalExternalWorkflowExecutionFailed then this member - // is set and provides detailed information about the event. It isn't set for - // other event types. 
- SignalExternalWorkflowExecutionFailedEventAttributes *SignalExternalWorkflowExecutionFailedEventAttributes `locationName:"signalExternalWorkflowExecutionFailedEventAttributes" type:"structure"` - - // If the event is of type SignalExternalWorkflowExecutionInitiated then this - // member is set and provides detailed information about the event. It isn't - // set for other event types. - SignalExternalWorkflowExecutionInitiatedEventAttributes *SignalExternalWorkflowExecutionInitiatedEventAttributes `locationName:"signalExternalWorkflowExecutionInitiatedEventAttributes" type:"structure"` - - // If the event is of type StartChildWorkflowExecutionFailed then this member - // is set and provides detailed information about the event. It isn't set for - // other event types. - StartChildWorkflowExecutionFailedEventAttributes *StartChildWorkflowExecutionFailedEventAttributes `locationName:"startChildWorkflowExecutionFailedEventAttributes" type:"structure"` - - // If the event is of type StartChildWorkflowExecutionInitiated then this member - // is set and provides detailed information about the event. It isn't set for - // other event types. - StartChildWorkflowExecutionInitiatedEventAttributes *StartChildWorkflowExecutionInitiatedEventAttributes `locationName:"startChildWorkflowExecutionInitiatedEventAttributes" type:"structure"` - - // Provides the details of the StartLambdaFunctionFailed event. It isn't set - // for other event types. - StartLambdaFunctionFailedEventAttributes *StartLambdaFunctionFailedEventAttributes `locationName:"startLambdaFunctionFailedEventAttributes" type:"structure"` - - // If the event is of type StartTimerFailed then this member is set and provides - // detailed information about the event. It isn't set for other event types. 
- StartTimerFailedEventAttributes *StartTimerFailedEventAttributes `locationName:"startTimerFailedEventAttributes" type:"structure"` - - // If the event is of type TimerCanceled then this member is set and provides - // detailed information about the event. It isn't set for other event types. - TimerCanceledEventAttributes *TimerCanceledEventAttributes `locationName:"timerCanceledEventAttributes" type:"structure"` - - // If the event is of type TimerFired then this member is set and provides detailed - // information about the event. It isn't set for other event types. - TimerFiredEventAttributes *TimerFiredEventAttributes `locationName:"timerFiredEventAttributes" type:"structure"` - - // If the event is of type TimerStarted then this member is set and provides - // detailed information about the event. It isn't set for other event types. - TimerStartedEventAttributes *TimerStartedEventAttributes `locationName:"timerStartedEventAttributes" type:"structure"` - - // If the event is of type WorkflowExecutionCancelRequested then this member - // is set and provides detailed information about the event. It isn't set for - // other event types. - WorkflowExecutionCancelRequestedEventAttributes *WorkflowExecutionCancelRequestedEventAttributes `locationName:"workflowExecutionCancelRequestedEventAttributes" type:"structure"` - - // If the event is of type WorkflowExecutionCanceled then this member is set - // and provides detailed information about the event. It isn't set for other - // event types. - WorkflowExecutionCanceledEventAttributes *WorkflowExecutionCanceledEventAttributes `locationName:"workflowExecutionCanceledEventAttributes" type:"structure"` - - // If the event is of type WorkflowExecutionCompleted then this member is set - // and provides detailed information about the event. It isn't set for other - // event types. 
- WorkflowExecutionCompletedEventAttributes *WorkflowExecutionCompletedEventAttributes `locationName:"workflowExecutionCompletedEventAttributes" type:"structure"` - - // If the event is of type WorkflowExecutionContinuedAsNew then this member - // is set and provides detailed information about the event. It isn't set for - // other event types. - WorkflowExecutionContinuedAsNewEventAttributes *WorkflowExecutionContinuedAsNewEventAttributes `locationName:"workflowExecutionContinuedAsNewEventAttributes" type:"structure"` - - // If the event is of type WorkflowExecutionFailed then this member is set and - // provides detailed information about the event. It isn't set for other event - // types. - WorkflowExecutionFailedEventAttributes *WorkflowExecutionFailedEventAttributes `locationName:"workflowExecutionFailedEventAttributes" type:"structure"` - - // If the event is of type WorkflowExecutionSignaled then this member is set - // and provides detailed information about the event. It isn't set for other - // event types. - WorkflowExecutionSignaledEventAttributes *WorkflowExecutionSignaledEventAttributes `locationName:"workflowExecutionSignaledEventAttributes" type:"structure"` - - // If the event is of type WorkflowExecutionStarted then this member is set - // and provides detailed information about the event. It isn't set for other - // event types. - WorkflowExecutionStartedEventAttributes *WorkflowExecutionStartedEventAttributes `locationName:"workflowExecutionStartedEventAttributes" type:"structure"` - - // If the event is of type WorkflowExecutionTerminated then this member is set - // and provides detailed information about the event. It isn't set for other - // event types. 
- WorkflowExecutionTerminatedEventAttributes *WorkflowExecutionTerminatedEventAttributes `locationName:"workflowExecutionTerminatedEventAttributes" type:"structure"` - - // If the event is of type WorkflowExecutionTimedOut then this member is set - // and provides detailed information about the event. It isn't set for other - // event types. - WorkflowExecutionTimedOutEventAttributes *WorkflowExecutionTimedOutEventAttributes `locationName:"workflowExecutionTimedOutEventAttributes" type:"structure"` -} - -// String returns the string representation -func (s HistoryEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HistoryEvent) GoString() string { - return s.String() -} - -// SetActivityTaskCancelRequestedEventAttributes sets the ActivityTaskCancelRequestedEventAttributes field's value. -func (s *HistoryEvent) SetActivityTaskCancelRequestedEventAttributes(v *ActivityTaskCancelRequestedEventAttributes) *HistoryEvent { - s.ActivityTaskCancelRequestedEventAttributes = v - return s -} - -// SetActivityTaskCanceledEventAttributes sets the ActivityTaskCanceledEventAttributes field's value. -func (s *HistoryEvent) SetActivityTaskCanceledEventAttributes(v *ActivityTaskCanceledEventAttributes) *HistoryEvent { - s.ActivityTaskCanceledEventAttributes = v - return s -} - -// SetActivityTaskCompletedEventAttributes sets the ActivityTaskCompletedEventAttributes field's value. -func (s *HistoryEvent) SetActivityTaskCompletedEventAttributes(v *ActivityTaskCompletedEventAttributes) *HistoryEvent { - s.ActivityTaskCompletedEventAttributes = v - return s -} - -// SetActivityTaskFailedEventAttributes sets the ActivityTaskFailedEventAttributes field's value. 
-func (s *HistoryEvent) SetActivityTaskFailedEventAttributes(v *ActivityTaskFailedEventAttributes) *HistoryEvent { - s.ActivityTaskFailedEventAttributes = v - return s -} - -// SetActivityTaskScheduledEventAttributes sets the ActivityTaskScheduledEventAttributes field's value. -func (s *HistoryEvent) SetActivityTaskScheduledEventAttributes(v *ActivityTaskScheduledEventAttributes) *HistoryEvent { - s.ActivityTaskScheduledEventAttributes = v - return s -} - -// SetActivityTaskStartedEventAttributes sets the ActivityTaskStartedEventAttributes field's value. -func (s *HistoryEvent) SetActivityTaskStartedEventAttributes(v *ActivityTaskStartedEventAttributes) *HistoryEvent { - s.ActivityTaskStartedEventAttributes = v - return s -} - -// SetActivityTaskTimedOutEventAttributes sets the ActivityTaskTimedOutEventAttributes field's value. -func (s *HistoryEvent) SetActivityTaskTimedOutEventAttributes(v *ActivityTaskTimedOutEventAttributes) *HistoryEvent { - s.ActivityTaskTimedOutEventAttributes = v - return s -} - -// SetCancelTimerFailedEventAttributes sets the CancelTimerFailedEventAttributes field's value. -func (s *HistoryEvent) SetCancelTimerFailedEventAttributes(v *CancelTimerFailedEventAttributes) *HistoryEvent { - s.CancelTimerFailedEventAttributes = v - return s -} - -// SetCancelWorkflowExecutionFailedEventAttributes sets the CancelWorkflowExecutionFailedEventAttributes field's value. -func (s *HistoryEvent) SetCancelWorkflowExecutionFailedEventAttributes(v *CancelWorkflowExecutionFailedEventAttributes) *HistoryEvent { - s.CancelWorkflowExecutionFailedEventAttributes = v - return s -} - -// SetChildWorkflowExecutionCanceledEventAttributes sets the ChildWorkflowExecutionCanceledEventAttributes field's value. 
-func (s *HistoryEvent) SetChildWorkflowExecutionCanceledEventAttributes(v *ChildWorkflowExecutionCanceledEventAttributes) *HistoryEvent { - s.ChildWorkflowExecutionCanceledEventAttributes = v - return s -} - -// SetChildWorkflowExecutionCompletedEventAttributes sets the ChildWorkflowExecutionCompletedEventAttributes field's value. -func (s *HistoryEvent) SetChildWorkflowExecutionCompletedEventAttributes(v *ChildWorkflowExecutionCompletedEventAttributes) *HistoryEvent { - s.ChildWorkflowExecutionCompletedEventAttributes = v - return s -} - -// SetChildWorkflowExecutionFailedEventAttributes sets the ChildWorkflowExecutionFailedEventAttributes field's value. -func (s *HistoryEvent) SetChildWorkflowExecutionFailedEventAttributes(v *ChildWorkflowExecutionFailedEventAttributes) *HistoryEvent { - s.ChildWorkflowExecutionFailedEventAttributes = v - return s -} - -// SetChildWorkflowExecutionStartedEventAttributes sets the ChildWorkflowExecutionStartedEventAttributes field's value. -func (s *HistoryEvent) SetChildWorkflowExecutionStartedEventAttributes(v *ChildWorkflowExecutionStartedEventAttributes) *HistoryEvent { - s.ChildWorkflowExecutionStartedEventAttributes = v - return s -} - -// SetChildWorkflowExecutionTerminatedEventAttributes sets the ChildWorkflowExecutionTerminatedEventAttributes field's value. -func (s *HistoryEvent) SetChildWorkflowExecutionTerminatedEventAttributes(v *ChildWorkflowExecutionTerminatedEventAttributes) *HistoryEvent { - s.ChildWorkflowExecutionTerminatedEventAttributes = v - return s -} - -// SetChildWorkflowExecutionTimedOutEventAttributes sets the ChildWorkflowExecutionTimedOutEventAttributes field's value. -func (s *HistoryEvent) SetChildWorkflowExecutionTimedOutEventAttributes(v *ChildWorkflowExecutionTimedOutEventAttributes) *HistoryEvent { - s.ChildWorkflowExecutionTimedOutEventAttributes = v - return s -} - -// SetCompleteWorkflowExecutionFailedEventAttributes sets the CompleteWorkflowExecutionFailedEventAttributes field's value. 
-func (s *HistoryEvent) SetCompleteWorkflowExecutionFailedEventAttributes(v *CompleteWorkflowExecutionFailedEventAttributes) *HistoryEvent { - s.CompleteWorkflowExecutionFailedEventAttributes = v - return s -} - -// SetContinueAsNewWorkflowExecutionFailedEventAttributes sets the ContinueAsNewWorkflowExecutionFailedEventAttributes field's value. -func (s *HistoryEvent) SetContinueAsNewWorkflowExecutionFailedEventAttributes(v *ContinueAsNewWorkflowExecutionFailedEventAttributes) *HistoryEvent { - s.ContinueAsNewWorkflowExecutionFailedEventAttributes = v - return s -} - -// SetDecisionTaskCompletedEventAttributes sets the DecisionTaskCompletedEventAttributes field's value. -func (s *HistoryEvent) SetDecisionTaskCompletedEventAttributes(v *DecisionTaskCompletedEventAttributes) *HistoryEvent { - s.DecisionTaskCompletedEventAttributes = v - return s -} - -// SetDecisionTaskScheduledEventAttributes sets the DecisionTaskScheduledEventAttributes field's value. -func (s *HistoryEvent) SetDecisionTaskScheduledEventAttributes(v *DecisionTaskScheduledEventAttributes) *HistoryEvent { - s.DecisionTaskScheduledEventAttributes = v - return s -} - -// SetDecisionTaskStartedEventAttributes sets the DecisionTaskStartedEventAttributes field's value. -func (s *HistoryEvent) SetDecisionTaskStartedEventAttributes(v *DecisionTaskStartedEventAttributes) *HistoryEvent { - s.DecisionTaskStartedEventAttributes = v - return s -} - -// SetDecisionTaskTimedOutEventAttributes sets the DecisionTaskTimedOutEventAttributes field's value. -func (s *HistoryEvent) SetDecisionTaskTimedOutEventAttributes(v *DecisionTaskTimedOutEventAttributes) *HistoryEvent { - s.DecisionTaskTimedOutEventAttributes = v - return s -} - -// SetEventId sets the EventId field's value. -func (s *HistoryEvent) SetEventId(v int64) *HistoryEvent { - s.EventId = &v - return s -} - -// SetEventTimestamp sets the EventTimestamp field's value. 
-func (s *HistoryEvent) SetEventTimestamp(v time.Time) *HistoryEvent { - s.EventTimestamp = &v - return s -} - -// SetEventType sets the EventType field's value. -func (s *HistoryEvent) SetEventType(v string) *HistoryEvent { - s.EventType = &v - return s -} - -// SetExternalWorkflowExecutionCancelRequestedEventAttributes sets the ExternalWorkflowExecutionCancelRequestedEventAttributes field's value. -func (s *HistoryEvent) SetExternalWorkflowExecutionCancelRequestedEventAttributes(v *ExternalWorkflowExecutionCancelRequestedEventAttributes) *HistoryEvent { - s.ExternalWorkflowExecutionCancelRequestedEventAttributes = v - return s -} - -// SetExternalWorkflowExecutionSignaledEventAttributes sets the ExternalWorkflowExecutionSignaledEventAttributes field's value. -func (s *HistoryEvent) SetExternalWorkflowExecutionSignaledEventAttributes(v *ExternalWorkflowExecutionSignaledEventAttributes) *HistoryEvent { - s.ExternalWorkflowExecutionSignaledEventAttributes = v - return s -} - -// SetFailWorkflowExecutionFailedEventAttributes sets the FailWorkflowExecutionFailedEventAttributes field's value. -func (s *HistoryEvent) SetFailWorkflowExecutionFailedEventAttributes(v *FailWorkflowExecutionFailedEventAttributes) *HistoryEvent { - s.FailWorkflowExecutionFailedEventAttributes = v - return s -} - -// SetLambdaFunctionCompletedEventAttributes sets the LambdaFunctionCompletedEventAttributes field's value. -func (s *HistoryEvent) SetLambdaFunctionCompletedEventAttributes(v *LambdaFunctionCompletedEventAttributes) *HistoryEvent { - s.LambdaFunctionCompletedEventAttributes = v - return s -} - -// SetLambdaFunctionFailedEventAttributes sets the LambdaFunctionFailedEventAttributes field's value. -func (s *HistoryEvent) SetLambdaFunctionFailedEventAttributes(v *LambdaFunctionFailedEventAttributes) *HistoryEvent { - s.LambdaFunctionFailedEventAttributes = v - return s -} - -// SetLambdaFunctionScheduledEventAttributes sets the LambdaFunctionScheduledEventAttributes field's value. 
-func (s *HistoryEvent) SetLambdaFunctionScheduledEventAttributes(v *LambdaFunctionScheduledEventAttributes) *HistoryEvent { - s.LambdaFunctionScheduledEventAttributes = v - return s -} - -// SetLambdaFunctionStartedEventAttributes sets the LambdaFunctionStartedEventAttributes field's value. -func (s *HistoryEvent) SetLambdaFunctionStartedEventAttributes(v *LambdaFunctionStartedEventAttributes) *HistoryEvent { - s.LambdaFunctionStartedEventAttributes = v - return s -} - -// SetLambdaFunctionTimedOutEventAttributes sets the LambdaFunctionTimedOutEventAttributes field's value. -func (s *HistoryEvent) SetLambdaFunctionTimedOutEventAttributes(v *LambdaFunctionTimedOutEventAttributes) *HistoryEvent { - s.LambdaFunctionTimedOutEventAttributes = v - return s -} - -// SetMarkerRecordedEventAttributes sets the MarkerRecordedEventAttributes field's value. -func (s *HistoryEvent) SetMarkerRecordedEventAttributes(v *MarkerRecordedEventAttributes) *HistoryEvent { - s.MarkerRecordedEventAttributes = v - return s -} - -// SetRecordMarkerFailedEventAttributes sets the RecordMarkerFailedEventAttributes field's value. -func (s *HistoryEvent) SetRecordMarkerFailedEventAttributes(v *RecordMarkerFailedEventAttributes) *HistoryEvent { - s.RecordMarkerFailedEventAttributes = v - return s -} - -// SetRequestCancelActivityTaskFailedEventAttributes sets the RequestCancelActivityTaskFailedEventAttributes field's value. -func (s *HistoryEvent) SetRequestCancelActivityTaskFailedEventAttributes(v *RequestCancelActivityTaskFailedEventAttributes) *HistoryEvent { - s.RequestCancelActivityTaskFailedEventAttributes = v - return s -} - -// SetRequestCancelExternalWorkflowExecutionFailedEventAttributes sets the RequestCancelExternalWorkflowExecutionFailedEventAttributes field's value. 
-func (s *HistoryEvent) SetRequestCancelExternalWorkflowExecutionFailedEventAttributes(v *RequestCancelExternalWorkflowExecutionFailedEventAttributes) *HistoryEvent { - s.RequestCancelExternalWorkflowExecutionFailedEventAttributes = v - return s -} - -// SetRequestCancelExternalWorkflowExecutionInitiatedEventAttributes sets the RequestCancelExternalWorkflowExecutionInitiatedEventAttributes field's value. -func (s *HistoryEvent) SetRequestCancelExternalWorkflowExecutionInitiatedEventAttributes(v *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes) *HistoryEvent { - s.RequestCancelExternalWorkflowExecutionInitiatedEventAttributes = v - return s -} - -// SetScheduleActivityTaskFailedEventAttributes sets the ScheduleActivityTaskFailedEventAttributes field's value. -func (s *HistoryEvent) SetScheduleActivityTaskFailedEventAttributes(v *ScheduleActivityTaskFailedEventAttributes) *HistoryEvent { - s.ScheduleActivityTaskFailedEventAttributes = v - return s -} - -// SetScheduleLambdaFunctionFailedEventAttributes sets the ScheduleLambdaFunctionFailedEventAttributes field's value. -func (s *HistoryEvent) SetScheduleLambdaFunctionFailedEventAttributes(v *ScheduleLambdaFunctionFailedEventAttributes) *HistoryEvent { - s.ScheduleLambdaFunctionFailedEventAttributes = v - return s -} - -// SetSignalExternalWorkflowExecutionFailedEventAttributes sets the SignalExternalWorkflowExecutionFailedEventAttributes field's value. -func (s *HistoryEvent) SetSignalExternalWorkflowExecutionFailedEventAttributes(v *SignalExternalWorkflowExecutionFailedEventAttributes) *HistoryEvent { - s.SignalExternalWorkflowExecutionFailedEventAttributes = v - return s -} - -// SetSignalExternalWorkflowExecutionInitiatedEventAttributes sets the SignalExternalWorkflowExecutionInitiatedEventAttributes field's value. 
-func (s *HistoryEvent) SetSignalExternalWorkflowExecutionInitiatedEventAttributes(v *SignalExternalWorkflowExecutionInitiatedEventAttributes) *HistoryEvent { - s.SignalExternalWorkflowExecutionInitiatedEventAttributes = v - return s -} - -// SetStartChildWorkflowExecutionFailedEventAttributes sets the StartChildWorkflowExecutionFailedEventAttributes field's value. -func (s *HistoryEvent) SetStartChildWorkflowExecutionFailedEventAttributes(v *StartChildWorkflowExecutionFailedEventAttributes) *HistoryEvent { - s.StartChildWorkflowExecutionFailedEventAttributes = v - return s -} - -// SetStartChildWorkflowExecutionInitiatedEventAttributes sets the StartChildWorkflowExecutionInitiatedEventAttributes field's value. -func (s *HistoryEvent) SetStartChildWorkflowExecutionInitiatedEventAttributes(v *StartChildWorkflowExecutionInitiatedEventAttributes) *HistoryEvent { - s.StartChildWorkflowExecutionInitiatedEventAttributes = v - return s -} - -// SetStartLambdaFunctionFailedEventAttributes sets the StartLambdaFunctionFailedEventAttributes field's value. -func (s *HistoryEvent) SetStartLambdaFunctionFailedEventAttributes(v *StartLambdaFunctionFailedEventAttributes) *HistoryEvent { - s.StartLambdaFunctionFailedEventAttributes = v - return s -} - -// SetStartTimerFailedEventAttributes sets the StartTimerFailedEventAttributes field's value. -func (s *HistoryEvent) SetStartTimerFailedEventAttributes(v *StartTimerFailedEventAttributes) *HistoryEvent { - s.StartTimerFailedEventAttributes = v - return s -} - -// SetTimerCanceledEventAttributes sets the TimerCanceledEventAttributes field's value. -func (s *HistoryEvent) SetTimerCanceledEventAttributes(v *TimerCanceledEventAttributes) *HistoryEvent { - s.TimerCanceledEventAttributes = v - return s -} - -// SetTimerFiredEventAttributes sets the TimerFiredEventAttributes field's value. 
-func (s *HistoryEvent) SetTimerFiredEventAttributes(v *TimerFiredEventAttributes) *HistoryEvent { - s.TimerFiredEventAttributes = v - return s -} - -// SetTimerStartedEventAttributes sets the TimerStartedEventAttributes field's value. -func (s *HistoryEvent) SetTimerStartedEventAttributes(v *TimerStartedEventAttributes) *HistoryEvent { - s.TimerStartedEventAttributes = v - return s -} - -// SetWorkflowExecutionCancelRequestedEventAttributes sets the WorkflowExecutionCancelRequestedEventAttributes field's value. -func (s *HistoryEvent) SetWorkflowExecutionCancelRequestedEventAttributes(v *WorkflowExecutionCancelRequestedEventAttributes) *HistoryEvent { - s.WorkflowExecutionCancelRequestedEventAttributes = v - return s -} - -// SetWorkflowExecutionCanceledEventAttributes sets the WorkflowExecutionCanceledEventAttributes field's value. -func (s *HistoryEvent) SetWorkflowExecutionCanceledEventAttributes(v *WorkflowExecutionCanceledEventAttributes) *HistoryEvent { - s.WorkflowExecutionCanceledEventAttributes = v - return s -} - -// SetWorkflowExecutionCompletedEventAttributes sets the WorkflowExecutionCompletedEventAttributes field's value. -func (s *HistoryEvent) SetWorkflowExecutionCompletedEventAttributes(v *WorkflowExecutionCompletedEventAttributes) *HistoryEvent { - s.WorkflowExecutionCompletedEventAttributes = v - return s -} - -// SetWorkflowExecutionContinuedAsNewEventAttributes sets the WorkflowExecutionContinuedAsNewEventAttributes field's value. -func (s *HistoryEvent) SetWorkflowExecutionContinuedAsNewEventAttributes(v *WorkflowExecutionContinuedAsNewEventAttributes) *HistoryEvent { - s.WorkflowExecutionContinuedAsNewEventAttributes = v - return s -} - -// SetWorkflowExecutionFailedEventAttributes sets the WorkflowExecutionFailedEventAttributes field's value. 
-func (s *HistoryEvent) SetWorkflowExecutionFailedEventAttributes(v *WorkflowExecutionFailedEventAttributes) *HistoryEvent { - s.WorkflowExecutionFailedEventAttributes = v - return s -} - -// SetWorkflowExecutionSignaledEventAttributes sets the WorkflowExecutionSignaledEventAttributes field's value. -func (s *HistoryEvent) SetWorkflowExecutionSignaledEventAttributes(v *WorkflowExecutionSignaledEventAttributes) *HistoryEvent { - s.WorkflowExecutionSignaledEventAttributes = v - return s -} - -// SetWorkflowExecutionStartedEventAttributes sets the WorkflowExecutionStartedEventAttributes field's value. -func (s *HistoryEvent) SetWorkflowExecutionStartedEventAttributes(v *WorkflowExecutionStartedEventAttributes) *HistoryEvent { - s.WorkflowExecutionStartedEventAttributes = v - return s -} - -// SetWorkflowExecutionTerminatedEventAttributes sets the WorkflowExecutionTerminatedEventAttributes field's value. -func (s *HistoryEvent) SetWorkflowExecutionTerminatedEventAttributes(v *WorkflowExecutionTerminatedEventAttributes) *HistoryEvent { - s.WorkflowExecutionTerminatedEventAttributes = v - return s -} - -// SetWorkflowExecutionTimedOutEventAttributes sets the WorkflowExecutionTimedOutEventAttributes field's value. -func (s *HistoryEvent) SetWorkflowExecutionTimedOutEventAttributes(v *WorkflowExecutionTimedOutEventAttributes) *HistoryEvent { - s.WorkflowExecutionTimedOutEventAttributes = v - return s -} - -// Provides the details of the LambdaFunctionCompleted event. It isn't set for -// other event types. -type LambdaFunctionCompletedEventAttributes struct { - _ struct{} `type:"structure"` - - // The results of the Lambda task. - Result *string `locationName:"result" type:"string"` - - // The ID of the LambdaFunctionScheduled event that was recorded when this Lambda - // task was scheduled. To help diagnose issues, use this information to trace - // back the chain of events leading up to this event. 
- // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` - - // The ID of the LambdaFunctionStarted event recorded when this activity task - // started. To help diagnose issues, use this information to trace back the - // chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s LambdaFunctionCompletedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LambdaFunctionCompletedEventAttributes) GoString() string { - return s.String() -} - -// SetResult sets the Result field's value. -func (s *LambdaFunctionCompletedEventAttributes) SetResult(v string) *LambdaFunctionCompletedEventAttributes { - s.Result = &v - return s -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *LambdaFunctionCompletedEventAttributes) SetScheduledEventId(v int64) *LambdaFunctionCompletedEventAttributes { - s.ScheduledEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *LambdaFunctionCompletedEventAttributes) SetStartedEventId(v int64) *LambdaFunctionCompletedEventAttributes { - s.StartedEventId = &v - return s -} - -// Provides the details of the LambdaFunctionFailed event. It isn't set for -// other event types. -type LambdaFunctionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The details of the failure. - Details *string `locationName:"details" type:"string"` - - // The reason provided for the failure. - Reason *string `locationName:"reason" type:"string"` - - // The ID of the LambdaFunctionScheduled event that was recorded when this activity - // task was scheduled. To help diagnose issues, use this information to trace - // back the chain of events leading up to this event. 
- // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` - - // The ID of the LambdaFunctionStarted event recorded when this activity task - // started. To help diagnose issues, use this information to trace back the - // chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s LambdaFunctionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LambdaFunctionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. -func (s *LambdaFunctionFailedEventAttributes) SetDetails(v string) *LambdaFunctionFailedEventAttributes { - s.Details = &v - return s -} - -// SetReason sets the Reason field's value. -func (s *LambdaFunctionFailedEventAttributes) SetReason(v string) *LambdaFunctionFailedEventAttributes { - s.Reason = &v - return s -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *LambdaFunctionFailedEventAttributes) SetScheduledEventId(v int64) *LambdaFunctionFailedEventAttributes { - s.ScheduledEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *LambdaFunctionFailedEventAttributes) SetStartedEventId(v int64) *LambdaFunctionFailedEventAttributes { - s.StartedEventId = &v - return s -} - -// Provides the details of the LambdaFunctionScheduled event. It isn't set for -// other event types. -type LambdaFunctionScheduledEventAttributes struct { - _ struct{} `type:"structure"` - - // Data attached to the event that the decider can use in subsequent workflow - // tasks. This data isn't sent to the Lambda task. 
- Control *string `locationName:"control" type:"string"` - - // The ID of the LambdaFunctionCompleted event corresponding to the decision - // that resulted in scheduling this activity task. To help diagnose issues, - // use this information to trace back the chain of events leading up to this - // event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The unique ID of the Lambda task. - // - // Id is a required field - Id *string `locationName:"id" min:"1" type:"string" required:"true"` - - // The input provided to the Lambda task. - Input *string `locationName:"input" type:"string"` - - // The name of the Lambda function. - // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` - - // The maximum amount of time a worker can take to process the Lambda task. - StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"` -} - -// String returns the string representation -func (s LambdaFunctionScheduledEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LambdaFunctionScheduledEventAttributes) GoString() string { - return s.String() -} - -// SetControl sets the Control field's value. -func (s *LambdaFunctionScheduledEventAttributes) SetControl(v string) *LambdaFunctionScheduledEventAttributes { - s.Control = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *LambdaFunctionScheduledEventAttributes) SetDecisionTaskCompletedEventId(v int64) *LambdaFunctionScheduledEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetId sets the Id field's value. 
-func (s *LambdaFunctionScheduledEventAttributes) SetId(v string) *LambdaFunctionScheduledEventAttributes { - s.Id = &v - return s -} - -// SetInput sets the Input field's value. -func (s *LambdaFunctionScheduledEventAttributes) SetInput(v string) *LambdaFunctionScheduledEventAttributes { - s.Input = &v - return s -} - -// SetName sets the Name field's value. -func (s *LambdaFunctionScheduledEventAttributes) SetName(v string) *LambdaFunctionScheduledEventAttributes { - s.Name = &v - return s -} - -// SetStartToCloseTimeout sets the StartToCloseTimeout field's value. -func (s *LambdaFunctionScheduledEventAttributes) SetStartToCloseTimeout(v string) *LambdaFunctionScheduledEventAttributes { - s.StartToCloseTimeout = &v - return s -} - -// Provides the details of the LambdaFunctionStarted event. It isn't set for -// other event types. -type LambdaFunctionStartedEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the LambdaFunctionScheduled event that was recorded when this activity - // task was scheduled. To help diagnose issues, use this information to trace - // back the chain of events leading up to this event. - // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s LambdaFunctionStartedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LambdaFunctionStartedEventAttributes) GoString() string { - return s.String() -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *LambdaFunctionStartedEventAttributes) SetScheduledEventId(v int64) *LambdaFunctionStartedEventAttributes { - s.ScheduledEventId = &v - return s -} - -// Provides details of the LambdaFunctionTimedOut event. 
-type LambdaFunctionTimedOutEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the LambdaFunctionScheduled event that was recorded when this activity - // task was scheduled. To help diagnose issues, use this information to trace - // back the chain of events leading up to this event. - // - // ScheduledEventId is a required field - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` - - // The ID of the ActivityTaskStarted event that was recorded when this activity - // task started. To help diagnose issues, use this information to trace back - // the chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The type of the timeout that caused this event. - TimeoutType *string `locationName:"timeoutType" type:"string" enum:"LambdaFunctionTimeoutType"` -} - -// String returns the string representation -func (s LambdaFunctionTimedOutEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LambdaFunctionTimedOutEventAttributes) GoString() string { - return s.String() -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *LambdaFunctionTimedOutEventAttributes) SetScheduledEventId(v int64) *LambdaFunctionTimedOutEventAttributes { - s.ScheduledEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *LambdaFunctionTimedOutEventAttributes) SetStartedEventId(v int64) *LambdaFunctionTimedOutEventAttributes { - s.StartedEventId = &v - return s -} - -// SetTimeoutType sets the TimeoutType field's value. 
-func (s *LambdaFunctionTimedOutEventAttributes) SetTimeoutType(v string) *LambdaFunctionTimedOutEventAttributes { - s.TimeoutType = &v - return s -} - -type ListActivityTypesInput struct { - _ struct{} `type:"structure"` - - // The name of the domain in which the activity types have been registered. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. - MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - - // If specified, only lists the activity types that have this name. - Name *string `locationName:"name" min:"1" type:"string"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - NextPageToken *string `locationName:"nextPageToken" type:"string"` - - // Specifies the registration status of the activity types to list. - // - // RegistrationStatus is a required field - RegistrationStatus *string `locationName:"registrationStatus" type:"string" required:"true" enum:"RegistrationStatus"` - - // When set to true, returns the results in reverse order. By default, the results - // are returned in ascending alphabetical order by name of the activity types. 
- ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` -} - -// String returns the string representation -func (s ListActivityTypesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListActivityTypesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListActivityTypesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListActivityTypesInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.RegistrationStatus == nil { - invalidParams.Add(request.NewErrParamRequired("RegistrationStatus")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *ListActivityTypesInput) SetDomain(v string) *ListActivityTypesInput { - s.Domain = &v - return s -} - -// SetMaximumPageSize sets the MaximumPageSize field's value. -func (s *ListActivityTypesInput) SetMaximumPageSize(v int64) *ListActivityTypesInput { - s.MaximumPageSize = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListActivityTypesInput) SetName(v string) *ListActivityTypesInput { - s.Name = &v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *ListActivityTypesInput) SetNextPageToken(v string) *ListActivityTypesInput { - s.NextPageToken = &v - return s -} - -// SetRegistrationStatus sets the RegistrationStatus field's value. -func (s *ListActivityTypesInput) SetRegistrationStatus(v string) *ListActivityTypesInput { - s.RegistrationStatus = &v - return s -} - -// SetReverseOrder sets the ReverseOrder field's value. 
-func (s *ListActivityTypesInput) SetReverseOrder(v bool) *ListActivityTypesInput { - s.ReverseOrder = &v - return s -} - -// Contains a paginated list of activity type information structures. -type ListActivityTypesOutput struct { - _ struct{} `type:"structure"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - NextPageToken *string `locationName:"nextPageToken" type:"string"` - - // List of activity type information. - // - // TypeInfos is a required field - TypeInfos []*ActivityTypeInfo `locationName:"typeInfos" type:"list" required:"true"` -} - -// String returns the string representation -func (s ListActivityTypesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListActivityTypesOutput) GoString() string { - return s.String() -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *ListActivityTypesOutput) SetNextPageToken(v string) *ListActivityTypesOutput { - s.NextPageToken = &v - return s -} - -// SetTypeInfos sets the TypeInfos field's value. -func (s *ListActivityTypesOutput) SetTypeInfos(v []*ActivityTypeInfo) *ListActivityTypesOutput { - s.TypeInfos = v - return s -} - -type ListClosedWorkflowExecutionsInput struct { - _ struct{} `type:"structure"` - - // If specified, only workflow executions that match this close status are listed. - // For example, if TERMINATED is specified, then only TERMINATED workflow executions - // are listed. - // - // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually - // exclusive. You can specify at most one of these in a request. 
- CloseStatusFilter *CloseStatusFilter `locationName:"closeStatusFilter" type:"structure"` - - // If specified, the workflow executions are included in the returned results - // based on whether their close times are within the range specified by this - // filter. Also, if this parameter is specified, the returned results are ordered - // by their close times. - // - // startTimeFilter and closeTimeFilter are mutually exclusive. You must specify - // one of these in a request but not both. - CloseTimeFilter *ExecutionTimeFilter `locationName:"closeTimeFilter" type:"structure"` - - // The name of the domain that contains the workflow executions to list. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // If specified, only workflow executions matching the workflow ID specified - // in the filter are returned. - // - // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually - // exclusive. You can specify at most one of these in a request. - ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` - - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. - MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. 
- NextPageToken *string `locationName:"nextPageToken" type:"string"` - - // When set to true, returns the results in reverse order. By default the results - // are returned in descending order of the start or the close time of the executions. - ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` - - // If specified, the workflow executions are included in the returned results - // based on whether their start times are within the range specified by this - // filter. Also, if this parameter is specified, the returned results are ordered - // by their start times. - // - // startTimeFilter and closeTimeFilter are mutually exclusive. You must specify - // one of these in a request but not both. - StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure"` - - // If specified, only executions that have the matching tag are listed. - // - // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually - // exclusive. You can specify at most one of these in a request. - TagFilter *TagFilter `locationName:"tagFilter" type:"structure"` - - // If specified, only executions of the type specified in the filter are returned. - // - // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually - // exclusive. You can specify at most one of these in a request. - TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"` -} - -// String returns the string representation -func (s ListClosedWorkflowExecutionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListClosedWorkflowExecutionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListClosedWorkflowExecutionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListClosedWorkflowExecutionsInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.CloseStatusFilter != nil { - if err := s.CloseStatusFilter.Validate(); err != nil { - invalidParams.AddNested("CloseStatusFilter", err.(request.ErrInvalidParams)) - } - } - if s.CloseTimeFilter != nil { - if err := s.CloseTimeFilter.Validate(); err != nil { - invalidParams.AddNested("CloseTimeFilter", err.(request.ErrInvalidParams)) - } - } - if s.ExecutionFilter != nil { - if err := s.ExecutionFilter.Validate(); err != nil { - invalidParams.AddNested("ExecutionFilter", err.(request.ErrInvalidParams)) - } - } - if s.StartTimeFilter != nil { - if err := s.StartTimeFilter.Validate(); err != nil { - invalidParams.AddNested("StartTimeFilter", err.(request.ErrInvalidParams)) - } - } - if s.TagFilter != nil { - if err := s.TagFilter.Validate(); err != nil { - invalidParams.AddNested("TagFilter", err.(request.ErrInvalidParams)) - } - } - if s.TypeFilter != nil { - if err := s.TypeFilter.Validate(); err != nil { - invalidParams.AddNested("TypeFilter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCloseStatusFilter sets the CloseStatusFilter field's value. -func (s *ListClosedWorkflowExecutionsInput) SetCloseStatusFilter(v *CloseStatusFilter) *ListClosedWorkflowExecutionsInput { - s.CloseStatusFilter = v - return s -} - -// SetCloseTimeFilter sets the CloseTimeFilter field's value. -func (s *ListClosedWorkflowExecutionsInput) SetCloseTimeFilter(v *ExecutionTimeFilter) *ListClosedWorkflowExecutionsInput { - s.CloseTimeFilter = v - return s -} - -// SetDomain sets the Domain field's value. 
-func (s *ListClosedWorkflowExecutionsInput) SetDomain(v string) *ListClosedWorkflowExecutionsInput { - s.Domain = &v - return s -} - -// SetExecutionFilter sets the ExecutionFilter field's value. -func (s *ListClosedWorkflowExecutionsInput) SetExecutionFilter(v *WorkflowExecutionFilter) *ListClosedWorkflowExecutionsInput { - s.ExecutionFilter = v - return s -} - -// SetMaximumPageSize sets the MaximumPageSize field's value. -func (s *ListClosedWorkflowExecutionsInput) SetMaximumPageSize(v int64) *ListClosedWorkflowExecutionsInput { - s.MaximumPageSize = &v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *ListClosedWorkflowExecutionsInput) SetNextPageToken(v string) *ListClosedWorkflowExecutionsInput { - s.NextPageToken = &v - return s -} - -// SetReverseOrder sets the ReverseOrder field's value. -func (s *ListClosedWorkflowExecutionsInput) SetReverseOrder(v bool) *ListClosedWorkflowExecutionsInput { - s.ReverseOrder = &v - return s -} - -// SetStartTimeFilter sets the StartTimeFilter field's value. -func (s *ListClosedWorkflowExecutionsInput) SetStartTimeFilter(v *ExecutionTimeFilter) *ListClosedWorkflowExecutionsInput { - s.StartTimeFilter = v - return s -} - -// SetTagFilter sets the TagFilter field's value. -func (s *ListClosedWorkflowExecutionsInput) SetTagFilter(v *TagFilter) *ListClosedWorkflowExecutionsInput { - s.TagFilter = v - return s -} - -// SetTypeFilter sets the TypeFilter field's value. -func (s *ListClosedWorkflowExecutionsInput) SetTypeFilter(v *WorkflowTypeFilter) *ListClosedWorkflowExecutionsInput { - s.TypeFilter = v - return s -} - -type ListDomainsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. 
- // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. - MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - NextPageToken *string `locationName:"nextPageToken" type:"string"` - - // Specifies the registration status of the domains to list. - // - // RegistrationStatus is a required field - RegistrationStatus *string `locationName:"registrationStatus" type:"string" required:"true" enum:"RegistrationStatus"` - - // When set to true, returns the results in reverse order. By default, the results - // are returned in ascending alphabetical order by name of the domains. - ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` -} - -// String returns the string representation -func (s ListDomainsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListDomainsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListDomainsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDomainsInput"} - if s.RegistrationStatus == nil { - invalidParams.Add(request.NewErrParamRequired("RegistrationStatus")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaximumPageSize sets the MaximumPageSize field's value. -func (s *ListDomainsInput) SetMaximumPageSize(v int64) *ListDomainsInput { - s.MaximumPageSize = &v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. 
-func (s *ListDomainsInput) SetNextPageToken(v string) *ListDomainsInput { - s.NextPageToken = &v - return s -} - -// SetRegistrationStatus sets the RegistrationStatus field's value. -func (s *ListDomainsInput) SetRegistrationStatus(v string) *ListDomainsInput { - s.RegistrationStatus = &v - return s -} - -// SetReverseOrder sets the ReverseOrder field's value. -func (s *ListDomainsInput) SetReverseOrder(v bool) *ListDomainsInput { - s.ReverseOrder = &v - return s -} - -// Contains a paginated collection of DomainInfo structures. -type ListDomainsOutput struct { - _ struct{} `type:"structure"` - - // A list of DomainInfo structures. - // - // DomainInfos is a required field - DomainInfos []*DomainInfo `locationName:"domainInfos" type:"list" required:"true"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - NextPageToken *string `locationName:"nextPageToken" type:"string"` -} - -// String returns the string representation -func (s ListDomainsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListDomainsOutput) GoString() string { - return s.String() -} - -// SetDomainInfos sets the DomainInfos field's value. -func (s *ListDomainsOutput) SetDomainInfos(v []*DomainInfo) *ListDomainsOutput { - s.DomainInfos = v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *ListDomainsOutput) SetNextPageToken(v string) *ListDomainsOutput { - s.NextPageToken = &v - return s -} - -type ListOpenWorkflowExecutionsInput struct { - _ struct{} `type:"structure"` - - // The name of the domain that contains the workflow executions to list. 
- // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // If specified, only workflow executions matching the workflow ID specified - // in the filter are returned. - // - // executionFilter, typeFilter and tagFilter are mutually exclusive. You can - // specify at most one of these in a request. - ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` - - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. - MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - NextPageToken *string `locationName:"nextPageToken" type:"string"` - - // When set to true, returns the results in reverse order. By default the results - // are returned in descending order of the start time of the executions. - ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` - - // Workflow executions are included in the returned results based on whether - // their start times are within the range specified by this filter. - // - // StartTimeFilter is a required field - StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure" required:"true"` - - // If specified, only executions that have the matching tag are listed. 
- // - // executionFilter, typeFilter and tagFilter are mutually exclusive. You can - // specify at most one of these in a request. - TagFilter *TagFilter `locationName:"tagFilter" type:"structure"` - - // If specified, only executions of the type specified in the filter are returned. - // - // executionFilter, typeFilter and tagFilter are mutually exclusive. You can - // specify at most one of these in a request. - TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"` -} - -// String returns the string representation -func (s ListOpenWorkflowExecutionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListOpenWorkflowExecutionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListOpenWorkflowExecutionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListOpenWorkflowExecutionsInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.StartTimeFilter == nil { - invalidParams.Add(request.NewErrParamRequired("StartTimeFilter")) - } - if s.ExecutionFilter != nil { - if err := s.ExecutionFilter.Validate(); err != nil { - invalidParams.AddNested("ExecutionFilter", err.(request.ErrInvalidParams)) - } - } - if s.StartTimeFilter != nil { - if err := s.StartTimeFilter.Validate(); err != nil { - invalidParams.AddNested("StartTimeFilter", err.(request.ErrInvalidParams)) - } - } - if s.TagFilter != nil { - if err := s.TagFilter.Validate(); err != nil { - invalidParams.AddNested("TagFilter", err.(request.ErrInvalidParams)) - } - } - if s.TypeFilter != nil { - if err := s.TypeFilter.Validate(); err != nil { - invalidParams.AddNested("TypeFilter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return 
invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *ListOpenWorkflowExecutionsInput) SetDomain(v string) *ListOpenWorkflowExecutionsInput { - s.Domain = &v - return s -} - -// SetExecutionFilter sets the ExecutionFilter field's value. -func (s *ListOpenWorkflowExecutionsInput) SetExecutionFilter(v *WorkflowExecutionFilter) *ListOpenWorkflowExecutionsInput { - s.ExecutionFilter = v - return s -} - -// SetMaximumPageSize sets the MaximumPageSize field's value. -func (s *ListOpenWorkflowExecutionsInput) SetMaximumPageSize(v int64) *ListOpenWorkflowExecutionsInput { - s.MaximumPageSize = &v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *ListOpenWorkflowExecutionsInput) SetNextPageToken(v string) *ListOpenWorkflowExecutionsInput { - s.NextPageToken = &v - return s -} - -// SetReverseOrder sets the ReverseOrder field's value. -func (s *ListOpenWorkflowExecutionsInput) SetReverseOrder(v bool) *ListOpenWorkflowExecutionsInput { - s.ReverseOrder = &v - return s -} - -// SetStartTimeFilter sets the StartTimeFilter field's value. -func (s *ListOpenWorkflowExecutionsInput) SetStartTimeFilter(v *ExecutionTimeFilter) *ListOpenWorkflowExecutionsInput { - s.StartTimeFilter = v - return s -} - -// SetTagFilter sets the TagFilter field's value. -func (s *ListOpenWorkflowExecutionsInput) SetTagFilter(v *TagFilter) *ListOpenWorkflowExecutionsInput { - s.TagFilter = v - return s -} - -// SetTypeFilter sets the TypeFilter field's value. -func (s *ListOpenWorkflowExecutionsInput) SetTypeFilter(v *WorkflowTypeFilter) *ListOpenWorkflowExecutionsInput { - s.TypeFilter = v - return s -} - -type ListWorkflowTypesInput struct { - _ struct{} `type:"structure"` - - // The name of the domain in which the workflow types have been registered. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // The maximum number of results that are returned per call. 
nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. - MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - - // If specified, lists the workflow type with this name. - Name *string `locationName:"name" min:"1" type:"string"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - NextPageToken *string `locationName:"nextPageToken" type:"string"` - - // Specifies the registration status of the workflow types to list. - // - // RegistrationStatus is a required field - RegistrationStatus *string `locationName:"registrationStatus" type:"string" required:"true" enum:"RegistrationStatus"` - - // When set to true, returns the results in reverse order. By default the results - // are returned in ascending alphabetical order of the name of the workflow - // types. - ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` -} - -// String returns the string representation -func (s ListWorkflowTypesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListWorkflowTypesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListWorkflowTypesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListWorkflowTypesInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.RegistrationStatus == nil { - invalidParams.Add(request.NewErrParamRequired("RegistrationStatus")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *ListWorkflowTypesInput) SetDomain(v string) *ListWorkflowTypesInput { - s.Domain = &v - return s -} - -// SetMaximumPageSize sets the MaximumPageSize field's value. -func (s *ListWorkflowTypesInput) SetMaximumPageSize(v int64) *ListWorkflowTypesInput { - s.MaximumPageSize = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListWorkflowTypesInput) SetName(v string) *ListWorkflowTypesInput { - s.Name = &v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *ListWorkflowTypesInput) SetNextPageToken(v string) *ListWorkflowTypesInput { - s.NextPageToken = &v - return s -} - -// SetRegistrationStatus sets the RegistrationStatus field's value. -func (s *ListWorkflowTypesInput) SetRegistrationStatus(v string) *ListWorkflowTypesInput { - s.RegistrationStatus = &v - return s -} - -// SetReverseOrder sets the ReverseOrder field's value. -func (s *ListWorkflowTypesInput) SetReverseOrder(v bool) *ListWorkflowTypesInput { - s.ReverseOrder = &v - return s -} - -// Contains a paginated list of information structures about workflow types. -type ListWorkflowTypesOutput struct { - _ struct{} `type:"structure"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. 
To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - NextPageToken *string `locationName:"nextPageToken" type:"string"` - - // The list of workflow type information. - // - // TypeInfos is a required field - TypeInfos []*WorkflowTypeInfo `locationName:"typeInfos" type:"list" required:"true"` -} - -// String returns the string representation -func (s ListWorkflowTypesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListWorkflowTypesOutput) GoString() string { - return s.String() -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *ListWorkflowTypesOutput) SetNextPageToken(v string) *ListWorkflowTypesOutput { - s.NextPageToken = &v - return s -} - -// SetTypeInfos sets the TypeInfos field's value. -func (s *ListWorkflowTypesOutput) SetTypeInfos(v []*WorkflowTypeInfo) *ListWorkflowTypesOutput { - s.TypeInfos = v - return s -} - -// Provides the details of the MarkerRecorded event. -type MarkerRecordedEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the RecordMarker decision that requested this marker. This - // information can be useful for diagnosing problems by tracing back the chain - // of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The details of the marker. - Details *string `locationName:"details" type:"string"` - - // The name of the marker. 
- // - // MarkerName is a required field - MarkerName *string `locationName:"markerName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s MarkerRecordedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MarkerRecordedEventAttributes) GoString() string { - return s.String() -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *MarkerRecordedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *MarkerRecordedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetDetails sets the Details field's value. -func (s *MarkerRecordedEventAttributes) SetDetails(v string) *MarkerRecordedEventAttributes { - s.Details = &v - return s -} - -// SetMarkerName sets the MarkerName field's value. -func (s *MarkerRecordedEventAttributes) SetMarkerName(v string) *MarkerRecordedEventAttributes { - s.MarkerName = &v - return s -} - -// Contains the count of tasks in a task list. -type PendingTaskCount struct { - _ struct{} `type:"structure"` - - // The number of tasks in the task list. - // - // Count is a required field - Count *int64 `locationName:"count" type:"integer" required:"true"` - - // If set to true, indicates that the actual count was more than the maximum - // supported by this API and the count returned is the truncated value. - Truncated *bool `locationName:"truncated" type:"boolean"` -} - -// String returns the string representation -func (s PendingTaskCount) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PendingTaskCount) GoString() string { - return s.String() -} - -// SetCount sets the Count field's value. -func (s *PendingTaskCount) SetCount(v int64) *PendingTaskCount { - s.Count = &v - return s -} - -// SetTruncated sets the Truncated field's value. 
-func (s *PendingTaskCount) SetTruncated(v bool) *PendingTaskCount { - s.Truncated = &v - return s -} - -type PollForActivityTaskInput struct { - _ struct{} `type:"structure"` - - // The name of the domain that contains the task lists being polled. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // Identity of the worker making the request, recorded in the ActivityTaskStarted - // event in the workflow history. This enables diagnostic tracing when problems - // arise. The form of this identity is user defined. - Identity *string `locationName:"identity" type:"string"` - - // Specifies the task list to poll for activity tasks. - // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - // - // TaskList is a required field - TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PollForActivityTaskInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PollForActivityTaskInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PollForActivityTaskInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PollForActivityTaskInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.TaskList == nil { - invalidParams.Add(request.NewErrParamRequired("TaskList")) - } - if s.TaskList != nil { - if err := s.TaskList.Validate(); err != nil { - invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *PollForActivityTaskInput) SetDomain(v string) *PollForActivityTaskInput { - s.Domain = &v - return s -} - -// SetIdentity sets the Identity field's value. -func (s *PollForActivityTaskInput) SetIdentity(v string) *PollForActivityTaskInput { - s.Identity = &v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *PollForActivityTaskInput) SetTaskList(v *TaskList) *PollForActivityTaskInput { - s.TaskList = v - return s -} - -// Unit of work sent to an activity worker. -type PollForActivityTaskOutput struct { - _ struct{} `type:"structure"` - - // The unique ID of the task. - // - // ActivityId is a required field - ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` - - // The type of this activity task. - // - // ActivityType is a required field - ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` - - // The inputs provided when the activity task was scheduled. The form of the - // input is user defined and should be meaningful to the activity implementation. - Input *string `locationName:"input" type:"string"` - - // The ID of the ActivityTaskStarted event recorded in the history. 
- // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The opaque string used as a handle on the task. This token is used by workers - // to communicate progress and response information back to the system about - // the task. - // - // TaskToken is a required field - TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` - - // The workflow execution that started this activity task. - // - // WorkflowExecution is a required field - WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PollForActivityTaskOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PollForActivityTaskOutput) GoString() string { - return s.String() -} - -// SetActivityId sets the ActivityId field's value. -func (s *PollForActivityTaskOutput) SetActivityId(v string) *PollForActivityTaskOutput { - s.ActivityId = &v - return s -} - -// SetActivityType sets the ActivityType field's value. -func (s *PollForActivityTaskOutput) SetActivityType(v *ActivityType) *PollForActivityTaskOutput { - s.ActivityType = v - return s -} - -// SetInput sets the Input field's value. -func (s *PollForActivityTaskOutput) SetInput(v string) *PollForActivityTaskOutput { - s.Input = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *PollForActivityTaskOutput) SetStartedEventId(v int64) *PollForActivityTaskOutput { - s.StartedEventId = &v - return s -} - -// SetTaskToken sets the TaskToken field's value. -func (s *PollForActivityTaskOutput) SetTaskToken(v string) *PollForActivityTaskOutput { - s.TaskToken = &v - return s -} - -// SetWorkflowExecution sets the WorkflowExecution field's value. 
-func (s *PollForActivityTaskOutput) SetWorkflowExecution(v *WorkflowExecution) *PollForActivityTaskOutput { - s.WorkflowExecution = v - return s -} - -type PollForDecisionTaskInput struct { - _ struct{} `type:"structure"` - - // The name of the domain containing the task lists to poll. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // Identity of the decider making the request, which is recorded in the DecisionTaskStarted - // event in the workflow history. This enables diagnostic tracing when problems - // arise. The form of this identity is user defined. - Identity *string `locationName:"identity" type:"string"` - - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. - MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - // - // The nextPageToken returned by this action cannot be used with GetWorkflowExecutionHistory - // to get the next page. You must call PollForDecisionTask again (with the nextPageToken) - // to retrieve the next page of history records. Calling PollForDecisionTask - // with a nextPageToken doesn't return a new decision task. - NextPageToken *string `locationName:"nextPageToken" type:"string"` - - // When set to true, returns the events in reverse order. 
By default the results - // are returned in ascending order of the eventTimestamp of the events. - ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` - - // Specifies the task list to poll for decision tasks. - // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - // - // TaskList is a required field - TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PollForDecisionTaskInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PollForDecisionTaskInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PollForDecisionTaskInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PollForDecisionTaskInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.TaskList == nil { - invalidParams.Add(request.NewErrParamRequired("TaskList")) - } - if s.TaskList != nil { - if err := s.TaskList.Validate(); err != nil { - invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *PollForDecisionTaskInput) SetDomain(v string) *PollForDecisionTaskInput { - s.Domain = &v - return s -} - -// SetIdentity sets the Identity field's value. -func (s *PollForDecisionTaskInput) SetIdentity(v string) *PollForDecisionTaskInput { - s.Identity = &v - return s -} - -// SetMaximumPageSize sets the MaximumPageSize field's value. 
-func (s *PollForDecisionTaskInput) SetMaximumPageSize(v int64) *PollForDecisionTaskInput { - s.MaximumPageSize = &v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *PollForDecisionTaskInput) SetNextPageToken(v string) *PollForDecisionTaskInput { - s.NextPageToken = &v - return s -} - -// SetReverseOrder sets the ReverseOrder field's value. -func (s *PollForDecisionTaskInput) SetReverseOrder(v bool) *PollForDecisionTaskInput { - s.ReverseOrder = &v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *PollForDecisionTaskInput) SetTaskList(v *TaskList) *PollForDecisionTaskInput { - s.TaskList = v - return s -} - -// A structure that represents a decision task. Decision tasks are sent to deciders -// in order for them to make decisions. -type PollForDecisionTaskOutput struct { - _ struct{} `type:"structure"` - - // A paginated list of history events of the workflow execution. The decider - // uses this during the processing of the decision task. - // - // Events is a required field - Events []*HistoryEvent `locationName:"events" type:"list" required:"true"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - NextPageToken *string `locationName:"nextPageToken" type:"string"` - - // The ID of the DecisionTaskStarted event of the previous decision task of - // this workflow execution that was processed by the decider. This can be used - // to determine the events in the history new since the last decision task received - // by the decider. - PreviousStartedEventId *int64 `locationName:"previousStartedEventId" type:"long"` - - // The ID of the DecisionTaskStarted event recorded in the history. 
- // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The opaque string used as a handle on the task. This token is used by workers - // to communicate progress and response information back to the system about - // the task. - // - // TaskToken is a required field - TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` - - // The workflow execution for which this decision task was created. - // - // WorkflowExecution is a required field - WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` - - // The type of the workflow execution for which this decision task was created. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PollForDecisionTaskOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PollForDecisionTaskOutput) GoString() string { - return s.String() -} - -// SetEvents sets the Events field's value. -func (s *PollForDecisionTaskOutput) SetEvents(v []*HistoryEvent) *PollForDecisionTaskOutput { - s.Events = v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *PollForDecisionTaskOutput) SetNextPageToken(v string) *PollForDecisionTaskOutput { - s.NextPageToken = &v - return s -} - -// SetPreviousStartedEventId sets the PreviousStartedEventId field's value. -func (s *PollForDecisionTaskOutput) SetPreviousStartedEventId(v int64) *PollForDecisionTaskOutput { - s.PreviousStartedEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *PollForDecisionTaskOutput) SetStartedEventId(v int64) *PollForDecisionTaskOutput { - s.StartedEventId = &v - return s -} - -// SetTaskToken sets the TaskToken field's value. 
-func (s *PollForDecisionTaskOutput) SetTaskToken(v string) *PollForDecisionTaskOutput { - s.TaskToken = &v - return s -} - -// SetWorkflowExecution sets the WorkflowExecution field's value. -func (s *PollForDecisionTaskOutput) SetWorkflowExecution(v *WorkflowExecution) *PollForDecisionTaskOutput { - s.WorkflowExecution = v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *PollForDecisionTaskOutput) SetWorkflowType(v *WorkflowType) *PollForDecisionTaskOutput { - s.WorkflowType = v - return s -} - -type RecordActivityTaskHeartbeatInput struct { - _ struct{} `type:"structure"` - - // If specified, contains details about the progress of the task. - Details *string `locationName:"details" type:"string"` - - // The taskToken of the ActivityTask. - // - // taskToken is generated by the service and should be treated as an opaque - // value. If the task is passed to another process, its taskToken must also - // be passed. This enables it to provide its progress and respond with results. - // - // TaskToken is a required field - TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RecordActivityTaskHeartbeatInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RecordActivityTaskHeartbeatInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RecordActivityTaskHeartbeatInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RecordActivityTaskHeartbeatInput"} - if s.TaskToken == nil { - invalidParams.Add(request.NewErrParamRequired("TaskToken")) - } - if s.TaskToken != nil && len(*s.TaskToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TaskToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDetails sets the Details field's value. 
-func (s *RecordActivityTaskHeartbeatInput) SetDetails(v string) *RecordActivityTaskHeartbeatInput { - s.Details = &v - return s -} - -// SetTaskToken sets the TaskToken field's value. -func (s *RecordActivityTaskHeartbeatInput) SetTaskToken(v string) *RecordActivityTaskHeartbeatInput { - s.TaskToken = &v - return s -} - -// Status information about an activity task. -type RecordActivityTaskHeartbeatOutput struct { - _ struct{} `type:"structure"` - - // Set to true if cancellation of the task is requested. - // - // CancelRequested is a required field - CancelRequested *bool `locationName:"cancelRequested" type:"boolean" required:"true"` -} - -// String returns the string representation -func (s RecordActivityTaskHeartbeatOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RecordActivityTaskHeartbeatOutput) GoString() string { - return s.String() -} - -// SetCancelRequested sets the CancelRequested field's value. -func (s *RecordActivityTaskHeartbeatOutput) SetCancelRequested(v bool) *RecordActivityTaskHeartbeatOutput { - s.CancelRequested = &v - return s -} - -// Provides the details of the RecordMarker decision. -// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
-// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -type RecordMarkerDecisionAttributes struct { - _ struct{} `type:"structure"` - - // The details of the marker. - Details *string `locationName:"details" type:"string"` - - // The name of the marker. - // - // MarkerName is a required field - MarkerName *string `locationName:"markerName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RecordMarkerDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RecordMarkerDecisionAttributes) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RecordMarkerDecisionAttributes) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RecordMarkerDecisionAttributes"} - if s.MarkerName == nil { - invalidParams.Add(request.NewErrParamRequired("MarkerName")) - } - if s.MarkerName != nil && len(*s.MarkerName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MarkerName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDetails sets the Details field's value. -func (s *RecordMarkerDecisionAttributes) SetDetails(v string) *RecordMarkerDecisionAttributes { - s.Details = &v - return s -} - -// SetMarkerName sets the MarkerName field's value. -func (s *RecordMarkerDecisionAttributes) SetMarkerName(v string) *RecordMarkerDecisionAttributes { - s.MarkerName = &v - return s -} - -// Provides the details of the RecordMarkerFailed event. -type RecordMarkerFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. 
- // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"RecordMarkerFailedCause"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the RecordMarkerFailed decision for this cancellation request. - // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The marker's name. - // - // MarkerName is a required field - MarkerName *string `locationName:"markerName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RecordMarkerFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RecordMarkerFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *RecordMarkerFailedEventAttributes) SetCause(v string) *RecordMarkerFailedEventAttributes { - s.Cause = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *RecordMarkerFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *RecordMarkerFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetMarkerName sets the MarkerName field's value. 
-func (s *RecordMarkerFailedEventAttributes) SetMarkerName(v string) *RecordMarkerFailedEventAttributes { - s.MarkerName = &v - return s -} - -type RegisterActivityTypeInput struct { - _ struct{} `type:"structure"` - - // If set, specifies the default maximum time before which a worker processing - // a task of this type must report progress by calling RecordActivityTaskHeartbeat. - // If the timeout is exceeded, the activity task is automatically timed out. - // This default can be overridden when scheduling an activity task using the - // ScheduleActivityTaskDecision. If the activity worker subsequently attempts - // to record a heartbeat or returns a result, the activity worker receives an - // UnknownResource fault. In this case, Amazon SWF no longer considers the activity - // task to be valid; the activity worker should clean up the activity task. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - DefaultTaskHeartbeatTimeout *string `locationName:"defaultTaskHeartbeatTimeout" type:"string"` - - // If set, specifies the default task list to use for scheduling tasks of this - // activity type. This default task list is used if a task list isn't provided - // when a task is scheduled through the ScheduleActivityTaskDecision. - DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` - - // The default task priority to assign to the activity type. If not assigned, - // then 0 is used. Valid values are integers that range from Java's Integer.MIN_VALUE - // (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate - // higher priority. - // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the in the Amazon SWF Developer Guide.. 
- DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` - - // If set, specifies the default maximum duration for a task of this activity - // type. This default can be overridden when scheduling an activity task using - // the ScheduleActivityTaskDecision. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - DefaultTaskScheduleToCloseTimeout *string `locationName:"defaultTaskScheduleToCloseTimeout" type:"string"` - - // If set, specifies the default maximum duration that a task of this activity - // type can wait before being assigned to a worker. This default can be overridden - // when scheduling an activity task using the ScheduleActivityTaskDecision. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - DefaultTaskScheduleToStartTimeout *string `locationName:"defaultTaskScheduleToStartTimeout" type:"string"` - - // If set, specifies the default maximum duration that a worker can take to - // process tasks of this activity type. This default can be overridden when - // scheduling an activity task using the ScheduleActivityTaskDecision. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"` - - // A textual description of the activity type. - Description *string `locationName:"description" type:"string"` - - // The name of the domain in which this activity is to be registered. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // The name of the activity type within the domain. - // - // The specified string must not start or end with whitespace. 
It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` - - // The version of the activity type. - // - // The activity type consists of the name and version, the combination of which - // must be unique within the domain. - // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - // - // Version is a required field - Version *string `locationName:"version" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RegisterActivityTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RegisterActivityTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *RegisterActivityTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RegisterActivityTypeInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - if s.DefaultTaskList != nil { - if err := s.DefaultTaskList.Validate(); err != nil { - invalidParams.AddNested("DefaultTaskList", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDefaultTaskHeartbeatTimeout sets the DefaultTaskHeartbeatTimeout field's value. -func (s *RegisterActivityTypeInput) SetDefaultTaskHeartbeatTimeout(v string) *RegisterActivityTypeInput { - s.DefaultTaskHeartbeatTimeout = &v - return s -} - -// SetDefaultTaskList sets the DefaultTaskList field's value. -func (s *RegisterActivityTypeInput) SetDefaultTaskList(v *TaskList) *RegisterActivityTypeInput { - s.DefaultTaskList = v - return s -} - -// SetDefaultTaskPriority sets the DefaultTaskPriority field's value. -func (s *RegisterActivityTypeInput) SetDefaultTaskPriority(v string) *RegisterActivityTypeInput { - s.DefaultTaskPriority = &v - return s -} - -// SetDefaultTaskScheduleToCloseTimeout sets the DefaultTaskScheduleToCloseTimeout field's value. 
-func (s *RegisterActivityTypeInput) SetDefaultTaskScheduleToCloseTimeout(v string) *RegisterActivityTypeInput { - s.DefaultTaskScheduleToCloseTimeout = &v - return s -} - -// SetDefaultTaskScheduleToStartTimeout sets the DefaultTaskScheduleToStartTimeout field's value. -func (s *RegisterActivityTypeInput) SetDefaultTaskScheduleToStartTimeout(v string) *RegisterActivityTypeInput { - s.DefaultTaskScheduleToStartTimeout = &v - return s -} - -// SetDefaultTaskStartToCloseTimeout sets the DefaultTaskStartToCloseTimeout field's value. -func (s *RegisterActivityTypeInput) SetDefaultTaskStartToCloseTimeout(v string) *RegisterActivityTypeInput { - s.DefaultTaskStartToCloseTimeout = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *RegisterActivityTypeInput) SetDescription(v string) *RegisterActivityTypeInput { - s.Description = &v - return s -} - -// SetDomain sets the Domain field's value. -func (s *RegisterActivityTypeInput) SetDomain(v string) *RegisterActivityTypeInput { - s.Domain = &v - return s -} - -// SetName sets the Name field's value. -func (s *RegisterActivityTypeInput) SetName(v string) *RegisterActivityTypeInput { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *RegisterActivityTypeInput) SetVersion(v string) *RegisterActivityTypeInput { - s.Version = &v - return s -} - -type RegisterActivityTypeOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s RegisterActivityTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RegisterActivityTypeOutput) GoString() string { - return s.String() -} - -type RegisterDomainInput struct { - _ struct{} `type:"structure"` - - // A text description of the domain. - Description *string `locationName:"description" type:"string"` - - // Name of the domain to register. 
The name must be unique in the region that - // the domain is registered in. - // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` - - // The duration (in days) that records and histories of workflow executions - // on the domain should be kept by the service. After the retention period, - // the workflow execution isn't available in the results of visibility calls. - // - // If you pass the value NONE or 0 (zero), then the workflow execution history - // isn't retained. As soon as the workflow execution completes, the execution - // record and its history are deleted. - // - // The maximum workflow execution retention period is 90 days. For more information - // about Amazon SWF service limits, see: Amazon SWF Service Limits (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-limits.html) - // in the Amazon SWF Developer Guide. - // - // WorkflowExecutionRetentionPeriodInDays is a required field - WorkflowExecutionRetentionPeriodInDays *string `locationName:"workflowExecutionRetentionPeriodInDays" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RegisterDomainInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RegisterDomainInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *RegisterDomainInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RegisterDomainInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.WorkflowExecutionRetentionPeriodInDays == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowExecutionRetentionPeriodInDays")) - } - if s.WorkflowExecutionRetentionPeriodInDays != nil && len(*s.WorkflowExecutionRetentionPeriodInDays) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowExecutionRetentionPeriodInDays", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *RegisterDomainInput) SetDescription(v string) *RegisterDomainInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *RegisterDomainInput) SetName(v string) *RegisterDomainInput { - s.Name = &v - return s -} - -// SetWorkflowExecutionRetentionPeriodInDays sets the WorkflowExecutionRetentionPeriodInDays field's value. -func (s *RegisterDomainInput) SetWorkflowExecutionRetentionPeriodInDays(v string) *RegisterDomainInput { - s.WorkflowExecutionRetentionPeriodInDays = &v - return s -} - -type RegisterDomainOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s RegisterDomainOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RegisterDomainOutput) GoString() string { - return s.String() -} - -type RegisterWorkflowTypeInput struct { - _ struct{} `type:"structure"` - - // If set, specifies the default policy to use for the child workflow executions - // when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution - // action explicitly or due to an expired timeout. 
This default can be overridden - // when starting a workflow execution using the StartWorkflowExecution action - // or the StartChildWorkflowExecutionDecision. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. - // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. The child executions continue to run. - DefaultChildPolicy *string `locationName:"defaultChildPolicy" type:"string" enum:"ChildPolicy"` - - // If set, specifies the default maximum duration for executions of this workflow - // type. You can override this default when starting an execution through the - // StartWorkflowExecution Action or StartChildWorkflowExecutionDecision. - // - // The duration is specified in seconds; an integer greater than or equal to - // 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot - // specify a value of "NONE" for defaultExecutionStartToCloseTimeout; there - // is a one-year max limit on the time that a workflow execution can run. Exceeding - // this limit always causes the workflow execution to time out. - DefaultExecutionStartToCloseTimeout *string `locationName:"defaultExecutionStartToCloseTimeout" type:"string"` - - // The default IAM role attached to this workflow type. - // - // Executions of this workflow type need IAM roles to invoke Lambda functions. - // If you don't specify an IAM role when you start this workflow type, the default - // Lambda role is attached to the execution. For more information, see http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) - // in the Amazon SWF Developer Guide. 
- DefaultLambdaRole *string `locationName:"defaultLambdaRole" min:"1" type:"string"` - - // If set, specifies the default task list to use for scheduling decision tasks - // for executions of this workflow type. This default is used only if a task - // list isn't provided when starting the execution through the StartWorkflowExecution - // Action or StartChildWorkflowExecutionDecision. - DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` - - // The default task priority to assign to the workflow type. If not assigned, - // then 0 is used. Valid values are integers that range from Java's Integer.MIN_VALUE - // (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate - // higher priority. - // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the Amazon SWF Developer Guide. - DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` - - // If set, specifies the default maximum duration of decision tasks for this - // workflow type. This default can be overridden when starting a workflow execution - // using the StartWorkflowExecution action or the StartChildWorkflowExecutionDecision. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"` - - // Textual description of the workflow type. - Description *string `locationName:"description" type:"string"` - - // The name of the domain in which to register the workflow type. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // The name of the workflow type. - // - // The specified string must not start or end with whitespace. 
It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` - - // The version of the workflow type. - // - // The workflow type consists of the name and version, the combination of which - // must be unique within the domain. To get a list of all currently registered - // workflow types, use the ListWorkflowTypes action. - // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - // - // Version is a required field - Version *string `locationName:"version" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RegisterWorkflowTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RegisterWorkflowTypeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *RegisterWorkflowTypeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RegisterWorkflowTypeInput"} - if s.DefaultLambdaRole != nil && len(*s.DefaultLambdaRole) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DefaultLambdaRole", 1)) - } - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - if s.DefaultTaskList != nil { - if err := s.DefaultTaskList.Validate(); err != nil { - invalidParams.AddNested("DefaultTaskList", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDefaultChildPolicy sets the DefaultChildPolicy field's value. -func (s *RegisterWorkflowTypeInput) SetDefaultChildPolicy(v string) *RegisterWorkflowTypeInput { - s.DefaultChildPolicy = &v - return s -} - -// SetDefaultExecutionStartToCloseTimeout sets the DefaultExecutionStartToCloseTimeout field's value. -func (s *RegisterWorkflowTypeInput) SetDefaultExecutionStartToCloseTimeout(v string) *RegisterWorkflowTypeInput { - s.DefaultExecutionStartToCloseTimeout = &v - return s -} - -// SetDefaultLambdaRole sets the DefaultLambdaRole field's value. -func (s *RegisterWorkflowTypeInput) SetDefaultLambdaRole(v string) *RegisterWorkflowTypeInput { - s.DefaultLambdaRole = &v - return s -} - -// SetDefaultTaskList sets the DefaultTaskList field's value. 
-func (s *RegisterWorkflowTypeInput) SetDefaultTaskList(v *TaskList) *RegisterWorkflowTypeInput { - s.DefaultTaskList = v - return s -} - -// SetDefaultTaskPriority sets the DefaultTaskPriority field's value. -func (s *RegisterWorkflowTypeInput) SetDefaultTaskPriority(v string) *RegisterWorkflowTypeInput { - s.DefaultTaskPriority = &v - return s -} - -// SetDefaultTaskStartToCloseTimeout sets the DefaultTaskStartToCloseTimeout field's value. -func (s *RegisterWorkflowTypeInput) SetDefaultTaskStartToCloseTimeout(v string) *RegisterWorkflowTypeInput { - s.DefaultTaskStartToCloseTimeout = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *RegisterWorkflowTypeInput) SetDescription(v string) *RegisterWorkflowTypeInput { - s.Description = &v - return s -} - -// SetDomain sets the Domain field's value. -func (s *RegisterWorkflowTypeInput) SetDomain(v string) *RegisterWorkflowTypeInput { - s.Domain = &v - return s -} - -// SetName sets the Name field's value. -func (s *RegisterWorkflowTypeInput) SetName(v string) *RegisterWorkflowTypeInput { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *RegisterWorkflowTypeInput) SetVersion(v string) *RegisterWorkflowTypeInput { - s.Version = &v - return s -} - -type RegisterWorkflowTypeOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s RegisterWorkflowTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RegisterWorkflowTypeOutput) GoString() string { - return s.String() -} - -// Provides the details of the RequestCancelActivityTask decision. -// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. 
-// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -type RequestCancelActivityTaskDecisionAttributes struct { - _ struct{} `type:"structure"` - - // The activityId of the activity task to be canceled. - // - // ActivityId is a required field - ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RequestCancelActivityTaskDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestCancelActivityTaskDecisionAttributes) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RequestCancelActivityTaskDecisionAttributes) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RequestCancelActivityTaskDecisionAttributes"} - if s.ActivityId == nil { - invalidParams.Add(request.NewErrParamRequired("ActivityId")) - } - if s.ActivityId != nil && len(*s.ActivityId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ActivityId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetActivityId sets the ActivityId field's value. 
-func (s *RequestCancelActivityTaskDecisionAttributes) SetActivityId(v string) *RequestCancelActivityTaskDecisionAttributes { - s.ActivityId = &v - return s -} - -// Provides the details of the RequestCancelActivityTaskFailed event. -type RequestCancelActivityTaskFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The activityId provided in the RequestCancelActivityTask decision that failed. - // - // ActivityId is a required field - ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. - // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"RequestCancelActivityTaskFailedCause"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the RequestCancelActivityTask decision for this cancellation - // request. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s RequestCancelActivityTaskFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestCancelActivityTaskFailedEventAttributes) GoString() string { - return s.String() -} - -// SetActivityId sets the ActivityId field's value. 
-func (s *RequestCancelActivityTaskFailedEventAttributes) SetActivityId(v string) *RequestCancelActivityTaskFailedEventAttributes { - s.ActivityId = &v - return s -} - -// SetCause sets the Cause field's value. -func (s *RequestCancelActivityTaskFailedEventAttributes) SetCause(v string) *RequestCancelActivityTaskFailedEventAttributes { - s.Cause = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *RequestCancelActivityTaskFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *RequestCancelActivityTaskFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// Provides the details of the RequestCancelExternalWorkflowExecution decision. -// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -type RequestCancelExternalWorkflowExecutionDecisionAttributes struct { - _ struct{} `type:"structure"` - - // The data attached to the event that can be used by the decider in subsequent - // workflow tasks. - Control *string `locationName:"control" type:"string"` - - // The runId of the external workflow execution to cancel. 
- RunId *string `locationName:"runId" type:"string"` - - // The workflowId of the external workflow execution to cancel. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RequestCancelExternalWorkflowExecutionDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestCancelExternalWorkflowExecutionDecisionAttributes) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RequestCancelExternalWorkflowExecutionDecisionAttributes) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RequestCancelExternalWorkflowExecutionDecisionAttributes"} - if s.WorkflowId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowId")) - } - if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetControl sets the Control field's value. -func (s *RequestCancelExternalWorkflowExecutionDecisionAttributes) SetControl(v string) *RequestCancelExternalWorkflowExecutionDecisionAttributes { - s.Control = &v - return s -} - -// SetRunId sets the RunId field's value. -func (s *RequestCancelExternalWorkflowExecutionDecisionAttributes) SetRunId(v string) *RequestCancelExternalWorkflowExecutionDecisionAttributes { - s.RunId = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. -func (s *RequestCancelExternalWorkflowExecutionDecisionAttributes) SetWorkflowId(v string) *RequestCancelExternalWorkflowExecutionDecisionAttributes { - s.WorkflowId = &v - return s -} - -// Provides the details of the RequestCancelExternalWorkflowExecutionFailed -// event. 
-type RequestCancelExternalWorkflowExecutionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. - // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"RequestCancelExternalWorkflowExecutionFailedCause"` - - // The data attached to the event that the decider can use in subsequent workflow - // tasks. This data isn't sent to the workflow execution. - Control *string `locationName:"control" type:"string"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the RequestCancelExternalWorkflowExecution decision for - // this cancellation request. This information can be useful for diagnosing - // problems by tracing back the chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding - // to the RequestCancelExternalWorkflowExecution decision to cancel this external - // workflow execution. This information can be useful for diagnosing problems - // by tracing back the chain of events leading up to this event. - // - // InitiatedEventId is a required field - InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` - - // The runId of the external workflow execution. 
- RunId *string `locationName:"runId" type:"string"` - - // The workflowId of the external workflow to which the cancel request was to - // be delivered. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RequestCancelExternalWorkflowExecutionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestCancelExternalWorkflowExecutionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *RequestCancelExternalWorkflowExecutionFailedEventAttributes) SetCause(v string) *RequestCancelExternalWorkflowExecutionFailedEventAttributes { - s.Cause = &v - return s -} - -// SetControl sets the Control field's value. -func (s *RequestCancelExternalWorkflowExecutionFailedEventAttributes) SetControl(v string) *RequestCancelExternalWorkflowExecutionFailedEventAttributes { - s.Control = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *RequestCancelExternalWorkflowExecutionFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *RequestCancelExternalWorkflowExecutionFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetInitiatedEventId sets the InitiatedEventId field's value. -func (s *RequestCancelExternalWorkflowExecutionFailedEventAttributes) SetInitiatedEventId(v int64) *RequestCancelExternalWorkflowExecutionFailedEventAttributes { - s.InitiatedEventId = &v - return s -} - -// SetRunId sets the RunId field's value. -func (s *RequestCancelExternalWorkflowExecutionFailedEventAttributes) SetRunId(v string) *RequestCancelExternalWorkflowExecutionFailedEventAttributes { - s.RunId = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. 
-func (s *RequestCancelExternalWorkflowExecutionFailedEventAttributes) SetWorkflowId(v string) *RequestCancelExternalWorkflowExecutionFailedEventAttributes { - s.WorkflowId = &v - return s -} - -// Provides the details of the RequestCancelExternalWorkflowExecutionInitiated -// event. -type RequestCancelExternalWorkflowExecutionInitiatedEventAttributes struct { - _ struct{} `type:"structure"` - - // Data attached to the event that can be used by the decider in subsequent - // workflow tasks. - Control *string `locationName:"control" type:"string"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the RequestCancelExternalWorkflowExecution decision for - // this cancellation request. This information can be useful for diagnosing - // problems by tracing back the chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The runId of the external workflow execution to be canceled. - RunId *string `locationName:"runId" type:"string"` - - // The workflowId of the external workflow execution to be canceled. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RequestCancelExternalWorkflowExecutionInitiatedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestCancelExternalWorkflowExecutionInitiatedEventAttributes) GoString() string { - return s.String() -} - -// SetControl sets the Control field's value. 
-func (s *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes) SetControl(v string) *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes { - s.Control = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetRunId sets the RunId field's value. -func (s *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes) SetRunId(v string) *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes { - s.RunId = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. -func (s *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes) SetWorkflowId(v string) *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes { - s.WorkflowId = &v - return s -} - -type RequestCancelWorkflowExecutionInput struct { - _ struct{} `type:"structure"` - - // The name of the domain containing the workflow execution to cancel. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // The runId of the workflow execution to cancel. - RunId *string `locationName:"runId" type:"string"` - - // The workflowId of the workflow execution to cancel. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RequestCancelWorkflowExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestCancelWorkflowExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *RequestCancelWorkflowExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RequestCancelWorkflowExecutionInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.WorkflowId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowId")) - } - if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *RequestCancelWorkflowExecutionInput) SetDomain(v string) *RequestCancelWorkflowExecutionInput { - s.Domain = &v - return s -} - -// SetRunId sets the RunId field's value. -func (s *RequestCancelWorkflowExecutionInput) SetRunId(v string) *RequestCancelWorkflowExecutionInput { - s.RunId = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. -func (s *RequestCancelWorkflowExecutionInput) SetWorkflowId(v string) *RequestCancelWorkflowExecutionInput { - s.WorkflowId = &v - return s -} - -type RequestCancelWorkflowExecutionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s RequestCancelWorkflowExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestCancelWorkflowExecutionOutput) GoString() string { - return s.String() -} - -type RespondActivityTaskCanceledInput struct { - _ struct{} `type:"structure"` - - // Information about the cancellation. - Details *string `locationName:"details" type:"string"` - - // The taskToken of the ActivityTask. - // - // taskToken is generated by the service and should be treated as an opaque - // value. If the task is passed to another process, its taskToken must also - // be passed. 
This enables it to provide its progress and respond with results. - // - // TaskToken is a required field - TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RespondActivityTaskCanceledInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RespondActivityTaskCanceledInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RespondActivityTaskCanceledInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RespondActivityTaskCanceledInput"} - if s.TaskToken == nil { - invalidParams.Add(request.NewErrParamRequired("TaskToken")) - } - if s.TaskToken != nil && len(*s.TaskToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TaskToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDetails sets the Details field's value. -func (s *RespondActivityTaskCanceledInput) SetDetails(v string) *RespondActivityTaskCanceledInput { - s.Details = &v - return s -} - -// SetTaskToken sets the TaskToken field's value. -func (s *RespondActivityTaskCanceledInput) SetTaskToken(v string) *RespondActivityTaskCanceledInput { - s.TaskToken = &v - return s -} - -type RespondActivityTaskCanceledOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s RespondActivityTaskCanceledOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RespondActivityTaskCanceledOutput) GoString() string { - return s.String() -} - -type RespondActivityTaskCompletedInput struct { - _ struct{} `type:"structure"` - - // The result of the activity task. It is a free form string that is implementation - // specific. 
- Result *string `locationName:"result" type:"string"` - - // The taskToken of the ActivityTask. - // - // taskToken is generated by the service and should be treated as an opaque - // value. If the task is passed to another process, its taskToken must also - // be passed. This enables it to provide its progress and respond with results. - // - // TaskToken is a required field - TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RespondActivityTaskCompletedInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RespondActivityTaskCompletedInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RespondActivityTaskCompletedInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RespondActivityTaskCompletedInput"} - if s.TaskToken == nil { - invalidParams.Add(request.NewErrParamRequired("TaskToken")) - } - if s.TaskToken != nil && len(*s.TaskToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TaskToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResult sets the Result field's value. -func (s *RespondActivityTaskCompletedInput) SetResult(v string) *RespondActivityTaskCompletedInput { - s.Result = &v - return s -} - -// SetTaskToken sets the TaskToken field's value. 
-func (s *RespondActivityTaskCompletedInput) SetTaskToken(v string) *RespondActivityTaskCompletedInput { - s.TaskToken = &v - return s -} - -type RespondActivityTaskCompletedOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s RespondActivityTaskCompletedOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RespondActivityTaskCompletedOutput) GoString() string { - return s.String() -} - -type RespondActivityTaskFailedInput struct { - _ struct{} `type:"structure"` - - // Detailed information about the failure. - Details *string `locationName:"details" type:"string"` - - // Description of the error that may assist in diagnostics. - Reason *string `locationName:"reason" type:"string"` - - // The taskToken of the ActivityTask. - // - // taskToken is generated by the service and should be treated as an opaque - // value. If the task is passed to another process, its taskToken must also - // be passed. This enables it to provide its progress and respond with results. - // - // TaskToken is a required field - TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RespondActivityTaskFailedInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RespondActivityTaskFailedInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *RespondActivityTaskFailedInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RespondActivityTaskFailedInput"} - if s.TaskToken == nil { - invalidParams.Add(request.NewErrParamRequired("TaskToken")) - } - if s.TaskToken != nil && len(*s.TaskToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TaskToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDetails sets the Details field's value. -func (s *RespondActivityTaskFailedInput) SetDetails(v string) *RespondActivityTaskFailedInput { - s.Details = &v - return s -} - -// SetReason sets the Reason field's value. -func (s *RespondActivityTaskFailedInput) SetReason(v string) *RespondActivityTaskFailedInput { - s.Reason = &v - return s -} - -// SetTaskToken sets the TaskToken field's value. -func (s *RespondActivityTaskFailedInput) SetTaskToken(v string) *RespondActivityTaskFailedInput { - s.TaskToken = &v - return s -} - -type RespondActivityTaskFailedOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s RespondActivityTaskFailedOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RespondActivityTaskFailedOutput) GoString() string { - return s.String() -} - -// Input data for a TaskCompleted response to a decision task. -type RespondDecisionTaskCompletedInput struct { - _ struct{} `type:"structure"` - - // The list of decisions (possibly empty) made by the decider while processing - // this decision task. See the docs for the Decision structure for details. - Decisions []*Decision `locationName:"decisions" type:"list"` - - // User defined context to add to workflow execution. - ExecutionContext *string `locationName:"executionContext" type:"string"` - - // The taskToken from the DecisionTask. - // - // taskToken is generated by the service and should be treated as an opaque - // value. 
If the task is passed to another process, its taskToken must also - // be passed. This enables it to provide its progress and respond with results. - // - // TaskToken is a required field - TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RespondDecisionTaskCompletedInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RespondDecisionTaskCompletedInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RespondDecisionTaskCompletedInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RespondDecisionTaskCompletedInput"} - if s.TaskToken == nil { - invalidParams.Add(request.NewErrParamRequired("TaskToken")) - } - if s.TaskToken != nil && len(*s.TaskToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TaskToken", 1)) - } - if s.Decisions != nil { - for i, v := range s.Decisions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Decisions", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDecisions sets the Decisions field's value. -func (s *RespondDecisionTaskCompletedInput) SetDecisions(v []*Decision) *RespondDecisionTaskCompletedInput { - s.Decisions = v - return s -} - -// SetExecutionContext sets the ExecutionContext field's value. -func (s *RespondDecisionTaskCompletedInput) SetExecutionContext(v string) *RespondDecisionTaskCompletedInput { - s.ExecutionContext = &v - return s -} - -// SetTaskToken sets the TaskToken field's value. 
-func (s *RespondDecisionTaskCompletedInput) SetTaskToken(v string) *RespondDecisionTaskCompletedInput { - s.TaskToken = &v - return s -} - -type RespondDecisionTaskCompletedOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s RespondDecisionTaskCompletedOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RespondDecisionTaskCompletedOutput) GoString() string { - return s.String() -} - -// Provides the details of the ScheduleActivityTask decision. -// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// activityType.name – String constraint. The key is swf:activityType.name. -// -// activityType.version – String constraint. The key is swf:activityType.version. -// -// taskList – String constraint. The key is swf:taskList.name. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -type ScheduleActivityTaskDecisionAttributes struct { - _ struct{} `type:"structure"` - - // The activityId of the activity task. - // - // The specified string must not start or end with whitespace. 
It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - // - // ActivityId is a required field - ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` - - // The type of the activity task to schedule. - // - // ActivityType is a required field - ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` - - // Data attached to the event that can be used by the decider in subsequent - // workflow tasks. This data isn't sent to the activity. - Control *string `locationName:"control" type:"string"` - - // If set, specifies the maximum time before which a worker processing a task - // of this type must report progress by calling RecordActivityTaskHeartbeat. - // If the timeout is exceeded, the activity task is automatically timed out. - // If the worker subsequently attempts to record a heartbeat or returns a result, - // it is ignored. This overrides the default heartbeat timeout specified when - // registering the activity type using RegisterActivityType. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - HeartbeatTimeout *string `locationName:"heartbeatTimeout" type:"string"` - - // The input provided to the activity task. - Input *string `locationName:"input" type:"string"` - - // The maximum duration for this activity task. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - // - // A schedule-to-close timeout for this activity task must be specified either - // as a default for the activity type or through this field. If neither this - // field is set nor a default schedule-to-close timeout was specified at registration - // time then a fault is returned. 
- ScheduleToCloseTimeout *string `locationName:"scheduleToCloseTimeout" type:"string"` - - // If set, specifies the maximum duration the activity task can wait to be assigned - // to a worker. This overrides the default schedule-to-start timeout specified - // when registering the activity type using RegisterActivityType. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - // - // A schedule-to-start timeout for this activity task must be specified either - // as a default for the activity type or through this field. If neither this - // field is set nor a default schedule-to-start timeout was specified at registration - // time then a fault is returned. - ScheduleToStartTimeout *string `locationName:"scheduleToStartTimeout" type:"string"` - - // If set, specifies the maximum duration a worker may take to process this - // activity task. This overrides the default start-to-close timeout specified - // when registering the activity type using RegisterActivityType. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - // - // A start-to-close timeout for this activity task must be specified either - // as a default for the activity type or through this field. If neither this - // field is set nor a default start-to-close timeout was specified at registration - // time then a fault is returned. - StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"` - - // If set, specifies the name of the task list in which to schedule the activity - // task. If not specified, the defaultTaskList registered with the activity - // type is used. - // - // A task list for this activity task must be specified either as a default - // for the activity type or through this field. 
If neither this field is set - // nor a default task list was specified at registration time then a fault is - // returned. - // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - TaskList *TaskList `locationName:"taskList" type:"structure"` - - // If set, specifies the priority with which the activity task is to be assigned - // to a worker. This overrides the defaultTaskPriority specified when registering - // the activity type using RegisterActivityType. Valid values are integers that - // range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). - // Higher numbers indicate higher priority. - // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the Amazon SWF Developer Guide. - TaskPriority *string `locationName:"taskPriority" type:"string"` -} - -// String returns the string representation -func (s ScheduleActivityTaskDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ScheduleActivityTaskDecisionAttributes) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ScheduleActivityTaskDecisionAttributes) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ScheduleActivityTaskDecisionAttributes"} - if s.ActivityId == nil { - invalidParams.Add(request.NewErrParamRequired("ActivityId")) - } - if s.ActivityId != nil && len(*s.ActivityId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ActivityId", 1)) - } - if s.ActivityType == nil { - invalidParams.Add(request.NewErrParamRequired("ActivityType")) - } - if s.ActivityType != nil { - if err := s.ActivityType.Validate(); err != nil { - invalidParams.AddNested("ActivityType", err.(request.ErrInvalidParams)) - } - } - if s.TaskList != nil { - if err := s.TaskList.Validate(); err != nil { - invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetActivityId sets the ActivityId field's value. -func (s *ScheduleActivityTaskDecisionAttributes) SetActivityId(v string) *ScheduleActivityTaskDecisionAttributes { - s.ActivityId = &v - return s -} - -// SetActivityType sets the ActivityType field's value. -func (s *ScheduleActivityTaskDecisionAttributes) SetActivityType(v *ActivityType) *ScheduleActivityTaskDecisionAttributes { - s.ActivityType = v - return s -} - -// SetControl sets the Control field's value. -func (s *ScheduleActivityTaskDecisionAttributes) SetControl(v string) *ScheduleActivityTaskDecisionAttributes { - s.Control = &v - return s -} - -// SetHeartbeatTimeout sets the HeartbeatTimeout field's value. -func (s *ScheduleActivityTaskDecisionAttributes) SetHeartbeatTimeout(v string) *ScheduleActivityTaskDecisionAttributes { - s.HeartbeatTimeout = &v - return s -} - -// SetInput sets the Input field's value. -func (s *ScheduleActivityTaskDecisionAttributes) SetInput(v string) *ScheduleActivityTaskDecisionAttributes { - s.Input = &v - return s -} - -// SetScheduleToCloseTimeout sets the ScheduleToCloseTimeout field's value. 
-func (s *ScheduleActivityTaskDecisionAttributes) SetScheduleToCloseTimeout(v string) *ScheduleActivityTaskDecisionAttributes { - s.ScheduleToCloseTimeout = &v - return s -} - -// SetScheduleToStartTimeout sets the ScheduleToStartTimeout field's value. -func (s *ScheduleActivityTaskDecisionAttributes) SetScheduleToStartTimeout(v string) *ScheduleActivityTaskDecisionAttributes { - s.ScheduleToStartTimeout = &v - return s -} - -// SetStartToCloseTimeout sets the StartToCloseTimeout field's value. -func (s *ScheduleActivityTaskDecisionAttributes) SetStartToCloseTimeout(v string) *ScheduleActivityTaskDecisionAttributes { - s.StartToCloseTimeout = &v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *ScheduleActivityTaskDecisionAttributes) SetTaskList(v *TaskList) *ScheduleActivityTaskDecisionAttributes { - s.TaskList = v - return s -} - -// SetTaskPriority sets the TaskPriority field's value. -func (s *ScheduleActivityTaskDecisionAttributes) SetTaskPriority(v string) *ScheduleActivityTaskDecisionAttributes { - s.TaskPriority = &v - return s -} - -// Provides the details of the ScheduleActivityTaskFailed event. -type ScheduleActivityTaskFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The activityId provided in the ScheduleActivityTask decision that failed. - // - // ActivityId is a required field - ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` - - // The activity type provided in the ScheduleActivityTask decision that failed. - // - // ActivityType is a required field - ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. - // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. 
For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"ScheduleActivityTaskFailedCause"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision that - // resulted in the scheduling of this activity task. This information can be - // useful for diagnosing problems by tracing back the chain of events leading - // up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s ScheduleActivityTaskFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ScheduleActivityTaskFailedEventAttributes) GoString() string { - return s.String() -} - -// SetActivityId sets the ActivityId field's value. -func (s *ScheduleActivityTaskFailedEventAttributes) SetActivityId(v string) *ScheduleActivityTaskFailedEventAttributes { - s.ActivityId = &v - return s -} - -// SetActivityType sets the ActivityType field's value. -func (s *ScheduleActivityTaskFailedEventAttributes) SetActivityType(v *ActivityType) *ScheduleActivityTaskFailedEventAttributes { - s.ActivityType = v - return s -} - -// SetCause sets the Cause field's value. -func (s *ScheduleActivityTaskFailedEventAttributes) SetCause(v string) *ScheduleActivityTaskFailedEventAttributes { - s.Cause = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. 
-func (s *ScheduleActivityTaskFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *ScheduleActivityTaskFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// Decision attributes specified in scheduleLambdaFunctionDecisionAttributes -// within the list of decisions decisions passed to RespondDecisionTaskCompleted. -type ScheduleLambdaFunctionDecisionAttributes struct { - _ struct{} `type:"structure"` - - // The data attached to the event that the decider can use in subsequent workflow - // tasks. This data isn't sent to the Lambda task. - Control *string `locationName:"control" type:"string"` - - // A string that identifies the Lambda function execution in the event history. - // - // Id is a required field - Id *string `locationName:"id" min:"1" type:"string" required:"true"` - - // The optional input data to be supplied to the Lambda function. - Input *string `locationName:"input" type:"string"` - - // The name, or ARN, of the Lambda function to schedule. - // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` - - // The timeout value, in seconds, after which the Lambda function is considered - // to be failed once it has started. This can be any integer from 1-300 (1s-5m). - // If no value is supplied, than a default value of 300s is assumed. - StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"` -} - -// String returns the string representation -func (s ScheduleLambdaFunctionDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ScheduleLambdaFunctionDecisionAttributes) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ScheduleLambdaFunctionDecisionAttributes) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ScheduleLambdaFunctionDecisionAttributes"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetControl sets the Control field's value. -func (s *ScheduleLambdaFunctionDecisionAttributes) SetControl(v string) *ScheduleLambdaFunctionDecisionAttributes { - s.Control = &v - return s -} - -// SetId sets the Id field's value. -func (s *ScheduleLambdaFunctionDecisionAttributes) SetId(v string) *ScheduleLambdaFunctionDecisionAttributes { - s.Id = &v - return s -} - -// SetInput sets the Input field's value. -func (s *ScheduleLambdaFunctionDecisionAttributes) SetInput(v string) *ScheduleLambdaFunctionDecisionAttributes { - s.Input = &v - return s -} - -// SetName sets the Name field's value. -func (s *ScheduleLambdaFunctionDecisionAttributes) SetName(v string) *ScheduleLambdaFunctionDecisionAttributes { - s.Name = &v - return s -} - -// SetStartToCloseTimeout sets the StartToCloseTimeout field's value. -func (s *ScheduleLambdaFunctionDecisionAttributes) SetStartToCloseTimeout(v string) *ScheduleLambdaFunctionDecisionAttributes { - s.StartToCloseTimeout = &v - return s -} - -// Provides the details of the ScheduleLambdaFunctionFailed event. It isn't -// set for other event types. -type ScheduleLambdaFunctionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. To help diagnose issues, use this information to - // trace back the chain of events leading up to this event. 
- // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"ScheduleLambdaFunctionFailedCause"` - - // The ID of the LambdaFunctionCompleted event corresponding to the decision - // that resulted in scheduling this Lambda task. To help diagnose issues, use - // this information to trace back the chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The ID provided in the ScheduleLambdaFunction decision that failed. - // - // Id is a required field - Id *string `locationName:"id" min:"1" type:"string" required:"true"` - - // The name of the Lambda function. - // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s ScheduleLambdaFunctionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ScheduleLambdaFunctionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *ScheduleLambdaFunctionFailedEventAttributes) SetCause(v string) *ScheduleLambdaFunctionFailedEventAttributes { - s.Cause = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. 
-func (s *ScheduleLambdaFunctionFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *ScheduleLambdaFunctionFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetId sets the Id field's value. -func (s *ScheduleLambdaFunctionFailedEventAttributes) SetId(v string) *ScheduleLambdaFunctionFailedEventAttributes { - s.Id = &v - return s -} - -// SetName sets the Name field's value. -func (s *ScheduleLambdaFunctionFailedEventAttributes) SetName(v string) *ScheduleLambdaFunctionFailedEventAttributes { - s.Name = &v - return s -} - -// Provides the details of the SignalExternalWorkflowExecution decision. -// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -type SignalExternalWorkflowExecutionDecisionAttributes struct { - _ struct{} `type:"structure"` - - // The data attached to the event that can be used by the decider in subsequent - // decision tasks. - Control *string `locationName:"control" type:"string"` - - // The input data to be provided with the signal. The target workflow execution - // uses the signal name and input data to process the signal. 
- Input *string `locationName:"input" type:"string"` - - // The runId of the workflow execution to be signaled. - RunId *string `locationName:"runId" type:"string"` - - // The name of the signal.The target workflow execution uses the signal name - // and input to process the signal. - // - // SignalName is a required field - SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"` - - // The workflowId of the workflow execution to be signaled. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s SignalExternalWorkflowExecutionDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SignalExternalWorkflowExecutionDecisionAttributes) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SignalExternalWorkflowExecutionDecisionAttributes) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SignalExternalWorkflowExecutionDecisionAttributes"} - if s.SignalName == nil { - invalidParams.Add(request.NewErrParamRequired("SignalName")) - } - if s.SignalName != nil && len(*s.SignalName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SignalName", 1)) - } - if s.WorkflowId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowId")) - } - if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetControl sets the Control field's value. -func (s *SignalExternalWorkflowExecutionDecisionAttributes) SetControl(v string) *SignalExternalWorkflowExecutionDecisionAttributes { - s.Control = &v - return s -} - -// SetInput sets the Input field's value. 
-func (s *SignalExternalWorkflowExecutionDecisionAttributes) SetInput(v string) *SignalExternalWorkflowExecutionDecisionAttributes { - s.Input = &v - return s -} - -// SetRunId sets the RunId field's value. -func (s *SignalExternalWorkflowExecutionDecisionAttributes) SetRunId(v string) *SignalExternalWorkflowExecutionDecisionAttributes { - s.RunId = &v - return s -} - -// SetSignalName sets the SignalName field's value. -func (s *SignalExternalWorkflowExecutionDecisionAttributes) SetSignalName(v string) *SignalExternalWorkflowExecutionDecisionAttributes { - s.SignalName = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. -func (s *SignalExternalWorkflowExecutionDecisionAttributes) SetWorkflowId(v string) *SignalExternalWorkflowExecutionDecisionAttributes { - s.WorkflowId = &v - return s -} - -// Provides the details of the SignalExternalWorkflowExecutionFailed event. -type SignalExternalWorkflowExecutionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. - // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"SignalExternalWorkflowExecutionFailedCause"` - - // The data attached to the event that the decider can use in subsequent workflow - // tasks. This data isn't sent to the workflow execution. - Control *string `locationName:"control" type:"string"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the SignalExternalWorkflowExecution decision for this signal. 
- // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The ID of the SignalExternalWorkflowExecutionInitiated event corresponding - // to the SignalExternalWorkflowExecution decision to request this signal. This - // information can be useful for diagnosing problems by tracing back the chain - // of events leading up to this event. - // - // InitiatedEventId is a required field - InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` - - // The runId of the external workflow execution that the signal was being delivered - // to. - RunId *string `locationName:"runId" type:"string"` - - // The workflowId of the external workflow execution that the signal was being - // delivered to. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s SignalExternalWorkflowExecutionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SignalExternalWorkflowExecutionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *SignalExternalWorkflowExecutionFailedEventAttributes) SetCause(v string) *SignalExternalWorkflowExecutionFailedEventAttributes { - s.Cause = &v - return s -} - -// SetControl sets the Control field's value. -func (s *SignalExternalWorkflowExecutionFailedEventAttributes) SetControl(v string) *SignalExternalWorkflowExecutionFailedEventAttributes { - s.Control = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. 
-func (s *SignalExternalWorkflowExecutionFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *SignalExternalWorkflowExecutionFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetInitiatedEventId sets the InitiatedEventId field's value. -func (s *SignalExternalWorkflowExecutionFailedEventAttributes) SetInitiatedEventId(v int64) *SignalExternalWorkflowExecutionFailedEventAttributes { - s.InitiatedEventId = &v - return s -} - -// SetRunId sets the RunId field's value. -func (s *SignalExternalWorkflowExecutionFailedEventAttributes) SetRunId(v string) *SignalExternalWorkflowExecutionFailedEventAttributes { - s.RunId = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. -func (s *SignalExternalWorkflowExecutionFailedEventAttributes) SetWorkflowId(v string) *SignalExternalWorkflowExecutionFailedEventAttributes { - s.WorkflowId = &v - return s -} - -// Provides the details of the SignalExternalWorkflowExecutionInitiated event. -type SignalExternalWorkflowExecutionInitiatedEventAttributes struct { - _ struct{} `type:"structure"` - - // Data attached to the event that can be used by the decider in subsequent - // decision tasks. - Control *string `locationName:"control" type:"string"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the SignalExternalWorkflowExecution decision for this signal. - // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The input provided to the signal. - Input *string `locationName:"input" type:"string"` - - // The runId of the external workflow execution to send the signal to. - RunId *string `locationName:"runId" type:"string"` - - // The name of the signal. 
- // - // SignalName is a required field - SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"` - - // The workflowId of the external workflow execution. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s SignalExternalWorkflowExecutionInitiatedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SignalExternalWorkflowExecutionInitiatedEventAttributes) GoString() string { - return s.String() -} - -// SetControl sets the Control field's value. -func (s *SignalExternalWorkflowExecutionInitiatedEventAttributes) SetControl(v string) *SignalExternalWorkflowExecutionInitiatedEventAttributes { - s.Control = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *SignalExternalWorkflowExecutionInitiatedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *SignalExternalWorkflowExecutionInitiatedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetInput sets the Input field's value. -func (s *SignalExternalWorkflowExecutionInitiatedEventAttributes) SetInput(v string) *SignalExternalWorkflowExecutionInitiatedEventAttributes { - s.Input = &v - return s -} - -// SetRunId sets the RunId field's value. -func (s *SignalExternalWorkflowExecutionInitiatedEventAttributes) SetRunId(v string) *SignalExternalWorkflowExecutionInitiatedEventAttributes { - s.RunId = &v - return s -} - -// SetSignalName sets the SignalName field's value. -func (s *SignalExternalWorkflowExecutionInitiatedEventAttributes) SetSignalName(v string) *SignalExternalWorkflowExecutionInitiatedEventAttributes { - s.SignalName = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. 
-func (s *SignalExternalWorkflowExecutionInitiatedEventAttributes) SetWorkflowId(v string) *SignalExternalWorkflowExecutionInitiatedEventAttributes { - s.WorkflowId = &v - return s -} - -type SignalWorkflowExecutionInput struct { - _ struct{} `type:"structure"` - - // The name of the domain containing the workflow execution to signal. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // Data to attach to the WorkflowExecutionSignaled event in the target workflow - // execution's history. - Input *string `locationName:"input" type:"string"` - - // The runId of the workflow execution to signal. - RunId *string `locationName:"runId" type:"string"` - - // The name of the signal. This name must be meaningful to the target workflow. - // - // SignalName is a required field - SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"` - - // The workflowId of the workflow execution to signal. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s SignalWorkflowExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SignalWorkflowExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *SignalWorkflowExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SignalWorkflowExecutionInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.SignalName == nil { - invalidParams.Add(request.NewErrParamRequired("SignalName")) - } - if s.SignalName != nil && len(*s.SignalName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SignalName", 1)) - } - if s.WorkflowId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowId")) - } - if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomain sets the Domain field's value. -func (s *SignalWorkflowExecutionInput) SetDomain(v string) *SignalWorkflowExecutionInput { - s.Domain = &v - return s -} - -// SetInput sets the Input field's value. -func (s *SignalWorkflowExecutionInput) SetInput(v string) *SignalWorkflowExecutionInput { - s.Input = &v - return s -} - -// SetRunId sets the RunId field's value. -func (s *SignalWorkflowExecutionInput) SetRunId(v string) *SignalWorkflowExecutionInput { - s.RunId = &v - return s -} - -// SetSignalName sets the SignalName field's value. -func (s *SignalWorkflowExecutionInput) SetSignalName(v string) *SignalWorkflowExecutionInput { - s.SignalName = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. 
-func (s *SignalWorkflowExecutionInput) SetWorkflowId(v string) *SignalWorkflowExecutionInput { - s.WorkflowId = &v - return s -} - -type SignalWorkflowExecutionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s SignalWorkflowExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SignalWorkflowExecutionOutput) GoString() string { - return s.String() -} - -// Provides the details of the StartChildWorkflowExecution decision. -// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagList.member.N – The key is "swf:tagList.N" where N is the tag number from -// 0 to 4, inclusive. -// -// taskList – String constraint. The key is swf:taskList.name. -// -// workflowType.name – String constraint. The key is swf:workflowType.name. -// -// workflowType.version – String constraint. The key is swf:workflowType.version. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. 
-type StartChildWorkflowExecutionDecisionAttributes struct { - _ struct{} `type:"structure"` - - // If set, specifies the policy to use for the child workflow executions if - // the workflow execution being started is terminated by calling the TerminateWorkflowExecution - // action explicitly or due to an expired timeout. This policy overrides the - // default child policy specified when registering the workflow type using RegisterWorkflowType. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. - // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. The child executions continue to run. - // - // A child policy for this workflow execution must be specified either as a - // default for the workflow type or through this parameter. If neither this - // parameter is set nor a default child policy was specified at registration - // time then a fault is returned. - ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"` - - // The data attached to the event that can be used by the decider in subsequent - // workflow tasks. This data isn't sent to the child workflow execution. - Control *string `locationName:"control" type:"string"` - - // The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout - // specified when registering the workflow type. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - // - // An execution start-to-close timeout for this workflow execution must be specified - // either as a default for the workflow type or through this parameter. 
If neither - // this parameter is set nor a default execution start-to-close timeout was - // specified at registration time then a fault is returned. - ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` - - // The input to be provided to the workflow execution. - Input *string `locationName:"input" type:"string"` - - // The IAM role attached to the child workflow execution. - LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` - - // The list of tags to associate with the child workflow execution. A maximum - // of 5 tags can be specified. You can list workflow executions with a specific - // tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions - // and specifying a TagFilter. - TagList []*string `locationName:"tagList" type:"list"` - - // The name of the task list to be used for decision tasks of the child workflow - // execution. - // - // A task list for this workflow execution must be specified either as a default - // for the workflow type or through this parameter. If neither this parameter - // is set nor a default task list was specified at registration time then a - // fault is returned. - // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - TaskList *TaskList `locationName:"taskList" type:"structure"` - - // A task priority that, if set, specifies the priority for a decision task - // of this workflow execution. This overrides the defaultTaskPriority specified - // when registering the workflow type. Valid values are integers that range - // from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). - // Higher numbers indicate higher priority. 
- // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the Amazon SWF Developer Guide. - TaskPriority *string `locationName:"taskPriority" type:"string"` - - // Specifies the maximum duration of decision tasks for this workflow execution. - // This parameter overrides the defaultTaskStartToCloseTimout specified when - // registering the workflow type using RegisterWorkflowType. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - // - // A task start-to-close timeout for this workflow execution must be specified - // either as a default for the workflow type or through this parameter. If neither - // this parameter is set nor a default task start-to-close timeout was specified - // at registration time then a fault is returned. - TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` - - // The workflowId of the workflow execution. - // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` - - // The type of the workflow execution to be started. 
- // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s StartChildWorkflowExecutionDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartChildWorkflowExecutionDecisionAttributes) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StartChildWorkflowExecutionDecisionAttributes) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartChildWorkflowExecutionDecisionAttributes"} - if s.LambdaRole != nil && len(*s.LambdaRole) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LambdaRole", 1)) - } - if s.WorkflowId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowId")) - } - if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) - } - if s.WorkflowType == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowType")) - } - if s.TaskList != nil { - if err := s.TaskList.Validate(); err != nil { - invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams)) - } - } - if s.WorkflowType != nil { - if err := s.WorkflowType.Validate(); err != nil { - invalidParams.AddNested("WorkflowType", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChildPolicy sets the ChildPolicy field's value. -func (s *StartChildWorkflowExecutionDecisionAttributes) SetChildPolicy(v string) *StartChildWorkflowExecutionDecisionAttributes { - s.ChildPolicy = &v - return s -} - -// SetControl sets the Control field's value. 
-func (s *StartChildWorkflowExecutionDecisionAttributes) SetControl(v string) *StartChildWorkflowExecutionDecisionAttributes { - s.Control = &v - return s -} - -// SetExecutionStartToCloseTimeout sets the ExecutionStartToCloseTimeout field's value. -func (s *StartChildWorkflowExecutionDecisionAttributes) SetExecutionStartToCloseTimeout(v string) *StartChildWorkflowExecutionDecisionAttributes { - s.ExecutionStartToCloseTimeout = &v - return s -} - -// SetInput sets the Input field's value. -func (s *StartChildWorkflowExecutionDecisionAttributes) SetInput(v string) *StartChildWorkflowExecutionDecisionAttributes { - s.Input = &v - return s -} - -// SetLambdaRole sets the LambdaRole field's value. -func (s *StartChildWorkflowExecutionDecisionAttributes) SetLambdaRole(v string) *StartChildWorkflowExecutionDecisionAttributes { - s.LambdaRole = &v - return s -} - -// SetTagList sets the TagList field's value. -func (s *StartChildWorkflowExecutionDecisionAttributes) SetTagList(v []*string) *StartChildWorkflowExecutionDecisionAttributes { - s.TagList = v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *StartChildWorkflowExecutionDecisionAttributes) SetTaskList(v *TaskList) *StartChildWorkflowExecutionDecisionAttributes { - s.TaskList = v - return s -} - -// SetTaskPriority sets the TaskPriority field's value. -func (s *StartChildWorkflowExecutionDecisionAttributes) SetTaskPriority(v string) *StartChildWorkflowExecutionDecisionAttributes { - s.TaskPriority = &v - return s -} - -// SetTaskStartToCloseTimeout sets the TaskStartToCloseTimeout field's value. -func (s *StartChildWorkflowExecutionDecisionAttributes) SetTaskStartToCloseTimeout(v string) *StartChildWorkflowExecutionDecisionAttributes { - s.TaskStartToCloseTimeout = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. 
-func (s *StartChildWorkflowExecutionDecisionAttributes) SetWorkflowId(v string) *StartChildWorkflowExecutionDecisionAttributes { - s.WorkflowId = &v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *StartChildWorkflowExecutionDecisionAttributes) SetWorkflowType(v *WorkflowType) *StartChildWorkflowExecutionDecisionAttributes { - s.WorkflowType = v - return s -} - -// Provides the details of the StartChildWorkflowExecutionFailed event. -type StartChildWorkflowExecutionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. - // - // When cause is set to OPERATION_NOT_PERMITTED, the decision fails because - // it lacks sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"StartChildWorkflowExecutionFailedCause"` - - // The data attached to the event that the decider can use in subsequent workflow - // tasks. This data isn't sent to the child workflow execution. - Control *string `locationName:"control" type:"string"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the StartChildWorkflowExecutionDecision to request this - // child workflow execution. This information can be useful for diagnosing problems - // by tracing back the chain of events. 
- // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // When the cause is WORKFLOW_ALREADY_RUNNING, initiatedEventId is the ID of - // the StartChildWorkflowExecutionInitiated event that corresponds to the StartChildWorkflowExecutionDecision - // to start the workflow execution. You can use this information to diagnose - // problems by tracing back the chain of events leading up to this event. - // - // When the cause isn't WORKFLOW_ALREADY_RUNNING, initiatedEventId is set to - // 0 because the StartChildWorkflowExecutionInitiated event doesn't exist. - // - // InitiatedEventId is a required field - InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` - - // The workflowId of the child workflow execution. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` - - // The workflow type provided in the StartChildWorkflowExecutionDecision that - // failed. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s StartChildWorkflowExecutionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartChildWorkflowExecutionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *StartChildWorkflowExecutionFailedEventAttributes) SetCause(v string) *StartChildWorkflowExecutionFailedEventAttributes { - s.Cause = &v - return s -} - -// SetControl sets the Control field's value. 
-func (s *StartChildWorkflowExecutionFailedEventAttributes) SetControl(v string) *StartChildWorkflowExecutionFailedEventAttributes { - s.Control = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *StartChildWorkflowExecutionFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *StartChildWorkflowExecutionFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetInitiatedEventId sets the InitiatedEventId field's value. -func (s *StartChildWorkflowExecutionFailedEventAttributes) SetInitiatedEventId(v int64) *StartChildWorkflowExecutionFailedEventAttributes { - s.InitiatedEventId = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. -func (s *StartChildWorkflowExecutionFailedEventAttributes) SetWorkflowId(v string) *StartChildWorkflowExecutionFailedEventAttributes { - s.WorkflowId = &v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *StartChildWorkflowExecutionFailedEventAttributes) SetWorkflowType(v *WorkflowType) *StartChildWorkflowExecutionFailedEventAttributes { - s.WorkflowType = v - return s -} - -// Provides the details of the StartChildWorkflowExecutionInitiated event. -type StartChildWorkflowExecutionInitiatedEventAttributes struct { - _ struct{} `type:"structure"` - - // The policy to use for the child workflow executions if this execution gets - // terminated by explicitly calling the TerminateWorkflowExecution action or - // due to an expired timeout. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. - // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. 
The child executions continue to run. - // - // ChildPolicy is a required field - ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` - - // Data attached to the event that can be used by the decider in subsequent - // decision tasks. This data isn't sent to the activity. - Control *string `locationName:"control" type:"string"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the StartChildWorkflowExecutionDecision to request this - // child workflow execution. This information can be useful for diagnosing problems - // by tracing back the cause of events. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The maximum duration for the child workflow execution. If the workflow execution - // isn't closed within this duration, it is timed out and force-terminated. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` - - // The inputs provided to the child workflow execution. - Input *string `locationName:"input" type:"string"` - - // The IAM role to attach to the child workflow execution. - LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` - - // The list of tags to associated with the child workflow execution. - TagList []*string `locationName:"tagList" type:"list"` - - // The name of the task list used for the decision tasks of the child workflow - // execution. - // - // TaskList is a required field - TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` - - // The priority assigned for the decision tasks for this workflow execution. 
- // Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) - // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. - // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the Amazon SWF Developer Guide. - TaskPriority *string `locationName:"taskPriority" type:"string"` - - // The maximum duration allowed for the decision tasks for this workflow execution. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` - - // The workflowId of the child workflow execution. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` - - // The type of the child workflow execution. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s StartChildWorkflowExecutionInitiatedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartChildWorkflowExecutionInitiatedEventAttributes) GoString() string { - return s.String() -} - -// SetChildPolicy sets the ChildPolicy field's value. -func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetChildPolicy(v string) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.ChildPolicy = &v - return s -} - -// SetControl sets the Control field's value. -func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetControl(v string) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.Control = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. 
-func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetExecutionStartToCloseTimeout sets the ExecutionStartToCloseTimeout field's value. -func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetExecutionStartToCloseTimeout(v string) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.ExecutionStartToCloseTimeout = &v - return s -} - -// SetInput sets the Input field's value. -func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetInput(v string) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.Input = &v - return s -} - -// SetLambdaRole sets the LambdaRole field's value. -func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetLambdaRole(v string) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.LambdaRole = &v - return s -} - -// SetTagList sets the TagList field's value. -func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetTagList(v []*string) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.TagList = v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetTaskList(v *TaskList) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.TaskList = v - return s -} - -// SetTaskPriority sets the TaskPriority field's value. -func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetTaskPriority(v string) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.TaskPriority = &v - return s -} - -// SetTaskStartToCloseTimeout sets the TaskStartToCloseTimeout field's value. -func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetTaskStartToCloseTimeout(v string) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.TaskStartToCloseTimeout = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. 
-func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetWorkflowId(v string) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.WorkflowId = &v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *StartChildWorkflowExecutionInitiatedEventAttributes) SetWorkflowType(v *WorkflowType) *StartChildWorkflowExecutionInitiatedEventAttributes { - s.WorkflowType = v - return s -} - -// Provides the details of the StartLambdaFunctionFailed event. It isn't set -// for other event types. -type StartLambdaFunctionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. To help diagnose issues, use this information to - // trace back the chain of events leading up to this event. - // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because the - // IAM role attached to the execution lacked sufficient permissions. For details - // and example IAM policies, see Lambda Tasks (http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) - // in the Amazon SWF Developer Guide. - Cause *string `locationName:"cause" type:"string" enum:"StartLambdaFunctionFailedCause"` - - // A description that can help diagnose the cause of the fault. - Message *string `locationName:"message" type:"string"` - - // The ID of the ActivityTaskScheduled event that was recorded when this activity - // task was scheduled. To help diagnose issues, use this information to trace - // back the chain of events leading up to this event. - ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long"` -} - -// String returns the string representation -func (s StartLambdaFunctionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartLambdaFunctionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. 
-func (s *StartLambdaFunctionFailedEventAttributes) SetCause(v string) *StartLambdaFunctionFailedEventAttributes { - s.Cause = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *StartLambdaFunctionFailedEventAttributes) SetMessage(v string) *StartLambdaFunctionFailedEventAttributes { - s.Message = &v - return s -} - -// SetScheduledEventId sets the ScheduledEventId field's value. -func (s *StartLambdaFunctionFailedEventAttributes) SetScheduledEventId(v int64) *StartLambdaFunctionFailedEventAttributes { - s.ScheduledEventId = &v - return s -} - -// Provides the details of the StartTimer decision. -// -// Access Control -// -// You can use IAM policies to control this decision's access to Amazon SWF -// resources as follows: -// -// * Use a Resource element with the domain name to limit the action to only -// specified domains. -// -// * Use an Action element to allow or deny permission to call this action. -// -// * You cannot use an IAM policy to constrain this action's parameters. -// -// If the caller doesn't have sufficient permissions to invoke the action, or -// the parameter values fall outside the specified constraints, the action fails. -// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) -// in the Amazon SWF Developer Guide. -type StartTimerDecisionAttributes struct { - _ struct{} `type:"structure"` - - // The data attached to the event that can be used by the decider in subsequent - // workflow tasks. - Control *string `locationName:"control" type:"string"` - - // The duration to wait before firing the timer. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. 
- // - // StartToFireTimeout is a required field - StartToFireTimeout *string `locationName:"startToFireTimeout" min:"1" type:"string" required:"true"` - - // The unique ID of the timer. - // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - // - // TimerId is a required field - TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s StartTimerDecisionAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartTimerDecisionAttributes) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StartTimerDecisionAttributes) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartTimerDecisionAttributes"} - if s.StartToFireTimeout == nil { - invalidParams.Add(request.NewErrParamRequired("StartToFireTimeout")) - } - if s.StartToFireTimeout != nil && len(*s.StartToFireTimeout) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StartToFireTimeout", 1)) - } - if s.TimerId == nil { - invalidParams.Add(request.NewErrParamRequired("TimerId")) - } - if s.TimerId != nil && len(*s.TimerId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TimerId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetControl sets the Control field's value. -func (s *StartTimerDecisionAttributes) SetControl(v string) *StartTimerDecisionAttributes { - s.Control = &v - return s -} - -// SetStartToFireTimeout sets the StartToFireTimeout field's value. 
-func (s *StartTimerDecisionAttributes) SetStartToFireTimeout(v string) *StartTimerDecisionAttributes { - s.StartToFireTimeout = &v - return s -} - -// SetTimerId sets the TimerId field's value. -func (s *StartTimerDecisionAttributes) SetTimerId(v string) *StartTimerDecisionAttributes { - s.TimerId = &v - return s -} - -// Provides the details of the StartTimerFailed event. -type StartTimerFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The cause of the failure. This information is generated by the system and - // can be useful for diagnostic purposes. - // - // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it - // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) - // in the Amazon SWF Developer Guide. - // - // Cause is a required field - Cause *string `locationName:"cause" type:"string" required:"true" enum:"StartTimerFailedCause"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the StartTimer decision for this activity task. This information - // can be useful for diagnosing problems by tracing back the chain of events - // leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The timerId provided in the StartTimer decision that failed. 
- // - // TimerId is a required field - TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s StartTimerFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartTimerFailedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *StartTimerFailedEventAttributes) SetCause(v string) *StartTimerFailedEventAttributes { - s.Cause = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *StartTimerFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *StartTimerFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetTimerId sets the TimerId field's value. -func (s *StartTimerFailedEventAttributes) SetTimerId(v string) *StartTimerFailedEventAttributes { - s.TimerId = &v - return s -} - -type StartWorkflowExecutionInput struct { - _ struct{} `type:"structure"` - - // If set, specifies the policy to use for the child workflow executions of - // this workflow execution if it is terminated, by calling the TerminateWorkflowExecution - // action explicitly or due to an expired timeout. This policy overrides the - // default child policy specified when registering the workflow type using RegisterWorkflowType. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. - // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. The child executions continue to run. 
- // - // A child policy for this workflow execution must be specified either as a - // default for the workflow type or through this parameter. If neither this - // parameter is set nor a default child policy was specified at registration - // time then a fault is returned. - ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"` - - // The name of the domain in which the workflow execution is created. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout - // specified when registering the workflow type. - // - // The duration is specified in seconds; an integer greater than or equal to - // 0. Exceeding this limit causes the workflow execution to time out. Unlike - // some of the other timeout parameters in Amazon SWF, you cannot specify a - // value of "NONE" for this timeout; there is a one-year max limit on the time - // that a workflow execution can run. - // - // An execution start-to-close timeout must be specified either through this - // parameter or as a default when the workflow type is registered. If neither - // this parameter nor a default execution start-to-close timeout is specified, - // a fault is returned. - ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` - - // The input for the workflow execution. This is a free form string which should - // be meaningful to the workflow you are starting. This input is made available - // to the new workflow execution in the WorkflowExecutionStarted history event. - Input *string `locationName:"input" type:"string"` - - // The IAM role to attach to this workflow execution. - // - // Executions of this workflow type need IAM roles to invoke Lambda functions. - // If you don't attach an IAM role, any attempt to schedule a Lambda task fails. 
- // This results in a ScheduleLambdaFunctionFailed history event. For more information, - // see http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) - // in the Amazon SWF Developer Guide. - LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` - - // The list of tags to associate with the workflow execution. You can specify - // a maximum of 5 tags. You can list workflow executions with a specific tag - // by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions and - // specifying a TagFilter. - TagList []*string `locationName:"tagList" type:"list"` - - // The task list to use for the decision tasks generated for this workflow execution. - // This overrides the defaultTaskList specified when registering the workflow - // type. - // - // A task list for this workflow execution must be specified either as a default - // for the workflow type or through this parameter. If neither this parameter - // is set nor a default task list was specified at registration time then a - // fault is returned. - // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - TaskList *TaskList `locationName:"taskList" type:"structure"` - - // The task priority to use for this workflow execution. This overrides any - // default priority that was assigned when the workflow type was registered. - // If not set, then the default task priority for the workflow type is used. - // Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) - // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. 
- // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the Amazon SWF Developer Guide. - TaskPriority *string `locationName:"taskPriority" type:"string"` - - // Specifies the maximum duration of decision tasks for this workflow execution. - // This parameter overrides the defaultTaskStartToCloseTimout specified when - // registering the workflow type using RegisterWorkflowType. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - // - // A task start-to-close timeout for this workflow execution must be specified - // either as a default for the workflow type or through this parameter. If neither - // this parameter is set nor a default task start-to-close timeout was specified - // at registration time then a fault is returned. - TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` - - // The user defined identifier associated with the workflow execution. You can - // use this to associate a custom identifier with the workflow execution. You - // may specify the same identifier if a workflow execution is logically a restart - // of a previous execution. You cannot have two open workflow executions with - // the same workflowId at the same time. - // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` - - // The type of the workflow to start. 
- // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s StartWorkflowExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartWorkflowExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StartWorkflowExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartWorkflowExecutionInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.LambdaRole != nil && len(*s.LambdaRole) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LambdaRole", 1)) - } - if s.WorkflowId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowId")) - } - if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) - } - if s.WorkflowType == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowType")) - } - if s.TaskList != nil { - if err := s.TaskList.Validate(); err != nil { - invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams)) - } - } - if s.WorkflowType != nil { - if err := s.WorkflowType.Validate(); err != nil { - invalidParams.AddNested("WorkflowType", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChildPolicy sets the ChildPolicy field's value. -func (s *StartWorkflowExecutionInput) SetChildPolicy(v string) *StartWorkflowExecutionInput { - s.ChildPolicy = &v - return s -} - -// SetDomain sets the Domain field's value. 
-func (s *StartWorkflowExecutionInput) SetDomain(v string) *StartWorkflowExecutionInput { - s.Domain = &v - return s -} - -// SetExecutionStartToCloseTimeout sets the ExecutionStartToCloseTimeout field's value. -func (s *StartWorkflowExecutionInput) SetExecutionStartToCloseTimeout(v string) *StartWorkflowExecutionInput { - s.ExecutionStartToCloseTimeout = &v - return s -} - -// SetInput sets the Input field's value. -func (s *StartWorkflowExecutionInput) SetInput(v string) *StartWorkflowExecutionInput { - s.Input = &v - return s -} - -// SetLambdaRole sets the LambdaRole field's value. -func (s *StartWorkflowExecutionInput) SetLambdaRole(v string) *StartWorkflowExecutionInput { - s.LambdaRole = &v - return s -} - -// SetTagList sets the TagList field's value. -func (s *StartWorkflowExecutionInput) SetTagList(v []*string) *StartWorkflowExecutionInput { - s.TagList = v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *StartWorkflowExecutionInput) SetTaskList(v *TaskList) *StartWorkflowExecutionInput { - s.TaskList = v - return s -} - -// SetTaskPriority sets the TaskPriority field's value. -func (s *StartWorkflowExecutionInput) SetTaskPriority(v string) *StartWorkflowExecutionInput { - s.TaskPriority = &v - return s -} - -// SetTaskStartToCloseTimeout sets the TaskStartToCloseTimeout field's value. -func (s *StartWorkflowExecutionInput) SetTaskStartToCloseTimeout(v string) *StartWorkflowExecutionInput { - s.TaskStartToCloseTimeout = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. -func (s *StartWorkflowExecutionInput) SetWorkflowId(v string) *StartWorkflowExecutionInput { - s.WorkflowId = &v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *StartWorkflowExecutionInput) SetWorkflowType(v *WorkflowType) *StartWorkflowExecutionInput { - s.WorkflowType = v - return s -} - -// Specifies the runId of a workflow execution. 
-type StartWorkflowExecutionOutput struct { - _ struct{} `type:"structure"` - - // The runId of a workflow execution. This ID is generated by the service and - // can be used to uniquely identify the workflow execution within a domain. - RunId *string `locationName:"runId" min:"1" type:"string"` -} - -// String returns the string representation -func (s StartWorkflowExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartWorkflowExecutionOutput) GoString() string { - return s.String() -} - -// SetRunId sets the RunId field's value. -func (s *StartWorkflowExecutionOutput) SetRunId(v string) *StartWorkflowExecutionOutput { - s.RunId = &v - return s -} - -// Used to filter the workflow executions in visibility APIs based on a tag. -type TagFilter struct { - _ struct{} `type:"structure"` - - // Specifies the tag that must be associated with the execution for it to meet - // the filter criteria. - // - // Tag is a required field - Tag *string `locationName:"tag" type:"string" required:"true"` -} - -// String returns the string representation -func (s TagFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TagFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TagFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagFilter"} - if s.Tag == nil { - invalidParams.Add(request.NewErrParamRequired("Tag")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTag sets the Tag field's value. -func (s *TagFilter) SetTag(v string) *TagFilter { - s.Tag = &v - return s -} - -// Represents a task list. -type TaskList struct { - _ struct{} `type:"structure"` - - // The name of the task list. 
- // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s TaskList) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TaskList) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TaskList) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TaskList"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *TaskList) SetName(v string) *TaskList { - s.Name = &v - return s -} - -type TerminateWorkflowExecutionInput struct { - _ struct{} `type:"structure"` - - // If set, specifies the policy to use for the child workflow executions of - // the workflow execution being terminated. This policy overrides the child - // policy specified for the workflow execution at registration time or when - // starting the execution. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. - // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. The child executions continue to run. - // - // A child policy for this workflow execution must be specified either as a - // default for the workflow type or through this parameter. If neither this - // parameter is set nor a default child policy was specified at registration - // time then a fault is returned. 
- ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"` - - // Details for terminating the workflow execution. - Details *string `locationName:"details" type:"string"` - - // The domain of the workflow execution to terminate. - // - // Domain is a required field - Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - - // A descriptive reason for terminating the workflow execution. - Reason *string `locationName:"reason" type:"string"` - - // The runId of the workflow execution to terminate. - RunId *string `locationName:"runId" type:"string"` - - // The workflowId of the workflow execution to terminate. - // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s TerminateWorkflowExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TerminateWorkflowExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TerminateWorkflowExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TerminateWorkflowExecutionInput"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) - } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) - } - if s.WorkflowId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowId")) - } - if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetChildPolicy sets the ChildPolicy field's value. 
-func (s *TerminateWorkflowExecutionInput) SetChildPolicy(v string) *TerminateWorkflowExecutionInput { - s.ChildPolicy = &v - return s -} - -// SetDetails sets the Details field's value. -func (s *TerminateWorkflowExecutionInput) SetDetails(v string) *TerminateWorkflowExecutionInput { - s.Details = &v - return s -} - -// SetDomain sets the Domain field's value. -func (s *TerminateWorkflowExecutionInput) SetDomain(v string) *TerminateWorkflowExecutionInput { - s.Domain = &v - return s -} - -// SetReason sets the Reason field's value. -func (s *TerminateWorkflowExecutionInput) SetReason(v string) *TerminateWorkflowExecutionInput { - s.Reason = &v - return s -} - -// SetRunId sets the RunId field's value. -func (s *TerminateWorkflowExecutionInput) SetRunId(v string) *TerminateWorkflowExecutionInput { - s.RunId = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. -func (s *TerminateWorkflowExecutionInput) SetWorkflowId(v string) *TerminateWorkflowExecutionInput { - s.WorkflowId = &v - return s -} - -type TerminateWorkflowExecutionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s TerminateWorkflowExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TerminateWorkflowExecutionOutput) GoString() string { - return s.String() -} - -// Provides the details of the TimerCanceled event. -type TimerCanceledEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the CancelTimer decision to cancel this timer. This information - // can be useful for diagnosing problems by tracing back the chain of events - // leading up to this event. 
- // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The ID of the TimerStarted event that was recorded when this timer was started. - // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The unique ID of the timer that was canceled. - // - // TimerId is a required field - TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s TimerCanceledEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TimerCanceledEventAttributes) GoString() string { - return s.String() -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *TimerCanceledEventAttributes) SetDecisionTaskCompletedEventId(v int64) *TimerCanceledEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *TimerCanceledEventAttributes) SetStartedEventId(v int64) *TimerCanceledEventAttributes { - s.StartedEventId = &v - return s -} - -// SetTimerId sets the TimerId field's value. -func (s *TimerCanceledEventAttributes) SetTimerId(v string) *TimerCanceledEventAttributes { - s.TimerId = &v - return s -} - -// Provides the details of the TimerFired event. -type TimerFiredEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the TimerStarted event that was recorded when this timer was started. - // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. 
- // - // StartedEventId is a required field - StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` - - // The unique ID of the timer that fired. - // - // TimerId is a required field - TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s TimerFiredEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TimerFiredEventAttributes) GoString() string { - return s.String() -} - -// SetStartedEventId sets the StartedEventId field's value. -func (s *TimerFiredEventAttributes) SetStartedEventId(v int64) *TimerFiredEventAttributes { - s.StartedEventId = &v - return s -} - -// SetTimerId sets the TimerId field's value. -func (s *TimerFiredEventAttributes) SetTimerId(v string) *TimerFiredEventAttributes { - s.TimerId = &v - return s -} - -// Provides the details of the TimerStarted event. -type TimerStartedEventAttributes struct { - _ struct{} `type:"structure"` - - // Data attached to the event that can be used by the decider in subsequent - // workflow tasks. - Control *string `locationName:"control" type:"string"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the StartTimer decision for this activity task. This information - // can be useful for diagnosing problems by tracing back the chain of events - // leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The duration of time after which the timer fires. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. 
- // - // StartToFireTimeout is a required field - StartToFireTimeout *string `locationName:"startToFireTimeout" min:"1" type:"string" required:"true"` - - // The unique ID of the timer that was started. - // - // TimerId is a required field - TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s TimerStartedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TimerStartedEventAttributes) GoString() string { - return s.String() -} - -// SetControl sets the Control field's value. -func (s *TimerStartedEventAttributes) SetControl(v string) *TimerStartedEventAttributes { - s.Control = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *TimerStartedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *TimerStartedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetStartToFireTimeout sets the StartToFireTimeout field's value. -func (s *TimerStartedEventAttributes) SetStartToFireTimeout(v string) *TimerStartedEventAttributes { - s.StartToFireTimeout = &v - return s -} - -// SetTimerId sets the TimerId field's value. -func (s *TimerStartedEventAttributes) SetTimerId(v string) *TimerStartedEventAttributes { - s.TimerId = &v - return s -} - -// Represents a workflow execution. -type WorkflowExecution struct { - _ struct{} `type:"structure"` - - // A system-generated unique identifier for the workflow execution. - // - // RunId is a required field - RunId *string `locationName:"runId" min:"1" type:"string" required:"true"` - - // The user defined identifier associated with the workflow execution. 
- // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s WorkflowExecution) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecution) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *WorkflowExecution) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WorkflowExecution"} - if s.RunId == nil { - invalidParams.Add(request.NewErrParamRequired("RunId")) - } - if s.RunId != nil && len(*s.RunId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RunId", 1)) - } - if s.WorkflowId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowId")) - } - if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRunId sets the RunId field's value. -func (s *WorkflowExecution) SetRunId(v string) *WorkflowExecution { - s.RunId = &v - return s -} - -// SetWorkflowId sets the WorkflowId field's value. -func (s *WorkflowExecution) SetWorkflowId(v string) *WorkflowExecution { - s.WorkflowId = &v - return s -} - -// Provides the details of the WorkflowExecutionCancelRequested event. -type WorkflowExecutionCancelRequestedEventAttributes struct { - _ struct{} `type:"structure"` - - // If set, indicates that the request to cancel the workflow execution was automatically - // generated, and specifies the cause. This happens if the parent workflow execution - // times out or is terminated, and the child policy is set to cancel child executions. 
- Cause *string `locationName:"cause" type:"string" enum:"WorkflowExecutionCancelRequestedCause"` - - // The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding - // to the RequestCancelExternalWorkflowExecution decision to cancel this workflow - // execution.The source event with this ID can be found in the history of the - // source workflow execution. This information can be useful for diagnosing - // problems by tracing back the chain of events leading up to this event. - ExternalInitiatedEventId *int64 `locationName:"externalInitiatedEventId" type:"long"` - - // The external workflow execution for which the cancellation was requested. - ExternalWorkflowExecution *WorkflowExecution `locationName:"externalWorkflowExecution" type:"structure"` -} - -// String returns the string representation -func (s WorkflowExecutionCancelRequestedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionCancelRequestedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *WorkflowExecutionCancelRequestedEventAttributes) SetCause(v string) *WorkflowExecutionCancelRequestedEventAttributes { - s.Cause = &v - return s -} - -// SetExternalInitiatedEventId sets the ExternalInitiatedEventId field's value. -func (s *WorkflowExecutionCancelRequestedEventAttributes) SetExternalInitiatedEventId(v int64) *WorkflowExecutionCancelRequestedEventAttributes { - s.ExternalInitiatedEventId = &v - return s -} - -// SetExternalWorkflowExecution sets the ExternalWorkflowExecution field's value. -func (s *WorkflowExecutionCancelRequestedEventAttributes) SetExternalWorkflowExecution(v *WorkflowExecution) *WorkflowExecutionCancelRequestedEventAttributes { - s.ExternalWorkflowExecution = v - return s -} - -// Provides the details of the WorkflowExecutionCanceled event. 
-type WorkflowExecutionCanceledEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the CancelWorkflowExecution decision for this cancellation - // request. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The details of the cancellation. - Details *string `locationName:"details" type:"string"` -} - -// String returns the string representation -func (s WorkflowExecutionCanceledEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionCanceledEventAttributes) GoString() string { - return s.String() -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *WorkflowExecutionCanceledEventAttributes) SetDecisionTaskCompletedEventId(v int64) *WorkflowExecutionCanceledEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetDetails sets the Details field's value. -func (s *WorkflowExecutionCanceledEventAttributes) SetDetails(v string) *WorkflowExecutionCanceledEventAttributes { - s.Details = &v - return s -} - -// Provides the details of the WorkflowExecutionCompleted event. -type WorkflowExecutionCompletedEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the CompleteWorkflowExecution decision to complete this - // execution. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. 
- // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The result produced by the workflow execution upon successful completion. - Result *string `locationName:"result" type:"string"` -} - -// String returns the string representation -func (s WorkflowExecutionCompletedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionCompletedEventAttributes) GoString() string { - return s.String() -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *WorkflowExecutionCompletedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *WorkflowExecutionCompletedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetResult sets the Result field's value. -func (s *WorkflowExecutionCompletedEventAttributes) SetResult(v string) *WorkflowExecutionCompletedEventAttributes { - s.Result = &v - return s -} - -// The configuration settings for a workflow execution including timeout values, -// tasklist etc. These configuration settings are determined from the defaults -// specified when registering the workflow type and those specified when starting -// the workflow execution. -type WorkflowExecutionConfiguration struct { - _ struct{} `type:"structure"` - - // The policy to use for the child workflow executions if this workflow execution - // is terminated, by calling the TerminateWorkflowExecution action explicitly - // or due to an expired timeout. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. 
- // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. The child executions continue to run. - // - // ChildPolicy is a required field - ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` - - // The total duration for this workflow execution. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - // - // ExecutionStartToCloseTimeout is a required field - ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" min:"1" type:"string" required:"true"` - - // The IAM role attached to the child workflow execution. - LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` - - // The task list used for the decision tasks generated for this workflow execution. - // - // TaskList is a required field - TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` - - // The priority assigned to decision tasks for this workflow execution. Valid - // values are integers that range from Java's Integer.MIN_VALUE (-2147483648) - // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. - // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the Amazon SWF Developer Guide. - TaskPriority *string `locationName:"taskPriority" type:"string"` - - // The maximum duration allowed for decision tasks for this workflow execution. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. 
- // - // TaskStartToCloseTimeout is a required field - TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s WorkflowExecutionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionConfiguration) GoString() string { - return s.String() -} - -// SetChildPolicy sets the ChildPolicy field's value. -func (s *WorkflowExecutionConfiguration) SetChildPolicy(v string) *WorkflowExecutionConfiguration { - s.ChildPolicy = &v - return s -} - -// SetExecutionStartToCloseTimeout sets the ExecutionStartToCloseTimeout field's value. -func (s *WorkflowExecutionConfiguration) SetExecutionStartToCloseTimeout(v string) *WorkflowExecutionConfiguration { - s.ExecutionStartToCloseTimeout = &v - return s -} - -// SetLambdaRole sets the LambdaRole field's value. -func (s *WorkflowExecutionConfiguration) SetLambdaRole(v string) *WorkflowExecutionConfiguration { - s.LambdaRole = &v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *WorkflowExecutionConfiguration) SetTaskList(v *TaskList) *WorkflowExecutionConfiguration { - s.TaskList = v - return s -} - -// SetTaskPriority sets the TaskPriority field's value. -func (s *WorkflowExecutionConfiguration) SetTaskPriority(v string) *WorkflowExecutionConfiguration { - s.TaskPriority = &v - return s -} - -// SetTaskStartToCloseTimeout sets the TaskStartToCloseTimeout field's value. -func (s *WorkflowExecutionConfiguration) SetTaskStartToCloseTimeout(v string) *WorkflowExecutionConfiguration { - s.TaskStartToCloseTimeout = &v - return s -} - -// Provides the details of the WorkflowExecutionContinuedAsNew event. 
-type WorkflowExecutionContinuedAsNewEventAttributes struct { - _ struct{} `type:"structure"` - - // The policy to use for the child workflow executions of the new execution - // if it is terminated by calling the TerminateWorkflowExecution action explicitly - // or due to an expired timeout. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. - // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. The child executions continue to run. - // - // ChildPolicy is a required field - ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the ContinueAsNewWorkflowExecution decision that started - // this execution. This information can be useful for diagnosing problems by - // tracing back the chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The total duration allowed for the new workflow execution. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` - - // The input provided to the new workflow execution. - Input *string `locationName:"input" type:"string"` - - // The IAM role to attach to the new (continued) workflow execution. - LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` - - // The runId of the new workflow execution. 
- // - // NewExecutionRunId is a required field - NewExecutionRunId *string `locationName:"newExecutionRunId" min:"1" type:"string" required:"true"` - - // The list of tags associated with the new workflow execution. - TagList []*string `locationName:"tagList" type:"list"` - - // The task list to use for the decisions of the new (continued) workflow execution. - // - // TaskList is a required field - TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` - - // The priority of the task to use for the decisions of the new (continued) - // workflow execution. - TaskPriority *string `locationName:"taskPriority" type:"string"` - - // The maximum duration of decision tasks for the new workflow execution. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` - - // The workflow type of this execution. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s WorkflowExecutionContinuedAsNewEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionContinuedAsNewEventAttributes) GoString() string { - return s.String() -} - -// SetChildPolicy sets the ChildPolicy field's value. -func (s *WorkflowExecutionContinuedAsNewEventAttributes) SetChildPolicy(v string) *WorkflowExecutionContinuedAsNewEventAttributes { - s.ChildPolicy = &v - return s -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. 
-func (s *WorkflowExecutionContinuedAsNewEventAttributes) SetDecisionTaskCompletedEventId(v int64) *WorkflowExecutionContinuedAsNewEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetExecutionStartToCloseTimeout sets the ExecutionStartToCloseTimeout field's value. -func (s *WorkflowExecutionContinuedAsNewEventAttributes) SetExecutionStartToCloseTimeout(v string) *WorkflowExecutionContinuedAsNewEventAttributes { - s.ExecutionStartToCloseTimeout = &v - return s -} - -// SetInput sets the Input field's value. -func (s *WorkflowExecutionContinuedAsNewEventAttributes) SetInput(v string) *WorkflowExecutionContinuedAsNewEventAttributes { - s.Input = &v - return s -} - -// SetLambdaRole sets the LambdaRole field's value. -func (s *WorkflowExecutionContinuedAsNewEventAttributes) SetLambdaRole(v string) *WorkflowExecutionContinuedAsNewEventAttributes { - s.LambdaRole = &v - return s -} - -// SetNewExecutionRunId sets the NewExecutionRunId field's value. -func (s *WorkflowExecutionContinuedAsNewEventAttributes) SetNewExecutionRunId(v string) *WorkflowExecutionContinuedAsNewEventAttributes { - s.NewExecutionRunId = &v - return s -} - -// SetTagList sets the TagList field's value. -func (s *WorkflowExecutionContinuedAsNewEventAttributes) SetTagList(v []*string) *WorkflowExecutionContinuedAsNewEventAttributes { - s.TagList = v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *WorkflowExecutionContinuedAsNewEventAttributes) SetTaskList(v *TaskList) *WorkflowExecutionContinuedAsNewEventAttributes { - s.TaskList = v - return s -} - -// SetTaskPriority sets the TaskPriority field's value. -func (s *WorkflowExecutionContinuedAsNewEventAttributes) SetTaskPriority(v string) *WorkflowExecutionContinuedAsNewEventAttributes { - s.TaskPriority = &v - return s -} - -// SetTaskStartToCloseTimeout sets the TaskStartToCloseTimeout field's value. 
-func (s *WorkflowExecutionContinuedAsNewEventAttributes) SetTaskStartToCloseTimeout(v string) *WorkflowExecutionContinuedAsNewEventAttributes { - s.TaskStartToCloseTimeout = &v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *WorkflowExecutionContinuedAsNewEventAttributes) SetWorkflowType(v *WorkflowType) *WorkflowExecutionContinuedAsNewEventAttributes { - s.WorkflowType = v - return s -} - -// Contains the count of workflow executions returned from CountOpenWorkflowExecutions -// or CountClosedWorkflowExecutions -type WorkflowExecutionCount struct { - _ struct{} `type:"structure"` - - // The number of workflow executions. - // - // Count is a required field - Count *int64 `locationName:"count" type:"integer" required:"true"` - - // If set to true, indicates that the actual count was more than the maximum - // supported by this API and the count returned is the truncated value. - Truncated *bool `locationName:"truncated" type:"boolean"` -} - -// String returns the string representation -func (s WorkflowExecutionCount) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionCount) GoString() string { - return s.String() -} - -// SetCount sets the Count field's value. -func (s *WorkflowExecutionCount) SetCount(v int64) *WorkflowExecutionCount { - s.Count = &v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *WorkflowExecutionCount) SetTruncated(v bool) *WorkflowExecutionCount { - s.Truncated = &v - return s -} - -// Provides the details of the WorkflowExecutionFailed event. -type WorkflowExecutionFailedEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the FailWorkflowExecution decision to fail this execution. 
- // This information can be useful for diagnosing problems by tracing back the - // chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` - - // The details of the failure. - Details *string `locationName:"details" type:"string"` - - // The descriptive reason provided for the failure. - Reason *string `locationName:"reason" type:"string"` -} - -// String returns the string representation -func (s WorkflowExecutionFailedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionFailedEventAttributes) GoString() string { - return s.String() -} - -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *WorkflowExecutionFailedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *WorkflowExecutionFailedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s -} - -// SetDetails sets the Details field's value. -func (s *WorkflowExecutionFailedEventAttributes) SetDetails(v string) *WorkflowExecutionFailedEventAttributes { - s.Details = &v - return s -} - -// SetReason sets the Reason field's value. -func (s *WorkflowExecutionFailedEventAttributes) SetReason(v string) *WorkflowExecutionFailedEventAttributes { - s.Reason = &v - return s -} - -// Used to filter the workflow executions in visibility APIs by their workflowId. -type WorkflowExecutionFilter struct { - _ struct{} `type:"structure"` - - // The workflowId to pass of match the criteria of this filter. 
- // - // WorkflowId is a required field - WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s WorkflowExecutionFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *WorkflowExecutionFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WorkflowExecutionFilter"} - if s.WorkflowId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowId")) - } - if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetWorkflowId sets the WorkflowId field's value. -func (s *WorkflowExecutionFilter) SetWorkflowId(v string) *WorkflowExecutionFilter { - s.WorkflowId = &v - return s -} - -// Contains information about a workflow execution. -type WorkflowExecutionInfo struct { - _ struct{} `type:"structure"` - - // Set to true if a cancellation is requested for this workflow execution. - CancelRequested *bool `locationName:"cancelRequested" type:"boolean"` - - // If the execution status is closed then this specifies how the execution was - // closed: - // - // * COMPLETED – the execution was successfully completed. - // - // * CANCELED – the execution was canceled.Cancellation allows the implementation - // to gracefully clean up before the execution is closed. - // - // * TERMINATED – the execution was force terminated. - // - // * FAILED – the execution failed to complete. - // - // * TIMED_OUT – the execution did not complete in the alloted time and was - // automatically timed out. - // - // * CONTINUED_AS_NEW – the execution is logically continued. 
This means - // the current execution was completed and a new execution was started to - // carry on the workflow. - CloseStatus *string `locationName:"closeStatus" type:"string" enum:"CloseStatus"` - - // The time when the workflow execution was closed. Set only if the execution - // status is CLOSED. - CloseTimestamp *time.Time `locationName:"closeTimestamp" type:"timestamp" timestampFormat:"unix"` - - // The workflow execution this information is about. - // - // Execution is a required field - Execution *WorkflowExecution `locationName:"execution" type:"structure" required:"true"` - - // The current status of the execution. - // - // ExecutionStatus is a required field - ExecutionStatus *string `locationName:"executionStatus" type:"string" required:"true" enum:"ExecutionStatus"` - - // If this workflow execution is a child of another execution then contains - // the workflow execution that started this execution. - Parent *WorkflowExecution `locationName:"parent" type:"structure"` - - // The time when the execution was started. - // - // StartTimestamp is a required field - StartTimestamp *time.Time `locationName:"startTimestamp" type:"timestamp" timestampFormat:"unix" required:"true"` - - // The list of tags associated with the workflow execution. Tags can be used - // to identify and list workflow executions of interest through the visibility - // APIs. A workflow execution can have a maximum of 5 tags. - TagList []*string `locationName:"tagList" type:"list"` - - // The type of the workflow execution. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s WorkflowExecutionInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionInfo) GoString() string { - return s.String() -} - -// SetCancelRequested sets the CancelRequested field's value. 
-func (s *WorkflowExecutionInfo) SetCancelRequested(v bool) *WorkflowExecutionInfo { - s.CancelRequested = &v - return s -} - -// SetCloseStatus sets the CloseStatus field's value. -func (s *WorkflowExecutionInfo) SetCloseStatus(v string) *WorkflowExecutionInfo { - s.CloseStatus = &v - return s -} - -// SetCloseTimestamp sets the CloseTimestamp field's value. -func (s *WorkflowExecutionInfo) SetCloseTimestamp(v time.Time) *WorkflowExecutionInfo { - s.CloseTimestamp = &v - return s -} - -// SetExecution sets the Execution field's value. -func (s *WorkflowExecutionInfo) SetExecution(v *WorkflowExecution) *WorkflowExecutionInfo { - s.Execution = v - return s -} - -// SetExecutionStatus sets the ExecutionStatus field's value. -func (s *WorkflowExecutionInfo) SetExecutionStatus(v string) *WorkflowExecutionInfo { - s.ExecutionStatus = &v - return s -} - -// SetParent sets the Parent field's value. -func (s *WorkflowExecutionInfo) SetParent(v *WorkflowExecution) *WorkflowExecutionInfo { - s.Parent = v - return s -} - -// SetStartTimestamp sets the StartTimestamp field's value. -func (s *WorkflowExecutionInfo) SetStartTimestamp(v time.Time) *WorkflowExecutionInfo { - s.StartTimestamp = &v - return s -} - -// SetTagList sets the TagList field's value. -func (s *WorkflowExecutionInfo) SetTagList(v []*string) *WorkflowExecutionInfo { - s.TagList = v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *WorkflowExecutionInfo) SetWorkflowType(v *WorkflowType) *WorkflowExecutionInfo { - s.WorkflowType = v - return s -} - -// Contains a paginated list of information about workflow executions. -type WorkflowExecutionInfos struct { - _ struct{} `type:"structure"` - - // The list of workflow information structures. 
- // - // ExecutionInfos is a required field - ExecutionInfos []*WorkflowExecutionInfo `locationName:"executionInfos" type:"list" required:"true"` - - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. - // - // The configured maximumPageSize determines how many results can be returned - // in a single call. - NextPageToken *string `locationName:"nextPageToken" type:"string"` -} - -// String returns the string representation -func (s WorkflowExecutionInfos) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionInfos) GoString() string { - return s.String() -} - -// SetExecutionInfos sets the ExecutionInfos field's value. -func (s *WorkflowExecutionInfos) SetExecutionInfos(v []*WorkflowExecutionInfo) *WorkflowExecutionInfos { - s.ExecutionInfos = v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *WorkflowExecutionInfos) SetNextPageToken(v string) *WorkflowExecutionInfos { - s.NextPageToken = &v - return s -} - -// Contains the counts of open tasks, child workflow executions and timers for -// a workflow execution. -type WorkflowExecutionOpenCounts struct { - _ struct{} `type:"structure"` - - // The count of activity tasks whose status is OPEN. - // - // OpenActivityTasks is a required field - OpenActivityTasks *int64 `locationName:"openActivityTasks" type:"integer" required:"true"` - - // The count of child workflow executions whose status is OPEN. - // - // OpenChildWorkflowExecutions is a required field - OpenChildWorkflowExecutions *int64 `locationName:"openChildWorkflowExecutions" type:"integer" required:"true"` - - // The count of decision tasks whose status is OPEN. A workflow execution can - // have at most one open decision task. 
- // - // OpenDecisionTasks is a required field - OpenDecisionTasks *int64 `locationName:"openDecisionTasks" type:"integer" required:"true"` - - // The count of Lambda tasks whose status is OPEN. - OpenLambdaFunctions *int64 `locationName:"openLambdaFunctions" type:"integer"` - - // The count of timers started by this workflow execution that have not fired - // yet. - // - // OpenTimers is a required field - OpenTimers *int64 `locationName:"openTimers" type:"integer" required:"true"` -} - -// String returns the string representation -func (s WorkflowExecutionOpenCounts) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionOpenCounts) GoString() string { - return s.String() -} - -// SetOpenActivityTasks sets the OpenActivityTasks field's value. -func (s *WorkflowExecutionOpenCounts) SetOpenActivityTasks(v int64) *WorkflowExecutionOpenCounts { - s.OpenActivityTasks = &v - return s -} - -// SetOpenChildWorkflowExecutions sets the OpenChildWorkflowExecutions field's value. -func (s *WorkflowExecutionOpenCounts) SetOpenChildWorkflowExecutions(v int64) *WorkflowExecutionOpenCounts { - s.OpenChildWorkflowExecutions = &v - return s -} - -// SetOpenDecisionTasks sets the OpenDecisionTasks field's value. -func (s *WorkflowExecutionOpenCounts) SetOpenDecisionTasks(v int64) *WorkflowExecutionOpenCounts { - s.OpenDecisionTasks = &v - return s -} - -// SetOpenLambdaFunctions sets the OpenLambdaFunctions field's value. -func (s *WorkflowExecutionOpenCounts) SetOpenLambdaFunctions(v int64) *WorkflowExecutionOpenCounts { - s.OpenLambdaFunctions = &v - return s -} - -// SetOpenTimers sets the OpenTimers field's value. -func (s *WorkflowExecutionOpenCounts) SetOpenTimers(v int64) *WorkflowExecutionOpenCounts { - s.OpenTimers = &v - return s -} - -// Provides the details of the WorkflowExecutionSignaled event. 
-type WorkflowExecutionSignaledEventAttributes struct { - _ struct{} `type:"structure"` - - // The ID of the SignalExternalWorkflowExecutionInitiated event corresponding - // to the SignalExternalWorkflow decision to signal this workflow execution.The - // source event with this ID can be found in the history of the source workflow - // execution. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. This field is set only - // if the signal was initiated by another workflow execution. - ExternalInitiatedEventId *int64 `locationName:"externalInitiatedEventId" type:"long"` - - // The workflow execution that sent the signal. This is set only of the signal - // was sent by another workflow execution. - ExternalWorkflowExecution *WorkflowExecution `locationName:"externalWorkflowExecution" type:"structure"` - - // The inputs provided with the signal. The decider can use the signal name - // and inputs to determine how to process the signal. - Input *string `locationName:"input" type:"string"` - - // The name of the signal received. The decider can use the signal name and - // inputs to determine how to the process the signal. - // - // SignalName is a required field - SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s WorkflowExecutionSignaledEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionSignaledEventAttributes) GoString() string { - return s.String() -} - -// SetExternalInitiatedEventId sets the ExternalInitiatedEventId field's value. -func (s *WorkflowExecutionSignaledEventAttributes) SetExternalInitiatedEventId(v int64) *WorkflowExecutionSignaledEventAttributes { - s.ExternalInitiatedEventId = &v - return s -} - -// SetExternalWorkflowExecution sets the ExternalWorkflowExecution field's value. 
-func (s *WorkflowExecutionSignaledEventAttributes) SetExternalWorkflowExecution(v *WorkflowExecution) *WorkflowExecutionSignaledEventAttributes { - s.ExternalWorkflowExecution = v - return s -} - -// SetInput sets the Input field's value. -func (s *WorkflowExecutionSignaledEventAttributes) SetInput(v string) *WorkflowExecutionSignaledEventAttributes { - s.Input = &v - return s -} - -// SetSignalName sets the SignalName field's value. -func (s *WorkflowExecutionSignaledEventAttributes) SetSignalName(v string) *WorkflowExecutionSignaledEventAttributes { - s.SignalName = &v - return s -} - -// Provides details of WorkflowExecutionStarted event. -type WorkflowExecutionStartedEventAttributes struct { - _ struct{} `type:"structure"` - - // The policy to use for the child workflow executions if this workflow execution - // is terminated, by calling the TerminateWorkflowExecution action explicitly - // or due to an expired timeout. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. - // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. The child executions continue to run. - // - // ChildPolicy is a required field - ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` - - // If this workflow execution was started due to a ContinueAsNewWorkflowExecution - // decision, then it contains the runId of the previous workflow execution that - // was closed and continued as this execution. - ContinuedExecutionRunId *string `locationName:"continuedExecutionRunId" type:"string"` - - // The maximum duration for this workflow execution. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. 
You can use NONE to specify unlimited duration. - ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` - - // The input provided to the workflow execution. - Input *string `locationName:"input" type:"string"` - - // The IAM role attached to the workflow execution. - LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` - - // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this workflow execution. - // The source event with this ID can be found in the history of the source workflow - // execution. This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. - ParentInitiatedEventId *int64 `locationName:"parentInitiatedEventId" type:"long"` - - // The source workflow execution that started this workflow execution. The member - // isn't set if the workflow execution was not started by a workflow. - ParentWorkflowExecution *WorkflowExecution `locationName:"parentWorkflowExecution" type:"structure"` - - // The list of tags associated with this workflow execution. An execution can - // have up to 5 tags. - TagList []*string `locationName:"tagList" type:"list"` - - // The name of the task list for scheduling the decision tasks for this workflow - // execution. - // - // TaskList is a required field - TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` - - // The priority of the decision tasks in the workflow execution. - TaskPriority *string `locationName:"taskPriority" type:"string"` - - // The maximum duration of decision tasks for this workflow type. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` - - // The workflow type of this execution. 
- // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s WorkflowExecutionStartedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionStartedEventAttributes) GoString() string { - return s.String() -} - -// SetChildPolicy sets the ChildPolicy field's value. -func (s *WorkflowExecutionStartedEventAttributes) SetChildPolicy(v string) *WorkflowExecutionStartedEventAttributes { - s.ChildPolicy = &v - return s -} - -// SetContinuedExecutionRunId sets the ContinuedExecutionRunId field's value. -func (s *WorkflowExecutionStartedEventAttributes) SetContinuedExecutionRunId(v string) *WorkflowExecutionStartedEventAttributes { - s.ContinuedExecutionRunId = &v - return s -} - -// SetExecutionStartToCloseTimeout sets the ExecutionStartToCloseTimeout field's value. -func (s *WorkflowExecutionStartedEventAttributes) SetExecutionStartToCloseTimeout(v string) *WorkflowExecutionStartedEventAttributes { - s.ExecutionStartToCloseTimeout = &v - return s -} - -// SetInput sets the Input field's value. -func (s *WorkflowExecutionStartedEventAttributes) SetInput(v string) *WorkflowExecutionStartedEventAttributes { - s.Input = &v - return s -} - -// SetLambdaRole sets the LambdaRole field's value. -func (s *WorkflowExecutionStartedEventAttributes) SetLambdaRole(v string) *WorkflowExecutionStartedEventAttributes { - s.LambdaRole = &v - return s -} - -// SetParentInitiatedEventId sets the ParentInitiatedEventId field's value. -func (s *WorkflowExecutionStartedEventAttributes) SetParentInitiatedEventId(v int64) *WorkflowExecutionStartedEventAttributes { - s.ParentInitiatedEventId = &v - return s -} - -// SetParentWorkflowExecution sets the ParentWorkflowExecution field's value. 
-func (s *WorkflowExecutionStartedEventAttributes) SetParentWorkflowExecution(v *WorkflowExecution) *WorkflowExecutionStartedEventAttributes { - s.ParentWorkflowExecution = v - return s -} - -// SetTagList sets the TagList field's value. -func (s *WorkflowExecutionStartedEventAttributes) SetTagList(v []*string) *WorkflowExecutionStartedEventAttributes { - s.TagList = v - return s -} - -// SetTaskList sets the TaskList field's value. -func (s *WorkflowExecutionStartedEventAttributes) SetTaskList(v *TaskList) *WorkflowExecutionStartedEventAttributes { - s.TaskList = v - return s -} - -// SetTaskPriority sets the TaskPriority field's value. -func (s *WorkflowExecutionStartedEventAttributes) SetTaskPriority(v string) *WorkflowExecutionStartedEventAttributes { - s.TaskPriority = &v - return s -} - -// SetTaskStartToCloseTimeout sets the TaskStartToCloseTimeout field's value. -func (s *WorkflowExecutionStartedEventAttributes) SetTaskStartToCloseTimeout(v string) *WorkflowExecutionStartedEventAttributes { - s.TaskStartToCloseTimeout = &v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. -func (s *WorkflowExecutionStartedEventAttributes) SetWorkflowType(v *WorkflowType) *WorkflowExecutionStartedEventAttributes { - s.WorkflowType = v - return s -} - -// Provides the details of the WorkflowExecutionTerminated event. -type WorkflowExecutionTerminatedEventAttributes struct { - _ struct{} `type:"structure"` - - // If set, indicates that the workflow execution was automatically terminated, - // and specifies the cause. This happens if the parent workflow execution times - // out or is terminated and the child policy is set to terminate child executions. - Cause *string `locationName:"cause" type:"string" enum:"WorkflowExecutionTerminatedCause"` - - // The policy used for the child workflow executions of this workflow execution. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. 
- // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. - // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. The child executions continue to run. - // - // ChildPolicy is a required field - ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` - - // The details provided for the termination. - Details *string `locationName:"details" type:"string"` - - // The reason provided for the termination. - Reason *string `locationName:"reason" type:"string"` -} - -// String returns the string representation -func (s WorkflowExecutionTerminatedEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionTerminatedEventAttributes) GoString() string { - return s.String() -} - -// SetCause sets the Cause field's value. -func (s *WorkflowExecutionTerminatedEventAttributes) SetCause(v string) *WorkflowExecutionTerminatedEventAttributes { - s.Cause = &v - return s -} - -// SetChildPolicy sets the ChildPolicy field's value. -func (s *WorkflowExecutionTerminatedEventAttributes) SetChildPolicy(v string) *WorkflowExecutionTerminatedEventAttributes { - s.ChildPolicy = &v - return s -} - -// SetDetails sets the Details field's value. -func (s *WorkflowExecutionTerminatedEventAttributes) SetDetails(v string) *WorkflowExecutionTerminatedEventAttributes { - s.Details = &v - return s -} - -// SetReason sets the Reason field's value. -func (s *WorkflowExecutionTerminatedEventAttributes) SetReason(v string) *WorkflowExecutionTerminatedEventAttributes { - s.Reason = &v - return s -} - -// Provides the details of the WorkflowExecutionTimedOut event. 
-type WorkflowExecutionTimedOutEventAttributes struct { - _ struct{} `type:"structure"` - - // The policy used for the child workflow executions of this workflow execution. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. - // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. The child executions continue to run. - // - // ChildPolicy is a required field - ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` - - // The type of timeout that caused this event. - // - // TimeoutType is a required field - TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"WorkflowExecutionTimeoutType"` -} - -// String returns the string representation -func (s WorkflowExecutionTimedOutEventAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowExecutionTimedOutEventAttributes) GoString() string { - return s.String() -} - -// SetChildPolicy sets the ChildPolicy field's value. -func (s *WorkflowExecutionTimedOutEventAttributes) SetChildPolicy(v string) *WorkflowExecutionTimedOutEventAttributes { - s.ChildPolicy = &v - return s -} - -// SetTimeoutType sets the TimeoutType field's value. -func (s *WorkflowExecutionTimedOutEventAttributes) SetTimeoutType(v string) *WorkflowExecutionTimedOutEventAttributes { - s.TimeoutType = &v - return s -} - -// Represents a workflow type. -type WorkflowType struct { - _ struct{} `type:"structure"` - - // The name of the workflow type. - // - // The combination of workflow type name and version must be unique with in - // a domain. 
- // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` - - // The version of the workflow type. - // - // The combination of workflow type name and version must be unique with in - // a domain. - // - // Version is a required field - Version *string `locationName:"version" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s WorkflowType) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowType) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *WorkflowType) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WorkflowType"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *WorkflowType) SetName(v string) *WorkflowType { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *WorkflowType) SetVersion(v string) *WorkflowType { - s.Version = &v - return s -} - -// The configuration settings of a workflow type. -type WorkflowTypeConfiguration struct { - _ struct{} `type:"structure"` - - // The default policy to use for the child workflow executions when a workflow - // execution of this type is terminated, by calling the TerminateWorkflowExecution - // action explicitly or due to an expired timeout. 
This default can be overridden - // when starting a workflow execution using the StartWorkflowExecution action - // or the StartChildWorkflowExecutionDecision. - // - // The supported child policies are: - // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A request to cancel is attempted for each child execution - // by recording a WorkflowExecutionCancelRequested event in its history. - // It is up to the decider to take appropriate actions when it receives an - // execution history with this event. - // - // * ABANDON – No action is taken. The child executions continue to run. - DefaultChildPolicy *string `locationName:"defaultChildPolicy" type:"string" enum:"ChildPolicy"` - - // The default maximum duration, specified when registering the workflow type, - // for executions of this workflow type. This default can be overridden when - // starting a workflow execution using the StartWorkflowExecution action or - // the StartChildWorkflowExecutionDecision. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. - DefaultExecutionStartToCloseTimeout *string `locationName:"defaultExecutionStartToCloseTimeout" type:"string"` - - // The default IAM role attached to this workflow type. - // - // Executions of this workflow type need IAM roles to invoke Lambda functions. - // If you don't specify an IAM role when starting this workflow type, the default - // Lambda role is attached to the execution. For more information, see http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) - // in the Amazon SWF Developer Guide. - DefaultLambdaRole *string `locationName:"defaultLambdaRole" min:"1" type:"string"` - - // The default task list, specified when registering the workflow type, for - // decisions tasks scheduled for workflow executions of this type. 
This default - // can be overridden when starting a workflow execution using the StartWorkflowExecution - // action or the StartChildWorkflowExecutionDecision. - DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` - - // The default task priority, specified when registering the workflow type, - // for all decision tasks of this workflow type. This default can be overridden - // when starting a workflow execution using the StartWorkflowExecution action - // or the StartChildWorkflowExecution decision. - // - // Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) - // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. - // - // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) - // in the Amazon SWF Developer Guide. - DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` - - // The default maximum duration, specified when registering the workflow type, - // that a decision task for executions of this workflow type might take before - // returning completion or failure. If the task doesn'tdo close in the specified - // time then the task is automatically timed out and rescheduled. If the decider - // eventually reports a completion or failure, it is ignored. This default can - // be overridden when starting a workflow execution using the StartWorkflowExecution - // action or the StartChildWorkflowExecutionDecision. - // - // The duration is specified in seconds, an integer greater than or equal to - // 0. You can use NONE to specify unlimited duration. 
- DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"` -} - -// String returns the string representation -func (s WorkflowTypeConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowTypeConfiguration) GoString() string { - return s.String() -} - -// SetDefaultChildPolicy sets the DefaultChildPolicy field's value. -func (s *WorkflowTypeConfiguration) SetDefaultChildPolicy(v string) *WorkflowTypeConfiguration { - s.DefaultChildPolicy = &v - return s -} - -// SetDefaultExecutionStartToCloseTimeout sets the DefaultExecutionStartToCloseTimeout field's value. -func (s *WorkflowTypeConfiguration) SetDefaultExecutionStartToCloseTimeout(v string) *WorkflowTypeConfiguration { - s.DefaultExecutionStartToCloseTimeout = &v - return s -} - -// SetDefaultLambdaRole sets the DefaultLambdaRole field's value. -func (s *WorkflowTypeConfiguration) SetDefaultLambdaRole(v string) *WorkflowTypeConfiguration { - s.DefaultLambdaRole = &v - return s -} - -// SetDefaultTaskList sets the DefaultTaskList field's value. -func (s *WorkflowTypeConfiguration) SetDefaultTaskList(v *TaskList) *WorkflowTypeConfiguration { - s.DefaultTaskList = v - return s -} - -// SetDefaultTaskPriority sets the DefaultTaskPriority field's value. -func (s *WorkflowTypeConfiguration) SetDefaultTaskPriority(v string) *WorkflowTypeConfiguration { - s.DefaultTaskPriority = &v - return s -} - -// SetDefaultTaskStartToCloseTimeout sets the DefaultTaskStartToCloseTimeout field's value. -func (s *WorkflowTypeConfiguration) SetDefaultTaskStartToCloseTimeout(v string) *WorkflowTypeConfiguration { - s.DefaultTaskStartToCloseTimeout = &v - return s -} - -// Used to filter workflow execution query results by type. Each parameter, -// if specified, defines a rule that must be satisfied by each returned result. -type WorkflowTypeFilter struct { - _ struct{} `type:"structure"` - - // Name of the workflow type. 
- // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` - - // Version of the workflow type. - Version *string `locationName:"version" type:"string"` -} - -// String returns the string representation -func (s WorkflowTypeFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowTypeFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *WorkflowTypeFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WorkflowTypeFilter"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *WorkflowTypeFilter) SetName(v string) *WorkflowTypeFilter { - s.Name = &v - return s -} - -// SetVersion sets the Version field's value. -func (s *WorkflowTypeFilter) SetVersion(v string) *WorkflowTypeFilter { - s.Version = &v - return s -} - -// Contains information about a workflow type. -type WorkflowTypeInfo struct { - _ struct{} `type:"structure"` - - // The date when this type was registered. - // - // CreationDate is a required field - CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix" required:"true"` - - // If the type is in deprecated state, then it is set to the date when the type - // was deprecated. - DeprecationDate *time.Time `locationName:"deprecationDate" type:"timestamp" timestampFormat:"unix"` - - // The description of the type registered through RegisterWorkflowType. - Description *string `locationName:"description" type:"string"` - - // The current status of the workflow type. 
- // - // Status is a required field - Status *string `locationName:"status" type:"string" required:"true" enum:"RegistrationStatus"` - - // The workflow type this information is about. - // - // WorkflowType is a required field - WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` -} - -// String returns the string representation -func (s WorkflowTypeInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkflowTypeInfo) GoString() string { - return s.String() -} - -// SetCreationDate sets the CreationDate field's value. -func (s *WorkflowTypeInfo) SetCreationDate(v time.Time) *WorkflowTypeInfo { - s.CreationDate = &v - return s -} - -// SetDeprecationDate sets the DeprecationDate field's value. -func (s *WorkflowTypeInfo) SetDeprecationDate(v time.Time) *WorkflowTypeInfo { - s.DeprecationDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *WorkflowTypeInfo) SetDescription(v string) *WorkflowTypeInfo { - s.Description = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *WorkflowTypeInfo) SetStatus(v string) *WorkflowTypeInfo { - s.Status = &v - return s -} - -// SetWorkflowType sets the WorkflowType field's value. 
-func (s *WorkflowTypeInfo) SetWorkflowType(v *WorkflowType) *WorkflowTypeInfo { - s.WorkflowType = v - return s -} - -const ( - // ActivityTaskTimeoutTypeStartToClose is a ActivityTaskTimeoutType enum value - ActivityTaskTimeoutTypeStartToClose = "START_TO_CLOSE" - - // ActivityTaskTimeoutTypeScheduleToStart is a ActivityTaskTimeoutType enum value - ActivityTaskTimeoutTypeScheduleToStart = "SCHEDULE_TO_START" - - // ActivityTaskTimeoutTypeScheduleToClose is a ActivityTaskTimeoutType enum value - ActivityTaskTimeoutTypeScheduleToClose = "SCHEDULE_TO_CLOSE" - - // ActivityTaskTimeoutTypeHeartbeat is a ActivityTaskTimeoutType enum value - ActivityTaskTimeoutTypeHeartbeat = "HEARTBEAT" -) - -const ( - // CancelTimerFailedCauseTimerIdUnknown is a CancelTimerFailedCause enum value - CancelTimerFailedCauseTimerIdUnknown = "TIMER_ID_UNKNOWN" - - // CancelTimerFailedCauseOperationNotPermitted is a CancelTimerFailedCause enum value - CancelTimerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // CancelWorkflowExecutionFailedCauseUnhandledDecision is a CancelWorkflowExecutionFailedCause enum value - CancelWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" - - // CancelWorkflowExecutionFailedCauseOperationNotPermitted is a CancelWorkflowExecutionFailedCause enum value - CancelWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // ChildPolicyTerminate is a ChildPolicy enum value - ChildPolicyTerminate = "TERMINATE" - - // ChildPolicyRequestCancel is a ChildPolicy enum value - ChildPolicyRequestCancel = "REQUEST_CANCEL" - - // ChildPolicyAbandon is a ChildPolicy enum value - ChildPolicyAbandon = "ABANDON" -) - -const ( - // CloseStatusCompleted is a CloseStatus enum value - CloseStatusCompleted = "COMPLETED" - - // CloseStatusFailed is a CloseStatus enum value - CloseStatusFailed = "FAILED" - - // CloseStatusCanceled is a CloseStatus enum value - CloseStatusCanceled = "CANCELED" - - // 
CloseStatusTerminated is a CloseStatus enum value - CloseStatusTerminated = "TERMINATED" - - // CloseStatusContinuedAsNew is a CloseStatus enum value - CloseStatusContinuedAsNew = "CONTINUED_AS_NEW" - - // CloseStatusTimedOut is a CloseStatus enum value - CloseStatusTimedOut = "TIMED_OUT" -) - -const ( - // CompleteWorkflowExecutionFailedCauseUnhandledDecision is a CompleteWorkflowExecutionFailedCause enum value - CompleteWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" - - // CompleteWorkflowExecutionFailedCauseOperationNotPermitted is a CompleteWorkflowExecutionFailedCause enum value - CompleteWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // ContinueAsNewWorkflowExecutionFailedCauseUnhandledDecision is a ContinueAsNewWorkflowExecutionFailedCause enum value - ContinueAsNewWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" - - // ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDeprecated is a ContinueAsNewWorkflowExecutionFailedCause enum value - ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDeprecated = "WORKFLOW_TYPE_DEPRECATED" - - // ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist is a ContinueAsNewWorkflowExecutionFailedCause enum value - ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist = "WORKFLOW_TYPE_DOES_NOT_EXIST" - - // ContinueAsNewWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined is a ContinueAsNewWorkflowExecutionFailedCause enum value - ContinueAsNewWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined = "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" - - // ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined is a ContinueAsNewWorkflowExecutionFailedCause enum value - ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" - - // 
ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskListUndefined is a ContinueAsNewWorkflowExecutionFailedCause enum value - ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskListUndefined = "DEFAULT_TASK_LIST_UNDEFINED" - - // ContinueAsNewWorkflowExecutionFailedCauseDefaultChildPolicyUndefined is a ContinueAsNewWorkflowExecutionFailedCause enum value - ContinueAsNewWorkflowExecutionFailedCauseDefaultChildPolicyUndefined = "DEFAULT_CHILD_POLICY_UNDEFINED" - - // ContinueAsNewWorkflowExecutionFailedCauseContinueAsNewWorkflowExecutionRateExceeded is a ContinueAsNewWorkflowExecutionFailedCause enum value - ContinueAsNewWorkflowExecutionFailedCauseContinueAsNewWorkflowExecutionRateExceeded = "CONTINUE_AS_NEW_WORKFLOW_EXECUTION_RATE_EXCEEDED" - - // ContinueAsNewWorkflowExecutionFailedCauseOperationNotPermitted is a ContinueAsNewWorkflowExecutionFailedCause enum value - ContinueAsNewWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // DecisionTaskTimeoutTypeStartToClose is a DecisionTaskTimeoutType enum value - DecisionTaskTimeoutTypeStartToClose = "START_TO_CLOSE" -) - -const ( - // DecisionTypeScheduleActivityTask is a DecisionType enum value - DecisionTypeScheduleActivityTask = "ScheduleActivityTask" - - // DecisionTypeRequestCancelActivityTask is a DecisionType enum value - DecisionTypeRequestCancelActivityTask = "RequestCancelActivityTask" - - // DecisionTypeCompleteWorkflowExecution is a DecisionType enum value - DecisionTypeCompleteWorkflowExecution = "CompleteWorkflowExecution" - - // DecisionTypeFailWorkflowExecution is a DecisionType enum value - DecisionTypeFailWorkflowExecution = "FailWorkflowExecution" - - // DecisionTypeCancelWorkflowExecution is a DecisionType enum value - DecisionTypeCancelWorkflowExecution = "CancelWorkflowExecution" - - // DecisionTypeContinueAsNewWorkflowExecution is a DecisionType enum value - DecisionTypeContinueAsNewWorkflowExecution = "ContinueAsNewWorkflowExecution" - - // 
DecisionTypeRecordMarker is a DecisionType enum value - DecisionTypeRecordMarker = "RecordMarker" - - // DecisionTypeStartTimer is a DecisionType enum value - DecisionTypeStartTimer = "StartTimer" - - // DecisionTypeCancelTimer is a DecisionType enum value - DecisionTypeCancelTimer = "CancelTimer" - - // DecisionTypeSignalExternalWorkflowExecution is a DecisionType enum value - DecisionTypeSignalExternalWorkflowExecution = "SignalExternalWorkflowExecution" - - // DecisionTypeRequestCancelExternalWorkflowExecution is a DecisionType enum value - DecisionTypeRequestCancelExternalWorkflowExecution = "RequestCancelExternalWorkflowExecution" - - // DecisionTypeStartChildWorkflowExecution is a DecisionType enum value - DecisionTypeStartChildWorkflowExecution = "StartChildWorkflowExecution" - - // DecisionTypeScheduleLambdaFunction is a DecisionType enum value - DecisionTypeScheduleLambdaFunction = "ScheduleLambdaFunction" -) - -const ( - // EventTypeWorkflowExecutionStarted is a EventType enum value - EventTypeWorkflowExecutionStarted = "WorkflowExecutionStarted" - - // EventTypeWorkflowExecutionCancelRequested is a EventType enum value - EventTypeWorkflowExecutionCancelRequested = "WorkflowExecutionCancelRequested" - - // EventTypeWorkflowExecutionCompleted is a EventType enum value - EventTypeWorkflowExecutionCompleted = "WorkflowExecutionCompleted" - - // EventTypeCompleteWorkflowExecutionFailed is a EventType enum value - EventTypeCompleteWorkflowExecutionFailed = "CompleteWorkflowExecutionFailed" - - // EventTypeWorkflowExecutionFailed is a EventType enum value - EventTypeWorkflowExecutionFailed = "WorkflowExecutionFailed" - - // EventTypeFailWorkflowExecutionFailed is a EventType enum value - EventTypeFailWorkflowExecutionFailed = "FailWorkflowExecutionFailed" - - // EventTypeWorkflowExecutionTimedOut is a EventType enum value - EventTypeWorkflowExecutionTimedOut = "WorkflowExecutionTimedOut" - - // EventTypeWorkflowExecutionCanceled is a EventType enum value - 
EventTypeWorkflowExecutionCanceled = "WorkflowExecutionCanceled" - - // EventTypeCancelWorkflowExecutionFailed is a EventType enum value - EventTypeCancelWorkflowExecutionFailed = "CancelWorkflowExecutionFailed" - - // EventTypeWorkflowExecutionContinuedAsNew is a EventType enum value - EventTypeWorkflowExecutionContinuedAsNew = "WorkflowExecutionContinuedAsNew" - - // EventTypeContinueAsNewWorkflowExecutionFailed is a EventType enum value - EventTypeContinueAsNewWorkflowExecutionFailed = "ContinueAsNewWorkflowExecutionFailed" - - // EventTypeWorkflowExecutionTerminated is a EventType enum value - EventTypeWorkflowExecutionTerminated = "WorkflowExecutionTerminated" - - // EventTypeDecisionTaskScheduled is a EventType enum value - EventTypeDecisionTaskScheduled = "DecisionTaskScheduled" - - // EventTypeDecisionTaskStarted is a EventType enum value - EventTypeDecisionTaskStarted = "DecisionTaskStarted" - - // EventTypeDecisionTaskCompleted is a EventType enum value - EventTypeDecisionTaskCompleted = "DecisionTaskCompleted" - - // EventTypeDecisionTaskTimedOut is a EventType enum value - EventTypeDecisionTaskTimedOut = "DecisionTaskTimedOut" - - // EventTypeActivityTaskScheduled is a EventType enum value - EventTypeActivityTaskScheduled = "ActivityTaskScheduled" - - // EventTypeScheduleActivityTaskFailed is a EventType enum value - EventTypeScheduleActivityTaskFailed = "ScheduleActivityTaskFailed" - - // EventTypeActivityTaskStarted is a EventType enum value - EventTypeActivityTaskStarted = "ActivityTaskStarted" - - // EventTypeActivityTaskCompleted is a EventType enum value - EventTypeActivityTaskCompleted = "ActivityTaskCompleted" - - // EventTypeActivityTaskFailed is a EventType enum value - EventTypeActivityTaskFailed = "ActivityTaskFailed" - - // EventTypeActivityTaskTimedOut is a EventType enum value - EventTypeActivityTaskTimedOut = "ActivityTaskTimedOut" - - // EventTypeActivityTaskCanceled is a EventType enum value - EventTypeActivityTaskCanceled = 
"ActivityTaskCanceled" - - // EventTypeActivityTaskCancelRequested is a EventType enum value - EventTypeActivityTaskCancelRequested = "ActivityTaskCancelRequested" - - // EventTypeRequestCancelActivityTaskFailed is a EventType enum value - EventTypeRequestCancelActivityTaskFailed = "RequestCancelActivityTaskFailed" - - // EventTypeWorkflowExecutionSignaled is a EventType enum value - EventTypeWorkflowExecutionSignaled = "WorkflowExecutionSignaled" - - // EventTypeMarkerRecorded is a EventType enum value - EventTypeMarkerRecorded = "MarkerRecorded" - - // EventTypeRecordMarkerFailed is a EventType enum value - EventTypeRecordMarkerFailed = "RecordMarkerFailed" - - // EventTypeTimerStarted is a EventType enum value - EventTypeTimerStarted = "TimerStarted" - - // EventTypeStartTimerFailed is a EventType enum value - EventTypeStartTimerFailed = "StartTimerFailed" - - // EventTypeTimerFired is a EventType enum value - EventTypeTimerFired = "TimerFired" - - // EventTypeTimerCanceled is a EventType enum value - EventTypeTimerCanceled = "TimerCanceled" - - // EventTypeCancelTimerFailed is a EventType enum value - EventTypeCancelTimerFailed = "CancelTimerFailed" - - // EventTypeStartChildWorkflowExecutionInitiated is a EventType enum value - EventTypeStartChildWorkflowExecutionInitiated = "StartChildWorkflowExecutionInitiated" - - // EventTypeStartChildWorkflowExecutionFailed is a EventType enum value - EventTypeStartChildWorkflowExecutionFailed = "StartChildWorkflowExecutionFailed" - - // EventTypeChildWorkflowExecutionStarted is a EventType enum value - EventTypeChildWorkflowExecutionStarted = "ChildWorkflowExecutionStarted" - - // EventTypeChildWorkflowExecutionCompleted is a EventType enum value - EventTypeChildWorkflowExecutionCompleted = "ChildWorkflowExecutionCompleted" - - // EventTypeChildWorkflowExecutionFailed is a EventType enum value - EventTypeChildWorkflowExecutionFailed = "ChildWorkflowExecutionFailed" - - // EventTypeChildWorkflowExecutionTimedOut is a 
EventType enum value - EventTypeChildWorkflowExecutionTimedOut = "ChildWorkflowExecutionTimedOut" - - // EventTypeChildWorkflowExecutionCanceled is a EventType enum value - EventTypeChildWorkflowExecutionCanceled = "ChildWorkflowExecutionCanceled" - - // EventTypeChildWorkflowExecutionTerminated is a EventType enum value - EventTypeChildWorkflowExecutionTerminated = "ChildWorkflowExecutionTerminated" - - // EventTypeSignalExternalWorkflowExecutionInitiated is a EventType enum value - EventTypeSignalExternalWorkflowExecutionInitiated = "SignalExternalWorkflowExecutionInitiated" - - // EventTypeSignalExternalWorkflowExecutionFailed is a EventType enum value - EventTypeSignalExternalWorkflowExecutionFailed = "SignalExternalWorkflowExecutionFailed" - - // EventTypeExternalWorkflowExecutionSignaled is a EventType enum value - EventTypeExternalWorkflowExecutionSignaled = "ExternalWorkflowExecutionSignaled" - - // EventTypeRequestCancelExternalWorkflowExecutionInitiated is a EventType enum value - EventTypeRequestCancelExternalWorkflowExecutionInitiated = "RequestCancelExternalWorkflowExecutionInitiated" - - // EventTypeRequestCancelExternalWorkflowExecutionFailed is a EventType enum value - EventTypeRequestCancelExternalWorkflowExecutionFailed = "RequestCancelExternalWorkflowExecutionFailed" - - // EventTypeExternalWorkflowExecutionCancelRequested is a EventType enum value - EventTypeExternalWorkflowExecutionCancelRequested = "ExternalWorkflowExecutionCancelRequested" - - // EventTypeLambdaFunctionScheduled is a EventType enum value - EventTypeLambdaFunctionScheduled = "LambdaFunctionScheduled" - - // EventTypeLambdaFunctionStarted is a EventType enum value - EventTypeLambdaFunctionStarted = "LambdaFunctionStarted" - - // EventTypeLambdaFunctionCompleted is a EventType enum value - EventTypeLambdaFunctionCompleted = "LambdaFunctionCompleted" - - // EventTypeLambdaFunctionFailed is a EventType enum value - EventTypeLambdaFunctionFailed = "LambdaFunctionFailed" - - // 
EventTypeLambdaFunctionTimedOut is a EventType enum value - EventTypeLambdaFunctionTimedOut = "LambdaFunctionTimedOut" - - // EventTypeScheduleLambdaFunctionFailed is a EventType enum value - EventTypeScheduleLambdaFunctionFailed = "ScheduleLambdaFunctionFailed" - - // EventTypeStartLambdaFunctionFailed is a EventType enum value - EventTypeStartLambdaFunctionFailed = "StartLambdaFunctionFailed" -) - -const ( - // ExecutionStatusOpen is a ExecutionStatus enum value - ExecutionStatusOpen = "OPEN" - - // ExecutionStatusClosed is a ExecutionStatus enum value - ExecutionStatusClosed = "CLOSED" -) - -const ( - // FailWorkflowExecutionFailedCauseUnhandledDecision is a FailWorkflowExecutionFailedCause enum value - FailWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" - - // FailWorkflowExecutionFailedCauseOperationNotPermitted is a FailWorkflowExecutionFailedCause enum value - FailWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // LambdaFunctionTimeoutTypeStartToClose is a LambdaFunctionTimeoutType enum value - LambdaFunctionTimeoutTypeStartToClose = "START_TO_CLOSE" -) - -const ( - // RecordMarkerFailedCauseOperationNotPermitted is a RecordMarkerFailedCause enum value - RecordMarkerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // RegistrationStatusRegistered is a RegistrationStatus enum value - RegistrationStatusRegistered = "REGISTERED" - - // RegistrationStatusDeprecated is a RegistrationStatus enum value - RegistrationStatusDeprecated = "DEPRECATED" -) - -const ( - // RequestCancelActivityTaskFailedCauseActivityIdUnknown is a RequestCancelActivityTaskFailedCause enum value - RequestCancelActivityTaskFailedCauseActivityIdUnknown = "ACTIVITY_ID_UNKNOWN" - - // RequestCancelActivityTaskFailedCauseOperationNotPermitted is a RequestCancelActivityTaskFailedCause enum value - RequestCancelActivityTaskFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // 
RequestCancelExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution is a RequestCancelExternalWorkflowExecutionFailedCause enum value - RequestCancelExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" - - // RequestCancelExternalWorkflowExecutionFailedCauseRequestCancelExternalWorkflowExecutionRateExceeded is a RequestCancelExternalWorkflowExecutionFailedCause enum value - RequestCancelExternalWorkflowExecutionFailedCauseRequestCancelExternalWorkflowExecutionRateExceeded = "REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" - - // RequestCancelExternalWorkflowExecutionFailedCauseOperationNotPermitted is a RequestCancelExternalWorkflowExecutionFailedCause enum value - RequestCancelExternalWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // ScheduleActivityTaskFailedCauseActivityTypeDeprecated is a ScheduleActivityTaskFailedCause enum value - ScheduleActivityTaskFailedCauseActivityTypeDeprecated = "ACTIVITY_TYPE_DEPRECATED" - - // ScheduleActivityTaskFailedCauseActivityTypeDoesNotExist is a ScheduleActivityTaskFailedCause enum value - ScheduleActivityTaskFailedCauseActivityTypeDoesNotExist = "ACTIVITY_TYPE_DOES_NOT_EXIST" - - // ScheduleActivityTaskFailedCauseActivityIdAlreadyInUse is a ScheduleActivityTaskFailedCause enum value - ScheduleActivityTaskFailedCauseActivityIdAlreadyInUse = "ACTIVITY_ID_ALREADY_IN_USE" - - // ScheduleActivityTaskFailedCauseOpenActivitiesLimitExceeded is a ScheduleActivityTaskFailedCause enum value - ScheduleActivityTaskFailedCauseOpenActivitiesLimitExceeded = "OPEN_ACTIVITIES_LIMIT_EXCEEDED" - - // ScheduleActivityTaskFailedCauseActivityCreationRateExceeded is a ScheduleActivityTaskFailedCause enum value - ScheduleActivityTaskFailedCauseActivityCreationRateExceeded = "ACTIVITY_CREATION_RATE_EXCEEDED" - - // ScheduleActivityTaskFailedCauseDefaultScheduleToCloseTimeoutUndefined is a ScheduleActivityTaskFailedCause enum 
value - ScheduleActivityTaskFailedCauseDefaultScheduleToCloseTimeoutUndefined = "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED" - - // ScheduleActivityTaskFailedCauseDefaultTaskListUndefined is a ScheduleActivityTaskFailedCause enum value - ScheduleActivityTaskFailedCauseDefaultTaskListUndefined = "DEFAULT_TASK_LIST_UNDEFINED" - - // ScheduleActivityTaskFailedCauseDefaultScheduleToStartTimeoutUndefined is a ScheduleActivityTaskFailedCause enum value - ScheduleActivityTaskFailedCauseDefaultScheduleToStartTimeoutUndefined = "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED" - - // ScheduleActivityTaskFailedCauseDefaultStartToCloseTimeoutUndefined is a ScheduleActivityTaskFailedCause enum value - ScheduleActivityTaskFailedCauseDefaultStartToCloseTimeoutUndefined = "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED" - - // ScheduleActivityTaskFailedCauseDefaultHeartbeatTimeoutUndefined is a ScheduleActivityTaskFailedCause enum value - ScheduleActivityTaskFailedCauseDefaultHeartbeatTimeoutUndefined = "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED" - - // ScheduleActivityTaskFailedCauseOperationNotPermitted is a ScheduleActivityTaskFailedCause enum value - ScheduleActivityTaskFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // ScheduleLambdaFunctionFailedCauseIdAlreadyInUse is a ScheduleLambdaFunctionFailedCause enum value - ScheduleLambdaFunctionFailedCauseIdAlreadyInUse = "ID_ALREADY_IN_USE" - - // ScheduleLambdaFunctionFailedCauseOpenLambdaFunctionsLimitExceeded is a ScheduleLambdaFunctionFailedCause enum value - ScheduleLambdaFunctionFailedCauseOpenLambdaFunctionsLimitExceeded = "OPEN_LAMBDA_FUNCTIONS_LIMIT_EXCEEDED" - - // ScheduleLambdaFunctionFailedCauseLambdaFunctionCreationRateExceeded is a ScheduleLambdaFunctionFailedCause enum value - ScheduleLambdaFunctionFailedCauseLambdaFunctionCreationRateExceeded = "LAMBDA_FUNCTION_CREATION_RATE_EXCEEDED" - - // ScheduleLambdaFunctionFailedCauseLambdaServiceNotAvailableInRegion is a ScheduleLambdaFunctionFailedCause 
enum value - ScheduleLambdaFunctionFailedCauseLambdaServiceNotAvailableInRegion = "LAMBDA_SERVICE_NOT_AVAILABLE_IN_REGION" -) - -const ( - // SignalExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution is a SignalExternalWorkflowExecutionFailedCause enum value - SignalExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" - - // SignalExternalWorkflowExecutionFailedCauseSignalExternalWorkflowExecutionRateExceeded is a SignalExternalWorkflowExecutionFailedCause enum value - SignalExternalWorkflowExecutionFailedCauseSignalExternalWorkflowExecutionRateExceeded = "SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" - - // SignalExternalWorkflowExecutionFailedCauseOperationNotPermitted is a SignalExternalWorkflowExecutionFailedCause enum value - SignalExternalWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // StartChildWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist is a StartChildWorkflowExecutionFailedCause enum value - StartChildWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist = "WORKFLOW_TYPE_DOES_NOT_EXIST" - - // StartChildWorkflowExecutionFailedCauseWorkflowTypeDeprecated is a StartChildWorkflowExecutionFailedCause enum value - StartChildWorkflowExecutionFailedCauseWorkflowTypeDeprecated = "WORKFLOW_TYPE_DEPRECATED" - - // StartChildWorkflowExecutionFailedCauseOpenChildrenLimitExceeded is a StartChildWorkflowExecutionFailedCause enum value - StartChildWorkflowExecutionFailedCauseOpenChildrenLimitExceeded = "OPEN_CHILDREN_LIMIT_EXCEEDED" - - // StartChildWorkflowExecutionFailedCauseOpenWorkflowsLimitExceeded is a StartChildWorkflowExecutionFailedCause enum value - StartChildWorkflowExecutionFailedCauseOpenWorkflowsLimitExceeded = "OPEN_WORKFLOWS_LIMIT_EXCEEDED" - - // StartChildWorkflowExecutionFailedCauseChildCreationRateExceeded is a StartChildWorkflowExecutionFailedCause enum value - 
StartChildWorkflowExecutionFailedCauseChildCreationRateExceeded = "CHILD_CREATION_RATE_EXCEEDED" - - // StartChildWorkflowExecutionFailedCauseWorkflowAlreadyRunning is a StartChildWorkflowExecutionFailedCause enum value - StartChildWorkflowExecutionFailedCauseWorkflowAlreadyRunning = "WORKFLOW_ALREADY_RUNNING" - - // StartChildWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined is a StartChildWorkflowExecutionFailedCause enum value - StartChildWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined = "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" - - // StartChildWorkflowExecutionFailedCauseDefaultTaskListUndefined is a StartChildWorkflowExecutionFailedCause enum value - StartChildWorkflowExecutionFailedCauseDefaultTaskListUndefined = "DEFAULT_TASK_LIST_UNDEFINED" - - // StartChildWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined is a StartChildWorkflowExecutionFailedCause enum value - StartChildWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" - - // StartChildWorkflowExecutionFailedCauseDefaultChildPolicyUndefined is a StartChildWorkflowExecutionFailedCause enum value - StartChildWorkflowExecutionFailedCauseDefaultChildPolicyUndefined = "DEFAULT_CHILD_POLICY_UNDEFINED" - - // StartChildWorkflowExecutionFailedCauseOperationNotPermitted is a StartChildWorkflowExecutionFailedCause enum value - StartChildWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // StartLambdaFunctionFailedCauseAssumeRoleFailed is a StartLambdaFunctionFailedCause enum value - StartLambdaFunctionFailedCauseAssumeRoleFailed = "ASSUME_ROLE_FAILED" -) - -const ( - // StartTimerFailedCauseTimerIdAlreadyInUse is a StartTimerFailedCause enum value - StartTimerFailedCauseTimerIdAlreadyInUse = "TIMER_ID_ALREADY_IN_USE" - - // StartTimerFailedCauseOpenTimersLimitExceeded is a StartTimerFailedCause enum value - 
StartTimerFailedCauseOpenTimersLimitExceeded = "OPEN_TIMERS_LIMIT_EXCEEDED" - - // StartTimerFailedCauseTimerCreationRateExceeded is a StartTimerFailedCause enum value - StartTimerFailedCauseTimerCreationRateExceeded = "TIMER_CREATION_RATE_EXCEEDED" - - // StartTimerFailedCauseOperationNotPermitted is a StartTimerFailedCause enum value - StartTimerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" -) - -const ( - // WorkflowExecutionCancelRequestedCauseChildPolicyApplied is a WorkflowExecutionCancelRequestedCause enum value - WorkflowExecutionCancelRequestedCauseChildPolicyApplied = "CHILD_POLICY_APPLIED" -) - -const ( - // WorkflowExecutionTerminatedCauseChildPolicyApplied is a WorkflowExecutionTerminatedCause enum value - WorkflowExecutionTerminatedCauseChildPolicyApplied = "CHILD_POLICY_APPLIED" - - // WorkflowExecutionTerminatedCauseEventLimitExceeded is a WorkflowExecutionTerminatedCause enum value - WorkflowExecutionTerminatedCauseEventLimitExceeded = "EVENT_LIMIT_EXCEEDED" - - // WorkflowExecutionTerminatedCauseOperatorInitiated is a WorkflowExecutionTerminatedCause enum value - WorkflowExecutionTerminatedCauseOperatorInitiated = "OPERATOR_INITIATED" -) - -const ( - // WorkflowExecutionTimeoutTypeStartToClose is a WorkflowExecutionTimeoutType enum value - WorkflowExecutionTimeoutTypeStartToClose = "START_TO_CLOSE" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/doc.go b/vendor/github.com/aws/aws-sdk-go/service/swf/doc.go deleted file mode 100644 index bbb8f45ab5c..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/swf/doc.go +++ /dev/null @@ -1,38 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package swf provides the client and types for making API -// requests to Amazon Simple Workflow Service. -// -// The Amazon Simple Workflow Service (Amazon SWF) makes it easy to build applications -// that use Amazon's cloud to coordinate work across distributed components. 
-// In Amazon SWF, a task represents a logical unit of work that is performed -// by a component of your workflow. Coordinating tasks in a workflow involves -// managing intertask dependencies, scheduling, and concurrency in accordance -// with the logical flow of the application. -// -// Amazon SWF gives you full control over implementing tasks and coordinating -// them without worrying about underlying complexities such as tracking their -// progress and maintaining their state. -// -// This documentation serves as reference only. For a broader overview of the -// Amazon SWF programming model, see the Amazon SWF Developer Guide (http://docs.aws.amazon.com/amazonswf/latest/developerguide/). -// -// See swf package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/swf/ -// -// Using the Client -// -// To contact Amazon Simple Workflow Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon Simple Workflow Service client SWF for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/swf/#New -package swf diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/errors.go b/vendor/github.com/aws/aws-sdk-go/service/swf/errors.go deleted file mode 100644 index 7baff0daf68..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/swf/errors.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package swf - -const ( - - // ErrCodeDefaultUndefinedFault for service response error code - // "DefaultUndefinedFault". - // - // The StartWorkflowExecution API action was called without the required parameters - // set. - // - // Some workflow execution parameters, such as the decision taskList, must be - // set to start the execution. However, these parameters might have been set - // as defaults when the workflow type was registered. In this case, you can - // omit these parameters from the StartWorkflowExecution call and Amazon SWF - // uses the values defined in the workflow type. - // - // If these parameters aren't set and no default parameters were defined in - // the workflow type, this error is displayed. - ErrCodeDefaultUndefinedFault = "DefaultUndefinedFault" - - // ErrCodeDomainAlreadyExistsFault for service response error code - // "DomainAlreadyExistsFault". - // - // Returned if the specified domain already exists. You get this fault even - // if the existing domain is in deprecated status. - ErrCodeDomainAlreadyExistsFault = "DomainAlreadyExistsFault" - - // ErrCodeDomainDeprecatedFault for service response error code - // "DomainDeprecatedFault". - // - // Returned when the specified domain has been deprecated. - ErrCodeDomainDeprecatedFault = "DomainDeprecatedFault" - - // ErrCodeLimitExceededFault for service response error code - // "LimitExceededFault". - // - // Returned by any operation if a system imposed limitation has been reached. - // To address this fault you should either clean up unused resources or increase - // the limit by contacting AWS. - ErrCodeLimitExceededFault = "LimitExceededFault" - - // ErrCodeOperationNotPermittedFault for service response error code - // "OperationNotPermittedFault". - // - // Returned when the caller doesn't have sufficient permissions to invoke the - // action. 
- ErrCodeOperationNotPermittedFault = "OperationNotPermittedFault" - - // ErrCodeTypeAlreadyExistsFault for service response error code - // "TypeAlreadyExistsFault". - // - // Returned if the type already exists in the specified domain. You get this - // fault even if the existing type is in deprecated status. You can specify - // another version if the intent is to create a new distinct version of the - // type. - ErrCodeTypeAlreadyExistsFault = "TypeAlreadyExistsFault" - - // ErrCodeTypeDeprecatedFault for service response error code - // "TypeDeprecatedFault". - // - // Returned when the specified activity or workflow type was already deprecated. - ErrCodeTypeDeprecatedFault = "TypeDeprecatedFault" - - // ErrCodeUnknownResourceFault for service response error code - // "UnknownResourceFault". - // - // Returned when the named resource cannot be found with in the scope of this - // operation (region or domain). This could happen if the named resource was - // never created or is no longer available for this operation. - ErrCodeUnknownResourceFault = "UnknownResourceFault" - - // ErrCodeWorkflowExecutionAlreadyStartedFault for service response error code - // "WorkflowExecutionAlreadyStartedFault". - // - // Returned by StartWorkflowExecution when an open execution with the same workflowId - // is already running in the specified domain. - ErrCodeWorkflowExecutionAlreadyStartedFault = "WorkflowExecutionAlreadyStartedFault" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/service.go b/vendor/github.com/aws/aws-sdk-go/service/swf/service.go deleted file mode 100644 index ac85f412c7a..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/swf/service.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package swf - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// SWF provides the API operation methods for making requests to -// Amazon Simple Workflow Service. See this package's package overview docs -// for details on the service. -// -// SWF methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type SWF struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "swf" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the SWF client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a SWF client from just a session. -// svc := swf.New(mySession) -// -// // Create a SWF client with additional configuration -// svc := swf.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *SWF { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SWF { - svc := &SWF{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2012-01-25", - JSONVersion: "1.0", - TargetPrefix: "SimpleWorkflowService", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a SWF operation and runs any -// custom request initialization. -func (c *SWF) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go deleted file mode 100644 index 1eae0467869..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go +++ /dev/null @@ -1,3686 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package workspaces - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opCreateTags = "CreateTags" - -// CreateTagsRequest generates a "aws/request.Request" representing the -// client's request for the CreateTags operation. 
The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateTags for more information on using the CreateTags -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateTagsRequest method. -// req, resp := client.CreateTagsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTags -func (c *WorkSpaces) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { - op := &request.Operation{ - Name: opCreateTags, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateTagsInput{} - } - - output = &CreateTagsOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateTags API operation for Amazon WorkSpaces. -// -// Creates tags for the specified WorkSpace. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation CreateTags for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The resource could not be found. -// -// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" -// One or more parameter values are not valid. 
-// -// * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" -// Your resource limits have been exceeded. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTags -func (c *WorkSpaces) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { - req, out := c.CreateTagsRequest(input) - return out, req.Send() -} - -// CreateTagsWithContext is the same as CreateTags with the addition of -// the ability to pass a context and additional request options. -// -// See CreateTags for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) CreateTagsWithContext(ctx aws.Context, input *CreateTagsInput, opts ...request.Option) (*CreateTagsOutput, error) { - req, out := c.CreateTagsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateWorkspaces = "CreateWorkspaces" - -// CreateWorkspacesRequest generates a "aws/request.Request" representing the -// client's request for the CreateWorkspaces operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateWorkspaces for more information on using the CreateWorkspaces -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateWorkspacesRequest method. 
-// req, resp := client.CreateWorkspacesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspaces -func (c *WorkSpaces) CreateWorkspacesRequest(input *CreateWorkspacesInput) (req *request.Request, output *CreateWorkspacesOutput) { - op := &request.Operation{ - Name: opCreateWorkspaces, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateWorkspacesInput{} - } - - output = &CreateWorkspacesOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateWorkspaces API operation for Amazon WorkSpaces. -// -// Creates one or more WorkSpaces. -// -// This operation is asynchronous and returns before the WorkSpaces are created. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation CreateWorkspaces for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" -// Your resource limits have been exceeded. -// -// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" -// One or more parameter values are not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspaces -func (c *WorkSpaces) CreateWorkspaces(input *CreateWorkspacesInput) (*CreateWorkspacesOutput, error) { - req, out := c.CreateWorkspacesRequest(input) - return out, req.Send() -} - -// CreateWorkspacesWithContext is the same as CreateWorkspaces with the addition of -// the ability to pass a context and additional request options. -// -// See CreateWorkspaces for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) CreateWorkspacesWithContext(ctx aws.Context, input *CreateWorkspacesInput, opts ...request.Option) (*CreateWorkspacesOutput, error) { - req, out := c.CreateWorkspacesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteTags = "DeleteTags" - -// DeleteTagsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteTags operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteTags for more information on using the DeleteTags -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteTagsRequest method. -// req, resp := client.DeleteTagsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTags -func (c *WorkSpaces) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { - op := &request.Operation{ - Name: opDeleteTags, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteTagsInput{} - } - - output = &DeleteTagsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteTags API operation for Amazon WorkSpaces. 
-// -// Deletes the specified tags from a WorkSpace. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation DeleteTags for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The resource could not be found. -// -// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" -// One or more parameter values are not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTags -func (c *WorkSpaces) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { - req, out := c.DeleteTagsRequest(input) - return out, req.Send() -} - -// DeleteTagsWithContext is the same as DeleteTags with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteTags for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsInput, opts ...request.Option) (*DeleteTagsOutput, error) { - req, out := c.DeleteTagsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeTags = "DescribeTags" - -// DescribeTagsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTags operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DescribeTags for more information on using the DescribeTags -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeTagsRequest method. -// req, resp := client.DescribeTagsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTags -func (c *WorkSpaces) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { - op := &request.Operation{ - Name: opDescribeTags, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeTagsInput{} - } - - output = &DescribeTagsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeTags API operation for Amazon WorkSpaces. -// -// Describes the tags for the specified WorkSpace. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation DescribeTags for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The resource could not be found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTags -func (c *WorkSpaces) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { - req, out := c.DescribeTagsRequest(input) - return out, req.Send() -} - -// DescribeTagsWithContext is the same as DescribeTags with the addition of -// the ability to pass a context and additional request options. 
-// -// See DescribeTags for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, opts ...request.Option) (*DescribeTagsOutput, error) { - req, out := c.DescribeTagsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeWorkspaceBundles = "DescribeWorkspaceBundles" - -// DescribeWorkspaceBundlesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeWorkspaceBundles operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeWorkspaceBundles for more information on using the DescribeWorkspaceBundles -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeWorkspaceBundlesRequest method. 
-// req, resp := client.DescribeWorkspaceBundlesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundles -func (c *WorkSpaces) DescribeWorkspaceBundlesRequest(input *DescribeWorkspaceBundlesInput) (req *request.Request, output *DescribeWorkspaceBundlesOutput) { - op := &request.Operation{ - Name: opDescribeWorkspaceBundles, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeWorkspaceBundlesInput{} - } - - output = &DescribeWorkspaceBundlesOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeWorkspaceBundles API operation for Amazon WorkSpaces. -// -// Describes the available WorkSpace bundles. -// -// You can filter the results using either bundle ID or owner, but not both. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation DescribeWorkspaceBundles for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" -// One or more parameter values are not valid. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundles -func (c *WorkSpaces) DescribeWorkspaceBundles(input *DescribeWorkspaceBundlesInput) (*DescribeWorkspaceBundlesOutput, error) { - req, out := c.DescribeWorkspaceBundlesRequest(input) - return out, req.Send() -} - -// DescribeWorkspaceBundlesWithContext is the same as DescribeWorkspaceBundles with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeWorkspaceBundles for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) DescribeWorkspaceBundlesWithContext(ctx aws.Context, input *DescribeWorkspaceBundlesInput, opts ...request.Option) (*DescribeWorkspaceBundlesOutput, error) { - req, out := c.DescribeWorkspaceBundlesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// DescribeWorkspaceBundlesPages iterates over the pages of a DescribeWorkspaceBundles operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeWorkspaceBundles method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeWorkspaceBundles operation. 
-// pageNum := 0 -// err := client.DescribeWorkspaceBundlesPages(params, -// func(page *DescribeWorkspaceBundlesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *WorkSpaces) DescribeWorkspaceBundlesPages(input *DescribeWorkspaceBundlesInput, fn func(*DescribeWorkspaceBundlesOutput, bool) bool) error { - return c.DescribeWorkspaceBundlesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeWorkspaceBundlesPagesWithContext same as DescribeWorkspaceBundlesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) DescribeWorkspaceBundlesPagesWithContext(ctx aws.Context, input *DescribeWorkspaceBundlesInput, fn func(*DescribeWorkspaceBundlesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeWorkspaceBundlesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeWorkspaceBundlesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeWorkspaceBundlesOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opDescribeWorkspaceDirectories = "DescribeWorkspaceDirectories" - -// DescribeWorkspaceDirectoriesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeWorkspaceDirectories operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DescribeWorkspaceDirectories for more information on using the DescribeWorkspaceDirectories -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeWorkspaceDirectoriesRequest method. -// req, resp := client.DescribeWorkspaceDirectoriesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectories -func (c *WorkSpaces) DescribeWorkspaceDirectoriesRequest(input *DescribeWorkspaceDirectoriesInput) (req *request.Request, output *DescribeWorkspaceDirectoriesOutput) { - op := &request.Operation{ - Name: opDescribeWorkspaceDirectories, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeWorkspaceDirectoriesInput{} - } - - output = &DescribeWorkspaceDirectoriesOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeWorkspaceDirectories API operation for Amazon WorkSpaces. -// -// Describes the available AWS Directory Service directories that are registered -// with Amazon WorkSpaces. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation DescribeWorkspaceDirectories for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" -// One or more parameter values are not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectories -func (c *WorkSpaces) DescribeWorkspaceDirectories(input *DescribeWorkspaceDirectoriesInput) (*DescribeWorkspaceDirectoriesOutput, error) { - req, out := c.DescribeWorkspaceDirectoriesRequest(input) - return out, req.Send() -} - -// DescribeWorkspaceDirectoriesWithContext is the same as DescribeWorkspaceDirectories with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeWorkspaceDirectories for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) DescribeWorkspaceDirectoriesWithContext(ctx aws.Context, input *DescribeWorkspaceDirectoriesInput, opts ...request.Option) (*DescribeWorkspaceDirectoriesOutput, error) { - req, out := c.DescribeWorkspaceDirectoriesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// DescribeWorkspaceDirectoriesPages iterates over the pages of a DescribeWorkspaceDirectories operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeWorkspaceDirectories method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeWorkspaceDirectories operation. 
-// pageNum := 0 -// err := client.DescribeWorkspaceDirectoriesPages(params, -// func(page *DescribeWorkspaceDirectoriesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *WorkSpaces) DescribeWorkspaceDirectoriesPages(input *DescribeWorkspaceDirectoriesInput, fn func(*DescribeWorkspaceDirectoriesOutput, bool) bool) error { - return c.DescribeWorkspaceDirectoriesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeWorkspaceDirectoriesPagesWithContext same as DescribeWorkspaceDirectoriesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) DescribeWorkspaceDirectoriesPagesWithContext(ctx aws.Context, input *DescribeWorkspaceDirectoriesInput, fn func(*DescribeWorkspaceDirectoriesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeWorkspaceDirectoriesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeWorkspaceDirectoriesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeWorkspaceDirectoriesOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opDescribeWorkspaces = "DescribeWorkspaces" - -// DescribeWorkspacesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeWorkspaces operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DescribeWorkspaces for more information on using the DescribeWorkspaces -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeWorkspacesRequest method. -// req, resp := client.DescribeWorkspacesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaces -func (c *WorkSpaces) DescribeWorkspacesRequest(input *DescribeWorkspacesInput) (req *request.Request, output *DescribeWorkspacesOutput) { - op := &request.Operation{ - Name: opDescribeWorkspaces, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "Limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeWorkspacesInput{} - } - - output = &DescribeWorkspacesOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeWorkspaces API operation for Amazon WorkSpaces. -// -// Describes the specified WorkSpaces. -// -// You can filter the results using bundle ID, directory ID, or owner, but you -// can specify only one filter at a time. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation DescribeWorkspaces for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" -// One or more parameter values are not valid. 
-// -// * ErrCodeResourceUnavailableException "ResourceUnavailableException" -// The specified resource is not available. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaces -func (c *WorkSpaces) DescribeWorkspaces(input *DescribeWorkspacesInput) (*DescribeWorkspacesOutput, error) { - req, out := c.DescribeWorkspacesRequest(input) - return out, req.Send() -} - -// DescribeWorkspacesWithContext is the same as DescribeWorkspaces with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeWorkspaces for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) DescribeWorkspacesWithContext(ctx aws.Context, input *DescribeWorkspacesInput, opts ...request.Option) (*DescribeWorkspacesOutput, error) { - req, out := c.DescribeWorkspacesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// DescribeWorkspacesPages iterates over the pages of a DescribeWorkspaces operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeWorkspaces method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeWorkspaces operation. 
-// pageNum := 0 -// err := client.DescribeWorkspacesPages(params, -// func(page *DescribeWorkspacesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *WorkSpaces) DescribeWorkspacesPages(input *DescribeWorkspacesInput, fn func(*DescribeWorkspacesOutput, bool) bool) error { - return c.DescribeWorkspacesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeWorkspacesPagesWithContext same as DescribeWorkspacesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) DescribeWorkspacesPagesWithContext(ctx aws.Context, input *DescribeWorkspacesInput, fn func(*DescribeWorkspacesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeWorkspacesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeWorkspacesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeWorkspacesOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opDescribeWorkspacesConnectionStatus = "DescribeWorkspacesConnectionStatus" - -// DescribeWorkspacesConnectionStatusRequest generates a "aws/request.Request" representing the -// client's request for the DescribeWorkspacesConnectionStatus operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DescribeWorkspacesConnectionStatus for more information on using the DescribeWorkspacesConnectionStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeWorkspacesConnectionStatusRequest method. -// req, resp := client.DescribeWorkspacesConnectionStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesConnectionStatus -func (c *WorkSpaces) DescribeWorkspacesConnectionStatusRequest(input *DescribeWorkspacesConnectionStatusInput) (req *request.Request, output *DescribeWorkspacesConnectionStatusOutput) { - op := &request.Operation{ - Name: opDescribeWorkspacesConnectionStatus, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeWorkspacesConnectionStatusInput{} - } - - output = &DescribeWorkspacesConnectionStatusOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeWorkspacesConnectionStatus API operation for Amazon WorkSpaces. -// -// Describes the connection status of the specified WorkSpaces. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation DescribeWorkspacesConnectionStatus for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" -// One or more parameter values are not valid. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesConnectionStatus -func (c *WorkSpaces) DescribeWorkspacesConnectionStatus(input *DescribeWorkspacesConnectionStatusInput) (*DescribeWorkspacesConnectionStatusOutput, error) { - req, out := c.DescribeWorkspacesConnectionStatusRequest(input) - return out, req.Send() -} - -// DescribeWorkspacesConnectionStatusWithContext is the same as DescribeWorkspacesConnectionStatus with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeWorkspacesConnectionStatus for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) DescribeWorkspacesConnectionStatusWithContext(ctx aws.Context, input *DescribeWorkspacesConnectionStatusInput, opts ...request.Option) (*DescribeWorkspacesConnectionStatusOutput, error) { - req, out := c.DescribeWorkspacesConnectionStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opModifyWorkspaceProperties = "ModifyWorkspaceProperties" - -// ModifyWorkspacePropertiesRequest generates a "aws/request.Request" representing the -// client's request for the ModifyWorkspaceProperties operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ModifyWorkspaceProperties for more information on using the ModifyWorkspaceProperties -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ModifyWorkspacePropertiesRequest method. -// req, resp := client.ModifyWorkspacePropertiesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspaceProperties -func (c *WorkSpaces) ModifyWorkspacePropertiesRequest(input *ModifyWorkspacePropertiesInput) (req *request.Request, output *ModifyWorkspacePropertiesOutput) { - op := &request.Operation{ - Name: opModifyWorkspaceProperties, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ModifyWorkspacePropertiesInput{} - } - - output = &ModifyWorkspacePropertiesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ModifyWorkspaceProperties API operation for Amazon WorkSpaces. -// -// Modifies the specified WorkSpace properties. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation ModifyWorkspaceProperties for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" -// One or more parameter values are not valid. -// -// * ErrCodeInvalidResourceStateException "InvalidResourceStateException" -// The state of the WorkSpace is not valid for this operation. -// -// * ErrCodeOperationInProgressException "OperationInProgressException" -// The properties of this WorkSpace are currently being modified. Try again -// in a moment. 
-// -// * ErrCodeUnsupportedWorkspaceConfigurationException "UnsupportedWorkspaceConfigurationException" -// The configuration of this WorkSpace is not supported for this operation. -// For more information, see the Amazon WorkSpaces Administration Guide (http://docs.aws.amazon.com/workspaces/latest/adminguide/). -// -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The resource could not be found. -// -// * ErrCodeAccessDeniedException "AccessDeniedException" -// The user is not authorized to access a resource. -// -// * ErrCodeResourceUnavailableException "ResourceUnavailableException" -// The specified resource is not available. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspaceProperties -func (c *WorkSpaces) ModifyWorkspaceProperties(input *ModifyWorkspacePropertiesInput) (*ModifyWorkspacePropertiesOutput, error) { - req, out := c.ModifyWorkspacePropertiesRequest(input) - return out, req.Send() -} - -// ModifyWorkspacePropertiesWithContext is the same as ModifyWorkspaceProperties with the addition of -// the ability to pass a context and additional request options. -// -// See ModifyWorkspaceProperties for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) ModifyWorkspacePropertiesWithContext(ctx aws.Context, input *ModifyWorkspacePropertiesInput, opts ...request.Option) (*ModifyWorkspacePropertiesOutput, error) { - req, out := c.ModifyWorkspacePropertiesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opRebootWorkspaces = "RebootWorkspaces" - -// RebootWorkspacesRequest generates a "aws/request.Request" representing the -// client's request for the RebootWorkspaces operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RebootWorkspaces for more information on using the RebootWorkspaces -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RebootWorkspacesRequest method. -// req, resp := client.RebootWorkspacesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspaces -func (c *WorkSpaces) RebootWorkspacesRequest(input *RebootWorkspacesInput) (req *request.Request, output *RebootWorkspacesOutput) { - op := &request.Operation{ - Name: opRebootWorkspaces, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RebootWorkspacesInput{} - } - - output = &RebootWorkspacesOutput{} - req = c.newRequest(op, input, output) - return -} - -// RebootWorkspaces API operation for Amazon WorkSpaces. -// -// Reboots the specified WorkSpaces. -// -// You cannot reboot a WorkSpace unless its state is AVAILABLE, IMPAIRED, or -// INOPERABLE. -// -// This operation is asynchronous and returns before the WorkSpaces have rebooted. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation RebootWorkspaces for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspaces -func (c *WorkSpaces) RebootWorkspaces(input *RebootWorkspacesInput) (*RebootWorkspacesOutput, error) { - req, out := c.RebootWorkspacesRequest(input) - return out, req.Send() -} - -// RebootWorkspacesWithContext is the same as RebootWorkspaces with the addition of -// the ability to pass a context and additional request options. -// -// See RebootWorkspaces for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) RebootWorkspacesWithContext(ctx aws.Context, input *RebootWorkspacesInput, opts ...request.Option) (*RebootWorkspacesOutput, error) { - req, out := c.RebootWorkspacesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRebuildWorkspaces = "RebuildWorkspaces" - -// RebuildWorkspacesRequest generates a "aws/request.Request" representing the -// client's request for the RebuildWorkspaces operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RebuildWorkspaces for more information on using the RebuildWorkspaces -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the RebuildWorkspacesRequest method. -// req, resp := client.RebuildWorkspacesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspaces -func (c *WorkSpaces) RebuildWorkspacesRequest(input *RebuildWorkspacesInput) (req *request.Request, output *RebuildWorkspacesOutput) { - op := &request.Operation{ - Name: opRebuildWorkspaces, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RebuildWorkspacesInput{} - } - - output = &RebuildWorkspacesOutput{} - req = c.newRequest(op, input, output) - return -} - -// RebuildWorkspaces API operation for Amazon WorkSpaces. -// -// Rebuilds the specified WorkSpaces. -// -// You cannot rebuild a WorkSpace unless its state is AVAILABLE or ERROR. -// -// Rebuilding a WorkSpace is a potentially destructive action that can result -// in the loss of data. For more information, see Rebuild a WorkSpace (http://docs.aws.amazon.com/workspaces/latest/adminguide/reset-workspace.html). -// -// This operation is asynchronous and returns before the WorkSpaces have been -// completely rebuilt. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation RebuildWorkspaces for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspaces -func (c *WorkSpaces) RebuildWorkspaces(input *RebuildWorkspacesInput) (*RebuildWorkspacesOutput, error) { - req, out := c.RebuildWorkspacesRequest(input) - return out, req.Send() -} - -// RebuildWorkspacesWithContext is the same as RebuildWorkspaces with the addition of -// the ability to pass a context and additional request options. -// -// See RebuildWorkspaces for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) RebuildWorkspacesWithContext(ctx aws.Context, input *RebuildWorkspacesInput, opts ...request.Option) (*RebuildWorkspacesOutput, error) { - req, out := c.RebuildWorkspacesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStartWorkspaces = "StartWorkspaces" - -// StartWorkspacesRequest generates a "aws/request.Request" representing the -// client's request for the StartWorkspaces operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartWorkspaces for more information on using the StartWorkspaces -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StartWorkspacesRequest method. 
-// req, resp := client.StartWorkspacesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartWorkspaces -func (c *WorkSpaces) StartWorkspacesRequest(input *StartWorkspacesInput) (req *request.Request, output *StartWorkspacesOutput) { - op := &request.Operation{ - Name: opStartWorkspaces, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartWorkspacesInput{} - } - - output = &StartWorkspacesOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartWorkspaces API operation for Amazon WorkSpaces. -// -// Starts the specified WorkSpaces. -// -// You cannot start a WorkSpace unless it has a running mode of AutoStop and -// a state of STOPPED. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation StartWorkspaces for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartWorkspaces -func (c *WorkSpaces) StartWorkspaces(input *StartWorkspacesInput) (*StartWorkspacesOutput, error) { - req, out := c.StartWorkspacesRequest(input) - return out, req.Send() -} - -// StartWorkspacesWithContext is the same as StartWorkspaces with the addition of -// the ability to pass a context and additional request options. -// -// See StartWorkspaces for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *WorkSpaces) StartWorkspacesWithContext(ctx aws.Context, input *StartWorkspacesInput, opts ...request.Option) (*StartWorkspacesOutput, error) { - req, out := c.StartWorkspacesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStopWorkspaces = "StopWorkspaces" - -// StopWorkspacesRequest generates a "aws/request.Request" representing the -// client's request for the StopWorkspaces operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StopWorkspaces for more information on using the StopWorkspaces -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the StopWorkspacesRequest method. -// req, resp := client.StopWorkspacesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopWorkspaces -func (c *WorkSpaces) StopWorkspacesRequest(input *StopWorkspacesInput) (req *request.Request, output *StopWorkspacesOutput) { - op := &request.Operation{ - Name: opStopWorkspaces, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StopWorkspacesInput{} - } - - output = &StopWorkspacesOutput{} - req = c.newRequest(op, input, output) - return -} - -// StopWorkspaces API operation for Amazon WorkSpaces. -// -// Stops the specified WorkSpaces. -// -// You cannot stop a WorkSpace unless it has a running mode of AutoStop and -// a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR. 
-// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation StopWorkspaces for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopWorkspaces -func (c *WorkSpaces) StopWorkspaces(input *StopWorkspacesInput) (*StopWorkspacesOutput, error) { - req, out := c.StopWorkspacesRequest(input) - return out, req.Send() -} - -// StopWorkspacesWithContext is the same as StopWorkspaces with the addition of -// the ability to pass a context and additional request options. -// -// See StopWorkspaces for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) StopWorkspacesWithContext(ctx aws.Context, input *StopWorkspacesInput, opts ...request.Option) (*StopWorkspacesOutput, error) { - req, out := c.StopWorkspacesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opTerminateWorkspaces = "TerminateWorkspaces" - -// TerminateWorkspacesRequest generates a "aws/request.Request" representing the -// client's request for the TerminateWorkspaces operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See TerminateWorkspaces for more information on using the TerminateWorkspaces -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the TerminateWorkspacesRequest method. -// req, resp := client.TerminateWorkspacesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspaces -func (c *WorkSpaces) TerminateWorkspacesRequest(input *TerminateWorkspacesInput) (req *request.Request, output *TerminateWorkspacesOutput) { - op := &request.Operation{ - Name: opTerminateWorkspaces, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TerminateWorkspacesInput{} - } - - output = &TerminateWorkspacesOutput{} - req = c.newRequest(op, input, output) - return -} - -// TerminateWorkspaces API operation for Amazon WorkSpaces. -// -// Terminates the specified WorkSpaces. -// -// Terminating a WorkSpace is a permanent action and cannot be undone. The user's -// data is destroyed. If you need to archive any user data, contact Amazon Web -// Services before terminating the WorkSpace. -// -// You can terminate a WorkSpace that is in any state except SUSPENDED. -// -// This operation is asynchronous and returns before the WorkSpaces have been -// completely terminated. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon WorkSpaces's -// API operation TerminateWorkspaces for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspaces -func (c *WorkSpaces) TerminateWorkspaces(input *TerminateWorkspacesInput) (*TerminateWorkspacesOutput, error) { - req, out := c.TerminateWorkspacesRequest(input) - return out, req.Send() -} - -// TerminateWorkspacesWithContext is the same as TerminateWorkspaces with the addition of -// the ability to pass a context and additional request options. -// -// See TerminateWorkspaces for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WorkSpaces) TerminateWorkspacesWithContext(ctx aws.Context, input *TerminateWorkspacesInput, opts ...request.Option) (*TerminateWorkspacesOutput, error) { - req, out := c.TerminateWorkspacesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Information about the compute type. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ComputeType -type ComputeType struct { - _ struct{} `type:"structure"` - - // The compute type. - Name *string `type:"string" enum:"Compute"` -} - -// String returns the string representation -func (s ComputeType) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ComputeType) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *ComputeType) SetName(v string) *ComputeType { - s.Name = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTagsRequest -type CreateTagsInput struct { - _ struct{} `type:"structure"` - - // The ID of the resource. 
- // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` - - // The tags. Each resource can have a maximum of 50 tags. - // - // Tags is a required field - Tags []*Tag `type:"list" required:"true"` -} - -// String returns the string representation -func (s CreateTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateTagsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateTagsInput"} - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceId sets the ResourceId field's value. -func (s *CreateTagsInput) SetResourceId(v string) *CreateTagsInput { - s.ResourceId = &v - return s -} - -// SetTags sets the Tags field's value. 
-func (s *CreateTagsInput) SetTags(v []*Tag) *CreateTagsInput { - s.Tags = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTagsResult -type CreateTagsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateTagsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateTagsOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspacesRequest -type CreateWorkspacesInput struct { - _ struct{} `type:"structure"` - - // Information about the WorkSpaces to create. - // - // Workspaces is a required field - Workspaces []*WorkspaceRequest `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s CreateWorkspacesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateWorkspacesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateWorkspacesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateWorkspacesInput"} - if s.Workspaces == nil { - invalidParams.Add(request.NewErrParamRequired("Workspaces")) - } - if s.Workspaces != nil && len(s.Workspaces) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Workspaces", 1)) - } - if s.Workspaces != nil { - for i, v := range s.Workspaces { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Workspaces", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetWorkspaces sets the Workspaces field's value. 
-func (s *CreateWorkspacesInput) SetWorkspaces(v []*WorkspaceRequest) *CreateWorkspacesInput { - s.Workspaces = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspacesResult -type CreateWorkspacesOutput struct { - _ struct{} `type:"structure"` - - // Information about the WorkSpaces that could not be created. - FailedRequests []*FailedCreateWorkspaceRequest `type:"list"` - - // Information about the WorkSpaces that were created. - // - // Because this operation is asynchronous, the identifier returned is not immediately - // available for use with other operations. For example, if you call DescribeWorkspaces - // before the WorkSpace is created, the information returned can be incomplete. - PendingRequests []*Workspace `type:"list"` -} - -// String returns the string representation -func (s CreateWorkspacesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateWorkspacesOutput) GoString() string { - return s.String() -} - -// SetFailedRequests sets the FailedRequests field's value. -func (s *CreateWorkspacesOutput) SetFailedRequests(v []*FailedCreateWorkspaceRequest) *CreateWorkspacesOutput { - s.FailedRequests = v - return s -} - -// SetPendingRequests sets the PendingRequests field's value. -func (s *CreateWorkspacesOutput) SetPendingRequests(v []*Workspace) *CreateWorkspacesOutput { - s.PendingRequests = v - return s -} - -// Information about defaults used to create a WorkSpace. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DefaultWorkspaceCreationProperties -type DefaultWorkspaceCreationProperties struct { - _ struct{} `type:"structure"` - - // The identifier of any security groups to apply to WorkSpaces when they are - // created. - CustomSecurityGroupId *string `type:"string"` - - // The organizational unit (OU) in the directory for the WorkSpace machine accounts. 
- DefaultOu *string `type:"string"` - - // The public IP address to attach to all WorkSpaces that are created or rebuilt. - EnableInternetAccess *bool `type:"boolean"` - - // Indicates whether the directory is enabled for Amazon WorkDocs. - EnableWorkDocs *bool `type:"boolean"` - - // Indicates whether the WorkSpace user is an administrator on the WorkSpace. - UserEnabledAsLocalAdministrator *bool `type:"boolean"` -} - -// String returns the string representation -func (s DefaultWorkspaceCreationProperties) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DefaultWorkspaceCreationProperties) GoString() string { - return s.String() -} - -// SetCustomSecurityGroupId sets the CustomSecurityGroupId field's value. -func (s *DefaultWorkspaceCreationProperties) SetCustomSecurityGroupId(v string) *DefaultWorkspaceCreationProperties { - s.CustomSecurityGroupId = &v - return s -} - -// SetDefaultOu sets the DefaultOu field's value. -func (s *DefaultWorkspaceCreationProperties) SetDefaultOu(v string) *DefaultWorkspaceCreationProperties { - s.DefaultOu = &v - return s -} - -// SetEnableInternetAccess sets the EnableInternetAccess field's value. -func (s *DefaultWorkspaceCreationProperties) SetEnableInternetAccess(v bool) *DefaultWorkspaceCreationProperties { - s.EnableInternetAccess = &v - return s -} - -// SetEnableWorkDocs sets the EnableWorkDocs field's value. -func (s *DefaultWorkspaceCreationProperties) SetEnableWorkDocs(v bool) *DefaultWorkspaceCreationProperties { - s.EnableWorkDocs = &v - return s -} - -// SetUserEnabledAsLocalAdministrator sets the UserEnabledAsLocalAdministrator field's value. 
-func (s *DefaultWorkspaceCreationProperties) SetUserEnabledAsLocalAdministrator(v bool) *DefaultWorkspaceCreationProperties { - s.UserEnabledAsLocalAdministrator = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTagsRequest -type DeleteTagsInput struct { - _ struct{} `type:"structure"` - - // The ID of the resource. - // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` - - // The tag keys. - // - // TagKeys is a required field - TagKeys []*string `type:"list" required:"true"` -} - -// String returns the string representation -func (s DeleteTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteTagsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"} - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceId sets the ResourceId field's value. -func (s *DeleteTagsInput) SetResourceId(v string) *DeleteTagsInput { - s.ResourceId = &v - return s -} - -// SetTagKeys sets the TagKeys field's value. 
-func (s *DeleteTagsInput) SetTagKeys(v []*string) *DeleteTagsInput { - s.TagKeys = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTagsResult -type DeleteTagsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteTagsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteTagsOutput) GoString() string { - return s.String() -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTagsRequest -type DescribeTagsInput struct { - _ struct{} `type:"structure"` - - // The ID of the resource. - // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DescribeTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeTagsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTagsInput"} - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceId sets the ResourceId field's value. -func (s *DescribeTagsInput) SetResourceId(v string) *DescribeTagsInput { - s.ResourceId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTagsResult -type DescribeTagsOutput struct { - _ struct{} `type:"structure"` - - // The tags. 
- TagList []*Tag `type:"list"` -} - -// String returns the string representation -func (s DescribeTagsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeTagsOutput) GoString() string { - return s.String() -} - -// SetTagList sets the TagList field's value. -func (s *DescribeTagsOutput) SetTagList(v []*Tag) *DescribeTagsOutput { - s.TagList = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundlesRequest -type DescribeWorkspaceBundlesInput struct { - _ struct{} `type:"structure"` - - // The IDs of the bundles. This parameter cannot be combined with any other - // filter. - BundleIds []*string `min:"1" type:"list"` - - // The token for the next set of results. (You received this token from a previous - // call.) - NextToken *string `min:"1" type:"string"` - - // The owner of the bundles. This parameter cannot be combined with any other - // filter. - // - // Specify AMAZON to describe the bundles provided by AWS or null to describe - // the bundles that belong to your account. - Owner *string `type:"string"` -} - -// String returns the string representation -func (s DescribeWorkspaceBundlesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkspaceBundlesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeWorkspaceBundlesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspaceBundlesInput"} - if s.BundleIds != nil && len(s.BundleIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("BundleIds", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBundleIds sets the BundleIds field's value. -func (s *DescribeWorkspaceBundlesInput) SetBundleIds(v []*string) *DescribeWorkspaceBundlesInput { - s.BundleIds = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeWorkspaceBundlesInput) SetNextToken(v string) *DescribeWorkspaceBundlesInput { - s.NextToken = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *DescribeWorkspaceBundlesInput) SetOwner(v string) *DescribeWorkspaceBundlesInput { - s.Owner = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundlesResult -type DescribeWorkspaceBundlesOutput struct { - _ struct{} `type:"structure"` - - // Information about the bundles. - Bundles []*WorkspaceBundle `type:"list"` - - // The token to use to retrieve the next set of results, or null if there are - // no more results available. This token is valid for one day and must be used - // within that time frame. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeWorkspaceBundlesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkspaceBundlesOutput) GoString() string { - return s.String() -} - -// SetBundles sets the Bundles field's value. 
-func (s *DescribeWorkspaceBundlesOutput) SetBundles(v []*WorkspaceBundle) *DescribeWorkspaceBundlesOutput { - s.Bundles = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeWorkspaceBundlesOutput) SetNextToken(v string) *DescribeWorkspaceBundlesOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectoriesRequest -type DescribeWorkspaceDirectoriesInput struct { - _ struct{} `type:"structure"` - - // The identifiers of the directories. If the value is null, all directories - // are retrieved. - DirectoryIds []*string `min:"1" type:"list"` - - // The token for the next set of results. (You received this token from a previous - // call.) - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeWorkspaceDirectoriesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkspaceDirectoriesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeWorkspaceDirectoriesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspaceDirectoriesInput"} - if s.DirectoryIds != nil && len(s.DirectoryIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DirectoryIds", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDirectoryIds sets the DirectoryIds field's value. -func (s *DescribeWorkspaceDirectoriesInput) SetDirectoryIds(v []*string) *DescribeWorkspaceDirectoriesInput { - s.DirectoryIds = v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *DescribeWorkspaceDirectoriesInput) SetNextToken(v string) *DescribeWorkspaceDirectoriesInput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectoriesResult -type DescribeWorkspaceDirectoriesOutput struct { - _ struct{} `type:"structure"` - - // Information about the directories. - Directories []*WorkspaceDirectory `type:"list"` - - // The token to use to retrieve the next set of results, or null if there are - // no more results available. This token is valid for one day and must be used - // within that time frame. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeWorkspaceDirectoriesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkspaceDirectoriesOutput) GoString() string { - return s.String() -} - -// SetDirectories sets the Directories field's value. -func (s *DescribeWorkspaceDirectoriesOutput) SetDirectories(v []*WorkspaceDirectory) *DescribeWorkspaceDirectoriesOutput { - s.Directories = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeWorkspaceDirectoriesOutput) SetNextToken(v string) *DescribeWorkspaceDirectoriesOutput { - s.NextToken = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesConnectionStatusRequest -type DescribeWorkspacesConnectionStatusInput struct { - _ struct{} `type:"structure"` - - // The token for the next set of results. (You received this token from a previous - // call.) - NextToken *string `min:"1" type:"string"` - - // The identifiers of the WorkSpaces. 
- WorkspaceIds []*string `min:"1" type:"list"` -} - -// String returns the string representation -func (s DescribeWorkspacesConnectionStatusInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkspacesConnectionStatusInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeWorkspacesConnectionStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspacesConnectionStatusInput"} - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.WorkspaceIds != nil && len(s.WorkspaceIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkspaceIds", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeWorkspacesConnectionStatusInput) SetNextToken(v string) *DescribeWorkspacesConnectionStatusInput { - s.NextToken = &v - return s -} - -// SetWorkspaceIds sets the WorkspaceIds field's value. -func (s *DescribeWorkspacesConnectionStatusInput) SetWorkspaceIds(v []*string) *DescribeWorkspacesConnectionStatusInput { - s.WorkspaceIds = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesConnectionStatusResult -type DescribeWorkspacesConnectionStatusOutput struct { - _ struct{} `type:"structure"` - - // The token to use to retrieve the next set of results, or null if there are - // no more results available. - NextToken *string `min:"1" type:"string"` - - // Information about the connection status of the WorkSpace. 
- WorkspacesConnectionStatus []*WorkspaceConnectionStatus `type:"list"` -} - -// String returns the string representation -func (s DescribeWorkspacesConnectionStatusOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkspacesConnectionStatusOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeWorkspacesConnectionStatusOutput) SetNextToken(v string) *DescribeWorkspacesConnectionStatusOutput { - s.NextToken = &v - return s -} - -// SetWorkspacesConnectionStatus sets the WorkspacesConnectionStatus field's value. -func (s *DescribeWorkspacesConnectionStatusOutput) SetWorkspacesConnectionStatus(v []*WorkspaceConnectionStatus) *DescribeWorkspacesConnectionStatusOutput { - s.WorkspacesConnectionStatus = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesRequest -type DescribeWorkspacesInput struct { - _ struct{} `type:"structure"` - - // The ID of the bundle. All WorkSpaces that are created from this bundle are - // retrieved. This parameter cannot be combined with any other filter. - BundleId *string `type:"string"` - - // The ID of the directory. In addition, you can optionally specify a specific - // directory user (see UserName). This parameter cannot be combined with any - // other filter. - DirectoryId *string `type:"string"` - - // The maximum number of items to return. - Limit *int64 `min:"1" type:"integer"` - - // The token for the next set of results. (You received this token from a previous - // call.) - NextToken *string `min:"1" type:"string"` - - // The name of the directory user. You must specify this parameter with DirectoryId. - UserName *string `min:"1" type:"string"` - - // The IDs of the WorkSpaces. This parameter cannot be combined with any other - // filter. 
- // - // Because the CreateWorkspaces operation is asynchronous, the identifier it - // returns is not immediately available. If you immediately call DescribeWorkspaces - // with this identifier, no information is returned. - WorkspaceIds []*string `min:"1" type:"list"` -} - -// String returns the string representation -func (s DescribeWorkspacesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkspacesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeWorkspacesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspacesInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.UserName != nil && len(*s.UserName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) - } - if s.WorkspaceIds != nil && len(s.WorkspaceIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkspaceIds", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBundleId sets the BundleId field's value. -func (s *DescribeWorkspacesInput) SetBundleId(v string) *DescribeWorkspacesInput { - s.BundleId = &v - return s -} - -// SetDirectoryId sets the DirectoryId field's value. -func (s *DescribeWorkspacesInput) SetDirectoryId(v string) *DescribeWorkspacesInput { - s.DirectoryId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *DescribeWorkspacesInput) SetLimit(v int64) *DescribeWorkspacesInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *DescribeWorkspacesInput) SetNextToken(v string) *DescribeWorkspacesInput { - s.NextToken = &v - return s -} - -// SetUserName sets the UserName field's value. -func (s *DescribeWorkspacesInput) SetUserName(v string) *DescribeWorkspacesInput { - s.UserName = &v - return s -} - -// SetWorkspaceIds sets the WorkspaceIds field's value. -func (s *DescribeWorkspacesInput) SetWorkspaceIds(v []*string) *DescribeWorkspacesInput { - s.WorkspaceIds = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesResult -type DescribeWorkspacesOutput struct { - _ struct{} `type:"structure"` - - // The token to use to retrieve the next set of results, or null if there are - // no more results available. This token is valid for one day and must be used - // within that time frame. - NextToken *string `min:"1" type:"string"` - - // Information about the WorkSpaces. - // - // Because CreateWorkspaces is an asynchronous operation, some of the returned - // information could be incomplete. - Workspaces []*Workspace `type:"list"` -} - -// String returns the string representation -func (s DescribeWorkspacesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeWorkspacesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeWorkspacesOutput) SetNextToken(v string) *DescribeWorkspacesOutput { - s.NextToken = &v - return s -} - -// SetWorkspaces sets the Workspaces field's value. -func (s *DescribeWorkspacesOutput) SetWorkspaces(v []*Workspace) *DescribeWorkspacesOutput { - s.Workspaces = v - return s -} - -// Information about a WorkSpace that could not be created. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/FailedCreateWorkspaceRequest -type FailedCreateWorkspaceRequest struct { - _ struct{} `type:"structure"` - - // The error code. 
- ErrorCode *string `type:"string"` - - // The textual error message. - ErrorMessage *string `type:"string"` - - // Information about the WorkSpace. - WorkspaceRequest *WorkspaceRequest `type:"structure"` -} - -// String returns the string representation -func (s FailedCreateWorkspaceRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FailedCreateWorkspaceRequest) GoString() string { - return s.String() -} - -// SetErrorCode sets the ErrorCode field's value. -func (s *FailedCreateWorkspaceRequest) SetErrorCode(v string) *FailedCreateWorkspaceRequest { - s.ErrorCode = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *FailedCreateWorkspaceRequest) SetErrorMessage(v string) *FailedCreateWorkspaceRequest { - s.ErrorMessage = &v - return s -} - -// SetWorkspaceRequest sets the WorkspaceRequest field's value. -func (s *FailedCreateWorkspaceRequest) SetWorkspaceRequest(v *WorkspaceRequest) *FailedCreateWorkspaceRequest { - s.WorkspaceRequest = v - return s -} - -// Information about a WorkSpace that could not be rebooted (RebootWorkspaces), -// rebuilt (RebuildWorkspaces), terminated (TerminateWorkspaces), started (StartWorkspaces), -// or stopped (StopWorkspaces). -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/FailedWorkspaceChangeRequest -type FailedWorkspaceChangeRequest struct { - _ struct{} `type:"structure"` - - // The error code. - ErrorCode *string `type:"string"` - - // The textual error message. - ErrorMessage *string `type:"string"` - - // The identifier of the WorkSpace. - WorkspaceId *string `type:"string"` -} - -// String returns the string representation -func (s FailedWorkspaceChangeRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FailedWorkspaceChangeRequest) GoString() string { - return s.String() -} - -// SetErrorCode sets the ErrorCode field's value. 
-func (s *FailedWorkspaceChangeRequest) SetErrorCode(v string) *FailedWorkspaceChangeRequest { - s.ErrorCode = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *FailedWorkspaceChangeRequest) SetErrorMessage(v string) *FailedWorkspaceChangeRequest { - s.ErrorMessage = &v - return s -} - -// SetWorkspaceId sets the WorkspaceId field's value. -func (s *FailedWorkspaceChangeRequest) SetWorkspaceId(v string) *FailedWorkspaceChangeRequest { - s.WorkspaceId = &v - return s -} - -// Information about a WorkSpace modification. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModificationState -type ModificationState struct { - _ struct{} `type:"structure"` - - // The resource. - Resource *string `type:"string" enum:"ModificationResourceEnum"` - - // The modification state. - State *string `type:"string" enum:"ModificationStateEnum"` -} - -// String returns the string representation -func (s ModificationState) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ModificationState) GoString() string { - return s.String() -} - -// SetResource sets the Resource field's value. -func (s *ModificationState) SetResource(v string) *ModificationState { - s.Resource = &v - return s -} - -// SetState sets the State field's value. -func (s *ModificationState) SetState(v string) *ModificationState { - s.State = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspacePropertiesRequest -type ModifyWorkspacePropertiesInput struct { - _ struct{} `type:"structure"` - - // The ID of the WorkSpace. - // - // WorkspaceId is a required field - WorkspaceId *string `type:"string" required:"true"` - - // The properties of the WorkSpace. 
- // - // WorkspaceProperties is a required field - WorkspaceProperties *WorkspaceProperties `type:"structure" required:"true"` -} - -// String returns the string representation -func (s ModifyWorkspacePropertiesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ModifyWorkspacePropertiesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ModifyWorkspacePropertiesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ModifyWorkspacePropertiesInput"} - if s.WorkspaceId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) - } - if s.WorkspaceProperties == nil { - invalidParams.Add(request.NewErrParamRequired("WorkspaceProperties")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetWorkspaceId sets the WorkspaceId field's value. -func (s *ModifyWorkspacePropertiesInput) SetWorkspaceId(v string) *ModifyWorkspacePropertiesInput { - s.WorkspaceId = &v - return s -} - -// SetWorkspaceProperties sets the WorkspaceProperties field's value. -func (s *ModifyWorkspacePropertiesInput) SetWorkspaceProperties(v *WorkspaceProperties) *ModifyWorkspacePropertiesInput { - s.WorkspaceProperties = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspacePropertiesResult -type ModifyWorkspacePropertiesOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s ModifyWorkspacePropertiesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ModifyWorkspacePropertiesOutput) GoString() string { - return s.String() -} - -// Information used to reboot a WorkSpace. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootRequest -type RebootRequest struct { - _ struct{} `type:"structure"` - - // The identifier of the WorkSpace. - // - // WorkspaceId is a required field - WorkspaceId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s RebootRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RebootRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RebootRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RebootRequest"} - if s.WorkspaceId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetWorkspaceId sets the WorkspaceId field's value. -func (s *RebootRequest) SetWorkspaceId(v string) *RebootRequest { - s.WorkspaceId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspacesRequest -type RebootWorkspacesInput struct { - _ struct{} `type:"structure"` - - // The WorkSpaces to reboot. - // - // RebootWorkspaceRequests is a required field - RebootWorkspaceRequests []*RebootRequest `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s RebootWorkspacesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RebootWorkspacesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *RebootWorkspacesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RebootWorkspacesInput"} - if s.RebootWorkspaceRequests == nil { - invalidParams.Add(request.NewErrParamRequired("RebootWorkspaceRequests")) - } - if s.RebootWorkspaceRequests != nil && len(s.RebootWorkspaceRequests) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RebootWorkspaceRequests", 1)) - } - if s.RebootWorkspaceRequests != nil { - for i, v := range s.RebootWorkspaceRequests { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RebootWorkspaceRequests", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRebootWorkspaceRequests sets the RebootWorkspaceRequests field's value. -func (s *RebootWorkspacesInput) SetRebootWorkspaceRequests(v []*RebootRequest) *RebootWorkspacesInput { - s.RebootWorkspaceRequests = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspacesResult -type RebootWorkspacesOutput struct { - _ struct{} `type:"structure"` - - // Information about the WorkSpaces that could not be rebooted. - FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` -} - -// String returns the string representation -func (s RebootWorkspacesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RebootWorkspacesOutput) GoString() string { - return s.String() -} - -// SetFailedRequests sets the FailedRequests field's value. -func (s *RebootWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRequest) *RebootWorkspacesOutput { - s.FailedRequests = v - return s -} - -// Information used to rebuild a WorkSpace. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildRequest -type RebuildRequest struct { - _ struct{} `type:"structure"` - - // The identifier of the WorkSpace. - // - // WorkspaceId is a required field - WorkspaceId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s RebuildRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RebuildRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RebuildRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RebuildRequest"} - if s.WorkspaceId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetWorkspaceId sets the WorkspaceId field's value. -func (s *RebuildRequest) SetWorkspaceId(v string) *RebuildRequest { - s.WorkspaceId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspacesRequest -type RebuildWorkspacesInput struct { - _ struct{} `type:"structure"` - - // The WorkSpaces to rebuild. - // - // RebuildWorkspaceRequests is a required field - RebuildWorkspaceRequests []*RebuildRequest `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s RebuildWorkspacesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RebuildWorkspacesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *RebuildWorkspacesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RebuildWorkspacesInput"} - if s.RebuildWorkspaceRequests == nil { - invalidParams.Add(request.NewErrParamRequired("RebuildWorkspaceRequests")) - } - if s.RebuildWorkspaceRequests != nil && len(s.RebuildWorkspaceRequests) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RebuildWorkspaceRequests", 1)) - } - if s.RebuildWorkspaceRequests != nil { - for i, v := range s.RebuildWorkspaceRequests { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RebuildWorkspaceRequests", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRebuildWorkspaceRequests sets the RebuildWorkspaceRequests field's value. -func (s *RebuildWorkspacesInput) SetRebuildWorkspaceRequests(v []*RebuildRequest) *RebuildWorkspacesInput { - s.RebuildWorkspaceRequests = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspacesResult -type RebuildWorkspacesOutput struct { - _ struct{} `type:"structure"` - - // Information about the WorkSpaces that could not be rebuilt. - FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` -} - -// String returns the string representation -func (s RebuildWorkspacesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RebuildWorkspacesOutput) GoString() string { - return s.String() -} - -// SetFailedRequests sets the FailedRequests field's value. -func (s *RebuildWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRequest) *RebuildWorkspacesOutput { - s.FailedRequests = v - return s -} - -// Information about the root volume for a WorkSpace bundle. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RootStorage -type RootStorage struct { - _ struct{} `type:"structure"` - - // The size of the root volume. - Capacity *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s RootStorage) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RootStorage) GoString() string { - return s.String() -} - -// SetCapacity sets the Capacity field's value. -func (s *RootStorage) SetCapacity(v string) *RootStorage { - s.Capacity = &v - return s -} - -// Information used to start a WorkSpace. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartRequest -type StartRequest struct { - _ struct{} `type:"structure"` - - // The ID of the WorkSpace. - WorkspaceId *string `type:"string"` -} - -// String returns the string representation -func (s StartRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartRequest) GoString() string { - return s.String() -} - -// SetWorkspaceId sets the WorkspaceId field's value. -func (s *StartRequest) SetWorkspaceId(v string) *StartRequest { - s.WorkspaceId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartWorkspacesRequest -type StartWorkspacesInput struct { - _ struct{} `type:"structure"` - - // The WorkSpaces to start. - // - // StartWorkspaceRequests is a required field - StartWorkspaceRequests []*StartRequest `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s StartWorkspacesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartWorkspacesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *StartWorkspacesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartWorkspacesInput"} - if s.StartWorkspaceRequests == nil { - invalidParams.Add(request.NewErrParamRequired("StartWorkspaceRequests")) - } - if s.StartWorkspaceRequests != nil && len(s.StartWorkspaceRequests) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StartWorkspaceRequests", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStartWorkspaceRequests sets the StartWorkspaceRequests field's value. -func (s *StartWorkspacesInput) SetStartWorkspaceRequests(v []*StartRequest) *StartWorkspacesInput { - s.StartWorkspaceRequests = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartWorkspacesResult -type StartWorkspacesOutput struct { - _ struct{} `type:"structure"` - - // Information about the WorkSpaces that could not be started. - FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` -} - -// String returns the string representation -func (s StartWorkspacesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartWorkspacesOutput) GoString() string { - return s.String() -} - -// SetFailedRequests sets the FailedRequests field's value. -func (s *StartWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRequest) *StartWorkspacesOutput { - s.FailedRequests = v - return s -} - -// Information used to stop a WorkSpace. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopRequest -type StopRequest struct { - _ struct{} `type:"structure"` - - // The ID of the WorkSpace. 
- WorkspaceId *string `type:"string"` -} - -// String returns the string representation -func (s StopRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopRequest) GoString() string { - return s.String() -} - -// SetWorkspaceId sets the WorkspaceId field's value. -func (s *StopRequest) SetWorkspaceId(v string) *StopRequest { - s.WorkspaceId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopWorkspacesRequest -type StopWorkspacesInput struct { - _ struct{} `type:"structure"` - - // The WorkSpaces to stop. - // - // StopWorkspaceRequests is a required field - StopWorkspaceRequests []*StopRequest `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s StopWorkspacesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopWorkspacesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopWorkspacesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopWorkspacesInput"} - if s.StopWorkspaceRequests == nil { - invalidParams.Add(request.NewErrParamRequired("StopWorkspaceRequests")) - } - if s.StopWorkspaceRequests != nil && len(s.StopWorkspaceRequests) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StopWorkspaceRequests", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStopWorkspaceRequests sets the StopWorkspaceRequests field's value. 
-func (s *StopWorkspacesInput) SetStopWorkspaceRequests(v []*StopRequest) *StopWorkspacesInput { - s.StopWorkspaceRequests = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopWorkspacesResult -type StopWorkspacesOutput struct { - _ struct{} `type:"structure"` - - // Information about the WorkSpaces that could not be stopped. - FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` -} - -// String returns the string representation -func (s StopWorkspacesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StopWorkspacesOutput) GoString() string { - return s.String() -} - -// SetFailedRequests sets the FailedRequests field's value. -func (s *StopWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRequest) *StopWorkspacesOutput { - s.FailedRequests = v - return s -} - -// Information about a tag. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/Tag -type Tag struct { - _ struct{} `type:"structure"` - - // The key of the tag. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // The value of the tag. - Value *string `type:"string"` -} - -// String returns the string representation -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. 
-func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} - -// Information used to terminate a WorkSpace. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateRequest -type TerminateRequest struct { - _ struct{} `type:"structure"` - - // The identifier of the WorkSpace. - // - // WorkspaceId is a required field - WorkspaceId *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s TerminateRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TerminateRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TerminateRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TerminateRequest"} - if s.WorkspaceId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetWorkspaceId sets the WorkspaceId field's value. -func (s *TerminateRequest) SetWorkspaceId(v string) *TerminateRequest { - s.WorkspaceId = &v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspacesRequest -type TerminateWorkspacesInput struct { - _ struct{} `type:"structure"` - - // The WorkSpaces to terminate. 
- // - // TerminateWorkspaceRequests is a required field - TerminateWorkspaceRequests []*TerminateRequest `min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s TerminateWorkspacesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TerminateWorkspacesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TerminateWorkspacesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TerminateWorkspacesInput"} - if s.TerminateWorkspaceRequests == nil { - invalidParams.Add(request.NewErrParamRequired("TerminateWorkspaceRequests")) - } - if s.TerminateWorkspaceRequests != nil && len(s.TerminateWorkspaceRequests) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TerminateWorkspaceRequests", 1)) - } - if s.TerminateWorkspaceRequests != nil { - for i, v := range s.TerminateWorkspaceRequests { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TerminateWorkspaceRequests", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTerminateWorkspaceRequests sets the TerminateWorkspaceRequests field's value. -func (s *TerminateWorkspacesInput) SetTerminateWorkspaceRequests(v []*TerminateRequest) *TerminateWorkspacesInput { - s.TerminateWorkspaceRequests = v - return s -} - -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspacesResult -type TerminateWorkspacesOutput struct { - _ struct{} `type:"structure"` - - // Information about the WorkSpaces that could not be terminated. 
- FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` -} - -// String returns the string representation -func (s TerminateWorkspacesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TerminateWorkspacesOutput) GoString() string { - return s.String() -} - -// SetFailedRequests sets the FailedRequests field's value. -func (s *TerminateWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRequest) *TerminateWorkspacesOutput { - s.FailedRequests = v - return s -} - -// Information about the user storage for a WorkSpace bundle. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/UserStorage -type UserStorage struct { - _ struct{} `type:"structure"` - - // The size of the user storage. - Capacity *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s UserStorage) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UserStorage) GoString() string { - return s.String() -} - -// SetCapacity sets the Capacity field's value. -func (s *UserStorage) SetCapacity(v string) *UserStorage { - s.Capacity = &v - return s -} - -// Information about a WorkSpace. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/Workspace -type Workspace struct { - _ struct{} `type:"structure"` - - // The identifier of the bundle used to create the WorkSpace. - BundleId *string `type:"string"` - - // The name of the WorkSpace, as seen by the operating system. - ComputerName *string `type:"string"` - - // The identifier of the AWS Directory Service directory for the WorkSpace. - DirectoryId *string `type:"string"` - - // If the WorkSpace could not be created, contains the error code. - ErrorCode *string `type:"string"` - - // If the WorkSpace could not be created, contains a textual error message that - // describes the failure. 
- ErrorMessage *string `type:"string"` - - // The IP address of the WorkSpace. - IpAddress *string `type:"string"` - - // The modification states of the WorkSpace. - ModificationStates []*ModificationState `type:"list"` - - // Indicates whether the data stored on the root volume is encrypted. - RootVolumeEncryptionEnabled *bool `type:"boolean"` - - // The operational state of the WorkSpace. - State *string `type:"string" enum:"WorkspaceState"` - - // The identifier of the subnet for the WorkSpace. - SubnetId *string `type:"string"` - - // The user for the WorkSpace. - UserName *string `min:"1" type:"string"` - - // Indicates whether the data stored on the user volume is encrypted. - UserVolumeEncryptionEnabled *bool `type:"boolean"` - - // The KMS key used to encrypt data stored on your WorkSpace. - VolumeEncryptionKey *string `type:"string"` - - // The identifier of the WorkSpace. - WorkspaceId *string `type:"string"` - - // The properties of the WorkSpace. - WorkspaceProperties *WorkspaceProperties `type:"structure"` -} - -// String returns the string representation -func (s Workspace) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Workspace) GoString() string { - return s.String() -} - -// SetBundleId sets the BundleId field's value. -func (s *Workspace) SetBundleId(v string) *Workspace { - s.BundleId = &v - return s -} - -// SetComputerName sets the ComputerName field's value. -func (s *Workspace) SetComputerName(v string) *Workspace { - s.ComputerName = &v - return s -} - -// SetDirectoryId sets the DirectoryId field's value. -func (s *Workspace) SetDirectoryId(v string) *Workspace { - s.DirectoryId = &v - return s -} - -// SetErrorCode sets the ErrorCode field's value. -func (s *Workspace) SetErrorCode(v string) *Workspace { - s.ErrorCode = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. 
-func (s *Workspace) SetErrorMessage(v string) *Workspace { - s.ErrorMessage = &v - return s -} - -// SetIpAddress sets the IpAddress field's value. -func (s *Workspace) SetIpAddress(v string) *Workspace { - s.IpAddress = &v - return s -} - -// SetModificationStates sets the ModificationStates field's value. -func (s *Workspace) SetModificationStates(v []*ModificationState) *Workspace { - s.ModificationStates = v - return s -} - -// SetRootVolumeEncryptionEnabled sets the RootVolumeEncryptionEnabled field's value. -func (s *Workspace) SetRootVolumeEncryptionEnabled(v bool) *Workspace { - s.RootVolumeEncryptionEnabled = &v - return s -} - -// SetState sets the State field's value. -func (s *Workspace) SetState(v string) *Workspace { - s.State = &v - return s -} - -// SetSubnetId sets the SubnetId field's value. -func (s *Workspace) SetSubnetId(v string) *Workspace { - s.SubnetId = &v - return s -} - -// SetUserName sets the UserName field's value. -func (s *Workspace) SetUserName(v string) *Workspace { - s.UserName = &v - return s -} - -// SetUserVolumeEncryptionEnabled sets the UserVolumeEncryptionEnabled field's value. -func (s *Workspace) SetUserVolumeEncryptionEnabled(v bool) *Workspace { - s.UserVolumeEncryptionEnabled = &v - return s -} - -// SetVolumeEncryptionKey sets the VolumeEncryptionKey field's value. -func (s *Workspace) SetVolumeEncryptionKey(v string) *Workspace { - s.VolumeEncryptionKey = &v - return s -} - -// SetWorkspaceId sets the WorkspaceId field's value. -func (s *Workspace) SetWorkspaceId(v string) *Workspace { - s.WorkspaceId = &v - return s -} - -// SetWorkspaceProperties sets the WorkspaceProperties field's value. -func (s *Workspace) SetWorkspaceProperties(v *WorkspaceProperties) *Workspace { - s.WorkspaceProperties = v - return s -} - -// Information about a WorkSpace bundle. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceBundle -type WorkspaceBundle struct { - _ struct{} `type:"structure"` - - // The bundle identifier. - BundleId *string `type:"string"` - - // The compute type. For more information, see Amazon WorkSpaces Bundles (http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). - ComputeType *ComputeType `type:"structure"` - - // A description. - Description *string `type:"string"` - - // The name of the bundle. - Name *string `min:"1" type:"string"` - - // The owner of the bundle. This is the account identifier of the owner, or - // AMAZON if the bundle is provided by AWS. - Owner *string `type:"string"` - - // The size of the root volume. - RootStorage *RootStorage `type:"structure"` - - // The size of the user storage. - UserStorage *UserStorage `type:"structure"` -} - -// String returns the string representation -func (s WorkspaceBundle) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkspaceBundle) GoString() string { - return s.String() -} - -// SetBundleId sets the BundleId field's value. -func (s *WorkspaceBundle) SetBundleId(v string) *WorkspaceBundle { - s.BundleId = &v - return s -} - -// SetComputeType sets the ComputeType field's value. -func (s *WorkspaceBundle) SetComputeType(v *ComputeType) *WorkspaceBundle { - s.ComputeType = v - return s -} - -// SetDescription sets the Description field's value. -func (s *WorkspaceBundle) SetDescription(v string) *WorkspaceBundle { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *WorkspaceBundle) SetName(v string) *WorkspaceBundle { - s.Name = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *WorkspaceBundle) SetOwner(v string) *WorkspaceBundle { - s.Owner = &v - return s -} - -// SetRootStorage sets the RootStorage field's value. 
-func (s *WorkspaceBundle) SetRootStorage(v *RootStorage) *WorkspaceBundle { - s.RootStorage = v - return s -} - -// SetUserStorage sets the UserStorage field's value. -func (s *WorkspaceBundle) SetUserStorage(v *UserStorage) *WorkspaceBundle { - s.UserStorage = v - return s -} - -// Describes the connection status of a WorkSpace. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceConnectionStatus -type WorkspaceConnectionStatus struct { - _ struct{} `type:"structure"` - - // The connection state of the WorkSpace. The connection state is unknown if - // the WorkSpace is stopped. - ConnectionState *string `type:"string" enum:"ConnectionState"` - - // The timestamp of the connection state check. - ConnectionStateCheckTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The timestamp of the last known user connection. - LastKnownUserConnectionTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The ID of the WorkSpace. - WorkspaceId *string `type:"string"` -} - -// String returns the string representation -func (s WorkspaceConnectionStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkspaceConnectionStatus) GoString() string { - return s.String() -} - -// SetConnectionState sets the ConnectionState field's value. -func (s *WorkspaceConnectionStatus) SetConnectionState(v string) *WorkspaceConnectionStatus { - s.ConnectionState = &v - return s -} - -// SetConnectionStateCheckTimestamp sets the ConnectionStateCheckTimestamp field's value. -func (s *WorkspaceConnectionStatus) SetConnectionStateCheckTimestamp(v time.Time) *WorkspaceConnectionStatus { - s.ConnectionStateCheckTimestamp = &v - return s -} - -// SetLastKnownUserConnectionTimestamp sets the LastKnownUserConnectionTimestamp field's value. 
-func (s *WorkspaceConnectionStatus) SetLastKnownUserConnectionTimestamp(v time.Time) *WorkspaceConnectionStatus { - s.LastKnownUserConnectionTimestamp = &v - return s -} - -// SetWorkspaceId sets the WorkspaceId field's value. -func (s *WorkspaceConnectionStatus) SetWorkspaceId(v string) *WorkspaceConnectionStatus { - s.WorkspaceId = &v - return s -} - -// Contains information about an AWS Directory Service directory for use with -// Amazon WorkSpaces. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceDirectory -type WorkspaceDirectory struct { - _ struct{} `type:"structure"` - - // The directory alias. - Alias *string `type:"string"` - - // The user name for the service account. - CustomerUserName *string `min:"1" type:"string"` - - // The directory identifier. - DirectoryId *string `type:"string"` - - // The name of the directory. - DirectoryName *string `type:"string"` - - // The directory type. - DirectoryType *string `type:"string" enum:"WorkspaceDirectoryType"` - - // The IP addresses of the DNS servers for the directory. - DnsIpAddresses []*string `type:"list"` - - // The identifier of the IAM role. This is the role that allows Amazon WorkSpaces - // to make calls to other services, such as Amazon EC2, on your behalf. - IamRoleId *string `type:"string"` - - // The registration code for the directory. This is the code that users enter - // in their Amazon WorkSpaces client application to connect to the directory. - RegistrationCode *string `min:"1" type:"string"` - - // The state of the directory's registration with Amazon WorkSpaces - State *string `type:"string" enum:"WorkspaceDirectoryState"` - - // The identifiers of the subnets used with the directory. - SubnetIds []*string `type:"list"` - - // The default creation properties for all WorkSpaces in the directory. 
- WorkspaceCreationProperties *DefaultWorkspaceCreationProperties `type:"structure"` - - // The identifier of the security group that is assigned to new WorkSpaces. - WorkspaceSecurityGroupId *string `type:"string"` -} - -// String returns the string representation -func (s WorkspaceDirectory) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkspaceDirectory) GoString() string { - return s.String() -} - -// SetAlias sets the Alias field's value. -func (s *WorkspaceDirectory) SetAlias(v string) *WorkspaceDirectory { - s.Alias = &v - return s -} - -// SetCustomerUserName sets the CustomerUserName field's value. -func (s *WorkspaceDirectory) SetCustomerUserName(v string) *WorkspaceDirectory { - s.CustomerUserName = &v - return s -} - -// SetDirectoryId sets the DirectoryId field's value. -func (s *WorkspaceDirectory) SetDirectoryId(v string) *WorkspaceDirectory { - s.DirectoryId = &v - return s -} - -// SetDirectoryName sets the DirectoryName field's value. -func (s *WorkspaceDirectory) SetDirectoryName(v string) *WorkspaceDirectory { - s.DirectoryName = &v - return s -} - -// SetDirectoryType sets the DirectoryType field's value. -func (s *WorkspaceDirectory) SetDirectoryType(v string) *WorkspaceDirectory { - s.DirectoryType = &v - return s -} - -// SetDnsIpAddresses sets the DnsIpAddresses field's value. -func (s *WorkspaceDirectory) SetDnsIpAddresses(v []*string) *WorkspaceDirectory { - s.DnsIpAddresses = v - return s -} - -// SetIamRoleId sets the IamRoleId field's value. -func (s *WorkspaceDirectory) SetIamRoleId(v string) *WorkspaceDirectory { - s.IamRoleId = &v - return s -} - -// SetRegistrationCode sets the RegistrationCode field's value. -func (s *WorkspaceDirectory) SetRegistrationCode(v string) *WorkspaceDirectory { - s.RegistrationCode = &v - return s -} - -// SetState sets the State field's value. 
-func (s *WorkspaceDirectory) SetState(v string) *WorkspaceDirectory { - s.State = &v - return s -} - -// SetSubnetIds sets the SubnetIds field's value. -func (s *WorkspaceDirectory) SetSubnetIds(v []*string) *WorkspaceDirectory { - s.SubnetIds = v - return s -} - -// SetWorkspaceCreationProperties sets the WorkspaceCreationProperties field's value. -func (s *WorkspaceDirectory) SetWorkspaceCreationProperties(v *DefaultWorkspaceCreationProperties) *WorkspaceDirectory { - s.WorkspaceCreationProperties = v - return s -} - -// SetWorkspaceSecurityGroupId sets the WorkspaceSecurityGroupId field's value. -func (s *WorkspaceDirectory) SetWorkspaceSecurityGroupId(v string) *WorkspaceDirectory { - s.WorkspaceSecurityGroupId = &v - return s -} - -// Information about a WorkSpace. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceProperties -type WorkspaceProperties struct { - _ struct{} `type:"structure"` - - // The compute type. For more information, see Amazon WorkSpaces Bundles (http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). - ComputeTypeName *string `type:"string" enum:"Compute"` - - // The size of the root volume. - RootVolumeSizeGib *int64 `type:"integer"` - - // The running mode. For more information, see Manage the WorkSpace Running - // Mode (http://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html). - RunningMode *string `type:"string" enum:"RunningMode"` - - // The time after a user logs off when WorkSpaces are automatically stopped. - // Configured in 60 minute intervals. - RunningModeAutoStopTimeoutInMinutes *int64 `type:"integer"` - - // The size of the user storage. 
- UserVolumeSizeGib *int64 `type:"integer"` -} - -// String returns the string representation -func (s WorkspaceProperties) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkspaceProperties) GoString() string { - return s.String() -} - -// SetComputeTypeName sets the ComputeTypeName field's value. -func (s *WorkspaceProperties) SetComputeTypeName(v string) *WorkspaceProperties { - s.ComputeTypeName = &v - return s -} - -// SetRootVolumeSizeGib sets the RootVolumeSizeGib field's value. -func (s *WorkspaceProperties) SetRootVolumeSizeGib(v int64) *WorkspaceProperties { - s.RootVolumeSizeGib = &v - return s -} - -// SetRunningMode sets the RunningMode field's value. -func (s *WorkspaceProperties) SetRunningMode(v string) *WorkspaceProperties { - s.RunningMode = &v - return s -} - -// SetRunningModeAutoStopTimeoutInMinutes sets the RunningModeAutoStopTimeoutInMinutes field's value. -func (s *WorkspaceProperties) SetRunningModeAutoStopTimeoutInMinutes(v int64) *WorkspaceProperties { - s.RunningModeAutoStopTimeoutInMinutes = &v - return s -} - -// SetUserVolumeSizeGib sets the UserVolumeSizeGib field's value. -func (s *WorkspaceProperties) SetUserVolumeSizeGib(v int64) *WorkspaceProperties { - s.UserVolumeSizeGib = &v - return s -} - -// Information used to create a WorkSpace. -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceRequest -type WorkspaceRequest struct { - _ struct{} `type:"structure"` - - // The identifier of the bundle for the WorkSpace. You can use DescribeWorkspaceBundles - // to list the available bundles. - // - // BundleId is a required field - BundleId *string `type:"string" required:"true"` - - // The identifier of the AWS Directory Service directory for the WorkSpace. - // You can use DescribeWorkspaceDirectories to list the available directories. 
- // - // DirectoryId is a required field - DirectoryId *string `type:"string" required:"true"` - - // Indicates whether the data stored on the root volume is encrypted. - RootVolumeEncryptionEnabled *bool `type:"boolean"` - - // The tags for the WorkSpace. - Tags []*Tag `type:"list"` - - // The username of the user for the WorkSpace. This username must exist in the - // AWS Directory Service directory for the WorkSpace. - // - // UserName is a required field - UserName *string `min:"1" type:"string" required:"true"` - - // Indicates whether the data stored on the user volume is encrypted. - UserVolumeEncryptionEnabled *bool `type:"boolean"` - - // The KMS key used to encrypt data stored on your WorkSpace. - VolumeEncryptionKey *string `type:"string"` - - // The WorkSpace properties. - WorkspaceProperties *WorkspaceProperties `type:"structure"` -} - -// String returns the string representation -func (s WorkspaceRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WorkspaceRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *WorkspaceRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WorkspaceRequest"} - if s.BundleId == nil { - invalidParams.Add(request.NewErrParamRequired("BundleId")) - } - if s.DirectoryId == nil { - invalidParams.Add(request.NewErrParamRequired("DirectoryId")) - } - if s.UserName == nil { - invalidParams.Add(request.NewErrParamRequired("UserName")) - } - if s.UserName != nil && len(*s.UserName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBundleId sets the BundleId field's value. -func (s *WorkspaceRequest) SetBundleId(v string) *WorkspaceRequest { - s.BundleId = &v - return s -} - -// SetDirectoryId sets the DirectoryId field's value. -func (s *WorkspaceRequest) SetDirectoryId(v string) *WorkspaceRequest { - s.DirectoryId = &v - return s -} - -// SetRootVolumeEncryptionEnabled sets the RootVolumeEncryptionEnabled field's value. -func (s *WorkspaceRequest) SetRootVolumeEncryptionEnabled(v bool) *WorkspaceRequest { - s.RootVolumeEncryptionEnabled = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *WorkspaceRequest) SetTags(v []*Tag) *WorkspaceRequest { - s.Tags = v - return s -} - -// SetUserName sets the UserName field's value. -func (s *WorkspaceRequest) SetUserName(v string) *WorkspaceRequest { - s.UserName = &v - return s -} - -// SetUserVolumeEncryptionEnabled sets the UserVolumeEncryptionEnabled field's value. -func (s *WorkspaceRequest) SetUserVolumeEncryptionEnabled(v bool) *WorkspaceRequest { - s.UserVolumeEncryptionEnabled = &v - return s -} - -// SetVolumeEncryptionKey sets the VolumeEncryptionKey field's value. 
-func (s *WorkspaceRequest) SetVolumeEncryptionKey(v string) *WorkspaceRequest { - s.VolumeEncryptionKey = &v - return s -} - -// SetWorkspaceProperties sets the WorkspaceProperties field's value. -func (s *WorkspaceRequest) SetWorkspaceProperties(v *WorkspaceProperties) *WorkspaceRequest { - s.WorkspaceProperties = v - return s -} - -const ( - // ComputeValue is a Compute enum value - ComputeValue = "VALUE" - - // ComputeStandard is a Compute enum value - ComputeStandard = "STANDARD" - - // ComputePerformance is a Compute enum value - ComputePerformance = "PERFORMANCE" - - // ComputePower is a Compute enum value - ComputePower = "POWER" - - // ComputeGraphics is a Compute enum value - ComputeGraphics = "GRAPHICS" -) - -const ( - // ConnectionStateConnected is a ConnectionState enum value - ConnectionStateConnected = "CONNECTED" - - // ConnectionStateDisconnected is a ConnectionState enum value - ConnectionStateDisconnected = "DISCONNECTED" - - // ConnectionStateUnknown is a ConnectionState enum value - ConnectionStateUnknown = "UNKNOWN" -) - -const ( - // ModificationResourceEnumRootVolume is a ModificationResourceEnum enum value - ModificationResourceEnumRootVolume = "ROOT_VOLUME" - - // ModificationResourceEnumUserVolume is a ModificationResourceEnum enum value - ModificationResourceEnumUserVolume = "USER_VOLUME" - - // ModificationResourceEnumComputeType is a ModificationResourceEnum enum value - ModificationResourceEnumComputeType = "COMPUTE_TYPE" -) - -const ( - // ModificationStateEnumUpdateInitiated is a ModificationStateEnum enum value - ModificationStateEnumUpdateInitiated = "UPDATE_INITIATED" - - // ModificationStateEnumUpdateInProgress is a ModificationStateEnum enum value - ModificationStateEnumUpdateInProgress = "UPDATE_IN_PROGRESS" -) - -const ( - // RunningModeAutoStop is a RunningMode enum value - RunningModeAutoStop = "AUTO_STOP" - - // RunningModeAlwaysOn is a RunningMode enum value - RunningModeAlwaysOn = "ALWAYS_ON" -) - -const ( - // 
WorkspaceDirectoryStateRegistering is a WorkspaceDirectoryState enum value - WorkspaceDirectoryStateRegistering = "REGISTERING" - - // WorkspaceDirectoryStateRegistered is a WorkspaceDirectoryState enum value - WorkspaceDirectoryStateRegistered = "REGISTERED" - - // WorkspaceDirectoryStateDeregistering is a WorkspaceDirectoryState enum value - WorkspaceDirectoryStateDeregistering = "DEREGISTERING" - - // WorkspaceDirectoryStateDeregistered is a WorkspaceDirectoryState enum value - WorkspaceDirectoryStateDeregistered = "DEREGISTERED" - - // WorkspaceDirectoryStateError is a WorkspaceDirectoryState enum value - WorkspaceDirectoryStateError = "ERROR" -) - -const ( - // WorkspaceDirectoryTypeSimpleAd is a WorkspaceDirectoryType enum value - WorkspaceDirectoryTypeSimpleAd = "SIMPLE_AD" - - // WorkspaceDirectoryTypeAdConnector is a WorkspaceDirectoryType enum value - WorkspaceDirectoryTypeAdConnector = "AD_CONNECTOR" -) - -const ( - // WorkspaceStatePending is a WorkspaceState enum value - WorkspaceStatePending = "PENDING" - - // WorkspaceStateAvailable is a WorkspaceState enum value - WorkspaceStateAvailable = "AVAILABLE" - - // WorkspaceStateImpaired is a WorkspaceState enum value - WorkspaceStateImpaired = "IMPAIRED" - - // WorkspaceStateUnhealthy is a WorkspaceState enum value - WorkspaceStateUnhealthy = "UNHEALTHY" - - // WorkspaceStateRebooting is a WorkspaceState enum value - WorkspaceStateRebooting = "REBOOTING" - - // WorkspaceStateStarting is a WorkspaceState enum value - WorkspaceStateStarting = "STARTING" - - // WorkspaceStateRebuilding is a WorkspaceState enum value - WorkspaceStateRebuilding = "REBUILDING" - - // WorkspaceStateMaintenance is a WorkspaceState enum value - WorkspaceStateMaintenance = "MAINTENANCE" - - // WorkspaceStateTerminating is a WorkspaceState enum value - WorkspaceStateTerminating = "TERMINATING" - - // WorkspaceStateTerminated is a WorkspaceState enum value - WorkspaceStateTerminated = "TERMINATED" - - // WorkspaceStateSuspended is a 
WorkspaceState enum value - WorkspaceStateSuspended = "SUSPENDED" - - // WorkspaceStateUpdating is a WorkspaceState enum value - WorkspaceStateUpdating = "UPDATING" - - // WorkspaceStateStopping is a WorkspaceState enum value - WorkspaceStateStopping = "STOPPING" - - // WorkspaceStateStopped is a WorkspaceState enum value - WorkspaceStateStopped = "STOPPED" - - // WorkspaceStateError is a WorkspaceState enum value - WorkspaceStateError = "ERROR" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/doc.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/doc.go deleted file mode 100644 index cae0167d1b3..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package workspaces provides the client and types for making API -// requests to Amazon WorkSpaces. -// -// Amazon WorkSpaces enables you to provision virtual, cloud-based Microsoft -// Windows desktops for your users. -// -// See https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08 for more information on this service. -// -// See workspaces package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/workspaces/ -// -// Using the Client -// -// To contact Amazon WorkSpaces with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon WorkSpaces client WorkSpaces for more -// information on creating client for this service. 
-// https://docs.aws.amazon.com/sdk-for-go/api/service/workspaces/#New -package workspaces diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/errors.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/errors.go deleted file mode 100644 index 78fcbedb2b2..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/errors.go +++ /dev/null @@ -1,56 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package workspaces - -const ( - - // ErrCodeAccessDeniedException for service response error code - // "AccessDeniedException". - // - // The user is not authorized to access a resource. - ErrCodeAccessDeniedException = "AccessDeniedException" - - // ErrCodeInvalidParameterValuesException for service response error code - // "InvalidParameterValuesException". - // - // One or more parameter values are not valid. - ErrCodeInvalidParameterValuesException = "InvalidParameterValuesException" - - // ErrCodeInvalidResourceStateException for service response error code - // "InvalidResourceStateException". - // - // The state of the WorkSpace is not valid for this operation. - ErrCodeInvalidResourceStateException = "InvalidResourceStateException" - - // ErrCodeOperationInProgressException for service response error code - // "OperationInProgressException". - // - // The properties of this WorkSpace are currently being modified. Try again - // in a moment. - ErrCodeOperationInProgressException = "OperationInProgressException" - - // ErrCodeResourceLimitExceededException for service response error code - // "ResourceLimitExceededException". - // - // Your resource limits have been exceeded. - ErrCodeResourceLimitExceededException = "ResourceLimitExceededException" - - // ErrCodeResourceNotFoundException for service response error code - // "ResourceNotFoundException". - // - // The resource could not be found. 
- ErrCodeResourceNotFoundException = "ResourceNotFoundException" - - // ErrCodeResourceUnavailableException for service response error code - // "ResourceUnavailableException". - // - // The specified resource is not available. - ErrCodeResourceUnavailableException = "ResourceUnavailableException" - - // ErrCodeUnsupportedWorkspaceConfigurationException for service response error code - // "UnsupportedWorkspaceConfigurationException". - // - // The configuration of this WorkSpace is not supported for this operation. - // For more information, see the Amazon WorkSpaces Administration Guide (http://docs.aws.amazon.com/workspaces/latest/adminguide/). - ErrCodeUnsupportedWorkspaceConfigurationException = "UnsupportedWorkspaceConfigurationException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go deleted file mode 100644 index 45241703115..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package workspaces - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// WorkSpaces provides the API operation methods for making requests to -// Amazon WorkSpaces. See this package's package overview docs -// for details on the service. -// -// WorkSpaces methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. 
-type WorkSpaces struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "workspaces" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the WorkSpaces client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a WorkSpaces client from just a session. -// svc := workspaces.New(mySession) -// -// // Create a WorkSpaces client with additional configuration -// svc := workspaces.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *WorkSpaces { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WorkSpaces { - svc := &WorkSpaces{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2015-04-08", - JSONVersion: "1.1", - TargetPrefix: "WorkspacesService", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a WorkSpaces operation and runs any -// custom request initialization. -func (c *WorkSpaces) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/chzyer/readline/CHANGELOG.md b/vendor/github.com/chzyer/readline/CHANGELOG.md deleted file mode 100644 index 5c1811a7fd4..00000000000 --- a/vendor/github.com/chzyer/readline/CHANGELOG.md +++ /dev/null @@ -1,58 +0,0 @@ -# ChangeLog - -### 1.4 - 2016-07-25 - -* [#60][60] Support dynamic autocompletion -* Fix ANSI parser on Windows -* Fix wrong column width in complete mode on Windows -* Remove dependent package "golang.org/x/crypto/ssh/terminal" - -### 1.3 - 2016-05-09 - -* [#38][38] add SetChildren for prefix completer interface -* [#42][42] improve multiple lines compatibility -* [#43][43] remove sub-package(runes) for gopkg compatiblity -* [#46][46] Auto complete with space prefixed line -* [#48][48] 
support suspend process (ctrl+Z) -* [#49][49] fix bug that check equals with previous command -* [#53][53] Fix bug which causes integer divide by zero panicking when input buffer is empty - -### 1.2 - 2016-03-05 - -* Add a demo for checking password strength [example/readline-pass-strength](https://github.com/chzyer/readline/blob/master/example/readline-pass-strength/readline-pass-strength.go), , written by [@sahib](https://github.com/sahib) -* [#23][23], support stdin remapping -* [#27][27], add a `UniqueEditLine` to `Config`, which will erase the editing line after user submited it, usually use in IM. -* Add a demo for multiline [example/readline-multiline](https://github.com/chzyer/readline/blob/master/example/readline-multiline/readline-multiline.go) which can submit one SQL by multiple lines. -* Supports performs even stdin/stdout is not a tty. -* Add a new simple apis for single instance, check by [here](https://github.com/chzyer/readline/blob/master/std.go). It need to save history manually if using this api. -* [#28][28], fixes the history is not working as expected. -* [#33][33], vim mode now support `c`, `d`, `x (delete character)`, `r (replace character)` - -### 1.1 - 2015-11-20 - -* [#12][12] Add support for key ``/``/`` -* Only enter raw mode as needed (calling `Readline()`), program will receive signal(e.g. Ctrl+C) if not interact with `readline`. -* Bugs fixed for `PrefixCompleter` -* Press `Ctrl+D` in empty line will cause `io.EOF` in error, Press `Ctrl+C` in anytime will cause `ErrInterrupt` instead of `io.EOF`, this will privodes a shell-like user experience. -* Customable Interrupt/EOF prompt in `Config` -* [#17][17] Change atomic package to use 32bit function to let it runnable on arm 32bit devices -* Provides a new password user experience(`readline.ReadPasswordEx()`). - -### 1.0 - 2015-10-14 - -* Initial public release. 
- -[12]: https://github.com/chzyer/readline/pull/12 -[17]: https://github.com/chzyer/readline/pull/17 -[23]: https://github.com/chzyer/readline/pull/23 -[27]: https://github.com/chzyer/readline/pull/27 -[28]: https://github.com/chzyer/readline/pull/28 -[33]: https://github.com/chzyer/readline/pull/33 -[38]: https://github.com/chzyer/readline/pull/38 -[42]: https://github.com/chzyer/readline/pull/42 -[43]: https://github.com/chzyer/readline/pull/43 -[46]: https://github.com/chzyer/readline/pull/46 -[48]: https://github.com/chzyer/readline/pull/48 -[49]: https://github.com/chzyer/readline/pull/49 -[53]: https://github.com/chzyer/readline/pull/53 -[60]: https://github.com/chzyer/readline/pull/60 diff --git a/vendor/github.com/chzyer/readline/LICENSE b/vendor/github.com/chzyer/readline/LICENSE deleted file mode 100644 index c9afab3dcd0..00000000000 --- a/vendor/github.com/chzyer/readline/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Chzyer - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md deleted file mode 100644 index fab974b7f34..00000000000 --- a/vendor/github.com/chzyer/readline/README.md +++ /dev/null @@ -1,114 +0,0 @@ -[![Build Status](https://travis-ci.org/chzyer/readline.svg?branch=master)](https://travis-ci.org/chzyer/readline) -[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.md) -[![Version](https://img.shields.io/github/tag/chzyer/readline.svg)](https://github.com/chzyer/readline/releases) -[![GoDoc](https://godoc.org/github.com/chzyer/readline?status.svg)](https://godoc.org/github.com/chzyer/readline) -[![OpenCollective](https://opencollective.com/readline/badge/backers.svg)](#backers) -[![OpenCollective](https://opencollective.com/readline/badge/sponsors.svg)](#sponsors) - -

- - - -

- -A powerful readline library in `Linux` `macOS` `Windows` `Solaris` - -## Guide - -* [Demo](example/readline-demo/readline-demo.go) -* [Shortcut](doc/shortcut.md) - -## Repos using readline - -[![cockroachdb](https://img.shields.io/github/stars/cockroachdb/cockroach.svg?label=cockroachdb/cockroach)](https://github.com/cockroachdb/cockroach) -[![robertkrimen/otto](https://img.shields.io/github/stars/robertkrimen/otto.svg?label=robertkrimen/otto)](https://github.com/robertkrimen/otto) -[![empire](https://img.shields.io/github/stars/remind101/empire.svg?label=remind101/empire)](https://github.com/remind101/empire) -[![mehrdadrad/mylg](https://img.shields.io/github/stars/mehrdadrad/mylg.svg?label=mehrdadrad/mylg)](https://github.com/mehrdadrad/mylg) -[![knq/usql](https://img.shields.io/github/stars/knq/usql.svg?label=knq/usql)](https://github.com/knq/usql) -[![youtube/doorman](https://img.shields.io/github/stars/youtube/doorman.svg?label=youtube/doorman)](https://github.com/youtube/doorman) -[![bom-d-van/harp](https://img.shields.io/github/stars/bom-d-van/harp.svg?label=bom-d-van/harp)](https://github.com/bom-d-van/harp) -[![abiosoft/ishell](https://img.shields.io/github/stars/abiosoft/ishell.svg?label=abiosoft/ishell)](https://github.com/abiosoft/ishell) -[![Netflix/hal-9001](https://img.shields.io/github/stars/Netflix/hal-9001.svg?label=Netflix/hal-9001)](https://github.com/Netflix/hal-9001) -[![docker/go-p9p](https://img.shields.io/github/stars/docker/go-p9p.svg?label=docker/go-p9p)](https://github.com/docker/go-p9p) - - -## Feedback - -If you have any questions, please submit a github issue and any pull requests is welcomed :) - -* [https://twitter.com/chzyer](https://twitter.com/chzyer) -* [http://weibo.com/2145262190](http://weibo.com/2145262190) - - -## Backers - -Love Readline? Help me keep it alive by donating funds to cover project expenses!
-[[Become a backer](https://opencollective.com/readline#backer)] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Sponsors - -Become a sponsor and get your logo here on our Github page. [[Become a sponsor](https://opencollective.com/readline#sponsor)] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/chzyer/readline/ansi_windows.go b/vendor/github.com/chzyer/readline/ansi_windows.go deleted file mode 100644 index da106b576c5..00000000000 --- a/vendor/github.com/chzyer/readline/ansi_windows.go +++ /dev/null @@ -1,246 +0,0 @@ -// +build windows - -package readline - -import ( - "bufio" - "io" - "strconv" - "strings" - "sync" - "unicode/utf8" - "unsafe" -) - -const ( - _ = uint16(0) - COLOR_FBLUE = 0x0001 - COLOR_FGREEN = 0x0002 - COLOR_FRED = 0x0004 - COLOR_FINTENSITY = 0x0008 - - COLOR_BBLUE = 0x0010 - COLOR_BGREEN = 0x0020 - COLOR_BRED = 0x0040 - COLOR_BINTENSITY = 0x0080 - - COMMON_LVB_UNDERSCORE = 0x8000 -) - -var ColorTableFg = []word{ - 0, // 30: Black - COLOR_FRED, // 31: Red - COLOR_FGREEN, // 32: Green - COLOR_FRED | COLOR_FGREEN, // 33: Yellow - COLOR_FBLUE, // 34: Blue - COLOR_FRED | COLOR_FBLUE, // 35: Magenta - COLOR_FGREEN | COLOR_FBLUE, // 36: Cyan - COLOR_FRED | COLOR_FBLUE | COLOR_FGREEN, // 37: White -} - -var ColorTableBg = []word{ - 0, // 40: Black - COLOR_BRED, // 41: Red - COLOR_BGREEN, // 42: Green - COLOR_BRED | COLOR_BGREEN, // 43: Yellow - COLOR_BBLUE, // 44: Blue - COLOR_BRED | COLOR_BBLUE, // 45: Magenta - COLOR_BGREEN | COLOR_BBLUE, // 46: Cyan - COLOR_BRED | COLOR_BBLUE | COLOR_BGREEN, // 47: White -} - -type ANSIWriter struct { - target io.Writer - wg sync.WaitGroup - ctx *ANSIWriterCtx - sync.Mutex -} - -func NewANSIWriter(w io.Writer) *ANSIWriter { - a := &ANSIWriter{ - target: w, - ctx: NewANSIWriterCtx(w), - } - return a -} - -func (a *ANSIWriter) Close() error { - a.wg.Wait() - return nil -} - -type ANSIWriterCtx struct { - isEsc bool - isEscSeq bool - arg []string - 
target *bufio.Writer - wantFlush bool -} - -func NewANSIWriterCtx(target io.Writer) *ANSIWriterCtx { - return &ANSIWriterCtx{ - target: bufio.NewWriter(target), - } -} - -func (a *ANSIWriterCtx) Flush() { - a.target.Flush() -} - -func (a *ANSIWriterCtx) process(r rune) bool { - if a.wantFlush { - if r == 0 || r == CharEsc { - a.wantFlush = false - a.target.Flush() - } - } - if a.isEscSeq { - a.isEscSeq = a.ioloopEscSeq(a.target, r, &a.arg) - return true - } - - switch r { - case CharEsc: - a.isEsc = true - case '[': - if a.isEsc { - a.arg = nil - a.isEscSeq = true - a.isEsc = false - break - } - fallthrough - default: - a.target.WriteRune(r) - a.wantFlush = true - } - return true -} - -func (a *ANSIWriterCtx) ioloopEscSeq(w *bufio.Writer, r rune, argptr *[]string) bool { - arg := *argptr - var err error - - if r >= 'A' && r <= 'D' { - count := short(GetInt(arg, 1)) - info, err := GetConsoleScreenBufferInfo() - if err != nil { - return false - } - switch r { - case 'A': // up - info.dwCursorPosition.y -= count - case 'B': // down - info.dwCursorPosition.y += count - case 'C': // right - info.dwCursorPosition.x += count - case 'D': // left - info.dwCursorPosition.x -= count - } - SetConsoleCursorPosition(&info.dwCursorPosition) - return false - } - - switch r { - case 'J': - killLines() - case 'K': - eraseLine() - case 'm': - color := word(0) - for _, item := range arg { - var c int - c, err = strconv.Atoi(item) - if err != nil { - w.WriteString("[" + strings.Join(arg, ";") + "m") - break - } - if c >= 30 && c < 40 { - color ^= COLOR_FINTENSITY - color |= ColorTableFg[c-30] - } else if c >= 40 && c < 50 { - color ^= COLOR_BINTENSITY - color |= ColorTableBg[c-40] - } else if c == 4 { - color |= COMMON_LVB_UNDERSCORE | ColorTableFg[7] - } else { // unknown code treat as reset - color = ColorTableFg[7] - } - } - if err != nil { - break - } - kernel.SetConsoleTextAttribute(stdout, uintptr(color)) - case '\007': // set title - case ';': - if len(arg) == 0 || 
arg[len(arg)-1] != "" { - arg = append(arg, "") - *argptr = arg - } - return true - default: - if len(arg) == 0 { - arg = append(arg, "") - } - arg[len(arg)-1] += string(r) - *argptr = arg - return true - } - *argptr = nil - return false -} - -func (a *ANSIWriter) Write(b []byte) (int, error) { - a.Lock() - defer a.Unlock() - - off := 0 - for len(b) > off { - r, size := utf8.DecodeRune(b[off:]) - if size == 0 { - return off, io.ErrShortWrite - } - off += size - a.ctx.process(r) - } - a.ctx.Flush() - return off, nil -} - -func killLines() error { - sbi, err := GetConsoleScreenBufferInfo() - if err != nil { - return err - } - - size := (sbi.dwCursorPosition.y - sbi.dwSize.y) * sbi.dwSize.x - size += sbi.dwCursorPosition.x - - var written int - kernel.FillConsoleOutputAttribute(stdout, uintptr(ColorTableFg[7]), - uintptr(size), - sbi.dwCursorPosition.ptr(), - uintptr(unsafe.Pointer(&written)), - ) - return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), - uintptr(size), - sbi.dwCursorPosition.ptr(), - uintptr(unsafe.Pointer(&written)), - ) -} - -func eraseLine() error { - sbi, err := GetConsoleScreenBufferInfo() - if err != nil { - return err - } - - size := sbi.dwSize.x - sbi.dwCursorPosition.x = 0 - var written int - return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), - uintptr(size), - sbi.dwCursorPosition.ptr(), - uintptr(unsafe.Pointer(&written)), - ) -} diff --git a/vendor/github.com/chzyer/readline/complete.go b/vendor/github.com/chzyer/readline/complete.go deleted file mode 100644 index 349fc3d2446..00000000000 --- a/vendor/github.com/chzyer/readline/complete.go +++ /dev/null @@ -1,283 +0,0 @@ -package readline - -import ( - "bufio" - "bytes" - "fmt" - "io" -) - -type AutoCompleter interface { - // Readline will pass the whole line and current offset to it - // Completer need to pass all the candidates, and how long they shared the same characters in line - // Example: - // [go, git, git-shell, grep] - // Do("g", 1) => ["o", "it", 
"it-shell", "rep"], 1 - // Do("gi", 2) => ["t", "t-shell"], 2 - // Do("git", 3) => ["", "-shell"], 3 - Do(line []rune, pos int) (newLine [][]rune, length int) -} - -type TabCompleter struct{} - -func (t *TabCompleter) Do([]rune, int) ([][]rune, int) { - return [][]rune{[]rune("\t")}, 0 -} - -type opCompleter struct { - w io.Writer - op *Operation - width int - - inCompleteMode bool - inSelectMode bool - candidate [][]rune - candidateSource []rune - candidateOff int - candidateChoise int - candidateColNum int -} - -func newOpCompleter(w io.Writer, op *Operation, width int) *opCompleter { - return &opCompleter{ - w: w, - op: op, - width: width, - } -} - -func (o *opCompleter) doSelect() { - if len(o.candidate) == 1 { - o.op.buf.WriteRunes(o.candidate[0]) - o.ExitCompleteMode(false) - return - } - o.nextCandidate(1) - o.CompleteRefresh() -} - -func (o *opCompleter) nextCandidate(i int) { - o.candidateChoise += i - o.candidateChoise = o.candidateChoise % len(o.candidate) - if o.candidateChoise < 0 { - o.candidateChoise = len(o.candidate) + o.candidateChoise - } -} - -func (o *opCompleter) OnComplete() bool { - if o.width == 0 { - return false - } - if o.IsInCompleteSelectMode() { - o.doSelect() - return true - } - - buf := o.op.buf - rs := buf.Runes() - - if o.IsInCompleteMode() && o.candidateSource != nil && runes.Equal(rs, o.candidateSource) { - o.EnterCompleteSelectMode() - o.doSelect() - return true - } - - o.ExitCompleteSelectMode() - o.candidateSource = rs - newLines, offset := o.op.cfg.AutoComplete.Do(rs, buf.idx) - if len(newLines) == 0 { - o.ExitCompleteMode(false) - return true - } - - // only Aggregate candidates in non-complete mode - if !o.IsInCompleteMode() { - if len(newLines) == 1 { - buf.WriteRunes(newLines[0]) - o.ExitCompleteMode(false) - return true - } - - same, size := runes.Aggregate(newLines) - if size > 0 { - buf.WriteRunes(same) - o.ExitCompleteMode(false) - return true - } - } - - o.EnterCompleteMode(offset, newLines) - return true -} - -func 
(o *opCompleter) IsInCompleteSelectMode() bool { - return o.inSelectMode -} - -func (o *opCompleter) IsInCompleteMode() bool { - return o.inCompleteMode -} - -func (o *opCompleter) HandleCompleteSelect(r rune) bool { - next := true - switch r { - case CharEnter, CharCtrlJ: - next = false - o.op.buf.WriteRunes(o.op.candidate[o.op.candidateChoise]) - o.ExitCompleteMode(false) - case CharLineStart: - num := o.candidateChoise % o.candidateColNum - o.nextCandidate(-num) - case CharLineEnd: - num := o.candidateColNum - o.candidateChoise%o.candidateColNum - 1 - o.candidateChoise += num - if o.candidateChoise >= len(o.candidate) { - o.candidateChoise = len(o.candidate) - 1 - } - case CharBackspace: - o.ExitCompleteSelectMode() - next = false - case CharTab, CharForward: - o.doSelect() - case CharBell, CharInterrupt: - o.ExitCompleteMode(true) - next = false - case CharNext: - tmpChoise := o.candidateChoise + o.candidateColNum - if tmpChoise >= o.getMatrixSize() { - tmpChoise -= o.getMatrixSize() - } else if tmpChoise >= len(o.candidate) { - tmpChoise += o.candidateColNum - tmpChoise -= o.getMatrixSize() - } - o.candidateChoise = tmpChoise - case CharBackward: - o.nextCandidate(-1) - case CharPrev: - tmpChoise := o.candidateChoise - o.candidateColNum - if tmpChoise < 0 { - tmpChoise += o.getMatrixSize() - if tmpChoise >= len(o.candidate) { - tmpChoise -= o.candidateColNum - } - } - o.candidateChoise = tmpChoise - default: - next = false - o.ExitCompleteSelectMode() - } - if next { - o.CompleteRefresh() - return true - } - return false -} - -func (o *opCompleter) getMatrixSize() int { - line := len(o.candidate) / o.candidateColNum - if len(o.candidate)%o.candidateColNum != 0 { - line++ - } - return line * o.candidateColNum -} - -func (o *opCompleter) OnWidthChange(newWidth int) { - o.width = newWidth -} - -func (o *opCompleter) CompleteRefresh() { - if !o.inCompleteMode { - return - } - lineCnt := o.op.buf.CursorLineCount() - colWidth := 0 - for _, c := range o.candidate { - 
w := runes.WidthAll(c) - if w > colWidth { - colWidth = w - } - } - colWidth += o.candidateOff + 1 - same := o.op.buf.RuneSlice(-o.candidateOff) - - // -1 to avoid reach the end of line - width := o.width - 1 - colNum := width / colWidth - colWidth += (width - (colWidth * colNum)) / colNum - - o.candidateColNum = colNum - buf := bufio.NewWriter(o.w) - buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) - - colIdx := 0 - lines := 1 - buf.WriteString("\033[J") - for idx, c := range o.candidate { - inSelect := idx == o.candidateChoise && o.IsInCompleteSelectMode() - if inSelect { - buf.WriteString("\033[30;47m") - } - buf.WriteString(string(same)) - buf.WriteString(string(c)) - buf.Write(bytes.Repeat([]byte(" "), colWidth-len(c)-len(same))) - - if inSelect { - buf.WriteString("\033[0m") - } - - colIdx++ - if colIdx == colNum { - buf.WriteString("\n") - lines++ - colIdx = 0 - } - } - - // move back - fmt.Fprintf(buf, "\033[%dA\r", lineCnt-1+lines) - fmt.Fprintf(buf, "\033[%dC", o.op.buf.idx+o.op.buf.PromptLen()) - buf.Flush() -} - -func (o *opCompleter) aggCandidate(candidate [][]rune) int { - offset := 0 - for i := 0; i < len(candidate[0]); i++ { - for j := 0; j < len(candidate)-1; j++ { - if i > len(candidate[j]) { - goto aggregate - } - if candidate[j][i] != candidate[j+1][i] { - goto aggregate - } - } - offset = i - } -aggregate: - return offset -} - -func (o *opCompleter) EnterCompleteSelectMode() { - o.inSelectMode = true - o.candidateChoise = -1 - o.CompleteRefresh() -} - -func (o *opCompleter) EnterCompleteMode(offset int, candidate [][]rune) { - o.inCompleteMode = true - o.candidate = candidate - o.candidateOff = offset - o.CompleteRefresh() -} - -func (o *opCompleter) ExitCompleteSelectMode() { - o.inSelectMode = false - o.candidate = nil - o.candidateChoise = -1 - o.candidateOff = -1 - o.candidateSource = nil -} - -func (o *opCompleter) ExitCompleteMode(revent bool) { - o.inCompleteMode = false - o.ExitCompleteSelectMode() -} diff --git 
a/vendor/github.com/chzyer/readline/complete_helper.go b/vendor/github.com/chzyer/readline/complete_helper.go deleted file mode 100644 index 58d724872bf..00000000000 --- a/vendor/github.com/chzyer/readline/complete_helper.go +++ /dev/null @@ -1,165 +0,0 @@ -package readline - -import ( - "bytes" - "strings" -) - -// Caller type for dynamic completion -type DynamicCompleteFunc func(string) []string - -type PrefixCompleterInterface interface { - Print(prefix string, level int, buf *bytes.Buffer) - Do(line []rune, pos int) (newLine [][]rune, length int) - GetName() []rune - GetChildren() []PrefixCompleterInterface - SetChildren(children []PrefixCompleterInterface) -} - -type DynamicPrefixCompleterInterface interface { - PrefixCompleterInterface - IsDynamic() bool - GetDynamicNames(line []rune) [][]rune -} - -type PrefixCompleter struct { - Name []rune - Dynamic bool - Callback DynamicCompleteFunc - Children []PrefixCompleterInterface -} - -func (p *PrefixCompleter) Tree(prefix string) string { - buf := bytes.NewBuffer(nil) - p.Print(prefix, 0, buf) - return buf.String() -} - -func Print(p PrefixCompleterInterface, prefix string, level int, buf *bytes.Buffer) { - if strings.TrimSpace(string(p.GetName())) != "" { - buf.WriteString(prefix) - if level > 0 { - buf.WriteString("├") - buf.WriteString(strings.Repeat("─", (level*4)-2)) - buf.WriteString(" ") - } - buf.WriteString(string(p.GetName()) + "\n") - level++ - } - for _, ch := range p.GetChildren() { - ch.Print(prefix, level, buf) - } -} - -func (p *PrefixCompleter) Print(prefix string, level int, buf *bytes.Buffer) { - Print(p, prefix, level, buf) -} - -func (p *PrefixCompleter) IsDynamic() bool { - return p.Dynamic -} - -func (p *PrefixCompleter) GetName() []rune { - return p.Name -} - -func (p *PrefixCompleter) GetDynamicNames(line []rune) [][]rune { - var names = [][]rune{} - for _, name := range p.Callback(string(line)) { - names = append(names, []rune(name+" ")) - } - return names -} - -func (p *PrefixCompleter) 
GetChildren() []PrefixCompleterInterface { - return p.Children -} - -func (p *PrefixCompleter) SetChildren(children []PrefixCompleterInterface) { - p.Children = children -} - -func NewPrefixCompleter(pc ...PrefixCompleterInterface) *PrefixCompleter { - return PcItem("", pc...) -} - -func PcItem(name string, pc ...PrefixCompleterInterface) *PrefixCompleter { - name += " " - return &PrefixCompleter{ - Name: []rune(name), - Dynamic: false, - Children: pc, - } -} - -func PcItemDynamic(callback DynamicCompleteFunc, pc ...PrefixCompleterInterface) *PrefixCompleter { - return &PrefixCompleter{ - Callback: callback, - Dynamic: true, - Children: pc, - } -} - -func (p *PrefixCompleter) Do(line []rune, pos int) (newLine [][]rune, offset int) { - return doInternal(p, line, pos, line) -} - -func Do(p PrefixCompleterInterface, line []rune, pos int) (newLine [][]rune, offset int) { - return doInternal(p, line, pos, line) -} - -func doInternal(p PrefixCompleterInterface, line []rune, pos int, origLine []rune) (newLine [][]rune, offset int) { - line = runes.TrimSpaceLeft(line[:pos]) - goNext := false - var lineCompleter PrefixCompleterInterface - for _, child := range p.GetChildren() { - childNames := make([][]rune, 1) - - childDynamic, ok := child.(DynamicPrefixCompleterInterface) - if ok && childDynamic.IsDynamic() { - childNames = childDynamic.GetDynamicNames(origLine) - } else { - childNames[0] = child.GetName() - } - - for _, childName := range childNames { - if len(line) >= len(childName) { - if runes.HasPrefix(line, childName) { - if len(line) == len(childName) { - newLine = append(newLine, []rune{' '}) - } else { - newLine = append(newLine, childName) - } - offset = len(childName) - lineCompleter = child - goNext = true - } - } else { - if runes.HasPrefix(childName, line) { - newLine = append(newLine, childName[len(line):]) - offset = len(line) - lineCompleter = child - } - } - } - } - - if len(newLine) != 1 { - return - } - - tmpLine := make([]rune, 0, len(line)) - for i 
:= offset; i < len(line); i++ { - if line[i] == ' ' { - continue - } - - tmpLine = append(tmpLine, line[i:]...) - return doInternal(lineCompleter, tmpLine, len(tmpLine), origLine) - } - - if goNext { - return doInternal(lineCompleter, nil, 0, origLine) - } - return -} diff --git a/vendor/github.com/chzyer/readline/complete_segment.go b/vendor/github.com/chzyer/readline/complete_segment.go deleted file mode 100644 index 5ceadd80f97..00000000000 --- a/vendor/github.com/chzyer/readline/complete_segment.go +++ /dev/null @@ -1,82 +0,0 @@ -package readline - -type SegmentCompleter interface { - // a - // |- a1 - // |--- a11 - // |- a2 - // b - // input: - // DoTree([], 0) [a, b] - // DoTree([a], 1) [a] - // DoTree([a, ], 0) [a1, a2] - // DoTree([a, a], 1) [a1, a2] - // DoTree([a, a1], 2) [a1] - // DoTree([a, a1, ], 0) [a11] - // DoTree([a, a1, a], 1) [a11] - DoSegment([][]rune, int) [][]rune -} - -type dumpSegmentCompleter struct { - f func([][]rune, int) [][]rune -} - -func (d *dumpSegmentCompleter) DoSegment(segment [][]rune, n int) [][]rune { - return d.f(segment, n) -} - -func SegmentFunc(f func([][]rune, int) [][]rune) AutoCompleter { - return &SegmentComplete{&dumpSegmentCompleter{f}} -} - -func SegmentAutoComplete(completer SegmentCompleter) *SegmentComplete { - return &SegmentComplete{ - SegmentCompleter: completer, - } -} - -type SegmentComplete struct { - SegmentCompleter -} - -func RetSegment(segments [][]rune, cands [][]rune, idx int) ([][]rune, int) { - ret := make([][]rune, 0, len(cands)) - lastSegment := segments[len(segments)-1] - for _, cand := range cands { - if !runes.HasPrefix(cand, lastSegment) { - continue - } - ret = append(ret, cand[len(lastSegment):]) - } - return ret, idx -} - -func SplitSegment(line []rune, pos int) ([][]rune, int) { - segs := [][]rune{} - lastIdx := -1 - line = line[:pos] - pos = 0 - for idx, l := range line { - if l == ' ' { - pos = 0 - segs = append(segs, line[lastIdx+1:idx]) - lastIdx = idx - } else { - pos++ - } - } - segs 
= append(segs, line[lastIdx+1:]) - return segs, pos -} - -func (c *SegmentComplete) Do(line []rune, pos int) (newLine [][]rune, offset int) { - - segment, idx := SplitSegment(line, pos) - - cands := c.DoSegment(segment, idx) - newLine, offset = RetSegment(segment, cands, idx) - for idx := range newLine { - newLine[idx] = append(newLine[idx], ' ') - } - return newLine, offset -} diff --git a/vendor/github.com/chzyer/readline/history.go b/vendor/github.com/chzyer/readline/history.go deleted file mode 100644 index b154aedd530..00000000000 --- a/vendor/github.com/chzyer/readline/history.go +++ /dev/null @@ -1,312 +0,0 @@ -package readline - -import ( - "bufio" - "container/list" - "fmt" - "os" - "strings" - "sync" -) - -type hisItem struct { - Source []rune - Version int64 - Tmp []rune -} - -func (h *hisItem) Clean() { - h.Source = nil - h.Tmp = nil -} - -type opHistory struct { - cfg *Config - history *list.List - historyVer int64 - current *list.Element - fd *os.File - fdLock sync.Mutex -} - -func newOpHistory(cfg *Config) (o *opHistory) { - o = &opHistory{ - cfg: cfg, - history: list.New(), - } - return o -} - -func (o *opHistory) Reset() { - o.history = list.New() - o.current = nil -} - -func (o *opHistory) IsHistoryClosed() bool { - o.fdLock.Lock() - defer o.fdLock.Unlock() - return o.fd.Fd() == ^(uintptr(0)) -} - -func (o *opHistory) Init() { - if o.IsHistoryClosed() { - o.initHistory() - } -} - -func (o *opHistory) initHistory() { - if o.cfg.HistoryFile != "" { - o.historyUpdatePath(o.cfg.HistoryFile) - } -} - -// only called by newOpHistory -func (o *opHistory) historyUpdatePath(path string) { - o.fdLock.Lock() - defer o.fdLock.Unlock() - f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) - if err != nil { - return - } - o.fd = f - r := bufio.NewReader(o.fd) - total := 0 - for ; ; total++ { - line, err := r.ReadString('\n') - if err != nil { - break - } - // ignore the empty line - line = strings.TrimSpace(line) - if len(line) == 0 { - 
continue - } - o.Push([]rune(line)) - o.Compact() - } - if total > o.cfg.HistoryLimit { - o.rewriteLocked() - } - o.historyVer++ - o.Push(nil) - return -} - -func (o *opHistory) Compact() { - for o.history.Len() > o.cfg.HistoryLimit && o.history.Len() > 0 { - o.history.Remove(o.history.Front()) - } -} - -func (o *opHistory) Rewrite() { - o.fdLock.Lock() - defer o.fdLock.Unlock() - o.rewriteLocked() -} - -func (o *opHistory) rewriteLocked() { - if o.cfg.HistoryFile == "" { - return - } - - tmpFile := o.cfg.HistoryFile + ".tmp" - fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0666) - if err != nil { - return - } - - buf := bufio.NewWriter(fd) - for elem := o.history.Front(); elem != nil; elem = elem.Next() { - buf.WriteString(string(elem.Value.(*hisItem).Source)) - } - buf.Flush() - - // replace history file - if err = os.Rename(tmpFile, o.cfg.HistoryFile); err != nil { - fd.Close() - return - } - - if o.fd != nil { - o.fd.Close() - } - // fd is write only, just satisfy what we need. 
- o.fd = fd -} - -func (o *opHistory) Close() { - o.fdLock.Lock() - defer o.fdLock.Unlock() - if o.fd != nil { - o.fd.Close() - } -} - -func (o *opHistory) FindBck(isNewSearch bool, rs []rune, start int) (int, *list.Element) { - for elem := o.current; elem != nil; elem = elem.Prev() { - item := o.showItem(elem.Value) - if isNewSearch { - start += len(rs) - } - if elem == o.current { - if len(item) >= start { - item = item[:start] - } - } - idx := runes.IndexAllBckEx(item, rs, o.cfg.HistorySearchFold) - if idx < 0 { - continue - } - return idx, elem - } - return -1, nil -} - -func (o *opHistory) FindFwd(isNewSearch bool, rs []rune, start int) (int, *list.Element) { - for elem := o.current; elem != nil; elem = elem.Next() { - item := o.showItem(elem.Value) - if isNewSearch { - start -= len(rs) - if start < 0 { - start = 0 - } - } - if elem == o.current { - if len(item)-1 >= start { - item = item[start:] - } else { - continue - } - } - idx := runes.IndexAllEx(item, rs, o.cfg.HistorySearchFold) - if idx < 0 { - continue - } - if elem == o.current { - idx += start - } - return idx, elem - } - return -1, nil -} - -func (o *opHistory) showItem(obj interface{}) []rune { - item := obj.(*hisItem) - if item.Version == o.historyVer { - return item.Tmp - } - return item.Source -} - -func (o *opHistory) Prev() []rune { - if o.current == nil { - return nil - } - current := o.current.Prev() - if current == nil { - return nil - } - o.current = current - return runes.Copy(o.showItem(current.Value)) -} - -func (o *opHistory) Next() ([]rune, bool) { - if o.current == nil { - return nil, false - } - current := o.current.Next() - if current == nil { - return nil, false - } - - o.current = current - return runes.Copy(o.showItem(current.Value)), true -} - -func (o *opHistory) debug() { - Debug("-------") - for item := o.history.Front(); item != nil; item = item.Next() { - Debug(fmt.Sprintf("%+v", item.Value)) - } -} - -// save history -func (o *opHistory) New(current []rune) (err error) { 
- current = runes.Copy(current) - - // if just use last command without modify - // just clean lastest history - if back := o.history.Back(); back != nil { - prev := back.Prev() - if prev != nil { - if runes.Equal(current, prev.Value.(*hisItem).Source) { - o.current = o.history.Back() - o.current.Value.(*hisItem).Clean() - o.historyVer++ - return nil - } - } - } - - if len(current) == 0 { - o.current = o.history.Back() - if o.current != nil { - o.current.Value.(*hisItem).Clean() - o.historyVer++ - return nil - } - } - - if o.current != o.history.Back() { - // move history item to current command - currentItem := o.current.Value.(*hisItem) - // set current to last item - o.current = o.history.Back() - - current = runes.Copy(currentItem.Tmp) - } - - // err only can be a IO error, just report - err = o.Update(current, true) - - // push a new one to commit current command - o.historyVer++ - o.Push(nil) - return -} - -func (o *opHistory) Revert() { - o.historyVer++ - o.current = o.history.Back() -} - -func (o *opHistory) Update(s []rune, commit bool) (err error) { - o.fdLock.Lock() - defer o.fdLock.Unlock() - s = runes.Copy(s) - if o.current == nil { - o.Push(s) - o.Compact() - return - } - r := o.current.Value.(*hisItem) - r.Version = o.historyVer - if commit { - r.Source = s - if o.fd != nil { - // just report the error - _, err = o.fd.Write([]byte(string(r.Source) + "\n")) - } - } else { - r.Tmp = append(r.Tmp[:0], s...) 
- } - o.current.Value = r - o.Compact() - return -} - -func (o *opHistory) Push(s []rune) { - s = runes.Copy(s) - elem := o.history.PushBack(&hisItem{Source: s}) - o.current = elem -} diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go deleted file mode 100644 index 2c93561d2c4..00000000000 --- a/vendor/github.com/chzyer/readline/operation.go +++ /dev/null @@ -1,504 +0,0 @@ -package readline - -import ( - "errors" - "io" -) - -var ( - ErrInterrupt = errors.New("Interrupt") -) - -type InterruptError struct { - Line []rune -} - -func (*InterruptError) Error() string { - return "Interrupted" -} - -type Operation struct { - cfg *Config - t *Terminal - buf *RuneBuffer - outchan chan []rune - errchan chan error - w io.Writer - - history *opHistory - *opSearch - *opCompleter - *opPassword - *opVim -} - -func (o *Operation) SetBuffer(what string) { - o.buf.Set([]rune(what)) -} - -type wrapWriter struct { - r *Operation - t *Terminal - target io.Writer -} - -func (w *wrapWriter) Write(b []byte) (int, error) { - if !w.t.IsReading() { - return w.target.Write(b) - } - - var ( - n int - err error - ) - w.r.buf.Refresh(func() { - n, err = w.target.Write(b) - }) - - if w.r.IsSearchMode() { - w.r.SearchRefresh(-1) - } - if w.r.IsInCompleteMode() { - w.r.CompleteRefresh() - } - return n, err -} - -func NewOperation(t *Terminal, cfg *Config) *Operation { - width := cfg.FuncGetWidth() - op := &Operation{ - t: t, - buf: NewRuneBuffer(t, cfg.Prompt, cfg, width), - outchan: make(chan []rune), - errchan: make(chan error), - } - op.w = op.buf.w - op.SetConfig(cfg) - op.opVim = newVimMode(op) - op.opCompleter = newOpCompleter(op.buf.w, op, width) - op.opPassword = newOpPassword(op) - op.cfg.FuncOnWidthChanged(func() { - newWidth := cfg.FuncGetWidth() - op.opCompleter.OnWidthChange(newWidth) - op.opSearch.OnWidthChange(newWidth) - op.buf.OnWidthChange(newWidth) - }) - go op.ioloop() - return op -} - -func (o *Operation) SetPrompt(s 
string) { - o.buf.SetPrompt(s) -} - -func (o *Operation) SetMaskRune(r rune) { - o.buf.SetMask(r) -} - -func (o *Operation) ioloop() { - for { - keepInSearchMode := false - keepInCompleteMode := false - r := o.t.ReadRune() - if o.cfg.FuncFilterInputRune != nil { - var process bool - r, process = o.cfg.FuncFilterInputRune(r) - if !process { - o.buf.Refresh(nil) // to refresh the line - continue // ignore this rune - } - } - - if r == 0 { // io.EOF - if o.buf.Len() == 0 { - o.buf.Clean() - select { - case o.errchan <- io.EOF: - } - break - } else { - // if stdin got io.EOF and there is something left in buffer, - // let's flush them by sending CharEnter. - // And we will got io.EOF int next loop. - r = CharEnter - } - } - isUpdateHistory := true - - if o.IsInCompleteSelectMode() { - keepInCompleteMode = o.HandleCompleteSelect(r) - if keepInCompleteMode { - continue - } - - o.buf.Refresh(nil) - switch r { - case CharEnter, CharCtrlJ: - o.history.Update(o.buf.Runes(), false) - fallthrough - case CharInterrupt: - o.t.KickRead() - fallthrough - case CharBell: - continue - } - } - - if o.IsEnableVimMode() { - r = o.HandleVim(r, o.t.ReadRune) - if r == 0 { - continue - } - } - - switch r { - case CharBell: - if o.IsSearchMode() { - o.ExitSearchMode(true) - o.buf.Refresh(nil) - } - if o.IsInCompleteMode() { - o.ExitCompleteMode(true) - o.buf.Refresh(nil) - } - case CharTab: - if o.cfg.AutoComplete == nil { - o.t.Bell() - break - } - if o.OnComplete() { - keepInCompleteMode = true - } else { - o.t.Bell() - break - } - - case CharBckSearch: - if !o.SearchMode(S_DIR_BCK) { - o.t.Bell() - break - } - keepInSearchMode = true - case CharCtrlU: - o.buf.KillFront() - case CharFwdSearch: - if !o.SearchMode(S_DIR_FWD) { - o.t.Bell() - break - } - keepInSearchMode = true - case CharKill: - o.buf.Kill() - keepInCompleteMode = true - case MetaForward: - o.buf.MoveToNextWord() - case CharTranspose: - o.buf.Transpose() - case MetaBackward: - o.buf.MoveToPrevWord() - case MetaDelete: - 
o.buf.DeleteWord() - case CharLineStart: - o.buf.MoveToLineStart() - case CharLineEnd: - o.buf.MoveToLineEnd() - case CharBackspace, CharCtrlH: - if o.IsSearchMode() { - o.SearchBackspace() - keepInSearchMode = true - break - } - - if o.buf.Len() == 0 { - o.t.Bell() - break - } - o.buf.Backspace() - if o.IsInCompleteMode() { - o.OnComplete() - } - case CharCtrlZ: - o.buf.Clean() - o.t.SleepToResume() - o.Refresh() - case CharCtrlL: - ClearScreen(o.w) - o.Refresh() - case MetaBackspace, CharCtrlW: - o.buf.BackEscapeWord() - case CharEnter, CharCtrlJ: - if o.IsSearchMode() { - o.ExitSearchMode(false) - } - o.buf.MoveToLineEnd() - var data []rune - if !o.cfg.UniqueEditLine { - o.buf.WriteRune('\n') - data = o.buf.Reset() - data = data[:len(data)-1] // trim \n - } else { - o.buf.Clean() - data = o.buf.Reset() - } - o.outchan <- data - if !o.cfg.DisableAutoSaveHistory { - // ignore IO error - _ = o.history.New(data) - } else { - isUpdateHistory = false - } - case CharBackward: - o.buf.MoveBackward() - case CharForward: - o.buf.MoveForward() - case CharPrev: - buf := o.history.Prev() - if buf != nil { - o.buf.Set(buf) - } else { - o.t.Bell() - } - case CharNext: - buf, ok := o.history.Next() - if ok { - o.buf.Set(buf) - } else { - o.t.Bell() - } - case CharDelete: - if o.buf.Len() > 0 || !o.IsNormalMode() { - o.t.KickRead() - if !o.buf.Delete() { - o.t.Bell() - } - break - } - - // treat as EOF - if !o.cfg.UniqueEditLine { - o.buf.WriteString(o.cfg.EOFPrompt + "\n") - } - o.buf.Reset() - isUpdateHistory = false - o.history.Revert() - o.errchan <- io.EOF - if o.cfg.UniqueEditLine { - o.buf.Clean() - } - case CharInterrupt: - if o.IsSearchMode() { - o.t.KickRead() - o.ExitSearchMode(true) - break - } - if o.IsInCompleteMode() { - o.t.KickRead() - o.ExitCompleteMode(true) - o.buf.Refresh(nil) - break - } - o.buf.MoveToLineEnd() - o.buf.Refresh(nil) - hint := o.cfg.InterruptPrompt + "\n" - if !o.cfg.UniqueEditLine { - o.buf.WriteString(hint) - } - remain := o.buf.Reset() - 
if !o.cfg.UniqueEditLine { - remain = remain[:len(remain)-len([]rune(hint))] - } - isUpdateHistory = false - o.history.Revert() - o.errchan <- &InterruptError{remain} - default: - if o.IsSearchMode() { - o.SearchChar(r) - keepInSearchMode = true - break - } - o.buf.WriteRune(r) - if o.IsInCompleteMode() { - o.OnComplete() - keepInCompleteMode = true - } - } - - if o.cfg.Listener != nil { - newLine, newPos, ok := o.cfg.Listener.OnChange(o.buf.Runes(), o.buf.Pos(), r) - if ok { - o.buf.SetWithIdx(newPos, newLine) - } - } - - if !keepInSearchMode && o.IsSearchMode() { - o.ExitSearchMode(false) - o.buf.Refresh(nil) - } else if o.IsInCompleteMode() { - if !keepInCompleteMode { - o.ExitCompleteMode(false) - o.Refresh() - } else { - o.buf.Refresh(nil) - o.CompleteRefresh() - } - } - if isUpdateHistory && !o.IsSearchMode() { - // it will cause null history - o.history.Update(o.buf.Runes(), false) - } - } -} - -func (o *Operation) Stderr() io.Writer { - return &wrapWriter{target: o.cfg.Stderr, r: o, t: o.t} -} - -func (o *Operation) Stdout() io.Writer { - return &wrapWriter{target: o.cfg.Stdout, r: o, t: o.t} -} - -func (o *Operation) String() (string, error) { - r, err := o.Runes() - return string(r), err -} - -func (o *Operation) Runes() ([]rune, error) { - o.t.EnterRawMode() - defer o.t.ExitRawMode() - - if o.cfg.Listener != nil { - o.cfg.Listener.OnChange(nil, 0, 0) - } - - o.buf.Refresh(nil) // print prompt - o.t.KickRead() - select { - case r := <-o.outchan: - return r, nil - case err := <-o.errchan: - if e, ok := err.(*InterruptError); ok { - return e.Line, ErrInterrupt - } - return nil, err - } -} - -func (o *Operation) PasswordEx(prompt string, l Listener) ([]byte, error) { - cfg := o.GenPasswordConfig() - cfg.Prompt = prompt - cfg.Listener = l - return o.PasswordWithConfig(cfg) -} - -func (o *Operation) GenPasswordConfig() *Config { - return o.opPassword.PasswordConfig() -} - -func (o *Operation) PasswordWithConfig(cfg *Config) ([]byte, error) { - if err := 
o.opPassword.EnterPasswordMode(cfg); err != nil { - return nil, err - } - defer o.opPassword.ExitPasswordMode() - return o.Slice() -} - -func (o *Operation) Password(prompt string) ([]byte, error) { - return o.PasswordEx(prompt, nil) -} - -func (o *Operation) SetTitle(t string) { - o.w.Write([]byte("\033[2;" + t + "\007")) -} - -func (o *Operation) Slice() ([]byte, error) { - r, err := o.Runes() - if err != nil { - return nil, err - } - return []byte(string(r)), nil -} - -func (o *Operation) Close() { - o.history.Close() -} - -func (o *Operation) SetHistoryPath(path string) { - if o.history != nil { - o.history.Close() - } - o.cfg.HistoryFile = path - o.history = newOpHistory(o.cfg) -} - -func (o *Operation) IsNormalMode() bool { - return !o.IsInCompleteMode() && !o.IsSearchMode() -} - -func (op *Operation) SetConfig(cfg *Config) (*Config, error) { - if op.cfg == cfg { - return op.cfg, nil - } - if err := cfg.Init(); err != nil { - return op.cfg, err - } - old := op.cfg - op.cfg = cfg - op.SetPrompt(cfg.Prompt) - op.SetMaskRune(cfg.MaskRune) - op.buf.SetConfig(cfg) - width := op.cfg.FuncGetWidth() - - if cfg.opHistory == nil { - op.SetHistoryPath(cfg.HistoryFile) - cfg.opHistory = op.history - cfg.opSearch = newOpSearch(op.buf.w, op.buf, op.history, cfg, width) - } - op.history = cfg.opHistory - - // SetHistoryPath will close opHistory which already exists - // so if we use it next time, we need to reopen it by `InitHistory()` - op.history.Init() - - if op.cfg.AutoComplete != nil { - op.opCompleter = newOpCompleter(op.buf.w, op, width) - } - - op.opSearch = cfg.opSearch - return old, nil -} - -func (o *Operation) ResetHistory() { - o.history.Reset() -} - -// if err is not nil, it just mean it fail to write to file -// other things goes fine. 
-func (o *Operation) SaveHistory(content string) error { - return o.history.New([]rune(content)) -} - -func (o *Operation) Refresh() { - if o.t.IsReading() { - o.buf.Refresh(nil) - } -} - -func (o *Operation) Clean() { - o.buf.Clean() -} - -func FuncListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) Listener { - return &DumpListener{f: f} -} - -type DumpListener struct { - f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) -} - -func (d *DumpListener) OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { - return d.f(line, pos, key) -} - -type Listener interface { - OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) -} diff --git a/vendor/github.com/chzyer/readline/password.go b/vendor/github.com/chzyer/readline/password.go deleted file mode 100644 index 4b073795453..00000000000 --- a/vendor/github.com/chzyer/readline/password.go +++ /dev/null @@ -1,32 +0,0 @@ -package readline - -type opPassword struct { - o *Operation - backupCfg *Config -} - -func newOpPassword(o *Operation) *opPassword { - return &opPassword{o: o} -} - -func (o *opPassword) ExitPasswordMode() { - o.o.SetConfig(o.backupCfg) - o.backupCfg = nil -} - -func (o *opPassword) EnterPasswordMode(cfg *Config) (err error) { - o.backupCfg, err = o.o.SetConfig(cfg) - return -} - -func (o *opPassword) PasswordConfig() *Config { - return &Config{ - EnableMask: true, - InterruptPrompt: "\n", - EOFPrompt: "\n", - HistoryLimit: -1, - - Stdout: o.o.cfg.Stdout, - Stderr: o.o.cfg.Stderr, - } -} diff --git a/vendor/github.com/chzyer/readline/rawreader_windows.go b/vendor/github.com/chzyer/readline/rawreader_windows.go deleted file mode 100644 index 073ef150a59..00000000000 --- a/vendor/github.com/chzyer/readline/rawreader_windows.go +++ /dev/null @@ -1,125 +0,0 @@ -// +build windows - -package readline - -import "unsafe" - -const ( - VK_CANCEL = 0x03 - VK_BACK = 0x08 - VK_TAB = 0x09 - VK_RETURN = 0x0D 
- VK_SHIFT = 0x10 - VK_CONTROL = 0x11 - VK_MENU = 0x12 - VK_ESCAPE = 0x1B - VK_LEFT = 0x25 - VK_UP = 0x26 - VK_RIGHT = 0x27 - VK_DOWN = 0x28 - VK_DELETE = 0x2E - VK_LSHIFT = 0xA0 - VK_RSHIFT = 0xA1 - VK_LCONTROL = 0xA2 - VK_RCONTROL = 0xA3 -) - -// RawReader translate input record to ANSI escape sequence. -// To provides same behavior as unix terminal. -type RawReader struct { - ctrlKey bool - altKey bool -} - -func NewRawReader() *RawReader { - r := new(RawReader) - return r -} - -// only process one action in one read -func (r *RawReader) Read(buf []byte) (int, error) { - ir := new(_INPUT_RECORD) - var read int - var err error -next: - err = kernel.ReadConsoleInputW(stdin, - uintptr(unsafe.Pointer(ir)), - 1, - uintptr(unsafe.Pointer(&read)), - ) - if err != nil { - return 0, err - } - if ir.EventType != EVENT_KEY { - goto next - } - ker := (*_KEY_EVENT_RECORD)(unsafe.Pointer(&ir.Event[0])) - if ker.bKeyDown == 0 { // keyup - if r.ctrlKey || r.altKey { - switch ker.wVirtualKeyCode { - case VK_RCONTROL, VK_LCONTROL: - r.ctrlKey = false - case VK_MENU: //alt - r.altKey = false - } - } - goto next - } - - if ker.unicodeChar == 0 { - var target rune - switch ker.wVirtualKeyCode { - case VK_RCONTROL, VK_LCONTROL: - r.ctrlKey = true - case VK_MENU: //alt - r.altKey = true - case VK_LEFT: - target = CharBackward - case VK_RIGHT: - target = CharForward - case VK_UP: - target = CharPrev - case VK_DOWN: - target = CharNext - } - if target != 0 { - return r.write(buf, target) - } - goto next - } - char := rune(ker.unicodeChar) - if r.ctrlKey { - switch char { - case 'A': - char = CharLineStart - case 'E': - char = CharLineEnd - case 'R': - char = CharBckSearch - case 'S': - char = CharFwdSearch - } - } else if r.altKey { - switch char { - case VK_BACK: - char = CharBackspace - } - return r.writeEsc(buf, char) - } - return r.write(buf, char) -} - -func (r *RawReader) writeEsc(b []byte, char rune) (int, error) { - b[0] = '\033' - n := copy(b[1:], []byte(string(char))) - return 
n + 1, nil -} - -func (r *RawReader) write(b []byte, char rune) (int, error) { - n := copy(b, []byte(string(char))) - return n, nil -} - -func (r *RawReader) Close() error { - return nil -} diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go deleted file mode 100644 index b0242f77ff8..00000000000 --- a/vendor/github.com/chzyer/readline/readline.go +++ /dev/null @@ -1,288 +0,0 @@ -// Readline is a pure go implementation for GNU-Readline kind library. -// -// example: -// rl, err := readline.New("> ") -// if err != nil { -// panic(err) -// } -// defer rl.Close() -// -// for { -// line, err := rl.Readline() -// if err != nil { // io.EOF -// break -// } -// println(line) -// } -// -package readline - -import "io" - -type Instance struct { - Config *Config - Terminal *Terminal - Operation *Operation -} - -type Config struct { - // prompt supports ANSI escape sequence, so we can color some characters even in windows - Prompt string - - // readline will persist historys to file where HistoryFile specified - HistoryFile string - // specify the max length of historys, it's 500 by default, set it to -1 to disable history - HistoryLimit int - DisableAutoSaveHistory bool - // enable case-insensitive history searching - HistorySearchFold bool - - // AutoCompleter will called once user press TAB - AutoComplete AutoCompleter - - // Any key press will pass to Listener - // NOTE: Listener will be triggered by (nil, 0, 0) immediately - Listener Listener - - // If VimMode is true, readline will in vim.insert mode by default - VimMode bool - - InterruptPrompt string - EOFPrompt string - - FuncGetWidth func() int - - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - - EnableMask bool - MaskRune rune - - // erase the editing line after user submited it - // it use in IM usually. 
- UniqueEditLine bool - - // filter input runes (may be used to disable CtrlZ or for translating some keys to different actions) - // -> output = new (translated) rune and true/false if continue with processing this one - FuncFilterInputRune func(rune) (rune, bool) - - // force use interactive even stdout is not a tty - FuncIsTerminal func() bool - FuncMakeRaw func() error - FuncExitRaw func() error - FuncOnWidthChanged func(func()) - ForceUseInteractive bool - - // private fields - inited bool - opHistory *opHistory - opSearch *opSearch -} - -func (c *Config) useInteractive() bool { - if c.ForceUseInteractive { - return true - } - return c.FuncIsTerminal() -} - -func (c *Config) Init() error { - if c.inited { - return nil - } - c.inited = true - if c.Stdin == nil { - c.Stdin = NewCancelableStdin(Stdin) - } - if c.Stdout == nil { - c.Stdout = Stdout - } - if c.Stderr == nil { - c.Stderr = Stderr - } - if c.HistoryLimit == 0 { - c.HistoryLimit = 500 - } - - if c.InterruptPrompt == "" { - c.InterruptPrompt = "^C" - } else if c.InterruptPrompt == "\n" { - c.InterruptPrompt = "" - } - if c.EOFPrompt == "" { - c.EOFPrompt = "^D" - } else if c.EOFPrompt == "\n" { - c.EOFPrompt = "" - } - - if c.AutoComplete == nil { - c.AutoComplete = &TabCompleter{} - } - if c.FuncGetWidth == nil { - c.FuncGetWidth = GetScreenWidth - } - if c.FuncIsTerminal == nil { - c.FuncIsTerminal = DefaultIsTerminal - } - rm := new(RawMode) - if c.FuncMakeRaw == nil { - c.FuncMakeRaw = rm.Enter - } - if c.FuncExitRaw == nil { - c.FuncExitRaw = rm.Exit - } - if c.FuncOnWidthChanged == nil { - c.FuncOnWidthChanged = DefaultOnWidthChanged - } - - return nil -} - -func (c Config) Clone() *Config { - c.opHistory = nil - c.opSearch = nil - return &c -} - -func (c *Config) SetListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) { - c.Listener = FuncListener(f) -} - -func NewEx(cfg *Config) (*Instance, error) { - t, err := NewTerminal(cfg) - if err != nil { - return nil, 
err - } - rl := t.Readline() - return &Instance{ - Config: cfg, - Terminal: t, - Operation: rl, - }, nil -} - -func New(prompt string) (*Instance, error) { - return NewEx(&Config{Prompt: prompt}) -} - -func (i *Instance) ResetHistory() { - i.Operation.ResetHistory() -} - -func (i *Instance) SetPrompt(s string) { - i.Operation.SetPrompt(s) -} - -func (i *Instance) SetMaskRune(r rune) { - i.Operation.SetMaskRune(r) -} - -// change history persistence in runtime -func (i *Instance) SetHistoryPath(p string) { - i.Operation.SetHistoryPath(p) -} - -// readline will refresh automatic when write through Stdout() -func (i *Instance) Stdout() io.Writer { - return i.Operation.Stdout() -} - -// readline will refresh automatic when write through Stdout() -func (i *Instance) Stderr() io.Writer { - return i.Operation.Stderr() -} - -// switch VimMode in runtime -func (i *Instance) SetVimMode(on bool) { - i.Operation.SetVimMode(on) -} - -func (i *Instance) IsVimMode() bool { - return i.Operation.IsEnableVimMode() -} - -func (i *Instance) GenPasswordConfig() *Config { - return i.Operation.GenPasswordConfig() -} - -// we can generate a config by `i.GenPasswordConfig()` -func (i *Instance) ReadPasswordWithConfig(cfg *Config) ([]byte, error) { - return i.Operation.PasswordWithConfig(cfg) -} - -func (i *Instance) ReadPasswordEx(prompt string, l Listener) ([]byte, error) { - return i.Operation.PasswordEx(prompt, l) -} - -func (i *Instance) ReadPassword(prompt string) ([]byte, error) { - return i.Operation.Password(prompt) -} - -type Result struct { - Line string - Error error -} - -func (l *Result) CanContinue() bool { - return len(l.Line) != 0 && l.Error == ErrInterrupt -} - -func (l *Result) CanBreak() bool { - return !l.CanContinue() && l.Error != nil -} - -func (i *Instance) Line() *Result { - ret, err := i.Readline() - return &Result{ret, err} -} - -// err is one of (nil, io.EOF, readline.ErrInterrupt) -func (i *Instance) Readline() (string, error) { - return i.Operation.String() -} 
- -func (i *Instance) ReadlineWithDefault(what string) (string, error) { - i.Operation.SetBuffer(what) - return i.Operation.String() -} - -func (i *Instance) SaveHistory(content string) error { - return i.Operation.SaveHistory(content) -} - -// same as readline -func (i *Instance) ReadSlice() ([]byte, error) { - return i.Operation.Slice() -} - -// we must make sure that call Close() before process exit. -func (i *Instance) Close() error { - if err := i.Terminal.Close(); err != nil { - return err - } - i.Operation.Close() - return nil -} -func (i *Instance) Clean() { - i.Operation.Clean() -} - -func (i *Instance) Write(b []byte) (int, error) { - return i.Stdout().Write(b) -} - -func (i *Instance) SetConfig(cfg *Config) *Config { - if i.Config == cfg { - return cfg - } - old := i.Config - i.Config = cfg - i.Operation.SetConfig(cfg) - i.Terminal.SetConfig(cfg) - return old -} - -func (i *Instance) Refresh() { - i.Operation.Refresh() -} diff --git a/vendor/github.com/chzyer/readline/remote.go b/vendor/github.com/chzyer/readline/remote.go deleted file mode 100644 index db77ae8cfd9..00000000000 --- a/vendor/github.com/chzyer/readline/remote.go +++ /dev/null @@ -1,474 +0,0 @@ -package readline - -import ( - "bufio" - "bytes" - "encoding/binary" - "fmt" - "io" - "net" - "os" - "sync" - "sync/atomic" -) - -type MsgType int16 - -const ( - T_DATA = MsgType(iota) - T_WIDTH - T_WIDTH_REPORT - T_ISTTY_REPORT - T_RAW - T_ERAW // exit raw - T_EOF -) - -type RemoteSvr struct { - eof int32 - closed int32 - width int32 - reciveChan chan struct{} - writeChan chan *writeCtx - conn net.Conn - isTerminal bool - funcWidthChan func() - stopChan chan struct{} - - dataBufM sync.Mutex - dataBuf bytes.Buffer -} - -type writeReply struct { - n int - err error -} - -type writeCtx struct { - msg *Message - reply chan *writeReply -} - -func newWriteCtx(msg *Message) *writeCtx { - return &writeCtx{ - msg: msg, - reply: make(chan *writeReply), - } -} - -func NewRemoteSvr(conn net.Conn) (*RemoteSvr, 
error) { - rs := &RemoteSvr{ - width: -1, - conn: conn, - writeChan: make(chan *writeCtx), - reciveChan: make(chan struct{}), - stopChan: make(chan struct{}), - } - buf := bufio.NewReader(rs.conn) - - if err := rs.init(buf); err != nil { - return nil, err - } - - go rs.readLoop(buf) - go rs.writeLoop() - return rs, nil -} - -func (r *RemoteSvr) init(buf *bufio.Reader) error { - m, err := ReadMessage(buf) - if err != nil { - return err - } - // receive isTerminal - if m.Type != T_ISTTY_REPORT { - return fmt.Errorf("unexpected init message") - } - r.GotIsTerminal(m.Data) - - // receive width - m, err = ReadMessage(buf) - if err != nil { - return err - } - if m.Type != T_WIDTH_REPORT { - return fmt.Errorf("unexpected init message") - } - r.GotReportWidth(m.Data) - - return nil -} - -func (r *RemoteSvr) HandleConfig(cfg *Config) { - cfg.Stderr = r - cfg.Stdout = r - cfg.Stdin = r - cfg.FuncExitRaw = r.ExitRawMode - cfg.FuncIsTerminal = r.IsTerminal - cfg.FuncMakeRaw = r.EnterRawMode - cfg.FuncExitRaw = r.ExitRawMode - cfg.FuncGetWidth = r.GetWidth - cfg.FuncOnWidthChanged = func(f func()) { - r.funcWidthChan = f - } -} - -func (r *RemoteSvr) IsTerminal() bool { - return r.isTerminal -} - -func (r *RemoteSvr) checkEOF() error { - if atomic.LoadInt32(&r.eof) == 1 { - return io.EOF - } - return nil -} - -func (r *RemoteSvr) Read(b []byte) (int, error) { - r.dataBufM.Lock() - n, err := r.dataBuf.Read(b) - r.dataBufM.Unlock() - if n == 0 { - if err := r.checkEOF(); err != nil { - return 0, err - } - } - - if n == 0 && err == io.EOF { - <-r.reciveChan - r.dataBufM.Lock() - n, err = r.dataBuf.Read(b) - r.dataBufM.Unlock() - } - if n == 0 { - if err := r.checkEOF(); err != nil { - return 0, err - } - } - - return n, err -} - -func (r *RemoteSvr) writeMsg(m *Message) error { - ctx := newWriteCtx(m) - r.writeChan <- ctx - reply := <-ctx.reply - return reply.err -} - -func (r *RemoteSvr) Write(b []byte) (int, error) { - ctx := newWriteCtx(NewMessage(T_DATA, b)) - r.writeChan <- 
ctx - reply := <-ctx.reply - return reply.n, reply.err -} - -func (r *RemoteSvr) EnterRawMode() error { - return r.writeMsg(NewMessage(T_RAW, nil)) -} - -func (r *RemoteSvr) ExitRawMode() error { - return r.writeMsg(NewMessage(T_ERAW, nil)) -} - -func (r *RemoteSvr) writeLoop() { - defer r.Close() - -loop: - for { - select { - case ctx, ok := <-r.writeChan: - if !ok { - break - } - n, err := ctx.msg.WriteTo(r.conn) - ctx.reply <- &writeReply{n, err} - case <-r.stopChan: - break loop - } - } -} - -func (r *RemoteSvr) Close() { - if atomic.CompareAndSwapInt32(&r.closed, 0, 1) { - close(r.stopChan) - r.conn.Close() - } -} - -func (r *RemoteSvr) readLoop(buf *bufio.Reader) { - defer r.Close() - for { - m, err := ReadMessage(buf) - if err != nil { - break - } - switch m.Type { - case T_EOF: - atomic.StoreInt32(&r.eof, 1) - select { - case r.reciveChan <- struct{}{}: - default: - } - case T_DATA: - r.dataBufM.Lock() - r.dataBuf.Write(m.Data) - r.dataBufM.Unlock() - select { - case r.reciveChan <- struct{}{}: - default: - } - case T_WIDTH_REPORT: - r.GotReportWidth(m.Data) - case T_ISTTY_REPORT: - r.GotIsTerminal(m.Data) - } - } -} - -func (r *RemoteSvr) GotIsTerminal(data []byte) { - if binary.BigEndian.Uint16(data) == 0 { - r.isTerminal = false - } else { - r.isTerminal = true - } -} - -func (r *RemoteSvr) GotReportWidth(data []byte) { - atomic.StoreInt32(&r.width, int32(binary.BigEndian.Uint16(data))) - if r.funcWidthChan != nil { - r.funcWidthChan() - } -} - -func (r *RemoteSvr) GetWidth() int { - return int(atomic.LoadInt32(&r.width)) -} - -// ----------------------------------------------------------------------------- - -type Message struct { - Type MsgType - Data []byte -} - -func ReadMessage(r io.Reader) (*Message, error) { - m := new(Message) - var length int32 - if err := binary.Read(r, binary.BigEndian, &length); err != nil { - return nil, err - } - if err := binary.Read(r, binary.BigEndian, &m.Type); err != nil { - return nil, err - } - m.Data = make([]byte, 
int(length)-2) - if _, err := io.ReadFull(r, m.Data); err != nil { - return nil, err - } - return m, nil -} - -func NewMessage(t MsgType, data []byte) *Message { - return &Message{t, data} -} - -func (m *Message) WriteTo(w io.Writer) (int, error) { - buf := bytes.NewBuffer(make([]byte, 0, len(m.Data)+2+4)) - binary.Write(buf, binary.BigEndian, int32(len(m.Data)+2)) - binary.Write(buf, binary.BigEndian, m.Type) - buf.Write(m.Data) - n, err := buf.WriteTo(w) - return int(n), err -} - -// ----------------------------------------------------------------------------- - -type RemoteCli struct { - conn net.Conn - raw RawMode - receiveChan chan struct{} - inited int32 - isTerminal *bool - - data bytes.Buffer - dataM sync.Mutex -} - -func NewRemoteCli(conn net.Conn) (*RemoteCli, error) { - r := &RemoteCli{ - conn: conn, - receiveChan: make(chan struct{}), - } - return r, nil -} - -func (r *RemoteCli) MarkIsTerminal(is bool) { - r.isTerminal = &is -} - -func (r *RemoteCli) init() error { - if !atomic.CompareAndSwapInt32(&r.inited, 0, 1) { - return nil - } - - if err := r.reportIsTerminal(); err != nil { - return err - } - - if err := r.reportWidth(); err != nil { - return err - } - - // register sig for width changed - DefaultOnWidthChanged(func() { - r.reportWidth() - }) - return nil -} - -func (r *RemoteCli) writeMsg(m *Message) error { - r.dataM.Lock() - _, err := m.WriteTo(r.conn) - r.dataM.Unlock() - return err -} - -func (r *RemoteCli) Write(b []byte) (int, error) { - m := NewMessage(T_DATA, b) - r.dataM.Lock() - _, err := m.WriteTo(r.conn) - r.dataM.Unlock() - return len(b), err -} - -func (r *RemoteCli) reportWidth() error { - screenWidth := GetScreenWidth() - data := make([]byte, 2) - binary.BigEndian.PutUint16(data, uint16(screenWidth)) - msg := NewMessage(T_WIDTH_REPORT, data) - - if err := r.writeMsg(msg); err != nil { - return err - } - return nil -} - -func (r *RemoteCli) reportIsTerminal() error { - var isTerminal bool - if r.isTerminal != nil { - isTerminal = 
*r.isTerminal - } else { - isTerminal = DefaultIsTerminal() - } - data := make([]byte, 2) - if isTerminal { - binary.BigEndian.PutUint16(data, 1) - } else { - binary.BigEndian.PutUint16(data, 0) - } - msg := NewMessage(T_ISTTY_REPORT, data) - if err := r.writeMsg(msg); err != nil { - return err - } - return nil -} - -func (r *RemoteCli) readLoop() { - buf := bufio.NewReader(r.conn) - for { - msg, err := ReadMessage(buf) - if err != nil { - break - } - switch msg.Type { - case T_ERAW: - r.raw.Exit() - case T_RAW: - r.raw.Enter() - case T_DATA: - os.Stdout.Write(msg.Data) - } - } -} - -func (r *RemoteCli) ServeBy(source io.Reader) error { - if err := r.init(); err != nil { - return err - } - - go func() { - defer r.Close() - for { - n, _ := io.Copy(r, source) - if n == 0 { - break - } - } - }() - defer r.raw.Exit() - r.readLoop() - return nil -} - -func (r *RemoteCli) Close() { - r.writeMsg(NewMessage(T_EOF, nil)) -} - -func (r *RemoteCli) Serve() error { - return r.ServeBy(os.Stdin) -} - -func ListenRemote(n, addr string, cfg *Config, h func(*Instance), onListen ...func(net.Listener) error) error { - ln, err := net.Listen(n, addr) - if err != nil { - return err - } - if len(onListen) > 0 { - if err := onListen[0](ln); err != nil { - return err - } - } - for { - conn, err := ln.Accept() - if err != nil { - break - } - go func() { - defer conn.Close() - rl, err := HandleConn(*cfg, conn) - if err != nil { - return - } - h(rl) - }() - } - return nil -} - -func HandleConn(cfg Config, conn net.Conn) (*Instance, error) { - r, err := NewRemoteSvr(conn) - if err != nil { - return nil, err - } - r.HandleConfig(&cfg) - - rl, err := NewEx(&cfg) - if err != nil { - return nil, err - } - return rl, nil -} - -func DialRemote(n, addr string) error { - conn, err := net.Dial(n, addr) - if err != nil { - return err - } - defer conn.Close() - - cli, err := NewRemoteCli(conn) - if err != nil { - return err - } - return cli.Serve() -} diff --git 
a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go deleted file mode 100644 index 1b2a5d04de2..00000000000 --- a/vendor/github.com/chzyer/readline/runebuf.go +++ /dev/null @@ -1,572 +0,0 @@ -package readline - -import ( - "bufio" - "bytes" - "io" - "strings" - "sync" -) - -type runeBufferBck struct { - buf []rune - idx int -} - -type RuneBuffer struct { - buf []rune - idx int - prompt []rune - w io.Writer - - hadClean bool - interactive bool - cfg *Config - - width int - - bck *runeBufferBck - - offset string - - sync.Mutex -} - -func (r *RuneBuffer) OnWidthChange(newWidth int) { - r.Lock() - r.width = newWidth - r.Unlock() -} - -func (r *RuneBuffer) Backup() { - r.Lock() - r.bck = &runeBufferBck{r.buf, r.idx} - r.Unlock() -} - -func (r *RuneBuffer) Restore() { - r.Refresh(func() { - if r.bck == nil { - return - } - r.buf = r.bck.buf - r.idx = r.bck.idx - }) -} - -func NewRuneBuffer(w io.Writer, prompt string, cfg *Config, width int) *RuneBuffer { - rb := &RuneBuffer{ - w: w, - interactive: cfg.useInteractive(), - cfg: cfg, - width: width, - } - rb.SetPrompt(prompt) - return rb -} - -func (r *RuneBuffer) SetConfig(cfg *Config) { - r.Lock() - r.cfg = cfg - r.interactive = cfg.useInteractive() - r.Unlock() -} - -func (r *RuneBuffer) SetMask(m rune) { - r.Lock() - r.cfg.MaskRune = m - r.Unlock() -} - -func (r *RuneBuffer) CurrentWidth(x int) int { - r.Lock() - defer r.Unlock() - return runes.WidthAll(r.buf[:x]) -} - -func (r *RuneBuffer) PromptLen() int { - r.Lock() - width := r.promptLen() - r.Unlock() - return width -} - -func (r *RuneBuffer) promptLen() int { - return runes.WidthAll(runes.ColorFilter(r.prompt)) -} - -func (r *RuneBuffer) RuneSlice(i int) []rune { - r.Lock() - defer r.Unlock() - - if i > 0 { - rs := make([]rune, i) - copy(rs, r.buf[r.idx:r.idx+i]) - return rs - } - rs := make([]rune, -i) - copy(rs, r.buf[r.idx+i:r.idx]) - return rs -} - -func (r *RuneBuffer) Runes() []rune { - r.Lock() - newr := 
make([]rune, len(r.buf)) - copy(newr, r.buf) - r.Unlock() - return newr -} - -func (r *RuneBuffer) Pos() int { - r.Lock() - defer r.Unlock() - return r.idx -} - -func (r *RuneBuffer) Len() int { - r.Lock() - defer r.Unlock() - return len(r.buf) -} - -func (r *RuneBuffer) MoveToLineStart() { - r.Refresh(func() { - if r.idx == 0 { - return - } - r.idx = 0 - }) -} - -func (r *RuneBuffer) MoveBackward() { - r.Refresh(func() { - if r.idx == 0 { - return - } - r.idx-- - }) -} - -func (r *RuneBuffer) WriteString(s string) { - r.WriteRunes([]rune(s)) -} - -func (r *RuneBuffer) WriteRune(s rune) { - r.WriteRunes([]rune{s}) -} - -func (r *RuneBuffer) WriteRunes(s []rune) { - r.Refresh(func() { - tail := append(s, r.buf[r.idx:]...) - r.buf = append(r.buf[:r.idx], tail...) - r.idx += len(s) - }) -} - -func (r *RuneBuffer) MoveForward() { - r.Refresh(func() { - if r.idx == len(r.buf) { - return - } - r.idx++ - }) -} - -func (r *RuneBuffer) IsCursorInEnd() bool { - r.Lock() - defer r.Unlock() - return r.idx == len(r.buf) -} - -func (r *RuneBuffer) Replace(ch rune) { - r.Refresh(func() { - r.buf[r.idx] = ch - }) -} - -func (r *RuneBuffer) Erase() { - r.Refresh(func() { - r.idx = 0 - r.buf = r.buf[:0] - }) -} - -func (r *RuneBuffer) Delete() (success bool) { - r.Refresh(func() { - if r.idx == len(r.buf) { - return - } - r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) - success = true - }) - return -} - -func (r *RuneBuffer) DeleteWord() { - if r.idx == len(r.buf) { - return - } - init := r.idx - for init < len(r.buf) && IsWordBreak(r.buf[init]) { - init++ - } - for i := init + 1; i < len(r.buf); i++ { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.Refresh(func() { - r.buf = append(r.buf[:r.idx], r.buf[i-1:]...) 
- }) - return - } - } - r.Kill() -} - -func (r *RuneBuffer) MoveToPrevWord() (success bool) { - r.Refresh(func() { - if r.idx == 0 { - return - } - - for i := r.idx - 1; i > 0; i-- { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.idx = i - success = true - return - } - } - r.idx = 0 - success = true - }) - return -} - -func (r *RuneBuffer) KillFront() { - r.Refresh(func() { - if r.idx == 0 { - return - } - - length := len(r.buf) - r.idx - copy(r.buf[:length], r.buf[r.idx:]) - r.idx = 0 - r.buf = r.buf[:length] - }) -} - -func (r *RuneBuffer) Kill() { - r.Refresh(func() { - r.buf = r.buf[:r.idx] - }) -} - -func (r *RuneBuffer) Transpose() { - r.Refresh(func() { - if len(r.buf) == 1 { - r.idx++ - } - - if len(r.buf) < 2 { - return - } - - if r.idx == 0 { - r.idx = 1 - } else if r.idx >= len(r.buf) { - r.idx = len(r.buf) - 1 - } - r.buf[r.idx], r.buf[r.idx-1] = r.buf[r.idx-1], r.buf[r.idx] - r.idx++ - }) -} - -func (r *RuneBuffer) MoveToNextWord() { - r.Refresh(func() { - for i := r.idx + 1; i < len(r.buf); i++ { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.idx = i - return - } - } - - r.idx = len(r.buf) - }) -} - -func (r *RuneBuffer) MoveToEndWord() { - r.Refresh(func() { - // already at the end, so do nothing - if r.idx == len(r.buf) { - return - } - // if we are at the end of a word already, go to next - if !IsWordBreak(r.buf[r.idx]) && IsWordBreak(r.buf[r.idx+1]) { - r.idx++ - } - - // keep going until at the end of a word - for i := r.idx + 1; i < len(r.buf); i++ { - if IsWordBreak(r.buf[i]) && !IsWordBreak(r.buf[i-1]) { - r.idx = i - 1 - return - } - } - r.idx = len(r.buf) - }) -} - -func (r *RuneBuffer) BackEscapeWord() { - r.Refresh(func() { - if r.idx == 0 { - return - } - for i := r.idx - 1; i > 0; i-- { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.buf = append(r.buf[:i], r.buf[r.idx:]...) 
- r.idx = i - return - } - } - - r.buf = r.buf[:0] - r.idx = 0 - }) -} - -func (r *RuneBuffer) Backspace() { - r.Refresh(func() { - if r.idx == 0 { - return - } - - r.idx-- - r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) - }) -} - -func (r *RuneBuffer) MoveToLineEnd() { - r.Refresh(func() { - if r.idx == len(r.buf) { - return - } - - r.idx = len(r.buf) - }) -} - -func (r *RuneBuffer) LineCount(width int) int { - if width == -1 { - width = r.width - } - return LineCount(width, - runes.WidthAll(r.buf)+r.PromptLen()) -} - -func (r *RuneBuffer) MoveTo(ch rune, prevChar, reverse bool) (success bool) { - r.Refresh(func() { - if reverse { - for i := r.idx - 1; i >= 0; i-- { - if r.buf[i] == ch { - r.idx = i - if prevChar { - r.idx++ - } - success = true - return - } - } - return - } - for i := r.idx + 1; i < len(r.buf); i++ { - if r.buf[i] == ch { - r.idx = i - if prevChar { - r.idx-- - } - success = true - return - } - } - }) - return -} - -func (r *RuneBuffer) isInLineEdge() bool { - if isWindows { - return false - } - sp := r.getSplitByLine(r.buf) - return len(sp[len(sp)-1]) == 0 -} - -func (r *RuneBuffer) getSplitByLine(rs []rune) []string { - return SplitByLine(r.promptLen(), r.width, rs) -} - -func (r *RuneBuffer) IdxLine(width int) int { - r.Lock() - defer r.Unlock() - return r.idxLine(width) -} - -func (r *RuneBuffer) idxLine(width int) int { - if width == 0 { - return 0 - } - sp := r.getSplitByLine(r.buf[:r.idx]) - return len(sp) - 1 -} - -func (r *RuneBuffer) CursorLineCount() int { - return r.LineCount(r.width) - r.IdxLine(r.width) -} - -func (r *RuneBuffer) Refresh(f func()) { - r.Lock() - defer r.Unlock() - - if !r.interactive { - if f != nil { - f() - } - return - } - - r.clean() - if f != nil { - f() - } - r.print() -} - -func (r *RuneBuffer) SetOffset(offset string) { - r.Lock() - r.offset = offset - r.Unlock() -} - -func (r *RuneBuffer) print() { - r.w.Write(r.output()) - r.hadClean = false -} - -func (r *RuneBuffer) output() []byte { - buf := 
bytes.NewBuffer(nil) - buf.WriteString(string(r.prompt)) - if r.cfg.EnableMask && len(r.buf) > 0 { - buf.Write([]byte(strings.Repeat(string(r.cfg.MaskRune), len(r.buf)-1))) - if r.buf[len(r.buf)-1] == '\n' { - buf.Write([]byte{'\n'}) - } else { - buf.Write([]byte(string(r.cfg.MaskRune))) - } - if len(r.buf) > r.idx { - buf.Write(runes.Backspace(r.buf[r.idx:])) - } - - } else { - for idx := range r.buf { - if r.buf[idx] == '\t' { - buf.WriteString(strings.Repeat(" ", TabWidth)) - } else { - buf.WriteRune(r.buf[idx]) - } - } - if r.isInLineEdge() { - buf.Write([]byte(" \b")) - } - } - - if len(r.buf) > r.idx { - buf.Write(runes.Backspace(r.buf[r.idx:])) - } - return buf.Bytes() -} - -func (r *RuneBuffer) Reset() []rune { - ret := runes.Copy(r.buf) - r.buf = r.buf[:0] - r.idx = 0 - return ret -} - -func (r *RuneBuffer) calWidth(m int) int { - if m > 0 { - return runes.WidthAll(r.buf[r.idx : r.idx+m]) - } - return runes.WidthAll(r.buf[r.idx+m : r.idx]) -} - -func (r *RuneBuffer) SetStyle(start, end int, style string) { - if end < start { - panic("end < start") - } - - // goto start - move := start - r.idx - if move > 0 { - r.w.Write([]byte(string(r.buf[r.idx : r.idx+move]))) - } else { - r.w.Write(bytes.Repeat([]byte("\b"), r.calWidth(move))) - } - r.w.Write([]byte("\033[" + style + "m")) - r.w.Write([]byte(string(r.buf[start:end]))) - r.w.Write([]byte("\033[0m")) - // TODO: move back -} - -func (r *RuneBuffer) SetWithIdx(idx int, buf []rune) { - r.Refresh(func() { - r.buf = buf - r.idx = idx - }) -} - -func (r *RuneBuffer) Set(buf []rune) { - r.SetWithIdx(len(buf), buf) -} - -func (r *RuneBuffer) SetPrompt(prompt string) { - r.Lock() - r.prompt = []rune(prompt) - r.Unlock() -} - -func (r *RuneBuffer) cleanOutput(w io.Writer, idxLine int) { - buf := bufio.NewWriter(w) - - if r.width == 0 { - buf.WriteString(strings.Repeat("\r\b", len(r.buf)+r.promptLen())) - buf.Write([]byte("\033[J")) - } else { - buf.Write([]byte("\033[J")) // just like ^k :) - if idxLine == 0 { - 
buf.WriteString("\033[2K") - buf.WriteString("\r") - } else { - for i := 0; i < idxLine; i++ { - io.WriteString(buf, "\033[2K\r\033[A") - } - io.WriteString(buf, "\033[2K\r") - } - } - buf.Flush() - return -} - -func (r *RuneBuffer) Clean() { - r.Lock() - r.clean() - r.Unlock() -} - -func (r *RuneBuffer) clean() { - r.cleanWithIdxLine(r.idxLine(r.width)) -} - -func (r *RuneBuffer) cleanWithIdxLine(idxLine int) { - if r.hadClean || !r.interactive { - return - } - r.hadClean = true - r.cleanOutput(r.w, idxLine) -} diff --git a/vendor/github.com/chzyer/readline/runes.go b/vendor/github.com/chzyer/readline/runes.go deleted file mode 100644 index a669bc48c30..00000000000 --- a/vendor/github.com/chzyer/readline/runes.go +++ /dev/null @@ -1,223 +0,0 @@ -package readline - -import ( - "bytes" - "unicode" - "unicode/utf8" -) - -var runes = Runes{} -var TabWidth = 4 - -type Runes struct{} - -func (Runes) EqualRune(a, b rune, fold bool) bool { - if a == b { - return true - } - if !fold { - return false - } - if a > b { - a, b = b, a - } - if b < utf8.RuneSelf && 'A' <= a && a <= 'Z' { - if b == a+'a'-'A' { - return true - } - } - return false -} - -func (r Runes) EqualRuneFold(a, b rune) bool { - return r.EqualRune(a, b, true) -} - -func (r Runes) EqualFold(a, b []rune) bool { - if len(a) != len(b) { - return false - } - for i := 0; i < len(a); i++ { - if r.EqualRuneFold(a[i], b[i]) { - continue - } - return false - } - - return true -} - -func (Runes) Equal(a, b []rune) bool { - if len(a) != len(b) { - return false - } - for i := 0; i < len(a); i++ { - if a[i] != b[i] { - return false - } - } - return true -} - -func (rs Runes) IndexAllBckEx(r, sub []rune, fold bool) int { - for i := len(r) - len(sub); i >= 0; i-- { - found := true - for j := 0; j < len(sub); j++ { - if !rs.EqualRune(r[i+j], sub[j], fold) { - found = false - break - } - } - if found { - return i - } - } - return -1 -} - -// Search in runes from end to front -func (rs Runes) IndexAllBck(r, sub []rune) int { - 
return rs.IndexAllBckEx(r, sub, false) -} - -// Search in runes from front to end -func (rs Runes) IndexAll(r, sub []rune) int { - return rs.IndexAllEx(r, sub, false) -} - -func (rs Runes) IndexAllEx(r, sub []rune, fold bool) int { - for i := 0; i < len(r); i++ { - found := true - if len(r[i:]) < len(sub) { - return -1 - } - for j := 0; j < len(sub); j++ { - if !rs.EqualRune(r[i+j], sub[j], fold) { - found = false - break - } - } - if found { - return i - } - } - return -1 -} - -func (Runes) Index(r rune, rs []rune) int { - for i := 0; i < len(rs); i++ { - if rs[i] == r { - return i - } - } - return -1 -} - -func (Runes) ColorFilter(r []rune) []rune { - newr := make([]rune, 0, len(r)) - for pos := 0; pos < len(r); pos++ { - if r[pos] == '\033' && r[pos+1] == '[' { - idx := runes.Index('m', r[pos+2:]) - if idx == -1 { - continue - } - pos += idx + 2 - continue - } - newr = append(newr, r[pos]) - } - return newr -} - -var zeroWidth = []*unicode.RangeTable{ - unicode.Mn, - unicode.Me, - unicode.Cc, - unicode.Cf, -} - -var doubleWidth = []*unicode.RangeTable{ - unicode.Han, - unicode.Hangul, - unicode.Hiragana, - unicode.Katakana, -} - -func (Runes) Width(r rune) int { - if r == '\t' { - return TabWidth - } - if unicode.IsOneOf(zeroWidth, r) { - return 0 - } - if unicode.IsOneOf(doubleWidth, r) { - return 2 - } - return 1 -} - -func (Runes) WidthAll(r []rune) (length int) { - for i := 0; i < len(r); i++ { - length += runes.Width(r[i]) - } - return -} - -func (Runes) Backspace(r []rune) []byte { - return bytes.Repeat([]byte{'\b'}, runes.WidthAll(r)) -} - -func (Runes) Copy(r []rune) []rune { - n := make([]rune, len(r)) - copy(n, r) - return n -} - -func (Runes) HasPrefixFold(r, prefix []rune) bool { - if len(r) < len(prefix) { - return false - } - return runes.EqualFold(r[:len(prefix)], prefix) -} - -func (Runes) HasPrefix(r, prefix []rune) bool { - if len(r) < len(prefix) { - return false - } - return runes.Equal(r[:len(prefix)], prefix) -} - -func (Runes) 
Aggregate(candicate [][]rune) (same []rune, size int) { - for i := 0; i < len(candicate[0]); i++ { - for j := 0; j < len(candicate)-1; j++ { - if i >= len(candicate[j]) || i >= len(candicate[j+1]) { - goto aggregate - } - if candicate[j][i] != candicate[j+1][i] { - goto aggregate - } - } - size = i + 1 - } -aggregate: - if size > 0 { - same = runes.Copy(candicate[0][:size]) - for i := 0; i < len(candicate); i++ { - n := runes.Copy(candicate[i]) - copy(n, n[size:]) - candicate[i] = n[:len(n)-size] - } - } - return -} - -func (Runes) TrimSpaceLeft(in []rune) []rune { - firstIndex := len(in) - for i, r := range in { - if unicode.IsSpace(r) == false { - firstIndex = i - break - } - } - return in[firstIndex:] -} diff --git a/vendor/github.com/chzyer/readline/search.go b/vendor/github.com/chzyer/readline/search.go deleted file mode 100644 index 52e8ff09953..00000000000 --- a/vendor/github.com/chzyer/readline/search.go +++ /dev/null @@ -1,164 +0,0 @@ -package readline - -import ( - "bytes" - "container/list" - "fmt" - "io" -) - -const ( - S_STATE_FOUND = iota - S_STATE_FAILING -) - -const ( - S_DIR_BCK = iota - S_DIR_FWD -) - -type opSearch struct { - inMode bool - state int - dir int - source *list.Element - w io.Writer - buf *RuneBuffer - data []rune - history *opHistory - cfg *Config - markStart int - markEnd int - width int -} - -func newOpSearch(w io.Writer, buf *RuneBuffer, history *opHistory, cfg *Config, width int) *opSearch { - return &opSearch{ - w: w, - buf: buf, - cfg: cfg, - history: history, - width: width, - } -} - -func (o *opSearch) OnWidthChange(newWidth int) { - o.width = newWidth -} - -func (o *opSearch) IsSearchMode() bool { - return o.inMode -} - -func (o *opSearch) SearchBackspace() { - if len(o.data) > 0 { - o.data = o.data[:len(o.data)-1] - o.search(true) - } -} - -func (o *opSearch) findHistoryBy(isNewSearch bool) (int, *list.Element) { - if o.dir == S_DIR_BCK { - return o.history.FindBck(isNewSearch, o.data, o.buf.idx) - } - return 
o.history.FindFwd(isNewSearch, o.data, o.buf.idx) -} - -func (o *opSearch) search(isChange bool) bool { - if len(o.data) == 0 { - o.state = S_STATE_FOUND - o.SearchRefresh(-1) - return true - } - idx, elem := o.findHistoryBy(isChange) - if elem == nil { - o.SearchRefresh(-2) - return false - } - o.history.current = elem - - item := o.history.showItem(o.history.current.Value) - start, end := 0, 0 - if o.dir == S_DIR_BCK { - start, end = idx, idx+len(o.data) - } else { - start, end = idx, idx+len(o.data) - idx += len(o.data) - } - o.buf.SetWithIdx(idx, item) - o.markStart, o.markEnd = start, end - o.SearchRefresh(idx) - return true -} - -func (o *opSearch) SearchChar(r rune) { - o.data = append(o.data, r) - o.search(true) -} - -func (o *opSearch) SearchMode(dir int) bool { - if o.width == 0 { - return false - } - alreadyInMode := o.inMode - o.inMode = true - o.dir = dir - o.source = o.history.current - if alreadyInMode { - o.search(false) - } else { - o.SearchRefresh(-1) - } - return true -} - -func (o *opSearch) ExitSearchMode(revert bool) { - if revert { - o.history.current = o.source - o.buf.Set(o.history.showItem(o.history.current.Value)) - } - o.markStart, o.markEnd = 0, 0 - o.state = S_STATE_FOUND - o.inMode = false - o.source = nil - o.data = nil -} - -func (o *opSearch) SearchRefresh(x int) { - if x == -2 { - o.state = S_STATE_FAILING - } else if x >= 0 { - o.state = S_STATE_FOUND - } - if x < 0 { - x = o.buf.idx - } - x = o.buf.CurrentWidth(x) - x += o.buf.PromptLen() - x = x % o.width - - if o.markStart > 0 { - o.buf.SetStyle(o.markStart, o.markEnd, "4") - } - - lineCnt := o.buf.CursorLineCount() - buf := bytes.NewBuffer(nil) - buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) - buf.WriteString("\033[J") - if o.state == S_STATE_FAILING { - buf.WriteString("failing ") - } - if o.dir == S_DIR_BCK { - buf.WriteString("bck") - } else if o.dir == S_DIR_FWD { - buf.WriteString("fwd") - } - buf.WriteString("-i-search: ") - buf.WriteString(string(o.data)) // keyword - 
buf.WriteString("\033[4m \033[0m") // _ - fmt.Fprintf(buf, "\r\033[%dA", lineCnt) // move prev - if x > 0 { - fmt.Fprintf(buf, "\033[%dC", x) // move forward - } - o.w.Write(buf.Bytes()) -} diff --git a/vendor/github.com/chzyer/readline/std.go b/vendor/github.com/chzyer/readline/std.go deleted file mode 100644 index e0c55ee983d..00000000000 --- a/vendor/github.com/chzyer/readline/std.go +++ /dev/null @@ -1,133 +0,0 @@ -package readline - -import ( - "io" - "os" - "sync" - "sync/atomic" -) - -var ( - Stdin io.ReadCloser = os.Stdin - Stdout io.WriteCloser = os.Stdout - Stderr io.WriteCloser = os.Stderr -) - -var ( - std *Instance - stdOnce sync.Once -) - -// global instance will not submit history automatic -func getInstance() *Instance { - stdOnce.Do(func() { - std, _ = NewEx(&Config{ - DisableAutoSaveHistory: true, - }) - }) - return std -} - -// let readline load history from filepath -// and try to persist history into disk -// set fp to "" to prevent readline persisting history to disk -// so the `AddHistory` will return nil error forever. 
-func SetHistoryPath(fp string) { - ins := getInstance() - cfg := ins.Config.Clone() - cfg.HistoryFile = fp - ins.SetConfig(cfg) -} - -// set auto completer to global instance -func SetAutoComplete(completer AutoCompleter) { - ins := getInstance() - cfg := ins.Config.Clone() - cfg.AutoComplete = completer - ins.SetConfig(cfg) -} - -// add history to global instance manually -// raise error only if `SetHistoryPath` is set with a non-empty path -func AddHistory(content string) error { - ins := getInstance() - return ins.SaveHistory(content) -} - -func Password(prompt string) ([]byte, error) { - ins := getInstance() - return ins.ReadPassword(prompt) -} - -// readline with global configs -func Line(prompt string) (string, error) { - ins := getInstance() - ins.SetPrompt(prompt) - return ins.Readline() -} - -type CancelableStdin struct { - r io.Reader - mutex sync.Mutex - stop chan struct{} - closed int32 - notify chan struct{} - data []byte - read int - err error -} - -func NewCancelableStdin(r io.Reader) *CancelableStdin { - c := &CancelableStdin{ - r: r, - notify: make(chan struct{}), - stop: make(chan struct{}), - } - go c.ioloop() - return c -} - -func (c *CancelableStdin) ioloop() { -loop: - for { - select { - case <-c.notify: - c.read, c.err = c.r.Read(c.data) - select { - case c.notify <- struct{}{}: - case <-c.stop: - break loop - } - case <-c.stop: - break loop - } - } -} - -func (c *CancelableStdin) Read(b []byte) (n int, err error) { - c.mutex.Lock() - defer c.mutex.Unlock() - if atomic.LoadInt32(&c.closed) == 1 { - return 0, io.EOF - } - - c.data = b - select { - case c.notify <- struct{}{}: - case <-c.stop: - return 0, io.EOF - } - select { - case <-c.notify: - return c.read, c.err - case <-c.stop: - return 0, io.EOF - } -} - -func (c *CancelableStdin) Close() error { - if atomic.CompareAndSwapInt32(&c.closed, 0, 1) { - close(c.stop) - } - return nil -} diff --git a/vendor/github.com/chzyer/readline/std_windows.go 
b/vendor/github.com/chzyer/readline/std_windows.go deleted file mode 100644 index b10f91bcb7e..00000000000 --- a/vendor/github.com/chzyer/readline/std_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package readline - -func init() { - Stdin = NewRawReader() - Stdout = NewANSIWriter(Stdout) - Stderr = NewANSIWriter(Stderr) -} diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go deleted file mode 100644 index 133993ca8ea..00000000000 --- a/vendor/github.com/chzyer/readline/term.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. -// -// Putting a terminal into raw mode is the most common requirement: -// -// oldState, err := terminal.MakeRaw(0) -// if err != nil { -// panic(err) -// } -// defer terminal.Restore(0, oldState) -package readline - -import ( - "io" - "syscall" -) - -// State contains the state of a terminal. -type State struct { - termios Termios -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - _, err := getTermios(fd) - return err == nil -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd int) (*State, error) { - var oldState State - - if termios, err := getTermios(fd); err != nil { - return nil, err - } else { - oldState.termios = *termios - } - - newState := oldState.termios - // This attempts to replicate the behaviour documented for cfmakeraw in - // the termios(3) manpage. 
- newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON - // newState.Oflag &^= syscall.OPOST - newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN - newState.Cflag &^= syscall.CSIZE | syscall.PARENB - newState.Cflag |= syscall.CS8 - - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - return &oldState, setTermios(fd, &newState) -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - termios, err := getTermios(fd) - if err != nil { - return nil, err - } - - return &State{termios: *termios}, nil -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func restoreTerm(fd int, state *State) error { - return setTermios(fd, &state.termios) -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. -func ReadPassword(fd int) ([]byte, error) { - oldState, err := getTermios(fd) - if err != nil { - return nil, err - } - - newState := oldState - newState.Lflag &^= syscall.ECHO - newState.Lflag |= syscall.ICANON | syscall.ISIG - newState.Iflag |= syscall.ICRNL - if err := setTermios(fd, newState); err != nil { - return nil, err - } - - defer func() { - setTermios(fd, oldState) - }() - - var buf [16]byte - var ret []byte - for { - n, err := syscall.Read(fd, buf[:]) - if err != nil { - return nil, err - } - if n == 0 { - if len(ret) == 0 { - return nil, io.EOF - } - break - } - if buf[n-1] == '\n' { - n-- - } - ret = append(ret, buf[:n]...) 
- if n < len(buf) { - break - } - } - - return ret, nil -} diff --git a/vendor/github.com/chzyer/readline/term_bsd.go b/vendor/github.com/chzyer/readline/term_bsd.go deleted file mode 100644 index 68b56ea6ba7..00000000000 --- a/vendor/github.com/chzyer/readline/term_bsd.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd openbsd - -package readline - -import ( - "syscall" - "unsafe" -) - -func getTermios(fd int) (*Termios, error) { - termios := new(Termios) - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return nil, err - } - return termios, nil -} - -func setTermios(fd int, termios *Termios) error { - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/chzyer/readline/term_linux.go b/vendor/github.com/chzyer/readline/term_linux.go deleted file mode 100644 index e3392b4ac2d..00000000000 --- a/vendor/github.com/chzyer/readline/term_linux.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package readline - -import ( - "syscall" - "unsafe" -) - -// These constants are declared here, rather than importing -// them from the syscall package as some syscall packages, even -// on linux, for example gccgo, do not declare them. 
-const ioctlReadTermios = 0x5401 // syscall.TCGETS -const ioctlWriteTermios = 0x5402 // syscall.TCSETS - -func getTermios(fd int) (*Termios, error) { - termios := new(Termios) - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return nil, err - } - return termios, nil -} - -func setTermios(fd int, termios *Termios) error { - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_solaris.go deleted file mode 100644 index 4c27273c7ab..00000000000 --- a/vendor/github.com/chzyer/readline/term_solaris.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package readline - -import "golang.org/x/sys/unix" - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (int, int, error) { - ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) - if err != nil { - return 0, 0, err - } - return int(ws.Col), int(ws.Row), nil -} - -type Termios unix.Termios - -func getTermios(fd int) (*Termios, error) { - termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) - if err != nil { - return nil, err - } - return (*Termios)(termios), nil -} - -func setTermios(fd int, termios *Termios) error { - return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios)) -} diff --git a/vendor/github.com/chzyer/readline/term_unix.go b/vendor/github.com/chzyer/readline/term_unix.go deleted file mode 100644 index d3ea242448d..00000000000 --- a/vendor/github.com/chzyer/readline/term_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd - -package readline - -import ( - "syscall" - "unsafe" -) - -type Termios syscall.Termios - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (int, int, error) { - var dimensions [4]uint16 - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0) - if err != 0 { - return 0, 0, err - } - return int(dimensions[1]), int(dimensions[0]), nil -} diff --git a/vendor/github.com/chzyer/readline/term_windows.go b/vendor/github.com/chzyer/readline/term_windows.go deleted file mode 100644 index 1290e00bc14..00000000000 --- a/vendor/github.com/chzyer/readline/term_windows.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. 
-// -// Putting a terminal into raw mode is the most common requirement: -// -// oldState, err := terminal.MakeRaw(0) -// if err != nil { -// panic(err) -// } -// defer terminal.Restore(0, oldState) -package readline - -import ( - "io" - "syscall" - "unsafe" -) - -const ( - enableLineInput = 2 - enableEchoInput = 4 - enableProcessedInput = 1 - enableWindowInput = 8 - enableMouseInput = 16 - enableInsertMode = 32 - enableQuickEditMode = 64 - enableExtendedFlags = 128 - enableAutoPosition = 256 - enableProcessedOutput = 1 - enableWrapAtEolOutput = 2 -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - procSetConsoleMode = kernel32.NewProc("SetConsoleMode") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") -) - -type ( - coord struct { - x short - y short - } - smallRect struct { - left short - top short - right short - bottom short - } - consoleScreenBufferInfo struct { - size coord - cursorPosition coord - attributes word - window smallRect - maximumWindowSize coord - } -) - -type State struct { - mode uint32 -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd int) (*State, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) - _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) - if e != 0 { - return nil, error(e) - } - return &State{st}, nil -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - return &State{st}, nil -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func restoreTerm(fd int, state *State) error { - _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) - return err -} - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (width, height int, err error) { - var info consoleScreenBufferInfo - _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) - if e != 0 { - return 0, 0, error(e) - } - return int(info.size.x), int(info.size.y), nil -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. 
-func ReadPassword(fd int) ([]byte, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - old := st - - st &^= (enableEchoInput) - st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) - _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) - if e != 0 { - return nil, error(e) - } - - defer func() { - syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) - }() - - var buf [16]byte - var ret []byte - for { - n, err := syscall.Read(syscall.Handle(fd), buf[:]) - if err != nil { - return nil, err - } - if n == 0 { - if len(ret) == 0 { - return nil, io.EOF - } - break - } - if buf[n-1] == '\n' { - n-- - } - if n > 0 && buf[n-1] == '\r' { - n-- - } - ret = append(ret, buf[:n]...) - if n < len(buf) { - break - } - } - - return ret, nil -} diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go deleted file mode 100644 index 80147122dc8..00000000000 --- a/vendor/github.com/chzyer/readline/terminal.go +++ /dev/null @@ -1,232 +0,0 @@ -package readline - -import ( - "bufio" - "fmt" - "io" - "strings" - "sync" - "sync/atomic" -) - -type Terminal struct { - m sync.Mutex - cfg *Config - outchan chan rune - closed int32 - stopChan chan struct{} - kickChan chan struct{} - wg sync.WaitGroup - isReading int32 - sleeping int32 - - sizeChan chan string -} - -func NewTerminal(cfg *Config) (*Terminal, error) { - if err := cfg.Init(); err != nil { - return nil, err - } - t := &Terminal{ - cfg: cfg, - kickChan: make(chan struct{}, 1), - outchan: make(chan rune), - stopChan: make(chan struct{}, 1), - sizeChan: make(chan string, 1), - } - - go t.ioloop() - return t, nil -} - -// SleepToResume will sleep myself, and return only if I'm resumed. 
-func (t *Terminal) SleepToResume() { - if !atomic.CompareAndSwapInt32(&t.sleeping, 0, 1) { - return - } - defer atomic.StoreInt32(&t.sleeping, 0) - - t.ExitRawMode() - ch := WaitForResume() - SuspendMe() - <-ch - t.EnterRawMode() -} - -func (t *Terminal) EnterRawMode() (err error) { - return t.cfg.FuncMakeRaw() -} - -func (t *Terminal) ExitRawMode() (err error) { - return t.cfg.FuncExitRaw() -} - -func (t *Terminal) Write(b []byte) (int, error) { - return t.cfg.Stdout.Write(b) -} - -type termSize struct { - left int - top int -} - -func (t *Terminal) GetOffset(f func(offset string)) { - go func() { - f(<-t.sizeChan) - }() - t.Write([]byte("\033[6n")) -} - -func (t *Terminal) Print(s string) { - fmt.Fprintf(t.cfg.Stdout, "%s", s) -} - -func (t *Terminal) PrintRune(r rune) { - fmt.Fprintf(t.cfg.Stdout, "%c", r) -} - -func (t *Terminal) Readline() *Operation { - return NewOperation(t, t.cfg) -} - -// return rune(0) if meet EOF -func (t *Terminal) ReadRune() rune { - ch, ok := <-t.outchan - if !ok { - return rune(0) - } - return ch -} - -func (t *Terminal) IsReading() bool { - return atomic.LoadInt32(&t.isReading) == 1 -} - -func (t *Terminal) KickRead() { - select { - case t.kickChan <- struct{}{}: - default: - } -} - -func (t *Terminal) ioloop() { - t.wg.Add(1) - defer func() { - t.wg.Done() - close(t.outchan) - }() - - var ( - isEscape bool - isEscapeEx bool - expectNextChar bool - ) - - buf := bufio.NewReader(t.getStdin()) - for { - if !expectNextChar { - atomic.StoreInt32(&t.isReading, 0) - select { - case <-t.kickChan: - atomic.StoreInt32(&t.isReading, 1) - case <-t.stopChan: - return - } - } - expectNextChar = false - r, _, err := buf.ReadRune() - if err != nil { - if strings.Contains(err.Error(), "interrupted system call") { - expectNextChar = true - continue - } - break - } - - if isEscape { - isEscape = false - if r == CharEscapeEx { - expectNextChar = true - isEscapeEx = true - continue - } - r = escapeKey(r, buf) - } else if isEscapeEx { - isEscapeEx = 
false - if key := readEscKey(r, buf); key != nil { - r = escapeExKey(key) - // offset - if key.typ == 'R' { - if _, _, ok := key.Get2(); ok { - select { - case t.sizeChan <- key.attr: - default: - } - } - expectNextChar = true - continue - } - } - if r == 0 { - expectNextChar = true - continue - } - } - - expectNextChar = true - switch r { - case CharEsc: - if t.cfg.VimMode { - t.outchan <- r - break - } - isEscape = true - case CharInterrupt, CharEnter, CharCtrlJ, CharDelete: - expectNextChar = false - fallthrough - default: - t.outchan <- r - } - } - -} - -func (t *Terminal) Bell() { - fmt.Fprintf(t, "%c", CharBell) -} - -func (t *Terminal) Close() error { - if atomic.SwapInt32(&t.closed, 1) != 0 { - return nil - } - if closer, ok := t.cfg.Stdin.(io.Closer); ok { - closer.Close() - } - close(t.stopChan) - t.wg.Wait() - return t.ExitRawMode() -} - -func (t *Terminal) GetConfig() *Config { - t.m.Lock() - cfg := *t.cfg - t.m.Unlock() - return &cfg -} - -func (t *Terminal) getStdin() io.Reader { - t.m.Lock() - r := t.cfg.Stdin - t.m.Unlock() - return r -} - -func (t *Terminal) SetConfig(c *Config) error { - if err := c.Init(); err != nil { - return err - } - t.m.Lock() - t.cfg = c - t.m.Unlock() - return nil -} diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go deleted file mode 100644 index 670736b38fe..00000000000 --- a/vendor/github.com/chzyer/readline/utils.go +++ /dev/null @@ -1,276 +0,0 @@ -package readline - -import ( - "bufio" - "bytes" - "container/list" - "fmt" - "os" - "strconv" - "strings" - "sync" - "time" - "unicode" -) - -var ( - isWindows = false -) - -const ( - CharLineStart = 1 - CharBackward = 2 - CharInterrupt = 3 - CharDelete = 4 - CharLineEnd = 5 - CharForward = 6 - CharBell = 7 - CharCtrlH = 8 - CharTab = 9 - CharCtrlJ = 10 - CharKill = 11 - CharCtrlL = 12 - CharEnter = 13 - CharNext = 14 - CharPrev = 16 - CharBckSearch = 18 - CharFwdSearch = 19 - CharTranspose = 20 - CharCtrlU = 21 - CharCtrlW 
= 23 - CharCtrlZ = 26 - CharEsc = 27 - CharEscapeEx = 91 - CharBackspace = 127 -) - -const ( - MetaBackward rune = -iota - 1 - MetaForward - MetaDelete - MetaBackspace - MetaTranspose -) - -// WaitForResume need to call before current process got suspend. -// It will run a ticker until a long duration is occurs, -// which means this process is resumed. -func WaitForResume() chan struct{} { - ch := make(chan struct{}) - var wg sync.WaitGroup - wg.Add(1) - go func() { - ticker := time.NewTicker(10 * time.Millisecond) - t := time.Now() - wg.Done() - for { - now := <-ticker.C - if now.Sub(t) > 100*time.Millisecond { - break - } - t = now - } - ticker.Stop() - ch <- struct{}{} - }() - wg.Wait() - return ch -} - -func Restore(fd int, state *State) error { - err := restoreTerm(fd, state) - if err != nil { - // errno 0 means everything is ok :) - if err.Error() == "errno 0" { - return nil - } else { - return err - } - } - return nil -} - -func IsPrintable(key rune) bool { - isInSurrogateArea := key >= 0xd800 && key <= 0xdbff - return key >= 32 && !isInSurrogateArea -} - -// translate Esc[X -func escapeExKey(key *escapeKeyPair) rune { - var r rune - switch key.typ { - case 'D': - r = CharBackward - case 'C': - r = CharForward - case 'A': - r = CharPrev - case 'B': - r = CharNext - case 'H': - r = CharLineStart - case 'F': - r = CharLineEnd - case '~': - if key.attr == "3" { - r = CharDelete - } - default: - } - return r -} - -type escapeKeyPair struct { - attr string - typ rune -} - -func (e *escapeKeyPair) Get2() (int, int, bool) { - sp := strings.Split(e.attr, ";") - if len(sp) < 2 { - return -1, -1, false - } - s1, err := strconv.Atoi(sp[0]) - if err != nil { - return -1, -1, false - } - s2, err := strconv.Atoi(sp[1]) - if err != nil { - return -1, -1, false - } - return s1, s2, true -} - -func readEscKey(r rune, reader *bufio.Reader) *escapeKeyPair { - p := escapeKeyPair{} - buf := bytes.NewBuffer(nil) - for { - if r == ';' { - } else if unicode.IsNumber(r) { - } else { 
- p.typ = r - break - } - buf.WriteRune(r) - r, _, _ = reader.ReadRune() - } - p.attr = buf.String() - return &p -} - -// translate EscX to Meta+X -func escapeKey(r rune, reader *bufio.Reader) rune { - switch r { - case 'b': - r = MetaBackward - case 'f': - r = MetaForward - case 'd': - r = MetaDelete - case CharTranspose: - r = MetaTranspose - case CharBackspace: - r = MetaBackspace - case 'O': - d, _, _ := reader.ReadRune() - switch d { - case 'H': - r = CharLineStart - case 'F': - r = CharLineEnd - default: - reader.UnreadRune() - } - case CharEsc: - - } - return r -} - -func SplitByLine(start, screenWidth int, rs []rune) []string { - var ret []string - buf := bytes.NewBuffer(nil) - currentWidth := start - for _, r := range rs { - w := runes.Width(r) - currentWidth += w - buf.WriteRune(r) - if currentWidth >= screenWidth { - ret = append(ret, buf.String()) - buf.Reset() - currentWidth = 0 - } - } - ret = append(ret, buf.String()) - return ret -} - -// calculate how many lines for N character -func LineCount(screenWidth, w int) int { - r := w / screenWidth - if w%screenWidth != 0 { - r++ - } - return r -} - -func IsWordBreak(i rune) bool { - switch { - case i >= 'a' && i <= 'z': - case i >= 'A' && i <= 'Z': - case i >= '0' && i <= '9': - default: - return true - } - return false -} - -func GetInt(s []string, def int) int { - if len(s) == 0 { - return def - } - c, err := strconv.Atoi(s[0]) - if err != nil { - return def - } - return c -} - -type RawMode struct { - state *State -} - -func (r *RawMode) Enter() (err error) { - r.state, err = MakeRaw(GetStdin()) - return err -} - -func (r *RawMode) Exit() error { - if r.state == nil { - return nil - } - return Restore(GetStdin(), r.state) -} - -// ----------------------------------------------------------------------------- - -func sleep(n int) { - Debug(n) - time.Sleep(2000 * time.Millisecond) -} - -// print a linked list to Debug() -func debugList(l *list.List) { - idx := 0 - for e := l.Front(); e != nil; e = 
e.Next() { - Debug(idx, fmt.Sprintf("%+v", e.Value)) - idx++ - } -} - -// append log info to another file -func Debug(o ...interface{}) { - f, _ := os.OpenFile("debug.tmp", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) - fmt.Fprintln(f, o...) - f.Close() -} diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go deleted file mode 100644 index f88dac97bd7..00000000000 --- a/vendor/github.com/chzyer/readline/utils_unix.go +++ /dev/null @@ -1,83 +0,0 @@ -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris - -package readline - -import ( - "io" - "os" - "os/signal" - "sync" - "syscall" -) - -type winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -// SuspendMe use to send suspend signal to myself, when we in the raw mode. -// For OSX it need to send to parent's pid -// For Linux it need to send to myself -func SuspendMe() { - p, _ := os.FindProcess(os.Getppid()) - p.Signal(syscall.SIGTSTP) - p, _ = os.FindProcess(os.Getpid()) - p.Signal(syscall.SIGTSTP) -} - -// get width of the terminal -func getWidth(stdoutFd int) int { - cols, _, err := GetSize(stdoutFd) - if err != nil { - return -1 - } - return cols -} - -func GetScreenWidth() int { - w := getWidth(syscall.Stdout) - if w < 0 { - w = getWidth(syscall.Stderr) - } - return w -} - -// ClearScreen clears the console screen -func ClearScreen(w io.Writer) (int, error) { - return w.Write([]byte("\033[H")) -} - -func DefaultIsTerminal() bool { - return IsTerminal(syscall.Stdin) && (IsTerminal(syscall.Stdout) || IsTerminal(syscall.Stderr)) -} - -func GetStdin() int { - return syscall.Stdin -} - -// ----------------------------------------------------------------------------- - -var ( - widthChange sync.Once - widthChangeCallback func() -) - -func DefaultOnWidthChanged(f func()) { - widthChangeCallback = f - widthChange.Do(func() { - ch := make(chan os.Signal, 1) - signal.Notify(ch, syscall.SIGWINCH) - - go func() { - for { 
- _, ok := <-ch - if !ok { - break - } - widthChangeCallback() - } - }() - }) -} diff --git a/vendor/github.com/chzyer/readline/utils_windows.go b/vendor/github.com/chzyer/readline/utils_windows.go deleted file mode 100644 index 5bfa55dcce8..00000000000 --- a/vendor/github.com/chzyer/readline/utils_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build windows - -package readline - -import ( - "io" - "syscall" -) - -func SuspendMe() { -} - -func GetStdin() int { - return int(syscall.Stdin) -} - -func init() { - isWindows = true -} - -// get width of the terminal -func GetScreenWidth() int { - info, _ := GetConsoleScreenBufferInfo() - if info == nil { - return -1 - } - return int(info.dwSize.x) -} - -// ClearScreen clears the console screen -func ClearScreen(_ io.Writer) error { - return SetConsoleCursorPosition(&_COORD{0, 0}) -} - -func DefaultIsTerminal() bool { - return true -} - -func DefaultOnWidthChanged(func()) { - -} diff --git a/vendor/github.com/chzyer/readline/vim.go b/vendor/github.com/chzyer/readline/vim.go deleted file mode 100644 index 641b22b7a54..00000000000 --- a/vendor/github.com/chzyer/readline/vim.go +++ /dev/null @@ -1,174 +0,0 @@ -package readline - -const ( - VIM_NORMAL = iota - VIM_INSERT - VIM_VISUAL -) - -type opVim struct { - cfg *Config - op *Operation - vimMode int -} - -func newVimMode(op *Operation) *opVim { - ov := &opVim{ - cfg: op.cfg, - op: op, - } - ov.SetVimMode(ov.cfg.VimMode) - return ov -} - -func (o *opVim) SetVimMode(on bool) { - if o.cfg.VimMode && !on { // turn off - o.ExitVimMode() - } - o.cfg.VimMode = on - o.vimMode = VIM_INSERT -} - -func (o *opVim) ExitVimMode() { - o.vimMode = VIM_INSERT -} - -func (o *opVim) IsEnableVimMode() bool { - return o.cfg.VimMode -} - -func (o *opVim) handleVimNormalMovement(r rune, readNext func() rune) (t rune, handled bool) { - rb := o.op.buf - handled = true - switch r { - case 'h': - t = CharBackward - case 'j': - t = CharNext - case 'k': - t = CharPrev - case 'l': - t = CharForward - 
case '0', '^': - rb.MoveToLineStart() - case '$': - rb.MoveToLineEnd() - case 'x': - rb.Delete() - if rb.IsCursorInEnd() { - rb.MoveBackward() - } - case 'r': - rb.Replace(readNext()) - case 'd': - next := readNext() - switch next { - case 'd': - rb.Erase() - case 'w': - rb.DeleteWord() - case 'h': - rb.Backspace() - case 'l': - rb.Delete() - } - case 'b', 'B': - rb.MoveToPrevWord() - case 'w', 'W': - rb.MoveToNextWord() - case 'e', 'E': - rb.MoveToEndWord() - case 'f', 'F', 't', 'T': - next := readNext() - prevChar := r == 't' || r == 'T' - reverse := r == 'F' || r == 'T' - switch next { - case CharEsc: - default: - rb.MoveTo(next, prevChar, reverse) - } - default: - return r, false - } - return t, true -} - -func (o *opVim) handleVimNormalEnterInsert(r rune, readNext func() rune) (t rune, handled bool) { - rb := o.op.buf - handled = true - switch r { - case 'i': - case 'I': - rb.MoveToLineStart() - case 'a': - rb.MoveForward() - case 'A': - rb.MoveToLineEnd() - case 's': - rb.Delete() - case 'S': - rb.Erase() - case 'c': - next := readNext() - switch next { - case 'c': - rb.Erase() - case 'w': - rb.DeleteWord() - case 'h': - rb.Backspace() - case 'l': - rb.Delete() - } - default: - return r, false - } - - o.EnterVimInsertMode() - return -} - -func (o *opVim) HandleVimNormal(r rune, readNext func() rune) (t rune) { - switch r { - case CharEnter, CharInterrupt: - o.ExitVimMode() - return r - } - - if r, handled := o.handleVimNormalMovement(r, readNext); handled { - return r - } - - if r, handled := o.handleVimNormalEnterInsert(r, readNext); handled { - return r - } - - // invalid operation - o.op.t.Bell() - return 0 -} - -func (o *opVim) EnterVimInsertMode() { - o.vimMode = VIM_INSERT -} - -func (o *opVim) ExitVimInsertMode() { - o.vimMode = VIM_NORMAL -} - -func (o *opVim) HandleVim(r rune, readNext func() rune) rune { - if o.vimMode == VIM_NORMAL { - return o.HandleVimNormal(r, readNext) - } - if r == CharEsc { - o.ExitVimInsertMode() - return 0 - } - - switch 
o.vimMode { - case VIM_INSERT: - return r - case VIM_VISUAL: - } - return r -} diff --git a/vendor/github.com/chzyer/readline/windows_api.go b/vendor/github.com/chzyer/readline/windows_api.go deleted file mode 100644 index 63f4f7b78fc..00000000000 --- a/vendor/github.com/chzyer/readline/windows_api.go +++ /dev/null @@ -1,152 +0,0 @@ -// +build windows - -package readline - -import ( - "reflect" - "syscall" - "unsafe" -) - -var ( - kernel = NewKernel() - stdout = uintptr(syscall.Stdout) - stdin = uintptr(syscall.Stdin) -) - -type Kernel struct { - SetConsoleCursorPosition, - SetConsoleTextAttribute, - FillConsoleOutputCharacterW, - FillConsoleOutputAttribute, - ReadConsoleInputW, - GetConsoleScreenBufferInfo, - GetConsoleCursorInfo, - GetStdHandle CallFunc -} - -type short int16 -type word uint16 -type dword uint32 -type wchar uint16 - -type _COORD struct { - x short - y short -} - -func (c *_COORD) ptr() uintptr { - return uintptr(*(*int32)(unsafe.Pointer(c))) -} - -const ( - EVENT_KEY = 0x0001 - EVENT_MOUSE = 0x0002 - EVENT_WINDOW_BUFFER_SIZE = 0x0004 - EVENT_MENU = 0x0008 - EVENT_FOCUS = 0x0010 -) - -type _KEY_EVENT_RECORD struct { - bKeyDown int32 - wRepeatCount word - wVirtualKeyCode word - wVirtualScanCode word - unicodeChar wchar - dwControlKeyState dword -} - -// KEY_EVENT_RECORD KeyEvent; -// MOUSE_EVENT_RECORD MouseEvent; -// WINDOW_BUFFER_SIZE_RECORD WindowBufferSizeEvent; -// MENU_EVENT_RECORD MenuEvent; -// FOCUS_EVENT_RECORD FocusEvent; -type _INPUT_RECORD struct { - EventType word - Padding uint16 - Event [16]byte -} - -type _CONSOLE_SCREEN_BUFFER_INFO struct { - dwSize _COORD - dwCursorPosition _COORD - wAttributes word - srWindow _SMALL_RECT - dwMaximumWindowSize _COORD -} - -type _SMALL_RECT struct { - left short - top short - right short - bottom short -} - -type _CONSOLE_CURSOR_INFO struct { - dwSize dword - bVisible bool -} - -type CallFunc func(u ...uintptr) error - -func NewKernel() *Kernel { - k := &Kernel{} - kernel32 := 
syscall.NewLazyDLL("kernel32.dll") - v := reflect.ValueOf(k).Elem() - t := v.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - f := kernel32.NewProc(name) - v.Field(i).Set(reflect.ValueOf(k.Wrap(f))) - } - return k -} - -func (k *Kernel) Wrap(p *syscall.LazyProc) CallFunc { - return func(args ...uintptr) error { - var r0 uintptr - var e1 syscall.Errno - size := uintptr(len(args)) - if len(args) <= 3 { - buf := make([]uintptr, 3) - copy(buf, args) - r0, _, e1 = syscall.Syscall(p.Addr(), size, - buf[0], buf[1], buf[2]) - } else { - buf := make([]uintptr, 6) - copy(buf, args) - r0, _, e1 = syscall.Syscall6(p.Addr(), size, - buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], - ) - } - - if int(r0) == 0 { - if e1 != 0 { - return error(e1) - } else { - return syscall.EINVAL - } - } - return nil - } - -} - -func GetConsoleScreenBufferInfo() (*_CONSOLE_SCREEN_BUFFER_INFO, error) { - t := new(_CONSOLE_SCREEN_BUFFER_INFO) - err := kernel.GetConsoleScreenBufferInfo( - stdout, - uintptr(unsafe.Pointer(t)), - ) - return t, err -} - -func GetConsoleCursorInfo() (*_CONSOLE_CURSOR_INFO, error) { - t := new(_CONSOLE_CURSOR_INFO) - err := kernel.GetConsoleCursorInfo(stdout, uintptr(unsafe.Pointer(t))) - return t, err -} - -func SetConsoleCursorPosition(c *_COORD) error { - return kernel.SetConsoleCursorPosition(stdout, c.ptr()) -} diff --git a/vendor/github.com/dylanmei/iso8601/LICENSE b/vendor/github.com/dylanmei/iso8601/LICENSE deleted file mode 100644 index dcabcdc70d4..00000000000 --- a/vendor/github.com/dylanmei/iso8601/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Dylan Meissner - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to 
permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/dylanmei/iso8601/README.md b/vendor/github.com/dylanmei/iso8601/README.md deleted file mode 100644 index c93b3cf775a..00000000000 --- a/vendor/github.com/dylanmei/iso8601/README.md +++ /dev/null @@ -1,9 +0,0 @@ - -iso 8601 parser and formatter -============================= - -An [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) Go utility. 
- -- *Time* is not yet implemented -- *Duration* is mostly implemented - diff --git a/vendor/github.com/dylanmei/iso8601/duration.go b/vendor/github.com/dylanmei/iso8601/duration.go deleted file mode 100644 index d5cab17dcba..00000000000 --- a/vendor/github.com/dylanmei/iso8601/duration.go +++ /dev/null @@ -1,96 +0,0 @@ -package iso8601 - -import ( - "errors" - "fmt" - "regexp" - "strconv" - "time" -) - -var ( - // ErrBadFormat is returned when parsing fails - ErrBadFormat = errors.New("bad format string") - - // ErrNoMonth is raised when a month is in the format string - ErrNoMonth = errors.New("no months allowed") - - full = regexp.MustCompile(`P((?P\d+)Y)?((?P\d+)M)?((?P\d+)D)?(T((?P\d+)H)?((?P\d+)M)?((?P\d+)S)?)?`) - week = regexp.MustCompile(`P((?P\d+)W)`) -) - -// adapted from https://github.com/BrianHicks/finch/duration -func ParseDuration(value string) (time.Duration, error) { - var match []string - var regex *regexp.Regexp - - if week.MatchString(value) { - match = week.FindStringSubmatch(value) - regex = week - } else if full.MatchString(value) { - match = full.FindStringSubmatch(value) - regex = full - } else { - return time.Duration(0), ErrBadFormat - } - - d := time.Duration(0) - day := time.Hour * 24 - week := day * 7 - year := day * 365 - - for i, name := range regex.SubexpNames() { - part := match[i] - if i == 0 || name == "" || part == "" { - continue - } - - value, err := strconv.Atoi(part) - if err != nil { - return time.Duration(0), err - } - switch name { - case "year": - d += year * time.Duration(value) - case "month": - return time.Duration(0), ErrNoMonth - case "week": - d += week * time.Duration(value) - case "day": - d += day * time.Duration(value) - case "hour": - d += time.Hour * time.Duration(value) - case "minute": - d += time.Minute * time.Duration(value) - case "second": - d += time.Second * time.Duration(value) - } - } - - return d, nil -} - -func FormatDuration(duration time.Duration) string { - // we're not doing negative 
durations - if duration.Seconds() <= 0 { - return "PT0S" - } - - hours := int(duration.Hours()) - minutes := int(duration.Minutes()) - (hours * 60) - seconds := int(duration.Seconds()) - (hours*3600 + minutes*60) - - // we're not doing Y,M,W - s := "PT" - if hours > 0 { - s = fmt.Sprintf("%s%dH", s, hours) - } - if minutes > 0 { - s = fmt.Sprintf("%s%dM", s, minutes) - } - if seconds > 0 { - s = fmt.Sprintf("%s%dS", s, seconds) - } - - return s -} diff --git a/vendor/github.com/dylanmei/winrmtest/LICENSE b/vendor/github.com/dylanmei/winrmtest/LICENSE deleted file mode 100644 index aac5c68e7b5..00000000000 --- a/vendor/github.com/dylanmei/winrmtest/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2014-2015 Dylan Meissner - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/dylanmei/winrmtest/README.md b/vendor/github.com/dylanmei/winrmtest/README.md deleted file mode 100644 index 19c19609dbe..00000000000 --- a/vendor/github.com/dylanmei/winrmtest/README.md +++ /dev/null @@ -1,48 +0,0 @@ - -# winrmtest - -An in-progress testing package to compliment the [masterzen/winrm](https://github.com/masterzen/winrm) Go-based winrm library. - -My primary use-case for this is for [dylanmei/packer-communicator-winrm](https://github.com/dylanmei/packer-communicator-winrm), a [Packer](http://packer.io) communicator plugin for interacting with machines using Windows Remote Management. - -## Example Use - -A fictitious "Windows tools" package. - -``` - -package wintools - -import ( - "io" - "testing" - "github.com/dylanmei/winrmtest" -) - -func Test_empty_temp_directory(t *testing.T) { - r := winrmtest.NewRemote() - defer r.Close() - - r.CommandFunc(wimrmtest.MatchText("dir C:\Temp"), func(out, err io.Writer) int { - out.Write([]byte(` Volume in drive C is Windows 2012 R2 - Volume Serial Number is XXXX-XXXX - - Directory of C:\ - -File Not Found`)) - return 0 - }) - - lister := NewDirectoryLister(r.Host, r.Port) - list, _ := lister.TempDirectory() - - if count := len(list.Dirs()); count != 0 { - t.Errorf("Expected 0 directories but found %d.\n", count) - } - - if count := len(list.Files()); count != 0 { - t.Errorf("Expected 0 files but found %d.\n", count) - } -} -``` - diff --git a/vendor/github.com/dylanmei/winrmtest/remote.go b/vendor/github.com/dylanmei/winrmtest/remote.go deleted file mode 100644 index ecc083f796e..00000000000 --- a/vendor/github.com/dylanmei/winrmtest/remote.go +++ /dev/null @@ -1,79 +0,0 @@ -package winrmtest - -import ( - "io" - "net/http" - "net/http/httptest" - "net/url" - "regexp" - "strconv" - "strings" -) - -// Remote respresents a WinRM server -type Remote struct { - Host string - Port int - server *httptest.Server - service *wsman -} - -// NewRemote returns a new initialized Remote -func 
NewRemote() *Remote { - mux := http.NewServeMux() - srv := httptest.NewServer(mux) - - host, port, _ := splitAddr(srv.URL) - remote := Remote{ - Host: host, - Port: port, - server: srv, - service: &wsman{}, - } - - mux.Handle("/wsman", remote.service) - return &remote -} - -// Close closes the WinRM server -func (r *Remote) Close() { - r.server.Close() -} - -// MatcherFunc respresents a function used to match WinRM commands -type MatcherFunc func(candidate string) bool - -// MatchText return a new MatcherFunc based on text matching -func MatchText(text string) MatcherFunc { - return func(candidate string) bool { - return text == candidate - } -} - -// MatchPattern return a new MatcherFunc based on pattern matching -func MatchPattern(pattern string) MatcherFunc { - r := regexp.MustCompile(pattern) - return func(candidate string) bool { - return r.MatchString(candidate) - } -} - -// CommandFunc respresents a function used to mock WinRM commands -type CommandFunc func(out, err io.Writer) (exitCode int) - -// CommandFunc adds a WinRM command mock function to the WinRM server -func (r *Remote) CommandFunc(m MatcherFunc, f CommandFunc) { - r.service.HandleCommand(m, f) -} - -func splitAddr(addr string) (host string, port int, err error) { - u, err := url.Parse(addr) - if err != nil { - return - } - - split := strings.Split(u.Host, ":") - host = split[0] - port, err = strconv.Atoi(split[1]) - return -} diff --git a/vendor/github.com/dylanmei/winrmtest/wsman.go b/vendor/github.com/dylanmei/winrmtest/wsman.go deleted file mode 100644 index c6d1c247b00..00000000000 --- a/vendor/github.com/dylanmei/winrmtest/wsman.go +++ /dev/null @@ -1,170 +0,0 @@ -package winrmtest - -import ( - "bytes" - "encoding/base64" - "fmt" - "net/http" - "strconv" - "strings" - - "github.com/masterzen/winrm/soap" - "github.com/masterzen/xmlpath" - "github.com/satori/go.uuid" -) - -type wsman struct { - commands []*command - identitySeed int -} - -type command struct { - id string - matcher 
MatcherFunc - handler CommandFunc -} - -func (w *wsman) HandleCommand(m MatcherFunc, f CommandFunc) string { - id := uuid.NewV4().String() - w.commands = append(w.commands, &command{ - id: id, - matcher: m, - handler: f, - }) - - return id -} - -func (w *wsman) CommandByText(cmd string) *command { - for _, c := range w.commands { - if c.matcher(cmd) { - return c - } - } - return nil -} - -func (w *wsman) CommandByID(id string) *command { - for _, c := range w.commands { - if c.id == id { - return c - } - } - return nil -} - -func (w *wsman) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - rw.Header().Add("Content-Type", "application/soap+xml") - - defer r.Body.Close() - env, err := xmlpath.Parse(r.Body) - - if err != nil { - return - } - - action := readAction(env) - switch { - case strings.HasSuffix(action, "transfer/Create"): - // create a new shell - - rw.Write([]byte(` - - 123 - `)) - - case strings.HasSuffix(action, "shell/Command"): - // execute on behalf of the client - text := readCommand(env) - cmd := w.CommandByText(text) - - if cmd == nil { - fmt.Printf("I don't know this command: Command=%s\n", text) - rw.WriteHeader(http.StatusInternalServerError) - return - } - - rw.Write([]byte(fmt.Sprintf(` - - %s - `, cmd.id))) - - case strings.HasSuffix(action, "shell/Receive"): - // client ready to receive the results - - id := readCommandIDFromDesiredStream(env) - cmd := w.CommandByID(id) - - if cmd == nil { - fmt.Printf("I don't know this command: CommandId=%s\n", id) - rw.WriteHeader(http.StatusInternalServerError) - return - } - - stdout := new(bytes.Buffer) - stderr := new(bytes.Buffer) - result := cmd.handler(stdout, stderr) - content := base64.StdEncoding.EncodeToString(stdout.Bytes()) - - rw.Write([]byte(fmt.Sprintf(` - - - %s - - - - %d - - - `, id, content, id, id, result))) - - case strings.HasSuffix(action, "shell/Signal"): - // end of the shell command - rw.WriteHeader(http.StatusOK) - case strings.HasSuffix(action, "transfer/Delete"): - // end 
of the session - rw.WriteHeader(http.StatusOK) - default: - fmt.Printf("I don't know this action: %s\n", action) - rw.WriteHeader(http.StatusInternalServerError) - } -} - -func readAction(env *xmlpath.Node) string { - xpath, err := xmlpath.CompileWithNamespace( - "//a:Action", soap.GetAllNamespaces()) - - if err != nil { - return "" - } - - action, _ := xpath.String(env) - return action -} - -func readCommand(env *xmlpath.Node) string { - xpath, err := xmlpath.CompileWithNamespace( - "//rsp:Command", soap.GetAllNamespaces()) - - if err != nil { - return "" - } - - command, _ := xpath.String(env) - if unquoted, err := strconv.Unquote(command); err == nil { - return unquoted - } - return command -} - -func readCommandIDFromDesiredStream(env *xmlpath.Node) string { - xpath, err := xmlpath.CompileWithNamespace( - "//rsp:DesiredStream/@CommandId", soap.GetAllNamespaces()) - - if err != nil { - return "" - } - - id, _ := xpath.String(env) - return id -} diff --git a/vendor/github.com/hashicorp/atlas-go/LICENSE b/vendor/github.com/hashicorp/atlas-go/LICENSE deleted file mode 100644 index 82b4de97c7e..00000000000 --- a/vendor/github.com/hashicorp/atlas-go/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. 
that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. 
- - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. 
You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/atlas-go/archive/archive.go b/vendor/github.com/hashicorp/atlas-go/archive/archive.go deleted file mode 100644 index 0a025b21f7e..00000000000 --- a/vendor/github.com/hashicorp/atlas-go/archive/archive.go +++ /dev/null @@ -1,528 +0,0 @@ -// archive is package that helps create archives in a format that -// Atlas expects with its various upload endpoints. -package archive - -import ( - "archive/tar" - "bufio" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" -) - -// Archive is the resulting archive. The archive data is generally streamed -// so the io.ReadCloser can be used to backpressure the archive progress -// and avoid memory pressure. -type Archive struct { - io.ReadCloser - - Size int64 - Metadata map[string]string -} - -// ArchiveOpts are the options for defining how the archive will be built. -type ArchiveOpts struct { - // Exclude and Include are filters of files to include/exclude in - // the archive when creating it from a directory. These filters should - // be relative to the packaging directory and should be basic glob - // patterns. - Exclude []string - Include []string - - // Extra is a mapping of extra files to include within the archive. The - // key should be the path within the archive and the value should be - // an absolute path to the file to put into the archive. These extra - // files will override any other files in the archive. - Extra map[string]string - - // VCS, if true, will detect and use a VCS system to determine what - // files to include the archive. - VCS bool -} - -// IsSet says whether any options were set. -func (o *ArchiveOpts) IsSet() bool { - return len(o.Exclude) > 0 || len(o.Include) > 0 || o.VCS -} - -// Constants related to setting special values for Extra in ArchiveOpts. -const ( - // ExtraEntryDir just creates the Extra key as a directory entry. 
- ExtraEntryDir = "" -) - -// CreateArchive takes the given path and ArchiveOpts and archives it. -// -// The archive will be fully completed and put into a temporary file. -// This must be done to retrieve the content length of the archive which -// is needed for almost all operations involving archives with Atlas. Because -// of this, sufficient disk space will be required to buffer the archive. -func CreateArchive(path string, opts *ArchiveOpts) (*Archive, error) { - log.Printf("[INFO] creating archive from %s", path) - - // Dereference any symlinks and determine the real path and info - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - if fi.Mode()&os.ModeSymlink != 0 { - path, fi, err = readLinkFull(path, fi) - if err != nil { - return nil, err - } - } - - // Windows - path = filepath.ToSlash(path) - - // Direct file paths cannot have archive options - if !fi.IsDir() && opts.IsSet() { - return nil, fmt.Errorf( - "options such as exclude, include, and VCS can't be set when " + - "the path is a file.") - } - - if fi.IsDir() { - return archiveDir(path, opts) - } else { - return archiveFile(path) - } -} - -func archiveFile(path string) (*Archive, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - - if _, err := gzip.NewReader(f); err == nil { - // Reset the read offset for future reading - if _, err := f.Seek(0, 0); err != nil { - f.Close() - return nil, err - } - - // Get the file info for the size - fi, err := f.Stat() - if err != nil { - f.Close() - return nil, err - } - - // This is a gzip file, let it through. - return &Archive{ReadCloser: f, Size: fi.Size()}, nil - } - - // Close the file, no use for it anymore - f.Close() - - // We have a single file that is not gzipped. Compress it. - path, err = filepath.Abs(path) - if err != nil { - return nil, err - } - - // Act like we're compressing a directory, but only include this one - // file. 
- return archiveDir(filepath.Dir(path), &ArchiveOpts{ - Include: []string{filepath.Base(path)}, - }) -} - -func archiveDir(root string, opts *ArchiveOpts) (*Archive, error) { - - var vcsInclude []string - var metadata map[string]string - if opts.VCS { - var err error - - if err = vcsPreflight(root); err != nil { - return nil, err - } - - vcsInclude, err = vcsFiles(root) - if err != nil { - return nil, err - } - - metadata, err = vcsMetadata(root) - if err != nil { - return nil, err - } - } - - // Make sure the root path is absolute - root, err := filepath.Abs(root) - if err != nil { - return nil, err - } - - // Create the temporary file that we'll send the archive data to. - archiveF, err := ioutil.TempFile("", "atlas-archive") - if err != nil { - return nil, err - } - - // Create the wrapper for the result which will automatically - // remove the temporary file on close. - archiveWrapper := &readCloseRemover{F: archiveF} - - // Buffer the writer so that we can push as much data to disk at - // a time as possible. 4M should be good. - bufW := bufio.NewWriterSize(archiveF, 4096*1024) - - // Gzip compress all the output data - gzipW := gzip.NewWriter(bufW) - - // Tar the file contents - tarW := tar.NewWriter(gzipW) - - // First, walk the path and do the normal files - werr := filepath.Walk(root, copyDirWalkFn( - tarW, root, "", opts, vcsInclude)) - if werr == nil { - // If that succeeded, handle the extra files - werr = copyExtras(tarW, opts.Extra) - } - - // Attempt to close all the things. If we get an error on the way - // and we haven't had an error yet, then record that as the critical - // error. But we still try to close everything. 
- - // Close the tar writer - if err := tarW.Close(); err != nil && werr == nil { - werr = err - } - - // Close the gzip writer - if err := gzipW.Close(); err != nil && werr == nil { - werr = err - } - - // Flush the buffer - if err := bufW.Flush(); err != nil && werr == nil { - werr = err - } - - // If we had an error, then close the file (removing it) and - // return the error. - if werr != nil { - archiveWrapper.Close() - return nil, werr - } - - // Seek to the beginning - if _, err := archiveWrapper.F.Seek(0, 0); err != nil { - archiveWrapper.Close() - return nil, err - } - - // Get the file information so we can get the size - fi, err := archiveWrapper.F.Stat() - if err != nil { - archiveWrapper.Close() - return nil, err - } - - return &Archive{ - ReadCloser: archiveWrapper, - Size: fi.Size(), - Metadata: metadata, - }, nil -} - -func copyDirWalkFn( - tarW *tar.Writer, root string, prefix string, - opts *ArchiveOpts, vcsInclude []string) filepath.WalkFunc { - - errFunc := func(err error) filepath.WalkFunc { - return func(string, os.FileInfo, error) error { - return err - } - } - - // Windows - root = filepath.ToSlash(root) - - var includeMap map[string]struct{} - - // If we have an include/exclude pattern set, then setup the lookup - // table to determine what we want to include. - if opts != nil && len(opts.Include) > 0 { - includeMap = make(map[string]struct{}) - for _, pattern := range opts.Include { - matches, err := filepath.Glob(filepath.Join(root, pattern)) - if err != nil { - return errFunc(fmt.Errorf( - "error checking include glob '%s': %s", - pattern, err)) - } - - for _, path := range matches { - // Windows - path = filepath.ToSlash(path) - subpath, err := filepath.Rel(root, path) - subpath = filepath.ToSlash(subpath) - - if err != nil { - return errFunc(err) - } - - for { - includeMap[subpath] = struct{}{} - subpath = filepath.Dir(subpath) - if subpath == "." 
{ - break - } - } - } - } - } - - return func(path string, info os.FileInfo, err error) error { - path = filepath.ToSlash(path) - - if err != nil { - return err - } - - // Get the relative path from the path since it contains the root - // plus the path. - subpath, err := filepath.Rel(root, path) - if err != nil { - return err - } - if subpath == "." { - return nil - } - if prefix != "" { - subpath = filepath.Join(prefix, subpath) - } - // Windows - subpath = filepath.ToSlash(subpath) - - // If we have a list of VCS files, check that first - skip := false - if len(vcsInclude) > 0 { - skip = true - for _, f := range vcsInclude { - if f == subpath { - skip = false - break - } - - if info.IsDir() && strings.HasPrefix(f, subpath+"/") { - skip = false - break - } - } - } - - // If include is present, we only include what is listed - if len(includeMap) > 0 { - if _, ok := includeMap[subpath]; !ok { - skip = true - } - } - - // If exclude, it is one last gate to excluding files - if opts != nil { - for _, exclude := range opts.Exclude { - match, err := filepath.Match(exclude, subpath) - if err != nil { - return err - } - if match { - skip = true - break - } - } - } - - // If we have to skip this file, then skip it, properly skipping - // children if we're a directory. - if skip { - if info.IsDir() { - return filepath.SkipDir - } - - return nil - } - - // If this is a symlink, then we need to get the symlink target - // rather than the symlink itself. - if info.Mode()&os.ModeSymlink != 0 { - target, info, err := readLinkFull(path, info) - if err != nil { - return err - } - - // Copy the concrete entry for this path. This will either - // be the file itself or just a directory entry. 
- if err := copyConcreteEntry(tarW, subpath, target, info); err != nil { - return err - } - - if info.IsDir() { - return filepath.Walk(target, copyDirWalkFn( - tarW, target, subpath, opts, vcsInclude)) - } - } - - return copyConcreteEntry(tarW, subpath, path, info) - } -} - -func copyConcreteEntry( - tarW *tar.Writer, entry string, - path string, info os.FileInfo) error { - // Windows - path = filepath.ToSlash(path) - - // Build the file header for the tar entry - header, err := tar.FileInfoHeader(info, path) - if err != nil { - return fmt.Errorf( - "failed creating archive header: %s", path) - } - - // Modify the header to properly be the full entry name - header.Name = entry - if info.IsDir() { - header.Name += "/" - } - - // Write the header first to the archive. - if err := tarW.WriteHeader(header); err != nil { - return fmt.Errorf( - "failed writing archive header: %s", path) - } - - // If it is a directory, then we're done (no body to write) - if info.IsDir() { - return nil - } - - // Open the real file to write the data - f, err := os.Open(path) - if err != nil { - return fmt.Errorf( - "failed opening file '%s' to write compressed archive.", path) - } - defer f.Close() - - if _, err = io.Copy(tarW, f); err != nil { - return fmt.Errorf( - "failed copying file to archive: %s", path) - } - - return nil -} - -func copyExtras(w *tar.Writer, extra map[string]string) error { - var tmpDir string - defer func() { - if tmpDir != "" { - os.RemoveAll(tmpDir) - } - }() - - for entry, path := range extra { - // If the path is empty, then we set it to a generic empty directory - if path == "" { - // If tmpDir is still empty, then we create an empty dir - if tmpDir == "" { - td, err := ioutil.TempDir("", "archive") - if err != nil { - return err - } - - tmpDir = td - } - - path = tmpDir - } - - info, err := os.Stat(path) - if err != nil { - return err - } - - // No matter what, write the entry. If this is a directory, - // it'll just write the directory header. 
- if err := copyConcreteEntry(w, entry, path, info); err != nil { - return err - } - - // If this is a directory, then we walk the internal contents - // and copy those as well. - if info.IsDir() { - err := filepath.Walk(path, copyDirWalkFn( - w, path, entry, nil, nil)) - if err != nil { - return err - } - } - } - - return nil -} - -func readLinkFull(path string, info os.FileInfo) (string, os.FileInfo, error) { - // Read the symlink continously until we reach a concrete file. - target := path - tries := 0 - for info.Mode()&os.ModeSymlink != 0 { - var err error - target, err = os.Readlink(target) - if err != nil { - return "", nil, err - } - if !filepath.IsAbs(target) { - target, err = filepath.Abs(target) - if err != nil { - return "", nil, err - } - } - info, err = os.Lstat(target) - if err != nil { - return "", nil, err - } - - tries++ - if tries > 100 { - return "", nil, fmt.Errorf( - "Symlink for %s is too deep, over 100 levels deep", - path) - } - } - - return target, info, nil -} - -// readCloseRemover is an io.ReadCloser implementation that will remove -// the file on Close(). We use this to clean up our temporary file for -// the archive. -type readCloseRemover struct { - F *os.File -} - -func (r *readCloseRemover) Read(p []byte) (int, error) { - return r.F.Read(p) -} - -func (r *readCloseRemover) Close() error { - // First close the file - err := r.F.Close() - - // Next make sure to remove it, or at least try, regardless of error - // above. 
- os.Remove(r.F.Name()) - - return err -} diff --git a/vendor/github.com/hashicorp/atlas-go/archive/vcs.go b/vendor/github.com/hashicorp/atlas-go/archive/vcs.go deleted file mode 100644 index 479a5bcdae9..00000000000 --- a/vendor/github.com/hashicorp/atlas-go/archive/vcs.go +++ /dev/null @@ -1,365 +0,0 @@ -package archive - -import ( - "bufio" - "bytes" - "fmt" - "log" - "os" - "os/exec" - "path/filepath" - "strings" - - version "github.com/hashicorp/go-version" -) - -// VCS is a struct that explains how to get the file list for a given -// VCS. -type VCS struct { - Name string - - // Detect is a list of files/folders that if they exist, signal that - // this VCS is the VCS in use. - Detect []string - - // Files returns the files that are under version control for the - // given path. - Files VCSFilesFunc - - // Metadata returns arbitrary metadata about the underlying VCS for the - // given path. - Metadata VCSMetadataFunc - - // Preflight is a function to run before looking for VCS files. - Preflight VCSPreflightFunc -} - -// VCSList is the list of VCS we recognize. -var VCSList = []*VCS{ - &VCS{ - Name: "git", - Detect: []string{".git/"}, - Preflight: gitPreflight, - Files: vcsFilesCmd("git", "ls-files"), - Metadata: gitMetadata, - }, - &VCS{ - Name: "hg", - Detect: []string{".hg/"}, - Files: vcsTrimCmd(vcsFilesCmd("hg", "locate", "-f", "--include", ".")), - }, - &VCS{ - Name: "svn", - Detect: []string{".svn/"}, - Files: vcsFilesCmd("svn", "ls"), - }, -} - -// VCSFilesFunc is the callback invoked to return the files in the VCS. -// -// The return value should be paths relative to the given path. -type VCSFilesFunc func(string) ([]string, error) - -// VCSMetadataFunc is the callback invoked to get arbitrary information about -// the current VCS. -// -// The return value should be a map of key-value pairs. 
-type VCSMetadataFunc func(string) (map[string]string, error) - -// VCSPreflightFunc is a function that runs before VCS detection to be -// configured by the user. It may be used to check if pre-requisites (like the -// actual VCS) are installed or that a program is at the correct version. If an -// error is returned, the VCS will not be processed and the error will be -// returned up the stack. -// -// The given argument is the path where the VCS is running. -type VCSPreflightFunc func(string) error - -// vcsDetect detects the VCS that is used for path. -func vcsDetect(path string) (*VCS, error) { - dir := path - for { - for _, v := range VCSList { - for _, f := range v.Detect { - check := filepath.Join(dir, f) - if _, err := os.Stat(check); err == nil { - return v, nil - } - } - } - lastDir := dir - dir = filepath.Dir(dir) - if dir == lastDir { - break - } - } - - return nil, fmt.Errorf("no VCS found for path: %s", path) -} - -// vcsPreflight returns the metadata for the VCS directory path. -func vcsPreflight(path string) error { - vcs, err := vcsDetect(path) - if err != nil { - return fmt.Errorf("error detecting VCS: %s", err) - } - - if vcs.Preflight != nil { - return vcs.Preflight(path) - } - - return nil -} - -// vcsFiles returns the files for the VCS directory path. -func vcsFiles(path string) ([]string, error) { - vcs, err := vcsDetect(path) - if err != nil { - return nil, fmt.Errorf("error detecting VCS: %s", err) - } - - if vcs.Files != nil { - return vcs.Files(path) - } - - return nil, nil -} - -// vcsFilesCmd creates a Files-compatible function that reads the files -// by executing the command in the repository path and returning each -// line in stdout. -func vcsFilesCmd(args ...string) VCSFilesFunc { - return func(path string) ([]string, error) { - var stderr, stdout bytes.Buffer - - cmd := exec.Command(args[0], args[1:]...) 
- cmd.Dir = path - cmd.Stdout = &stdout - cmd.Stderr = &stderr - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf( - "error executing %s: %s", - strings.Join(args, " "), - err) - } - - // Read each line of output as a path - result := make([]string, 0, 100) - scanner := bufio.NewScanner(&stdout) - for scanner.Scan() { - result = append(result, scanner.Text()) - } - - // Always use *nix-style paths (for Windows) - for idx, value := range result { - result[idx] = filepath.ToSlash(value) - } - - return result, nil - } -} - -// vcsTrimCmd trims the prefix from the paths returned by another VCSFilesFunc. -// This should be used to wrap another function if the return value is known -// to have full paths rather than relative paths -func vcsTrimCmd(f VCSFilesFunc) VCSFilesFunc { - return func(path string) ([]string, error) { - absPath, err := filepath.Abs(path) - if err != nil { - return nil, fmt.Errorf( - "error expanding VCS path: %s", err) - } - - // Now that we have the root path, get the inner files - fs, err := f(path) - if err != nil { - return nil, err - } - - // Trim the root path from the files - result := make([]string, 0, len(fs)) - for _, f := range fs { - if !strings.HasPrefix(f, absPath) { - continue - } - - f, err = filepath.Rel(absPath, f) - if err != nil { - return nil, fmt.Errorf( - "error determining path: %s", err) - } - - result = append(result, f) - } - - return result, nil - } -} - -// vcsMetadata returns the metadata for the VCS directory path. -func vcsMetadata(path string) (map[string]string, error) { - vcs, err := vcsDetect(path) - if err != nil { - return nil, fmt.Errorf("error detecting VCS: %s", err) - } - - if vcs.Metadata != nil { - return vcs.Metadata(path) - } - - return nil, nil -} - -const ignorableDetachedHeadError = "HEAD is not a symbolic ref" - -// gitBranch gets and returns the current git branch for the Git repository -// at the given path. It is assumed that the VCS is git. 
-func gitBranch(path string) (string, error) { - var stderr, stdout bytes.Buffer - - cmd := exec.Command("git", "symbolic-ref", "--short", "HEAD") - cmd.Dir = path - cmd.Stdout = &stdout - cmd.Stderr = &stderr - if err := cmd.Run(); err != nil { - if strings.Contains(stderr.String(), ignorableDetachedHeadError) { - return "", nil - } else { - return "", - fmt.Errorf("error getting git branch: %s\nstdout: %s\nstderr: %s", - err, stdout.String(), stderr.String()) - } - } - - branch := strings.TrimSpace(stdout.String()) - - return branch, nil -} - -// gitCommit gets the SHA of the latest commit for the Git repository at the -// given path. It is assumed that the VCS is git. -func gitCommit(path string) (string, error) { - var stderr, stdout bytes.Buffer - - cmd := exec.Command("git", "log", "-n1", "--pretty=format:%H") - cmd.Dir = path - cmd.Stdout = &stdout - cmd.Stderr = &stderr - if err := cmd.Run(); err != nil { - return "", fmt.Errorf("error getting git commit: %s\nstdout: %s\nstderr: %s", - err, stdout.String(), stderr.String()) - } - - commit := strings.TrimSpace(stdout.String()) - - return commit, nil -} - -// gitRemotes gets and returns a map of all remotes for the Git repository. The -// map key is the name of the remote of the format "remote.NAME" and the value -// is the endpoint for the remote. It is assumed that the VCS is git. 
-func gitRemotes(path string) (map[string]string, error) { - var stderr, stdout bytes.Buffer - - cmd := exec.Command("git", "remote", "-v") - cmd.Dir = path - cmd.Stdout = &stdout - cmd.Stderr = &stderr - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("error getting git remotes: %s\nstdout: %s\nstderr: %s", - err, stdout.String(), stderr.String()) - } - - // Read each line of output as a remote - result := make(map[string]string) - scanner := bufio.NewScanner(&stdout) - for scanner.Scan() { - line := scanner.Text() - split := strings.Split(line, "\t") - - if len(split) < 2 { - return nil, fmt.Errorf("invalid response from git remote: %s", stdout.String()) - } - - remote := fmt.Sprintf("remote.%s", strings.TrimSpace(split[0])) - if _, ok := result[remote]; !ok { - // https://github.com/foo/bar.git (fetch) #=> https://github.com/foo/bar.git - urlSplit := strings.Split(split[1], " ") - result[remote] = strings.TrimSpace(urlSplit[0]) - } - } - - return result, nil -} - -// gitPreflight is the pre-flight command that runs for Git-based VCSs -func gitPreflight(path string) error { - var stderr, stdout bytes.Buffer - - cmd := exec.Command("git", "--version") - cmd.Dir = path - cmd.Stdout = &stdout - cmd.Stderr = &stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("error getting git version: %s\nstdout: %s\nstderr: %s", - err, stdout.String(), stderr.String()) - } - - // Check if the output is valid - output := strings.Split(strings.TrimSpace(stdout.String()), " ") - if len(output) < 1 { - log.Printf("[WARN] could not extract version output from Git") - return nil - } - - // Parse the version - gitv, err := version.NewVersion(output[len(output)-1]) - if err != nil { - log.Printf("[WARN] could not parse version output from Git") - return nil - } - - constraint, err := version.NewConstraint("> 1.8") - if err != nil { - log.Printf("[WARN] could not create version constraint to check") - return nil - } - if !constraint.Check(gitv) { - return 
fmt.Errorf("git version (%s) is too old, please upgrade", gitv.String()) - } - - return nil -} - -// gitMetadata is the function to parse and return Git metadata -func gitMetadata(path string) (map[string]string, error) { - // Future-self note: Git is NOT threadsafe, so we cannot run these - // operations in go routines or else you're going to have a really really - // bad day and Panda.State == "Sad" :( - - branch, err := gitBranch(path) - if err != nil { - return nil, err - } - - commit, err := gitCommit(path) - if err != nil { - return nil, err - } - - remotes, err := gitRemotes(path) - if err != nil { - return nil, err - } - - // Make the return result (we already know the size) - result := make(map[string]string, 2+len(remotes)) - - result["branch"] = branch - result["commit"] = commit - for remote, value := range remotes { - result[remote] = value - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/application.go b/vendor/github.com/hashicorp/atlas-go/v1/application.go deleted file mode 100644 index 42cf8820126..00000000000 --- a/vendor/github.com/hashicorp/atlas-go/v1/application.go +++ /dev/null @@ -1,164 +0,0 @@ -package atlas - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "log" -) - -// appWrapper is the API wrapper since the server wraps the resulting object. -type appWrapper struct { - Application *App `json:"application"` -} - -// App represents a single instance of an application on the Atlas server. -type App struct { - // User is the namespace (username or organization) under which the - // Atlas application resides - User string `json:"username"` - - // Name is the name of the application - Name string `json:"name"` -} - -// Slug returns the slug format for this App (User/Name) -func (a *App) Slug() string { - return fmt.Sprintf("%s/%s", a.User, a.Name) -} - -// App gets the App by the given user space and name. 
In the event the App is -// not found (404), or for any other non-200 responses, an error is returned. -func (c *Client) App(user, name string) (*App, error) { - log.Printf("[INFO] getting application %s/%s", user, name) - - endpoint := fmt.Sprintf("/api/v1/vagrant/applications/%s/%s", user, name) - request, err := c.Request("GET", endpoint, nil) - if err != nil { - return nil, err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return nil, err - } - - var app App - if err := decodeJSON(response, &app); err != nil { - return nil, err - } - - return &app, nil -} - -// CreateApp creates a new App under the given user with the given name. If the -// App is created successfully, it is returned. If the server returns any -// errors, an error is returned. -func (c *Client) CreateApp(user, name string) (*App, error) { - log.Printf("[INFO] creating application %s/%s", user, name) - - body, err := json.Marshal(&appWrapper{&App{ - User: user, - Name: name, - }}) - if err != nil { - return nil, err - } - - endpoint := "/api/v1/vagrant/applications" - request, err := c.Request("POST", endpoint, &RequestOptions{ - Body: bytes.NewReader(body), - Headers: map[string]string{ - "Content-Type": "application/json", - }, - }) - if err != nil { - return nil, err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return nil, err - } - - var app App - if err := decodeJSON(response, &app); err != nil { - return nil, err - } - - return &app, nil -} - -// appVersion represents a specific version of an App in Atlas. It is actually -// an upload container/wrapper. -type appVersion struct { - UploadPath string `json:"upload_path"` - Token string `json:"token"` - Version uint64 `json:"version"` -} - -// appMetadataWrapper is a wrapper around a map the prefixes the json key with -// "metadata" when marshalled to format requests to the API properly. 
-type appMetadataWrapper struct { - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -// UploadApp creates and uploads a new version for the App. If the server does not -// find the application, an error is returned. If the server does not accept the -// data, an error is returned. -// -// It is the responsibility of the caller to create a properly-formed data -// object; this method blindly passes along the contents of the io.Reader. -func (c *Client) UploadApp(app *App, metadata map[string]interface{}, - data io.Reader, size int64) (uint64, error) { - - log.Printf("[INFO] uploading application %s (%d bytes) with metadata %q", - app.Slug(), size, metadata) - - endpoint := fmt.Sprintf("/api/v1/vagrant/applications/%s/%s/versions", - app.User, app.Name) - - // If metadata was given, setup the RequestOptions to pass in the metadata - // with the request. - var ro *RequestOptions - if metadata != nil { - // wrap the struct into the correct JSON format - wrapper := struct { - Application *appMetadataWrapper `json:"application"` - }{ - &appMetadataWrapper{metadata}, - } - m, err := json.Marshal(wrapper) - if err != nil { - return 0, err - } - - // Create the request options. 
- ro = &RequestOptions{ - Body: bytes.NewReader(m), - BodyLength: int64(len(m)), - } - } - - request, err := c.Request("POST", endpoint, ro) - if err != nil { - return 0, err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return 0, err - } - - var av appVersion - if err := decodeJSON(response, &av); err != nil { - return 0, err - } - - if err := c.putFile(av.UploadPath, data, size); err != nil { - return 0, err - } - - return av.Version, nil -} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/artifact.go b/vendor/github.com/hashicorp/atlas-go/v1/artifact.go deleted file mode 100644 index 0a8dc4b0f18..00000000000 --- a/vendor/github.com/hashicorp/atlas-go/v1/artifact.go +++ /dev/null @@ -1,248 +0,0 @@ -package atlas - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "log" - "net/url" -) - -// Artifact represents a single instance of an artifact. -type Artifact struct { - // User and name are self-explanatory. Tag is the combination - // of both into "username/name" - User string `json:"username"` - Name string `json:"name"` - Tag string `json:",omitempty"` -} - -// ArtifactVersion represents a single version of an artifact. -type ArtifactVersion struct { - User string `json:"username"` - Name string `json:"name"` - Tag string `json:",omitempty"` - Type string `json:"artifact_type"` - ID string `json:"id"` - Version int `json:"version"` - Metadata map[string]string `json:"metadata"` - File bool `json:"file"` - Slug string `json:"slug"` - - UploadPath string `json:"upload_path"` - UploadToken string `json:"upload_token"` -} - -// ArtifactSearchOpts are the options used to search for an artifact. -type ArtifactSearchOpts struct { - User string - Name string - Type string - - Build string - Version string - Metadata map[string]string -} - -// UploadArtifactOpts are the options used to upload an artifact. 
-type UploadArtifactOpts struct { - User string - Name string - Type string - ID string - File io.Reader - FileSize int64 - Metadata map[string]string - BuildID int - CompileID int -} - -// MarshalJSON converts the UploadArtifactOpts into a JSON struct. -func (o *UploadArtifactOpts) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "artifact_version": map[string]interface{}{ - "id": o.ID, - "file": o.File != nil, - "metadata": o.Metadata, - "build_id": o.BuildID, - "compile_id": o.CompileID, - }, - }) -} - -// This is the value that should be used for metadata in ArtifactSearchOpts -// if you don't care what the value is. -const MetadataAnyValue = "943febbf-589f-401b-8f25-58f6d8786848" - -// Artifact finds the Atlas artifact by the given name and returns it. Any -// errors that occur are returned, including ErrAuth and ErrNotFound special -// exceptions which the user may want to handle separately. -func (c *Client) Artifact(user, name string) (*Artifact, error) { - endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s", user, name) - request, err := c.Request("GET", endpoint, nil) - if err != nil { - return nil, err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return nil, err - } - - var aw artifactWrapper - if err := decodeJSON(response, &aw); err != nil { - return nil, err - } - - return aw.Artifact, nil -} - -// ArtifactSearch searches Atlas for the given ArtifactSearchOpts and returns -// a slice of ArtifactVersions. 
-func (c *Client) ArtifactSearch(opts *ArtifactSearchOpts) ([]*ArtifactVersion, error) { - log.Printf("[INFO] searching artifacts: %#v", opts) - - params := make(map[string]string) - if opts.Version != "" { - params["version"] = opts.Version - } - if opts.Build != "" { - params["build"] = opts.Build - } - - i := 1 - for k, v := range opts.Metadata { - prefix := fmt.Sprintf("metadata.%d.", i) - params[prefix+"key"] = k - if v != MetadataAnyValue { - params[prefix+"value"] = v - } - - i++ - } - - endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s/%s/search", - opts.User, opts.Name, opts.Type) - request, err := c.Request("GET", endpoint, &RequestOptions{ - Params: params, - }) - if err != nil { - return nil, err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return nil, err - } - - var w artifactSearchWrapper - if err := decodeJSON(response, &w); err != nil { - return nil, err - } - - return w.Versions, nil -} - -// CreateArtifact creates and returns a new Artifact in Atlas. Any errors that -// occurr are returned. -func (c *Client) CreateArtifact(user, name string) (*Artifact, error) { - log.Printf("[INFO] creating artifact: %s/%s", user, name) - body, err := json.Marshal(&artifactWrapper{&Artifact{ - User: user, - Name: name, - }}) - if err != nil { - return nil, err - } - - endpoint := "/api/v1/artifacts" - request, err := c.Request("POST", endpoint, &RequestOptions{ - Body: bytes.NewReader(body), - Headers: map[string]string{ - "Content-Type": "application/json", - }, - }) - if err != nil { - return nil, err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return nil, err - } - - var aw artifactWrapper - if err := decodeJSON(response, &aw); err != nil { - return nil, err - } - - return aw.Artifact, nil -} - -// ArtifactFileURL is a helper method for getting the URL for an ArtifactVersion -// from the Client. 
-func (c *Client) ArtifactFileURL(av *ArtifactVersion) (*url.URL, error) { - if !av.File { - return nil, nil - } - - u := *c.URL - u.Path = fmt.Sprintf("/api/v1/artifacts/%s/%s/%s/%d/file", - av.User, av.Name, av.Type, av.Version) - return &u, nil -} - -// UploadArtifact streams the upload of a file on disk using the given -// UploadArtifactOpts. Any errors that occur are returned. -func (c *Client) UploadArtifact(opts *UploadArtifactOpts) (*ArtifactVersion, error) { - log.Printf("[INFO] uploading artifact: %s/%s (%s)", opts.User, opts.Name, opts.Type) - - endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s/%s", - opts.User, opts.Name, opts.Type) - - body, err := json.Marshal(opts) - if err != nil { - return nil, err - } - - request, err := c.Request("POST", endpoint, &RequestOptions{ - Body: bytes.NewReader(body), - Headers: map[string]string{ - "Content-Type": "application/json", - }, - }) - if err != nil { - return nil, err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return nil, err - } - - var av ArtifactVersion - if err := decodeJSON(response, &av); err != nil { - return nil, err - } - - if opts.File != nil { - if err := c.putFile(av.UploadPath, opts.File, opts.FileSize); err != nil { - return nil, err - } - } - - return &av, nil -} - -type artifactWrapper struct { - Artifact *Artifact `json:"artifact"` -} - -type artifactSearchWrapper struct { - Versions []*ArtifactVersion -} - -type artifactVersionWrapper struct { - Version *ArtifactVersion -} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/authentication.go b/vendor/github.com/hashicorp/atlas-go/v1/authentication.go deleted file mode 100644 index 613eaefcc75..00000000000 --- a/vendor/github.com/hashicorp/atlas-go/v1/authentication.go +++ /dev/null @@ -1,88 +0,0 @@ -package atlas - -import ( - "fmt" - "log" - "net/url" - "strings" -) - -// Login accepts a username and password as string arguments. Both username and -// password must be non-nil, non-empty values. 
Atlas does not permit -// passwordless authentication. -// -// If authentication is unsuccessful, an error is returned with the body of the -// error containing the server's response. -// -// If authentication is successful, this method sets the Token value on the -// Client and returns the Token as a string. -func (c *Client) Login(username, password string) (string, error) { - log.Printf("[INFO] logging in user %s", username) - - if len(username) == 0 { - return "", fmt.Errorf("client: missing username") - } - - if len(password) == 0 { - return "", fmt.Errorf("client: missing password") - } - - // Make a request - request, err := c.Request("POST", "/api/v1/authenticate", &RequestOptions{ - Body: strings.NewReader(url.Values{ - "user[login]": []string{username}, - "user[password]": []string{password}, - "user[description]": []string{"Created by the Atlas Go Client"}, - }.Encode()), - Headers: map[string]string{ - "Content-Type": "application/x-www-form-urlencoded", - }, - }) - if err != nil { - return "", err - } - - // Make the request - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return "", err - } - - // Decode the body - var tResponse struct{ Token string } - if err := decodeJSON(response, &tResponse); err != nil { - return "", nil - } - - // Set the token - log.Printf("[DEBUG] setting atlas token (%s)", maskString(tResponse.Token)) - c.Token = tResponse.Token - - // Return the token - return c.Token, nil -} - -// Verify verifies that authentication and communication with Atlas -// is properly functioning. -func (c *Client) Verify() error { - log.Printf("[INFO] verifying authentication") - - request, err := c.Request("GET", "/api/v1/authenticate", nil) - if err != nil { - return err - } - - _, err = checkResp(c.HTTPClient.Do(request)) - return err -} - -// maskString masks all but the first few characters of a string for display -// output. 
This is useful for tokens so we can display them to the user without -// showing the full output. -func maskString(s string) string { - if len(s) <= 3 { - return "*** (masked)" - } - - return s[0:3] + "*** (masked)" -} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/build_config.go b/vendor/github.com/hashicorp/atlas-go/v1/build_config.go deleted file mode 100644 index fbcd9127083..00000000000 --- a/vendor/github.com/hashicorp/atlas-go/v1/build_config.go +++ /dev/null @@ -1,193 +0,0 @@ -package atlas - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "log" -) - -// bcWrapper is the API wrapper since the server wraps the resulting object. -type bcWrapper struct { - BuildConfig *BuildConfig `json:"build_configuration"` -} - -// Atlas expects a list of key/value vars -type BuildVar struct { - Key string `json:"key"` - Value string `json:"value"` - Sensitive bool `json:"sensitive"` -} -type BuildVars []BuildVar - -// BuildConfig represents a Packer build configuration. -type BuildConfig struct { - // User is the namespace under which the build config lives - User string `json:"username"` - - // Name is the actual name of the build config, unique in the scope - // of the username. - Name string `json:"name"` -} - -// Slug returns the slug format for this BuildConfig (User/Name) -func (b *BuildConfig) Slug() string { - return fmt.Sprintf("%s/%s", b.User, b.Name) -} - -// BuildConfigVersion represents a single uploaded (or uploadable) version -// of a build configuration. -type BuildConfigVersion struct { - // The fields below are the username/name combo to uniquely identify - // a build config. - User string `json:"username"` - Name string `json:"name"` - - // Builds is the list of builds that this version supports. 
- Builds []BuildConfigBuild -} - -// Slug returns the slug format for this BuildConfigVersion (User/Name) -func (bv *BuildConfigVersion) Slug() string { - return fmt.Sprintf("%s/%s", bv.User, bv.Name) -} - -// BuildConfigBuild is a single build that is present in an uploaded -// build configuration. -type BuildConfigBuild struct { - // Name is a unique name for this build - Name string `json:"name"` - - // Type is the type of builder that this build needs to run on, - // such as "amazon-ebs" or "qemu". - Type string `json:"type"` - - // Artifact is true if this build results in one or more artifacts - // being sent to Atlas - Artifact bool `json:"artifact"` -} - -// BuildConfig gets a single build configuration by user and name. -func (c *Client) BuildConfig(user, name string) (*BuildConfig, error) { - log.Printf("[INFO] getting build configuration %s/%s", user, name) - - endpoint := fmt.Sprintf("/api/v1/packer/build-configurations/%s/%s", user, name) - request, err := c.Request("GET", endpoint, nil) - if err != nil { - return nil, err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return nil, err - } - - var bc BuildConfig - if err := decodeJSON(response, &bc); err != nil { - return nil, err - } - - return &bc, nil -} - -// CreateBuildConfig creates a new build configuration. 
-func (c *Client) CreateBuildConfig(user, name string) (*BuildConfig, error) { - log.Printf("[INFO] creating build configuration %s/%s", user, name) - - endpoint := "/api/v1/packer/build-configurations" - body, err := json.Marshal(&bcWrapper{ - BuildConfig: &BuildConfig{ - User: user, - Name: name, - }, - }) - if err != nil { - return nil, err - } - - request, err := c.Request("POST", endpoint, &RequestOptions{ - Body: bytes.NewReader(body), - Headers: map[string]string{ - "Content-Type": "application/json", - }, - }) - if err != nil { - return nil, err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return nil, err - } - - var bc BuildConfig - if err := decodeJSON(response, &bc); err != nil { - return nil, err - } - - return &bc, nil -} - -// UploadBuildConfigVersion creates a single build configuration version -// and uploads the template associated with it. -// -// Actual API: "Create Build Config Version" -func (c *Client) UploadBuildConfigVersion(v *BuildConfigVersion, metadata map[string]interface{}, - vars BuildVars, data io.Reader, size int64) error { - - log.Printf("[INFO] uploading build configuration version %s (%d bytes), with metadata %q", - v.Slug(), size, metadata) - - endpoint := fmt.Sprintf("/api/v1/packer/build-configurations/%s/%s/versions", - v.User, v.Name) - - var bodyData bcCreateWrapper - bodyData.Version.Builds = v.Builds - bodyData.Version.Metadata = metadata - bodyData.Version.Vars = vars - body, err := json.Marshal(bodyData) - if err != nil { - return err - } - - request, err := c.Request("POST", endpoint, &RequestOptions{ - Body: bytes.NewReader(body), - Headers: map[string]string{ - "Content-Type": "application/json", - }, - }) - if err != nil { - return err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return err - } - - var bv bcCreate - if err := decodeJSON(response, &bv); err != nil { - return err - } - - if err := c.putFile(bv.UploadPath, data, size); err != nil { 
- return err - } - - return nil -} - -// bcCreate is the struct returned when creating a build configuration. -type bcCreate struct { - UploadPath string `json:"upload_path"` -} - -// bcCreateWrapper is the wrapper for creating a build config. -type bcCreateWrapper struct { - Version struct { - Metadata map[string]interface{} `json:"metadata,omitempty"` - Builds []BuildConfigBuild `json:"builds"` - Vars BuildVars `json:"packer_vars,omitempty"` - } `json:"version"` -} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/client.go b/vendor/github.com/hashicorp/atlas-go/v1/client.go deleted file mode 100644 index a38f97eba10..00000000000 --- a/vendor/github.com/hashicorp/atlas-go/v1/client.go +++ /dev/null @@ -1,339 +0,0 @@ -package atlas - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "log" - "net/http" - "net/url" - "os" - "path" - "runtime" - "strings" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-rootcerts" -) - -const ( - // atlasDefaultEndpoint is the default base URL for connecting to Atlas. - atlasDefaultEndpoint = "https://atlas.hashicorp.com" - - // atlasEndpointEnvVar is the environment variable that overrrides the - // default Atlas address. 
- atlasEndpointEnvVar = "ATLAS_ADDRESS" - - // atlasCAFileEnvVar is the environment variable that causes the client to - // load trusted certs from a file - atlasCAFileEnvVar = "ATLAS_CAFILE" - - // atlasCAPathEnvVar is the environment variable that causes the client to - // load trusted certs from a directory - atlasCAPathEnvVar = "ATLAS_CAPATH" - - // atlasTLSNoVerifyEnvVar disables TLS verification, similar to curl -k - // This defaults to false (verify) and will change to true (skip - // verification) with any non-empty value - atlasTLSNoVerifyEnvVar = "ATLAS_TLS_NOVERIFY" - - // atlasTokenHeader is the header key used for authenticating with Atlas - atlasTokenHeader = "X-Atlas-Token" -) - -var projectURL = "https://github.com/hashicorp/atlas-go" -var userAgent = fmt.Sprintf("AtlasGo/1.0 (+%s; %s)", - projectURL, runtime.Version()) - -// ErrAuth is the error returned if a 401 is returned by an API request. -var ErrAuth = fmt.Errorf("authentication failed") - -// ErrNotFound is the error returned if a 404 is returned by an API request. -var ErrNotFound = fmt.Errorf("resource not found") - -// RailsError represents an error that was returned from the Rails server. -type RailsError struct { - Errors []string `json:"errors"` -} - -// Error collects all of the errors in the RailsError and returns a comma- -// separated list of the errors that were returned from the server. -func (re *RailsError) Error() string { - return strings.Join(re.Errors, ", ") -} - -// Client represents a single connection to a Atlas API endpoint. -type Client struct { - // URL is the full endpoint address to the Atlas server including the - // protocol, port, and path. - URL *url.URL - - // Token is the Atlas authentication token - Token string - - // HTTPClient is the underlying http client with which to make requests. - HTTPClient *http.Client - - // DefaultHeaders is a set of headers that will be added to every request. - // This minimally includes the atlas user-agent string. 
- DefaultHeader http.Header -} - -// DefaultClient returns a client that connects to the Atlas API. -func DefaultClient() *Client { - atlasEndpoint := os.Getenv(atlasEndpointEnvVar) - if atlasEndpoint == "" { - atlasEndpoint = atlasDefaultEndpoint - } - - client, err := NewClient(atlasEndpoint) - if err != nil { - panic(err) - } - - return client -} - -// NewClient creates a new Atlas Client from the given URL (as a string). If -// the URL cannot be parsed, an error is returned. The HTTPClient is set to -// an empty http.Client, but this can be changed programmatically by setting -// client.HTTPClient. The user can also programmatically set the URL as a -// *url.URL. -func NewClient(urlString string) (*Client, error) { - if len(urlString) == 0 { - return nil, fmt.Errorf("client: missing url") - } - - parsedURL, err := url.Parse(urlString) - if err != nil { - return nil, err - } - - token := os.Getenv("ATLAS_TOKEN") - if token != "" { - log.Printf("[DEBUG] using ATLAS_TOKEN (%s)", maskString(token)) - } - - client := &Client{ - URL: parsedURL, - Token: token, - DefaultHeader: make(http.Header), - } - - client.DefaultHeader.Set("User-Agent", userAgent) - - if err := client.init(); err != nil { - return nil, err - } - - return client, nil -} - -// init() sets defaults on the client. -func (c *Client) init() error { - c.HTTPClient = cleanhttp.DefaultClient() - tlsConfig := &tls.Config{} - if os.Getenv(atlasTLSNoVerifyEnvVar) != "" { - tlsConfig.InsecureSkipVerify = true - } - err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ - CAFile: os.Getenv(atlasCAFileEnvVar), - CAPath: os.Getenv(atlasCAPathEnvVar), - }) - if err != nil { - return err - } - t := cleanhttp.DefaultTransport() - t.TLSClientConfig = tlsConfig - c.HTTPClient.Transport = t - return nil -} - -// RequestOptions is the list of options to pass to the request. -type RequestOptions struct { - // Params is a map of key-value pairs that will be added to the Request. 
- Params map[string]string - - // Headers is a map of key-value pairs that will be added to the Request. - Headers map[string]string - - // Body is an io.Reader object that will be streamed or uploaded with the - // Request. BodyLength is the final size of the Body. - Body io.Reader - BodyLength int64 -} - -// Request creates a new HTTP request using the given verb and sub path. -func (c *Client) Request(verb, spath string, ro *RequestOptions) (*http.Request, error) { - log.Printf("[INFO] request: %s %s", verb, spath) - - // Ensure we have a RequestOptions struct (passing nil is an acceptable) - if ro == nil { - ro = new(RequestOptions) - } - - // Create a new URL with the appended path - u := *c.URL - u.Path = path.Join(c.URL.Path, spath) - - // Add the token and other params - if c.Token != "" { - log.Printf("[DEBUG] request: appending token (%s)", maskString(c.Token)) - if ro.Headers == nil { - ro.Headers = make(map[string]string) - } - - ro.Headers[atlasTokenHeader] = c.Token - } - - return c.rawRequest(verb, &u, ro) -} - -func (c *Client) putFile(rawURL string, r io.Reader, size int64) error { - log.Printf("[INFO] putting file: %s", rawURL) - - url, err := url.Parse(rawURL) - if err != nil { - return err - } - - request, err := c.rawRequest("PUT", url, &RequestOptions{ - Body: r, - BodyLength: size, - }) - if err != nil { - return err - } - - if _, err := checkResp(c.HTTPClient.Do(request)); err != nil { - return err - } - - return nil -} - -// rawRequest accepts a verb, URL, and RequestOptions struct and returns the -// constructed http.Request and any errors that occurred -func (c *Client) rawRequest(verb string, u *url.URL, ro *RequestOptions) (*http.Request, error) { - if verb == "" { - return nil, fmt.Errorf("client: missing verb") - } - - if u == nil { - return nil, fmt.Errorf("client: missing URL.url") - } - - if ro == nil { - return nil, fmt.Errorf("client: missing RequestOptions") - } - - // Add the token and other params - var params = 
make(url.Values) - for k, v := range ro.Params { - params.Add(k, v) - } - u.RawQuery = params.Encode() - - // Create the request object - request, err := http.NewRequest(verb, u.String(), ro.Body) - if err != nil { - return nil, err - } - - // set our default headers first - for k, v := range c.DefaultHeader { - request.Header[k] = v - } - - // Add any request headers (auth will be here if set) - for k, v := range ro.Headers { - request.Header.Add(k, v) - } - - // Add content-length if we have it - if ro.BodyLength > 0 { - request.ContentLength = ro.BodyLength - } - - log.Printf("[DEBUG] raw request: %#v", request) - - return request, nil -} - -// checkResp wraps http.Client.Do() and verifies that the request was -// successful. A non-200 request returns an error formatted to included any -// validation problems or otherwise. -func checkResp(resp *http.Response, err error) (*http.Response, error) { - // If the err is already there, there was an error higher up the chain, so - // just return that - if err != nil { - return resp, err - } - - log.Printf("[INFO] response: %d (%s)", resp.StatusCode, resp.Status) - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - log.Printf("[ERR] response: error copying response body") - } else { - log.Printf("[DEBUG] response: %s", buf.String()) - - // We are going to reset the response body, so we need to close the old - // one or else it will leak. - resp.Body.Close() - resp.Body = &bytesReadCloser{&buf} - } - - switch resp.StatusCode { - case 200: - return resp, nil - case 201: - return resp, nil - case 202: - return resp, nil - case 204: - return resp, nil - case 400: - return nil, parseErr(resp) - case 401: - return nil, ErrAuth - case 404: - return nil, ErrNotFound - case 422: - return nil, parseErr(resp) - default: - return nil, fmt.Errorf("client: %s", resp.Status) - } -} - -// parseErr is used to take an error JSON response and return a single string -// for use in error messages. 
-func parseErr(r *http.Response) error { - re := &RailsError{} - - if err := decodeJSON(r, &re); err != nil { - return fmt.Errorf("error decoding JSON body: %s", err) - } - - return re -} - -// decodeJSON is used to JSON decode a body into an interface. -func decodeJSON(resp *http.Response, out interface{}) error { - defer resp.Body.Close() - dec := json.NewDecoder(resp.Body) - return dec.Decode(out) -} - -// bytesReadCloser is a simple wrapper around a bytes buffer that implements -// Close as a noop. -type bytesReadCloser struct { - *bytes.Buffer -} - -func (nrc *bytesReadCloser) Close() error { - // we don't actually have to do anything here, since the buffer is just some - // data in memory and the error is initialized to no-error - return nil -} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/terraform.go b/vendor/github.com/hashicorp/atlas-go/v1/terraform.go deleted file mode 100644 index debd1d31940..00000000000 --- a/vendor/github.com/hashicorp/atlas-go/v1/terraform.go +++ /dev/null @@ -1,106 +0,0 @@ -package atlas - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "log" -) - -// TerraformConfigVersion represents a single uploaded version of a -// Terraform configuration. -type TerraformConfigVersion struct { - Version int - Remotes []string `json:"remotes"` - Metadata map[string]string `json:"metadata"` - Variables map[string]string `json:"variables,omitempty"` - TFVars []TFVar `json:"tf_vars"` -} - -// TFVar is used to serialize a single Terraform variable sent by the -// manager as a collection of Variables in a Job payload. -type TFVar struct { - Key string `json:"key"` - Value string `json:"value"` - IsHCL bool `json:"hcl"` -} - -// TerraformConfigLatest returns the latest Terraform configuration version. 
-func (c *Client) TerraformConfigLatest(user, name string) (*TerraformConfigVersion, error) { - log.Printf("[INFO] getting terraform configuration %s/%s", user, name) - - endpoint := fmt.Sprintf("/api/v1/terraform/configurations/%s/%s/versions/latest", user, name) - request, err := c.Request("GET", endpoint, nil) - if err != nil { - return nil, err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err == ErrNotFound { - return nil, nil - } - if err != nil { - return nil, err - } - - var wrapper tfConfigVersionWrapper - if err := decodeJSON(response, &wrapper); err != nil { - return nil, err - } - - return wrapper.Version, nil -} - -// CreateTerraformConfigVersion creatse a new Terraform configuration -// versions and uploads a slug with it. -func (c *Client) CreateTerraformConfigVersion( - user string, name string, - version *TerraformConfigVersion, - data io.Reader, size int64) (int, error) { - log.Printf("[INFO] creating terraform configuration %s/%s", user, name) - - endpoint := fmt.Sprintf( - "/api/v1/terraform/configurations/%s/%s/versions", user, name) - body, err := json.Marshal(&tfConfigVersionWrapper{ - Version: version, - }) - if err != nil { - return 0, err - } - - request, err := c.Request("POST", endpoint, &RequestOptions{ - Body: bytes.NewReader(body), - Headers: map[string]string{ - "Content-Type": "application/json", - }, - }) - if err != nil { - return 0, err - } - - response, err := checkResp(c.HTTPClient.Do(request)) - if err != nil { - return 0, err - } - - var result tfConfigVersionCreate - if err := decodeJSON(response, &result); err != nil { - return 0, err - } - - if err := c.putFile(result.UploadPath, data, size); err != nil { - return 0, err - } - - return result.Version, nil -} - -type tfConfigVersionCreate struct { - UploadPath string `json:"upload_path"` - Version int -} - -type tfConfigVersionWrapper struct { - Version *TerraformConfigVersion `json:"version"` -} diff --git 
a/vendor/github.com/hashicorp/atlas-go/v1/util.go b/vendor/github.com/hashicorp/atlas-go/v1/util.go deleted file mode 100644 index 9aa0d28863e..00000000000 --- a/vendor/github.com/hashicorp/atlas-go/v1/util.go +++ /dev/null @@ -1,22 +0,0 @@ -package atlas - -import ( - "fmt" - "strings" -) - -// ParseSlug parses a slug of the format (x/y) into the x and y components. It -// accepts a string of the format "x/y" ("user/name" for example). If an empty -// string is given, an error is returned. If the given string is not a valid -// slug format, an error is returned. -func ParseSlug(slug string) (string, string, error) { - if slug == "" { - return "", "", fmt.Errorf("missing slug") - } - - parts := strings.Split(slug, "/") - if len(parts) != 2 { - return "", "", fmt.Errorf("malformed slug %q", slug) - } - return parts[0], parts[1], nil -} diff --git a/vendor/github.com/hashicorp/go-checkpoint/LICENSE b/vendor/github.com/hashicorp/go-checkpoint/LICENSE deleted file mode 100644 index c33dcc7c928..00000000000 --- a/vendor/github.com/hashicorp/go-checkpoint/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. 
- - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. 
You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. 
- diff --git a/vendor/github.com/hashicorp/go-checkpoint/README.md b/vendor/github.com/hashicorp/go-checkpoint/README.md deleted file mode 100644 index e717b6ad339..00000000000 --- a/vendor/github.com/hashicorp/go-checkpoint/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# Go Checkpoint Client - -[Checkpoint](http://checkpoint.hashicorp.com) is an internal service at -Hashicorp that we use to check version information, broadcast security -bulletins, etc. - -We understand that software making remote calls over the internet -for any reason can be undesirable. Because of this, Checkpoint can be -disabled in all of our software that includes it. You can view the source -of this client to see that we're not sending any private information. - -Each Hashicorp application has it's specific configuration option -to disable checkpoint calls, but the `CHECKPOINT_DISABLE` makes -the underlying checkpoint component itself disabled. For example -in the case of packer: -``` -CHECKPOINT_DISABLE=1 packer build -``` - -**Note:** This repository is probably useless outside of internal HashiCorp -use. It is open source for disclosure and because our open source projects -must be able to link to it. diff --git a/vendor/github.com/hashicorp/go-checkpoint/checkpoint.go b/vendor/github.com/hashicorp/go-checkpoint/checkpoint.go deleted file mode 100644 index 4695bd9c217..00000000000 --- a/vendor/github.com/hashicorp/go-checkpoint/checkpoint.go +++ /dev/null @@ -1,464 +0,0 @@ -// checkpoint is a package for checking version information and alerts -// for a HashiCorp product. 
-package checkpoint - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/binary" - "encoding/json" - "fmt" - "io" - "io/ioutil" - mrand "math/rand" - "net/http" - "net/url" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "time" - - "github.com/hashicorp/go-cleanhttp" - uuid "github.com/hashicorp/go-uuid" -) - -var magicBytes [4]byte = [4]byte{0x35, 0x77, 0x69, 0xFB} - -// ReportParams are the parameters for configuring a telemetry report. -type ReportParams struct { - // Signature is some random signature that should be stored and used - // as a cookie-like value. This ensures that alerts aren't repeated. - // If the signature is changed, repeat alerts may be sent down. The - // signature should NOT be anything identifiable to a user (such as - // a MAC address). It should be random. - // - // If SignatureFile is given, then the signature will be read from this - // file. If the file doesn't exist, then a random signature will - // automatically be generated and stored here. SignatureFile will be - // ignored if Signature is given. 
- Signature string `json:"signature"` - SignatureFile string `json:"-"` - - StartTime time.Time `json:"start_time"` - EndTime time.Time `json:"end_time"` - Arch string `json:"arch"` - OS string `json:"os"` - Payload interface{} `json:"payload,omitempty"` - Product string `json:"product"` - RunID string `json:"run_id"` - SchemaVersion string `json:"schema_version"` - Version string `json:"version"` -} - -func (i *ReportParams) signature() string { - signature := i.Signature - if i.Signature == "" && i.SignatureFile != "" { - var err error - signature, err = checkSignature(i.SignatureFile) - if err != nil { - return "" - } - } - return signature -} - -// Report sends telemetry information to checkpoint -func Report(ctx context.Context, r *ReportParams) error { - if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { - return nil - } - - req, err := ReportRequest(r) - if err != nil { - return err - } - - client := cleanhttp.DefaultClient() - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return err - } - if resp.StatusCode != 201 { - return fmt.Errorf("Unknown status: %d", resp.StatusCode) - } - - return nil -} - -// ReportRequest creates a request object for making a report -func ReportRequest(r *ReportParams) (*http.Request, error) { - // Populate some fields automatically if we can - if r.RunID == "" { - uuid, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - r.RunID = uuid - } - if r.Arch == "" { - r.Arch = runtime.GOARCH - } - if r.OS == "" { - r.OS = runtime.GOOS - } - if r.Signature == "" { - r.Signature = r.signature() - } - - b, err := json.Marshal(r) - if err != nil { - return nil, err - } - - u := &url.URL{ - Scheme: "https", - Host: "checkpoint-api.hashicorp.com", - Path: fmt.Sprintf("/v1/telemetry/%s", r.Product), - } - - req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b)) - if err != nil { - return nil, err - } - req.Header.Add("Accept", "application/json") - req.Header.Add("User-Agent", 
"HashiCorp/go-checkpoint") - - return req, nil -} - -// CheckParams are the parameters for configuring a check request. -type CheckParams struct { - // Product and version are used to lookup the correct product and - // alerts for the proper version. The version is also used to perform - // a version check. - Product string - Version string - - // Arch and OS are used to filter alerts potentially only to things - // affecting a specific os/arch combination. If these aren't specified, - // they'll be automatically filled in. - Arch string - OS string - - // Signature is some random signature that should be stored and used - // as a cookie-like value. This ensures that alerts aren't repeated. - // If the signature is changed, repeat alerts may be sent down. The - // signature should NOT be anything identifiable to a user (such as - // a MAC address). It should be random. - // - // If SignatureFile is given, then the signature will be read from this - // file. If the file doesn't exist, then a random signature will - // automatically be generated and stored here. SignatureFile will be - // ignored if Signature is given. - Signature string - SignatureFile string - - // CacheFile, if specified, will cache the result of a check. The - // duration of the cache is specified by CacheDuration, and defaults - // to 48 hours if not specified. If the CacheFile is newer than the - // CacheDuration, than the Check will short-circuit and use those - // results. - // - // If the CacheFile directory doesn't exist, it will be created with - // permissions 0755. - CacheFile string - CacheDuration time.Duration - - // Force, if true, will force the check even if CHECKPOINT_DISABLE - // is set. Within HashiCorp products, this is ONLY USED when the user - // specifically requests it. This is never automatically done without - // the user's consent. - Force bool -} - -// CheckResponse is the response for a check request. 
-type CheckResponse struct { - Product string - CurrentVersion string `json:"current_version"` - CurrentReleaseDate int `json:"current_release_date"` - CurrentDownloadURL string `json:"current_download_url"` - CurrentChangelogURL string `json:"current_changelog_url"` - ProjectWebsite string `json:"project_website"` - Outdated bool `json:"outdated"` - Alerts []*CheckAlert -} - -// CheckAlert is a single alert message from a check request. -// -// These never have to be manually constructed, and are typically populated -// into a CheckResponse as a result of the Check request. -type CheckAlert struct { - ID int - Date int - Message string - URL string - Level string -} - -// Check checks for alerts and new version information. -func Check(p *CheckParams) (*CheckResponse, error) { - if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force { - return &CheckResponse{}, nil - } - - // If we have a cached result, then use that - if r, err := checkCache(p.Version, p.CacheFile, p.CacheDuration); err != nil { - return nil, err - } else if r != nil { - defer r.Close() - return checkResult(r) - } - - var u url.URL - - if p.Arch == "" { - p.Arch = runtime.GOARCH - } - if p.OS == "" { - p.OS = runtime.GOOS - } - - // If we're given a SignatureFile, then attempt to read that. 
- signature := p.Signature - if p.Signature == "" && p.SignatureFile != "" { - var err error - signature, err = checkSignature(p.SignatureFile) - if err != nil { - return nil, err - } - } - - v := u.Query() - v.Set("version", p.Version) - v.Set("arch", p.Arch) - v.Set("os", p.OS) - v.Set("signature", signature) - - u.Scheme = "https" - u.Host = "checkpoint-api.hashicorp.com" - u.Path = fmt.Sprintf("/v1/check/%s", p.Product) - u.RawQuery = v.Encode() - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, err - } - req.Header.Add("Accept", "application/json") - req.Header.Add("User-Agent", "HashiCorp/go-checkpoint") - - client := cleanhttp.DefaultClient() - resp, err := client.Do(req) - if err != nil { - return nil, err - } - if resp.StatusCode != 200 { - return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode) - } - - var r io.Reader = resp.Body - if p.CacheFile != "" { - // Make sure the directory holding our cache exists. - if err := os.MkdirAll(filepath.Dir(p.CacheFile), 0755); err != nil { - return nil, err - } - - // We have to cache the result, so write the response to the - // file as we read it. - f, err := os.Create(p.CacheFile) - if err != nil { - return nil, err - } - - // Write the cache header - if err := writeCacheHeader(f, p.Version); err != nil { - f.Close() - os.Remove(p.CacheFile) - return nil, err - } - - defer f.Close() - r = io.TeeReader(r, f) - } - - return checkResult(r) -} - -// CheckInterval is used to check for a response on a given interval duration. -// The interval is not exact, and checks are randomized to prevent a thundering -// herd. However, it is expected that on average one check is performed per -// interval. The returned channel may be closed to stop background checks. 
-func CheckInterval(p *CheckParams, interval time.Duration, cb func(*CheckResponse, error)) chan struct{} { - doneCh := make(chan struct{}) - - if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { - return doneCh - } - - go func() { - for { - select { - case <-time.After(randomStagger(interval)): - resp, err := Check(p) - cb(resp, err) - case <-doneCh: - return - } - } - }() - - return doneCh -} - -// randomStagger returns an interval that is between 3/4 and 5/4 of -// the given interval. The expected value is the interval. -func randomStagger(interval time.Duration) time.Duration { - stagger := time.Duration(mrand.Int63()) % (interval / 2) - return 3*(interval/4) + stagger -} - -func checkCache(current string, path string, d time.Duration) (io.ReadCloser, error) { - fi, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - // File doesn't exist, not a problem - return nil, nil - } - - return nil, err - } - - if d == 0 { - d = 48 * time.Hour - } - - if fi.ModTime().Add(d).Before(time.Now()) { - // Cache is busted, delete the old file and re-request. We ignore - // errors here because re-creating the file is fine too. - os.Remove(path) - return nil, nil - } - - // File looks good so far, open it up so we can inspect the contents. - f, err := os.Open(path) - if err != nil { - return nil, err - } - - // Check the signature of the file - var sig [4]byte - if err := binary.Read(f, binary.LittleEndian, sig[:]); err != nil { - f.Close() - return nil, err - } - if !reflect.DeepEqual(sig, magicBytes) { - // Signatures don't match. Reset. - f.Close() - return nil, nil - } - - // Check the version. 
If it changed, then rewrite - var length uint32 - if err := binary.Read(f, binary.LittleEndian, &length); err != nil { - f.Close() - return nil, err - } - data := make([]byte, length) - if _, err := io.ReadFull(f, data); err != nil { - f.Close() - return nil, err - } - if string(data) != current { - // Version changed, reset - f.Close() - return nil, nil - } - - return f, nil -} - -func checkResult(r io.Reader) (*CheckResponse, error) { - var result CheckResponse - dec := json.NewDecoder(r) - if err := dec.Decode(&result); err != nil { - return nil, err - } - - return &result, nil -} - -func checkSignature(path string) (string, error) { - _, err := os.Stat(path) - if err == nil { - // The file exists, read it out - sigBytes, err := ioutil.ReadFile(path) - if err != nil { - return "", err - } - - // Split the file into lines - lines := strings.SplitN(string(sigBytes), "\n", 2) - if len(lines) > 0 { - return strings.TrimSpace(lines[0]), nil - } - } - - // If this isn't a non-exist error, then return that. - if !os.IsNotExist(err) { - return "", err - } - - // The file doesn't exist, so create a signature. - var b [16]byte - n := 0 - for n < 16 { - n2, err := rand.Read(b[n:]) - if err != nil { - return "", err - } - - n += n2 - } - signature := fmt.Sprintf( - "%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) - - // Make sure the directory holding our signature exists. 
- if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return "", err - } - - // Write the signature - if err := ioutil.WriteFile(path, []byte(signature+"\n\n"+userMessage+"\n"), 0644); err != nil { - return "", err - } - - return signature, nil -} - -func writeCacheHeader(f io.Writer, v string) error { - // Write our signature first - if err := binary.Write(f, binary.LittleEndian, magicBytes); err != nil { - return err - } - - // Write out our current version length - var length uint32 = uint32(len(v)) - if err := binary.Write(f, binary.LittleEndian, length); err != nil { - return err - } - - _, err := f.Write([]byte(v)) - return err -} - -// userMessage is suffixed to the signature file to provide feedback. -var userMessage = ` -This signature is a randomly generated UUID used to de-duplicate -alerts and version information. This signature is random, it is -not based on any personally identifiable information. To create -a new signature, you can simply delete this file at any time. -See the documentation for the software using Checkpoint for more -information on how to disable it. -` diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE deleted file mode 100644 index e87a115e462..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. 
"Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. 
"Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. 
such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. 
You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. 
Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. 
- diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile deleted file mode 100644 index c3989e789f6..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -TEST?=./... - -test: - go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4 - go vet $(TEST) - go test $(TEST) -race - -.PHONY: test diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md deleted file mode 100644 index f5abffc2934..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# rootcerts - -Functions for loading root certificates for TLS connections. - ------ - -Go's standard library `crypto/tls` provides a common mechanism for configuring -TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool -of certificates for the client to use as a trust store when verifying server -certificates. - -This library contains utility functions for loading certificates destined for -that field, as well as one other important thing: - -When the `RootCAs` field is `nil`, the standard library attempts to load the -host's root CA set. This behavior is OS-specific, and the Darwin -implementation contains [a bug that prevents trusted certificates from the -System and Login keychains from being loaded][1]. This library contains -Darwin-specific behavior that works around that bug. 
- -[1]: https://github.com/golang/go/issues/14514 - -## Example Usage - -Here's a snippet demonstrating how this library is meant to be used: - -```go -func httpClient() (*http.Client, error) - tlsConfig := &tls.Config{} - err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ - CAFile: os.Getenv("MYAPP_CAFILE"), - CAPath: os.Getenv("MYAPP_CAPATH"), - }) - if err != nil { - return nil, err - } - c := cleanhttp.DefaultClient() - t := cleanhttp.DefaultTransport() - t.TLSClientConfig = tlsConfig - c.Transport = t - return c, nil -} -``` diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go deleted file mode 100644 index b55cc628485..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package rootcerts contains functions to aid in loading CA certificates for -// TLS connections. -// -// In addition, its default behavior on Darwin works around an open issue [1] -// in Go's crypto/x509 that prevents certicates from being loaded from the -// System or Login keychains. -// -// [1] https://github.com/golang/go/issues/14514 -package rootcerts diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go deleted file mode 100644 index aeb30ece324..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go +++ /dev/null @@ -1,103 +0,0 @@ -package rootcerts - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// Config determines where LoadCACerts will load certificates from. When both -// CAFile and CAPath are blank, this library's functions will either load -// system roots explicitly and return them, or set the CertPool to nil to allow -// Go's standard library to load system certs. -type Config struct { - // CAFile is a path to a PEM-encoded certificate file or bundle. Takes - // precedence over CAPath. 
- CAFile string - - // CAPath is a path to a directory populated with PEM-encoded certificates. - CAPath string -} - -// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the -// Config specified. -func ConfigureTLS(t *tls.Config, c *Config) error { - if t == nil { - return nil - } - pool, err := LoadCACerts(c) - if err != nil { - return err - } - t.RootCAs = pool - return nil -} - -// LoadCACerts loads a CertPool based on the Config specified. -func LoadCACerts(c *Config) (*x509.CertPool, error) { - if c == nil { - c = &Config{} - } - if c.CAFile != "" { - return LoadCAFile(c.CAFile) - } - if c.CAPath != "" { - return LoadCAPath(c.CAPath) - } - - return LoadSystemCAs() -} - -// LoadCAFile loads a single PEM-encoded file from the path specified. -func LoadCAFile(caFile string) (*x509.CertPool, error) { - pool := x509.NewCertPool() - - pem, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("Error loading CA File: %s", err) - } - - ok := pool.AppendCertsFromPEM(pem) - if !ok { - return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile) - } - - return pool, nil -} - -// LoadCAPath walks the provided path and loads all certificates encounted into -// a pool. 
-func LoadCAPath(caPath string) (*x509.CertPool, error) { - pool := x509.NewCertPool() - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() { - return nil - } - - pem, err := ioutil.ReadFile(path) - if err != nil { - return fmt.Errorf("Error loading file from CAPath: %s", err) - } - - ok := pool.AppendCertsFromPEM(pem) - if !ok { - return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path) - } - - return nil - } - - err := filepath.Walk(caPath, walkFn) - if err != nil { - return nil, err - } - - return pool, nil -} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go deleted file mode 100644 index 66b1472c4a0..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !darwin - -package rootcerts - -import "crypto/x509" - -// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that -// default behavior of standard TLS config libraries is triggered, which is to -// load system certs. 
-func LoadSystemCAs() (*x509.CertPool, error) { - return nil, nil -} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go deleted file mode 100644 index a9a040657fe..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go +++ /dev/null @@ -1,48 +0,0 @@ -package rootcerts - -import ( - "crypto/x509" - "os/exec" - "path" - - "github.com/mitchellh/go-homedir" -) - -// LoadSystemCAs has special behavior on Darwin systems to work around -func LoadSystemCAs() (*x509.CertPool, error) { - pool := x509.NewCertPool() - - for _, keychain := range certKeychains() { - err := addCertsFromKeychain(pool, keychain) - if err != nil { - return nil, err - } - } - - return pool, nil -} - -func addCertsFromKeychain(pool *x509.CertPool, keychain string) error { - cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain) - data, err := cmd.Output() - if err != nil { - return err - } - - pool.AppendCertsFromPEM(data) - - return nil -} - -func certKeychains() []string { - keychains := []string{ - "/System/Library/Keychains/SystemRootCertificates.keychain", - "/Library/Keychains/System.keychain", - } - home, err := homedir.Dir() - if err == nil { - loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain") - keychains = append(keychains, loginKeychain) - } - return keychains -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go deleted file mode 100644 index 2380d71e3c6..00000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go +++ /dev/null @@ -1,162 +0,0 @@ -// Derivative work from: -// - https://golang.org/src/cmd/gofmt/gofmt.go -// - https://github.com/fatih/hclfmt - -package fmtcmd - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/hashicorp/hcl/hcl/printer" -) - -var ( - ErrWriteStdin 
= errors.New("cannot use write option with standard input") -) - -type Options struct { - List bool // list files whose formatting differs - Write bool // write result to (source) file instead of stdout - Diff bool // display diffs of formatting changes -} - -func isValidFile(f os.FileInfo, extensions []string) bool { - if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") { - for _, ext := range extensions { - if strings.HasSuffix(f.Name(), "."+ext) { - return true - } - } - } - - return false -} - -// If in == nil, the source is the contents of the file with the given filename. -func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error { - if in == nil { - f, err := os.Open(filename) - if err != nil { - return err - } - defer f.Close() - in = f - } - - src, err := ioutil.ReadAll(in) - if err != nil { - return err - } - - res, err := printer.Format(src) - if err != nil { - return fmt.Errorf("In %s: %s", filename, err) - } - - if !bytes.Equal(src, res) { - // formatting has changed - if opts.List { - fmt.Fprintln(out, filename) - } - if opts.Write { - err = ioutil.WriteFile(filename, res, 0644) - if err != nil { - return err - } - } - if opts.Diff { - data, err := diff(src, res) - if err != nil { - return fmt.Errorf("computing diff: %s", err) - } - fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename) - out.Write(data) - } - } - - if !opts.List && !opts.Write && !opts.Diff { - _, err = out.Write(res) - } - - return err -} - -func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error { - visitFile := func(path string, f os.FileInfo, err error) error { - if err == nil && isValidFile(f, extensions) { - err = processFile(path, nil, stdout, false, opts) - } - return err - } - - return filepath.Walk(path, visitFile) -} - -func Run( - paths, extensions []string, - stdin io.Reader, - stdout io.Writer, - opts Options, -) error { - if len(paths) == 0 { - if opts.Write { - return ErrWriteStdin - } - if err := 
processFile("", stdin, stdout, true, opts); err != nil { - return err - } - return nil - } - - for _, path := range paths { - switch dir, err := os.Stat(path); { - case err != nil: - return err - case dir.IsDir(): - if err := walkDir(path, extensions, stdout, opts); err != nil { - return err - } - default: - if err := processFile(path, nil, stdout, false, opts); err != nil { - return err - } - } - } - - return nil -} - -func diff(b1, b2 []byte) (data []byte, err error) { - f1, err := ioutil.TempFile("", "") - if err != nil { - return - } - defer os.Remove(f1.Name()) - defer f1.Close() - - f2, err := ioutil.TempFile("", "") - if err != nil { - return - } - defer os.Remove(f2.Name()) - defer f2.Close() - - f1.Write(b1) - f2.Write(b2) - - data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput() - if len(data) > 0 { - // diff exits with a non-zero status when the files don't match. - // Ignore that failure as long as we get output. - err = nil - } - return -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go deleted file mode 100644 index c896d5844a2..00000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go +++ /dev/null @@ -1,779 +0,0 @@ -package printer - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/token" -) - -const ( - blank = byte(' ') - newline = byte('\n') - tab = byte('\t') - infinity = 1 << 30 // offset or line -) - -var ( - unindent = []byte("\uE123") // in the private use space -) - -type printer struct { - cfg Config - prev token.Pos - - comments []*ast.CommentGroup // may be nil, contains all comments - standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node) - - enableTrace bool - indentTrace int -} - -type ByPosition []*ast.CommentGroup - -func (b ByPosition) Len() int { return len(b) } -func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], 
b[i] } -func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) } - -// collectComments comments all standalone comments which are not lead or line -// comment -func (p *printer) collectComments(node ast.Node) { - // first collect all comments. This is already stored in - // ast.File.(comments) - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.File: - p.comments = t.Comments - return nn, false - } - return nn, true - }) - - standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0) - for _, c := range p.comments { - standaloneComments[c.Pos()] = c - } - - // next remove all lead and line comments from the overall comment map. - // This will give us comments which are standalone, comments which are not - // assigned to any kind of node. - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.LiteralType: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - case *ast.ObjectItem: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - } - - return nn, true - }) - - for _, c := range standaloneComments { - p.standaloneComments = append(p.standaloneComments, c) - } - - sort.Sort(ByPosition(p.standaloneComments)) -} - -// output prints creates b printable HCL output and returns it. 
-func (p *printer) output(n interface{}) []byte { - var buf bytes.Buffer - - switch t := n.(type) { - case *ast.File: - // File doesn't trace so we add the tracing here - defer un(trace(p, "File")) - return p.output(t.Node) - case *ast.ObjectList: - defer un(trace(p, "ObjectList")) - - var index int - for { - // Determine the location of the next actual non-comment - // item. If we're at the end, the next item is at "infinity" - var nextItem token.Pos - if index != len(t.Items) { - nextItem = t.Items[index].Pos() - } else { - nextItem = token.Pos{Offset: infinity, Line: infinity} - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - // Go through all the comments in the group. The group - // should be printed together, not separated by double newlines. - printed := false - newlinePrinted := false - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // if we hit the end add newlines so we can print the comment - // we don't do this if prev is invalid which means the - // beginning of the file since the first comment should - // be at the first line. - if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { - buf.Write([]byte{newline, newline}) - newlinePrinted = true - } - - // Write the actual comment. - buf.WriteString(comment.Text) - buf.WriteByte(newline) - - // Set printed to true to note that we printed something - printed = true - } - } - - // If we're not at the last item, write a new line so - // that there is a newline separating this comment from - // the next object. 
- if printed && index != len(t.Items) { - buf.WriteByte(newline) - } - } - - if index == len(t.Items) { - break - } - - buf.Write(p.output(t.Items[index])) - if index != len(t.Items)-1 { - // Always write a newline to separate us from the next item - buf.WriteByte(newline) - - // Need to determine if we're going to separate the next item - // with a blank line. The logic here is simple, though there - // are a few conditions: - // - // 1. The next object is more than one line away anyways, - // so we need an empty line. - // - // 2. The next object is not a "single line" object, so - // we need an empty line. - // - // 3. This current object is not a single line object, - // so we need an empty line. - current := t.Items[index] - next := t.Items[index+1] - if next.Pos().Line != t.Items[index].Pos().Line+1 || - !p.isSingleLineObject(next) || - !p.isSingleLineObject(current) { - buf.WriteByte(newline) - } - } - index++ - } - case *ast.ObjectKey: - buf.WriteString(t.Token.Text) - case *ast.ObjectItem: - p.prev = t.Pos() - buf.Write(p.objectItem(t)) - case *ast.LiteralType: - buf.Write(p.literalType(t)) - case *ast.ListType: - buf.Write(p.list(t)) - case *ast.ObjectType: - buf.Write(p.objectType(t)) - default: - fmt.Printf(" unknown type: %T\n", n) - } - - return buf.Bytes() -} - -func (p *printer) literalType(lit *ast.LiteralType) []byte { - result := []byte(lit.Token.Text) - switch lit.Token.Type { - case token.HEREDOC: - // Clear the trailing newline from heredocs - if result[len(result)-1] == '\n' { - result = result[:len(result)-1] - } - - // Poison lines 2+ so that we don't indent them - result = p.heredocIndent(result) - case token.STRING: - // If this is a multiline string, poison lines 2+ so we don't - // indent them. - if bytes.IndexRune(result, '\n') >= 0 { - result = p.heredocIndent(result) - } - } - - return result -} - -// objectItem returns the printable HCL form of an object item. An object type -// starts with one/multiple keys and has a value. 
The value might be of any -// type. -func (p *printer) objectItem(o *ast.ObjectItem) []byte { - defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) - var buf bytes.Buffer - - if o.LeadComment != nil { - for _, comment := range o.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range o.Keys { - buf.WriteString(k.Token.Text) - buf.WriteByte(blank) - - // reach end of key - if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - buf.Write(p.output(o.Val)) - - if o.Val.Pos().Line == o.Keys[0].Pos().Line && o.LineComment != nil { - buf.WriteByte(blank) - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - } - } - - return buf.Bytes() -} - -// objectType returns the printable HCL form of an object type. An object type -// begins with a brace and ends with a brace. -func (p *printer) objectType(o *ast.ObjectType) []byte { - defer un(trace(p, "ObjectType")) - var buf bytes.Buffer - buf.WriteString("{") - - var index int - var nextItem token.Pos - var commented, newlinePrinted bool - for { - // Determine the location of the next actual non-comment - // item. If we're at the end, the next item is the closing brace - if index != len(o.List.Items) { - nextItem = o.List.Items[index].Pos() - } else { - nextItem = o.Rbrace - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - printed := false - var lastCommentPos token.Pos - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. 
- if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // If there are standalone comments and the initial newline has not - // been printed yet, do it now. - if !newlinePrinted { - newlinePrinted = true - buf.WriteByte(newline) - } - - // add newline if it's between other printed nodes - if index > 0 { - commented = true - buf.WriteByte(newline) - } - - // Store this position - lastCommentPos = comment.Pos() - - // output the comment itself - buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) - - // Set printed to true to note that we printed something - printed = true - - /* - if index != len(o.List.Items) { - buf.WriteByte(newline) // do not print on the end - } - */ - } - } - - // Stuff to do if we had comments - if printed { - // Always write a newline - buf.WriteByte(newline) - - // If there is another item in the object and our comment - // didn't hug it directly, then make sure there is a blank - // line separating them. - if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { - buf.WriteByte(newline) - } - } - } - - if index == len(o.List.Items) { - p.prev = o.Rbrace - break - } - - // At this point we are sure that it's not a totally empty block: print - // the initial newline if it hasn't been printed yet by the previous - // block about standalone comments. - if !newlinePrinted { - buf.WriteByte(newline) - newlinePrinted = true - } - - // check if we have adjacent one liner items. If yes we'll going to align - // the comments. 
- var aligned []*ast.ObjectItem - for _, item := range o.List.Items[index:] { - // we don't group one line lists - if len(o.List.Items) == 1 { - break - } - - // one means a oneliner with out any lead comment - // two means a oneliner with lead comment - // anything else might be something else - cur := lines(string(p.objectItem(item))) - if cur > 2 { - break - } - - curPos := item.Pos() - - nextPos := token.Pos{} - if index != len(o.List.Items)-1 { - nextPos = o.List.Items[index+1].Pos() - } - - prevPos := token.Pos{} - if index != 0 { - prevPos = o.List.Items[index-1].Pos() - } - - // fmt.Println("DEBUG ----------------") - // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) - // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) - // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) - - if curPos.Line+1 == nextPos.Line { - aligned = append(aligned, item) - index++ - continue - } - - if curPos.Line-1 == prevPos.Line { - aligned = append(aligned, item) - index++ - - // finish if we have a new line or comment next. This happens - // if the next item is not adjacent - if curPos.Line+1 != nextPos.Line { - break - } - continue - } - - break - } - - // put newlines if the items are between other non aligned items. 
- // newlines are also added if there is a standalone comment already, so - // check it too - if !commented && index != len(aligned) { - buf.WriteByte(newline) - } - - if len(aligned) >= 1 { - p.prev = aligned[len(aligned)-1].Pos() - - items := p.alignedItems(aligned) - buf.Write(p.indent(items)) - } else { - p.prev = o.List.Items[index].Pos() - - buf.Write(p.indent(p.objectItem(o.List.Items[index]))) - index++ - } - - buf.WriteByte(newline) - } - - buf.WriteString("}") - return buf.Bytes() -} - -func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { - var buf bytes.Buffer - - // find the longest key and value length, needed for alignment - var longestKeyLen int // longest key length - var longestValLen int // longest value length - for _, item := range items { - key := len(item.Keys[0].Token.Text) - val := len(p.output(item.Val)) - - if key > longestKeyLen { - longestKeyLen = key - } - - if val > longestValLen { - longestValLen = val - } - } - - for i, item := range items { - if item.LeadComment != nil { - for _, comment := range item.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range item.Keys { - keyLen := len(k.Token.Text) - buf.WriteString(k.Token.Text) - for i := 0; i < longestKeyLen-keyLen+1; i++ { - buf.WriteByte(blank) - } - - // reach end of key - if i == len(item.Keys)-1 && len(item.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - val := p.output(item.Val) - valLen := len(val) - buf.Write(val) - - if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { - for i := 0; i < longestValLen-valLen+1; i++ { - buf.WriteByte(blank) - } - - for _, comment := range item.LineComment.List { - buf.WriteString(comment.Text) - } - } - - // do not print for the last item - if i != len(items)-1 { - buf.WriteByte(newline) - } - } - - return buf.Bytes() -} - -// list returns the printable HCL form of an list type. 
-func (p *printer) list(l *ast.ListType) []byte { - var buf bytes.Buffer - buf.WriteString("[") - - var longestLine int - for _, item := range l.List { - // for now we assume that the list only contains literal types - if lit, ok := item.(*ast.LiteralType); ok { - lineLen := len(lit.Token.Text) - if lineLen > longestLine { - longestLine = lineLen - } - } - } - - insertSpaceBeforeItem := false - lastHadLeadComment := false - for i, item := range l.List { - // Keep track of whether this item is a heredoc since that has - // unique behavior. - heredoc := false - if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - heredoc = true - } - - if item.Pos().Line != l.Lbrack.Line { - // multiline list, add newline before we add each item - buf.WriteByte(newline) - insertSpaceBeforeItem = false - - // If we have a lead comment, then we want to write that first - leadComment := false - if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { - leadComment = true - - // If this isn't the first item and the previous element - // didn't have a lead comment, then we need to add an extra - // newline to properly space things out. If it did have a - // lead comment previously then this would be done - // automatically. - if i > 0 && !lastHadLeadComment { - buf.WriteByte(newline) - } - - for _, comment := range lit.LeadComment.List { - buf.Write(p.indent([]byte(comment.Text))) - buf.WriteByte(newline) - } - } - - // also indent each line - val := p.output(item) - curLen := len(val) - buf.Write(p.indent(val)) - - // if this item is a heredoc, then we output the comma on - // the next line. This is the only case this happens. 
- comma := []byte{','} - if heredoc { - buf.WriteByte(newline) - comma = p.indent(comma) - } - - buf.Write(comma) - - if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { - // if the next item doesn't have any comments, do not align - buf.WriteByte(blank) // align one space - for i := 0; i < longestLine-curLen; i++ { - buf.WriteByte(blank) - } - - for _, comment := range lit.LineComment.List { - buf.WriteString(comment.Text) - } - } - - lastItem := i == len(l.List)-1 - if lastItem { - buf.WriteByte(newline) - } - - if leadComment && !lastItem { - buf.WriteByte(newline) - } - - lastHadLeadComment = leadComment - } else { - if insertSpaceBeforeItem { - buf.WriteByte(blank) - insertSpaceBeforeItem = false - } - - // Output the item itself - // also indent each line - val := p.output(item) - curLen := len(val) - buf.Write(val) - - // If this is a heredoc item we always have to output a newline - // so that it parses properly. - if heredoc { - buf.WriteByte(newline) - } - - // If this isn't the last element, write a comma. - if i != len(l.List)-1 { - buf.WriteString(",") - insertSpaceBeforeItem = true - } - - if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { - // if the next item doesn't have any comments, do not align - buf.WriteByte(blank) // align one space - for i := 0; i < longestLine-curLen; i++ { - buf.WriteByte(blank) - } - - for _, comment := range lit.LineComment.List { - buf.WriteString(comment.Text) - } - } - } - - } - - buf.WriteString("]") - return buf.Bytes() -} - -// indent indents the lines of the given buffer for each non-empty line -func (p *printer) indent(buf []byte) []byte { - var prefix []byte - if p.cfg.SpacesWidth != 0 { - for i := 0; i < p.cfg.SpacesWidth; i++ { - prefix = append(prefix, blank) - } - } else { - prefix = []byte{tab} - } - - var res []byte - bol := true - for _, c := range buf { - if bol && c != '\n' { - res = append(res, prefix...) 
- } - - res = append(res, c) - bol = c == '\n' - } - return res -} - -// unindent removes all the indentation from the tombstoned lines -func (p *printer) unindent(buf []byte) []byte { - var res []byte - for i := 0; i < len(buf); i++ { - skip := len(buf)-i <= len(unindent) - if !skip { - skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) - } - if skip { - res = append(res, buf[i]) - continue - } - - // We have a marker. we have to backtrace here and clean out - // any whitespace ahead of our tombstone up to a \n - for j := len(res) - 1; j >= 0; j-- { - if res[j] == '\n' { - break - } - - res = res[:j] - } - - // Skip the entire unindent marker - i += len(unindent) - 1 - } - - return res -} - -// heredocIndent marks all the 2nd and further lines as unindentable -func (p *printer) heredocIndent(buf []byte) []byte { - var res []byte - bol := false - for _, c := range buf { - if bol && c != '\n' { - res = append(res, unindent...) - } - res = append(res, c) - bol = c == '\n' - } - return res -} - -// isSingleLineObject tells whether the given object item is a single -// line object such as "obj {}". -// -// A single line object: -// -// * has no lead comments (hence multi-line) -// * has no assignment -// * has no values in the stanza (within {}) -// -func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { - // If there is a lead comment, can't be one line - if val.LeadComment != nil { - return false - } - - // If there is assignment, we always break by line - if val.Assign.IsValid() { - return false - } - - // If it isn't an object type, then its not a single line object - ot, ok := val.Val.(*ast.ObjectType) - if !ok { - return false - } - - // If the object has no items, it is single line! 
- return len(ot.List.Items) == 0 -} - -func lines(txt string) int { - endline := 1 - for i := 0; i < len(txt); i++ { - if txt[i] == '\n' { - endline++ - } - } - return endline -} - -// ---------------------------------------------------------------------------- -// Tracing support - -func (p *printer) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - i := 2 * p.indentTrace - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *printer, msg string) *printer { - p.printTrace(msg, "(") - p.indentTrace++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *printer) { - p.indentTrace-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go deleted file mode 100644 index 6617ab8e7a2..00000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package printer implements printing of AST nodes to HCL format. -package printer - -import ( - "bytes" - "io" - "text/tabwriter" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" -) - -var DefaultConfig = Config{ - SpacesWidth: 2, -} - -// A Config node controls the output of Fprint. 
-type Config struct { - SpacesWidth int // if set, it will use spaces instead of tabs for alignment -} - -func (c *Config) Fprint(output io.Writer, node ast.Node) error { - p := &printer{ - cfg: *c, - comments: make([]*ast.CommentGroup, 0), - standaloneComments: make([]*ast.CommentGroup, 0), - // enableTrace: true, - } - - p.collectComments(node) - - if _, err := output.Write(p.unindent(p.output(node))); err != nil { - return err - } - - // flush tabwriter, if any - var err error - if tw, _ := output.(*tabwriter.Writer); tw != nil { - err = tw.Flush() - } - - return err -} - -// Fprint "pretty-prints" an HCL node to output -// It calls Config.Fprint with default settings. -func Fprint(output io.Writer, node ast.Node) error { - return DefaultConfig.Fprint(output, node) -} - -// Format formats src HCL and returns the result. -func Format(src []byte) ([]byte, error) { - node, err := parser.Parse(src) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - if err := DefaultConfig.Fprint(&buf, node); err != nil { - return nil, err - } - - // Add trailing newline to result - buf.WriteString("\n") - return buf.Bytes(), nil -} diff --git a/vendor/github.com/hashicorp/terraform/BUILDING.md b/vendor/github.com/hashicorp/terraform/BUILDING.md deleted file mode 100644 index fae3a9e8e51..00000000000 --- a/vendor/github.com/hashicorp/terraform/BUILDING.md +++ /dev/null @@ -1,56 +0,0 @@ -# Building Terraform - -This document contains details about the process for building binaries for -Terraform. 
- -## Versioning - -As a pre-1.0 project, we use the MINOR and PATCH versions as follows: - - * a `MINOR` version increment indicates a release that may contain backwards - incompatible changes - * a `PATCH` version increment indicates a release that may contain bugfixes as - well as additive (backwards compatible) features and enhancements - -## Process - -If only need to build binaries for the platform you're running (Windows, Linux, -Mac OS X etc..), you can follow the instructions in the README for [Developing -Terraform][1]. - -The guide below outlines the steps HashiCorp takes to build the official release -binaries for Terraform. This process will generate a set of binaries for each supported -platform, using the [gox](https://github.com/mitchellh/gox) tool. - -A Vagrant virtual machine is used to provide a consistent environment with -the pre-requisite tools in place. The specifics of this VM are defined in the -[Vagrantfile](Vagrantfile). - - -```sh -# clone the repository if needed -git clone https://github.com/hashicorp/terraform.git -cd terraform - -# Spin up a fresh build VM -vagrant destroy -f -vagrant up -vagrant ssh - -# The Vagrantfile installs Go and configures the $GOPATH at /opt/gopath -# The current "terraform" directory is then sync'd into the gopath -cd /opt/gopath/src/github.com/hashicorp/terraform/ - -# Verify unit tests pass -make test - -# Build the release -# This generates binaries for each platform and places them in the pkg folder -make bin -``` - -After running these commands, you should have binaries for all supported -platforms in the `pkg` folder. 
- - -[1]: https://github.com/hashicorp/terraform#developing-terraform diff --git a/vendor/github.com/hashicorp/terraform/CHANGELOG.md b/vendor/github.com/hashicorp/terraform/CHANGELOG.md deleted file mode 100644 index 8b98f0dd036..00000000000 --- a/vendor/github.com/hashicorp/terraform/CHANGELOG.md +++ /dev/null @@ -1,4599 +0,0 @@ -## 0.11.2 (Unreleased) - -BACKWARDS INCOMPATIBILITIES / NOTES: - -* backend/gcs: The gcs remote state backend was erroneously creating the state bucket if it didn't exist. This is not the intended behavior of backends, as Terraform cannot track or manage that resource. The target bucket must now be created separately, before using it with Terraform. [GH-16865] - -NEW FEATURES: - -* **[Habitat](https://www.habitat.sh/) Provisioner** allowing automatic installation of the Habitat agent [GH-16280] - -IMPROVEMENTS: - -* config: new `rsadecrypt` interpolation function allows decrypting a base64-encoded ciphertext using a given private key. This is particularly useful for decrypting the password for a Windows instance on AWS EC2, but is generic and may find other uses too. [GH-16647] -* config: new `timeadd` interpolation function allows calculating a new timestamp relative to an existing known timestamp. [GH-16644] -* cli: Module and provider installation (and some other Terraform features) now implement [RFC6555](https://tools.ietf.org/html/rfc6555) when making outgoing HTTP requests, which should improve installation reliability for dual-stack (both IPv4 and IPv6) hosts running on networks that have non-performant or broken IPv6 Internet connectivity by trying both IPv4 and IPv6 connections. [GH-16805] -* backend/s3: it is now possible to disable the region check, for improved compatibility with third-party services that attempt to mimic the S3 API. 
[GH-16757] -* backend/s3: it is now possible to use named credentials from the `~/.aws/credentials` file, similarly to the AWS plugin [GH-16661] -* provider/terraform: in `terraform_remote_state`, the argument `environment` is now deprecated in favor of `workspace`. The `environment` argument will be removed in a later Terraform release. [GH-16558] - -BUG FIXES: - -* config: Referencing a count attribute in an output no longer generates a warning [GH-16866] -* cli: Terraform will no longer crash when `terraform plan`, `terraform apply`, and some other commands encounter an invalid provider version constraint in configuration, generating a proper error message instead. [GH-16867] -* backend/gcs: The usage of the GOOGLE_CREDENTIALS environment variable now matches that of the google provider [GH-16865] -* backend/gcs: fixed the locking methodology to avoid "double-locking" issues when used with the `terraform_remote_state` data source [GH-16852] -* provisioner/salt-masterless: now waits for all of the remote operations to complete before returning [GH-16704] - -## 0.11.1 (November 30, 2017) - -IMPROVEMENTS: - -* modules: Modules can now receive a specific provider configuration in the `providers` map, even if it's only implicitly used ([#16619](https://github.com/hashicorp/terraform/issues/16619)) -* config: Terraform will now detect and warn about outputs containing potentially-problematic references to resources with `count` set where the references does not use the "splat" syntax. This identifies situations where an output may [reference a resource with `count = 0`](https://www.terraform.io/upgrade-guides/0-11.html#referencing-attributes-from-resources-with-count-0) even if the `count` expression does not _currently_ evaluate to `0`, allowing the bug to be detected and fixed _before_ the value is later changed to `0` and would thus become an error. **This usage will become a fatal error in Terraform 0.12**. 
([#16735](https://github.com/hashicorp/terraform/issues/16735)) -* core: A new environment variable `TF_WARN_OUTPUT_ERRORS=1` is supported to opt out of the behavior introduced in 0.11.0 where errors in output expressions halt execution. This restores the previous behavior where such errors are ignored, allowing users to apply problematic configurations without fixing all of the errors. This opt-out will be removed in Terraform 0.12, so it is strongly recommended to use the new warning described in the previous item to detect and fix these problematic expressions. ([#16782](https://github.com/hashicorp/terraform/issues/16782)) - -BUG FIXES: - -* cli: fix crash when subcommands with sub-subcommands are accidentally provided as a single argument, such as `terraform "workspace list"` ([#16789](https://github.com/hashicorp/terraform/issues/16789)) - -## 0.11.0 (November 16, 2017) - -The following list combines the changes from 0.11.0-beta1 and 0.11.0-rc1 to give the full set of changes since 0.10.8. For details on each of the individual pre-releases, please see [the 0.11.0-rc1 CHANGELOG](https://github.com/hashicorp/terraform/blob/v0.11.0-rc1/CHANGELOG.md). - -BACKWARDS INCOMPATIBILITIES / NOTES: - -The following items give an overview of the incompatibilities and other noteworthy changes in this release. For more details on some of these changes, along with information on how to upgrade existing configurations where needed, see [the v0.11 upgrade guide](https://www.terraform.io/upgrade-guides/0-11.html). - -* Output interpolation errors are now fatal. Module configs with unused outputs which contained errors will no longer be valid. -* Module configuration blocks have 2 new reserved attribute names, "providers" and "version". Modules using these as input variables will need to be updated. -* The module provider inheritance rules have changed. 
Inherited provider configurations will no longer be merged with local configurations, and additional (aliased) provider configurations must be explicitly passed between modules when shared. See [the upgrade guide](https://www.terraform.io/upgrade-guides/0-11.html) for more details. -* The command `terraform apply` with no explicit plan argument is now interactive by default. Specifically, it will show the generated plan and wait for confirmation before applying it, similar to the existing behavior of `terraform destroy`. The behavior is unchanged when a plan file argument is provided, and the previous behavior can be obtained _without_ a plan file by using the `-auto-approve` option. -* The `terraform` provider (that is, the provider that contains the `terraform_remote_state` data source) has been re-incorporated as a built-in provider in the Terraform Core executable. In 0.10 it was split into a separate plugin along with all of the other providers, but this provider uses several internal Terraform Core APIs and so in practice it's been confusing to version that separately from Terraform Core. As a consequence, this provider no longer supports version constraints, and so `version` attributes for this provider in configuration must be removed. -* When remote state is enabled, Terraform will no longer generate a local `terraform.tfstate.backup` file before updating remote state. Previously this file could potentially be used to recover a previous state to help recover after a mistake, but it also caused a potentially-sensitive state file to be generated in an unexpected location that may be inadvertently copied or checked in to version control. With this local backup now removed, we recommend instead relying on versioning or backup mechanisms provided by the backend, such as Amazon S3 versioning or Terraform Enterprise's built-in state history mechanism. 
(Terraform will still create the local file `errored.tfstate` in the unlikely event that there is an error when writing to the remote backend.) - -NEW FEATURES: - -* modules: Module configuration blocks now have a "version" attribute, to set a version constraint for modules sourced from a registry. ([#16466](https://github.com/hashicorp/terraform/issues/16466)) -* modules: Module configuration blocks now have a "providers" attribute, to map a provider configuration from the current module into a submodule ([#16379](https://github.com/hashicorp/terraform/issues/16379)) -* backend/gcs: The gcs remote state backend now supports workspaces and locking. -* backend/manta: The Manta backend now supports workspaces and locking ([#16296](https://github.com/hashicorp/terraform/issues/16296)) - -IMPROVEMENTS: - -* cli: The `terraform apply` command now waits for interactive approval of the generated plan before applying it, unless an explicit plan file is provided. ([#16502](https://github.com/hashicorp/terraform/issues/16502)) -* cli: The `terraform version` command now prints out the version numbers of initialized plugins as well as the version of Terraform core, so that they can be more easily shared when opening GitHub Issues, etc. ([#16439](https://github.com/hashicorp/terraform/issues/16439)) -* cli: A new `TF_DATA_DIR` environment variable can be used to override the location where Terraform stores the files normally placed in the `.terraform` directory. ([#16207](https://github.com/hashicorp/terraform/issues/16207)) -* provider/terraform: now built in to Terraform Core so that it will always have the same backend functionality as the Terraform release it corresponds to. ([#16543](https://github.com/hashicorp/terraform/issues/16543)) - -BUG FIXES: - -* config: Provider config in submodules will no longer be overridden by parent providers with the same name. 
([#16379](https://github.com/hashicorp/terraform/issues/16379)) -* cli: When remote state is enabled, Terraform will no longer generate a local `terraform.tfstate.backup` file before updating remote state. ([#16464](https://github.com/hashicorp/terraform/issues/16464)) -* core: state now includes a reference to the provider configuration most recently used to create or update a resource, so that the same configuration can be used to destroy that resource if its configuration (including the explicit pointer to a provider configuration) is removed ([#16586](https://github.com/hashicorp/terraform/issues/16586)) -* core: Module outputs can now produce errors, preventing them from silently propagating through the config. ([#16204](https://github.com/hashicorp/terraform/issues/16204)) -* backend/gcs: will now automatically add a slash to the given prefix if not present, since without it the workspace enumeration does not function correctly ([#16585](https://github.com/hashicorp/terraform/issues/16585)) - -PROVIDER FRAMEWORK CHANGES (not user-facing): - -* helper/schema: Loosen validation for 'id' field ([#16456](https://github.com/hashicorp/terraform/issues/16456)) - -## 0.10.8 (October 25, 2017) - -NEW FEATURES: - -* **New `etcdv3` backend**, for use with the newer etcd api ([#15680](https://github.com/hashicorp/terraform/issues/15680)) -* **New interpolation function `chunklist`**, for spliting a list into a list of lists with specified sublist chunk sizes. ([#15112](https://github.com/hashicorp/terraform/issues/15112)) - -IMPROVEMENTS: - -* backend/s3: Add options to skip AWS validation which allows non-AWS S3 backends ([#15553](https://github.com/hashicorp/terraform/issues/15553)) - -BUG FIXES: - -* command/validate: Respect `-plugin-dir` overridden plugin paths in the `terraform validate` command. 
([#15985](https://github.com/hashicorp/terraform/issues/15985)) -* provisioner/chef: Clean clients from `chef-vault` when `recreate_client` enabled ([#16357](https://github.com/hashicorp/terraform/issues/16357)) -* communicator/winrm: Support the `cacert` option for custom certificate authorities when provisioning over WinRM ([#14783](https://github.com/hashicorp/terraform/issues/14783)) - -## 0.10.7 (October 2, 2017) - -NEW FEATURES: - -* Provider plugins can now optionally be cached in a shared directory to avoid re-downloading them for each configuration working directory. For more information, see [the documentation](https://github.com/hashicorp/terraform/blob/34956cd12449cb77db3f55e3286cd369e8332eeb/website/docs/configuration/providers.html.md#provider-plugin-cache). ([#16000](https://github.com/hashicorp/terraform/issues/16000)) - -IMPROVEMENTS: - -* config: New `abs` interpolation function, returning the absolute value of a number ([#16168](https://github.com/hashicorp/terraform/issues/16168)) -* config: New `transpose` interpolation function, which swaps the keys and values in a map of lists of strings. ([#16192](https://github.com/hashicorp/terraform/issues/16192)) -* cli: The Terraform CLI now supports tab-completion for commands and certain arguments for `bash` and `zsh` users. See [the tab-completion docs](https://github.com/hashicorp/terraform/blob/2c782e60fad78e6fc976d850162322608f074e57/website/docs/commands/index.html.markdown#shell-tab-completion) for information on how to enable it. ([#16176](https://github.com/hashicorp/terraform/issues/16176)) -* cli: `terraform state rm` now includes in its output the count of resources that were removed from the state. 
([#16137](https://github.com/hashicorp/terraform/issues/16137)) - -BUG FIXES: - -* modules: Update go-getter to fix crash when fetching invalid source subdir ([#16161](https://github.com/hashicorp/terraform/issues/16161)) -* modules: Fix regression in the handling of modules sourcing other modules with relative paths ([#16160](https://github.com/hashicorp/terraform/issues/16160)) -* core: Skip local value interpolation during destroy ([#16213](https://github.com/hashicorp/terraform/issues/16213)) - -## 0.10.6 (September 19, 2017) - -UPGRADE NOTES: - -* The internal storage of modules has changed in this release, so after - upgrading `terraform init` must be run to re-install modules in the new - on-disk format. The existing installed versions of modules will be ignored, - so the latest version of each module will be installed. - -IMPROVEMENTS: - -* Modules can now be installed from [the Terraform Registry](https://registry.terraform.io/) -* cli: `terraform import` now accepts an option `-allow-missing-config` that overrides the default requirement that a configuration block must already be present for the resource being imported. ([#15876](https://github.com/hashicorp/terraform/issues/15876)) - -## 0.10.5 (September 14, 2017) - -NEW FEATURES: - -* config: `indent` interpolation function appends spaces to all but the first line of a multi-line string ([#15311](https://github.com/hashicorp/terraform/issues/15311)) - -IMPROVEMENTS: - -* cli: `terraform fmt` has a new option `-check` which makes it return a non-zero exit status if any formatting changes are required ([#15387](https://github.com/hashicorp/terraform/issues/15387)) -* cli: When [running Terraform in automation](https://www.terraform.io/guides/running-terraform-in-automation.html), a new environment variable `TF_IN_AUTOMATION` can be used to disable or adjust certain prompts that would normally include specific CLI commands to run. 
This assumes that the wrapping automation tool is providing its own UI for guiding the user through the workflow, and thus the standard advice would be redundant and/or confusing. ([#16059](https://github.com/hashicorp/terraform/issues/16059)) - -BUG FIXES: - -* cli: restore the "(forces new resource)" annotations on attributes that were inadvertently disabled in 0.10.4. ([#16067](https://github.com/hashicorp/terraform/issues/16067)) -* cli: fix regression with installing modules from git when the `GIT_SSH_COMMAND` environment variable is set ([#16099](https://github.com/hashicorp/terraform/issues/16099)) - -## 0.10.4 (September 6, 2017) - -IMPROVEMENTS: -* `terraform apply` now uses the standard resource address syntax to refer to resources in its log ([#15884](https://github.com/hashicorp/terraform/issues/15884)) -* `terraform plan` output has some minor adjustments to improve readability and accessibility for those who can't see its colors ([#15884](https://github.com/hashicorp/terraform/issues/15884)) - -BUG FIXES: - -* backend/consul: fix crash during consul backend initialization ([#15976](https://github.com/hashicorp/terraform/issues/15976)) -* backend/azurerm: ensure that blob storage metadata is preserved when updating state blobs, to avoid losing track of lock metadata ([#16015](https://github.com/hashicorp/terraform/issues/16015)) -* config: local values now work properly in resource `count` and in modules with more than one `.tf` file ([#15995](https://github.com/hashicorp/terraform/issues/15995)) ([#15982](https://github.com/hashicorp/terraform/issues/15982)) -* cli: removed some inconsistencies in how data sources are counted and tallied in plan vs. apply and apply vs. destroy. 
In particular, data sources are no longer incorrectly counted as destroyed in `terraform destroy` ([#15884](https://github.com/hashicorp/terraform/issues/15884)) - -## 0.10.3 (August 30, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - -* LGPL Dependencies Removed ([#15862](https://github.com/hashicorp/terraform/issues/15862)) - -NEW FEATURES: - -* **Local Values**: this new configuration language feature allows assigning a symbolic local name to an expression so it can be used multiple times in configuration without repetition. See [the documentation](https://github.com/hashicorp/terraform/blob/master/website/docs/configuration/locals.html.md) for how to define and use local values. ([#15449](https://github.com/hashicorp/terraform/issues/15449)) -* **`base64gzip` interpolation function**: compresses a string with gzip and then base64-encodes the result ([#3858](https://github.com/hashicorp/terraform/issues/3858)) -* **`flatten` interpolation function**: turns a list of lists, or list of lists of lists, etc into a flat list of primitive values ([#15278](https://github.com/hashicorp/terraform/issues/15278)) -* **`urlencode` interpolation function**: applies standard URL encoding to a string so that it can be embedded in a URL without making it invalid and without any of the characters being interpreted as part of the URL structure ([#15871](https://github.com/hashicorp/terraform/issues/15871)) -* **`salt-masterless` provisioner**: runs Salt in masterless mode on a target server ([#14720](https://github.com/hashicorp/terraform/issues/14720)) - -IMPROVEMENTS: - -* config: The `jsonencode` interpolation function now accepts nested list and map structures, where before it would accept only strings, lists of strings, and maps of strings. ([#14884](https://github.com/hashicorp/terraform/issues/14884)) -* cli: The "creation complete" (and similar) messages from `terraform apply` now include a total elapsed time for each operation. 
([#15548](https://github.com/hashicorp/terraform/issues/15548)) -* cli: Module installation (with either `terraform init` or `terraform get`) now detects and recursively initializes submodules when the source is a git repository. ([#15891](https://github.com/hashicorp/terraform/issues/15891)) -* cli: Modules can now be installed from `.tar.xz` archives, in addition to the existing `.tar.gz`, `.tar.bz2` and `.zip`. ([#15891](https://github.com/hashicorp/terraform/issues/15891)) -* provisioner/local-exec: now possible to specify a custom "interpreter", overriding the default of either `bash -c` (on Unix) or `cmd.exe /C` (on Windows) ([#15166](https://github.com/hashicorp/terraform/issues/15166)) -* backend/consul: can now set the path to a specific CA certificate file, client certificate file, and client key file that will be used when configuring the underlying Consul client. ([#15405](https://github.com/hashicorp/terraform/issues/15405)) -* backend/http: now has optional support for locking, with special support from the target server. Additionally, the update operation can now optionally be implemented via `PUT` rather than `POST`. ([#15793](https://github.com/hashicorp/terraform/issues/15793)) -* helper/resource: Add `TestStep.SkipFunc` ([#15957](https://github.com/hashicorp/terraform/issues/15957)) - -BUG FIXES: - -* cli: `terraform init` now verifies the required Terraform version from the root module config. Previously this was verified only on subsequent commands, after initialization. ([#15935](https://github.com/hashicorp/terraform/issues/15935)) -* cli: `terraform validate` now consults `terraform.tfvars`, if present, to set variable values. This is now consistent with the behavior of other commands. 
([#15938](https://github.com/hashicorp/terraform/issues/15938)) - -## 0.10.2 (August 16, 2017) - -BUG FIXES: - -* tools/terraform-bundle: Add missing Ui to ProviderInstaller (fix crash) ([#15826](https://github.com/hashicorp/terraform/issues/15826)) -* go-plugin: don't crash when server emits non-key-value JSON ([go-plugin#43](https://github.com/hashicorp/go-plugin/pull/43)) - -## 0.10.1 (August 15, 2017) - -BUG FIXES: - -* Fix `terraform state rm` and `mv` commands to work correctly with remote state backends ([#15652](https://github.com/hashicorp/terraform/issues/15652)) -* Fix errors when interpolations fail during input ([#15780](https://github.com/hashicorp/terraform/issues/15780)) -* Backoff retries in remote-execution provisioner ([#15772](https://github.com/hashicorp/terraform/issues/15772)) -* Load plugins from `~/.terraform.d/plugins/OS_ARCH/` and `.terraformrc` ([#15769](https://github.com/hashicorp/terraform/issues/15769)) -* The `import` command was ignoring the remote state configuration ([#15768](https://github.com/hashicorp/terraform/issues/15768)) -* Don't allow leading slashes in s3 bucket names for remote state ([#15738](https://github.com/hashicorp/terraform/issues/15738)) - -IMPROVEMENTS: - -* helper/schema: Add `GetOkExists` schema function ([#15723](https://github.com/hashicorp/terraform/issues/15723)) -* helper/schema: Make 'id' a reserved field name ([#15695](https://github.com/hashicorp/terraform/issues/15695)) -* command/init: Display version + source when initializing plugins ([#15804](https://github.com/hashicorp/terraform/issues/15804)) - -INTERNAL CHANGES: - -* DiffFieldReader.ReadField caches results to optimize deeply nested schemas ([#15663](https://github.com/hashicorp/terraform/issues/15663)) - - -## 0.10.0 (August 2, 2017) - -**This is the complete 0.9.11 to 0.10.0 CHANGELOG** - -BACKWARDS INCOMPATIBILITIES / NOTES: - -* A new flag `-auto-approve` has been added to `terraform apply`. 
This flag controls whether an interactive approval is applied before - making the changes in the plan. For now this flag defaults to `true` to preserve previous behavior, but this will become the new default - in a future version. We suggest that anyone running `terraform apply` in wrapper scripts or automation refer to the upgrade guide to learn - how to prepare such wrapper scripts for the later breaking change. -* The `validate` command now checks that all variables are specified by default. The validation will fail by default if that's not the - case. ([#13872](https://github.com/hashicorp/terraform/issues/13872)) -* `terraform state rm` now requires at least one argument. Previously, calling it with no arguments would remove all resources from state, - which is consistent with the other `terraform state` commands but unlikely enough that we considered it better to be inconsistent here to - reduce the risk of accidentally destroying the state. -* Terraform providers are no longer distributed as part of the main Terraform distribution. Instead, they are installed automatically as - part of running `terraform init`. It is therefore now mandatory to run `terraform init` before any other operations that use provider - plugins, to ensure that the required plugins are installed and properly initialized. -* The `terraform env` family of commands have been renamed to `terraform workspace`, in response to feedback that the previous naming was - confusing due to collisions with other concepts of the same name. The commands still work the same as they did before, and the `env` - subcommand is still supported as an alias for backward compatibility. The `env` subcommand will be removed altogether in a future release, - so it's recommended to update any automation or wrapper scripts that use these commands. -* The `terraform init` subcommand no longer takes a SOURCE argument to copy to the current directory. 
The behavior has been changed to match - that of `plan` and `apply`, so that a configuration can be provided as an argument on the commandline while initializing the current - directory. If a module needs to be copied into the current directory before initialization, it will have to be done manually. -* The `-target` option available on several Terraform subcommands has changed behavior and **now matches potentially more resources**. In - particular, given an option `-target=module.foo`, resources in any descendent modules of `foo` will also be targeted, where before this - was not true. After upgrading, be sure to look carefully at the set of changes proposed by `terraform plan` when using `-target` to ensure - that the target is being interpreted as expected. Note that the `-target` argument is offered for exceptional circumstances only and is - not intended for routine use. -* The `import` command requires that imported resources be specified in the configuration file. Previously, users were encouraged to import - a resource and _then_ write the configuration block for it. This creates the risk that users could import a resource and subsequently - create no configuration for it, which results in Terraform deleting the resource. If the imported resource is not present in the - configuration file, the `import` command will fail. - -FEATURES: - -* **Separate Provider Releases:** Providers are now released independently from Terraform. -* **Automatic Provider Installation:** The required providers will be automatically installed during `terraform init`. -* **Provider Constraints:** Provider are now versioned, and version constraints may be declared in the configuration. 
- -PROVIDERS: - -* Providers now maintain their own CHANGELOGs in their respective repositories: [terraform-providers](https://github.com/terraform-providers) - -IMPROVEMENTS: - -* cli: Add a `-from-module` flag to `terraform init` to re-introduce the legacy `terraform init` behavior of fetching a module. ([#15666](https://github.com/hashicorp/terraform/issues/15666)) -* backend/s3: Add `workspace_key_prefix` to allow a user-configurable prefix for workspaces in the S3 Backend. ([#15370](https://github.com/hashicorp/terraform/issues/15370)) -* cli: `terraform apply` now has an option `-auto-approve=false` that produces an interactive prompt to approve the generated plan. This will become the default workflow in a future Terraform version. ([#7251](https://github.com/hashicorp/terraform/issues/7251)) -* cli: `terraform workspace show` command prints the current workspace name in a way that's more convenient for processing in wrapper scripts. ([#15157](https://github.com/hashicorp/terraform/issues/15157)) -* cli: `terraform state rm` will now generate an error if no arguments are passed, whereas before it treated it as an open resource address selecting _all_ resources ([#15283](https://github.com/hashicorp/terraform/issues/15283)) -* cli: Files in the config directory ending in `.auto.tfvars` are now loaded automatically (in lexicographical order) in addition to the single `terraform.tfvars` file that auto-loaded previously. ([#13306](https://github.com/hashicorp/terraform/issues/13306)) -* Providers no longer in the main Terraform distribution; installed automatically by init instead ([#15208](https://github.com/hashicorp/terraform/issues/15208)) -* cli: `terraform env` command renamed to `terraform workspace` ([#14952](https://github.com/hashicorp/terraform/issues/14952)) -* cli: `terraform init` command now has `-upgrade` option to download the latest versions (within specified constraints) of modules and provider plugins. 
-* cli: The `-target` option to various Terraform operations can now target resources in descendent modules. ([#15314](https://github.com/hashicorp/terraform/issues/15314)) -* cli: Minor updates to `terraform plan` output: use standard resource address syntax, more visually-distinct `-/+` actions, and more ([#15362](https://github.com/hashicorp/terraform/issues/15362)) -* config: New interpolation function `contains`, to find if a given string exists in a list of strings. ([#15322](https://github.com/hashicorp/terraform/issues/15322)) - -BUG FIXES: - -* provisioner/chef: fix panic ([#15617](https://github.com/hashicorp/terraform/issues/15617)) -* Don't show a message about the path to the state file if the state is remote ([#15435](https://github.com/hashicorp/terraform/issues/15435)) -* Fix crash when `terraform graph` is run with no configuration ([#15588](https://github.com/hashicorp/terraform/issues/15588)) -* Handle correctly the `.exe` suffix on locally-compiled provider plugins on Windows systems. ([#15587](https://github.com/hashicorp/terraform/issues/15587)) -* config: Fixed a parsing issue in the interpolation language HIL that was causing misinterpretation of literal strings ending with escaped backslashes ([#15415](https://github.com/hashicorp/terraform/issues/15415)) -* core: the S3 Backend was failing to remove the state file checksums from DynamoDB when deleting a workspace ([#15383](https://github.com/hashicorp/terraform/issues/15383)) -* core: Improved resilience against crashes for a certain kind of inconsistency in the representation of list values in state. 
([#15390](https://github.com/hashicorp/terraform/issues/15390)) -* core: Display correct to and from backends in copy message when migrating to new remote state ([#15318](https://github.com/hashicorp/terraform/issues/15318)) -* core: Fix a regression from 0.9.6 that was causing the tally of resources to create to be double-counted sometimes in the plan output ([#15344](https://github.com/hashicorp/terraform/issues/15344)) -* cli: the state `rm` and `mv` commands were always loading a state from a Backend, and ignoring the `-state` flag ([#15388](https://github.com/hashicorp/terraform/issues/15388)) -* cli: certain prompts in `terraform init` were respecting `-input=false` but not the `TF_INPUT` environment variable ([#15391](https://github.com/hashicorp/terraform/issues/15391)) -* state: Further work, building on [#15423](https://github.com/hashicorp/terraform/issues/15423), to improve the internal design of the state managers to make this code more maintainable and reduce the risk of regressions; this may lead to slight changes to the number of times Terraform writes to remote state and how the serial is implemented with respect to those writes, which does not affect outward functionality but is worth noting if you log or inspect state updates for debugging purposes. 
-* config: Interpolation function `cidrhost` was not correctly calculating host addresses under IPv6 CIDR prefixes ([#15321](https://github.com/hashicorp/terraform/issues/15321)) -* provisioner/chef: Prevent a panic while trying to read the connection info ([#15271](https://github.com/hashicorp/terraform/issues/15271)) -* provisioner/file: Refactor the provisioner validation function to prevent false positives ([#15273](https://github.com/hashicorp/terraform/issues/15273)) - -INTERNAL CHANGES: - -* helper/schema: Actively disallow reserved field names in schema ([#15522](https://github.com/hashicorp/terraform/issues/15522)) -* helper/schema: Force field names to be alphanum lowercase + underscores ([#15562](https://github.com/hashicorp/terraform/issues/15562)) - - -## 0.10.0-rc1 to 0.10.0 (August 2, 2017) - -BUG FIXES: - -* provisioner/chef: fix panic ([#15617](https://github.com/hashicorp/terraform/issues/15617)) - -IMPROVEMENTS: - -* cli: Add a `-from-module` flag to `terraform init` to re-introduce the legacy `terraform init` behavior of fetching a module. ([#15666](https://github.com/hashicorp/terraform/issues/15666)) - - -## 0.10.0-rc1 (July 19, 2017) - -BUG FIXES: - -* Don't show a message about the path to the state file if the state is remote ([#15435](https://github.com/hashicorp/terraform/issues/15435)) -* Fix crash when `terraform graph` is run with no configuration ([#15588](https://github.com/hashicorp/terraform/issues/15588)) -* Handle correctly the `.exe` suffix on locally-compiled provider plugins on Windows systems. 
([#15587](https://github.com/hashicorp/terraform/issues/15587)) - -INTERNAL CHANGES: - -* helper/schema: Actively disallow reserved field names in schema ([#15522](https://github.com/hashicorp/terraform/issues/15522)) -* helper/schema: Force field names to be alphanum lowercase + underscores ([#15562](https://github.com/hashicorp/terraform/issues/15562)) - -## 0.10.0-beta2 (July 6, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - -* A new flag `-auto-approve` has been added to `terraform apply`. This flag controls whether an interactive approval is applied before making the changes in the plan. For now this flag defaults to `true` to preserve previous behavior, but this will become the new default in a future version. We suggest that anyone running `terraform apply` in wrapper scripts or automation refer to the upgrade guide to learn how to prepare such wrapper scripts for the later breaking change. -* The `validate` command now checks that all variables are specified by default. - The validation will fail by default if that's not the case. ([#13872](https://github.com/hashicorp/terraform/issues/13872)) -* `terraform state rm` now requires at least one argument. Previously, calling it with no arguments would remove all resources from state, which is consistent with the other `terraform state` commands but unlikely enough that we considered it better to be inconsistent here to reduce the risk of accidentally destroying the state. - -IMPROVEMENTS: - -* backend/s3: Add `workspace_key_prefix` to allow a user-configurable prefix for workspaces in the S3 Backend. ([#15370](https://github.com/hashicorp/terraform/issues/15370)) -* cli: `terraform apply` now has an option `-auto-approve=false` that produces an interactive prompt to approve the generated plan. This will become the default workflow in a future Terraform version. 
([#7251](https://github.com/hashicorp/terraform/issues/7251)) -* cli: `terraform workspace show` command prints the current workspace name in a way that's more convenient for processing in wrapper scripts. ([#15157](https://github.com/hashicorp/terraform/issues/15157)) -* cli: `terraform state rm` will now generate an error if no arguments are passed, whereas before it treated it as an open resource address selecting _all_ resources ([#15283](https://github.com/hashicorp/terraform/issues/15283)) -* cli: Files in the config directory ending in `.auto.tfvars` are now loaded automatically (in lexicographical order) in addition to the single `terraform.tfvars` file that auto-loaded previously. ([#13306](https://github.com/hashicorp/terraform/issues/13306)) - -BUG FIXES: - -* config: Fixed a parsing issue in the interpolation language HIL that was causing misinterpretation of literal strings ending with escaped backslashes ([#15415](https://github.com/hashicorp/terraform/issues/15415)) -* core: the S3 Backend was failing to remove the state file checksums from DynamoDB when deleting a workspace ([#15383](https://github.com/hashicorp/terraform/issues/15383)) -* core: Improved resilience against crashes for a certain kind of inconsistency in the representation of list values in state. 
([#15390](https://github.com/hashicorp/terraform/issues/15390)) -* core: Display correct to and from backends in copy message when migrating to new remote state ([#15318](https://github.com/hashicorp/terraform/issues/15318)) -* core: Fix a regression from 0.9.6 that was causing the tally of resources to create to be double-counted sometimes in the plan output ([#15344](https://github.com/hashicorp/terraform/issues/15344)) -* cli: the state `rm` and `mv` commands were always loading a state from a Backend, and ignoring the `-state` flag ([#15388](https://github.com/hashicorp/terraform/issues/15388)) -* cli: certain prompts in `terraform init` were respecting `-input=false` but not the `TF_INPUT` environment variable ([#15391](https://github.com/hashicorp/terraform/issues/15391)) -* state: Further work, building on [#15423](https://github.com/hashicorp/terraform/issues/15423), to improve the internal design of the state managers to make this code more maintainable and reduce the risk of regressions; this may lead to slight changes to the number of times Terraform writes to remote state and how the serial is implemented with respect to those writes, which does not affect outward functionality but is worth noting if you log or inspect state updates for debugging purposes. - -## 0.10.0-beta1 (June 22, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - -* Terraform providers are no longer distributed as part of the main Terraform distribution. Instead, they are installed automatically - as part of running `terraform init`. It is therefore now mandatory to run `terraform init` before any other operations that use - provider plugins, to ensure that the required plugins are installed and properly initialized. -* The `terraform env` family of commands have been renamed to `terraform workspace`, in response to feedback that the previous naming - was confusing due to collisions with other concepts of the same name. 
The commands still work the same as they did before, and - the `env` subcommand is still supported as an alias for backward compatibility. The `env` subcommand will be removed altogether in - a future release, so it's recommended to update any automation or wrapper scripts that use these commands. -* The `terraform init` subcommand no longer takes a SOURCE argument to copy to the current directory. The behavior has - been changed to match that of `plan` and `apply`, so that a configuration can be provided as an argument on the - commandline while initializing the current directory. If a module needs to be copied into the current directory before - initialization, it will have to be done manually. -* The `-target` option available on several Terraform subcommands has changed behavior and **now matches potentially more resources**. - In particular, given an option `-target=module.foo`, resources in any descendent modules of `foo` will also be targeted, where before - this was not true. After upgrading, be sure to look carefully at the set of changes proposed by `terraform plan` when using `-target` - to ensure that the target is being interpreted as expected. Note that the `-target` argument is offered for exceptional circumstances - only and is not intended for routine use. -* The `import` command requires that imported resources be specified in the configuration file. Previously, users were encouraged to - import a resource and _then_ write the configuration block for it. This creates the risk that users could import a resource and - subsequently create no configuration for it, which results in Terraform deleting the resource. If the imported resource is not - present in the configuration file, the `import` command will fail. 
- -IMPROVEMENTS: - -* Providers no longer in the main Terraform distribution; installed automatically by init instead ([#15208](https://github.com/hashicorp/terraform/issues/15208)) -* cli: `terraform env` command renamed to `terraform workspace` ([#14952](https://github.com/hashicorp/terraform/issues/14952)) -* cli: `terraform init` command now has `-upgrade` option to download the latest versions (within specified constraints) of modules and provider plugins. -* cli: The `-target` option to various Terraform operations can now target resources in descendent modules. ([#15314](https://github.com/hashicorp/terraform/issues/15314)) -* cli: Minor updates to `terraform plan` output: use standard resource address syntax, more visually-distinct `-/+` actions, and more ([#15362](https://github.com/hashicorp/terraform/issues/15362)) -* config: New interpolation function `contains`, to find if a given string exists in a list of strings. ([#15322](https://github.com/hashicorp/terraform/issues/15322)) - -BUG FIXES: - -* config: Interpolation function `cidrhost` was not correctly calculating host addresses under IPv6 CIDR prefixes ([#15321](https://github.com/hashicorp/terraform/issues/15321)) -* provisioner/chef: Prevent a panic while trying to read the connection info ([#15271](https://github.com/hashicorp/terraform/issues/15271)) -* provisioner/file: Refactor the provisioner validation function to prevent false positives ([#15273](https://github.com/hashicorp/terraform/issues/15273)) - -## 0.9.11 (Jul 3, 2017) - -BUG FIXES: - -* core: Hotfix for issue where a state from a plan was reported as not equal to the same state stored to a backend. This arose from the fix for the previous issue where the incorrect copy of the state was being used when applying with a plan. 
([#15460](https://github.com/hashicorp/terraform/issues/15460)) - - -## 0.9.10 (June 29, 2017) - -BUG FIXES: - -* core: Hotfix for issue where state index wasn't getting properly incremented when applying a change containing only data source updates and/or resource drift. (That is, state changes made during refresh.) - This issue is significant only for the "atlas" backend, since that backend verifies on the server that state serial numbers are being used consistently. ([#15423](https://github.com/hashicorp/terraform/issues/15423)) - -## 0.9.9 (June 26, 2017) - -BUG FIXES: - - * provisioner/file: Refactor the provisioner validation function to prevent false positives ([#15273](https://github.com/hashicorp/terraform/issues/15273)) - * provisioner/chef: Prevent a panic while trying to read the connection info ([#15271](https://github.com/hashicorp/terraform/issues/15271)) - -## 0.9.8 (June 7, 2017) - -NOTE: - -* The 0.9.7 release had a bug with its new feature of periodically persisting state to the backend during an apply, as part of ([#14834](https://github.com/hashicorp/terraform/issues/14834)). This change has been reverted in this release and will be re-introduced at a later time once it has been made to work properly. - -IMPROVEMENTS: - -* provider/google: `network` argument in `google_compute_instance_group` is now optional ([#13493](https://github.com/hashicorp/terraform/issues/13493)) -* provider/google: Add support for `draining_timeout_sec` to `google_compute_backend_service`. ([#14559](https://github.com/hashicorp/terraform/issues/14559)) - -BUG FIXES: - -* provider/aws: fixed reading network configurations for `spot_fleet_request` ([#13748](https://github.com/hashicorp/terraform/issues/13748)) - -## 0.9.7 (June 7, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - -* The `lock_table` attribute in the S3 backend configuration has been deprecated in favor of `dynamodb_table`, which better reflects that the table is no longer only used for locks. 
([#14949](https://github.com/hashicorp/terraform/issues/14949)) - -FEATURES: - - * **New Data Source:** `aws_elastic_beanstalk_solution_stack` ([#14944](https://github.com/hashicorp/terraform/issues/14944)) - * **New Data Source:** `aws_elasticache_cluster` ([#14895](https://github.com/hashicorp/terraform/issues/14895)) - * **New Data Source:** `aws_ssm_parameter` ([#15035](https://github.com/hashicorp/terraform/issues/15035)) - * **New Data Source:** `azurerm_public_ip` ([#15110](https://github.com/hashicorp/terraform/issues/15110)) - * **New Resource:** `aws_ssm_parameter` ([#15035](https://github.com/hashicorp/terraform/issues/15035)) - * **New Resource:** `aws_ssm_patch_baseline` ([#14954](https://github.com/hashicorp/terraform/issues/14954)) - * **New Resource:** `aws_ssm_patch_group` ([#14954](https://github.com/hashicorp/terraform/issues/14954)) - * **New Resource:** `librato_metric` ([#14562](https://github.com/hashicorp/terraform/issues/14562)) - * **New Resource:** `digitalocean_certificate` ([#14578](https://github.com/hashicorp/terraform/issues/14578)) - * **New Resource:** `vcd_edgegateway_vpn` ([#13123](https://github.com/hashicorp/terraform/issues/13123)) - * **New Resource:** `vault_mount` ([#14456](https://github.com/hashicorp/terraform/issues/14456)) - * **New Interpolation Function:** `bcrypt` ([#14725](https://github.com/hashicorp/terraform/issues/14725)) - -IMPROVEMENTS: - -* backend/consul: Storing state to Consul now uses Check-And-Set (CAS) by default to avoid inconsistent state, and will automatically attempt to re-acquire a lock if it is lost during Terraform execution. ([#14930](https://github.com/hashicorp/terraform/issues/14930)) -* core: Remote state is now persisted more frequently to minimize data loss in the event of a crash. 
([#14834](https://github.com/hashicorp/terraform/issues/14834)) -* provider/alicloud: Add the function of replacing ecs instance's system disk ([#15048](https://github.com/hashicorp/terraform/issues/15048)) -* provider/aws: Expose RDS instance and cluster resource id ([#14882](https://github.com/hashicorp/terraform/issues/14882)) -* provider/aws: Export internal tunnel addresses + document ([#14835](https://github.com/hashicorp/terraform/issues/14835)) -* provider/aws: Fix misleading error in aws_route validation ([#14972](https://github.com/hashicorp/terraform/issues/14972)) -* provider/aws: Support import of aws_lambda_event_source_mapping ([#14898](https://github.com/hashicorp/terraform/issues/14898)) -* provider/aws: Add support for a configurable timeout in db_option_group ([#15023](https://github.com/hashicorp/terraform/issues/15023)) -* provider/aws: Add task_parameters parameter to aws_ssm_maintenance_window_task resource ([#15104](https://github.com/hashicorp/terraform/issues/15104)) -* provider/aws: Expose reason of EMR cluster termination ([#15117](https://github.com/hashicorp/terraform/issues/15117)) -* provider/aws: `data.aws_acm_certificate` can now filter by `type` ([#15063](https://github.com/hashicorp/terraform/issues/15063)) -* provider/azurerm: Ignore case sensitivity in Azurerm resource enums ([#14861](https://github.com/hashicorp/terraform/issues/14861)) -* provider/digitalocean: Add support for changing TTL on DigitalOcean domain records. 
([#14805](https://github.com/hashicorp/terraform/issues/14805)) -* provider/google: Add ability to import Google Compute persistent disks ([#14573](https://github.com/hashicorp/terraform/issues/14573)) -* provider/google: `google_container_cluster.master_auth` should be optional ([#14630](https://github.com/hashicorp/terraform/issues/14630)) -* provider/google: Add CORS support for `google_storage_bucket` ([#14695](https://github.com/hashicorp/terraform/issues/14695)) -* provider/google: Allow resizing of Google Cloud persistent disks ([#15077](https://github.com/hashicorp/terraform/issues/15077)) -* provider/google: Add private_ip_google_access update support to google_compute_subnetwork ([#15125](https://github.com/hashicorp/terraform/issues/15125)) -* provider/heroku: can now import Heroku Spaces ([#14973](https://github.com/hashicorp/terraform/issues/14973)) -* provider/kubernetes: Upgrade K8S from 1.5.3 to 1.6.1 ([#14923](https://github.com/hashicorp/terraform/issues/14923)) -* provider/kubernetes: Provide more details about why PVC failed to bind ([#15019](https://github.com/hashicorp/terraform/issues/15019)) -* provider/kubernetes: Allow sourcing config_path from `KUBECONFIG` env var ([#14889](https://github.com/hashicorp/terraform/issues/14889)) -* provider/openstack: Add support for provider networks ([#10265](https://github.com/hashicorp/terraform/issues/10265)) -* provider/openstack: Allow numerical protocols in security group rules ([#14917](https://github.com/hashicorp/terraform/issues/14917)) -* provider/openstack: Sort request/response headers in debug output ([#14956](https://github.com/hashicorp/terraform/issues/14956)) -* provider/openstack: Add support for FWaaS routerinsertion extension ([#12589](https://github.com/hashicorp/terraform/issues/12589)) -* provider/openstack: Add Terraform version to UserAgent string ([#14955](https://github.com/hashicorp/terraform/issues/14955)) -* provider/openstack: Optimize the printing of debug output 
([#15086](https://github.com/hashicorp/terraform/issues/15086)) -* provisioner/chef: Use `helpers.schema.Provisioner` in Chef provisioner V2 ([#14681](https://github.com/hashicorp/terraform/issues/14681)) - -BUG FIXES: - -* provider/alicloud: set `alicloud_nat_gateway` zone to be Computed to avoid perpetual diffs ([#15050](https://github.com/hashicorp/terraform/issues/15050)) -* provider/alicloud: set provider to read env vars for access key and secret key if empty strings ([#15050](https://github.com/hashicorp/terraform/issues/15050)) -* provider/alicloud: Fix vpc and vswitch bugs while creating vpc and vswitch ([#15082](https://github.com/hashicorp/terraform/issues/15082)) -* provider/alicloud: Fix allocating public ip bug ([#15049](https://github.com/hashicorp/terraform/issues/15049)) -* provider/alicloud: Fix security group rules nic_type bug ([#15114](https://github.com/hashicorp/terraform/issues/15114)) -* provider/aws: ForceNew aws_launch_config on ebs_block_device change ([#14899](https://github.com/hashicorp/terraform/issues/14899)) -* provider/aws: Avoid crash when EgressOnly IGW disappears ([#14929](https://github.com/hashicorp/terraform/issues/14929)) -* provider/aws: Allow IPv6/IPv4 addresses to coexist ([#13702](https://github.com/hashicorp/terraform/issues/13702)) -* provider/aws: Expect exception on deletion of APIG Usage Plan Key ([#14958](https://github.com/hashicorp/terraform/issues/14958)) -* provider/aws: Fix panic on nil dead_letter_config ([#14964](https://github.com/hashicorp/terraform/issues/14964)) -* provider/aws: Work around IAM eventual consistency in CW Log Subs ([#14959](https://github.com/hashicorp/terraform/issues/14959)) -* provider/aws: Fix ModifyInstanceAttribute on new instances ([#14992](https://github.com/hashicorp/terraform/issues/14992)) -* provider/aws: Fix issue with removing tags in aws_cloudwatch_log_group ([#14886](https://github.com/hashicorp/terraform/issues/14886)) -* provider/aws: Raise timeout for VPC DHCP options 
creation to 5 mins ([#15084](https://github.com/hashicorp/terraform/issues/15084)) -* provider/aws: Retry Redshift cluster deletion on InvalidClusterState ([#15068](https://github.com/hashicorp/terraform/issues/15068)) -* provider/aws: Retry Lambda func creation on IAM error ([#15067](https://github.com/hashicorp/terraform/issues/15067)) -* provider/aws: Retry ECS service creation on ClusterNotFoundException ([#15066](https://github.com/hashicorp/terraform/issues/15066)) -* provider/aws: Retry ECS service update on ServiceNotFoundException ([#15073](https://github.com/hashicorp/terraform/issues/15073)) -* provider/aws: Retry DB parameter group delete on InvalidDBParameterGroupState ([#15071](https://github.com/hashicorp/terraform/issues/15071)) -* provider/aws: Guard against panic when no aws_default_vpc found ([#15070](https://github.com/hashicorp/terraform/issues/15070)) -* provider/aws: Guard against panic if no NodeGroupMembers returned in `elasticache_replication_group` ([#13488](https://github.com/hashicorp/terraform/issues/13488)) -* provider/aws: Revoke default ipv6 egress rule for aws_security_group ([#15075](https://github.com/hashicorp/terraform/issues/15075)) -* provider/aws: Lambda ENI deletion fails on destroy ([#11849](https://github.com/hashicorp/terraform/issues/11849)) -* provider/aws: Add gov and cn hosted zone Ids to aws_elb_hosted_zone data source ([#15149](https://github.com/hashicorp/terraform/issues/15149)) -* provider/azurerm: VM - making `os_profile` optional ([#14176](https://github.com/hashicorp/terraform/issues/14176)) -* provider/azurerm: Preserve the Subnet properties on Update ([#13877](https://github.com/hashicorp/terraform/issues/13877)) -* provider/datadog: make datadog_user verified a computed attribute ([#15034](https://github.com/hashicorp/terraform/issues/15034)) -* provider/datadog: use correct evaluation_delay parameter ([#14878](https://github.com/hashicorp/terraform/issues/14878)) -* provider/digitalocean: Refresh DO 
loadbalancer from state if 404 ([#14897](https://github.com/hashicorp/terraform/issues/14897)) -* provider/github: Do not set incorrect values in github_team data source ([#14859](https://github.com/hashicorp/terraform/issues/14859)) -* provider/google: use a mutex to prevent concurrent sql instance operations ([#14424](https://github.com/hashicorp/terraform/issues/14424)) -* provider/google: Set instances to computed in compute_instance_group ([#15025](https://github.com/hashicorp/terraform/issues/15025)) -* provider/google: Make google_compute_autoscaler use Update instead of Patch. ([#15101](https://github.com/hashicorp/terraform/issues/15101)) -* provider/kubernetes: Ignore internal k8s labels in `kubernetes_persistent_volume` ([#13716](https://github.com/hashicorp/terraform/issues/13716)) -* provider/librato: Add retry to librato_alert ([#15118](https://github.com/hashicorp/terraform/issues/15118)) -* provider/postgresql: Fix for leaking credentials in the provider ([#14817](https://github.com/hashicorp/terraform/issues/14817)) -* provider/postgresql: Drop the optional WITH token from CREATE ROLE. ([#14864](https://github.com/hashicorp/terraform/issues/14864)) -* provider/rancher: refresh rancher_host from state on nil or removed host ([#15015](https://github.com/hashicorp/terraform/issues/15015)) - -## 0.9.6 (May 25, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - -* When assigning a "splat variable" to a resource attribute, like `foo = "${some_resource.foo.*.baz}"`, it is no longer required (nor recommended) to wrap the string in list brackets. The extra brackets continue to be allowed for resource attributes for compatibility, but this will cease to be allowed in a future version. ([#14737](https://github.com/hashicorp/terraform/issues/14737)) -* provider/aws: Allow lightsail resources to work in other regions. Previously Terraform would automatically configure lightsail resources to run solely in `us-east-1`. 
This means that if a provider was initialized with a different region than `us-east-1`, users will need to create a provider alias to maintain their lightsail resources in us-east-1 [[#14685](https://github.com/hashicorp/terraform/issues/14685)]. -* provider/aws: Users of `aws_cloudfront_distribution` `default_cache_behavior` will notice that cookies is now a required value - even if that value is none ([#12628](https://github.com/hashicorp/terraform/issues/12628)) -* provider/google: Users of `google_compute_health_check` who were not setting a value for the `host` property of `http_health_check` or `https_health_check` previously had a faulty default value. This has been fixed and will show as a change in terraform plan/apply. ([#14441](https://github.com/hashicorp/terraform/issues/14441)) - -FEATURES: - -* **New Provider:** `ovh` ([#12669](https://github.com/hashicorp/terraform/issues/12669)) -* **New Resource:** `aws_default_subnet` ([#14476](https://github.com/hashicorp/terraform/issues/14476)) -* **New Resource:** `aws_default_vpc` ([#11710](https://github.com/hashicorp/terraform/issues/11710)) -* **New Resource:** `aws_default_vpc_dhcp_options` ([#14475](https://github.com/hashicorp/terraform/issues/14475)) -* **New Resource:** `aws_devicefarm_project` ([#14288](https://github.com/hashicorp/terraform/issues/14288)) -* **New Resource:** `aws_wafregional_ipset` ([#13705](https://github.com/hashicorp/terraform/issues/13705)) -* **New Resource:** `aws_wafregional_byte_match_set` ([#13705](https://github.com/hashicorp/terraform/issues/13705)) -* **New Resource:** `azurerm_express_route_circuit` ([#14265](https://github.com/hashicorp/terraform/issues/14265)) -* **New Resource:** `gitlab_deploy_key` ([#14734](https://github.com/hashicorp/terraform/issues/14734)) -* **New Resource:** `gitlab_group` ([#14490](https://github.com/hashicorp/terraform/issues/14490)) -* **New Resource:** `google_compute_router` 
([#12411](https://github.com/hashicorp/terraform/issues/12411)) -* **New Resource:** `google_compute_router_interface` ([#12411](https://github.com/hashicorp/terraform/issues/12411)) -* **New Resource:** `google_compute_router_peer` ([#12411](https://github.com/hashicorp/terraform/issues/12411)) -* **New Resource:** `kubernetes_horizontal_pod_autoscaler` ([#14763](https://github.com/hashicorp/terraform/issues/14763)) -* **New Resource:** `kubernetes_service` ([#14554](https://github.com/hashicorp/terraform/issues/14554)) -* **New Resource:** `openstack_dns_zone_v2` ([#14721](https://github.com/hashicorp/terraform/issues/14721)) -* **New Resource:** `openstack_dns_recordset_v2` ([#14813](https://github.com/hashicorp/terraform/issues/14813)) -* **New Data Source:** `aws_db_snapshot` ([#10291](https://github.com/hashicorp/terraform/issues/10291)) -* **New Data Source:** `aws_kms_ciphertext` ([#14691](https://github.com/hashicorp/terraform/issues/14691)) -* **New Data Source:** `github_user` ([#14570](https://github.com/hashicorp/terraform/issues/14570)) -* **New Data Source:** `github_team` ([#14614](https://github.com/hashicorp/terraform/issues/14614)) -* **New Data Source:** `google_storage_object_signed_url` ([#14643](https://github.com/hashicorp/terraform/issues/14643)) -* **New Interpolation Function:** `pow` ([#14598](https://github.com/hashicorp/terraform/issues/14598)) - -IMPROVEMENTS: - -* core: After `apply`, if the state cannot be persisted to remote for some reason then write out a local state file for recovery ([#14423](https://github.com/hashicorp/terraform/issues/14423)) -* core: It's no longer required to surround an attribute value that is just a "splat" variable with a redundant set of array brackets. 
([#14737](https://github.com/hashicorp/terraform/issues/14737)) -* core/provider-split: Split out the Oracle OPC provider to new structure ([#14362](https://github.com/hashicorp/terraform/issues/14362)) -* provider/aws: Show state reason when EC2 instance fails to launch ([#14479](https://github.com/hashicorp/terraform/issues/14479)) -* provider/aws: Show last scaling activity when ASG creation/update fails ([#14480](https://github.com/hashicorp/terraform/issues/14480)) -* provider/aws: Add `tags` (list of maps) for `aws_autoscaling_group` ([#13574](https://github.com/hashicorp/terraform/issues/13574)) -* provider/aws: Support filtering in ASG data source ([#14501](https://github.com/hashicorp/terraform/issues/14501)) -* provider/aws: Add ability to 'terraform import' aws_kms_alias resources ([#14679](https://github.com/hashicorp/terraform/issues/14679)) -* provider/aws: Allow lightsail resources to work in other regions ([#14685](https://github.com/hashicorp/terraform/issues/14685)) -* provider/aws: Configurable timeouts for EC2 instance + spot instance ([#14711](https://github.com/hashicorp/terraform/issues/14711)) -* provider/aws: Add ability to define timeouts for DMS replication instance ([#14729](https://github.com/hashicorp/terraform/issues/14729)) -* provider/aws: Add support for X-Ray tracing to aws_lambda_function ([#14728](https://github.com/hashicorp/terraform/issues/14728)) -* provider/azurerm: Virtual Machine Scale Sets with managed disk support ([#13717](https://github.com/hashicorp/terraform/issues/13717)) -* provider/azurerm: Virtual Machine Scale Sets with single placement option support ([#14510](https://github.com/hashicorp/terraform/issues/14510)) -* provider/azurerm: Adding support for VMSS Data Disks using Managed Disk feature ([#14608](https://github.com/hashicorp/terraform/issues/14608)) -* provider/azurerm: Adding support for 4TB disks ([#14688](https://github.com/hashicorp/terraform/issues/14688)) -* provider/cloudstack: Load the provider 
configuration from a CloudMonkey config file ([#13926](https://github.com/hashicorp/terraform/issues/13926)) -* provider/datadog: Add last aggregator to datadog_timeboard resource ([#14391](https://github.com/hashicorp/terraform/issues/14391)) -* provider/datadog: Added new evaluation_delay parameter ([#14433](https://github.com/hashicorp/terraform/issues/14433)) -* provider/docker: Allow Windows Docker containers to map volumes ([#13584](https://github.com/hashicorp/terraform/issues/13584)) -* provider/docker: Add `network_alias` to `docker_container` resource ([#14710](https://github.com/hashicorp/terraform/issues/14710)) -* provider/fastly: Mark the `s3_access_key`, `s3_secret_key`, & `secret_key` fields as sensitive ([#14634](https://github.com/hashicorp/terraform/issues/14634)) -* provider/gitlab: Add namespace ID attribute to `gitlab_project` ([#14483](https://github.com/hashicorp/terraform/issues/14483)) -* provider/google: Add a `url` attribute to `google_storage_bucket` ([#14393](https://github.com/hashicorp/terraform/issues/14393)) -* provider/google: Make google resource storage bucket importable ([#14455](https://github.com/hashicorp/terraform/issues/14455)) -* provider/google: Add support for privateIpGoogleAccess on subnetworks ([#14234](https://github.com/hashicorp/terraform/issues/14234)) -* provider/google: Add import support to `google_sql_user` ([#14457](https://github.com/hashicorp/terraform/issues/14457)) -* provider/google: add failover parameter to `google_sql_database_instance` ([#14336](https://github.com/hashicorp/terraform/issues/14336)) -* provider/google: resource_compute_disks can now reference snapshots using the snapshot URL ([#14774](https://github.com/hashicorp/terraform/issues/14774)) -* provider/heroku: Add import support for `heroku_pipeline` resource ([#14486](https://github.com/hashicorp/terraform/issues/14486)) -* provider/heroku: Add import support for `heroku_pipeline_coupling` resource 
([#14495](https://github.com/hashicorp/terraform/issues/14495)) -* provider/heroku: Add import support for `heroku_addon` resource ([#14508](https://github.com/hashicorp/terraform/issues/14508)) -* provider/openstack: Add support for all protocols in Security Group Rules ([#14307](https://github.com/hashicorp/terraform/issues/14307)) -* provider/openstack: Add support for updating Subnet Allocation Pools ([#14782](https://github.com/hashicorp/terraform/issues/14782)) -* provider/openstack: Enable Security Group Updates ([#14815](https://github.com/hashicorp/terraform/issues/14815)) -* provider/rancher: Add member support to `rancher_environment` ([#14563](https://github.com/hashicorp/terraform/issues/14563)) -* provider/rundeck: adds `description` to `command` schema in `rundeck_job` resource ([#14352](https://github.com/hashicorp/terraform/issues/14352)) -* provider/scaleway: allow public_ip to be set on server resource ([#14515](https://github.com/hashicorp/terraform/issues/14515)) -* provider/vsphere: Exposing moid value from vm resource ([#14793](https://github.com/hashicorp/terraform/issues/14793)) - -BUG FIXES: - -* core: Store and verify checksums for S3 remote state to prevent fetching a stale state ([#14746](https://github.com/hashicorp/terraform/issues/14746)) -* core: Allow -force-unlock of an S3 named state ([#14680](https://github.com/hashicorp/terraform/issues/14680)) -* core: Fix incorrect errors when validating nested objects ([#14784](https://github.com/hashicorp/terraform/issues/14784)) ([#14801](https://github.com/hashicorp/terraform/issues/14801)) -* core: When using `-target`, any outputs that include attributes of the targeted resources are now updated ([#14186](https://github.com/hashicorp/terraform/issues/14186)) -* core: Fixed 0.9.5 regression with the conditional operator `.. ? .. 
: ..` failing to type check with unknown/computed values ([#14454](https://github.com/hashicorp/terraform/issues/14454)) -* core: Fixed 0.9 regression causing issues during refresh when adding new data resource instances using `count` ([#14098](https://github.com/hashicorp/terraform/issues/14098)) -* core: Fixed crasher when populating a "splat variable" from an empty (nil) module state. ([#14526](https://github.com/hashicorp/terraform/issues/14526)) -* core: fix bad Sprintf in backend migration message ([#14601](https://github.com/hashicorp/terraform/issues/14601)) -* core: Addressed 0.9.5 issue with passing partially-unknown splat results through module variables, by removing the requirement to pass a redundant list level. ([#14737](https://github.com/hashicorp/terraform/issues/14737)) -* provider/aws: Allow updating constraints in WAF SizeConstraintSet + no constraints ([#14661](https://github.com/hashicorp/terraform/issues/14661)) -* provider/aws: Allow updating tuples in WAF ByteMatchSet + no tuples ([#14071](https://github.com/hashicorp/terraform/issues/14071)) -* provider/aws: Allow updating tuples in WAF SQLInjectionMatchSet + no tuples ([#14667](https://github.com/hashicorp/terraform/issues/14667)) -* provider/aws: Allow updating tuples in WAF XssMatchSet + no tuples ([#14671](https://github.com/hashicorp/terraform/issues/14671)) -* provider/aws: Increase EIP update timeout ([#14381](https://github.com/hashicorp/terraform/issues/14381)) -* provider/aws: Increase timeout for creating security group ([#14380](https://github.com/hashicorp/terraform/issues/14380)] [[#14724](https://github.com/hashicorp/terraform/issues/14724)) -* provider/aws: Increase timeout for (dis)associating IPv6 addr to/from subnet ([#14401](https://github.com/hashicorp/terraform/issues/14401)) -* provider/aws: Increase timeout for retrying creation of IAM server cert ([#14609](https://github.com/hashicorp/terraform/issues/14609)) -* provider/aws: Increase timeout for deleting IGW 
([#14705](https://github.com/hashicorp/terraform/issues/14705)) -* provider/aws: Increase timeout for retrying creation of CW log subs ([#14722](https://github.com/hashicorp/terraform/issues/14722)) -* provider/aws: Using the new time schema helper for RDS Instance lifecycle mgmt ([#14369](https://github.com/hashicorp/terraform/issues/14369)) -* provider/aws: Using the timeout schema helper to make alb timeout configurable ([#14375](https://github.com/hashicorp/terraform/issues/14375)) -* provider/aws: Refresh from state when CodePipeline Not Found ([#14431](https://github.com/hashicorp/terraform/issues/14431)) -* provider/aws: Override spot_instance_requests volume_tags schema ([#14481](https://github.com/hashicorp/terraform/issues/14481)) -* provider/aws: Allow Internet Gateway IPv6 routes ([#14484](https://github.com/hashicorp/terraform/issues/14484)) -* provider/aws: ForceNew aws_launch_config when root_block_device changes ([#14507](https://github.com/hashicorp/terraform/issues/14507)) -* provider/aws: Pass IAM Roles to codepipeline actions ([#14263](https://github.com/hashicorp/terraform/issues/14263)) -* provider/aws: Create rule(s) for prefix-list-only AWS security group permissions on 'terraform import' ([#14528](https://github.com/hashicorp/terraform/issues/14528)) -* provider/aws: Set aws_subnet ipv6_cidr_block to computed ([#14542](https://github.com/hashicorp/terraform/issues/14542)) -* provider/aws: Change of aws_subnet ipv6 causing update failure ([#14545](https://github.com/hashicorp/terraform/issues/14545)) -* provider/aws: Nothing to update in cloudformation should not result in errors ([#14463](https://github.com/hashicorp/terraform/issues/14463)) -* provider/aws: Handling data migration in RDS snapshot restoring ([#14622](https://github.com/hashicorp/terraform/issues/14622)) -* provider/aws: Mark cookies in `default_cache_behaviour` of cloudfront_distribution as required ([#12628](https://github.com/hashicorp/terraform/issues/12628)) -* 
provider/aws: Fall back to old tagging mechanism for AWS gov and aws China ([#14627](https://github.com/hashicorp/terraform/issues/14627)) -* provider/aws: Change AWS ssm_maintenance_window Read func ([#14665](https://github.com/hashicorp/terraform/issues/14665)) -* provider/aws: Increase timeout for creation of route_table ([#14701](https://github.com/hashicorp/terraform/issues/14701)) -* provider/aws: Retry ElastiCache cluster deletion when it's snapshotting ([#14700](https://github.com/hashicorp/terraform/issues/14700)) -* provider/aws: Retry ECS service update on InvalidParameterException ([#14708](https://github.com/hashicorp/terraform/issues/14708)) -* provider/aws: Retry IAM Role deletion on DeleteConflict ([#14707](https://github.com/hashicorp/terraform/issues/14707)) -* provider/aws: Do not dereference source_Dest_check in aws_instance ([#14723](https://github.com/hashicorp/terraform/issues/14723)) -* provider/aws: Add validation function for IAM Policies ([#14669](https://github.com/hashicorp/terraform/issues/14669)) -* provider/aws: Fix panic on instance shutdown ([#14727](https://github.com/hashicorp/terraform/issues/14727)) -* provider/aws: Handle migration when restoring db cluster from snapshot ([#14766](https://github.com/hashicorp/terraform/issues/14766)) -* provider/aws: Provider ability to enable snapshotting on ElastiCache RG ([#14757](https://github.com/hashicorp/terraform/issues/14757)) -* provider/cloudstack: `cloudstack_firewall` panicked when used with older (< v4.6) CloudStack versions ([#14044](https://github.com/hashicorp/terraform/issues/14044)) -* provider/datadog: Allowed method on aggregator is `avg` ! 
`average` ([#14414](https://github.com/hashicorp/terraform/issues/14414)) -* provider/digitalocean: Fix parsing of digitalocean dns records ([#14215](https://github.com/hashicorp/terraform/issues/14215)) -* provider/github: Log HTTP requests and responses in DEBUG mode ([#14363](https://github.com/hashicorp/terraform/issues/14363)) -* provider/github Check for potentially nil response from GitHub API client ([#14683](https://github.com/hashicorp/terraform/issues/14683)) -* provider/google: Fix health check http/https defaults ([#14441](https://github.com/hashicorp/terraform/issues/14441)) -* provider/google: Fix issue with GCP Cloud SQL Instance `disk_autoresize` ([#14582](https://github.com/hashicorp/terraform/issues/14582)) -* provider/google: Fix crash creating Google Cloud SQL 2nd Generation replication instance ([#14373](https://github.com/hashicorp/terraform/issues/14373)) -* provider/google: Disks now detach before getting deleted ([#14651](https://github.com/hashicorp/terraform/issues/14651)) -* provider/google: Update `google_compute_target_pool`'s session_affinity default ([#14807](https://github.com/hashicorp/terraform/issues/14807)) -* provider/heroku: Fix issue with setting correct CName in heroku_domain ([#14443](https://github.com/hashicorp/terraform/issues/14443)) -* provider/opc: Correctly export `ip_address` in IP Addr Reservation ([#14543](https://github.com/hashicorp/terraform/issues/14543)) -* provider/openstack: Handle Deleted Resources in Floating IP Association ([#14533](https://github.com/hashicorp/terraform/issues/14533)) -* provider/openstack: Catch error during instance network parsing ([#14704](https://github.com/hashicorp/terraform/issues/14704)) -* provider/vault: Prevent panic when no secret found ([#14435](https://github.com/hashicorp/terraform/issues/14435)) - -## 0.9.5 (May 11, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - -* provider/aws: Users of aws_cloudfront_distributions with custom_origins have been broken due to changes 
in the AWS API requiring `OriginReadTimeout` being set for updates. This has been fixed and will show as a change in terraform plan / apply. ([#13367](https://github.com/hashicorp/terraform/issues/13367)) -* provider/aws: Users of China and Gov clouds, cannot use the new tagging of volumes created as part of aws_instances ([#14055](https://github.com/hashicorp/terraform/issues/14055)) -* provider/aws: Skip tag operations on cloudwatch logs in govcloud partition. Currently not supported by Amazon. ([#12414](https://github.com/hashicorp/terraform/issues/12414)) -* provider/aws: More consistent (un)quoting of long TXT/SPF `aws_route53_record`s. - Previously we were trimming first 2 quotes and now we're (correctly) trimming first and last one. - Depending on the use of quotes in your TXT/SPF records this may result in extra diff in plan/apply ([#14170](https://github.com/hashicorp/terraform/issues/14170)) - -FEATURES: - -* **New Provider:** `gitlab` ([#13898](https://github.com/hashicorp/terraform/issues/13898)) -* **New Resource:** `aws_emr_security_configuration` ([#14080](https://github.com/hashicorp/terraform/issues/14080)) -* **New Resource:** `aws_ssm_maintenance_window` ([#14087](https://github.com/hashicorp/terraform/issues/14087)) -* **New Resource:** `aws_ssm_maintenance_window_target` ([#14087](https://github.com/hashicorp/terraform/issues/14087)) -* **New Resource:** `aws_ssm_maintenance_window_task` ([#14087](https://github.com/hashicorp/terraform/issues/14087)) -* **New Resource:** `azurerm_sql_elasticpool` ([#14099](https://github.com/hashicorp/terraform/issues/14099)) -* **New Resource:** `google_bigquery_table` ([#13743](https://github.com/hashicorp/terraform/issues/13743)) -* **New Resource:** `google_compute_backend_bucket` ([#14015](https://github.com/hashicorp/terraform/issues/14015)) -* **New Resource:** `google_compute_snapshot` ([#12482](https://github.com/hashicorp/terraform/issues/12482)) -* **New Resource:** `heroku_app_feature` 
([#14035](https://github.com/hashicorp/terraform/issues/14035)) -* **New Resource:** `heroku_pipeline` ([#14078](https://github.com/hashicorp/terraform/issues/14078)) -* **New Resource:** `heroku_pipeline_coupling` ([#14078](https://github.com/hashicorp/terraform/issues/14078)) -* **New Resource:** `kubernetes_limit_range` ([#14285](https://github.com/hashicorp/terraform/issues/14285)) -* **New Resource:** `kubernetes_resource_quota` ([#13914](https://github.com/hashicorp/terraform/issues/13914)) -* **New Resource:** `vault_auth_backend` ([#10988](https://github.com/hashicorp/terraform/issues/10988)) -* **New Data Source:** `aws_efs_file_system` ([#14041](https://github.com/hashicorp/terraform/issues/14041)) -* **New Data Source:** `http`, for retrieving text data from generic HTTP servers ([#14270](https://github.com/hashicorp/terraform/issues/14270)) -* **New Data Source:** `google_container_engine_versions`, for retrieving valid versions for clusters ([#14280](https://github.com/hashicorp/terraform/issues/14280)) -* **New Interpolation Function:** `log`, for computing logarithms ([#12872](https://github.com/hashicorp/terraform/issues/12872)) - -IMPROVEMENTS: - -* core: `sha512` and `base64sha512` interpolation functions, similar to their `sha256` equivalents. ([#14100](https://github.com/hashicorp/terraform/issues/14100)) -* core: It's now possible to use the index operator `[ ]` to select a known value out of a partially-known list, such as using "splat syntax" and increasing the `count`. 
([#14135](https://github.com/hashicorp/terraform/issues/14135)) -* provider/aws: Add support for CustomOrigin timeouts to aws_cloudfront_distribution ([#13367](https://github.com/hashicorp/terraform/issues/13367)) -* provider/aws: Add support for IAMDatabaseAuthenticationEnabled ([#14092](https://github.com/hashicorp/terraform/issues/14092)) -* provider/aws: aws_dynamodb_table Add support for TimeToLive ([#14104](https://github.com/hashicorp/terraform/issues/14104)) -* provider/aws: Add `security_configuration` support to `aws_emr_cluster` ([#14133](https://github.com/hashicorp/terraform/issues/14133)) -* provider/aws: Add support for the tenancy placement option in `aws_spot_fleet_request` ([#14163](https://github.com/hashicorp/terraform/issues/14163)) -* provider/aws: `aws_db_option_group` normalizes name to lowercase ([#14192](https://github.com/hashicorp/terraform/issues/14192), [#14366](https://github.com/hashicorp/terraform/issues/14366)) -* provider/aws: Add support description to aws_iam_role ([#14208](https://github.com/hashicorp/terraform/issues/14208)) -* provider/aws: Add support for SSM Documents to aws_cloudwatch_event_target ([#14067](https://github.com/hashicorp/terraform/issues/14067)) -* provider/aws: add additional custom service endpoint options for CloudFormation, KMS, RDS, SNS & SQS ([#14097](https://github.com/hashicorp/terraform/issues/14097)) -* provider/aws: Add ARN to security group data source ([#14245](https://github.com/hashicorp/terraform/issues/14245)) -* provider/aws: Improve the wording of DynamoDB Validation error message ([#14256](https://github.com/hashicorp/terraform/issues/14256)) -* provider/aws: Add support for importing Kinesis Streams ([#14278](https://github.com/hashicorp/terraform/issues/14278)) -* provider/aws: Add `arn` attribute to `aws_ses_domain_identity` resource ([#14306](https://github.com/hashicorp/terraform/issues/14306)) -* provider/aws: Add support for targets to aws_ssm_association 
([#14246](https://github.com/hashicorp/terraform/issues/14246)) -* provider/aws: native redis clustering support for elasticache ([#14317](https://github.com/hashicorp/terraform/issues/14317)) -* provider/aws: Support updating `aws_waf_rule` predicates ([#14089](https://github.com/hashicorp/terraform/issues/14089)) -* provider/azurerm: `azurerm_template_deployment` now supports String/Int/Boolean outputs ([#13670](https://github.com/hashicorp/terraform/issues/13670)) -* provider/azurerm: Expose the Private IP Address for a Load Balancer, if available ([#13965](https://github.com/hashicorp/terraform/issues/13965)) -* provider/dns: Fix data dns txt record set ([#14271](https://github.com/hashicorp/terraform/issues/14271)) -* provider/dnsimple: Add support for import for dnsimple_records ([#9130](https://github.com/hashicorp/terraform/issues/9130)) -* provider/dyn: Add verbose Dyn provider logs ([#14076](https://github.com/hashicorp/terraform/issues/14076)) -* provider/google: Add support for networkIP in compute instance templates ([#13515](https://github.com/hashicorp/terraform/issues/13515)) -* provider/google: google_dns_managed_zone is now importable ([#13824](https://github.com/hashicorp/terraform/issues/13824)) -* provider/google: Add support for `compute_route` ([#14065](https://github.com/hashicorp/terraform/issues/14065)) -* provider/google: Add `path` to `google_pubsub_subscription` ([#14238](https://github.com/hashicorp/terraform/issues/14238)) -* provider/google: Improve Service Account by offering to recreate if missing ([#14282](https://github.com/hashicorp/terraform/issues/14282)) -* provider/google: Log HTTP requests and responses in DEBUG mode ([#14281](https://github.com/hashicorp/terraform/issues/14281)) -* provider/google: Add additional properties for google resource storage bucket object ([#14259](https://github.com/hashicorp/terraform/issues/14259)) -* provider/google: Handle all 404 checks in read functions via the new function 
([#14335](https://github.com/hashicorp/terraform/issues/14335)) -* provider/heroku: import heroku_app resource ([#14248](https://github.com/hashicorp/terraform/issues/14248)) -* provider/nomad: Add TLS options ([#13956](https://github.com/hashicorp/terraform/issues/13956)) -* provider/triton: Add support for reading provider configuration from `TRITON_*` environment variables in addition to `SDC_*`([#14000](https://github.com/hashicorp/terraform/issues/14000)) -* provider/triton: Add `cloud_config` argument to `triton_machine` resources for Linux containers ([#12840](https://github.com/hashicorp/terraform/issues/12840)) -* provider/triton: Add `insecure_skip_tls_verify` ([#14077](https://github.com/hashicorp/terraform/issues/14077)) - -BUG FIXES: - -* core: `module` blocks without names are now caught in validation, along with various other block types ([#14162](https://github.com/hashicorp/terraform/issues/14162)) -* core: no longer will errors and normal log output get garbled together on Windows ([#14194](https://github.com/hashicorp/terraform/issues/14194)) -* core: Avoid crash on empty TypeSet blocks ([#14305](https://github.com/hashicorp/terraform/issues/14305)) -* provider/aws: Update aws_ebs_volume when attached ([#14005](https://github.com/hashicorp/terraform/issues/14005)) -* provider/aws: Set aws_instance volume_tags to be Computed ([#14007](https://github.com/hashicorp/terraform/issues/14007)) -* provider/aws: Fix issue getting partition for federated users ([#13992](https://github.com/hashicorp/terraform/issues/13992)) -* provider/aws: aws_spot_instance_request not forcenew on volume_tags ([#14046](https://github.com/hashicorp/terraform/issues/14046)) -* provider/aws: Exclude aws_instance volume tagging for China and Gov Clouds ([#14055](https://github.com/hashicorp/terraform/issues/14055)) -* provider/aws: Fix source_dest_check with network_interface ([#14079](https://github.com/hashicorp/terraform/issues/14079)) -* provider/aws: Fixes the bug where 
SNS delivery policy get always recreated ([#14064](https://github.com/hashicorp/terraform/issues/14064)) -* provider/aws: Increase timeouts for Route Table retries ([#14345](https://github.com/hashicorp/terraform/issues/14345)) -* provider/aws: Prevent Crash when importing aws_route53_record ([#14218](https://github.com/hashicorp/terraform/issues/14218)) -* provider/aws: More consistent (un)quoting of long TXT/SPF `aws_route53_record`s ([#14170](https://github.com/hashicorp/terraform/issues/14170)) -* provider/aws: Retry deletion of AWSConfig Rule on ResourceInUseException ([#14269](https://github.com/hashicorp/terraform/issues/14269)) -* provider/aws: Refresh ssm document from state on 404 ([#14279](https://github.com/hashicorp/terraform/issues/14279)) -* provider/aws: Allow zero-value ELB and ALB names ([#14304](https://github.com/hashicorp/terraform/issues/14304)) -* provider/aws: Update the ignoring of AWS specific tags ([#14321](https://github.com/hashicorp/terraform/issues/14321)) -* provider/aws: Adding IPv6 address to instance causes perpetual diff ([#14355](https://github.com/hashicorp/terraform/issues/14355)) -* provider/aws: Fix SG update on instance with multiple network interfaces ([#14299](https://github.com/hashicorp/terraform/issues/14299)) -* provider/azurerm: Fixing a bug in `azurerm_network_interface` ([#14365](https://github.com/hashicorp/terraform/issues/14365)) -* provider/digitalocean: Prevent diffs when using IDs of images instead of slugs ([#13879](https://github.com/hashicorp/terraform/issues/13879)) -* provider/fastly: Changes setting conditionals to optional ([#14103](https://github.com/hashicorp/terraform/issues/14103)) -* provider/google: Ignore certain project services that can't be enabled directly via the api ([#13730](https://github.com/hashicorp/terraform/issues/13730)) -* provider/google: Ability to add more than 25 project services ([#13758](https://github.com/hashicorp/terraform/issues/13758)) -* provider/google: Fix compute 
instance panic with bad disk config ([#14169](https://github.com/hashicorp/terraform/issues/14169)) -* provider/google: Handle `google_storage_bucket_object` not being found ([#14203](https://github.com/hashicorp/terraform/issues/14203)) -* provider/google: Handle `google_compute_instance_group_manager` not being found ([#14190](https://github.com/hashicorp/terraform/issues/14190)) -* provider/google: better visibility for compute_region_backend_service ([#14301](https://github.com/hashicorp/terraform/issues/14301)) -* provider/heroku: Configure buildpacks correctly for both Org Apps and non-org Apps ([#13990](https://github.com/hashicorp/terraform/issues/13990)) -* provider/heroku: Fix `heroku_cert` update of ssl cert ([#14240](https://github.com/hashicorp/terraform/issues/14240)) -* provider/openstack: Handle disassociating deleted FloatingIP's from a server ([#14210](https://github.com/hashicorp/terraform/issues/14210)) -* provider/postgres grant role when creating database ([#11452](https://github.com/hashicorp/terraform/issues/11452)) -* provider/triton: Make triton machine deletes synchronous. ([#14368](https://github.com/hashicorp/terraform/issues/14368)) -* provisioner/remote-exec: Fix panic from remote_exec provisioner ([#14134](https://github.com/hashicorp/terraform/issues/14134)) - -## 0.9.4 (26th April 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/template: Fix invalid MIME formatting in `template_cloudinit_config`. - While the change itself is not breaking the data source it may be referenced - e.g. 
in `aws_launch_configuration` and similar resources which are immutable - and the formatting change will therefore trigger recreation ([#13752](https://github.com/hashicorp/terraform/issues/13752)) - -FEATURES: - -* **New Provider:** `opc` - Oracle Public Cloud ([#13468](https://github.com/hashicorp/terraform/issues/13468)) -* **New Provider:** `oneandone` ([#13633](https://github.com/hashicorp/terraform/issues/13633)) -* **New Data Source:** `aws_ami_ids` ([#13844](https://github.com/hashicorp/terraform/issues/13844), [#13866](https://github.com/hashicorp/terraform/issues/13866)) -* **New Data Source:** `aws_ebs_snapshot_ids` ([#13844](https://github.com/hashicorp/terraform/issues/13844), [#13866](https://github.com/hashicorp/terraform/issues/13866)) -* **New Data Source:** `aws_kms_alias` ([#13669](https://github.com/hashicorp/terraform/issues/13669)) -* **New Data Source:** `aws_kinesis_stream` ([#13562](https://github.com/hashicorp/terraform/issues/13562)) -* **New Data Source:** `digitalocean_image` ([#13787](https://github.com/hashicorp/terraform/issues/13787)) -* **New Data Source:** `google_compute_network` ([#12442](https://github.com/hashicorp/terraform/issues/12442)) -* **New Data Source:** `google_compute_subnetwork` ([#12442](https://github.com/hashicorp/terraform/issues/12442)) -* **New Resource:** `local_file` for creating local files (please see the docs for caveats) ([#12757](https://github.com/hashicorp/terraform/issues/12757)) -* **New Resource:** `alicloud_ess_scalinggroup` ([#13731](https://github.com/hashicorp/terraform/issues/13731)) -* **New Resource:** `alicloud_ess_scalingconfiguration` ([#13731](https://github.com/hashicorp/terraform/issues/13731)) -* **New Resource:** `alicloud_ess_scalingrule` ([#13731](https://github.com/hashicorp/terraform/issues/13731)) -* **New Resource:** `alicloud_ess_schedule` ([#13731](https://github.com/hashicorp/terraform/issues/13731)) -* **New Resource:** `alicloud_snat_entry` 
([#13731](https://github.com/hashicorp/terraform/issues/13731)) -* **New Resource:** `alicloud_forward_entry` ([#13731](https://github.com/hashicorp/terraform/issues/13731)) -* **New Resource:** `aws_cognito_identity_pool` ([#13783](https://github.com/hashicorp/terraform/issues/13783)) -* **New Resource:**  `aws_network_interface_attachment` ([#13861](https://github.com/hashicorp/terraform/issues/13861)) -* **New Resource:** `github_branch_protection` ([#10476](https://github.com/hashicorp/terraform/issues/10476)) -* **New Resource:** `google_bigquery_dataset` ([#13436](https://github.com/hashicorp/terraform/issues/13436)) -* **New Resource:** `heroku_space` ([#13921](https://github.com/hashicorp/terraform/issues/13921)) -* **New Resource:** `template_dir` for producing a directory from templates ([#13652](https://github.com/hashicorp/terraform/issues/13652)) -* **New Interpolation Function:** `coalescelist()` ([#12537](https://github.com/hashicorp/terraform/issues/12537)) - - -IMPROVEMENTS: - - * core: Add a `-reconfigure` flag to the `init` command, to configure a backend while ignoring any saved configuration ([#13825](https://github.com/hashicorp/terraform/issues/13825)) - * helper/schema: Disallow validation+diff suppression on computed fields ([#13878](https://github.com/hashicorp/terraform/issues/13878)) - * config: The interpolation function `cidrhost` now accepts a negative host number to count backwards from the end of the range ([#13765](https://github.com/hashicorp/terraform/issues/13765)) - * config: New interpolation function `matchkeys` for using values from one list to filter corresponding values from another list using a matching set. 
([#13847](https://github.com/hashicorp/terraform/issues/13847)) - * state/remote/swift: Support Openstack request logging ([#13583](https://github.com/hashicorp/terraform/issues/13583)) - * provider/aws: Add an option to skip getting the supported EC2 platforms ([#13672](https://github.com/hashicorp/terraform/issues/13672)) - * provider/aws: Add `name_prefix` support to `aws_cloudwatch_log_group` ([#13273](https://github.com/hashicorp/terraform/issues/13273)) - * provider/aws: Add `bucket_prefix` to `aws_s3_bucket` ([#13274](https://github.com/hashicorp/terraform/issues/13274)) - * provider/aws: Add replica_source_db to the aws_db_instance datasource ([#13842](https://github.com/hashicorp/terraform/issues/13842)) - * provider/aws: Add IPv6 outputs to aws_subnet datasource ([#13841](https://github.com/hashicorp/terraform/issues/13841)) - * provider/aws: Exercise SecondaryPrivateIpAddressCount for network interface ([#10590](https://github.com/hashicorp/terraform/issues/10590)) - * provider/aws: Expose execution ARN + invoke URL for APIG deployment ([#13889](https://github.com/hashicorp/terraform/issues/13889)) - * provider/aws: Expose invoke ARN from Lambda function (for API Gateway) ([#13890](https://github.com/hashicorp/terraform/issues/13890)) - * provider/aws: Add tagging support to the 'aws_lambda_function' resource ([#13873](https://github.com/hashicorp/terraform/issues/13873)) - * provider/aws: Validate WAF metric names ([#13885](https://github.com/hashicorp/terraform/issues/13885)) - * provider/aws: Allow AWS Subnet to change IPv6 CIDR Block without ForceNew ([#13909](https://github.com/hashicorp/terraform/issues/13909)) - * provider/aws: Allow filtering of aws_subnet_ids by tags ([#13937](https://github.com/hashicorp/terraform/issues/13937)) - * provider/aws: Support aws_instance and volume tagging on creation ([#13945](https://github.com/hashicorp/terraform/issues/13945)) - * provider/aws: Add network_interface to aws_instance 
([#12933](https://github.com/hashicorp/terraform/issues/12933)) - * provider/azurerm: VM Scale Sets - import support ([#13464](https://github.com/hashicorp/terraform/issues/13464)) - * provider/azurerm: Allow Azure China region support ([#13767](https://github.com/hashicorp/terraform/issues/13767)) - * provider/digitalocean: Export droplet prices ([#13720](https://github.com/hashicorp/terraform/issues/13720)) - * provider/fastly: Add support for GCS logging ([#13553](https://github.com/hashicorp/terraform/issues/13553)) - * provider/google: `google_compute_address` and `google_compute_global_address` are now importable ([#13270](https://github.com/hashicorp/terraform/issues/13270)) - * provider/google: `google_compute_network` is now importable ([#13834](https://github.com/hashicorp/terraform/issues/13834)) - * provider/google: add attached_disk field to google_compute_instance ([#13443](https://github.com/hashicorp/terraform/issues/13443)) - * provider/heroku: Set App buildpacks from config ([#13910](https://github.com/hashicorp/terraform/issues/13910)) - * provider/heroku: Create Heroku app in a private space ([#13862](https://github.com/hashicorp/terraform/issues/13862)) - * provider/vault: `vault_generic_secret` resource can now optionally detect drift if it has appropriate access ([#11776](https://github.com/hashicorp/terraform/issues/11776)) - -BUG FIXES: - - * core: Prevent resource.Retry from adding untracked resources after the timeout: ([#13778](https://github.com/hashicorp/terraform/issues/13778)) - * core: Allow a schema.TypeList to be ForceNew and computed ([#13863](https://github.com/hashicorp/terraform/issues/13863)) - * core: Fix crash when refresh or apply build an invalid graph ([#13665](https://github.com/hashicorp/terraform/issues/13665)) - * core: Add the close provider/provisioner transformers back ([#13102](https://github.com/hashicorp/terraform/issues/13102)) - * core: Fix a crash condition by improving the flatmap.Expand() logic 
([#13541](https://github.com/hashicorp/terraform/issues/13541)) - * provider/alicloud: Fix create PrePaid instance ([#13662](https://github.com/hashicorp/terraform/issues/13662)) - * provider/alicloud: Fix allocate public ip error ([#13268](https://github.com/hashicorp/terraform/issues/13268)) - * provider/alicloud: alicloud_security_group_rule: check ptr before use it [[#13731](https://github.com/hashicorp/terraform/issues/13731)) - * provider/alicloud: alicloud_instance: fix ecs internet_max_bandwidth_out cannot set zero bug ([#13731](https://github.com/hashicorp/terraform/issues/13731)) - * provider/aws: Allow force-destroying `aws_route53_zone` which has trailing dot ([#12421](https://github.com/hashicorp/terraform/issues/12421)) - * provider/aws: Allow GovCloud KMS ARNs to pass validation in `kms_key_id` attributes ([#13699](https://github.com/hashicorp/terraform/issues/13699)) - * provider/aws: Changing aws_opsworks_instance should ForceNew ([#13839](https://github.com/hashicorp/terraform/issues/13839)) - * provider/aws: Fix DB Parameter Group Name ([#13279](https://github.com/hashicorp/terraform/issues/13279)) - * provider/aws: Fix issue importing some Security Groups and Rules based on rule structure ([#13630](https://github.com/hashicorp/terraform/issues/13630)) - * provider/aws: Fix issue for cross account IAM role with `aws_lambda_permission` ([#13865](https://github.com/hashicorp/terraform/issues/13865)) - * provider/aws: Fix WAF IPSet descriptors removal on update ([#13766](https://github.com/hashicorp/terraform/issues/13766)) - * provider/aws: Increase default number of retries from 11 to 25 ([#13673](https://github.com/hashicorp/terraform/issues/13673)) - * provider/aws: Remove aws_vpc_dhcp_options if not found ([#13610](https://github.com/hashicorp/terraform/issues/13610)) - * provider/aws: Remove aws_network_acl_rule if not found ([#13608](https://github.com/hashicorp/terraform/issues/13608)) - * provider/aws: Use mutex & retry for WAF change 
operations ([#13656](https://github.com/hashicorp/terraform/issues/13656)) - * provider/aws: Adding support for ipv6 to aws_subnets needs migration ([#13876](https://github.com/hashicorp/terraform/issues/13876)) - * provider/aws: Fix validation of the `name_prefix` parameter of the `aws_alb` resource ([#13441](https://github.com/hashicorp/terraform/issues/13441)) - * provider/azurerm: azurerm_redis_cache resource missing hostname ([#13650](https://github.com/hashicorp/terraform/issues/13650)) - * provider/azurerm: Locking around Network Security Group / Subnets ([#13637](https://github.com/hashicorp/terraform/issues/13637)) - * provider/azurerm: Locking route table on subnet create/delete ([#13791](https://github.com/hashicorp/terraform/issues/13791)) - * provider/azurerm: VM's - fixes a bug where ssh_keys could contain a null entry ([#13755](https://github.com/hashicorp/terraform/issues/13755)) - * provider/azurerm: VM's - ignoring the case on the `create_option` field during Diff's ([#13933](https://github.com/hashicorp/terraform/issues/13933)) - * provider/azurerm: fixing a bug refreshing the `azurerm_redis_cache` ([#13899](https://github.com/hashicorp/terraform/issues/13899)) - * provider/fastly: Fix issue with using 0 for `default_ttl` ([#13648](https://github.com/hashicorp/terraform/issues/13648)) - * provider/google: Fix panic in GKE provisioning with addons ([#13954](https://github.com/hashicorp/terraform/issues/13954)) - * provider/fastly: Add ability to associate a healthcheck to a backend ([#13539](https://github.com/hashicorp/terraform/issues/13539)) - * provider/google: Stop setting the id when project creation fails ([#13644](https://github.com/hashicorp/terraform/issues/13644)) - * provider/google: Make ports in resource_compute_forwarding_rule ForceNew ([#13833](https://github.com/hashicorp/terraform/issues/13833)) - * provider/google: Validation fixes for forwarding rules ([#13952](https://github.com/hashicorp/terraform/issues/13952)) - * 
provider/ignition: Internal cache moved to global, instead per provider instance ([#13919](https://github.com/hashicorp/terraform/issues/13919)) - * provider/logentries: Refresh from state when resources not found ([#13810](https://github.com/hashicorp/terraform/issues/13810)) - * provider/newrelic: newrelic_alert_condition - `condition_scope` must be `application` or `instance` ([#12972](https://github.com/hashicorp/terraform/issues/12972)) - * provider/opc: fixed issue with unqualifying nats ([#13826](https://github.com/hashicorp/terraform/issues/13826)) - * provider/opc: Fix instance label if unset ([#13846](https://github.com/hashicorp/terraform/issues/13846)) - * provider/openstack: Fix updating Ports ([#13604](https://github.com/hashicorp/terraform/issues/13604)) - * provider/rabbitmq: Allow users without tags ([#13798](https://github.com/hashicorp/terraform/issues/13798)) - -## 0.9.3 (April 12, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * provider/aws: Fix a critical bug in `aws_emr_cluster` in order to preserve the ordering - of any arguments in `bootstrap_action`. Terraform will now enforce the ordering - from the configuration. 
As a result, `aws_emr_cluster` resources may need to be - recreated, as there is no API to update them in-place ([#13580](https://github.com/hashicorp/terraform/issues/13580)) - -FEATURES: - - * **New Resource:** `aws_api_gateway_method_settings` ([#13542](https://github.com/hashicorp/terraform/issues/13542)) - * **New Resource:** `aws_api_gateway_stage` ([#13540](https://github.com/hashicorp/terraform/issues/13540)) - * **New Resource:** `aws_iam_openid_connect_provider` ([#13456](https://github.com/hashicorp/terraform/issues/13456)) - * **New Resource:** `aws_lightsail_static_ip` ([#13175](https://github.com/hashicorp/terraform/issues/13175)) - * **New Resource:** `aws_lightsail_static_ip_attachment` ([#13207](https://github.com/hashicorp/terraform/issues/13207)) - * **New Resource:** `aws_ses_domain_identity` ([#13098](https://github.com/hashicorp/terraform/issues/13098)) - * **New Resource:** `azurerm_managed_disk` ([#12455](https://github.com/hashicorp/terraform/issues/12455)) - * **New Resource:** `kubernetes_persistent_volume` ([#13277](https://github.com/hashicorp/terraform/issues/13277)) - * **New Resource:** `kubernetes_persistent_volume_claim` ([#13527](https://github.com/hashicorp/terraform/issues/13527)) - * **New Resource:** `kubernetes_secret` ([#12960](https://github.com/hashicorp/terraform/issues/12960)) - * **New Data Source:** `aws_iam_role` ([#13213](https://github.com/hashicorp/terraform/issues/13213)) - -IMPROVEMENTS: - - * core: add `-lock-timeout` option, which will block and retry locks for the given duration ([#13262](https://github.com/hashicorp/terraform/issues/13262)) - * core: new `chomp` interpolation function which returns the given string with any trailing newline characters removed ([#13419](https://github.com/hashicorp/terraform/issues/13419)) - * backend/remote-state: Add support for assume role extensions to s3 backend ([#13236](https://github.com/hashicorp/terraform/issues/13236)) - * backend/remote-state: Filter extra entries 
from s3 environment listings ([#13596](https://github.com/hashicorp/terraform/issues/13596)) - * config: New interpolation functions `basename` and `dirname`, for file path manipulation ([#13080](https://github.com/hashicorp/terraform/issues/13080)) - * helper/resource: Allow unknown "pending" states ([#13099](https://github.com/hashicorp/terraform/issues/13099)) - * command/hook_ui: Increase max length of state IDs from 20 to 80 ([#13317](https://github.com/hashicorp/terraform/issues/13317)) - * provider/aws: Add support to set iam_role_arn on cloudformation Stack ([#12547](https://github.com/hashicorp/terraform/issues/12547)) - * provider/aws: Support priority and listener_arn update of alb_listener_rule ([#13125](https://github.com/hashicorp/terraform/issues/13125)) - * provider/aws: Deprecate roles in favour of role in iam_instance_profile ([#13130](https://github.com/hashicorp/terraform/issues/13130)) - * provider/aws: Make alb_target_group_attachment port optional ([#13139](https://github.com/hashicorp/terraform/issues/13139)) - * provider/aws: `aws_api_gateway_domain_name` `certificate_private_key` field marked as sensitive ([#13147](https://github.com/hashicorp/terraform/issues/13147)) - * provider/aws: `aws_directory_service_directory` `password` field marked as sensitive ([#13147](https://github.com/hashicorp/terraform/issues/13147)) - * provider/aws: `aws_kinesis_firehose_delivery_stream` `password` field marked as sensitive ([#13147](https://github.com/hashicorp/terraform/issues/13147)) - * provider/aws: `aws_opsworks_application` `app_source.0.password` & `ssl_configuration.0.private_key` fields marked as sensitive ([#13147](https://github.com/hashicorp/terraform/issues/13147)) - * provider/aws: `aws_opsworks_stack` `custom_cookbooks_source.0.password` field marked as sensitive ([#13147](https://github.com/hashicorp/terraform/issues/13147)) - * provider/aws: Support the ability to enable / disable ipv6 support in VPC 
([#12527](https://github.com/hashicorp/terraform/issues/12527)) - * provider/aws: Added API Gateway integration update ([#13249](https://github.com/hashicorp/terraform/issues/13249)) - * provider/aws: Add `identifier` | `name_prefix` to RDS resources ([#13232](https://github.com/hashicorp/terraform/issues/13232)) - * provider/aws: Validate `aws_ecs_task_definition.container_definitions` ([#12161](https://github.com/hashicorp/terraform/issues/12161)) - * provider/aws: Update caller_identity data source ([#13092](https://github.com/hashicorp/terraform/issues/13092)) - * provider/aws: `aws_subnet_ids` data source for getting a list of subnet ids matching certain criteria ([#13188](https://github.com/hashicorp/terraform/issues/13188)) - * provider/aws: Support ip_address_type for aws_alb ([#13227](https://github.com/hashicorp/terraform/issues/13227)) - * provider/aws: Migrate `aws_dms_*` resources away from AWS waiters ([#13291](https://github.com/hashicorp/terraform/issues/13291)) - * provider/aws: Add support for treat_missing_data to cloudwatch_metric_alarm ([#13358](https://github.com/hashicorp/terraform/issues/13358)) - * provider/aws: Add support for evaluate_low_sample_count_percentiles to cloudwatch_metric_alarm ([#13371](https://github.com/hashicorp/terraform/issues/13371)) - * provider/aws: Add `name_prefix` to `aws_alb_target_group` ([#13442](https://github.com/hashicorp/terraform/issues/13442)) - * provider/aws: Add support for EMR clusters to aws_appautoscaling_target ([#13368](https://github.com/hashicorp/terraform/issues/13368)) - * provider/aws: Add import capabilities to codecommit_repository ([#13577](https://github.com/hashicorp/terraform/issues/13577)) - * provider/bitbucket: Improved error handling ([#13390](https://github.com/hashicorp/terraform/issues/13390)) - * provider/cloudstack: Do not force a new resource when updating `cloudstack_loadbalancer_rule` members ([#11786](https://github.com/hashicorp/terraform/issues/11786)) - * provider/fastly: 
Add support for Sumologic logging ([#12541](https://github.com/hashicorp/terraform/issues/12541)) - * provider/github: Handle the case when issue labels already exist ([#13182](https://github.com/hashicorp/terraform/issues/13182)) - * provider/google: Mark `google_container_cluster`'s `client_key` & `password` inside `master_auth` as sensitive ([#13148](https://github.com/hashicorp/terraform/issues/13148)) - * provider/google: Add node_pool field in resource_container_cluster ([#13402](https://github.com/hashicorp/terraform/issues/13402)) - * provider/kubernetes: Allow defining custom config context ([#12958](https://github.com/hashicorp/terraform/issues/12958)) - * provider/openstack: Add support for 'value_specs' options to `openstack_compute_servergroup_v2` ([#13380](https://github.com/hashicorp/terraform/issues/13380)) - * provider/statuscake: Add support for StatusCake TriggerRate field ([#13340](https://github.com/hashicorp/terraform/issues/13340)) - * provider/triton: Move to joyent/triton-go ([#13225](https://github.com/hashicorp/terraform/issues/13225)) - * provisioner/chef: Make sure we add new Chef-Vault clients as clients ([#13525](https://github.com/hashicorp/terraform/issues/13525)) - -BUG FIXES: - - * core: Escaped interpolation-like sequences (like `$${foo}`) now permitted in variable defaults ([#13137](https://github.com/hashicorp/terraform/issues/13137)) - * core: Fix strange issues with computed values in provider configuration that were worked around with `-input=false` ([#11264](https://github.com/hashicorp/terraform/issues/11264)], [[#13264](https://github.com/hashicorp/terraform/issues/13264)) - * core: Fix crash when providing nested maps as variable values in a `module` block ([#13343](https://github.com/hashicorp/terraform/issues/13343)) - * core: `connection` block attributes are now subject to basic validation of attribute names during validate walk ([#13400](https://github.com/hashicorp/terraform/issues/13400)) - * provider/aws: Add 
Support for maintenance_window and back_window to rds_cluster_instance ([#13134](https://github.com/hashicorp/terraform/issues/13134)) - * provider/aws: Increase timeout for AMI registration ([#13159](https://github.com/hashicorp/terraform/issues/13159)) - * provider/aws: Increase timeouts for ELB ([#13161](https://github.com/hashicorp/terraform/issues/13161)) - * provider/aws: `volume_type` of `aws_elasticsearch_domain.0.ebs_options` marked as `Computed` which prevents spurious diffs ([#13160](https://github.com/hashicorp/terraform/issues/13160)) - * provider/aws: Don't set DBName on `aws_db_instance` from snapshot ([#13140](https://github.com/hashicorp/terraform/issues/13140)) - * provider/aws: Add DiffSuppression to aws_ecs_service placement_strategies ([#13220](https://github.com/hashicorp/terraform/issues/13220)) - * provider/aws: Refresh aws_alb_target_group stickiness on manual updates ([#13199](https://github.com/hashicorp/terraform/issues/13199)) - * provider/aws: Preserve default retain_on_delete in cloudfront import ([#13209](https://github.com/hashicorp/terraform/issues/13209)) - * provider/aws: Refresh aws_alb_target_group tags ([#13200](https://github.com/hashicorp/terraform/issues/13200)) - * provider/aws: Set aws_vpn_connection to recreate when in deleted state ([#13204](https://github.com/hashicorp/terraform/issues/13204)) - * provider/aws: Wait for aws_opsworks_instance to be running when it's specified ([#13218](https://github.com/hashicorp/terraform/issues/13218)) - * provider/aws: Handle `aws_lambda_function` missing s3 key error ([#10960](https://github.com/hashicorp/terraform/issues/10960)) - * provider/aws: Set stickiness to computed in alb_target_group ([#13278](https://github.com/hashicorp/terraform/issues/13278)) - * provider/aws: Increase timeout for deploying `cloudfront_distribution` from 40 to 70 mins ([#13319](https://github.com/hashicorp/terraform/issues/13319)) - * provider/aws: Increase AMI retry timeouts 
([#13324](https://github.com/hashicorp/terraform/issues/13324)) - * provider/aws: Increase subnet deletion timeout ([#13356](https://github.com/hashicorp/terraform/issues/13356)) - * provider/aws: Increase launch_configuration creation timeout ([#13357](https://github.com/hashicorp/terraform/issues/13357)) - * provider/aws: Increase Beanstalk env 'ready' timeout ([#13359](https://github.com/hashicorp/terraform/issues/13359)) - * provider/aws: Raise timeout for deleting APIG REST API ([#13414](https://github.com/hashicorp/terraform/issues/13414)) - * provider/aws: Raise timeout for attaching/detaching VPN Gateway ([#13457](https://github.com/hashicorp/terraform/issues/13457)) - * provider/aws: Recreate opsworks_stack on change of service_role_arn ([#13325](https://github.com/hashicorp/terraform/issues/13325)) - * provider/aws: Fix KMS Key reading with Exists method ([#13348](https://github.com/hashicorp/terraform/issues/13348)) - * provider/aws: Fix DynamoDB issues about GSIs indexes ([#13256](https://github.com/hashicorp/terraform/issues/13256)) - * provider/aws: Fix `aws_s3_bucket` drift detection of logging options ([#13281](https://github.com/hashicorp/terraform/issues/13281)) - * provider/aws: Update ElasticTranscoderPreset to have default for MaxFrameRate ([#13422](https://github.com/hashicorp/terraform/issues/13422)) - * provider/aws: Fix aws_ami_launch_permission refresh when AMI disappears ([#13469](https://github.com/hashicorp/terraform/issues/13469)) - * provider/aws: Add support for updating SSM documents ([#13491](https://github.com/hashicorp/terraform/issues/13491)) - * provider/aws: Fix panic on nil route configs ([#13548](https://github.com/hashicorp/terraform/issues/13548)) - * provider/azurerm: Network Security Group - ignoring protocol casing at Import time ([#13153](https://github.com/hashicorp/terraform/issues/13153)) - * provider/azurerm: Fix crash when importing Local Network Gateways 
([#13261](https://github.com/hashicorp/terraform/issues/13261)) - * provider/azurerm: Defaulting the value of `duplicate_detection_history_time_window` for `azurerm_servicebus_topic` ([#13223](https://github.com/hashicorp/terraform/issues/13223)) - * provider/azurerm: Event Hubs making the Location field idempotent ([#13570](https://github.com/hashicorp/terraform/issues/13570)) - * provider/bitbucket: Fixed issue where provider would fail with an "EOF" error on some operations ([#13390](https://github.com/hashicorp/terraform/issues/13390)) - * provider/dnsimple: Handle 404 on DNSimple records ([#13131](https://github.com/hashicorp/terraform/issues/13131)) - * provider/kubernetes: Use PATCH to update namespace ([#13114](https://github.com/hashicorp/terraform/issues/13114)) - * provider/ns1: No splitting answer on SPF records. ([#13260](https://github.com/hashicorp/terraform/issues/13260)) - * provider/openstack: Refresh volume_attachment from state if NotFound ([#13342](https://github.com/hashicorp/terraform/issues/13342)) - * provider/openstack: Add SOFT_DELETED to delete status ([#13444](https://github.com/hashicorp/terraform/issues/13444)) - * provider/profitbricks: Changed output type of ips variable of ip_block ProfitBricks resource ([#13290](https://github.com/hashicorp/terraform/issues/13290)) - * provider/template: Fix panic in cloudinit config ([#13581](https://github.com/hashicorp/terraform/issues/13581)) - -## 0.9.2 (March 28, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/openstack: Port Fixed IPs are able to be read again using the original numerical notation. However, Fixed IP configurations which are obtaining addresses via DHCP must now use the `all_fixed_ips` attribute to reference the returned IP address. - * Environment names must be safe to use as a URL path segment without escaping, and is enforced by the CLI. 
- -FEATURES: - - * **New Resource:** `alicloud_db_instance` ([#12913](https://github.com/hashicorp/terraform/issues/12913)) - * **New Resource:** `aws_api_gateway_usage_plan` ([#12542](https://github.com/hashicorp/terraform/issues/12542)) - * **New Resource:** `aws_api_gateway_usage_plan_key` ([#12851](https://github.com/hashicorp/terraform/issues/12851)) - * **New Resource:** `github_repository_webhook` ([#12924](https://github.com/hashicorp/terraform/issues/12924)) - * **New Resource:** `random_pet` ([#12903](https://github.com/hashicorp/terraform/issues/12903)) - * **New Interpolation:** `substr` ([#12870](https://github.com/hashicorp/terraform/issues/12870)) - * **S3 Environments:** The S3 remote state backend now supports named environments - -IMPROVEMENTS: - - * core: fix interpolation error when referencing computed values from an `aws_instance` `cidr_block` ([#13046](https://github.com/hashicorp/terraform/issues/13046)) - * core: fix `ignore_changes` causing fields to be removed during apply ([#12897](https://github.com/hashicorp/terraform/issues/12897)) - * core: add `-force-copy` option to `terraform init` to suppress prompts for copying state ([#12939](https://github.com/hashicorp/terraform/issues/12939)) - * helper/acctest: Add NewSSHKeyPair function ([#12894](https://github.com/hashicorp/terraform/issues/12894)) - * provider/alicloud: simplify validators ([#12982](https://github.com/hashicorp/terraform/issues/12982)) - * provider/aws: Added support for EMR AutoScalingRole ([#12823](https://github.com/hashicorp/terraform/issues/12823)) - * provider/aws: Add `name_prefix` to `aws_autoscaling_group` and `aws_elb` resources ([#12629](https://github.com/hashicorp/terraform/issues/12629)) - * provider/aws: Updated default configuration manager version in `aws_opsworks_stack` ([#12979](https://github.com/hashicorp/terraform/issues/12979)) - * provider/aws: Added aws_api_gateway_api_key value attribute 
([#9462](https://github.com/hashicorp/terraform/issues/9462)) - * provider/aws: Allow aws_alb subnets to change ([#12850](https://github.com/hashicorp/terraform/issues/12850)) - * provider/aws: Support Attachment of ALB Target Groups to Autoscaling Groups ([#12855](https://github.com/hashicorp/terraform/issues/12855)) - * provider/aws: Support Import of iam_server_certificate ([#13065](https://github.com/hashicorp/terraform/issues/13065)) - * provider/azurerm: Add support for setting the primary network interface ([#11290](https://github.com/hashicorp/terraform/issues/11290)) - * provider/cloudstack: Add `zone_id` to `cloudstack_ipaddress` resource ([#11306](https://github.com/hashicorp/terraform/issues/11306)) - * provider/consul: Add support for basic auth to the provider ([#12679](https://github.com/hashicorp/terraform/issues/12679)) - * provider/digitalocean: Support disk only resize ([#13059](https://github.com/hashicorp/terraform/issues/13059)) - * provider/dnsimple: Allow dnsimple_record.priority attribute to be set ([#12843](https://github.com/hashicorp/terraform/issues/12843)) - * provider/google: Add support for service_account, metadata, and image_type fields in GKE cluster config ([#12743](https://github.com/hashicorp/terraform/issues/12743)) - * provider/google: Add local ssd count support for container clusters ([#12281](https://github.com/hashicorp/terraform/issues/12281)) - * provider/ignition: ignition_filesystem, explicit option to create the filesystem ([#12980](https://github.com/hashicorp/terraform/issues/12980)) - * provider/kubernetes: Internal K8S annotations are ignored in `config_map` ([#12945](https://github.com/hashicorp/terraform/issues/12945)) - * provider/ns1: Ensure provider checks for credentials ([#12920](https://github.com/hashicorp/terraform/issues/12920)) - * provider/openstack: Adding Timeouts to Blockstorage Resources ([#12862](https://github.com/hashicorp/terraform/issues/12862)) - * provider/openstack: Adding Timeouts to 
FWaaS v1 Resources ([#12863](https://github.com/hashicorp/terraform/issues/12863)) - * provider/openstack: Adding Timeouts to Image v2 and LBaaS v2 Resources ([#12865](https://github.com/hashicorp/terraform/issues/12865)) - * provider/openstack: Adding Timeouts to Network Resources ([#12866](https://github.com/hashicorp/terraform/issues/12866)) - * provider/openstack: Adding Timeouts to LBaaS v1 Resources ([#12867](https://github.com/hashicorp/terraform/issues/12867)) - * provider/openstack: Deprecating Instance Volume attribute ([#13062](https://github.com/hashicorp/terraform/issues/13062)) - * provider/openstack: Deprecating Instance Floating IP attribute ([#13063](https://github.com/hashicorp/terraform/issues/13063)) - * provider/openstack: Don't log the catalog ([#13075](https://github.com/hashicorp/terraform/issues/13075)) - * provider/openstack: Handle 409/500 Response on Pool Create ([#13074](https://github.com/hashicorp/terraform/issues/13074)) - * provider/pagerduty: Validate credentials ([#12854](https://github.com/hashicorp/terraform/issues/12854)) - * provider/openstack: Adding all_metadata attribute ([#13061](https://github.com/hashicorp/terraform/issues/13061)) - * provider/profitbricks: Handling missing resources ([#13053](https://github.com/hashicorp/terraform/issues/13053)) - -BUG FIXES: - - * core: Remove legacy remote state configuration on state migration. This fixes errors when saving plans. 
([#12888](https://github.com/hashicorp/terraform/issues/12888)) - * provider/arukas: Default timeout for launching container increased to 15mins (was 10mins) ([#12849](https://github.com/hashicorp/terraform/issues/12849)) - * provider/aws: Fix flattened cloudfront lambda function associations to be a set not a slice ([#11984](https://github.com/hashicorp/terraform/issues/11984)) - * provider/aws: Consider ACTIVE as pending state during ECS svc deletion ([#12986](https://github.com/hashicorp/terraform/issues/12986)) - * provider/aws: Deprecate the usage of Api Gateway Key Stages in favor of Usage Plans ([#12883](https://github.com/hashicorp/terraform/issues/12883)) - * provider/aws: prevent panic in resourceAwsSsmDocumentRead ([#12891](https://github.com/hashicorp/terraform/issues/12891)) - * provider/aws: Prevent panic when setting AWS CodeBuild Source to state ([#12915](https://github.com/hashicorp/terraform/issues/12915)) - * provider/aws: Only call replace Iam Instance Profile on existing machines ([#12922](https://github.com/hashicorp/terraform/issues/12922)) - * provider/aws: Increase AWS AMI Destroy timeout ([#12943](https://github.com/hashicorp/terraform/issues/12943)) - * provider/aws: Set aws_vpc ipv6 for associated only ([#12899](https://github.com/hashicorp/terraform/issues/12899)) - * provider/aws: Fix AWS ECS placement strategy spread fields ([#12998](https://github.com/hashicorp/terraform/issues/12998)) - * provider/aws: Specify that aws_network_acl_rule requires a cidr block ([#13013](https://github.com/hashicorp/terraform/issues/13013)) - * provider/aws: aws_network_acl_rule treat all and -1 for protocol the same ([#13049](https://github.com/hashicorp/terraform/issues/13049)) - * provider/aws: Only allow 1 value in alb_listener_rule condition ([#13051](https://github.com/hashicorp/terraform/issues/13051)) - * provider/aws: Correct handling of network ACL default IPv6 ingress/egress rules 
([#12835](https://github.com/hashicorp/terraform/issues/12835)) - * provider/aws: aws_ses_receipt_rule: fix off-by-one errors ([#12961](https://github.com/hashicorp/terraform/issues/12961)) - * provider/aws: Fix issue upgrading to Terraform v0.9+ with AWS OpsWorks Stacks ([#13024](https://github.com/hashicorp/terraform/issues/13024)) - * provider/fastly: Fix issue importing Fastly Services with Backends ([#12538](https://github.com/hashicorp/terraform/issues/12538)) - * provider/google: turn compute_instance_group.instances into a set ([#12790](https://github.com/hashicorp/terraform/issues/12790)) - * provider/mysql: recreate user/grant if user/grant got deleted manually ([#12791](https://github.com/hashicorp/terraform/issues/12791)) - * provider/openstack: Fix monitor_id typo in LBaaS v1 Pool ([#13069](https://github.com/hashicorp/terraform/issues/13069)) - * provider/openstack: Resolve issues with Port Fixed IPs ([#13056](https://github.com/hashicorp/terraform/issues/13056)) - * provider/rancher: error when no api_url is provided ([#13086](https://github.com/hashicorp/terraform/issues/13086)) - * provider/scaleway: work around parallel request limitation ([#13045](https://github.com/hashicorp/terraform/issues/13045)) - -## 0.9.1 (March 17, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/pagerduty: the deprecated `name_regex` field has been removed from vendor data source ([#12396](https://github.com/hashicorp/terraform/issues/12396)) - -FEATURES: - - * **New Provider:** `kubernetes` ([#12372](https://github.com/hashicorp/terraform/issues/12372)) - * **New Resource:** `kubernetes_namespace` ([#12372](https://github.com/hashicorp/terraform/issues/12372)) - * **New Resource:** `kubernetes_config_map` ([#12753](https://github.com/hashicorp/terraform/issues/12753)) - * **New Data Source:** `dns_a_record_set` ([#12744](https://github.com/hashicorp/terraform/issues/12744)) - * **New Data Source:** `dns_cname_record_set` 
([#12744](https://github.com/hashicorp/terraform/issues/12744)) - * **New Data Source:** `dns_txt_record_set` ([#12744](https://github.com/hashicorp/terraform/issues/12744)) - -IMPROVEMENTS: - - * command/init: `-backend-config` accepts `key=value` pairs - * provider/aws: Improved error when failing to get S3 tags ([#12759](https://github.com/hashicorp/terraform/issues/12759)) - * provider/aws: Validate CIDR Blocks in SG and SG rule resources ([#12765](https://github.com/hashicorp/terraform/issues/12765)) - * provider/aws: Add KMS key tag support ([#12243](https://github.com/hashicorp/terraform/issues/12243)) - * provider/aws: Allow `name_prefix` to be used with various IAM resources ([#12658](https://github.com/hashicorp/terraform/issues/12658)) - * provider/openstack: Add timeout support for Compute resources ([#12794](https://github.com/hashicorp/terraform/issues/12794)) - * provider/scaleway: expose public IPv6 information on scaleway_server ([#12748](https://github.com/hashicorp/terraform/issues/12748)) - -BUG FIXES: - - * core: Fix panic when an undefined module is referenced ([#12793](https://github.com/hashicorp/terraform/issues/12793)) - * core: Fix regression from 0.8.x when using a data source in a module ([#12837](https://github.com/hashicorp/terraform/issues/12837)) - * command/apply: Applies from plans with backends set will reuse the backend rather than local ([#12785](https://github.com/hashicorp/terraform/issues/12785)) - * command/init: Changing only `-backend-config` detects changes and reconfigures ([#12776](https://github.com/hashicorp/terraform/issues/12776)) - * command/init: Fix legacy backend init error that could occur when upgrading ([#12818](https://github.com/hashicorp/terraform/issues/12818)) - * command/push: Detect local state and error properly ([#12773](https://github.com/hashicorp/terraform/issues/12773)) - * command/refresh: Allow empty and non-existent state ([#12777](https://github.com/hashicorp/terraform/issues/12777)) - * 
provider/aws: Get the aws_lambda_function attributes when there are greater than 50 versions of a function ([#11745](https://github.com/hashicorp/terraform/issues/11745)) - * provider/aws: Correctly check for nil cidr_block in aws_network_acl ([#12735](https://github.com/hashicorp/terraform/issues/12735)) - * provider/aws: Stop setting weight property on route53_record read ([#12756](https://github.com/hashicorp/terraform/issues/12756)) - * provider/google: Fix the Google provider asking for account_file input on every run ([#12729](https://github.com/hashicorp/terraform/issues/12729)) - * provider/profitbricks: Prevent panic on profitbricks volume ([#12819](https://github.com/hashicorp/terraform/issues/12819)) - - -## 0.9.0 (March 15, 2017) - -**This is the complete 0.8.8 to 0.9 CHANGELOG. Below this section we also have a 0.9.0-beta2 to 0.9.0 final CHANGELOG.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` ([#12503](https://github.com/hashicorp/terraform/issues/12503)) - * provider/azurerm: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store hash of custom_data not all custom_data ([#12214](https://github.com/hashicorp/terraform/issues/12214)) - * provider/azurerm: scale_sets `os_profile_master_password` now marked as sensitive - * provider/azurerm: sql_server `administrator_login_password` now marked as sensitive - * provider/dnsimple: Provider has been upgraded to APIv2 therefore, you will need to use the APIv2 auth token - * provider/google: storage buckets have been updated with the new storage classes. The old classes will continue working as before, but should be migrated as soon as possible, as there's no guarantee they'll continue working forever. 
([#12044](https://github.com/hashicorp/terraform/issues/12044)) - * provider/google: compute_instance, compute_instance_template, and compute_disk all have a subtly changed logic when specifying an image family as the image; in 0.8.x they would pin to the latest image in the family when the resource is created; in 0.9.x they pass the family to the API and use its behaviour. New input formats are also supported. ([#12223](https://github.com/hashicorp/terraform/issues/12223)) - * provider/google: removed the unused and deprecated region field from google_compute_backend_service ([#12663](https://github.com/hashicorp/terraform/issues/12663)) - * provider/google: removed the deprecated account_file field for the Google Cloud provider ([#12668](https://github.com/hashicorp/terraform/issues/12668)) - * provider/google: removed the deprecated fields from google_project ([#12659](https://github.com/hashicorp/terraform/issues/12659)) - -FEATURES: - - * **Remote Backends:** This is a successor to "remote state" and includes - file-based configuration, an improved setup process (just run `terraform init`), - no more local caching of remote state, and more. ([#11286](https://github.com/hashicorp/terraform/issues/11286)) - * **Destroy Provisioners:** Provisioners can now be configured to run - on resource destruction. ([#11329](https://github.com/hashicorp/terraform/issues/11329)) - * **State Locking:** State will be automatically locked when supported by the backend. - Backends supporting locking in this release are Local, S3 (via DynamoDB), and Consul. ([#11187](https://github.com/hashicorp/terraform/issues/11187)) - * **State Environments:** You can now create named "environments" for states. This allows you to manage distinct infrastructure resources from the same configuration. 
- * **New Provider:** `Circonus` ([#12578](https://github.com/hashicorp/terraform/issues/12578)) - * **New Data Source:** `openstack_networking_network_v2` ([#12304](https://github.com/hashicorp/terraform/issues/12304)) - * **New Resource:** `aws_iam_account_alias` ([#12648](https://github.com/hashicorp/terraform/issues/12648)) - * **New Resource:** `datadog_downtime` ([#10994](https://github.com/hashicorp/terraform/issues/10994)) - * **New Resource:** `ns1_notifylist` ([#12373](https://github.com/hashicorp/terraform/issues/12373)) - * **New Resource:** `google_container_node_pool` ([#11802](https://github.com/hashicorp/terraform/issues/11802)) - * **New Resource:** `rancher_certificate` ([#12717](https://github.com/hashicorp/terraform/issues/12717)) - * **New Resource:** `rancher_host` ([#11545](https://github.com/hashicorp/terraform/issues/11545)) - * helper/schema: Added Timeouts to allow Provider/Resource developers to expose configurable timeouts for actions ([#12311](https://github.com/hashicorp/terraform/issues/12311)) - -IMPROVEMENTS: - - * core: Data source values can now be used as part of a `count` calculation. ([#11482](https://github.com/hashicorp/terraform/issues/11482)) - * core: "terraformrc" can contain env var references with $FOO ([#11929](https://github.com/hashicorp/terraform/issues/11929)) - * core: report all errors encountered during config validation ([#12383](https://github.com/hashicorp/terraform/issues/12383)) - * command: CLI args can be specified via env vars. Specify `TF_CLI_ARGS` or `TF_CLI_ARGS_name` (where name is the name of a command) to specify additional CLI args ([#11922](https://github.com/hashicorp/terraform/issues/11922)) - * command/init: previous behavior is retained, but init now also configures - the new remote backends as well as downloads modules. It is the single - command to initialize a new or existing Terraform configuration. 
- * command: Display resource state ID in refresh/plan/destroy output ([#12261](https://github.com/hashicorp/terraform/issues/12261)) - * provider/aws: AWS Lambda DeadLetterConfig support ([#12188](https://github.com/hashicorp/terraform/issues/12188)) - * provider/aws: Return errors from Elastic Beanstalk ([#12425](https://github.com/hashicorp/terraform/issues/12425)) - * provider/aws: Set aws_db_cluster to snapshot by default ([#11668](https://github.com/hashicorp/terraform/issues/11668)) - * provider/aws: Enable final snapshots for aws_rds_cluster by default ([#11694](https://github.com/hashicorp/terraform/issues/11694)) - * provider/aws: Enable snapshotting by default on aws_redshift_cluster ([#11695](https://github.com/hashicorp/terraform/issues/11695)) - * provider/aws: Add support for ACM certificates to `api_gateway_domain_name` ([#12592](https://github.com/hashicorp/terraform/issues/12592)) - * provider/aws: Add support for IPv6 to aws\_security\_group\_rule ([#12645](https://github.com/hashicorp/terraform/issues/12645)) - * provider/aws: Add IPv6 Support to aws\_route\_table ([#12640](https://github.com/hashicorp/terraform/issues/12640)) - * provider/aws: Add support for IPv6 to aws\_network\_acl\_rule ([#12644](https://github.com/hashicorp/terraform/issues/12644)) - * provider/aws: Add support for IPv6 to aws\_default\_route\_table ([#12642](https://github.com/hashicorp/terraform/issues/12642)) - * provider/aws: Add support for IPv6 to aws\_network\_acl ([#12641](https://github.com/hashicorp/terraform/issues/12641)) - * provider/aws: Add support for IPv6 in aws\_route ([#12639](https://github.com/hashicorp/terraform/issues/12639)) - * provider/aws: Add support for IPv6 to aws\_security\_group ([#12655](https://github.com/hashicorp/terraform/issues/12655)) - * provider/aws: Add replace\_unhealthy\_instances to spot\_fleet\_request ([#12681](https://github.com/hashicorp/terraform/issues/12681)) - * provider/aws: Remove restriction on running 
aws\_opsworks\_* on us-east-1 ([#12688](https://github.com/hashicorp/terraform/issues/12688)) - * provider/aws: Improve error message on S3 Bucket Object deletion ([#12712](https://github.com/hashicorp/terraform/issues/12712)) - * provider/aws: Add log message about if changes are being applied now or later ([#12691](https://github.com/hashicorp/terraform/issues/12691)) - * provider/azurerm: Mark the azurerm_scale_set machine password as sensitive ([#11982](https://github.com/hashicorp/terraform/issues/11982)) - * provider/azurerm: Mark the azurerm_sql_server admin password as sensitive ([#12004](https://github.com/hashicorp/terraform/issues/12004)) - * provider/azurerm: Add support for managed availability sets. ([#12532](https://github.com/hashicorp/terraform/issues/12532)) - * provider/azurerm: Add support for extensions on virtual machine scale sets ([#12124](https://github.com/hashicorp/terraform/issues/12124)) - * provider/dnsimple: Upgrade DNSimple provider to API v2 ([#10760](https://github.com/hashicorp/terraform/issues/10760)) - * provider/docker: added support for linux capabilities ([#12045](https://github.com/hashicorp/terraform/issues/12045)) - * provider/fastly: Add Fastly SSL validation fields ([#12578](https://github.com/hashicorp/terraform/issues/12578)) - * provider/ignition: Migrate all of the ignition resources to data sources ([#11851](https://github.com/hashicorp/terraform/issues/11851)) - * provider/openstack: Set Availability Zone in Instances ([#12610](https://github.com/hashicorp/terraform/issues/12610)) - * provider/openstack: Force Deletion of Instances ([#12689](https://github.com/hashicorp/terraform/issues/12689)) - * provider/rancher: Better comparison of compose files ([#12561](https://github.com/hashicorp/terraform/issues/12561)) - * provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state ([#12214](https://github.com/hashicorp/terraform/issues/12214)) 
- * provider/vault: read vault token from `~/.vault-token` as a fallback for the - `VAULT_TOKEN` environment variable. ([#11529](https://github.com/hashicorp/terraform/issues/11529)) - * provisioners: All provisioners now respond very quickly to interrupts for - fast cancellation. ([#10934](https://github.com/hashicorp/terraform/issues/10934)) - -BUG FIXES: - - * core: targeting will remove untargeted providers ([#12050](https://github.com/hashicorp/terraform/issues/12050)) - * core: doing a map lookup in a resource config with a computed set no longer crashes ([#12210](https://github.com/hashicorp/terraform/issues/12210)) - * provider/aws: Fixes issue for aws_lb_ssl_negotiation_policy of already deleted ELB ([#12360](https://github.com/hashicorp/terraform/issues/12360)) - * provider/aws: Populate the iam_instance_profile uniqueId ([#12449](https://github.com/hashicorp/terraform/issues/12449)) - * provider/aws: Only send iops when creating io1 devices ([#12392](https://github.com/hashicorp/terraform/issues/12392)) - * provider/aws: Fix spurious aws_spot_fleet_request diffs ([#12437](https://github.com/hashicorp/terraform/issues/12437)) - * provider/aws: Changing volumes in ECS task definition should force new revision ([#11403](https://github.com/hashicorp/terraform/issues/11403)) - * provider/aws: Ignore whitespace in json diff for aws_dms_replication_task options ([#12380](https://github.com/hashicorp/terraform/issues/12380)) - * provider/aws: Check spot instance is running before trying to attach volumes ([#12459](https://github.com/hashicorp/terraform/issues/12459)) - * provider/aws: Add the IPV6 cidr block to the vpc datasource ([#12529](https://github.com/hashicorp/terraform/issues/12529)) - * provider/aws: Error on trying to recreate an existing customer gateway ([#12501](https://github.com/hashicorp/terraform/issues/12501)) - * provider/aws: Prevent aws_dms_replication_task panic ([#12539](https://github.com/hashicorp/terraform/issues/12539)) - * 
provider/aws: output the task definition name when errors occur during refresh ([#12609](https://github.com/hashicorp/terraform/issues/12609)) - * provider/aws: Refresh iam saml provider from state on 404 ([#12602](https://github.com/hashicorp/terraform/issues/12602)) - * provider/aws: Add address, port, hosted_zone_id and endpoint for aws_db_instance datasource ([#12623](https://github.com/hashicorp/terraform/issues/12623)) - * provider/aws: Allow recreation of `aws_opsworks_user_profile` when the `user_arn` is changed ([#12595](https://github.com/hashicorp/terraform/issues/12595)) - * provider/aws: Guard clause to prevent panic on ELB connectionSettings ([#12685](https://github.com/hashicorp/terraform/issues/12685)) - * provider/azurerm: bug fix to prevent crashes during azurerm_container_service provisioning ([#12516](https://github.com/hashicorp/terraform/issues/12516)) - * provider/cobbler: Fix Profile Repos ([#12452](https://github.com/hashicorp/terraform/issues/12452)) - * provider/datadog: Update to datadog_monitor to use default values ([#12497](https://github.com/hashicorp/terraform/issues/12497)) - * provider/datadog: Default notify_no_data on datadog_monitor to false ([#11903](https://github.com/hashicorp/terraform/issues/11903)) - * provider/google: Correct the incorrect instance group manager URL returned from GKE ([#4336](https://github.com/hashicorp/terraform/issues/4336)) - * provider/google: Fix a plan/apply cycle in IAM policies ([#12387](https://github.com/hashicorp/terraform/issues/12387)) - * provider/google: Fix a plan/apply cycle in forwarding rules when only a single port is specified ([#12662](https://github.com/hashicorp/terraform/issues/12662)) - * provider/google: Minor correction : "Deleting disk" message in Delete method ([#12521](https://github.com/hashicorp/terraform/issues/12521)) - * provider/mysql: Avoid crash on un-interpolated provider cfg ([#12391](https://github.com/hashicorp/terraform/issues/12391)) - * provider/ns1: Fix 
incorrect schema (causing crash) for 'ns1_user.notify' ([#12721](https://github.com/hashicorp/terraform/issues/12721)) - * provider/openstack: Handle cases where volumes are disabled ([#12374](https://github.com/hashicorp/terraform/issues/12374)) - * provider/openstack: Toggle Creation of Default Security Group Rules ([#12119](https://github.com/hashicorp/terraform/issues/12119)) - * provider/openstack: Change Port fixed_ip to a Set ([#12613](https://github.com/hashicorp/terraform/issues/12613)) - * provider/openstack: Add network_id to Network data source ([#12615](https://github.com/hashicorp/terraform/issues/12615)) - * provider/openstack: Check for ErrDefault500 when creating/deleting pool member ([#12664](https://github.com/hashicorp/terraform/issues/12664)) - * provider/rancher: Apply the set value for finish_upgrade to set to prevent recurring plans ([#12545](https://github.com/hashicorp/terraform/issues/12545)) - * provider/scaleway: work around API concurrency issue ([#12707](https://github.com/hashicorp/terraform/issues/12707)) - * provider/statuscake: use default status code list when updating test ([#12375](https://github.com/hashicorp/terraform/issues/12375)) - -## 0.9.0 from 0.9.0-beta2 (March 15, 2017) - -**This only includes changes from 0.9.0-beta2 to 0.9.0 final. 
The section above has the complete 0.8.x to 0.9.0 CHANGELOG.** - -FEATURES: - - * **New Provider:** `Circonus` ([#12578](https://github.com/hashicorp/terraform/issues/12578)) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` ([#12503](https://github.com/hashicorp/terraform/issues/12503)) - * provider/azurerm: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store hash of custom_data not all custom_data ([#12214](https://github.com/hashicorp/terraform/issues/12214)) - * provider/google: compute_instance, compute_instance_template, and compute_disk all have a subtly changed logic when specifying an image family as the image; in 0.8.x they would pin to the latest image in the family when the resource is created; in 0.9.x they pass the family to the API and use its behaviour. New input formats are also supported. ([#12223](https://github.com/hashicorp/terraform/issues/12223)) - * provider/google: removed the unused and deprecated region field from google_compute_backend_service ([#12663](https://github.com/hashicorp/terraform/issues/12663)) - * provider/google: removed the deprecated account_file field for the Google Cloud provider ([#12668](https://github.com/hashicorp/terraform/issues/12668)) - * provider/google: removed the deprecated fields from google_project ([#12659](https://github.com/hashicorp/terraform/issues/12659)) - -IMPROVEMENTS: - - * provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state ([#12214](https://github.com/hashicorp/terraform/issues/12214)) - * report all errors encountered during config validation ([#12383](https://github.com/hashicorp/terraform/issues/12383)) - -BUG FIXES: - - * provider/google: Correct the incorrect instance group manager URL returned from GKE ([#4336](https://github.com/hashicorp/terraform/issues/4336)) - * provider/google: Fix a plan/apply cycle in 
IAM policies ([#12387](https://github.com/hashicorp/terraform/issues/12387)) - * provider/google: Fix a plan/apply cycle in forwarding rules when only a single port is specified ([#12662](https://github.com/hashicorp/terraform/issues/12662)) - -## 0.9.0-beta2 (March 2, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/azurerm: scale_sets `os_profile_master_password` now marked as sensitive - * provider/azurerm: sql_server `administrator_login_password` now marked as sensitive - * provider/google: storage buckets have been updated with the new storage classes. The old classes will continue working as before, but should be migrated as soon as possible, as there's no guarantee they'll continue working forever. ([#12044](https://github.com/hashicorp/terraform/issues/12044)) - * provider/dnsimple: Provider has been upgraded to APIv2 therefore, you will need to use the APIv2 auth token - -FEATURES: - - * **State Environments:** You can now create named "environments" for states. This allows you to manage distinct infrastructure resources from the same configuration. 
- * helper/schema: Added Timeouts to allow Provider/Resource developers to expose configurable timeouts for actions ([#12311](https://github.com/hashicorp/terraform/issues/12311)) - -IMPROVEMENTS: - - * core: "terraformrc" can contain env var references with $FOO ([#11929](https://github.com/hashicorp/terraform/issues/11929)) - * command: Display resource state ID in refresh/plan/destroy output ([#12261](https://github.com/hashicorp/terraform/issues/12261)) - * provider/aws: AWS Lambda DeadLetterConfig support ([#12188](https://github.com/hashicorp/terraform/issues/12188)) - * provider/azurerm: Mark the azurerm_scale_set machine password as sensitive ([#11982](https://github.com/hashicorp/terraform/issues/11982)) - * provider/azurerm: Mark the azurerm_sql_server admin password as sensitive ([#12004](https://github.com/hashicorp/terraform/issues/12004)) - * provider/dnsimple: Upgrade DNSimple provider to API v2 ([#10760](https://github.com/hashicorp/terraform/issues/10760)) - -BUG FIXES: - - * core: targeting will remove untargeted providers ([#12050](https://github.com/hashicorp/terraform/issues/12050)) - * core: doing a map lookup in a resource config with a computed set no longer crashes ([#12210](https://github.com/hashicorp/terraform/issues/12210)) - -0.9.0-beta1 FIXES: - - * core: backends are validated to not contain interpolations ([#12067](https://github.com/hashicorp/terraform/issues/12067)) - * core: fix local state locking on Windows ([#12059](https://github.com/hashicorp/terraform/issues/12059)) - * core: destroy provisioners dependent on module variables work ([#12063](https://github.com/hashicorp/terraform/issues/12063)) - * core: resource destruction happens after dependent resources' destroy provisioners ([#12063](https://github.com/hashicorp/terraform/issues/12063)) - * core: invalid resource attribute interpolation in a destroy provisioner errors ([#12063](https://github.com/hashicorp/terraform/issues/12063)) - * core: legacy backend loading of 
Consul now works properly ([#12320](https://github.com/hashicorp/terraform/issues/12320)) - * command/init: allow unsetting a backend properly ([#11988](https://github.com/hashicorp/terraform/issues/11988)) - * command/apply: fix crash that could happen with an empty directory ([#11989](https://github.com/hashicorp/terraform/issues/11989)) - * command/refresh: fix crash when no configs were in the pwd ([#12178](https://github.com/hashicorp/terraform/issues/12178)) - * command/{state,taint}: work properly with backend state ([#12155](https://github.com/hashicorp/terraform/issues/12155)) - * providers/terraform: remote state data source works with new backends ([#12173](https://github.com/hashicorp/terraform/issues/12173)) - -## 0.9.0-beta1 (February 15, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Once an environment is updated to use the new "remote backend" feature - (from a prior remote state), it cannot be used with prior Terraform versions. - Remote backends themselves are fully backwards compatible with prior - Terraform versions. - * provider/aws: `aws_db_instance` now defaults to making a final snapshot on delete - * provider/aws: `aws_rds_cluster` now defaults to making a final snapshot on delete - * provider/aws: `aws_redshift_cluster` now defaults to making a final snapshot on delete - * provider/aws: Deprecated fields `kinesis_endpoint` & `dynamodb_endpoint` were removed. Use `kinesis` & `dynamodb` inside the `endpoints` block instead. ([#11778](https://github.com/hashicorp/terraform/issues/11778)) - * provider/datadog: `datadog_monitor` now defaults `notify_no_data` to `false` as per the datadog API - -FEATURES: - - * **Remote Backends:** This is a successor to "remote state" and includes - file-based configuration, an improved setup process (just run `terraform init`), - no more local caching of remote state, and more. 
([#11286](https://github.com/hashicorp/terraform/issues/11286)) - * **Destroy Provisioners:** Provisioners can now be configured to run - on resource destruction. ([#11329](https://github.com/hashicorp/terraform/issues/11329)) - * **State Locking:** State will be automatically locked when supported by the backend. - Backends supporting locking in this release are Local, S3 (via DynamoDB), and Consul. ([#11187](https://github.com/hashicorp/terraform/issues/11187)) - -IMPROVEMENTS: - - * core: Data source values can now be used as part of a `count` calculation. ([#11482](https://github.com/hashicorp/terraform/issues/11482)) - * command: CLI args can be specified via env vars. Specify `TF_CLI_ARGS` or `TF_CLI_ARGS_name` (where name is the name of a command) to specify additional CLI args ([#11922](https://github.com/hashicorp/terraform/issues/11922)) - * command/init: previous behavior is retained, but init now also configures - the new remote backends as well as downloads modules. It is the single - command to initialize a new or existing Terraform configuration. - * provisioners: All provisioners now respond very quickly to interrupts for - fast cancellation. ([#10934](https://github.com/hashicorp/terraform/issues/10934)) - * provider/aws: Set aws_db_cluster to snapshot by default ([#11668](https://github.com/hashicorp/terraform/issues/11668)) - * provider/aws: Enable final snapshots for aws_rds_cluster by default ([#11694](https://github.com/hashicorp/terraform/issues/11694)) - * provider/aws: Enable snapshotting by default on aws_redshift_cluster ([#11695](https://github.com/hashicorp/terraform/issues/11695)) - * provider/vault: read vault token from `~/.vault-token` as a fallback for the - `VAULT_TOKEN` environment variable. 
([#11529](https://github.com/hashicorp/terraform/issues/11529)) - -BUG FIXES: - - * provider/datadog: Default notify_no_data on datadog_monitor to false ([#11903](https://github.com/hashicorp/terraform/issues/11903)) - -## 0.8.8 (March 2, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * provider/aws: Potential breaking change for `root_block_device` ([#12379](https://github.com/hashicorp/terraform/issues/12379)) - -FEATURES: - - * **New Provider:** `spotinst` ([#5001](https://github.com/hashicorp/terraform/issues/5001)) - * **New Interpolation:** `slice` ([#9729](https://github.com/hashicorp/terraform/issues/9729)) - * **New Data Source:** `aws_sns_topic` ([#11752](https://github.com/hashicorp/terraform/issues/11752)) - * **New Data Source:** `openstack_images_image_v2` ([#12097](https://github.com/hashicorp/terraform/issues/12097)) - * **New Resource:** `aws_elastic_beanstalk_application_version` ([#5770](https://github.com/hashicorp/terraform/issues/5770)) - * **New Resource:** `aws_cloudwatch_log_destination` ([#11940](https://github.com/hashicorp/terraform/issues/11940)) - * **New Resource:** `aws_cloudwatch_log_destination_policy` ([#11940](https://github.com/hashicorp/terraform/issues/11940)) - * **New Resource:** `aws_codepipeline` ([#11814](https://github.com/hashicorp/terraform/issues/11814)) - * **New Resource:** `aws_egress_only_internet_gateway` ([#10538](https://github.com/hashicorp/terraform/issues/10538)) - * **New Resource:** `datadog_user` ([#12268](https://github.com/hashicorp/terraform/issues/12268)) - * **New Resource:** `digitalocean_loadbalancer` ([#12077](https://github.com/hashicorp/terraform/issues/12077)) - * **New Resource:** `openstack_images_image_v2` ([#11942](https://github.com/hashicorp/terraform/issues/11942)) - * **New Resource:** `openstack_compute_floatingip_associate_v2` ([#12190](https://github.com/hashicorp/terraform/issues/12190)) - -IMPROVEMENTS: - - * provider/aws: Add support for AWS EBS Elastic Volumes 
([#11981](https://github.com/hashicorp/terraform/issues/11981)) - * provider/aws: Allow aws_instances to be resized rather than forcing a new instance ([#11998](https://github.com/hashicorp/terraform/issues/11998)) - * provider/aws: Report bucket name in S3 Error message ([#12122](https://github.com/hashicorp/terraform/issues/12122)) - * provider/aws: Implement IPV6 Support for ec2 / VPC ([#10538](https://github.com/hashicorp/terraform/issues/10538)) - * provider/aws: Add support for import of aws_elasticsearch_domain ([#12330](https://github.com/hashicorp/terraform/issues/12330)) - * provider/aws: improve redshift cluster validation ([#12313](https://github.com/hashicorp/terraform/issues/12313)) - * provider/aws: Support IAM role attachment and replacement for existing EC2 instance ([#11852](https://github.com/hashicorp/terraform/issues/11852)) - * provider/azurerm: Auto base64encode virtual_machine custom data ([#12164](https://github.com/hashicorp/terraform/issues/12164)) - * provider/datadog: add support for new host delay to the datadog_monitor resource ([#11975](https://github.com/hashicorp/terraform/issues/11975)) - * provider/datadog: Upgrade to Datadog API v2 ([#12098](https://github.com/hashicorp/terraform/issues/12098)) - * provider/fastly: Make Backends optional if used in VCL ([#12025](https://github.com/hashicorp/terraform/issues/12025)) - * provider/fastly: Add support for custom `response_object` ([#12032](https://github.com/hashicorp/terraform/issues/12032)) - * provider/google: Add support for maintenance window in `sql_database_instance` ([#12042](https://github.com/hashicorp/terraform/issues/12042)) - * provider/google: google_project supports billing account ([#11653](https://github.com/hashicorp/terraform/issues/11653)) - * provider/openstack: Don't allow floating IP and port ([#12099](https://github.com/hashicorp/terraform/issues/12099)) - * provider/openstack: Enable HTTP Logging 
([#12089](https://github.com/hashicorp/terraform/issues/12089)) - * provider/openstack: Add Additional Targets for LBaaS v1 Member ([#12266](https://github.com/hashicorp/terraform/issues/12266)) - * provider/openstack: Redesign openstack_blockstorage_volume_attach_v2 ([#12071](https://github.com/hashicorp/terraform/issues/12071)) - * provider/pagerduty: Import support for service integrations ([#12141](https://github.com/hashicorp/terraform/issues/12141)) - * provider/pagerduty: Updated implementation of pagerduty_vendor & pagerduty_service_integration ([#12357](https://github.com/hashicorp/terraform/issues/12357)) - * provider/random_id: Add prefix attribute ([#12016](https://github.com/hashicorp/terraform/issues/12016)) - * provider/statuscake: Add support for Port in statuscake_test ([#11966](https://github.com/hashicorp/terraform/issues/11966)) - -BUG FIXES: - - * core: Fix a hang that could occur at the end of a Terraform command with custom plugins used ([#12048](https://github.com/hashicorp/terraform/issues/12048)) - * command/fmt: Fix incorrect formatting with single line object following complex object ([#12049](https://github.com/hashicorp/terraform/issues/12049)) - * command/state: `-backup` flags work with `mv` and `rm` ([#12156](https://github.com/hashicorp/terraform/issues/12156)) - * provider/aws: add bucket name to delete error notification ([#11952](https://github.com/hashicorp/terraform/issues/11952)) - * provider/aws: Use proper Set for source.Auth in resource_aws_codebuild_project ([#11741](https://github.com/hashicorp/terraform/issues/11741)) - * provider/aws: aws_ecs_service should output service name along with err ([#12072](https://github.com/hashicorp/terraform/issues/12072)) - * provider/aws: Add VRRP to allowed protocols in network ACL rules ([#12107](https://github.com/hashicorp/terraform/issues/12107)) - * provider/aws: Add owner_account option to aws_redshift_cluster ([#12062](https://github.com/hashicorp/terraform/issues/12062)) - * 
provider/aws: Update of inspector_assessment_target should use ARN not Name ([#12115](https://github.com/hashicorp/terraform/issues/12115)) - * provider/aws: Fix the panic in ssm_association with parameters ([#12215](https://github.com/hashicorp/terraform/issues/12215)) - * provider/aws: Fix update of environment_variable in codebuild_project ([#12169](https://github.com/hashicorp/terraform/issues/12169)) - * provider/aws: Refresh aws_autoscaling_schedule from state when autoscaling_group not found ([#12312](https://github.com/hashicorp/terraform/issues/12312)) - * provider/aws: No longer ForceNew resource on lambda_function runtime update ([#12329](https://github.com/hashicorp/terraform/issues/12329)) - * provider/aws: reading multiple pages of aws_efs_file_system tags ([#12328](https://github.com/hashicorp/terraform/issues/12328)) - * provider/aws: Refresh cloudwatch log subscription filter on 404 ([#12333](https://github.com/hashicorp/terraform/issues/12333)) - * provider/aws: more details on which s3 bucket had an error ([#12314](https://github.com/hashicorp/terraform/issues/12314)) - * provider/azurerm: Ignore case on protocol and allocation types ([#12176](https://github.com/hashicorp/terraform/issues/12176)) - * provider/cloudflare: add validation for proxied record types ([#11993](https://github.com/hashicorp/terraform/issues/11993)) - * provider/datadog: Adding default values to datadog_monitor ([#12168](https://github.com/hashicorp/terraform/issues/12168)) - * provider/google: make local_traffic_selector computed ([#11631](https://github.com/hashicorp/terraform/issues/11631)) - * provider/google: Write the raw disk encryption key in the state file to avoid diffs on plan ([#12068](https://github.com/hashicorp/terraform/issues/12068)) - * provider/google: fix url map test and update logic ([#12317](https://github.com/hashicorp/terraform/issues/12317)) - * provider/openstack: Rename provider to loadbalancer_provider 
([#12239](https://github.com/hashicorp/terraform/issues/12239)) - * provider/pagerduty: Setting incident_urgency_rule as optional ([#12211](https://github.com/hashicorp/terraform/issues/12211)) - * provider/profitbricks: Fixing how primary_nic is added to profitbricks server ([#12197](https://github.com/hashicorp/terraform/issues/12197)) - * state/azure: add environment option for non-public cloud usage ([#12364](https://github.com/hashicorp/terraform/issues/12364)) - -## 0.8.7 (February 15, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `kinesis_endpoint` & `dynamodb_endpoint` fields in the provider schema were deprecated in favour of `kinesis` & `dynamodb` inside the `endpoints` block. Deprecated fields will be removed in 0.9 ([#11768](https://github.com/hashicorp/terraform/issues/11768)) - -FEATURES: - - * **New Interpolation:** `slice` ([#9729](https://github.com/hashicorp/terraform/issues/9729)) - * **New Provider:** `arukas` ([#11171](https://github.com/hashicorp/terraform/issues/11171)) - * **New Data Source:** `aws_db_instance` ([#11717](https://github.com/hashicorp/terraform/issues/11717)) - * **New Data Source:** `aws_vpn_gateway` ([#11886](https://github.com/hashicorp/terraform/issues/11886)) - * **New Data Source:** `consul_agent_self`, `consul_catalog_service`, `consul_catalog_services`, `consul_catalog_nodes` ([#11729](https://github.com/hashicorp/terraform/pull/11729)) - * **New Data Source:** `google_compute_zones` ([#11954](https://github.com/hashicorp/terraform/issues/11954)) - * **New Resource:** `aws_elasticsearch_domain_policy` ([#8648](https://github.com/hashicorp/terraform/issues/8648)) - * **New Resource:** `aws_vpc_peering_connection_accepter` ([#11505](https://github.com/hashicorp/terraform/issues/11505)) - * **New Resource:** `aws_config_config_rule` ([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `aws_config_configuration_recorder` 
([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `aws_config_configuration_recorder_status` ([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `aws_config_delivery_channel` ([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `azurerm_container_service` ([#10820](https://github.com/hashicorp/terraform/issues/10820)) - * **New Resource:** `vault_policy` ([#10980](https://github.com/hashicorp/terraform/issues/10980)) - -IMPROVEMENTS: - - * provider/aws: Update aws_ssm_document to include `document_type`, `latest_version` and `default_version` ([#11671](https://github.com/hashicorp/terraform/issues/11671)) - * provider/aws: Support import of aws_opsworks_instance ([#11783](https://github.com/hashicorp/terraform/issues/11783)) - * provider/aws: Add S3 bucket object tag support ([#11344](https://github.com/hashicorp/terraform/issues/11344)) - * provider/aws: Add validation for aws_iam_role ([#11915](https://github.com/hashicorp/terraform/issues/11915)) - * provider/fastly: Allows for conditional settings across fastly ([#11843](https://github.com/hashicorp/terraform/issues/11843)) - * provider/openstack: Allow OpenStack SSL certs + keys to take path or content ([#10271](https://github.com/hashicorp/terraform/issues/10271)) - * provider/pagerduty: Add support for `incident_urgency_rule`, `support_hours` and `scheduled_actions` to `pagerduty_service` ([#11856](https://github.com/hashicorp/terraform/issues/11856)) - * provider/rancher: parse Rancher client cli.json config file ([#11658](https://github.com/hashicorp/terraform/issues/11658)) - * provider/vault: Use Vault api.DefaultConfig() ([#11523](https://github.com/hashicorp/terraform/issues/11523)) - -BUG FIXES: - - * core: resources that depend on create-before-destroy resources don't create cycles ([#11753](https://github.com/hashicorp/terraform/issues/11753)) - * core: create-before-destroy resources with a count > 1
create proper edges ([#11753](https://github.com/hashicorp/terraform/issues/11753)) - * core: fix "diffs didn't match issue" for removing or empty collections that force new ([#11732](https://github.com/hashicorp/terraform/issues/11732)) - * core: module sources ended in archive extensions without a "." won't be treated as archives ([#11438](https://github.com/hashicorp/terraform/issues/11438)) - * core: destroy ordering of resources within modules is correct ([#11765](https://github.com/hashicorp/terraform/issues/11765)) - * core: Fix crash if count interpolates into a non-int ([#11864](https://github.com/hashicorp/terraform/issues/11864)) - * core: Targeting a module will properly exclude untargeted module outputs ([#11921](https://github.com/hashicorp/terraform/issues/11921)) - * state/remote/s3: Fix Bug with Assume Role for Federated IAM Account ([#10067](https://github.com/hashicorp/terraform/issues/10067)) - * provider/aws: Fix security_group_rule resource timeout errors ([#11809](https://github.com/hashicorp/terraform/issues/11809)) - * provider/aws: Fix diff suppress function for aws_db_instance ([#11909](https://github.com/hashicorp/terraform/issues/11909)) - * provider/aws: Fix default values for AMI volume size ([#11842](https://github.com/hashicorp/terraform/issues/11842)) - * provider/aws: Fix aws_db_event_subscription import ([#11744](https://github.com/hashicorp/terraform/issues/11744)) - * provider/aws: Respect 400 returned from AWS API on RDS Cluster termination ([#11795](https://github.com/hashicorp/terraform/issues/11795)) - * provider/aws: Raise the codebuild_project create timeout ([#11777](https://github.com/hashicorp/terraform/issues/11777)) - * provider/aws: Make aws_dms_endpoint database_name optional ([#11792](https://github.com/hashicorp/terraform/issues/11792)) - * provider/aws: Bump Create and Delete timeouts to 60 mins on directory_service ([#11793](https://github.com/hashicorp/terraform/issues/11793)) - * provider/aws: 
aws_codecommit_trigger fix typo that causes serialization to fail when events is non-empty ([#11839](https://github.com/hashicorp/terraform/issues/11839)) - * provider/aws: Fix bug to allow update of maintenance_window in elasticache_replication_group ([#11850](https://github.com/hashicorp/terraform/issues/11850)) - * provider/azurerm: Don't push an empty set of ssh keys to virtual machine or they cannot be amended ([#11804](https://github.com/hashicorp/terraform/issues/11804)) - * provider/azurerm: Refresh from state when VM Extension Resource not found ([#11894](https://github.com/hashicorp/terraform/issues/11894)) - * provider/cloudstack: Ensure consistent hashes of `cloudstack_port_forward` forward items. ([#11546](https://github.com/hashicorp/terraform/issues/11546)) - * provider/google: set additional_zones to computed and disallow the original zone from appearing in the list ([#11650](https://github.com/hashicorp/terraform/issues/11650)) - * provider/google: set subnetwork_project to computed ([#11646](https://github.com/hashicorp/terraform/issues/11646)) - * provider/openstack: BlockStorage v1 availability_zone Fix ([#11949](https://github.com/hashicorp/terraform/issues/11949)) - -## 0.8.6 (07 February 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `aws_appautoscaling_policy` no longer has default values for `scalable_dimension` and `service_namespace` - - -FEATURES: - - * **New Data Source:** `aws_kms_secret` ([#11460](https://github.com/hashicorp/terraform/issues/11460)) - * **New Data Source:** `aws_ecs_task_definition` ([#8509](https://github.com/hashicorp/terraform/issues/8509)) - * **New Data Source:** `aws_ecs_cluster` ([#11558](https://github.com/hashicorp/terraform/issues/11558)) - * **New Data Source:** `aws_partition` ([#11675](https://github.com/hashicorp/terraform/issues/11675)) - * **New Data Source:** `pagerduty_escalation_policy` ([#11616](https://github.com/hashicorp/terraform/issues/11616)) - * **New Data Source:** 
`pagerduty_schedule` ([#11614](https://github.com/hashicorp/terraform/issues/11614)) - * **New Data Source:** `profitbricks_datacenter` ([#11520](https://github.com/hashicorp/terraform/issues/11520)) - * **New Data Source:** `profitbricks_location` ([#11520](https://github.com/hashicorp/terraform/issues/11520)) - * **New Data Source:** `profitbricks_image` ([#11520](https://github.com/hashicorp/terraform/issues/11520)) - * **New Resource:** `aws_sfn_activity` ([#11420](https://github.com/hashicorp/terraform/issues/11420)) - * **New Resource:** `aws_sfn_state_machine` ([#11420](https://github.com/hashicorp/terraform/issues/11420)) - * **New Resource:** `aws_codebuild_project` ([#11560](https://github.com/hashicorp/terraform/issues/11560)) - * **New Resource:** `aws_dms_certificate` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_endpoint` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_replication_instance` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_replication_subnet_group` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_replication_subnet_group` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `pagerduty_addon` ([#11620](https://github.com/hashicorp/terraform/issues/11620)) - - -IMPROVEMENTS: - - * core: Interaction with Atlas now supports the `ATLAS_TLS_NOVERIFY` environment variable ([#11576](https://github.com/hashicorp/terraform/issues/11576)) - * provider/aws: Add EBS Volume support for EMR Instance Groups ([#11411](https://github.com/hashicorp/terraform/issues/11411)) - * provider/aws: Add support for policy to AWS provider assume_role ([#11501](https://github.com/hashicorp/terraform/issues/11501)) - * provider/aws: Add support for more sns_topic_subscription parameters on import command 
([#10408](https://github.com/hashicorp/terraform/issues/10408)) - * provider/aws: Add support for Server Side Encryption with default S3 KMS key to `aws_s3_bucket_object` ([#11261](https://github.com/hashicorp/terraform/issues/11261)) - * provider/aws: Add support for Cross Region RDS Cluster Replica ([#11428](https://github.com/hashicorp/terraform/issues/11428)) - * provider/aws: Add sensitive attribute in master_password ([#11584](https://github.com/hashicorp/terraform/issues/11584)) - * provider/aws: Application Auto Scaling now supports scaling an Amazon EC2 Spot fleet ([#8697](https://github.com/hashicorp/terraform/issues/8697)) - * provider/aws: Add tag support to DynamoDb tables ([#11617](https://github.com/hashicorp/terraform/issues/11617)) - * provider/aws: Provide the certificate ID in the aws data source ([#11693](https://github.com/hashicorp/terraform/issues/11693)) - * provider/aws: Wait for instance_profile creation to complete ([#11678](https://github.com/hashicorp/terraform/issues/11678)) - * provider/azurerm: Add support for scale sets overprovision ([#11516](https://github.com/hashicorp/terraform/issues/11516)) - * provider/azurerm: support import for load balancer and sub resources ([#11610](https://github.com/hashicorp/terraform/issues/11610)) - * provider/fastly: Adds papertrail logging ([#11491](https://github.com/hashicorp/terraform/issues/11491)) - * provider/fastly: Adds format_version for s3logging ([#11725](https://github.com/hashicorp/terraform/issues/11725)) - * provider/fastly: Adds healthcheck service ([#11709](https://github.com/hashicorp/terraform/issues/11709)) - * provider/google: allow instance group managers in region other than project ([#11294](https://github.com/hashicorp/terraform/issues/11294)) - * provider/google: Add second generation disk specification options ([#11571](https://github.com/hashicorp/terraform/issues/11571)) - * provider/google: remote_traffic_selector for google_compute_vpn_tunnel 
([#11020](https://github.com/hashicorp/terraform/issues/11020)) - * provider/nomad: Update jobspec dependency to allow parsing parameterized nomad jobfiles ([#11691](https://github.com/hashicorp/terraform/issues/11691)) - * provider/google: No default root user for SQL ([#11590](https://github.com/hashicorp/terraform/issues/11590)) - * provider/opsgenie: Descriptions for Teams ([#11391](https://github.com/hashicorp/terraform/issues/11391)) - * provider/rancher: rancher_registration_token add image parameter ([#11551](https://github.com/hashicorp/terraform/issues/11551)) - * provider/rancher: allow for importing resources using environment ID to target ([#11688](https://github.com/hashicorp/terraform/issues/11688)) - -BUG FIXES: - - * core: Remove missed subfields when parent list is removed ([#11498](https://github.com/hashicorp/terraform/issues/11498)) - * command/fmt: Trailing blocks of comments at the end of files are formatted properly ([#11585](https://github.com/hashicorp/terraform/issues/11585)) - * provider/aws: Fix issue with `path` not updated when modifying AWS API Gateway Resource ([#11443](https://github.com/hashicorp/terraform/issues/11443)) - * provider/aws: Fix AWS Lambda Qualifier Regexp for `aws_lambda_permission` ([#11383](https://github.com/hashicorp/terraform/issues/11383)) - * provider/aws: allow destroy of LB stickiness policy with missing LB ([#11462](https://github.com/hashicorp/terraform/issues/11462)) - * provider/aws: ECS Placement constraints fix ([#11475](https://github.com/hashicorp/terraform/issues/11475)) - * provider/aws: retry kms_key CreateKey if arn in policy not yet seen ([#11509](https://github.com/hashicorp/terraform/issues/11509)) - * provider/aws: Fix ALB Listener Rule Import ([#1174](https://github.com/hashicorp/terraform/issues/1174)) - * provider/aws: Fix issue with ECS Placement Strat. 
and type casing ([#11565](https://github.com/hashicorp/terraform/issues/11565)) - * provider/aws: aws_route53_record import error processing ([#11603](https://github.com/hashicorp/terraform/issues/11603)) - * provider/aws: Fix panic in aws_rds_cluster missing parameter error message ([#11600](https://github.com/hashicorp/terraform/issues/11600)) - * provider/aws: Succeed creating aws_volume_attachment if identical attachment exists ([#11060](https://github.com/hashicorp/terraform/issues/11060)) - * provider/aws: Guard against panic in aws_vpc_endpoint_association ([#11613](https://github.com/hashicorp/terraform/issues/11613)) - * provider/aws: Allow root volume size changes in aws_instance ([#11619](https://github.com/hashicorp/terraform/issues/11619)) - * provider/aws: Fix spot instance request block device configs ([#11649](https://github.com/hashicorp/terraform/issues/11649)) - * provider/aws: Fix validation issues for onceAWeek and onceADay validation functions ([#11679](https://github.com/hashicorp/terraform/issues/11679)) - * provider/aws: Return route_table_id from aws_route_table data source ([#11703](https://github.com/hashicorp/terraform/issues/11703)) - * provider/aws: validate aws_alb_target_group name is less than 32 characters ([#11699](https://github.com/hashicorp/terraform/issues/11699)) - * provider/azurerm: Scale Sets Load balancer pools should not be computed ([#11516](https://github.com/hashicorp/terraform/issues/11516)) - * provider/azurerm: Scale Sets ip configuration handling and update support for load balancer backend pools. 
([#11516](https://github.com/hashicorp/terraform/issues/11516)) - * provider/azurerm: check if lb sub resources exist when reading ([#11553](https://github.com/hashicorp/terraform/issues/11553)) - * provider/google: Fix master_instance_name to prevent slave rebuilds ([#11477](https://github.com/hashicorp/terraform/issues/11477)) - * provider/google: Refresh google_compute_instance machine_type on read ([#11645](https://github.com/hashicorp/terraform/issues/11645)) - * provider/google: Added forceNew on accessConfig in google_compute_instance_template ([#11548](https://github.com/hashicorp/terraform/issues/11548)) - * provider/ignition: Allow to add authorized keys without user creation ([#11406](https://github.com/hashicorp/terraform/issues/11406)) - * provider/ignition: mount and path are mutually exclusive ([#11409](https://github.com/hashicorp/terraform/issues/11409)) - * provider/ns1: Fix "use_client_subnet" in ns1_record ([#11368](https://github.com/hashicorp/terraform/issues/11368)) - * provider/openstack: Remove Default Security Group Rules on Create ([#11466](https://github.com/hashicorp/terraform/issues/11466)) - * provider/pagerduty: Allow timeouts to be disabled (pagerduty_service) ([#11483](https://github.com/hashicorp/terraform/issues/11483)) - * provider/rancher: Use environment specific client for accessing resources ([#11503](https://github.com/hashicorp/terraform/issues/11503)) - * provider/rancher: Refresh rancher stack from state on delete ([#11539](https://github.com/hashicorp/terraform/issues/11539)) - * provider/rancher: Refresh rancher token and registry from state on not found ([#11543](https://github.com/hashicorp/terraform/issues/11543)) - * provider/rancher: return error when Rancher template not found ([#11544](https://github.com/hashicorp/terraform/issues/11544)) - * provider/rancher: rancher_stack set docker_compose and rancher_compose ([#11550](https://github.com/hashicorp/terraform/issues/11550)) - * provider/rancher: Handle 
deleted/purged resources from Rancher ([#11607](https://github.com/hashicorp/terraform/issues/11607)) - * provider/statuscake: Remove computed from statuscake_test timeout parameter ([#11541](https://github.com/hashicorp/terraform/issues/11541)) - * provider/vsphere: vSphere virtual machine don't ignore VM power on errors ([#11604](https://github.com/hashicorp/terraform/issues/11604)) - * provisioner/remote-exec: Revert change in 0.8.5 that treated each line as a script since that doesn't work for stateful scripts. ([#11692](https://github.com/hashicorp/terraform/issues/11692)) - * provisioner/chef: Attributes JSON coming from computed source validates ([#11502](https://github.com/hashicorp/terraform/issues/11502)) - -## 0.8.5 (26 January 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: We no longer prefix an ECR repository address with `https://` - * provider/google: `google_project` has undergone significant changes. Existing configs and state should continue to work as they always have, but new configs and state will exhibit some new behaviour, including actually creating and deleting projects, instead of just referencing them. See https://www.terraform.io/docs/providers/google/r/google_project.html for more details. 
- -FEATURES: - - * **New Data Source:** `aws_autoscaling_groups` ([#11303](https://github.com/hashicorp/terraform/issues/11303)) - * **New Data Source:** `aws_elb_hosted_zone_id` ([#11027](https://github.com/hashicorp/terraform/issues/11027)) - * **New Data Source:** `aws_instance` ([#11272](https://github.com/hashicorp/terraform/issues/11272)) - * **New Data Source:** `aws_canonical_user_id` ([#11332](https://github.com/hashicorp/terraform/issues/11332)) - * **New Data Source:** `aws_vpc_endpoint` ([#11323](https://github.com/hashicorp/terraform/issues/11323)) - * **New Provider:** `profitbricks` ([#7943](https://github.com/hashicorp/terraform/issues/7943)) - * **New Provider:** `alicloud` ([#11235](https://github.com/hashicorp/terraform/issues/11235)) - * **New Provider:** `ns1` ([#10782](https://github.com/hashicorp/terraform/issues/10782)) - * **New Resource:** `aws_inspector_assessment_target` ([#11217](https://github.com/hashicorp/terraform/issues/11217)) - * **New Resource:** `aws_inspector_assessment_template` ([#11217](https://github.com/hashicorp/terraform/issues/11217)) - * **New Resource:** `aws_inspector_resource_group` ([#11217](https://github.com/hashicorp/terraform/issues/11217)) - * **New Resource:** `google_project_iam_policy` ([#10425](https://github.com/hashicorp/terraform/issues/10425)) - * **New Resource:** `google_project_services` ([#10425](https://github.com/hashicorp/terraform/issues/10425)) - * **New Interpolation Function:** `pathexpand()` ([#11277](https://github.com/hashicorp/terraform/issues/11277)) - -IMPROVEMENTS: - - * command/fmt: Single line objects (such as `variable "foo" {}`) aren't separated by newlines - * provider/aws: Add 'route_table_id' to route_table data source ([#11157](https://github.com/hashicorp/terraform/pull/11157)) - * provider/aws: Add Support for aws_cloudwatch_metric_alarm extended statistic ([#11193](https://github.com/hashicorp/terraform/issues/11193)) - * provider/aws: Make the type of a route53_record 
modifiable without recreating the resource ([#11164](https://github.com/hashicorp/terraform/issues/11164)) - * provider/aws: Add Placement Strategy to aws_ecs_service resource ([#11201](https://github.com/hashicorp/terraform/issues/11201)) - * provider/aws: Add support for placement_constraint to aws_ecs_service ([#11242](https://github.com/hashicorp/terraform/issues/11242)) - * provider/aws: allow ALB target group stickiness to be enabled/disabled ([#11251](https://github.com/hashicorp/terraform/issues/11251)) - * provider/aws: ALBs now wait for provisioning to complete before proceeding ([#11333](https://github.com/hashicorp/terraform/issues/11333)) - * provider/aws: Add support for setting MSSQL Timezone in aws_db_instance ([#11247](https://github.com/hashicorp/terraform/issues/11247)) - * provider/aws: CloudFormation YAML template support ([#11121](https://github.com/hashicorp/terraform/issues/11121)) - * provider/aws: Remove hardcoded https from the ecr repository ([#11307](https://github.com/hashicorp/terraform/issues/11307)) - * provider/aws: Implement CloudFront Lambda Function Associations ([#11291](https://github.com/hashicorp/terraform/issues/11291)) - * provider/aws: Remove MaxFrameRate default on ElasticTranscoderPreset ([#11340](https://github.com/hashicorp/terraform/issues/11340)) - * provider/aws: Allow ARN Identifier to be set for different partitions ([#11359](https://github.com/hashicorp/terraform/issues/11359)) - * provider/aws: Allow bypassing region validation ([#11358](https://github.com/hashicorp/terraform/issues/11358)) - * provider/aws: Added a s3_bucket domain name attribute ([#10088](https://github.com/hashicorp/terraform/issues/10088)) - * provider/aws: Add DiffSupressFunction to aws_db_instance's engine_version ([#11369](https://github.com/hashicorp/terraform/issues/11369)) - * provider/archive: Adding support for multiple source contents ([#11271](https://github.com/hashicorp/terraform/issues/11271)) - * provider/azurerm: add caching 
support for virtual_machine data_disks ([#11142](https://github.com/hashicorp/terraform/issues/11142)) - * provider/azurerm: make lb sub resources idempotent ([#11128](https://github.com/hashicorp/terraform/issues/11128)) - * provider/cloudflare: Add verification for record types and content ([#11197](https://github.com/hashicorp/terraform/issues/11197)) - * provider/datadog: Add aggregator method to timeboard graph resource ([#11206](https://github.com/hashicorp/terraform/issues/11206)) - * provider/fastly Add request_condition to backend definition ([#11238](https://github.com/hashicorp/terraform/issues/11238)) - * provider/google: Add subnetwork_project field to enable cross-project networking in instance templates ([#11110](https://github.com/hashicorp/terraform/issues/11110)) - * provider/google: Add support for encrypting a disk ([#11167](https://github.com/hashicorp/terraform/issues/11167)) - * provider/google: Add support for session_affinity to google_compute_region_backend_service ([#11228](https://github.com/hashicorp/terraform/issues/11228)) - * provider/google: Allow additional zones to be configured in GKE ([#11018](https://github.com/hashicorp/terraform/issues/11018)) - * provider/ignition: Allow empty dropin and content for systemd_units ([#11327](https://github.com/hashicorp/terraform/issues/11327)) - * provider/openstack: LoadBalancer Security Groups ([#11074](https://github.com/hashicorp/terraform/issues/11074)) - * provider/openstack: Volume Attachment Updates ([#11285](https://github.com/hashicorp/terraform/issues/11285)) - * provider/scaleway improve bootscript data source ([#11183](https://github.com/hashicorp/terraform/issues/11183)) - * provider/statuscake: Add support for StatusCake confirmation servers ([#11179](https://github.com/hashicorp/terraform/issues/11179)) - * provider/statuscake: Add support for Updating StatusCake contact_ids ([#7115](https://github.com/hashicorp/terraform/issues/7115)) - * provisioner/chef: Add support for 
named run-lists when using policyfiles ([#11215](https://github.com/hashicorp/terraform/issues/11215)) - * core: Add basic HTTP Auth for remote state backend ([#11301](https://github.com/hashicorp/terraform/issues/11301)) - -BUG FIXES: - - * command/fmt: Multiple `#` comments won't be separated by newlines. ([#11209](https://github.com/hashicorp/terraform/issues/11209)) - * command/fmt: Lists with a heredoc element that starts on the same line as the opening brace is formatted properly. ([#11208](https://github.com/hashicorp/terraform/issues/11208)) - * command/import: Provider configuration inheritance into modules works properly ([#11393](https://github.com/hashicorp/terraform/issues/11393)) - * command/import: Update help text to note that `-var` and `-var-file` work - * provider/aws: Fix panic when querying VPC's main route table via data source ([#11134](https://github.com/hashicorp/terraform/issues/11134)) - * provider/aws: Allow creating aws_codecommit repository outside of us-east-1 ([#11177](https://github.com/hashicorp/terraform/issues/11177)) - * provider/aws: Fix issue destroying or updating CloudFront due to missing Lambda Function Associations parameters ([#11291](https://github.com/hashicorp/terraform/issues/11291)) - * provider/aws: Correct error messages are now returned if an `aws_autoscaling_lifecycle_hook` fails during creation ([#11360](https://github.com/hashicorp/terraform/issues/11360)) - * provider/aws: Fix issue updating/destroying Spot Fleet requests when using `terminate_instances_with_expiration` ([#10953](https://github.com/hashicorp/terraform/issues/10953)) - * provider/azurerm: use configured environment for storage clients ([#11159](https://github.com/hashicorp/terraform/issues/11159)) - * provider/google: removes region param from google_compute_backend_service ([#10903](https://github.com/hashicorp/terraform/issues/10903)) - * provider/ignition: allowing empty systemd.content when a dropin is provided 
([#11216](https://github.com/hashicorp/terraform/issues/11216)) - * provider/openstack: Increase deletion timeout for router interfaces ([#11250](https://github.com/hashicorp/terraform/issues/11250)) - * provider/openstack: Fix Instance Metadata Deletion ([#11252](https://github.com/hashicorp/terraform/issues/11252)) - * provider/scaleway: Rename Scaleway provider parameters to match more closely to the API ([#10874](https://github.com/hashicorp/terraform/issues/10874)) - * provider/vault: Remove user input for optional vault provider fields ([#11082](https://github.com/hashicorp/terraform/issues/11082)) - * provider/vsphere: Set deviceID to 0 if one 1 network interface in vsphere_virtual_machine ([#8276](https://github.com/hashicorp/terraform/issues/8276)) - * provisioner/remote-exec: fail on first inline script with bad exit code ([#11155](https://github.com/hashicorp/terraform/issues/11155)) - -## 0.8.4 (January 11, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * We have removed the `Arukas` provider that was added in v0.8.3 for this release. Unfortunately we found the - new provider included a dependency that would not compile and run on Windows operating systems. For now the - provider has been removed and we hope to work to reintroduce it for all platforms in the near future. Going forward we will also be taking additional steps in our build testing to ensure Terraform builds on all platforms before release. 
- -## 0.8.3 (January 10, 2017) - -FEATURES: - - * **New Provider:** `Arukas` ([#10862](https://github.com/hashicorp/terraform/issues/10862)) - * **New Provider:** `Ignition` ([#6189](https://github.com/hashicorp/terraform/issues/6189)) - * **New Provider:** `OpsGenie` ([#11012](https://github.com/hashicorp/terraform/issues/11012)) - * **New Data Source:** `aws_vpc_peering_connection` ([#10913](https://github.com/hashicorp/terraform/issues/10913)) - * **New Resource:** `aws_codedeploy_deployment_config` ([#11062](https://github.com/hashicorp/terraform/issues/11062)) - * **New Resource:** `azurerm_container_registry` ([#10973](https://github.com/hashicorp/terraform/issues/10973)) - * **New Resource:** `azurerm_eventhub_authorization_rule` ([#10971](https://github.com/hashicorp/terraform/issues/10971)) - * **New Resource:** `azurerm_eventhub_consumer_group` ([#9902](https://github.com/hashicorp/terraform/issues/9902)) - -IMPROVEMENTS: - - * command/fmt: Show filename on parse error ([#10923](https://github.com/hashicorp/terraform/issues/10923)) - * provider/archive: `archive_file` now exports `output_md5` attribute in addition to existing SHA1 and Base64 SHA256 hashes. 
([#10851](https://github.com/hashicorp/terraform/issues/10851)) - * provider/aws: Add `most_recent` to the `ebs_snapshot` data source ([#10986](https://github.com/hashicorp/terraform/issues/10986)) - * provider/aws: Add support for instance tenancy in `aws_opsworks_instance` ([#10885](https://github.com/hashicorp/terraform/issues/10885)) - * provider/aws: Added a validation for security group rule types ([#10864](https://github.com/hashicorp/terraform/issues/10864)) - * provider:aws: Add support for updating aws_emr_cluster parameters ([#11008](https://github.com/hashicorp/terraform/issues/11008)) - * provider/aws: Add Placement Constraints to `aws_ecs_task_definition` ([#11030](https://github.com/hashicorp/terraform/issues/11030)) - * provider/aws: Increasing timeout for redshift cluster creation to 75 minutes ([#11041](https://github.com/hashicorp/terraform/issues/11041)) - * provider/aws: Add support for content_handling to aws_api_gateway_integration_response ([#11002](https://github.com/hashicorp/terraform/issues/11002)) - * provider/aws: Add S3 bucket name validation ([#11116](https://github.com/hashicorp/terraform/issues/11116)) - * provider/aws: Add Route53 Record type validation ([#11119](https://github.com/hashicorp/terraform/issues/11119)) - * provider/azurerm: support non public clouds ([#11026](https://github.com/hashicorp/terraform/issues/11026)) - * provider/azurerm: Azure resource providers which are already registered are no longer re-registered. ([#10991](https://github.com/hashicorp/terraform/issues/10991)) - * provider/docker: Add network create --internal flag support ([#10932](https://github.com/hashicorp/terraform/issues/10932)) - * provider/docker: Add support for a list of pull_triggers within the docker_image resource. 
([#10845](https://github.com/hashicorp/terraform/issues/10845)) - * provider/pagerduty Add delete support to `pagerduty_service_integration` ([#10891](https://github.com/hashicorp/terraform/issues/10891)) - * provider/postgresql Add permissions support to `postgresql_schema` as nested `policy` attributes ([#10808](https://github.com/hashicorp/terraform/issues/10808)) - -BUG FIXES: - - * core: Properly expand sets as lists from a flatmap ([#11042](https://github.com/hashicorp/terraform/issues/11042)) - * core: Disallow root modules named "root" as a temporary workaround ([#11099](https://github.com/hashicorp/terraform/issues/11099)) - * command/fmt: Lists of heredocs format properly ([#10947](https://github.com/hashicorp/terraform/issues/10947)) - * command/graph: Fix crash when `-type=legacy` ([#11095](https://github.com/hashicorp/terraform/issues/11095)) - * provider/aws: Guard against nil change output in `route53_zone` that causes panic ([#10798](https://github.com/hashicorp/terraform/issues/10798)) - * provider/aws: Reworked validateArn function to handle empty values ([#10833](https://github.com/hashicorp/terraform/issues/10833)) - * provider/aws: Set `aws_autoscaling_policy` `metric_aggregation_type` to be Computed ([#10904](https://github.com/hashicorp/terraform/issues/10904)) - * provider/aws: `storage_class` is now correctly treated as optional when configuring replication for `aws_s3_bucket` resources. ([#10921](https://github.com/hashicorp/terraform/issues/10921)) - * provider/aws: `user_data` on `aws_launch_configuration` resources is only base 64 encoded if the value provided is not already base 64 encoded. 
([#10871](https://github.com/hashicorp/terraform/issues/10871)) - * provider/aws: Add snapshotting to the list of pending state for elasticache ([#10965](https://github.com/hashicorp/terraform/issues/10965)) - * provider/aws: Add support for updating tags in aws_emr_cluster ([#11003](https://github.com/hashicorp/terraform/issues/11003)) - * provider/aws: Fix the normalization of AWS policy statements ([#11009](https://github.com/hashicorp/terraform/issues/11009)) - * provider/aws: data_source_aws_iam_server_certificate latest should be bool not string causes panic ([#11016](https://github.com/hashicorp/terraform/issues/11016)) - * provider/aws: Fix typo in aws_redshift_cluster causing security groups to not allow update ([#11025](https://github.com/hashicorp/terraform/issues/11025)) - * provider/aws: Set `key_name` in `aws_key_pair` if omitted in configuration ([#10987](https://github.com/hashicorp/terraform/issues/10987)) - * provider/aws: Updating the aws_efs_mount_target dns_name ([#11023](https://github.com/hashicorp/terraform/issues/11023)) - * provider/aws: Validate window time format for snapshot times and backup windows on RDS and ElastiCache resources ([#11089](https://github.com/hashicorp/terraform/issues/11089)) - * provider/aws: aws_db_instance restored from snapshot had problem with subnet_group ([#11050](https://github.com/hashicorp/terraform/issues/11050)) - * provider/aws: Allow disabled access_log in ELB ([#11120](https://github.com/hashicorp/terraform/issues/11120)) - * provider/azurerm: fix update protocol for lb_probe ([#11125](https://github.com/hashicorp/terraform/issues/11125)) - * provider/google: Fix backwards incompatibility around create_timeout in instances ([#10858](https://github.com/hashicorp/terraform/issues/10858)) - * provider/google: google_compute_instance_group_manager update_strategy not properly read ([#10174](https://github.com/hashicorp/terraform/issues/10174)) - * provider/openstack: Handle `PENDING_UPDATE` status with LBaaS 
v2 members ([#10875](https://github.com/hashicorp/terraform/issues/10875)) - * provider/rancher: Add 'finishing-upgrade' state to rancher stack ([#11019](https://github.com/hashicorp/terraform/issues/11019)) - - -## 0.8.2 (December 21, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * `aws_lambda_function` Please note that `runtime` is now a required field as AWS have deprecated the use of nodejs 0.10 in lambda functions ([#9724](https://github.com/hashicorp/terraform/issues/9724)) - -FEATURES: - - * **New Provider:** `New Relic` ([#10317](https://github.com/hashicorp/terraform/issues/10317)) - * **New Resource:** `aws_ses_configuration_set` ([#10735](https://github.com/hashicorp/terraform/issues/10735)) - * **New Resource:** `aws_ses_event_destination` ([#10735](https://github.com/hashicorp/terraform/issues/10735)) - * **New Resource:** `azurerm_redis_cache` ([#10184](https://github.com/hashicorp/terraform/issues/10184)) - * **New Resource:** `ultradns_dirpool` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_probe_http` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_probe_ping` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_record` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_tcpool` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Data Source:** `aws_iam_account_alias` ([#10804](https://github.com/hashicorp/terraform/issues/10804)) - -IMPROVEMENTS: - - * provider/aws: Add support for BinaryMediaTypes and ContentHandling to AWS API Gateway ([#10776](https://github.com/hashicorp/terraform/issues/10776)) - * provider/aws: Deprecated aws_lambda_function nodejs runtime in favor of nodejs4.3 ([#9724](https://github.com/hashicorp/terraform/issues/9724)) - * provider/aws: Support updating of aws_db_instance db_subnet_group_name 
([#10818](https://github.com/hashicorp/terraform/issues/10818)) - * provider/aws: Allow update to RDS password when restoring from snapshot ([#8622](https://github.com/hashicorp/terraform/issues/8622)) - * provider/azurerm: add support for tags to dns_zone ([#10750](https://github.com/hashicorp/terraform/issues/10750)) - * provider/pagerduty pagerduty_schedule - support for start_day_of_week (schedule restriction) ([#10069](https://github.com/hashicorp/terraform/issues/10069)) - * state/remote/swift: add support for token authentication ([#10866](https://github.com/hashicorp/terraform/issues/10866)) - -BUG FIXES: - - * core: Improve validation for provider aliases to allow inheritance in modules. ([#10807](https://github.com/hashicorp/terraform/issues/10807)) - * core: Math operations always prefer floating point if an argument is floating point. ([#10886](https://github.com/hashicorp/terraform/issues/10886)) - * core: Strings are implicitly converted to integers/floats for comparison. ([#10886](https://github.com/hashicorp/terraform/issues/10886)) - * provider/aws: Fixed crash in `data_source_ami` with empty `owner` value ([#10763](https://github.com/hashicorp/terraform/issues/10763)) - * provider/aws: Require `master_username` and `master_password` if no snapshot given in Redshift Cluster ([#9837](https://github.com/hashicorp/terraform/issues/9837)) - * provider/azurerm: fix network_interface.ip_configuration hash for load balancers ([#10834](https://github.com/hashicorp/terraform/issues/10834)) - * provider/docker: Fix regression, 'cert_path' stopped working ([#10801](https://github.com/hashicorp/terraform/issues/10801)) - * provider/google: Use node_version during google_container_cluster creation ([#10817](https://github.com/hashicorp/terraform/issues/10817)) - * provider/openstack: Handle Volume Creation Errors ([#10821](https://github.com/hashicorp/terraform/issues/10821)) - -## 0.8.1 (December 14, 2016) - -IMPROVEMENTS: - - * provider/aws: Support eu-west-2 
([#10470](https://github.com/hashicorp/terraform/issues/10470)) - * provider/aws: Improved the SNS topic subscription protocols validation ([#10704](https://github.com/hashicorp/terraform/issues/10704)) - * providers/google: Add subnetwork_project field to enable cross-project networking ([#9662](https://github.com/hashicorp/terraform/issues/9662)) - * provider/pagerduty: Allow 'team_responder' role for pagerduty_user resource ([#10728](https://github.com/hashicorp/terraform/issues/10728)) - -BUG FIXES: - - * core: Handle whitespace around the key in the `-var` flag. ([#10717](https://github.com/hashicorp/terraform/issues/10717)) - * core: `terraform` block works in the presence of `_override` files ([#10715](https://github.com/hashicorp/terraform/issues/10715)) - * core: Fix error when a provider in a module only referenced a variable ([#10719](https://github.com/hashicorp/terraform/issues/10719)) - * core: Destroy ordering for resources that depend on each other across modules is correct ([#745](https://github.com/hashicorp/terraform/issues/745)) - -DEPRECATION REMOVALS: - - * provider/aws: Removed deprecated `parameter_group` from `aws_rds_cluster` ([#10733](https://github.com/hashicorp/terraform/issues/10733)) - -## 0.8.0 (December 13, 2016) - -**This is the complete 0.7.13 to 0.8 CHANGELOG. Below this section we -also have a 0.8.0-rc3 to 0.8.0 final CHANGELOG.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * `template_file` _inline_ templates must escape their variable usage. What - was previously `${foo}` must now be `$${foo}`. Note that this is only - for _inline_ templates. Templates read from files are unchanged. ([#9698](https://github.com/hashicorp/terraform/issues/9698)) - * Escape sequences used to require double-escaping when used within interpolations. - You now must only escape once (which is the expected/typical behavior). - For example: `${replace(var.foo, "\\", "\\\\")}` is correct. Before, - that would cause very strange behavior. 
However, this may break existing - configurations which found a level of escape sequences to work. Check - `terraform plan` for incorrect output. - * Math operators now follow the standard order of operations: *, /, % followed - by +, -. See the updated interpolation docs for more information. You can - continue to force ordering with parentheses. - * Strings in configuration can no longer contain unescaped newlines. For - unescaped newlines, heredocs must be used - - * provider/aws: Anywhere where we can specify kms_key_id must now be a valid KMS Key ID ARN to stop continual diffs - * provider/chef: The chef provider now accepts `key_material` as an alternative to - `private_key_pem`. The `private_key_pem` attribute will be deprecated in a - future release - * provider/postgres: `ssl_mode` has been renamed `sslmode` to match common usage ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - -DEPRECATION REMOVALS: - - * The `template_file` resource no longer accepts a direct file path for the - `template` attribute. You may either specify a path wrapped in a `file` - function or specify a file path with the `filepath` attribute. This was - deprecated during 0.7.x. - -FEATURES: - - * **New command:** `terraform console`, an interactive console for experimenting - with and using interpolations. ([#10093](https://github.com/hashicorp/terraform/issues/10093)) - * **Terraform version requirement in configuration.** You can now specify - a Terraform version requirement in configuration and modules. ([#10080](https://github.com/hashicorp/terraform/issues/10080)) - * **Conditional values:** You can now use conditionals to determine the values - of attributes. For example: `count = "${var.env == "prod" ? 1 : 0}"`. - * **`depends_on` can reference modules.** This allows a resource or output - to depend on everything within a module. 
([#10076](https://github.com/hashicorp/terraform/issues/10076)) - * **`output` supports `depends_on`.** This is useful when the output depends - on a certain ordering to happen that can't be represented with interpolations. - ([#10072](https://github.com/hashicorp/terraform/issues/10072)) - * Providers and resources are now notified by Terraform core to "stop" when - an interrupt is received, allowing resources to gracefully exit much, much - faster. ([#9607](https://github.com/hashicorp/terraform/issues/9607)) - * The `import` command can now specify a provider alias to use. ([#10310](https://github.com/hashicorp/terraform/issues/10310)) - * The `import` command will now read provider configuration from Terraform - configuration files (including loading tfvars files and so on). - ([#9809](https://github.com/hashicorp/terraform/issues/9809)) - - * **New Provider:** `external` ([#8768](https://github.com/hashicorp/terraform/issues/8768)) - * **New Provider:** `nomad` ([#9538](https://github.com/hashicorp/terraform/issues/9538)) - * **New Provider:** `rancher` ([#9173](https://github.com/hashicorp/terraform/issues/9173)) - * **New Provider:** `vault` ([#9158](https://github.com/hashicorp/terraform/issues/9158)) - * **New Provider:** `Icinga2` ([#8306](https://github.com/hashicorp/terraform/issues/8306)) - * **New Resource:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * **New Resource:** `aws_lightsail_domain` ([#10637](https://github.com/hashicorp/terraform/issues/10637)) - * **New Resource:** `aws_lightsail_key_pair` ([#10583](https://github.com/hashicorp/terraform/issues/10583)) - * **New Resource:** `aws_lightsail_instance` ([#10473](https://github.com/hashicorp/terraform/issues/10473)) - * **New Resource:** `aws_opsworks_rds_db_instance` ([#10294](https://github.com/hashicorp/terraform/issues/10294)) - * **New Resource:** `aws_snapshot_create_volume_permission` ([#9891](https://github.com/hashicorp/terraform/issues/9891)) 
- * **New Resource:** `aws_vpc_endpoint_route_table_association` ([#10137](https://github.com/hashicorp/terraform/issues/10137)) - * **New Resource:** `google_compute_health_check` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Resource:** `google_compute_region_backend_service` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Resource:** `openstack_blockstorage_volume_attach_v2` ([#10259](https://github.com/hashicorp/terraform/issues/10259)) - * **New Resource:** `openstack_compute_volume_attach_v2` ([#10260](https://github.com/hashicorp/terraform/issues/10260)) - * **New Data Source:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * **New Data Source:** `aws_eip` ([#9833](https://github.com/hashicorp/terraform/issues/9833)) - * **New Data Source:** `aws_iam_server_certificate` ([#10558](https://github.com/hashicorp/terraform/issues/10558)) - * **New Data Source:** `aws_route_table` ([#10301](https://github.com/hashicorp/terraform/issues/10301)) - * **New Data Source:** `aws_route53_zone` ([#9766](https://github.com/hashicorp/terraform/issues/9766)) - * **New Data Source:** `aws_vpc_endpoint_services` ([#10261](https://github.com/hashicorp/terraform/issues/10261)) - * **New Data Source:** `pagerduty_user` ([#10541](https://github.com/hashicorp/terraform/issues/10541)) - * **New Interpolation Function:** `timestamp` ([#10475](https://github.com/hashicorp/terraform/issues/10475)) - * core: allow outputs to have descriptions ([#9722](https://github.com/hashicorp/terraform/issues/9722)) - * state/azure: support passing of lease ID when writing storage blob ([#10115](https://github.com/hashicorp/terraform/issues/10115)) - -IMPROVEMENTS: - - * core: Human-friendly error when a computed count is used. ([#10060](https://github.com/hashicorp/terraform/issues/10060)) - * core: Maps across multiple input sources (files, CLI, env vars) are merged. 
([#10654](https://github.com/hashicorp/terraform/issues/10654)) - * core: SIGTERM also triggers graceful shutdown in addition to SIGINT ([#10534](https://github.com/hashicorp/terraform/issues/10534)) - * core: Plan will show deposed-only destroys for create-before-destroy resources. ([#10404](https://github.com/hashicorp/terraform/issues/10404)) - * command/plan: Show warning when a plan file is given as input to make behavior clear. ([#10639](https://github.com/hashicorp/terraform/issues/10639)) - * helper/schema: only map, list, and set elements that are actually causing - a resource to destroy/create are marked as "requires new". ([#9613](https://github.com/hashicorp/terraform/issues/9613)) - * provider/aws: Add support for AWS CA Central 1 Region ([#10618](https://github.com/hashicorp/terraform/issues/10618)) - * provider/aws: Allow importing of aws_iam_role, aws_iam_role_policy and aws_iam_policy ([#9398](https://github.com/hashicorp/terraform/issues/9398)) - * provider/aws: Added s3 bucket region attribute management ([#10482](https://github.com/hashicorp/terraform/issues/10482)) - * provider/aws: Added SQS FIFO queues ([#10614](https://github.com/hashicorp/terraform/issues/10614)) - * provider/aws: Addition of suspended_processes to aws_autoscaling_group ([#10096](https://github.com/hashicorp/terraform/issues/10096)) - * provider/aws: added auto_minor_version_upgrade on aws_rds_cluster_instance ([#10284](https://github.com/hashicorp/terraform/issues/10284)) - * provider/aws: Add JSON validation to the aws_iam_policy resource ([#10239](https://github.com/hashicorp/terraform/issues/10239)) - * provider/aws: Support MFA delete for s3 bucket versioning ([#10020](https://github.com/hashicorp/terraform/issues/10020)) - * provider/aws: Enable DeleteOnTermination in ENI when created by spot fleet ([#9922](https://github.com/hashicorp/terraform/issues/9922)) - * provider/aws: Enforced kms_key_* attributes to be ARNs 
([#10356](https://github.com/hashicorp/terraform/issues/10356)) - * provider/aws: IPv6 Support To Cloudfront ([#10332](https://github.com/hashicorp/terraform/issues/10332)) - * provider/aws: Support import of aws_iam_instance_profile ([#10436](https://github.com/hashicorp/terraform/issues/10436)) - * provider/aws: Increase `aws_emr_cluster` timeout ([#10444](https://github.com/hashicorp/terraform/issues/10444)) - * provider/aws: Support Automatic Rollback of CodeDeploy deployments and CloudWatch Alarms for a Deployment Group ([#9039](https://github.com/hashicorp/terraform/issues/9039)) - * provider/aws: Add support for termination protection and autotermination to EMR ([#10252](https://github.com/hashicorp/terraform/issues/10252)) - * provider/aws: Add "no_device" support to ephemeral block devices ([#10547](https://github.com/hashicorp/terraform/issues/10547)) - * provider/aws: Added S3 Bucket replication ([#10552](https://github.com/hashicorp/terraform/issues/10552)) - * provider/aws: Add `pgp_key` to `aws_iam_access_key` to protect key. 
([#10615](https://github.com/hashicorp/terraform/issues/10615)) - * provider/azurerm: make DiskSizeGB optional for azurerm_virtual_machine data_disks ([#10232](https://github.com/hashicorp/terraform/issues/10232)) - * provider/azurerm support `license_type` virtual_machine property ([#10539](https://github.com/hashicorp/terraform/issues/10539)) - * provider/azurerm: support import of routes, fix route_table ([#10389](https://github.com/hashicorp/terraform/issues/10389)) - * provider/azurerm: enable import of more resources ([#10195](https://github.com/hashicorp/terraform/issues/10195)) - * provider/azurerm: create common schema for location field, add diff suppress ([#10409](https://github.com/hashicorp/terraform/issues/10409)) - * provider/chef: Migrate Chef to use KEY_MATERIAL rather than using a Pem file ([#10105](https://github.com/hashicorp/terraform/issues/10105)) - * provider/cloudstack: Add option to set a custom `network_domain` for `cloudstack_network` ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_static_nat` resource ([#10420](https://github.com/hashicorp/terraform/issues/10420)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_port_forward` resource ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/datadog: Make monitor thresholds optional. 
([#10526](https://github.com/hashicorp/terraform/issues/10526)) - * provider/datadog: Improve datadog timeboard support ([#10027](https://github.com/hashicorp/terraform/issues/10027)) - * provider/docker: Upload files into container before first start ([#9520](https://github.com/hashicorp/terraform/issues/9520)) - * provider/docker: authentication via values instead of files ([#10151](https://github.com/hashicorp/terraform/issues/10151)) - * provider/fastly add origin shielding ([#10677](https://github.com/hashicorp/terraform/issues/10677)) - * provider/fastly: add ssl_hostname option ([#9629](https://github.com/hashicorp/terraform/issues/9629)) - * provider/github: supports importing resources ([#10382](https://github.com/hashicorp/terraform/issues/10382)) - * provider/google: Add support for Internal Load Balancing ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * provider/google: Add Service Accounts resource ([#9946](https://github.com/hashicorp/terraform/issues/9946)) - * provider/google: Instances and templates now both support `metadata_startup_script` and `metadata.startup-script`. 
([#10537](https://github.com/hashicorp/terraform/issues/10537)) - * provider/google: Added support for session affinity to compute_backend_service ([#10387](https://github.com/hashicorp/terraform/issues/10387)) - * provider/google: Projects are now importable ([#10469](https://github.com/hashicorp/terraform/issues/10469)) - * provider/google: SSL certificates can now specify prefix instead of a full name ([#10684](https://github.com/hashicorp/terraform/issues/10684)) - * provider/openstack: Add Swauth/Swift Authentication ([#9943](https://github.com/hashicorp/terraform/issues/9943)) - * provider/openstack: Detect Region for Importing Resources ([#10509](https://github.com/hashicorp/terraform/issues/10509)) - * provider/postgresql: Improved support for many PostgreSQL resources ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - * provider/postgresql: Added 'connect_timeout' argument to provider 'postgresql' ([#10380](https://github.com/hashicorp/terraform/issues/10380)) - * provider/rundeck: enable validation for multiple values in an array ([#8913](https://github.com/hashicorp/terraform/issues/8913)) - * provider/rundeck: Add support for scheduler to rundeck_job ([#9449](https://github.com/hashicorp/terraform/issues/9449)) - * state/remote/swift: Add support for versioning state file in swift and expiring versioned state ([#10055](https://github.com/hashicorp/terraform/issues/10055)) - -BUG FIXES: - - * core: Escape sequences in interpolations work in every case. ([#8709](https://github.com/hashicorp/terraform/issues/8709)) - * core: Maps in outputs with computed values are no longer removed. ([#9549](https://github.com/hashicorp/terraform/issues/9549)) - * core: Direct indexing into a computed list no longer errors. ([#10657](https://github.com/hashicorp/terraform/issues/10657)) - * core: Validate fails on invalid keys in `variable` blocks. 
([#10658](https://github.com/hashicorp/terraform/issues/10658)) - * core: Validate that only a single `lifecycle` block exists per resource. ([#10656](https://github.com/hashicorp/terraform/issues/10656)) - * core: When destroying, the resources of a provider that depends on another resource are destroyed first. ([#10659](https://github.com/hashicorp/terraform/issues/10659)) - * core: Catch parse errors for null characters mid-file ([#9134](https://github.com/hashicorp/terraform/issues/9134)) - * core: Remove extra dot from state command backup files ([#10300](https://github.com/hashicorp/terraform/issues/10300)) - * core: Validate data sources do not have provisioners ([#10318](https://github.com/hashicorp/terraform/issues/10318)) - * core: Disable checkpoint settings take effect ([#10206](https://github.com/hashicorp/terraform/issues/10206)) - * core: Changed attribute console output shows up on Windows. ([#10417](https://github.com/hashicorp/terraform/issues/10417)) - * core: Destroying deposed resources in create before destroy waits until the creation step of its specific index. (0.8 regression) ([#10416](https://github.com/hashicorp/terraform/issues/10416)) - * core: Certain invalid configurations will no longer print "illegal". ([#10448](https://github.com/hashicorp/terraform/issues/10448)) - * core: Fix a crash that could occur when multiple deposed instances exist. ([#10504](https://github.com/hashicorp/terraform/issues/10504)) - * core: Fix a diff mismatch error that could happen when a resource depends on a count resource being decreased. ([#10522](https://github.com/hashicorp/terraform/issues/10522)) - * core: On Unix machines if `getent` is not available, fall back to shell to find home dir. ([#10515](https://github.com/hashicorp/terraform/issues/10515)) - * command/fmt: Multiline comments aren't indented every fmt. 
([#6524](https://github.com/hashicorp/terraform/issues/6524)) - * communicator/ssh: Avoid race that could cause parallel remote execs on the same host to overwrite each other ([#10549](https://github.com/hashicorp/terraform/issues/10549)) - * provider/aws: Added Lambda function guard when needed attributes are not set ([#10663](https://github.com/hashicorp/terraform/issues/10663)) - * provider/aws: Allow import of aws_security_groups with more than one source_security_group_id rule ([#9477](https://github.com/hashicorp/terraform/issues/9477)) - * provider/aws: Allow setting the DB Instance name when restoring from a snapshot ([#10664](https://github.com/hashicorp/terraform/issues/10664)) - * provider/aws: Fix issue importing `aws_vpc_peering_connection` ([#10635](https://github.com/hashicorp/terraform/issues/10635)) - * provider/aws: Fixed deletion of aws_api_gateway_base_path_mapping with empty path ([#10177](https://github.com/hashicorp/terraform/issues/10177)) - * provider/aws: Fix issue removing Lambda environment variables ([#10492](https://github.com/hashicorp/terraform/issues/10492)) - * provider/aws: Skip VPC endpoint routes when removing default route table's routes ([#10303](https://github.com/hashicorp/terraform/issues/10303)) - * provider/aws: Do not return a root device for instance store backed AMIs. 
([#9483](https://github.com/hashicorp/terraform/issues/9483)) - * provider/aws: resource_aws_opsworks_application does not accept document_root parameter ([#10477](https://github.com/hashicorp/terraform/issues/10477)) - * provider/aws: bug fix when specifying level on aws_opsworks_permission ([#10394](https://github.com/hashicorp/terraform/issues/10394)) - * provider/aws: cloudfront distribution 404 should mark as gone ([#10281](https://github.com/hashicorp/terraform/issues/10281)) - * provider/aws: Assign correct number of core instances (n-1) to aws-emr-cluster on update ([#10529](https://github.com/hashicorp/terraform/issues/10529)) - * provider/aws: Allow update of Service role on a CodeDeploy deployment group ([#9866](https://github.com/hashicorp/terraform/issues/9866)) - * provider/aws: fixed the api_gw_domain_name replace operation ([#10179](https://github.com/hashicorp/terraform/issues/10179)) - * provider/aws: Forces the API GW domain name certificates to recreate the resource ([#10588](https://github.com/hashicorp/terraform/issues/10588)) - * provider/aws: Validate `effect` in aws_iam_policy_document data source ([#10021](https://github.com/hashicorp/terraform/issues/10021)) - * provider/azurerm: fix virtual_machine reading plan as the wrong type ([#10626](https://github.com/hashicorp/terraform/issues/10626)) - * provider/azurerm: Prevent null reference when reading boot_diagnostics settings in azurerm_virtual_machine ([#10283](https://github.com/hashicorp/terraform/issues/10283)) - * provider/azurerm: azurerm_availability_set not is ForceNew for UpdateDomain and FaultDomain ([#10545](https://github.com/hashicorp/terraform/issues/10545)) - * provider/azurerm: fix servicebus_topic max_size_in_megabytes for premium namespaces ([#10611](https://github.com/hashicorp/terraform/issues/10611)) - * provider/azurerm: set ForceNew for storage image and OS disk of virtual_machine ([#10340](https://github.com/hashicorp/terraform/issues/10340)) - * provider/datadog: 
Refactor monitor tags to a list instead of a map. ([#10570](https://github.com/hashicorp/terraform/issues/10570)) - * provider/datadog 9869: Validate credentials when initialising client. ([#10567](https://github.com/hashicorp/terraform/issues/10567)) - * provider/openstack: More Import and Region Fixes ([#10662](https://github.com/hashicorp/terraform/issues/10662)) - * provider/openstack: Fix Ordering of Port Allowed Address Pairs ([#10250](https://github.com/hashicorp/terraform/issues/10250)) - * provider/template: No file path error when setting template to `/` ([#10297](https://github.com/hashicorp/terraform/issues/10297)) - -## 0.8.0 from 0.8.0-rc3 (December 13, 2016) - -**This only includes changes from 0.8.0-rc3 to 0.8.0 final. The section above -has the complete 0.7.x to 0.8.0 CHANGELOG.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/postgres: `ssl_mode` has been renamed `sslmode` to match common usage ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - -FEATURES: - - * **New Provider:** `Icinga2` ([#8306](https://github.com/hashicorp/terraform/issues/8306)) - * **New Resource:** `aws_lightsail_domain` ([#10637](https://github.com/hashicorp/terraform/issues/10637)) - * **New Resource:** `aws_lightsail_key_pair` ([#10583](https://github.com/hashicorp/terraform/issues/10583)) - * **New Resource:** `aws_snapshot_create_volume_permission` ([#9891](https://github.com/hashicorp/terraform/issues/9891)) - * **New Resource:** `google_compute_health_check` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Resource:** `google_compute_region_backend_service` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Data Source:** `aws_eip` ([#9833](https://github.com/hashicorp/terraform/issues/9833)) - * **New Data Source:** `aws_route53_zone` ([#9766](https://github.com/hashicorp/terraform/issues/9766)) - * **New Data Source:** `aws_vpc_endpoint_services` 
([#10261](https://github.com/hashicorp/terraform/issues/10261)) - -IMPROVEMENTS: - - * command/plan: Show warning when a plan file is given as input to make behavior clear. ([#10639](https://github.com/hashicorp/terraform/issues/10639)) - * core: Maps across multiple input sources (files, CLI, env vars) are merged. ([#10654](https://github.com/hashicorp/terraform/issues/10654)) - * provider/aws: Add support for AWS CA Central 1 Region ([#10618](https://github.com/hashicorp/terraform/issues/10618)) - * provider/aws: Added SQS FIFO queues ([#10614](https://github.com/hashicorp/terraform/issues/10614)) - * provider/aws: Support MFA delete for s3 bucket versioning ([#10020](https://github.com/hashicorp/terraform/issues/10020)) - * provider/aws: Enable DeleteOnTermination in ENI when created by spot fleet ([#9922](https://github.com/hashicorp/terraform/issues/9922)) - * provider/cloudstack: Add option to set a custom `network_domain` for `cloudstack_network` ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_port_forward` resource ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/fastly add origin shielding ([#10677](https://github.com/hashicorp/terraform/issues/10677)) - * provider/google: Add support for Internal Load Balancing ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * provider/google: SSL certificates can now specify prefix instead of a full name ([#10684](https://github.com/hashicorp/terraform/issues/10684)) - * provider/postgresql: Improved support for many PostgreSQL resources ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - * provider/rundeck: enable validation for multiple values in an array ([#8913](https://github.com/hashicorp/terraform/issues/8913)) - * provider/rundeck: Add support for scheduler to rundeck_job ([#9449](https://github.com/hashicorp/terraform/issues/9449)) - -BUG FIXES: - - * 
core: Direct indexing into a computed list no longer errors. ([#10657](https://github.com/hashicorp/terraform/issues/10657)) - * core: Validate fails on invalid keys in `variable` blocks. ([#10658](https://github.com/hashicorp/terraform/issues/10658)) - * core: Validate that only a single `lifecycle` block exists per resource. ([#10656](https://github.com/hashicorp/terraform/issues/10656)) - * core: When destroying, the resources of a provider that depends on another resource are destroyed first. ([#10659](https://github.com/hashicorp/terraform/issues/10659)) - * provider/aws: Added Lambda function guard when needed attributes are not set ([#10663](https://github.com/hashicorp/terraform/issues/10663)) - * provider/aws: Allow import of aws_security_groups with more than one source_security_group_id rule ([#9477](https://github.com/hashicorp/terraform/issues/9477)) - * provider/aws: Allow setting the DB Instance name when restoring from a snapshot ([#10664](https://github.com/hashicorp/terraform/issues/10664)) - * provider/aws: Fix issue importing `aws_vpc_peering_connection` ([#10635](https://github.com/hashicorp/terraform/issues/10635)) - * provider/aws: Fixed deletion of aws_api_gateway_base_path_mapping with empty path ([#10177](https://github.com/hashicorp/terraform/issues/10177)) - * provider/aws: Fix issue removing Lambda environment variables ([#10492](https://github.com/hashicorp/terraform/issues/10492)) - * provider/azurerm: fix virtual_machine reading plan as the wrong type ([#10626](https://github.com/hashicorp/terraform/issues/10626)) - * provider/azurerm: set ForceNew for storage image and OS disk of virtual_machine ([#10340](https://github.com/hashicorp/terraform/issues/10340)) - * provider/openstack: More Import and Region Fixes ([#10662](https://github.com/hashicorp/terraform/issues/10662)) - -## 0.8.0-rc3 (December 8, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Variable, resource, provider, and module names may no longer start with - a number 
or hyphen. Please see the upgrade guide for more information. - -FEATURES: - - * **New Provider:** `external` ([#8768](https://github.com/hashicorp/terraform/issues/8768)) - * **New Provider:** `Rancher` ([#9173](https://github.com/hashicorp/terraform/issues/9173)) - * **New Data Source:** `aws_iam_server_certificate` ([#10558](https://github.com/hashicorp/terraform/issues/10558)) - * **New Data Source:** `pagerduty_user` ([#10541](https://github.com/hashicorp/terraform/issues/10541)) - * **New Resource:** `aws_opsworks_rds_db_instance` ([#10294](https://github.com/hashicorp/terraform/issues/10294)) - * **New Resource:** `aws_vpc_endpoint_route_table_association` ([#10137](https://github.com/hashicorp/terraform/issues/10137)) -  * **New Resource:**  `aws_lightsail_instance` ([#10473](https://github.com/hashicorp/terraform/issues/10473)) -IMPROVEMENTS: - - * core: SIGTERM also triggers graceful shutdown in addition to SIGINT ([#10534](https://github.com/hashicorp/terraform/issues/10534)) - * provider/aws: Add support for termination protection and autotermination to EMR ([#10252](https://github.com/hashicorp/terraform/issues/10252)) - * provider/aws: Add "no_device" support to ephemeral block devices ([#10547](https://github.com/hashicorp/terraform/issues/10547)) - * provider/aws: Added S3 Bucket replication ([#10552](https://github.com/hashicorp/terraform/issues/10552)) - * provider/aws: Add `pgp_key` to `aws_iam_access_key` to protect key. ([#10615](https://github.com/hashicorp/terraform/issues/10615)) - * provider/azurerm: make DiskSizeGB optional for azurerm_virtual_machine data_disks ([#10232](https://github.com/hashicorp/terraform/issues/10232)) - * provider/azurerm support `license_type` virtual_machine property ([#10539](https://github.com/hashicorp/terraform/issues/10539)) - * provider/datadog: Make monitor thresholds optional. 
([#10526](https://github.com/hashicorp/terraform/issues/10526)) - * provider/datadog: Improve datadog timeboard support ([#10027](https://github.com/hashicorp/terraform/issues/10027)) - * provider/docker: Upload files into container before first start ([#9520](https://github.com/hashicorp/terraform/issues/9520)) - * provider/fastly: add ssl_hostname option ([#9629](https://github.com/hashicorp/terraform/issues/9629)) - * provider/openstack: Detect Region for Importing Resources ([#10509](https://github.com/hashicorp/terraform/issues/10509)) - * provider/google: Instances and templates now both support `metadata_startup_script` and `metadata.startup-script`. ([#10537](https://github.com/hashicorp/terraform/issues/10537)) - -BUG FIXES: - - * core: Fix a diff mismatch error that could happen when a resource depends on a count resource being decreased. ([#10522](https://github.com/hashicorp/terraform/issues/10522)) - * core: On Unix machines if `getent` is not available, fall back to shell to find home dir. 
([#10515](https://github.com/hashicorp/terraform/issues/10515)) - * communicator/ssh: Avoid race that could cause parallel remote execs on the same host to overwrite each other ([#10549](https://github.com/hashicorp/terraform/issues/10549)) - * provider/aws: cloudfront distribution 404 should mark as gone ([#10281](https://github.com/hashicorp/terraform/issues/10281)) - * provider/aws: Assign correct number of core instances (n-1) to aws-emr-cluster on update ([#10529](https://github.com/hashicorp/terraform/issues/10529)) - * provider/aws: Allow update of Service role on a CodeDeploy deployment group ([#9866](https://github.com/hashicorp/terraform/issues/9866)) - * provider/aws: fixed the api_gw_domain_name replace operation ([#10179](https://github.com/hashicorp/terraform/issues/10179)) - * provider/aws: Forces the API GW domain name certificates to recreate the resource ([#10588](https://github.com/hashicorp/terraform/issues/10588)) - * provider/aws: Validate `effect` in aws_iam_policy_document data source ([#10021](https://github.com/hashicorp/terraform/issues/10021)) - * provider/azurerm: azurerm_availability_set not is ForceNew for UpdateDomain and FaultDomain ([#10545](https://github.com/hashicorp/terraform/issues/10545)) - * provider/azurerm: fix servicebus_topic max_size_in_megabytes for premium namespaces ([#10611](https://github.com/hashicorp/terraform/issues/10611)) - * provider/datadog: Refactor monitor tags to a list instead of a map. ([#10570](https://github.com/hashicorp/terraform/issues/10570)) - * provider/datadog 9869: Validate credentials when initialising client. ([#10567](https://github.com/hashicorp/terraform/issues/10567)) - * provider/openstack: Fix Ordering of Port Allowed Address Pairs ([#10250](https://github.com/hashicorp/terraform/issues/10250)) - -## 0.8.0-rc2 (December 2, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Strings in configuration can no longer contain unescaped newlines. 
For unescaped newlines, heredocs must be used - * provider/aws: Anywhere where we can specify kms_key_id must now be a valid KMS Key ID ARN to stop continual diffs - -FEATURES: - - * **New DataSource:** `aws_route_table` ([#10301](https://github.com/hashicorp/terraform/issues/10301)) - * **New Interpolation Function:** `timestamp` ([#10475](https://github.com/hashicorp/terraform/issues/10475)) - -IMPROVEMENTS: - - * core: Plan will show deposed-only destroys for create-before-destroy resources. ([#10404](https://github.com/hashicorp/terraform/issues/10404)) - * provider/aws: Enforced kms_key_* attributes to be ARNs ([#10356](https://github.com/hashicorp/terraform/issues/10356)) - * provider/aws: IPv6 Support To Cloudfront ([#10332](https://github.com/hashicorp/terraform/issues/10332)) - * provider/aws: Support import of aws_iam_instance_profile ([#10436](https://github.com/hashicorp/terraform/issues/10436)) - * provider/aws: Increase `aws_emr_cluster` timeout ([#10444](https://github.com/hashicorp/terraform/issues/10444)) - * provider/aws: Support Automatic Rollback of CodeDeploy deployments and CloudWatch Alarms for a Deployment Group ([#9039](https://github.com/hashicorp/terraform/issues/9039)) - * provider/aws: Allow importing of aws_iam_role, aws_iam_role_policy and aws_iam_policy ([#9398](https://github.com/hashicorp/terraform/issues/9398)) - * provider/aws: Added s3 bucket region attribute management ([#10482](https://github.com/hashicorp/terraform/issues/10482)) - * provider/azurerm: support import of routes, fix route_table ([#10389](https://github.com/hashicorp/terraform/issues/10389)) - * provider/azurerm: create common schema for location field, add diff suppress ([#10409](https://github.com/hashicorp/terraform/issues/10409)) - * provider/github: supports importing resources ([#10382](https://github.com/hashicorp/terraform/issues/10382)) - * provider/postgresql: Added 'connect_timeout' argument to provider 'postgresql' 
([#10380](https://github.com/hashicorp/terraform/issues/10380)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_static_nat` resource ([#10420](https://github.com/hashicorp/terraform/issues/10420)) - * provider/google: Added support for session affinity to compute_backend_service ([#10387](https://github.com/hashicorp/terraform/issues/10387)) - * provider/google: Projects are now importable ([#10469](https://github.com/hashicorp/terraform/issues/10469)) - -BUG FIXES: - - * core: Changed attribute console output shows up on Windows. ([#10417](https://github.com/hashicorp/terraform/issues/10417)) - * core: Destroying deposed resources in create before destroy waits until the creation step of its specific index. (0.8 regression) ([#10416](https://github.com/hashicorp/terraform/issues/10416)) - * core: Certain invalid configurations will no longer print "illegal". ([#10448](https://github.com/hashicorp/terraform/issues/10448)) - * core: Fix a crash that could occur when multiple deposed instances exist. ([#10504](https://github.com/hashicorp/terraform/issues/10504)) - * command/console: variable access works ([#10446](https://github.com/hashicorp/terraform/issues/10446)) - * provider/aws: Do not return a root device for instance store backed AMIs. ([#9483](https://github.com/hashicorp/terraform/issues/9483)) - * provider/aws: resource_aws_opsworks_application does not accept document_root parameter ([#10477](https://github.com/hashicorp/terraform/issues/10477)) - * provider/aws: bug fix when specifying level on aws_opsworks_permission ([#10394](https://github.com/hashicorp/terraform/issues/10394)) - -## 0.8.0-rc1 (November 23, 2016) - -BASED ON: 0.7.13 (includes any changes up to that point as well) - -**Please read prior beta notes, as those are also included. 
The 0.8 changes -will be coalesced for a 0.8 final, but will remain separate for the pre-release -period.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * The chef provider now accepts `key_material` as an alternative to `private_key_pem`. The `private_key_pem` attribute will be deprecated in a future release - * The `template_file` resource no longer accepts a direct file path for the `template` attribute. You may either specify a path wrapped in a `file` function or specify a file path with the `filepath` attribute. This was deprecated during 0.7.x. - -FEATURES: - * core: allow outputs to have descriptions ([#9722](https://github.com/hashicorp/terraform/issues/9722)) - * state/azure: support passing of lease ID when writing storage blob ([#10115](https://github.com/hashicorp/terraform/issues/10115)) - * **New Resource:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * **New Resource:** `openstack_blockstorage_volume_attach_v2` ([#10259](https://github.com/hashicorp/terraform/issues/10259)) - * **New Resource:** `openstack_compute_volume_attach_v2` ([#10260](https://github.com/hashicorp/terraform/issues/10260)) - * **New Data Source:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * The `import` command can now specify a provider alias to use. 
([#10310](https://github.com/hashicorp/terraform/issues/10310)) - -IMPROVEMENTS: - - * provider/aws: Addition of suspended_processes to aws_autoscaling_group ([#10096](https://github.com/hashicorp/terraform/issues/10096)) - * provider/aws: added auto_minor_version_upgrade on aws_rds_cluster_instance ([#10284](https://github.com/hashicorp/terraform/issues/10284)) - * provider/aws: Add JSON validation to the aws_iam_policy resource ([#10239](https://github.com/hashicorp/terraform/issues/10239)) - * provider/azurerm: enable import of more resources ([#10195](https://github.com/hashicorp/terraform/issues/10195)) - * provider/chef: Migrate Chef to use KEY_MATERIAL rather than using a Pem file ([#10105](https://github.com/hashicorp/terraform/issues/10105)) - * provider/docker: authentication via values instead of files ([#10151](https://github.com/hashicorp/terraform/issues/10151)) - * provider/google: Add Service Accounts resource ([#9946](https://github.com/hashicorp/terraform/issues/9946)) - * provider/nomad: Update to support Nomad 0.5.0 - * provider/openstack: Add Swauth/Swift Authentication ([#9943](https://github.com/hashicorp/terraform/issues/9943)) - * state/remote/swift: Add support for versioning state file in swift and expiring versioned state ([#10055](https://github.com/hashicorp/terraform/issues/10055)) - -BUG FIXES: - - * core: Catch parse errors for null characters mid-file ([#9134](https://github.com/hashicorp/terraform/issues/9134)) - * core: escape sequence for " works (0.8 beta regression) ([#10236](https://github.com/hashicorp/terraform/issues/10236)) - * core: Terraform starts on Windows (0.8 beta2 regression) ([#10266](https://github.com/hashicorp/terraform/issues/10266)) - * core: Remove extra dot from state command backup files ([#10300](https://github.com/hashicorp/terraform/issues/10300)) - * core: Validate data sources do not have provisioners ([#10318](https://github.com/hashicorp/terraform/issues/10318)) - * core: Disable checkpoint 
settings take effect ([#10206](https://github.com/hashicorp/terraform/issues/10206)) - * provider/aws: Skip VPC endpoint routes when removing default route table's routes ([#10303](https://github.com/hashicorp/terraform/issues/10303)) - * provider/azurerm: Prevent null reference when reading boot_diagnostics settings in azurerm_virtual_machine ([#10283](https://github.com/hashicorp/terraform/issues/10283)) - * provider/template: No file path error when setting template to `/` ([#10297](https://github.com/hashicorp/terraform/issues/10297)) - -PLUGIN CHANGES: - - * The protocol version has been incremented, requiring all plugins for - 0.8 to be built with 0.8 sources (or newer). This should only require - a simple recompile for compatibility. - -## 0.8.0-beta2 (November 16, 2016) - -BASED ON: 0.7.11 (includes any changes up to that point as well) - -**Please read prior beta notes, as those are also included. The 0.8 changes -will be coalesced for a 0.8 final, but will remain separate for the pre-release -period.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Math operators now follow the standard order of operations: *, /, % followed - by +, -. See the updated interpolation docs for more information. You can - continue to force ordering with parentheses. - -FEATURES: - - * **New command:** `terraform console`, an interactive console for experimenting - with and using interpolations. ([#10093](https://github.com/hashicorp/terraform/issues/10093)) - * **Terraform version requirement in configuration.** You can now specify - a Terraform version requirement in configuration and modules. ([#10080](https://github.com/hashicorp/terraform/issues/10080)) - * **`depends_on` can reference modules.** This allows a resource or output - to depend on everything within a module. 
([#10076](https://github.com/hashicorp/terraform/issues/10076)) - * **`output` supports `depends_on`.** This is useful when the output depends - on a certain ordering to happen that can't be represented with interpolations. - ([#10072](https://github.com/hashicorp/terraform/issues/10072)) - -## 0.8.0-beta1 (November 11, 2016) - -BASED ON: 0.7.10 (includes any changes up to that point as well) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * `template_file` _inline_ templates must escape their variable usage. What - was previously `${foo}` must now be `$${foo}`. Note that this is only - for _inline_ templates. Templates read from files are unchanged. ([#9698](https://github.com/hashicorp/terraform/issues/9698)) - * Escape sequences used to require double-escaping when used within interpolations. - You now must only escape once (which is the expected/typical behavior). - For example: `${replace(var.foo, "\\", "\\\\")}` is correct. Before, - that would cause very strange behavior. However, this may break existing - configurations which found a level of escape sequences to work. Check - `terraform plan` for incorrect output. - -FEATURES: - - * **New provider:** `nomad` ([#9538](https://github.com/hashicorp/terraform/issues/9538)) - * **New provider:** `vault` ([#9158](https://github.com/hashicorp/terraform/issues/9158)) - * The `import` command will now read provider configuration from Terraform - configuration files (including loading tfvars files and so on). ([#9809](https://github.com/hashicorp/terraform/issues/9809)) - * Providers and resources are now notified by Terraform core to "stop" when - an interrupt is received, allowing resources to gracefully exit much, much - faster. ([#9607](https://github.com/hashicorp/terraform/issues/9607)) - -IMPROVEMENTS: - - * core: Human-friendly error when a computed count is used. 
([#10060](https://github.com/hashicorp/terraform/issues/10060)) - * helper/schema: only map, list, and set elements that are actually causing - a resource to destroy/create are marked as "requires new". ([#9613](https://github.com/hashicorp/terraform/issues/9613)) - -BUG FIXES: - - * core: Escape sequences in interpolations work in every case. ([#8709](https://github.com/hashicorp/terraform/issues/8709)) - * core: Maps in outputs with computed values are no longer removed. ([#9549](https://github.com/hashicorp/terraform/issues/9549)) - * command/fmt: Multiline comments aren't indented every fmt. ([#6524](https://github.com/hashicorp/terraform/issues/6524)) - -## 0.7.13 (November 23, 2016) - -BUG FIXES: - - * core: New graph records dependencies for explicit self references ([#10319](https://github.com/hashicorp/terraform/issues/10319)) - -## 0.7.12 (November 22, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/cloudstack: `cloudstack_static_nat` has now deprecated `network_id` ([#10204](https://github.com/hashicorp/terraform/issues/10204)) - -FEATURES: - - * *New Data Source:* `aws_alb_listener` ([#10181](https://github.com/hashicorp/terraform/issues/10181)) - * *New Resource:* `github_label` ([#10213](https://github.com/hashicorp/terraform/issues/10213)) - -IMPROVEMENTS: - - * core: Experimental feature failures are less verbose. 
([#10276](https://github.com/hashicorp/terraform/issues/10276)) - * provider/aws: Add name_prefix to aws_iam_policy ([#10178](https://github.com/hashicorp/terraform/issues/10178)) - * provider/aws: Add ability to select aws_prefix_list data source by name ([#10248](https://github.com/hashicorp/terraform/issues/10248)) - * provider/aws Return service CIDR blocks from aws_vpc_endpoint resource ([#10254](https://github.com/hashicorp/terraform/issues/10254)) - * provider/aws: Added `environment` configuration for AWS Lambda Functions ([#10275](https://github.com/hashicorp/terraform/issues/10275)) - -BUG FIXES: - - * core: Fix potential crashing race condition on state write ([#10277](https://github.com/hashicorp/terraform/issues/10277)) - * core: Data sources in modules lose their `data.` prefix when moved within the state ([#9996](https://github.com/hashicorp/terraform/issues/9996)) - * provider/aws: Fixed issue with `enable_dns_support` on creation in `aws_vpc` ([#10171](https://github.com/hashicorp/terraform/issues/10171)) - * provider/aws: Add CertificateNotFound retry waiter to aws_alb_listener ([#10180](https://github.com/hashicorp/terraform/issues/10180)) - * provider/aws: Remove IAM user's MFA devices with `force_destroy` ([#10262](https://github.com/hashicorp/terraform/issues/10262)) - * provider/scaleway: improve volume attachment ([#10084](https://github.com/hashicorp/terraform/issues/10084)) - -## 0.7.11 (November 15, 2016) - -FEATURES: - -IMPROVEMENTS: - - * provider/aws: Expose RDS DB Instance HostedZoneId attribute ([#10000](https://github.com/hashicorp/terraform/issues/10000)) - * provider/aws: Ignore AWS internal tags ([#7454](https://github.com/hashicorp/terraform/issues/7454)) - * provider/aws: Exposed aws_iam_role create_date attribute ([#10091](https://github.com/hashicorp/terraform/issues/10091)) - * provider/aws: Added aws_api_gateway_api_key created_date & last_updated_date attributes ([#9530](https://github.com/hashicorp/terraform/issues/9530)) 
- * provider/aws: Added aws_api_gateway_rest_api created_date attribute ([#9532](https://github.com/hashicorp/terraform/issues/9532)) - * provider/aws: Exposed aws_api_gateway_deployment.created_date attribute ([#9534](https://github.com/hashicorp/terraform/issues/9534)) - * provider/aws: Added `retry_duration` to `redshift_configuration` in `kinesis_firehose_delivery_stream` ([#10113](https://github.com/hashicorp/terraform/issues/10113)) - * provider/azurerm: allow updating load balancer sub-resources ([#10016](https://github.com/hashicorp/terraform/issues/10016)) - * provider/openstack: Instance `user_data` will now detect if input is already Base64-encode ([#9966](https://github.com/hashicorp/terraform/issues/9966)) - -BUG FIXES: - - * core: Fix diff mismatch error on "Destroy: true to false" scenarios. ([#10139](https://github.com/hashicorp/terraform/issues/10139)) - * core: New destroy graph `-target` includes dependencies. ([#10036](https://github.com/hashicorp/terraform/issues/10036)) - * core: New destroy graph creates proper edges through module outputs ([#10068](https://github.com/hashicorp/terraform/issues/10068)) - * core: Fix shadow error when using uuid() ([#10106](https://github.com/hashicorp/terraform/issues/10106)) - * core: Fix an issue where applies with data sources could hang ([#10134](https://github.com/hashicorp/terraform/issues/10134)) - * core: Fix plan operation diff mismatch for computed keys in slices ([#10118](https://github.com/hashicorp/terraform/issues/10118)) - * provider/aws: fix the validation of aws_redshift_cluster database_name ([#10019](https://github.com/hashicorp/terraform/issues/10019)) - * provider/aws: Fix panic in aws_acm_certificate datasource ([#10051](https://github.com/hashicorp/terraform/issues/10051)) - * provider/aws: increase aws_lambda_function timeout ([#10116](https://github.com/hashicorp/terraform/issues/10116)) - * provider/aws: Fixed ES buffering_interval option in `kinesis_firehose_delivery_stream` 
([#10112](https://github.com/hashicorp/terraform/issues/10112)) - -## 0.7.10 (November 9, 2016) - -FEATURES: - - * **New Resource:** `azurerm_eventhub` ([#9889](https://github.com/hashicorp/terraform/issues/9889)) - * **New Resource:** `azurerm_virtual_machine_extension` ([#9962](https://github.com/hashicorp/terraform/issues/9962)) - * **Experimental new plan graph:** `terraform plan` is getting a new graph - creation process for 0.8. This is now available behind a flag `-Xnew-apply` - (on any command). This will become the default in 0.8. There may still be - bugs. ([#9973](https://github.com/hashicorp/terraform/issues/9973)) - -IMPROVEMENTS: - - * provider/aws: Add support for Service Access Security Group in `aws_emr_cluster` ([#9600](https://github.com/hashicorp/terraform/issues/9600)) - * provider/aws: Add Enhanced VPC routing to Redshift ([#9950](https://github.com/hashicorp/terraform/issues/9950)) - * provider/aws: Add key_name_prefix argument to aws_key_pair resource ([#9993](https://github.com/hashicorp/terraform/issues/9993)) - * provider/openstack: Add `value_specs` to `openstack_fw_policy_v1` resource, allowing vendor information ([#9835](https://github.com/hashicorp/terraform/issues/9835)) - * provider/openstack: Add `value_specs` to `openstack_fw_firewall_v1` resource, allowing vendor information ([#9836](https://github.com/hashicorp/terraform/issues/9836)) - * provider/random: The `b64` attribute on `random_id` resources is deprecated, replaced by `b64_url` and `b64_std` ([#9903](https://github.com/hashicorp/terraform/issues/9903)) - -BUG FIXES: - - * core: Splat variables (`foo.*.bar`) are now ordered by count index for deterministic ordering. ([#9883](https://github.com/hashicorp/terraform/issues/9883)) - * core: Prune orphan outputs (in the config but not in the state). ([#9971](https://github.com/hashicorp/terraform/issues/9971)) - * core: New apply graph doesn't prune module variables as aggressively. 
([#9898](https://github.com/hashicorp/terraform/issues/9898)) - * core: New apply graph properly configures providers with aliases. ([#9894](https://github.com/hashicorp/terraform/issues/9894)) - * core: New destroy graph doesn't create edge loops to destroy nodes that reference themselves. ([#9968](https://github.com/hashicorp/terraform/issues/9968)) - * provider/aws: Fix crash when adding EBS volumes to spot fleet request. ([#9857](https://github.com/hashicorp/terraform/issues/9857)) - * provider/aws: Ignore NoSuchEntity error when IAM user does not have login profile ([#9900](https://github.com/hashicorp/terraform/issues/9900)) - * provider/aws: Setting static_routes_only on import of vpn_connection ([#9802](https://github.com/hashicorp/terraform/issues/9802)) - * provider/aws: aws_alb_target_group arn_suffix missing the targetgroup ([#9911](https://github.com/hashicorp/terraform/issues/9911)) - * provider/aws: Fix the validateFunc of aws_elasticache_replication_group ([#9918](https://github.com/hashicorp/terraform/issues/9918)) - * provider/aws: removing toLower when setting aws_db_parameter_group options ([#9820](https://github.com/hashicorp/terraform/issues/9820)) - * provider/aws: Fix panic when passing statuses to aws_acm_certificate ([#9990](https://github.com/hashicorp/terraform/issues/9990)) - * provider/aws: AWS IAM, User and Role allow + in the name ([#9991](https://github.com/hashicorp/terraform/issues/9991)) - * provider/scaleway: retry volume attachment ([#9972](https://github.com/hashicorp/terraform/issues/9972)) - * provider/scaleway: fix `scaleway_image` datasource returning unknown images ([#9899](https://github.com/hashicorp/terraform/issues/9899)) - * provider/google: fix crash when mistakenly configuring disks ([#9942](https://github.com/hashicorp/terraform/issues/9942)) - -## 0.7.9 (November 4, 2016) - -FEATURES: - - * **New Data Source:** `aws_acm_certificate` ([#8359](https://github.com/hashicorp/terraform/issues/8359)) - * **New 
Resource:** `aws_autoscaling_attachment` ([#9146](https://github.com/hashicorp/terraform/issues/9146)) - * **New Resource:** `postgresql_extension` ([#9210](https://github.com/hashicorp/terraform/issues/9210)) - -IMPROVEMENTS: - - * core: Improve shadow graph robustness by catching panics during graph evaluation. ([#9852](https://github.com/hashicorp/terraform/issues/9852)) - * provider/aws: Provide the option to skip_destroy on aws_volume_attachment ([#9792](https://github.com/hashicorp/terraform/issues/9792)) - * provider/aws: Allows aws_alb security_groups to be updated ([#9804](https://github.com/hashicorp/terraform/issues/9804)) - * provider/aws: Add the enable_sni attribute for Route53 health checks. ([#9822](https://github.com/hashicorp/terraform/issues/9822)) - * provider/openstack: Add `value_specs` to openstack_fw_rule_v1 resource, allowing vendor information ([#9834](https://github.com/hashicorp/terraform/issues/9834)) - * state/remote/swift: Enable OpenStack Identity/Keystone v3 authentication ([#9769](https://github.com/hashicorp/terraform/issues/9769)) - * state/remote/swift: Now supports all login/config options that the OpenStack Provider supports ([#9777](https://github.com/hashicorp/terraform/issues/9777)) - -BUG FIXES: - - * core: Provisioners in modules do not crash during `apply` (regression). 
([#9846](https://github.com/hashicorp/terraform/issues/9846)) - * core: Computed bool fields with non-bool values will not crash ([#9812](https://github.com/hashicorp/terraform/issues/9812)) - * core: `formatlist` interpolation function accepts an empty list ([#9795](https://github.com/hashicorp/terraform/issues/9795)) - * core: Validate outputs have a name ([#9823](https://github.com/hashicorp/terraform/issues/9823)) - * core: Validate variables have a name ([#9818](https://github.com/hashicorp/terraform/issues/9818)) - * command/apply: If a partial set of required variables are provided with `-var`, ask for the remainder ([#9794](https://github.com/hashicorp/terraform/issues/9794)) - * command/fmt: Multiline strings aren't erroneously indented ([#9859](https://github.com/hashicorp/terraform/issues/9859)) - * provider/aws: Fix issue setting `certificate_upload_date` in `aws_api_gateway_domain_name` ([#9815](https://github.com/hashicorp/terraform/issues/9815)) - * provider/azurerm: allow storage_account resource with name "$root" ([#9813](https://github.com/hashicorp/terraform/issues/9813)) - * provider/google: fix for looking up project image families ([#9243](https://github.com/hashicorp/terraform/issues/9243)) - * provider/openstack: Don't pass `shared` in FWaaS Policy unless it's set ([#9830](https://github.com/hashicorp/terraform/issues/9830)) - * provider/openstack: openstack_fw_firewall_v1 `admin_state_up` should default to true ([#9832](https://github.com/hashicorp/terraform/issues/9832)) - -PLUGIN CHANGES: - - * Fields in resources can now have both `Optional` and `ConflictsWith` ([#9825](https://github.com/hashicorp/terraform/issues/9825)) - -## 0.7.8 (November 1, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/openstack: The OpenStack provider has switched to the new Gophercloud SDK. - No front-facing changes were made, but please be aware that there might be bugs. - Please report any if found. 
- * `archive_file` is now a data source, instead of a resource ([#8492](https://github.com/hashicorp/terraform/issues/8492)) - -FEATURES: - - * **Experimental new apply graph:** `terraform apply` is getting a new graph - creation process for 0.8. This is now available behind a flag `-Xnew-apply` - (on any command). This will become the default in 0.8. There may still be - bugs. ([#9388](https://github.com/hashicorp/terraform/issues/9388)) - * **Experimental new destroy graph:** `terraform destroy` is also getting - a new graph creation process for 0.8. This is now available behind a flag - `-Xnew-destroy`. This will become the default in 0.8. ([#9527](https://github.com/hashicorp/terraform/issues/9527)) - * **New Provider:** `pagerduty` ([#9022](https://github.com/hashicorp/terraform/issues/9022)) - * **New Resource:** `aws_iam_user_login_profile` ([#9605](https://github.com/hashicorp/terraform/issues/9605)) - * **New Resource:** `aws_waf_ipset` ([#8852](https://github.com/hashicorp/terraform/issues/8852)) - * **New Resource:** `aws_waf_rule` ([#8852](https://github.com/hashicorp/terraform/issues/8852)) - * **New Resource:** `aws_waf_web_acl` ([#8852](https://github.com/hashicorp/terraform/issues/8852)) - * **New Resource:** `aws_waf_byte_match_set` ([#9681](https://github.com/hashicorp/terraform/issues/9681)) - * **New Resource:** `aws_waf_size_constraint_set` ([#9689](https://github.com/hashicorp/terraform/issues/9689)) - * **New Resource:** `aws_waf_sql_injection_match_set` ([#9709](https://github.com/hashicorp/terraform/issues/9709)) - * **New Resource:** `aws_waf_xss_match_set` ([#9710](https://github.com/hashicorp/terraform/issues/9710)) - * **New Resource:** `aws_ssm_activation` ([#9111](https://github.com/hashicorp/terraform/issues/9111)) - * **New Resource:** `azurerm_key_vault` ([#9478](https://github.com/hashicorp/terraform/issues/9478)) - * **New Resource:** `azurerm_storage_share` ([#8674](https://github.com/hashicorp/terraform/issues/8674)) - * **New 
Resource:** `azurerm_eventhub_namespace` ([#9297](https://github.com/hashicorp/terraform/issues/9297)) - * **New Resource:** `cloudstack_security_group` ([#9103](https://github.com/hashicorp/terraform/issues/9103)) - * **New Resource:** `cloudstack_security_group_rule` ([#9645](https://github.com/hashicorp/terraform/issues/9645)) - * **New Resource:** `cloudstack_private_gateway` ([#9637](https://github.com/hashicorp/terraform/issues/9637)) - * **New Resource:** `cloudstack_static_route` ([#9637](https://github.com/hashicorp/terraform/issues/9637)) - * **New DataSource:** `aws_ebs_volume` ([#9753](https://github.com/hashicorp/terraform/issues/9753)) - * **New DataSource:** `aws_prefix_list` ([#9566](https://github.com/hashicorp/terraform/issues/9566)) - * **New DataSource:** `aws_security_group` ([#9604](https://github.com/hashicorp/terraform/issues/9604)) - * **New DataSource:** `azurerm_client_config` ([#9478](https://github.com/hashicorp/terraform/issues/9478)) - * **New Interpolation Function:** `ceil` ([#9692](https://github.com/hashicorp/terraform/issues/9692)) - * **New Interpolation Function:** `floor` ([#9692](https://github.com/hashicorp/terraform/issues/9692)) - * **New Interpolation Function:** `min` ([#9692](https://github.com/hashicorp/terraform/issues/9692)) - * **New Interpolation Function:** `max` ([#9692](https://github.com/hashicorp/terraform/issues/9692)) - * **New Interpolation Function:** `title` ([#9087](https://github.com/hashicorp/terraform/issues/9087)) - * **New Interpolation Function:** `zipmap` ([#9627](https://github.com/hashicorp/terraform/issues/9627)) - -IMPROVEMENTS: - - * provider/aws: No longer require `route_table_ids` list in `aws_vpc_endpoint` resources ([#9357](https://github.com/hashicorp/terraform/issues/9357)) - * provider/aws: Allow `description` in `aws_redshift_subnet_group` to be modified ([#9515](https://github.com/hashicorp/terraform/issues/9515)) - * provider/aws: Add tagging support to aws_redshift_subnet_group 
([#9504](https://github.com/hashicorp/terraform/issues/9504)) - * provider/aws: Add validation to IAM User and Group Name ([#9584](https://github.com/hashicorp/terraform/issues/9584)) - * provider/aws: Add Ability To Enable / Disable ALB AccessLogs ([#9290](https://github.com/hashicorp/terraform/issues/9290)) - * provider/aws: Add support for `AutoMinorVersionUpgrade` to aws_elasticache_replication_group resource. ([#9657](https://github.com/hashicorp/terraform/issues/9657)) - * provider/aws: Fix import of RouteTable with destination prefixes ([#9686](https://github.com/hashicorp/terraform/issues/9686)) - * provider/aws: Add support for reference_name to aws_route53_health_check ([#9737](https://github.com/hashicorp/terraform/issues/9737)) - * provider/aws: Expose ARN suffix on ALB Target Group ([#9734](https://github.com/hashicorp/terraform/issues/9734)) - * provider/azurerm: add account_kind and access_tier to storage_account ([#9408](https://github.com/hashicorp/terraform/issues/9408)) - * provider/azurerm: write load_balancer attributes to network_interface_card hash ([#9207](https://github.com/hashicorp/terraform/issues/9207)) - * provider/azurerm: Add disk_size_gb param to VM storage_os_disk ([#9200](https://github.com/hashicorp/terraform/issues/9200)) - * provider/azurerm: support importing of subnet resource ([#9646](https://github.com/hashicorp/terraform/issues/9646)) - * provider/azurerm: Add support for *all* of the Azure regions e.g. Germany, China and Government ([#9765](https://github.com/hashicorp/terraform/issues/9765)) - * provider/digitalocean: Allow resizing DigitalOcean Droplets without increasing disk size. 
([#9573](https://github.com/hashicorp/terraform/issues/9573)) - * provider/google: enhance service scope list ([#9442](https://github.com/hashicorp/terraform/issues/9442)) - * provider/google: Change default MySQL instance version to 5.6 ([#9674](https://github.com/hashicorp/terraform/issues/9674)) - * provider/google: Support MySQL 5.7 instances ([#9673](https://github.com/hashicorp/terraform/issues/9673)) - * provider/google: Add support for using source_disk to google_compute_image ([#9614](https://github.com/hashicorp/terraform/issues/9614)) - * provider/google: Add support for default-internet-gateway alias for google_compute_route ([#9676](https://github.com/hashicorp/terraform/issues/9676)) - * provider/openstack: Added value_specs to openstack_networking_port_v2, allowing vendor information ([#9551](https://github.com/hashicorp/terraform/issues/9551)) - * provider/openstack: Added value_specs to openstack_networking_floatingip_v2, allowing vendor information ([#9552](https://github.com/hashicorp/terraform/issues/9552)) - * provider/openstack: Added value_specs to openstack_compute_keypair_v2, allowing vendor information ([#9554](https://github.com/hashicorp/terraform/issues/9554)) - * provider/openstack: Allow any protocol in openstack_fw_rule_v1 ([#9617](https://github.com/hashicorp/terraform/issues/9617)) - * provider/openstack: expose LoadBalancer v2 VIP Port ID ([#9727](https://github.com/hashicorp/terraform/issues/9727)) - * provider/openstack: Openstack Provider enhancements including environment variables ([#9725](https://github.com/hashicorp/terraform/issues/9725)) - * provider/scaleway: update sdk for ams1 region ([#9687](https://github.com/hashicorp/terraform/issues/9687)) - * provider/scaleway: server volume property ([#9695](https://github.com/hashicorp/terraform/issues/9695)) - -BUG FIXES: - - * core: Resources suffixed with 'panic' won't falsely trigger crash detection. 
([#9395](https://github.com/hashicorp/terraform/issues/9395)) - * core: Validate lifecycle options don't contain interpolations. ([#9576](https://github.com/hashicorp/terraform/issues/9576)) - * core: Tainted resources will not process `ignore_changes`. ([#7855](https://github.com/hashicorp/terraform/issues/7855)) - * core: Boolean looking values passed in via `-var` no longer cause type errors. ([#9642](https://github.com/hashicorp/terraform/issues/9642)) - * core: Computed primitives in certain cases no longer cause diff mismatch errors. ([#9618](https://github.com/hashicorp/terraform/issues/9618)) - * core: Empty arrays for list vars in JSON work ([#8886](https://github.com/hashicorp/terraform/issues/8886)) - * core: Boolean types in tfvars work properly ([#9751](https://github.com/hashicorp/terraform/issues/9751)) - * core: Deposed resource destruction is accounted for properly in `apply` counts. ([#9731](https://github.com/hashicorp/terraform/issues/9731)) - * core: Check for graph cycles on resource expansion to catch cycles between self-referenced resources. ([#9728](https://github.com/hashicorp/terraform/issues/9728)) - * core: `prevent_destroy` prevents decreasing count ([#9707](https://github.com/hashicorp/terraform/issues/9707)) - * core: removed optional items will trigger "requires new" if necessary ([#9699](https://github.com/hashicorp/terraform/issues/9699)) - * command/apply: `-backup` and `-state-out` work with plan files ([#9706](https://github.com/hashicorp/terraform/issues/9706)) - * command/fmt: Cleaner formatting for multiline standalone comments above resources - * command/validate: respond to `--help` ([#9660](https://github.com/hashicorp/terraform/issues/9660)) - * provider/archive: Converting to datasource. 
([#8492](https://github.com/hashicorp/terraform/issues/8492)) - * provider/aws: Fix issue importing AWS Instances and setting the correct `associate_public_ip_address` value ([#9453](https://github.com/hashicorp/terraform/issues/9453)) - * provider/aws: Fix issue with updating ElasticBeanstalk environment variables ([#9259](https://github.com/hashicorp/terraform/issues/9259)) - * provider/aws: Allow zero value for `scaling_adjustment` in `aws_autoscaling_policy` when using `SimpleScaling` ([#8893](https://github.com/hashicorp/terraform/issues/8893)) - * provider/aws: Increase ECS service drain timeout ([#9521](https://github.com/hashicorp/terraform/issues/9521)) - * provider/aws: Remove VPC Endpoint from state if it's not found ([#9561](https://github.com/hashicorp/terraform/issues/9561)) - * provider/aws: Delete Login Profile from IAM User on force_destroy ([#9583](https://github.com/hashicorp/terraform/issues/9583)) - * provider/aws: Exposed aws_api_gw_domain_name.certificate_upload_date attribute ([#9533](https://github.com/hashicorp/terraform/issues/9533)) - * provider/aws: fix aws_elasticache_replication_group for Redis in cluster mode ([#9601](https://github.com/hashicorp/terraform/issues/9601)) - * provider/aws: Validate regular expression passed via the ami data_source `name_regex` attribute. 
([#9622](https://github.com/hashicorp/terraform/issues/9622)) - * provider/aws: Bug fix for NoSuchBucket on Destroy of aws_s3_bucket_policy ([#9641](https://github.com/hashicorp/terraform/issues/9641)) - * provider/aws: Refresh aws_autoscaling_schedule from state on 404 ([#9659](https://github.com/hashicorp/terraform/issues/9659)) - * provider/aws: Allow underscores in IAM user and group names ([#9684](https://github.com/hashicorp/terraform/issues/9684)) - * provider/aws: aws_ami: handle deletion of AMIs ([#9721](https://github.com/hashicorp/terraform/issues/9721)) - * provider/aws: Fix aws_route53_record alias perpetual diff ([#9704](https://github.com/hashicorp/terraform/issues/9704)) - * provider/aws: Allow `active` state while waiting for the VPC Peering Connection. ([#9754](https://github.com/hashicorp/terraform/issues/9754)) - * provider/aws: Normalize all-principals wildcard in `aws_iam_policy_document` ([#9720](https://github.com/hashicorp/terraform/issues/9720)) - * provider/azurerm: Fix Azure RM loadbalancer rules validation ([#9468](https://github.com/hashicorp/terraform/issues/9468)) - * provider/azurerm: Fix servicebus_topic values when using the Update func to stop perpetual diff ([#9323](https://github.com/hashicorp/terraform/issues/9323)) - * provider/azurerm: lower servicebus_topic max size to Azure limit ([#9649](https://github.com/hashicorp/terraform/issues/9649)) - * provider/azurerm: Fix VHD deletion when VM and Storage account are in separate resource groups ([#9631](https://github.com/hashicorp/terraform/issues/9631)) - * provider/azurerm: Guard against panic when importing arm_virtual_network ([#9739](https://github.com/hashicorp/terraform/issues/9739)) - * provider/azurerm: fix sql_database resource reading tags ([#9767](https://github.com/hashicorp/terraform/issues/9767)) - * provider/cloudflare: update client library to stop connection closed issues ([#9715](https://github.com/hashicorp/terraform/issues/9715)) - * provider/consul: Change 
to consul_service resource to introduce a `service_id` parameter ([#9366](https://github.com/hashicorp/terraform/issues/9366)) - * provider/datadog: Ignore float/int diffs on thresholds ([#9466](https://github.com/hashicorp/terraform/issues/9466)) - * provider/docker: Fixes for docker_container host object and documentation ([#9367](https://github.com/hashicorp/terraform/issues/9367)) - * provider/scaleway: improve the performance of server deletion ([#9491](https://github.com/hashicorp/terraform/issues/9491)) - * provider/scaleway: fix scaleway_volume_attachment with count > 1 ([#9493](https://github.com/hashicorp/terraform/issues/9493)) - - -## 0.7.7 (October 18, 2016) - -FEATURES: - - * **New Data Source:** `scaleway_bootscript`. ([#9386](https://github.com/hashicorp/terraform/issues/9386)) - * **New Data Source:** `scaleway_image`. ([#9386](https://github.com/hashicorp/terraform/issues/9386)) - -IMPROVEMENTS: - - * core: When the environment variable TF_LOG_PATH is specified, debug logs are now appended to the specified file instead of being truncated. ([#9440](https://github.com/hashicorp/terraform/pull/9440)) - * provider/aws: Expose ARN for `aws_lambda_alias`. ([#9390](https://github.com/hashicorp/terraform/issues/9390)) - * provider/aws: Add support for AWS US East (Ohio) region. ([#9414](https://github.com/hashicorp/terraform/issues/9414)) - * provider/scaleway: `scaleway_ip`, `scaleway_security_group`, `scaleway_server` and `scaleway_volume` resources can now be imported. ([#9387](https://github.com/hashicorp/terraform/issues/9387)) - -BUG FIXES: - - * core: List and map indexes support arithmetic. ([#9372](https://github.com/hashicorp/terraform/issues/9372)) - * core: List and map indexes are implicitly converted to the correct type if possible. ([#9372](https://github.com/hashicorp/terraform/issues/9372)) - * provider/aws: Read back `associate_public_ip_address` in `aws_launch_configuration` resources to enable importing. 
([#9399](https://github.com/hashicorp/terraform/issues/9399)) - * provider/aws: Remove `aws_route` resources from state if their associated `aws_route_table` has been removed. ([#9431](https://github.com/hashicorp/terraform/issues/9431)) - * provider/azurerm: Load balancer resources now have their `id` attribute set to the resource URI instead of the load balancer URI. ([#9401](https://github.com/hashicorp/terraform/issues/9401)) - * provider/google: Fix a bug causing a crash when migrating `google_compute_target_pool` resources from 0.6.x releases. ([#9370](https://github.com/hashicorp/terraform/issues/9370)) - -## 0.7.6 (October 14, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * `azurerm_virtual_machine` has deprecated the use of `diagnostics_profile` in favour of `boot_diagnostics`. ([#9122](https://github.com/hashicorp/terraform/issues/9122)) - * The deprecated `key_file` and `bastion_key_file` arguments to Provisioner Connections have been removed ([#9340](https://github.com/hashicorp/terraform/issues/9340)) - -FEATURES: - * **New Data Source:** `aws_billing_service_account` ([#8701](https://github.com/hashicorp/terraform/issues/8701)) - * **New Data Source:** `aws_availability_zone` ([#6819](https://github.com/hashicorp/terraform/issues/6819)) - * **New Data Source:** `aws_region` ([#6819](https://github.com/hashicorp/terraform/issues/6819)) - * **New Data Source:** `aws_subnet` ([#6819](https://github.com/hashicorp/terraform/issues/6819)) - * **New Data Source:** `aws_vpc` ([#6819](https://github.com/hashicorp/terraform/issues/6819)) - * **New Resource:** `azurerm_lb` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - * **New Resource:** `azurerm_lb_backend_address_pool` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - * **New Resource:** `azurerm_lb_nat_rule` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - * **New Resource:** `azurerm_lb_nat_pool` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - 
* **New Resource:** `azurerm_lb_probe` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - * **New Resource:** `azurerm_lb_rule` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - * **New Resource:** `github_repository` ([#9327](https://github.com/hashicorp/terraform/issues/9327)) - -IMPROVEMENTS: - * core-validation: create validation package to provide common validation functions ([#8103](https://github.com/hashicorp/terraform/issues/8103)) - * provider/aws: Support Import of OpsWorks Custom Layers ([#9252](https://github.com/hashicorp/terraform/issues/9252)) - * provider/aws: Automatically constructed ARNs now support partitions other than `aws`, allowing operation with `aws-cn` and `aws-us-gov` ([#9273](https://github.com/hashicorp/terraform/issues/9273)) - * provider/aws: Retry setTags operation for EC2 resources ([#7890](https://github.com/hashicorp/terraform/issues/7890)) - * provider/aws: Support refresh of EC2 instance `user_data` ([#6736](https://github.com/hashicorp/terraform/issues/6736)) - * provider/aws: Poll to confirm delete of `resource_aws_customer_gateway` ([#9346](https://github.com/hashicorp/terraform/issues/9346)) - * provider/azurerm: expose default keys for `servicebus_namespace` ([#9242](https://github.com/hashicorp/terraform/issues/9242)) - * provider/azurerm: add `enable_blob_encryption` to `azurerm_storage_account` resource ([#9233](https://github.com/hashicorp/terraform/issues/9233)) - * provider/azurerm: set `resource_group_name` on resource import across the provider ([#9073](https://github.com/hashicorp/terraform/issues/9073)) - * provider/azurerm: `azurerm_cdn_profile` resources can now be imported ([#9306](https://github.com/hashicorp/terraform/issues/9306)) - * provider/datadog: add support for Datadog dashboard "type" and "style" options ([#9228](https://github.com/hashicorp/terraform/issues/9228)) - * provider/scaleway: `region` is now supported for provider configuration - -BUG FIXES: - * core: Local 
state can now be refreshed when no resources exist ([#7320](https://github.com/hashicorp/terraform/issues/7320)) - * core: Orphaned nested (depth 2+) modules will inherit provider configs ([#9318](https://github.com/hashicorp/terraform/issues/9318)) - * core: Fix crash when a map key contains an interpolation function ([#9282](https://github.com/hashicorp/terraform/issues/9282)) - * core: Numeric variables values were incorrectly converted to numbers ([#9263](https://github.com/hashicorp/terraform/issues/9263)) - * core: Fix input and output of map variables from HCL ([#9268](https://github.com/hashicorp/terraform/issues/9268)) - * core: Crash when interpolating a map value with a function in the key ([#9282](https://github.com/hashicorp/terraform/issues/9282)) - * core: Crash when copying a nil value in an InstanceState ([#9356](https://github.com/hashicorp/terraform/issues/9356)) - * command/fmt: Bare comment groups no longer have superfluous newlines - * command/fmt: Leading comments on list items are formatted properly - * provider/aws: Return correct AMI image when `most_recent` is set to `true`. ([#9277](https://github.com/hashicorp/terraform/issues/9277)) - * provider/aws: Fix issue with diff on import of `aws_eip` in EC2 Classic ([#9009](https://github.com/hashicorp/terraform/issues/9009)) - * provider/aws: Handle EC2 tags related errors in CloudFront Distribution resource. 
([#9298](https://github.com/hashicorp/terraform/issues/9298)) - * provider/aws: Fix cause error when using `etag` and `kms_key_id` with `aws_s3_bucket_object` ([#9168](https://github.com/hashicorp/terraform/issues/9168)) - * provider/aws: Fix issue reassigning EIP instances appropriately ([#7686](https://github.com/hashicorp/terraform/issues/7686)) - * provider/azurerm: removing resources from state when the API returns a 404 for them ([#8859](https://github.com/hashicorp/terraform/issues/8859)) - * provider/azurerm: Fixed a panic in `azurerm_virtual_machine` when using `diagnostic_profile` ([#9122](https://github.com/hashicorp/terraform/issues/9122)) - -## 0.7.5 (October 6, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * `tls_cert_request` is now a managed resource instead of a data source, restoring the pre-Terraform 0.7 behaviour ([#9035](https://github.com/hashicorp/terraform/issues/9035)) - -FEATURES: - * **New Provider:** `bitbucket` ([#7405](https://github.com/hashicorp/terraform/issues/7405)) - * **New Resource:** `aws_api_gateway_client_certificate` ([#8775](https://github.com/hashicorp/terraform/issues/8775)) - * **New Resource:** `azurerm_servicebus_topic` ([#9151](https://github.com/hashicorp/terraform/issues/9151)) - * **New Resource:** `azurerm_servicebus_subscription` ([#9185](https://github.com/hashicorp/terraform/issues/9185)) - * **New Resource:** `aws_emr_cluster` ([#9106](https://github.com/hashicorp/terraform/issues/9106)) - * **New Resource:** `aws_emr_instance_group` ([#9106](https://github.com/hashicorp/terraform/issues/9106)) - -IMPROVEMENTS: - * helper/schema: Adding of MinItems as a validation to Lists and Maps ([#9216](https://github.com/hashicorp/terraform/issues/9216)) - * provider/aws: Add JSON validation to the `aws_cloudwatch_event_rule` resource ([#8897](https://github.com/hashicorp/terraform/issues/8897)) - * provider/aws: S3 bucket policies are imported as separate resources 
([#8915](https://github.com/hashicorp/terraform/issues/8915)) - * provider/aws: S3 bucket policies can now be removed via the `aws_s3_bucket` resource ([#8915](https://github.com/hashicorp/terraform/issues/8915)) - * provider/aws: Added a `cluster_address` attribute to aws elasticache ([#8935](https://github.com/hashicorp/terraform/issues/8935)) - * provider/aws: Add JSON validation to the `aws_elasticsearch_domain resource`. ([#8898](https://github.com/hashicorp/terraform/issues/8898)) - * provider/aws: Add JSON validation to the `aws_kms_key resource`. ([#8900](https://github.com/hashicorp/terraform/issues/8900)) - * provider/aws: Add JSON validation to the `aws_s3_bucket_policy resource`. ([#8901](https://github.com/hashicorp/terraform/issues/8901)) - * provider/aws: Add JSON validation to the `aws_sns_topic resource`. ([#8902](https://github.com/hashicorp/terraform/issues/8902)) - * provider/aws: Add JSON validation to the `aws_sns_topic_policy resource`. ([#8903](https://github.com/hashicorp/terraform/issues/8903)) - * provider/aws: Add JSON validation to the `aws_sqs_queue resource`. ([#8904](https://github.com/hashicorp/terraform/issues/8904)) - * provider/aws: Add JSON validation to the `aws_sqs_queue_policy resource`. ([#8905](https://github.com/hashicorp/terraform/issues/8905)) - * provider/aws: Add JSON validation to the `aws_vpc_endpoint resource`. ([#8906](https://github.com/hashicorp/terraform/issues/8906)) - * provider/aws: Update `aws_cloudformation_stack` data source with new helper function. ([#8907](https://github.com/hashicorp/terraform/issues/8907)) - * provider/aws: Add JSON validation to the `aws_s3_bucket` resource. 
([#8908](https://github.com/hashicorp/terraform/issues/8908)) - * provider/aws: Add support for `cloudwatch_logging_options` to Firehose Delivery Streams ([#8671](https://github.com/hashicorp/terraform/issues/8671)) - * provider/aws: Add HTTP/2 support via the http_version parameter to CloudFront distribution ([#8777](https://github.com/hashicorp/terraform/issues/8777)) - * provider/aws: Add `query_string_cache_keys` to allow for selective caching of CloudFront keys ([#8777](https://github.com/hashicorp/terraform/issues/8777)) - * provider/aws: Support Import `aws_elasticache_cluster` ([#9010](https://github.com/hashicorp/terraform/issues/9010)) - * provider/aws: Add support for tags to `aws_cloudfront_distribution` ([#9011](https://github.com/hashicorp/terraform/issues/9011)) - * provider/aws: Support Import `aws_opsworks_stack` ([#9124](https://github.com/hashicorp/terraform/issues/9124)) - * provider/aws: Support Import `aws_elasticache_replication_groups` ([#9140](https://github.com/hashicorp/terraform/issues/9140)) - * provider/aws: Add new aws api-gateway integration types ([#9213](https://github.com/hashicorp/terraform/issues/9213)) - * provider/aws: Import `aws_db_event_subscription` ([#9220](https://github.com/hashicorp/terraform/issues/9220)) - * provider/azurerm: Add normalizeJsonString and validateJsonString functions ([#8909](https://github.com/hashicorp/terraform/issues/8909)) - * provider/azurerm: Support AzureRM Sql Database DataWarehouse ([#9196](https://github.com/hashicorp/terraform/issues/9196)) - * provider/openstack: Use proxy environment variables for communication with services ([#8948](https://github.com/hashicorp/terraform/issues/8948)) - * provider/vsphere: Adding `detach_unknown_disks_on_delete` flag for VM resource ([#8947](https://github.com/hashicorp/terraform/issues/8947)) - * provisioner/chef: Add `skip_register` attribute to allow skipping the registering steps ([#9127](https://github.com/hashicorp/terraform/issues/9127)) - -BUG 
FIXES: - * core: Fixed variables not being in scope for destroy -target on modules ([#9021](https://github.com/hashicorp/terraform/issues/9021)) - * core: Fixed issue that prevented diffs from being properly generated in a specific resource schema scenario ([#8891](https://github.com/hashicorp/terraform/issues/8891)) - * provider/aws: Remove support for `ah` and `esp` literals in Security Group Ingress/Egress rules; you must use the actual protocol number for protocols other than `tcp`, `udp`, `icmp`, or `all` ([#8975](https://github.com/hashicorp/terraform/issues/8975)) - * provider/aws: Do not report drift for effect values differing only by case in AWS policies ([#9139](https://github.com/hashicorp/terraform/issues/9139)) - * provider/aws: VPC ID, Port, Protocol and Name change on aws_alb_target_group will ForceNew resource ([#8989](https://github.com/hashicorp/terraform/issues/8989)) - * provider/aws: Wait for Spot Fleet to drain before removing from state ([#8938](https://github.com/hashicorp/terraform/issues/8938)) - * provider/aws: Fix issue when importing `aws_eip` resources by IP address ([#8970](https://github.com/hashicorp/terraform/issues/8970)) - * provider/aws: Ensure that origin_access_identity is a required value within the CloudFront distribution s3_config block ([#8777](https://github.com/hashicorp/terraform/issues/8777)) - * provider/aws: Corrected Seoul S3 Website Endpoint format ([#9032](https://github.com/hashicorp/terraform/issues/9032)) - * provider/aws: Fix failed remove S3 lifecycle_rule ([#9031](https://github.com/hashicorp/terraform/issues/9031)) - * provider/aws: Fix crashing bug in `aws_ami` data source when using `name_regex` ([#9033](https://github.com/hashicorp/terraform/issues/9033)) - * provider/aws: Fix reading dimensions on cloudwatch alarms ([#9029](https://github.com/hashicorp/terraform/issues/9029)) - * provider/aws: Changing snapshot_identifier on aws_db_instance resource should force… 
([#8806](https://github.com/hashicorp/terraform/issues/8806)) - * provider/aws: Refresh AWS EIP association from state when not found ([#9056](https://github.com/hashicorp/terraform/issues/9056)) - * provider/aws: Make encryption in Aurora instances computed-only ([#9060](https://github.com/hashicorp/terraform/issues/9060)) - * provider/aws: Make sure that VPC Peering Connection in a failed state returns an error. ([#9038](https://github.com/hashicorp/terraform/issues/9038)) - * provider/aws: guard against aws_route53_record delete panic ([#9049](https://github.com/hashicorp/terraform/issues/9049)) - * provider/aws: aws_db_option_group flattenOptions failing due to missing values ([#9052](https://github.com/hashicorp/terraform/issues/9052)) - * provider/aws: Add retry logic to the aws_ecr_repository delete func ([#9050](https://github.com/hashicorp/terraform/issues/9050)) - * provider/aws: Modifying the parameter_group_name of aws_elasticache_replication_group caused a panic ([#9101](https://github.com/hashicorp/terraform/issues/9101)) - * provider/aws: Fix issue with updating ELB subnets for subnets in the same AZ ([#9131](https://github.com/hashicorp/terraform/issues/9131)) - * provider/aws: aws_route53_record alias refresh manually updated record ([#9125](https://github.com/hashicorp/terraform/issues/9125)) - * provider/aws: Fix issue detaching volumes that were already detached ([#9023](https://github.com/hashicorp/terraform/issues/9023)) - * provider/aws: Add retry to the `aws_ssm_document` delete func ([#9188](https://github.com/hashicorp/terraform/issues/9188)) - * provider/aws: Fix issue updating `search_string` in aws_cloudwatch_metric_alarm ([#9230](https://github.com/hashicorp/terraform/issues/9230)) - * provider/aws: Update EFS resource to read performance mode and creation_token ([#9234](https://github.com/hashicorp/terraform/issues/9234)) - * provider/azurerm: fix resource ID parsing for subscriptions resources 
([#9163](https://github.com/hashicorp/terraform/issues/9163)) - * provider/librato: Mandatory name and conditions attributes weren't being sent on Update unless changed ([#8984](https://github.com/hashicorp/terraform/issues/8984)) - * provisioner/chef: Fix an error with parsing certain `vault_json` content ([#9114](https://github.com/hashicorp/terraform/issues/9114)) - * provisioner/chef: Change the order in which to clean up the user key so this is done before the Chef run starts ([#9114](https://github.com/hashicorp/terraform/issues/9114)) - -## 0.7.4 (September 19, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * In previous releases, the `private_key` field in the connection provisioner - inadvertently accepted a path argument and would read the file contents. - This functionality has been removed in this release ([#8577](https://github.com/hashicorp/terraform/issues/8577)), and the documented - method of using the `file()` interpolation function should be used to load - the key from a file. 
- -FEATURES: - * **New Resource:** `aws_codecommit_trigger` ([#8751](https://github.com/hashicorp/terraform/issues/8751)) - * **New Resource:** `aws_default_security_group` ([#8861](https://github.com/hashicorp/terraform/issues/8861)) - * **New Remote State Backend:** `manta` ([#8830](https://github.com/hashicorp/terraform/issues/8830)) - -IMPROVEMENTS: - * provider/aws: Support 'publish' attribute in `lambda_function` ([#8653](https://github.com/hashicorp/terraform/issues/8653)) - * provider/aws: Add `reader_endpoint` RDS Clusters ([#8884](https://github.com/hashicorp/terraform/issues/8884)) - * provider/aws: Export AWS ELB service account ARN ([#8700](https://github.com/hashicorp/terraform/issues/8700)) - * provider/aws: Allow `aws_alb` to have the name auto-generated ([#8673](https://github.com/hashicorp/terraform/issues/8673)) - * provider/aws: Expose `arn_suffix` on `aws_alb` ([#8833](https://github.com/hashicorp/terraform/issues/8833)) - * provider/aws: Add JSON validation to the `aws_cloudformation_stack` resource ([#8896](https://github.com/hashicorp/terraform/issues/8896)) - * provider/aws: Add JSON validation to the `aws_glacier_vault` resource ([#8899](https://github.com/hashicorp/terraform/issues/8899)) - * provider/azurerm: support Diagnostics Profile ([#8277](https://github.com/hashicorp/terraform/issues/8277)) - * provider/google: Resources depending on the `network` attribute can now reference the network by `self_link` or `name` ([#8639](https://github.com/hashicorp/terraform/issues/8639)) - * provider/postgresql: The standard environment variables PGHOST, PGUSER, PGPASSWORD and PGSSLMODE are now supported for provider configuration ([#8666](https://github.com/hashicorp/terraform/issues/8666)) - * helper/resource: Add timeout duration to timeout error message ([#8773](https://github.com/hashicorp/terraform/issues/8773)) - * provisioner/chef: Support recreating Chef clients by setting `recreate_client=true` 
([#8577](https://github.com/hashicorp/terraform/issues/8577)) - * provisioner/chef: Support encrypting existing Chef-Vaults for newly created clients ([#8577](https://github.com/hashicorp/terraform/issues/8577)) - -BUG FIXES: - * core: Fix regression when loading variables from json ([#8820](https://github.com/hashicorp/terraform/issues/8820)) - * provider/aws: Prevent crash creating an `aws_sns_topic` with an empty policy ([#8834](https://github.com/hashicorp/terraform/issues/8834)) - * provider/aws: Bump `aws_elasticsearch_domain` timeout values ([#672](https://github.com/hashicorp/terraform/issues/672)) - * provider/aws: `aws_nat_gateways` will now recreate on `failed` state ([#8689](https://github.com/hashicorp/terraform/issues/8689)) - * provider/aws: Prevent crash on account ID validation ([#8731](https://github.com/hashicorp/terraform/issues/8731)) - * provider/aws: `aws_db_instance` unexpected state when configuring enhanced monitoring ([#8707](https://github.com/hashicorp/terraform/issues/8707)) - * provider/aws: Remove region condition from `aws_codecommit_repository` ([#8778](https://github.com/hashicorp/terraform/issues/8778)) - * provider/aws: Support Policy DiffSuppression in `aws_kms_key` policy ([#8675](https://github.com/hashicorp/terraform/issues/8675)) - * provider/aws: Fix issue updating Elastic Beanstalk Environment variables ([#8848](https://github.com/hashicorp/terraform/issues/8848)) - * provider/scaleway: Fix `security_group_rule` identification ([#8661](https://github.com/hashicorp/terraform/issues/8661)) - * provider/cloudstack: Fix renaming a VPC with the `cloudstack_vpc` resource ([#8784](https://github.com/hashicorp/terraform/issues/8784)) - -## 0.7.3 (September 5, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * Terraform now validates the uniqueness of variable and output names in your configurations. In prior versions certain ways of duplicating variable names would work. 
This is now a configuration error (and should've always been). If you get an error running Terraform you may need to remove the duplicates. Done right, this should not affect the behavior of Terraform. - * The internal structure of `.terraform/modules` changed slightly. For configurations with modules, you'll need to run `terraform get` again. - -FEATURES: - * **New Provider:** `rabbitmq` ([#7694](https://github.com/hashicorp/terraform/issues/7694)) - * **New Data Source:** `aws_cloudformation_stack` ([#8640](https://github.com/hashicorp/terraform/issues/8640)) - * **New Resource:** `aws_cloudwatch_log_stream` ([#8626](https://github.com/hashicorp/terraform/issues/8626)) - * **New Resource:** `aws_default_route_table` ([#8323](https://github.com/hashicorp/terraform/issues/8323)) - * **New Resource:** `aws_spot_datafeed_subscription` ([#8640](https://github.com/hashicorp/terraform/issues/8640)) - * **New Resource:** `aws_s3_bucket_policy` ([#8615](https://github.com/hashicorp/terraform/issues/8615)) - * **New Resource:** `aws_sns_topic_policy` ([#8654](https://github.com/hashicorp/terraform/issues/8654)) - * **New Resource:** `aws_sqs_queue_policy` ([#8657](https://github.com/hashicorp/terraform/issues/8657)) - * **New Resource:** `aws_ssm_association` ([#8376](https://github.com/hashicorp/terraform/issues/8376)) - * **New Resource:** `cloudstack_affinity_group` ([#8360](https://github.com/hashicorp/terraform/issues/8360)) - * **New Resource:** `librato_alert` ([#8170](https://github.com/hashicorp/terraform/issues/8170)) - * **New Resource:** `librato_service` ([#8170](https://github.com/hashicorp/terraform/issues/8170)) - * **New Remote State Backend:** `local` ([#8647](https://github.com/hashicorp/terraform/issues/8647)) - * Data source blocks can now have a count associated with them ([#8635](https://github.com/hashicorp/terraform/issues/8635)) - * The count of a resource can now be referenced for interpolations: `self.count` and `type.name.count` work 
([#8581](https://github.com/hashicorp/terraform/issues/8581)) - * Provisioners now support connection using IPv6 in addition to IPv4 ([#6616](https://github.com/hashicorp/terraform/issues/6616)) - -IMPROVEMENTS: - * core: Add wildcard (match all) support to `ignore_changes` ([#8599](https://github.com/hashicorp/terraform/issues/8599)) - * core: HTTP module sources can now use netrc files for auth - * core: Show last resource state in a timeout error message ([#8510](https://github.com/hashicorp/terraform/issues/8510)) - * helper/schema: Add diff suppression callback ([#8585](https://github.com/hashicorp/terraform/issues/8585)) - * provider/aws: API Gateway Custom Authorizer ([#8535](https://github.com/hashicorp/terraform/issues/8535)) - * provider/aws: Add MemoryReservation To `aws_ecs_container_definition` data source ([#8437](https://github.com/hashicorp/terraform/issues/8437)) - * provider/aws: Add ability Enable/Disable For ELB Access logs ([#8438](https://github.com/hashicorp/terraform/issues/8438)) - * provider/aws: Add support for assuming a role prior to performing API operations ([#8638](https://github.com/hashicorp/terraform/issues/8638)) - * provider/aws: Export `arn` of `aws_autoscaling_group` ([#8503](https://github.com/hashicorp/terraform/issues/8503)) - * provider/aws: More robust handling of Lambda function archives hosted on S3 ([#6860](https://github.com/hashicorp/terraform/issues/6860)) - * provider/aws: Spurious diffs of `aws_s3_bucket` policy attributes due to JSON field ordering are reduced ([#8615](https://github.com/hashicorp/terraform/issues/8615)) - * provider/aws: `name_regex` attribute for local post-filtering of `aws_ami` data source results ([#8403](https://github.com/hashicorp/terraform/issues/8403)) - * provider/aws: Support for lifecycle hooks at ASG creation ([#5620](https://github.com/hashicorp/terraform/issues/5620)) - * provider/consul: Make provider settings truly optional 
([#8551](https://github.com/hashicorp/terraform/issues/8551)) - * provider/statuscake: Add support for contact-group id in statuscake test ([#8417](https://github.com/hashicorp/terraform/issues/8417)) - -BUG FIXES: - * core: Changing a module source from file to VCS no longer errors ([#8398](https://github.com/hashicorp/terraform/issues/8398)) - * core: Configuration is now validated prior to input, fixing an obscure parse error when attempting to interpolate a count ([#8591](https://github.com/hashicorp/terraform/issues/8591)) - * core: JSON configuration with resources with a single key parse properly ([#8485](https://github.com/hashicorp/terraform/issues/8485)) - * core: States with duplicate modules are detected and an error is shown ([#8463](https://github.com/hashicorp/terraform/issues/8463)) - * core: Validate uniqueness of variables/outputs in a module ([#8482](https://github.com/hashicorp/terraform/issues/8482)) - * core: `-var` flag inputs starting with `/` work - * core: `-var` flag inputs starting with a number work and was fixed in such a way that this should overall be a lot more resilient to inputs ([#8044](https://github.com/hashicorp/terraform/issues/8044)) - * provider/aws: Add AWS error message to retry APIGateway account update ([#8533](https://github.com/hashicorp/terraform/issues/8533)) - * provider/aws: Do not set empty string to state for `aws_vpn_gateway` availability zone ([#8645](https://github.com/hashicorp/terraform/issues/8645)) - * provider/aws: Fix. Adjust create and destroy timeout in aws_vpn_gateway_attachment. ([#8636](https://github.com/hashicorp/terraform/issues/8636)) - * provider/aws: Handle missing EFS mount target in `aws_efs_mount_target` ([#8529](https://github.com/hashicorp/terraform/issues/8529)) - * provider/aws: If an `aws_security_group` was used in Lambda function it may have prevented you from destroying such SG due to dangling ENIs created by Lambda service. 
These ENIs are now automatically cleaned up prior to SG deletion ([#8033](https://github.com/hashicorp/terraform/issues/8033)) - * provider/aws: Increase `aws_route_table` timeouts from 1 min to 2 mins ([#8465](https://github.com/hashicorp/terraform/issues/8465)) - * provider/aws: Increase aws_rds_cluster timeout to 40 minutes ([#8623](https://github.com/hashicorp/terraform/issues/8623)) - * provider/aws: Refresh `aws_route` from state if `aws_route_table` not found ([#8443](https://github.com/hashicorp/terraform/issues/8443)) - * provider/aws: Remove `aws_elasticsearch_domain` from state if it doesn't exist ([#8643](https://github.com/hashicorp/terraform/issues/8643)) - * provider/aws: Remove unsafe ptr dereferencing from ECS/ECR ([#8514](https://github.com/hashicorp/terraform/issues/8514)) - * provider/aws: Set `apply_method` to state in `aws_db_parameter_group` ([#8603](https://github.com/hashicorp/terraform/issues/8603)) - * provider/aws: Stop `aws_instance` `source_dest_check` triggering an API call on each terraform run ([#8450](https://github.com/hashicorp/terraform/issues/8450)) - * provider/aws: Wait for `aws_route_53_record` to be in-sync after a delete ([#8646](https://github.com/hashicorp/terraform/issues/8646)) - * provider/aws: `aws_volume_attachment` detachment errors are caught ([#8479](https://github.com/hashicorp/terraform/issues/8479)) - * provider/aws: adds resource retry to `aws_spot_instance_request` ([#8516](https://github.com/hashicorp/terraform/issues/8516)) - * provider/aws: Add validation of Health Check target to aws_elb. 
([#8578](https://github.com/hashicorp/terraform/issues/8578)) - * provider/aws: Skip detaching when aws_internet_gateway not found ([#8454](https://github.com/hashicorp/terraform/issues/8454)) - * provider/aws: Handle all kinds of CloudFormation stack failures ([#5606](https://github.com/hashicorp/terraform/issues/5606)) - * provider/azurerm: Reordering the checks after an Azure API Get ([#8607](https://github.com/hashicorp/terraform/issues/8607)) - * provider/chef: Fix "invalid header" errors that could occur ([#8382](https://github.com/hashicorp/terraform/issues/8382)) - * provider/github: Remove unsafe ptr dereferencing ([#8512](https://github.com/hashicorp/terraform/issues/8512)) - * provider/librato: Refresh space from state when not found ([#8596](https://github.com/hashicorp/terraform/issues/8596)) - * provider/mysql: Fix breakage in parsing MySQL version string ([#8571](https://github.com/hashicorp/terraform/issues/8571)) - * provider/template: `template_file` vars can be floating point ([#8590](https://github.com/hashicorp/terraform/issues/8590)) - * provider/triton: Fix bug where the ID of a `triton_key` was used prior to being set ([#8563](https://github.com/hashicorp/terraform/issues/8563)) - -## 0.7.2 (August 25, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * provider/openstack: changes were made to how volumes attached to instances are detected. If you attached a volume to an instance out of band to Terraform, it will be detached upon the next apply. You can resolve this by adding a `volume` entry for the attached volume. 
- * provider/aws: `aws_spot_fleet_request` has changed the `associate_public_ip_address` default from `true` to `false` - -FEATURES: - * **New Resource:** `aws_api_gateway_base_path_mapping` ([#8353](https://github.com/hashicorp/terraform/issues/8353)) - * **New Resource:** `aws_api_gateway_domain_name` ([#8353](https://github.com/hashicorp/terraform/issues/8353)) - * **New Resource:** `aws_ssm_document` ([#8460](https://github.com/hashicorp/terraform/issues/8460)) - -IMPROVEMENTS: - * core: Names generated with a unique prefix are now sortable based on age ([#8249](https://github.com/hashicorp/terraform/issues/8249)) - * provider/aws: Add Primary Endpoint Address attribute for `aws_elasticache_replication_group` ([#8385](https://github.com/hashicorp/terraform/issues/8385)) - * provider/aws: Add support for `network_mode` to `aws_ecs_task_definition` ([#8391](https://github.com/hashicorp/terraform/issues/8391)) - * provider/aws: Add support for LB target group to ECS service ([#8190](https://github.com/hashicorp/terraform/issues/8190)) - * provider/aws: Support Tags for `aws_alb` and `aws_alb_target_group` resources ([#8422](https://github.com/hashicorp/terraform/issues/8422)) - * provider/aws: Support `snapshot_name` for ElastiCache Cluster and Replication Groups ([#8419](https://github.com/hashicorp/terraform/issues/8419)) - * provider/aws: Add support to `aws_redshift_cluster` for restoring from snapshot ([#8414](https://github.com/hashicorp/terraform/issues/8414)) - * provider/aws: Add validation for master_password in `aws_redshift_cluster` ([#8434](https://github.com/hashicorp/terraform/issues/8434)) - * provider/openstack: Add `allowed_address_pairs` to `openstack_networking_port_v2` ([#8257](https://github.com/hashicorp/terraform/issues/8257)) - -BUG FIXES: - * core: fix crash case when malformed JSON given ([#8295](https://github.com/hashicorp/terraform/issues/8295)) - * core: when asking for input, spaces are allowed 
([#8394](https://github.com/hashicorp/terraform/issues/8394)) - * core: module sources with URL encodings in the local file path won't error ([#8418](https://github.com/hashicorp/terraform/issues/8418)) - * command/apply: prefix destroying resources with module path ([#8396](https://github.com/hashicorp/terraform/issues/8396)) - * command/import: can import into specific indexes ([#8335](https://github.com/hashicorp/terraform/issues/8335)) - * command/push: -upload-modules=false works ([#8456](https://github.com/hashicorp/terraform/issues/8456)) - * command/state mv: nested modules can be moved ([#8304](https://github.com/hashicorp/terraform/issues/8304)) - * command/state mv: resources with a count > 1 can be moved ([#8304](https://github.com/hashicorp/terraform/issues/8304)) - * provider/aws: Refresh `aws_lambda_event_source_mapping` from state when NotFound ([#8378](https://github.com/hashicorp/terraform/issues/8378)) - * provider/aws: `aws_elasticache_replication_group_id` validation change ([#8381](https://github.com/hashicorp/terraform/issues/8381)) - * provider/aws: Fix possible crash if using duplicate Route53 records ([#8399](https://github.com/hashicorp/terraform/issues/8399)) - * provider/aws: Refresh `aws_autoscaling_policy` from state on 404 ([#8430](https://github.com/hashicorp/terraform/issues/8430)) - * provider/aws: Fix crash with VPC Peering connection accept/requests ([#8432](https://github.com/hashicorp/terraform/issues/8432)) - * provider/aws: AWS SpotFleet Requests now works with Subnets and AZs ([#8320](https://github.com/hashicorp/terraform/issues/8320)) - * provider/aws: Refresh `aws_cloudwatch_event_target` from state on `ResourceNotFoundException` ([#8442](https://github.com/hashicorp/terraform/issues/8442)) - * provider/aws: Validate `aws_iam_policy_attachment` Name parameter to stop being empty ([#8441](https://github.com/hashicorp/terraform/issues/8441)) - * provider/aws: Fix segmentation fault in `aws_api_gateway_base_path_mapping` 
resource ([#8466](https://github.com/hashicorp/terraform/issues/8466)) - * provider/google: fix crash regression from Terraform 0.7.1 on `google_compute_firewall` resource ([#8390](https://github.com/hashicorp/terraform/issues/8390)) - * provider/openstack: Volume Attachment and Detachment Fixes ([#8172](https://github.com/hashicorp/terraform/issues/8172)) - -## 0.7.1 (August 19, 2016) - -FEATURES: - * **New Command:** `terraform state rm` ([#8200](https://github.com/hashicorp/terraform/issues/8200)) - * **New Provider:** `archive` ([#7322](https://github.com/hashicorp/terraform/issues/7322)) - * **New Resource:** `aws_alb` ([#8254](https://github.com/hashicorp/terraform/issues/8254)) - * **New Resource:** `aws_alb_listener` ([#8269](https://github.com/hashicorp/terraform/issues/8269)) - * **New Resource:** `aws_alb_target_group` ([#8254](https://github.com/hashicorp/terraform/issues/8254)) - * **New Resource:** `aws_alb_target_group_attachment` ([#8254](https://github.com/hashicorp/terraform/issues/8254)) - * **New Resource:** `aws_alb_target_group_rule` ([#8321](https://github.com/hashicorp/terraform/issues/8321)) - * **New Resource:** `aws_vpn_gateway_attachment` ([#7870](https://github.com/hashicorp/terraform/issues/7870)) - * **New Resource:** `aws_load_balancer_policy` ([#7458](https://github.com/hashicorp/terraform/issues/7458)) - * **New Resource:** `aws_load_balancer_backend_server_policy` ([#7458](https://github.com/hashicorp/terraform/issues/7458)) - * **New Resource:** `aws_load_balancer_listener_policy` ([#7458](https://github.com/hashicorp/terraform/issues/7458)) - * **New Resource:** `aws_lb_ssl_negotiation_policy` ([#8084](https://github.com/hashicorp/terraform/issues/8084)) - * **New Resource:** `aws_elasticache_replication_groups` ([#8275](https://github.com/hashicorp/terraform/issues/8275)) - * **New Resource:** `azurerm_virtual_network_peering` ([#8168](https://github.com/hashicorp/terraform/issues/8168)) - * **New Resource:** 
`azurerm_servicebus_namespace` ([#8195](https://github.com/hashicorp/terraform/issues/8195)) - * **New Resource:** `google_compute_image` ([#7960](https://github.com/hashicorp/terraform/issues/7960)) - * **New Resource:** `packet_volume` ([#8142](https://github.com/hashicorp/terraform/issues/8142)) - * **New Resource:** `consul_prepared_query` ([#7474](https://github.com/hashicorp/terraform/issues/7474)) - * **New Data Source:** `aws_ip_ranges` ([#7984](https://github.com/hashicorp/terraform/issues/7984)) - * **New Data Source:** `fastly_ip_ranges` ([#7984](https://github.com/hashicorp/terraform/issues/7984)) - * **New Data Source:** `aws_caller_identity` ([#8206](https://github.com/hashicorp/terraform/issues/8206)) - * **New Data Source:** `aws_elb_service_account` ([#8221](https://github.com/hashicorp/terraform/issues/8221)) - * **New Data Source:** `aws_redshift_service_account` ([#8224](https://github.com/hashicorp/terraform/issues/8224)) - -IMPROVEMENTS - * provider/archive support folders in output_path ([#8278](https://github.com/hashicorp/terraform/issues/8278)) - * provider/aws: Introduce `aws_elasticsearch_domain` `elasticsearch_version` field (to specify ES version) ([#7860](https://github.com/hashicorp/terraform/issues/7860)) - * provider/aws: Add support for TargetGroups (`aws_alb_target_groups`) to `aws_autoscaling_group` [8327] - * provider/aws: CloudWatch Metrics are now supported for `aws_route53_health_check` resources ([#8319](https://github.com/hashicorp/terraform/issues/8319)) - * provider/aws: Query all pages of group membership ([#6726](https://github.com/hashicorp/terraform/issues/6726)) - * provider/aws: Query all pages of IAM Policy attachments ([#7779](https://github.com/hashicorp/terraform/issues/7779)) - * provider/aws: Change the way ARNs are built ([#7151](https://github.com/hashicorp/terraform/issues/7151)) - * provider/aws: Add support for Elasticsearch destination to firehose delivery streams 
([#7839](https://github.com/hashicorp/terraform/issues/7839)) - * provider/aws: Retry AttachInternetGateway and increase timeout on `aws_internet_gateway` ([#7891](https://github.com/hashicorp/terraform/issues/7891)) - * provider/aws: Add support for Enhanced monitoring to `aws_rds_cluster_instance` ([#8038](https://github.com/hashicorp/terraform/issues/8038)) - * provider/aws: Add ability to set Requests Payer in `aws_s3_bucket` ([#8065](https://github.com/hashicorp/terraform/issues/8065)) - * provider/aws: Add ability to set canned ACL in `aws_s3_bucket_object` ([#8091](https://github.com/hashicorp/terraform/issues/8091)) - * provider/aws: Allow skipping credentials validation, requesting Account ID and/or metadata API check ([#7874](https://github.com/hashicorp/terraform/issues/7874)) - * provider/aws: API gateway request/response parameters can now be specified as map, original `*_in_json` parameters deprecated ([#7794](https://github.com/hashicorp/terraform/issues/7794)) - * provider/aws: Add support for `promotion_tier` to `aws_rds_cluster_instance` ([#8087](https://github.com/hashicorp/terraform/issues/8087)) - * provider/aws: Allow specifying custom S3 endpoint and enforcing S3 path style URLs via new provider options ([#7871](https://github.com/hashicorp/terraform/issues/7871)) - * provider/aws: Add ability to set Storage Class in `aws_s3_bucket_object` ([#8174](https://github.com/hashicorp/terraform/issues/8174)) - * provider/aws: Treat `aws_lambda_function` w/ empty `subnet_ids` & `security_groups_ids` in `vpc_config` as VPC-disabled function ([#6191](https://github.com/hashicorp/terraform/issues/6191)) - * provider/aws: Allow `source_ids` in `aws_db_event_subscription` to be Updatable ([#7892](https://github.com/hashicorp/terraform/issues/7892)) - * provider/aws: Make `aws_efs_mount_target` creation fail for 2+ targets per AZ ([#8205](https://github.com/hashicorp/terraform/issues/8205)) - * provider/aws: Add `force_destroy` option to `aws_route53_zone` 
([#8239](https://github.com/hashicorp/terraform/issues/8239)) - * provider/aws: Support import of `aws_s3_bucket` ([#8262](https://github.com/hashicorp/terraform/issues/8262)) - * provider/aws: Increase timeout for retrying creation of IAM role ([#7733](https://github.com/hashicorp/terraform/issues/7733)) - * provider/aws: Add ability to set peering options in aws_vpc_peering_connection. ([#8310](https://github.com/hashicorp/terraform/issues/8310)) - * provider/azure: add custom_data argument for azure_instance resource ([#8158](https://github.com/hashicorp/terraform/issues/8158)) - * provider/azurerm: Adds support for uploading blobs to azure storage from local source ([#7994](https://github.com/hashicorp/terraform/issues/7994)) - * provider/azurerm: Storage blob contents can be copied from an existing blob ([#8126](https://github.com/hashicorp/terraform/issues/8126)) - * provider/datadog: Allow `tags` to be configured for monitor resources. ([#8284](https://github.com/hashicorp/terraform/issues/8284)) - * provider/google: allows atomic Cloud DNS record changes ([#6575](https://github.com/hashicorp/terraform/issues/6575)) - * provider/google: Move URLMap hosts to TypeSet from TypeList ([#7472](https://github.com/hashicorp/terraform/issues/7472)) - * provider/google: Support static private IP addresses in `resource_compute_instance` ([#6310](https://github.com/hashicorp/terraform/issues/6310)) - * provider/google: Add support for using a GCP Image Family ([#8083](https://github.com/hashicorp/terraform/issues/8083)) - * provider/openstack: Support updating the External Gateway assigned to a Neutron router ([#8070](https://github.com/hashicorp/terraform/issues/8070)) - * provider/openstack: Support for `value_specs` param on `openstack_networking_network_v2` ([#8155](https://github.com/hashicorp/terraform/issues/8155)) - * provider/openstack: Add `value_specs` param on `openstack_networking_subnet_v2` ([#8181](https://github.com/hashicorp/terraform/issues/8181)) - * 
provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` ([#7908](https://github.com/hashicorp/terraform/issues/7908)) - * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` ([#7916](https://github.com/hashicorp/terraform/issues/7916)) - * provider/vsphere: Standardizing datastore references to use builtin Path func ([#8075](https://github.com/hashicorp/terraform/issues/8075)) - * provider/consul: add tls config support to consul provider ([#7015](https://github.com/hashicorp/terraform/issues/7015)) - * remote/consul: Support setting datacenter when using consul remote state ([#8102](https://github.com/hashicorp/terraform/issues/8102)) - * provider/google: Support import of `google_compute_instance_template` ([#8147](https://github.com/hashicorp/terraform/issues/8147)), `google_compute_firewall` ([#8236](https://github.com/hashicorp/terraform/issues/8236)), `google_compute_target_pool` ([#8133](https://github.com/hashicorp/terraform/issues/8133)), `google_compute_forwarding_rule` ([#8122](https://github.com/hashicorp/terraform/issues/8122)), `google_compute_http_health_check` ([#8121](https://github.com/hashicorp/terraform/issues/8121)), `google_compute_autoscaler` ([#8115](https://github.com/hashicorp/terraform/issues/8115)) - -BUG FIXES: - * core: Fix issue preventing `taint` from working with resources that had no other attributes in their diff ([#8167](https://github.com/hashicorp/terraform/issues/8167)) - * core: CLI will only run exact match commands ([#7983](https://github.com/hashicorp/terraform/issues/7983)) - * core: Fix panic when resources end up null in state file ([#8120](https://github.com/hashicorp/terraform/issues/8120)) - * core: Fix panic when validating a count with an unprefixed variable ([#8243](https://github.com/hashicorp/terraform/issues/8243)) - * core: Divide by zero in interpolations no longer panics 
([#7701](https://github.com/hashicorp/terraform/issues/7701)) - * core: Fix panic on some invalid interpolation syntax ([#5672](https://github.com/hashicorp/terraform/issues/5672)) - * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` ([#7966](https://github.com/hashicorp/terraform/issues/7966)) - * provider/aws: `aws_cloudformation_stack` now respects `timeout_in_minutes` field when waiting for CF API to finish an update operation ([#7997](https://github.com/hashicorp/terraform/issues/7997)) - * provider/aws: Prevent errors when `aws_s3_bucket` `acceleration_status` is not available in a given region ([#7999](https://github.com/hashicorp/terraform/issues/7999)) - * provider/aws: Add state filter to `aws_availability_zone`s data source ([#7965](https://github.com/hashicorp/terraform/issues/7965)) - * provider/aws: Handle lack of snapshot ID for a volume in `ami_copy` ([#7995](https://github.com/hashicorp/terraform/issues/7995)) - * provider/aws: Retry association of IAM Role & instance profile ([#7938](https://github.com/hashicorp/terraform/issues/7938)) - * provider/aws: Fix `aws_s3_bucket` resource `redirect_all_requests_to` action ([#7883](https://github.com/hashicorp/terraform/issues/7883)) - * provider/aws: Fix issue updating ElasticBeanstalk Environment Settings ([#7777](https://github.com/hashicorp/terraform/issues/7777)) - * provider/aws: `aws_rds_cluster` creation timeout bumped to 40 minutes ([#8052](https://github.com/hashicorp/terraform/issues/8052)) - * provider/aws: Update ElasticTranscoder to allow empty notifications, removing notifications, etc ([#8207](https://github.com/hashicorp/terraform/issues/8207)) - * provider/aws: Fix line ending errors/diffs with IAM Server Certs ([#8074](https://github.com/hashicorp/terraform/issues/8074)) - * provider/aws: Fixing IAM data source policy generation to prevent spurious diffs ([#6956](https://github.com/hashicorp/terraform/issues/6956)) - * provider/aws: Correct how CORS rules 
are handled in `aws_s3_bucket` ([#8096](https://github.com/hashicorp/terraform/issues/8096)) - * provider/aws: allow numeric characters in RedshiftClusterDbName ([#8178](https://github.com/hashicorp/terraform/issues/8178)) - * provider/aws: `aws_security_group` now creates tags as early as possible in the process ([#7849](https://github.com/hashicorp/terraform/issues/7849)) - * provider/aws: Defensively code around `db_security_group` ingress rules ([#7893](https://github.com/hashicorp/terraform/issues/7893)) - * provider/aws: `aws_spot_fleet_request` throws panic on missing subnet_id or availability_zone ([#8217](https://github.com/hashicorp/terraform/issues/8217)) - * provider/aws: Terraform fails during Redshift delete if FinalSnapshot is being taken. ([#8270](https://github.com/hashicorp/terraform/issues/8270)) - * provider/azurerm: `azurerm_storage_account` will interrupt for Ctrl-C ([#8215](https://github.com/hashicorp/terraform/issues/8215)) - * provider/azurerm: Public IP - Setting idle timeout value caused panic. 
#8283 - * provider/digitalocean: trim whitespace from ssh key ([#8173](https://github.com/hashicorp/terraform/issues/8173)) - * provider/digitalocean: Enforce Lowercase on IPV6 Addresses ([#7652](https://github.com/hashicorp/terraform/issues/7652)) - * provider/google: Use resource specific project when making queries/changes ([#7029](https://github.com/hashicorp/terraform/issues/7029)) - * provider/google: Fix read for the backend service resource ([#7476](https://github.com/hashicorp/terraform/issues/7476)) - * provider/mysql: `mysql_user` works with MySQL versions before 5.7.6 ([#8251](https://github.com/hashicorp/terraform/issues/8251)) - * provider/openstack: Fix typo in OpenStack LBaaSv2 pool resource ([#8179](https://github.com/hashicorp/terraform/issues/8179)) - * provider/vSphere: Fix for IPv6 only environment creation ([#7643](https://github.com/hashicorp/terraform/issues/7643)) - * provider/google: Correct update process for authorized networks in `google_sql_database_instance` ([#8290](https://github.com/hashicorp/terraform/issues/8290)) - -## 0.7.0 (August 2, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Terraform Core - * Terraform's built-in plugins are now distributed as part of the main Terraform binary, and use the go-plugin framework. Overrides are still available using separate binaries, but will need recompiling against Terraform 0.7. - * The `terraform plan` command no longer persists state. This makes the command much safer to run, since it is now side-effect free. The `refresh` and `apply` commands still persist state to local and remote storage. Any automation that assumes that `terraform plan` persists state will need to be reworked to explicitly call `terraform refresh` to get the equivalent side-effect. (The `terraform plan` command no longer has the `-state-out` or `-backup` flags due to this change.) - * The `concat()` interpolation function can no longer be used to join strings. 
- * Quotation marks may no longer be escaped in HIL expressions ([#7201](https://github.com/hashicorp/terraform/issues/7201)) - * Lists materialized using splat syntax, for example `aws_instance.foo.*.id` are now ordered by the count index rather than lexicographically sorted. If this produces a large number of undesirable differences, you can use the new `sort()` interpolation function to produce the previous behaviour. - * You now access the values of maps using the syntax `var.map["key"]` or the `lookup` function instead of `var.map.key`. - * Outputs on `terraform_remote_state` resources are now top level attributes rather than inside the `output` map. In order to access outputs, use the syntax: `terraform_remote_state.name.outputname`. Currently outputs cannot be named `config` or `backend`. - * AWS Provider - * `aws_elb` now defaults `cross_zone_load_balancing` to `true` - * `aws_instance`: EC2 Classic users may continue to use `security_groups` to reference Security Groups by their `name`. Users who are managing Instances inside VPCs will need to use `vpc_security_group_ids` instead, and reference the security groups by their `id`. Ref https://github.com/hashicorp/terraform/issues/6416#issuecomment-219145065 - * `aws_kinesis_firehose_delivery_stream`: AWS Kinesis Firehose has been refactored to support Redshift as a destination in addition to S3. As a result, the configuration has changed and users will need to update their configuration to match the new `s3_configuration` block. Check out the documentation on [AWS Kinesis Firehose](https://www.terraform.io/docs/providers/aws/r/kinesis_firehose_delivery_stream.html) for more information ([#7375](https://github.com/hashicorp/terraform/issues/7375)) - * `aws_route53_record`: `latency_routing_policy`, `geolocation_routing_policy`, and `failover_routing_policy` block options have been added. 
With these additions we’ve renamed the `weight` attribute to `weighted_routing_policy`, and it has changed from a string to a block to match the others. Please see the updated documentation on using `weighted_routing_policy`: https://www.terraform.io/docs/providers/aws/r/route53_record.html . ([#6954](https://github.com/hashicorp/terraform/issues/6954)) - * `aws_db_instance` now defaults `publicly_accessible` to false - * Microsoft Azure Provider - * In documentation, the "Azure (Resource Manager)" provider has been renamed to the "Microsoft Azure" provider. - * `azurerm_dns_cname_record` now accepts a single record rather than a list of records - * `azurerm_virtual_machine` computer_name now Required - * Openstack Provider - * `openstack_networking_subnet_v2` now defaults to turning DHCP on. - * `openstack_fw_policy_v1` now correctly applies rules in the order they are specified. Upon the next apply, current rules might be re-ordered. - * The `member` attribute of `openstack_lb_pool_v1` has been deprecated. Please use the new `openstack_lb_member_v1` resource. - * Docker Provider - * `keep_updated` parameter removed from `docker_image` - This parameter never did what it was supposed to do. See relevant docs, specifically `pull_trigger` & new `docker_registry_image` data source to understand how to keep your `docker_image` updated. - * Atlas Provider - * `atlas_artifact` resource has been deprecated. Please use the new `atlas_artifact` Data Source. - * CloudStack Provider - * All deprecated parameters are removed from all `CloudStack` resources - -FEATURES: - - * **Data sources** are a new kind of primitive in Terraform. Attributes for data sources are refreshed and available during the planning stage. ([#6598](https://github.com/hashicorp/terraform/issues/6598)) - * **Lists and maps** can now be used as first class types for variables and may also be passed between modules. 
([#6322](https://github.com/hashicorp/terraform/issues/6322)) - * **State management CLI commands** provide a variety of state manipulation functions for advanced use cases. This should be used where possible instead of manually modifying state files. ([#5811](https://github.com/hashicorp/terraform/issues/5811)) - * **State Import** allows a way to import existing resources into Terraform state for many types of resource. Initial coverage of AWS is quite high, and it is straightforward to add support for new resources. - * **New Command:** `terraform state` to provide access to a variety of state manipulation functions ([#5811](https://github.com/hashicorp/terraform/issues/5811)) - * **New Option:** `terraform output` now supports the `-json` flag to print a machine-readable representation of outputs ([#7608](https://github.com/hashicorp/terraform/issues/7608)) - * **New Data Source:** `aws_ami` ([#6911](https://github.com/hashicorp/terraform/issues/6911)) - * **New Data Source:** `aws_availability_zones` ([#6805](https://github.com/hashicorp/terraform/issues/6805)) - * **New Data Source:** `aws_iam_policy_document` ([#6881](https://github.com/hashicorp/terraform/issues/6881)) - * **New Data Source:** `aws_s3_bucket_object` ([#6946](https://github.com/hashicorp/terraform/issues/6946)) - * **New Data Source:** `aws_ecs_container_definition` ([#7230](https://github.com/hashicorp/terraform/issues/7230)) - * **New Data Source:** `atlas_artifact` ([#7419](https://github.com/hashicorp/terraform/issues/7419)) - * **New Data Source:** `docker_registry_image` ([#7000](https://github.com/hashicorp/terraform/issues/7000)) - * **New Data Source:** `consul_keys` ([#7678](https://github.com/hashicorp/terraform/issues/7678)) - * **New Interpolation Function:** `sort` ([#7128](https://github.com/hashicorp/terraform/issues/7128)) - * **New Interpolation Function:** `distinct` ([#7174](https://github.com/hashicorp/terraform/issues/7174)) - * **New Interpolation Function:** `list` 
([#7528](https://github.com/hashicorp/terraform/issues/7528)) - * **New Interpolation Function:** `map` ([#7832](https://github.com/hashicorp/terraform/issues/7832)) - * **New Provider:** `grafana` ([#6206](https://github.com/hashicorp/terraform/issues/6206)) - * **New Provider:** `logentries` ([#7067](https://github.com/hashicorp/terraform/issues/7067)) - * **New Provider:** `scaleway` ([#7331](https://github.com/hashicorp/terraform/issues/7331)) - * **New Provider:** `random` - allows generation of random values without constantly generating diffs ([#6672](https://github.com/hashicorp/terraform/issues/6672)) - * **New Remote State Provider:** - `gcs` - Google Cloud Storage ([#6814](https://github.com/hashicorp/terraform/issues/6814)) - * **New Remote State Provider:** - `azure` - Microsoft Azure Storage ([#7064](https://github.com/hashicorp/terraform/issues/7064)) - * **New Resource:** `aws_elb_attachment` ([#6879](https://github.com/hashicorp/terraform/issues/6879)) - * **New Resource:** `aws_elastictranscoder_preset` ([#6965](https://github.com/hashicorp/terraform/issues/6965)) - * **New Resource:** `aws_elastictranscoder_pipeline` ([#6965](https://github.com/hashicorp/terraform/issues/6965)) - * **New Resource:** `aws_iam_group_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858)) - * **New Resource:** `aws_iam_role_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858)) - * **New Resource:** `aws_iam_user_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858)) - * **New Resource:** `aws_rds_cluster_parameter_group` ([#5269](https://github.com/hashicorp/terraform/issues/5269)) - * **New Resource:** `aws_spot_fleet_request` ([#7243](https://github.com/hashicorp/terraform/issues/7243)) - * **New Resource:** `aws_ses_active_receipt_rule_set` ([#5387](https://github.com/hashicorp/terraform/issues/5387)) - * **New Resource:** `aws_ses_receipt_filter` 
([#5387](https://github.com/hashicorp/terraform/issues/5387)) - * **New Resource:** `aws_ses_receipt_rule` ([#5387](https://github.com/hashicorp/terraform/issues/5387)) - * **New Resource:** `aws_ses_receipt_rule_set` ([#5387](https://github.com/hashicorp/terraform/issues/5387)) - * **New Resource:** `aws_simpledb_domain` ([#7600](https://github.com/hashicorp/terraform/issues/7600)) - * **New Resource:** `aws_opsworks_user_profile` ([#6304](https://github.com/hashicorp/terraform/issues/6304)) - * **New Resource:** `aws_opsworks_permission` ([#6304](https://github.com/hashicorp/terraform/issues/6304)) - * **New Resource:** `aws_ami_launch_permission` ([#7365](https://github.com/hashicorp/terraform/issues/7365)) - * **New Resource:** `aws_appautoscaling_policy` ([#7663](https://github.com/hashicorp/terraform/issues/7663)) - * **New Resource:** `aws_appautoscaling_target` ([#7663](https://github.com/hashicorp/terraform/issues/7663)) - * **New Resource:** `openstack_blockstorage_volume_v2` ([#6693](https://github.com/hashicorp/terraform/issues/6693)) - * **New Resource:** `openstack_lb_loadbalancer_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) - * **New Resource:** `openstack_lb_listener_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) - * **New Resource:** `openstack_lb_pool_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) - * **New Resource:** `openstack_lb_member_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) - * **New Resource:** `openstack_lb_monitor_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) - * **New Resource:** `vsphere_virtual_disk` ([#6273](https://github.com/hashicorp/terraform/issues/6273)) - * **New Resource:** `github_repository_collaborator` ([#6861](https://github.com/hashicorp/terraform/issues/6861)) - * **New Resource:** `datadog_timeboard` ([#6900](https://github.com/hashicorp/terraform/issues/6900)) - * **New Resource:** `digitalocean_tag` 
([#7500](https://github.com/hashicorp/terraform/issues/7500)) - * **New Resource:** `digitalocean_volume` ([#7560](https://github.com/hashicorp/terraform/issues/7560)) - * **New Resource:** `consul_agent_service` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) - * **New Resource:** `consul_catalog_entry` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) - * **New Resource:** `consul_node` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) - * **New Resource:** `consul_service` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) - * **New Resource:** `mysql_grant` ([#7656](https://github.com/hashicorp/terraform/issues/7656)) - * **New Resource:** `mysql_user` ([#7656](https://github.com/hashicorp/terraform/issues/7656)) - * **New Resource:** `azurerm_storage_table` ([#7327](https://github.com/hashicorp/terraform/issues/7327)) - * **New Resource:** `azurerm_virtual_machine_scale_set` ([#6711](https://github.com/hashicorp/terraform/issues/6711)) - * **New Resource:** `azurerm_traffic_manager_endpoint` ([#7826](https://github.com/hashicorp/terraform/issues/7826)) - * **New Resource:** `azurerm_traffic_manager_profile` ([#7826](https://github.com/hashicorp/terraform/issues/7826)) - * core: Tainted resources now show up in the plan and respect dependency ordering ([#6600](https://github.com/hashicorp/terraform/issues/6600)) - * core: The `lookup` interpolation function can now have a default fall-back value specified ([#6884](https://github.com/hashicorp/terraform/issues/6884)) - * core: The `terraform plan` command no longer persists state. ([#6811](https://github.com/hashicorp/terraform/issues/6811)) - -IMPROVEMENTS: - - * core: The `jsonencode` interpolation function now supports encoding lists and maps ([#6749](https://github.com/hashicorp/terraform/issues/6749)) - * core: Add the ability for resource definitions to mark attributes as "sensitive" which will omit them from UI output. 
([#6923](https://github.com/hashicorp/terraform/issues/6923)) - * core: Support `.` in map keys ([#7654](https://github.com/hashicorp/terraform/issues/7654)) - * core: Enhance interpolation functions to account for first class maps and lists ([#7832](https://github.com/hashicorp/terraform/issues/7832)) ([#7834](https://github.com/hashicorp/terraform/issues/7834)) - * command: Remove second DefaultDataDirectory const ([#7666](https://github.com/hashicorp/terraform/issues/7666)) - * provider/aws: Add `dns_name` to `aws_efs_mount_target` ([#7428](https://github.com/hashicorp/terraform/issues/7428)) - * provider/aws: Add `force_destroy` to `aws_iam_user` for force-deleting access keys assigned to the user ([#7766](https://github.com/hashicorp/terraform/issues/7766)) - * provider/aws: Add `option_settings` to `aws_db_option_group` ([#6560](https://github.com/hashicorp/terraform/issues/6560)) - * provider/aws: Add more explicit support for Skipping Final Snapshot in RDS Cluster ([#6795](https://github.com/hashicorp/terraform/issues/6795)) - * provider/aws: Add support for S3 Bucket Acceleration ([#6628](https://github.com/hashicorp/terraform/issues/6628)) - * provider/aws: Add support for `kms_key_id` to `aws_db_instance` ([#6651](https://github.com/hashicorp/terraform/issues/6651)) - * provider/aws: Specifying more than one health check on an `aws_elb` fails with an error prior to making an API request ([#7489](https://github.com/hashicorp/terraform/issues/7489)) - * provider/aws: Add support to `aws_redshift_cluster` for `iam_roles` ([#6647](https://github.com/hashicorp/terraform/issues/6647)) - * provider/aws: SQS use raw policy string if compact fails ([#6724](https://github.com/hashicorp/terraform/issues/6724)) - * provider/aws: Set default description to "Managed by Terraform" ([#6104](https://github.com/hashicorp/terraform/issues/6104)) - * provider/aws: Support for Redshift Cluster encryption using a KMS key 
([#6712](https://github.com/hashicorp/terraform/issues/6712)) - * provider/aws: Support tags for AWS redshift cluster ([#5356](https://github.com/hashicorp/terraform/issues/5356)) - * provider/aws: Add `iam_arn` to aws_cloudfront_origin_access_identity ([#6955](https://github.com/hashicorp/terraform/issues/6955)) - * provider/aws: Add `cross_zone_load_balancing` on `aws_elb` default to true ([#6897](https://github.com/hashicorp/terraform/issues/6897)) - * provider/aws: Add support for `character_set_name` to `aws_db_instance` ([#4861](https://github.com/hashicorp/terraform/issues/4861)) - * provider/aws: Add support for DB parameter group with RDS Cluster Instances (Aurora) ([#6865](https://github.com/hashicorp/terraform/issues/6865)) - * provider/aws: Add `name_prefix` to `aws_iam_instance_profile` and `aws_iam_role` ([#6939](https://github.com/hashicorp/terraform/issues/6939)) - * provider/aws: Allow authentication & credentials validation for federated IAM Roles and EC2 instance profiles ([#6536](https://github.com/hashicorp/terraform/issues/6536)) - * provider/aws: Rename parameter_group_name to db_cluster_parameter_group_name ([#7083](https://github.com/hashicorp/terraform/issues/7083)) - * provider/aws: Retry RouteTable Route/Assocation creation ([#7156](https://github.com/hashicorp/terraform/issues/7156)) - * provider/aws: `delegation_set_id` conflicts w/ `vpc_id` in `aws_route53_zone` as delegation sets can only be used for public zones ([#7213](https://github.com/hashicorp/terraform/issues/7213)) - * provider/aws: Support Elastic Beanstalk scheduledaction ([#7376](https://github.com/hashicorp/terraform/issues/7376)) - * provider/aws: Add support for NewInstancesProtectedFromScaleIn to `aws_autoscaling_group` ([#6490](https://github.com/hashicorp/terraform/issues/6490)) - * provider/aws: Added support for `snapshot_identifier` parameter in aws_rds_cluster ([#7158](https://github.com/hashicorp/terraform/issues/7158)) - * provider/aws: Add inplace edit/update 
DB Security Group Rule Ingress ([#7245](https://github.com/hashicorp/terraform/issues/7245)) - * provider/aws: Added support for redshift destination to firehose delivery streams ([#7375](https://github.com/hashicorp/terraform/issues/7375)) - * provider/aws: Allow `aws_redshift_security_group` ingress rules to change ([#5939](https://github.com/hashicorp/terraform/issues/5939)) - * provider/aws: Add support for `encryption` and `kms_key_id` to `aws_ami` ([#7181](https://github.com/hashicorp/terraform/issues/7181)) - * provider/aws: AWS prefix lists to enable security group egress to a VPC Endpoint ([#7511](https://github.com/hashicorp/terraform/issues/7511)) - * provider/aws: Retry creation of IAM role depending on new IAM user ([#7324](https://github.com/hashicorp/terraform/issues/7324)) - * provider/aws: Allow `port` on `aws_db_instance` to be updated ([#7441](https://github.com/hashicorp/terraform/issues/7441)) - * provider/aws: Allow VPC Classic Linking in Autoscaling Launch Configs ([#7470](https://github.com/hashicorp/terraform/issues/7470)) - * provider/aws: Support `task_role_arn` on `aws_ecs_task_definition ([#7653](https://github.com/hashicorp/terraform/issues/7653)) - * provider/aws: Support Tags on `aws_rds_cluster` ([#7695](https://github.com/hashicorp/terraform/issues/7695)) - * provider/aws: Support kms_key_id for `aws_rds_cluster` ([#7662](https://github.com/hashicorp/terraform/issues/7662)) - * provider/aws: Allow setting a `poll_interval` on `aws_elastic_beanstalk_environment` ([#7523](https://github.com/hashicorp/terraform/issues/7523)) - * provider/aws: Add support for Kinesis streams shard-level metrics ([#7684](https://github.com/hashicorp/terraform/issues/7684)) - * provider/aws: Support create / update greater than twenty db parameters in `aws_db_parameter_group` ([#7364](https://github.com/hashicorp/terraform/issues/7364)) - * provider/aws: expose network interface id in `aws_instance` 
([#6751](https://github.com/hashicorp/terraform/issues/6751)) - * provider/aws: Adding passthrough behavior for API Gateway integration ([#7801](https://github.com/hashicorp/terraform/issues/7801)) - * provider/aws: Enable Redshift Cluster Logging ([#7813](https://github.com/hashicorp/terraform/issues/7813)) - * provider/aws: Add ability to set Performance Mode in `aws_efs_file_system` ([#7791](https://github.com/hashicorp/terraform/issues/7791)) - * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` ([#6807](https://github.com/hashicorp/terraform/issues/6807)) - * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys ([#6742](https://github.com/hashicorp/terraform/issues/6742)) - * provider/azurerm: The Azure SDK now exposes better error messages ([#6976](https://github.com/hashicorp/terraform/issues/6976)) - * provider/azurerm: `azurerm_dns_zone` now returns `name_servers` ([#7434](https://github.com/hashicorp/terraform/issues/7434)) - * provider/azurerm: dump entire Request/Response in autorest Decorator ([#7719](https://github.com/hashicorp/terraform/issues/7719)) - * provider/azurerm: add option to delete VMs Data disks on termination ([#7793](https://github.com/hashicorp/terraform/issues/7793)) - * provider/clc: Add support for hyperscale and bareMetal server types and package installation - * provider/clc: Fix optional server password ([#6414](https://github.com/hashicorp/terraform/issues/6414)) - * provider/cloudstack: Add support for affinity groups to `cloudstack_instance` ([#6898](https://github.com/hashicorp/terraform/issues/6898)) - * provider/cloudstack: Enable swapping of ACLs without having to rebuild the network tier ([#6741](https://github.com/hashicorp/terraform/issues/6741)) - * provider/cloudstack: Improve ACL swapping ([#7315](https://github.com/hashicorp/terraform/issues/7315)) - * provider/cloudstack: Add project support to `cloudstack_network_acl` and 
`cloudstack_network_acl_rule` ([#7612](https://github.com/hashicorp/terraform/issues/7612)) - * provider/cloudstack: Add option to set `root_disk_size` to `cloudstack_instance` ([#7070](https://github.com/hashicorp/terraform/issues/7070)) - * provider/cloudstack: Do no longer force a new `cloudstack_instance` resource when updating `user_data` ([#7074](https://github.com/hashicorp/terraform/issues/7074)) - * provider/cloudstack: Add option to set `security_group_names` to `cloudstack_instance` ([#7240](https://github.com/hashicorp/terraform/issues/7240)) - * provider/cloudstack: Add option to set `affinity_group_names` to `cloudstack_instance` ([#7242](https://github.com/hashicorp/terraform/issues/7242)) - * provider/datadog: Add support for 'require full window' and 'locked' ([#6738](https://github.com/hashicorp/terraform/issues/6738)) - * provider/docker: Docker Container DNS Setting Enhancements ([#7392](https://github.com/hashicorp/terraform/issues/7392)) - * provider/docker: Add `destroy_grace_seconds` option to stop container before delete ([#7513](https://github.com/hashicorp/terraform/issues/7513)) - * provider/docker: Add `pull_trigger` option to `docker_image` to trigger pulling layers of a given image ([#7000](https://github.com/hashicorp/terraform/issues/7000)) - * provider/fastly: Add support for Cache Settings ([#6781](https://github.com/hashicorp/terraform/issues/6781)) - * provider/fastly: Add support for Service Request Settings on `fastly_service_v1` resources ([#6622](https://github.com/hashicorp/terraform/issues/6622)) - * provider/fastly: Add support for custom VCL configuration ([#6662](https://github.com/hashicorp/terraform/issues/6662)) - * provider/google: Support optional uuid naming for Instance Template ([#6604](https://github.com/hashicorp/terraform/issues/6604)) - * provider/openstack: Add support for client certificate authentication ([#6279](https://github.com/hashicorp/terraform/issues/6279)) - * provider/openstack: Allow 
Neutron-based Floating IP to target a specific tenant ([#6454](https://github.com/hashicorp/terraform/issues/6454)) - * provider/openstack: Enable DHCP By Default ([#6838](https://github.com/hashicorp/terraform/issues/6838)) - * provider/openstack: Implement fixed_ip on Neutron floating ip allocations ([#6837](https://github.com/hashicorp/terraform/issues/6837)) - * provider/openstack: Increase timeouts for image resize, subnets, and routers ([#6764](https://github.com/hashicorp/terraform/issues/6764)) - * provider/openstack: Add `lb_provider` argument to `lb_pool_v1` resource ([#6919](https://github.com/hashicorp/terraform/issues/6919)) - * provider/openstack: Enforce `ForceNew` on Instance Block Device ([#6921](https://github.com/hashicorp/terraform/issues/6921)) - * provider/openstack: Can now stop instances before destroying them ([#7184](https://github.com/hashicorp/terraform/issues/7184)) - * provider/openstack: Disassociate LBaaS v1 Monitors from Pool Before Deletion ([#6997](https://github.com/hashicorp/terraform/issues/6997)) - * provider/powerdns: Add support for PowerDNS 4 API ([#7819](https://github.com/hashicorp/terraform/issues/7819)) - * provider/triton: add `triton_machine` `domain names` ([#7149](https://github.com/hashicorp/terraform/issues/7149)) - * provider/vsphere: Add support for `controller_type` to `vsphere_virtual_machine` ([#6785](https://github.com/hashicorp/terraform/issues/6785)) - * provider/vsphere: Fix bug with `vsphere_virtual_machine` wait for ip ([#6377](https://github.com/hashicorp/terraform/issues/6377)) - * provider/vsphere: Virtual machine update disk ([#6619](https://github.com/hashicorp/terraform/issues/6619)) - * provider/vsphere: `vsphere_virtual_machine` adding controller creation logic ([#6853](https://github.com/hashicorp/terraform/issues/6853)) - * provider/vsphere: `vsphere_virtual_machine` added support for `mac address` on `network_interface` ([#6966](https://github.com/hashicorp/terraform/issues/6966)) - * 
provider/vsphere: Enhanced `vsphere` logging capabilities ([#6893](https://github.com/hashicorp/terraform/issues/6893)) - * provider/vsphere: Add DiskEnableUUID option to `vsphere_virtual_machine` ([#7088](https://github.com/hashicorp/terraform/issues/7088)) - * provider/vsphere: Virtual Machine and File resources handle Read errors properly ([#7220](https://github.com/hashicorp/terraform/issues/7220)) - * provider/vsphere: set uuid as `vsphere_virtual_machine` output ([#4382](https://github.com/hashicorp/terraform/issues/4382)) - * provider/vsphere: Add support for `keep_on_remove` to `vsphere_virtual_machine` ([#7169](https://github.com/hashicorp/terraform/issues/7169)) - * provider/vsphere: Add support for additional `vsphere_virtual_machine` SCSI controller types ([#7525](https://github.com/hashicorp/terraform/issues/7525)) - * provisioner/file: File provisioners may now have file content set as an attribute ([#7561](https://github.com/hashicorp/terraform/issues/7561)) - -BUG FIXES: - - * core: Correct the previous fix for a bug causing "attribute not found" messages during destroy, as it was insufficient ([#6599](https://github.com/hashicorp/terraform/issues/6599)) - * core: Fix issue causing syntax errors interpolating count attribute when value passed between modules ([#6833](https://github.com/hashicorp/terraform/issues/6833)) - * core: Fix "diffs didn't match during apply" error for computed sets ([#7205](https://github.com/hashicorp/terraform/issues/7205)) - * core: Fix issue where `terraform init .` would truncate existing files ([#7273](https://github.com/hashicorp/terraform/issues/7273)) - * core: Don't compare diffs between maps with computed values ([#7249](https://github.com/hashicorp/terraform/issues/7249)) - * core: Don't copy existing files over themselves when fetching modules ([#7273](https://github.com/hashicorp/terraform/issues/7273)) - * core: Always increment the state serial number when upgrading the version 
([#7402](https://github.com/hashicorp/terraform/issues/7402)) - * core: Fix a crash during eval when we're upgrading an empty state ([#7403](https://github.com/hashicorp/terraform/issues/7403)) - * core: Honor the `-state-out` flag when applying with a plan file ([#7443](https://github.com/hashicorp/terraform/issues/7443)) - * core: Fix a panic when a `terraform_remote_state` data source doesn't exist ([#7464](https://github.com/hashicorp/terraform/issues/7464)) - * core: Fix issue where `ignore_changes` caused incorrect diffs on dependent resources ([#7563](https://github.com/hashicorp/terraform/issues/7563)) - * provider/aws: Manual changes to `aws_codedeploy_deployment_group` resources are now detected ([#7530](https://github.com/hashicorp/terraform/issues/7530)) - * provider/aws: Changing keys in `aws_dynamodb_table` correctly force new resources ([#6829](https://github.com/hashicorp/terraform/issues/6829)) - * provider/aws: Fix a bug where CloudWatch alarms are created repeatedly if the user does not have permission to use the DescribeAlarms operation ([#7227](https://github.com/hashicorp/terraform/issues/7227)) - * provider/aws: Fix crash in `aws_elasticache_parameter_group` occurring following edits in the console ([#6687](https://github.com/hashicorp/terraform/issues/6687)) - * provider/aws: Fix issue reattaching a VPN gateway to a VPC ([#6987](https://github.com/hashicorp/terraform/issues/6987)) - * provider/aws: Fix issue with Root Block Devices and encrypted flag in Launch Configurations ([#6512](https://github.com/hashicorp/terraform/issues/6512)) - * provider/aws: If more ENIs are attached to `aws_instance`, the one w/ DeviceIndex `0` is always used in context of `aws_instance` (previously unpredictable) ([#6761](https://github.com/hashicorp/terraform/issues/6761)) - * provider/aws: Increased lambda event mapping creation timeout ([#7657](https://github.com/hashicorp/terraform/issues/7657)) - * provider/aws: Handle spurious failures in 
resourceAwsSecurityGroupRuleRead ([#7377](https://github.com/hashicorp/terraform/issues/7377)) - * provider/aws: Make 'stage_name' required in api_gateway_deployment ([#6797](https://github.com/hashicorp/terraform/issues/6797)) - * provider/aws: Mark Lambda function as gone when it's gone ([#6924](https://github.com/hashicorp/terraform/issues/6924)) - * provider/aws: Trim trailing `.` from `name` in `aws_route53_record` resources to prevent spurious diffs ([#6592](https://github.com/hashicorp/terraform/issues/6592)) - * provider/aws: Update Lambda functions on name change ([#7081](https://github.com/hashicorp/terraform/issues/7081)) - * provider/aws: Updating state when `aws_sns_topic_subscription` is missing ([#6629](https://github.com/hashicorp/terraform/issues/6629)) - * provider/aws: `aws_codedeploy_deployment_group` panic when setting `on_premises_instance_tag_filter` ([#6617](https://github.com/hashicorp/terraform/issues/6617)) - * provider/aws: `aws_db_instance` now defaults `publicly_accessible` to false ([#7117](https://github.com/hashicorp/terraform/issues/7117)) - * provider/aws: `aws_opsworks_application.app_source` SSH key is write-only ([#6649](https://github.com/hashicorp/terraform/issues/6649)) - * provider/aws: fix Elastic Beanstalk `cname_prefix` continual plans ([#6653](https://github.com/hashicorp/terraform/issues/6653)) - * provider/aws: Bundle IOPs and Allocated Storage update for DB Instances ([#7203](https://github.com/hashicorp/terraform/issues/7203)) - * provider/aws: Fix case when instanceId is absent in network interfaces ([#6851](https://github.com/hashicorp/terraform/issues/6851)) - * provider/aws: fix aws_security_group_rule refresh ([#6730](https://github.com/hashicorp/terraform/issues/6730)) - * provider/aws: Fix issue with Elastic Beanstalk and invalid settings ([#7222](https://github.com/hashicorp/terraform/issues/7222)) - * provider/aws: Fix issue where aws_app_cookie_stickiness_policy fails on destroy if LoadBalancer doesn't 
exist ([#7166](https://github.com/hashicorp/terraform/issues/7166)) - * provider/aws: Stickiness Policy exists, but isn't assigned to the ELB ([#7188](https://github.com/hashicorp/terraform/issues/7188)) - * provider/aws: Fix issue with `manage_bundler` on `aws_opsworks_layers` ([#7219](https://github.com/hashicorp/terraform/issues/7219)) - * provider/aws: Set Elastic Beanstalk stack name back to state ([#7445](https://github.com/hashicorp/terraform/issues/7445)) - * provider/aws: Allow recreation of VPC Peering Connection when state is rejected ([#7466](https://github.com/hashicorp/terraform/issues/7466)) - * provider/aws: Remove EFS File System from State when NotFound ([#7437](https://github.com/hashicorp/terraform/issues/7437)) - * provider/aws: `aws_customer_gateway` refreshing from state on deleted state ([#7482](https://github.com/hashicorp/terraform/issues/7482)) - * provider/aws: Retry finding `aws_route` after creating it ([#7463](https://github.com/hashicorp/terraform/issues/7463)) - * provider/aws: Refresh CloudWatch Group from state on 404 ([#7576](https://github.com/hashicorp/terraform/issues/7576)) - * provider/aws: Adding in additional retry logic due to latency with delete of `db_option_group` ([#7312](https://github.com/hashicorp/terraform/issues/7312)) - * provider/aws: Safely get ELB values ([#7585](https://github.com/hashicorp/terraform/issues/7585)) - * provider/aws: Fix bug for recurring plans on ec2-classic and vpc in beanstalk ([#6491](https://github.com/hashicorp/terraform/issues/6491)) - * provider/aws: Bump rds_cluster timeout to 15 mins ([#7604](https://github.com/hashicorp/terraform/issues/7604)) - * provider/aws: Fix ICMP fields in `aws_network_acl_rule` to allow ICMP code 0 (echo reply) to be configured ([#7669](https://github.com/hashicorp/terraform/issues/7669)) - * provider/aws: Fix bug with Updating `aws_autoscaling_group` `enabled_metrics` ([#7698](https://github.com/hashicorp/terraform/issues/7698)) - * provider/aws: Ignore 
IOPS on non io1 AWS root_block_device ([#7783](https://github.com/hashicorp/terraform/issues/7783)) - * provider/aws: Ignore missing ENI attachment when trying to detach ENI ([#7185](https://github.com/hashicorp/terraform/issues/7185)) - * provider/aws: Fix issue updating ElasticBeanstalk Environment templates ([#7811](https://github.com/hashicorp/terraform/issues/7811)) - * provider/aws: Restore Defaults to SQS Queues ([#7818](https://github.com/hashicorp/terraform/issues/7818)) - * provider/aws: Don't delete Lambda function from state on initial call of the Read func ([#7829](https://github.com/hashicorp/terraform/issues/7829)) - * provider/aws: `aws_vpn_gateway` should be removed from state when in deleted state ([#7861](https://github.com/hashicorp/terraform/issues/7861)) - * provider/aws: Fix aws_route53_record 0-2 migration ([#7907](https://github.com/hashicorp/terraform/issues/7907)) - * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` ([#6766](https://github.com/hashicorp/terraform/issues/6766)) - * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources ([#6790](https://github.com/hashicorp/terraform/issues/6790)) - * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` ([#6768](https://github.com/hashicorp/terraform/issues/6768)) - * provider/azurerm: Add support for storage container name validation ([#6852](https://github.com/hashicorp/terraform/issues/6852)) - * provider/azurerm: Remove storage containers and blobs when storage accounts are not found ([#6855](https://github.com/hashicorp/terraform/issues/6855)) - * provider/azurerm: `azurerm_virtual_machine` fix `additional_unattend_rm` Windows config option ([#7105](https://github.com/hashicorp/terraform/issues/7105)) - * provider/azurerm: Fix `azurerm_virtual_machine` windows_config ([#7123](https://github.com/hashicorp/terraform/issues/7123)) - * 
provider/azurerm: `azurerm_dns_cname_record` can create CNAME records again ([#7113](https://github.com/hashicorp/terraform/issues/7113)) - * provider/azurerm: `azurerm_network_security_group` now waits for the provisioning state of `ready` before proceeding ([#7307](https://github.com/hashicorp/terraform/issues/7307)) - * provider/azurerm: `computer_name` is now required for `azurerm_virtual_machine` resources ([#7308](https://github.com/hashicorp/terraform/issues/7308)) - * provider/azurerm: destroy azurerm_virtual_machine OS Disk VHD on deletion ([#7584](https://github.com/hashicorp/terraform/issues/7584)) - * provider/azurerm: catch `azurerm_template_deployment` erroring silently ([#7644](https://github.com/hashicorp/terraform/issues/7644)) - * provider/azurerm: changing the name of an `azurerm_virtual_machine` now forces a new resource ([#7646](https://github.com/hashicorp/terraform/issues/7646)) - * provider/azurerm: azurerm_storage_account now returns storage keys value instead of their names ([#7674](https://github.com/hashicorp/terraform/issues/7674)) - * provider/azurerm: `azurerm_virtual_machine` computer_name now Required ([#7308](https://github.com/hashicorp/terraform/issues/7308)) - * provider/azurerm: Change of `availability_set_id` on `azurerm_virtual_machine` should ForceNew ([#7650](https://github.com/hashicorp/terraform/issues/7650)) - * provider/azurerm: Wait for `azurerm_storage_account` to be available ([#7329](https://github.com/hashicorp/terraform/issues/7329)) - * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 ([#6969](https://github.com/hashicorp/terraform/issues/6969)) - * provider/cloudstack: Fix using `cloudstack_network_acl` within a project ([#6743](https://github.com/hashicorp/terraform/issues/6743)) - * provider/cloudstack: Fix refreshing `cloudstack_network_acl_rule` when the associated ACL is deleted ([#7612](https://github.com/hashicorp/terraform/issues/7612)) - * provider/cloudstack: Fix 
refreshing `cloudstack_port_forward` when the associated IP address is no longer associated ([#7612](https://github.com/hashicorp/terraform/issues/7612)) - * provider/cloudstack: Fix creating `cloudstack_network` with offerings that do not support specifying IP ranges ([#7612](https://github.com/hashicorp/terraform/issues/7612)) - * provider/digitalocean: Stop `digitalocean_droplet` forcing new resource on uppercase region ([#7044](https://github.com/hashicorp/terraform/issues/7044)) - * provider/digitalocean: Reassign Floating IP when droplet changes ([#7411](https://github.com/hashicorp/terraform/issues/7411)) - * provider/google: Fix a bug causing an error attempting to delete an already-deleted `google_compute_disk` ([#6689](https://github.com/hashicorp/terraform/issues/6689)) - * provider/mysql: Specifying empty provider credentials no longer causes a panic ([#7211](https://github.com/hashicorp/terraform/issues/7211)) - * provider/openstack: Reassociate Floating IP on network changes ([#6579](https://github.com/hashicorp/terraform/issues/6579)) - * provider/openstack: Ensure CIDRs Are Lower Case ([#6864](https://github.com/hashicorp/terraform/issues/6864)) - * provider/openstack: Rebuild Instances On Network Changes ([#6844](https://github.com/hashicorp/terraform/issues/6844)) - * provider/openstack: Firewall rules are applied in the correct order ([#7194](https://github.com/hashicorp/terraform/issues/7194)) - * provider/openstack: Fix Security Group EOF Error when Adding / Removing Multiple Groups ([#7468](https://github.com/hashicorp/terraform/issues/7468)) - * provider/openstack: Fixing boot volumes interfering with block storage volumes list ([#7649](https://github.com/hashicorp/terraform/issues/7649)) - * provider/vsphere: `gateway` and `ipv6_gateway` are now read from `vsphere_virtual_machine` resources ([#6522](https://github.com/hashicorp/terraform/issues/6522)) - * provider/vsphere: `ipv*_gateway` parameters won't force a new `vsphere_virtual_machine` 
([#6635](https://github.com/hashicorp/terraform/issues/6635)) - * provider/vsphere: adding a `vsphere_virtual_machine` migration ([#7023](https://github.com/hashicorp/terraform/issues/7023)) - * provider/vsphere: Don't require vsphere debug paths to be set ([#7027](https://github.com/hashicorp/terraform/issues/7027)) - * provider/vsphere: Fix bug where `enable_disk_uuid` was not set on `vsphere_virtual_machine` resources ([#7275](https://github.com/hashicorp/terraform/issues/7275)) - * provider/vsphere: Make `vsphere_virtual_machine` `product_key` optional ([#7410](https://github.com/hashicorp/terraform/issues/7410)) - * provider/vsphere: Refreshing devices list after adding a disk or cdrom controller ([#7167](https://github.com/hashicorp/terraform/issues/7167)) - * provider/vsphere: `vsphere_virtual_machine` no longer has to be powered on to delete ([#7206](https://github.com/hashicorp/terraform/issues/7206)) - * provider/vSphere: Fixes the hasBootableVmdk flag when attaching multiple disks ([#7804](https://github.com/hashicorp/terraform/issues/7804)) - * provisioner/remote-exec: Properly seed random script paths so they are not deterministic across runs ([#7413](https://github.com/hashicorp/terraform/issues/7413)) - -## 0.6.16 (May 9, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `aws_eip` field `private_ip` is now a computed value, and cannot be set in your configuration. - Use `associate_with_private_ip` instead. 
See ([#6521](https://github.com/hashicorp/terraform/issues/6521)) - -FEATURES: - - * **New provider:** `librato` ([#3371](https://github.com/hashicorp/terraform/issues/3371)) - * **New provider:** `softlayer` ([#4327](https://github.com/hashicorp/terraform/issues/4327)) - * **New resource:** `aws_api_gateway_account` ([#6321](https://github.com/hashicorp/terraform/issues/6321)) - * **New resource:** `aws_api_gateway_authorizer` ([#6320](https://github.com/hashicorp/terraform/issues/6320)) - * **New resource:** `aws_db_event_subscription` ([#6367](https://github.com/hashicorp/terraform/issues/6367)) - * **New resource:** `aws_db_option_group` ([#4401](https://github.com/hashicorp/terraform/issues/4401)) - * **New resource:** `aws_eip_association` ([#6552](https://github.com/hashicorp/terraform/issues/6552)) - * **New resource:** `openstack_networking_secgroup_rule_v2` ([#6410](https://github.com/hashicorp/terraform/issues/6410)) - * **New resource:** `openstack_networking_secgroup_v2` ([#6410](https://github.com/hashicorp/terraform/issues/6410)) - * **New resource:** `vsphere_file` ([#6401](https://github.com/hashicorp/terraform/issues/6401)) - -IMPROVEMENTS: - - * core: update HCL dependency to improve whitespace handling in `terraform fmt` ([#6347](https://github.com/hashicorp/terraform/issues/6347)) - * core: Add support for marking outputs as sensitive ([#6559](https://github.com/hashicorp/terraform/issues/6559)) - * provider/aws: Add agent_version argument to `aws_opswork_stack` ([#6493](https://github.com/hashicorp/terraform/issues/6493)) - * provider/aws: Add support for request parameters to `api_gateway_method` & `api_gateway_integration` ([#6501](https://github.com/hashicorp/terraform/issues/6501)) - * provider/aws: Add support for response parameters to `api_gateway_method_response` & `api_gateway_integration_response` ([#6344](https://github.com/hashicorp/terraform/issues/6344)) - * provider/aws: Allow empty S3 config in Cloudfront Origin 
([#6487](https://github.com/hashicorp/terraform/issues/6487)) - * provider/aws: Improve error handling in IAM Server Certificates ([#6442](https://github.com/hashicorp/terraform/issues/6442)) - * provider/aws: Use `sts:GetCallerIdentity` as additional method for getting AWS account ID ([#6385](https://github.com/hashicorp/terraform/issues/6385)) - * provider/aws: `aws_redshift_cluster` `automated_snapshot_retention_period` didn't allow 0 value ([#6537](https://github.com/hashicorp/terraform/issues/6537)) - * provider/aws: Add CloudFront `hosted_zone_id` attribute ([#6530](https://github.com/hashicorp/terraform/issues/6530)) - * provider/azurerm: Increase timeout for ARM Template deployments to 40 minutes ([#6319](https://github.com/hashicorp/terraform/issues/6319)) - * provider/azurerm: Make `private_ip_address` an exported field on `azurerm_network_interface` ([#6538](https://github.com/hashicorp/terraform/issues/6538)) - * provider/azurerm: Add support for `tags` to `azurerm_virtual_machine` ([#6556](https://github.com/hashicorp/terraform/issues/6556)) - * provider/azurerm: Add `os_type` and `image_uri` in `azurerm_virtual_machine` ([#6553](https://github.com/hashicorp/terraform/issues/6553)) - * provider/cloudflare: Add proxied option to `cloudflare_record` ([#5508](https://github.com/hashicorp/terraform/issues/5508)) - * provider/docker: Add ability to keep docker image locally on terraform destroy ([#6376](https://github.com/hashicorp/terraform/issues/6376)) - * provider/fastly: Add S3 Log Streaming to Fastly Service ([#6378](https://github.com/hashicorp/terraform/issues/6378)) - * provider/fastly: Add Conditions to Fastly Service ([#6481](https://github.com/hashicorp/terraform/issues/6481)) - * provider/github: Add support for Github Enterprise via base_url configuration option ([#6434](https://github.com/hashicorp/terraform/issues/6434)) - * provider/triton: Add support for specifying network interfaces on `triton machine` resources 
([#6418](https://github.com/hashicorp/terraform/issues/6418)) - * provider/triton: Deleted firewall rules no longer prevent refresh ([#6529](https://github.com/hashicorp/terraform/issues/6529)) - * provider/vsphere: Add `skip_customization` option to `vsphere_virtual_machine` resources ([#6355](https://github.com/hashicorp/terraform/issues/6355)) - * provider/vsphere: Add ability to specify and mount bootable vmdk in `vsphere_virtual_machine` ([#6146](https://github.com/hashicorp/terraform/issues/6146)) - * provider/vsphere: Add support for IPV6 to `vsphere_virtual_machine` ([#6457](https://github.com/hashicorp/terraform/issues/6457)) - * provider/vsphere: Add support for `memory_reservation` to `vsphere_virtual_machine` ([#6036](https://github.com/hashicorp/terraform/issues/6036)) - * provider/vsphere: Checking for empty diskPath in `vsphere_virtual_machine` before creating ([#6400](https://github.com/hashicorp/terraform/issues/6400)) - * provider/vsphere: Support updates to vcpu and memory on `vsphere_virtual_machine` ([#6356](https://github.com/hashicorp/terraform/issues/6356)) - * remote/s3: Logic for loading credentials now follows the same [conventions as AWS provider](https://www.terraform.io/docs/providers/aws/index.html#authentication) which means it also supports EC2 role auth and session token (e.g. 
assumed IAM Roles) ([#5270](https://github.com/hashicorp/terraform/issues/5270)) - -BUG FIXES: - - * core: Boolean values in diffs are normalized to `true` and `false`, eliminating some erroneous diffs ([#6499](https://github.com/hashicorp/terraform/issues/6499)) - * core: Fix a bug causing "attribute not found" messages during destroy ([#6557](https://github.com/hashicorp/terraform/issues/6557)) - * provider/aws: Allow account ID checks on EC2 instances & w/ federated accounts ([#5030](https://github.com/hashicorp/terraform/issues/5030)) - * provider/aws: Fix an eventually consistent issue aws_security_group_rule and possible duplications ([#6325](https://github.com/hashicorp/terraform/issues/6325)) - * provider/aws: Fix bug where `aws_elastic_beanstalk_environment` ignored `wait_for_ready_timeout` ([#6358](https://github.com/hashicorp/terraform/issues/6358)) - * provider/aws: Fix bug where `aws_elastic_beanstalk_environment` update config template didn't work ([#6342](https://github.com/hashicorp/terraform/issues/6342)) - * provider/aws: Fix issue in updating CloudFront distribution LoggingConfig ([#6407](https://github.com/hashicorp/terraform/issues/6407)) - * provider/aws: Fix issue in upgrading AutoScaling Policy to use `min_adjustment_magnitude` ([#6440](https://github.com/hashicorp/terraform/issues/6440)) - * provider/aws: Fix issue replacing Network ACL Relationship ([#6421](https://github.com/hashicorp/terraform/issues/6421)) - * provider/aws: Fix issue with KMS Alias keys and name prefixes ([#6328](https://github.com/hashicorp/terraform/issues/6328)) - * provider/aws: Fix issue with encrypted snapshots of block devices in `aws_launch_configuration` resources ([#6452](https://github.com/hashicorp/terraform/issues/6452)) - * provider/aws: Fix read of `aws_cloudwatch_log_group` after an update is applied ([#6384](https://github.com/hashicorp/terraform/issues/6384)) - * provider/aws: Fix updating `number_of_nodes` on `aws_redshift_cluster` 
([#6333](https://github.com/hashicorp/terraform/issues/6333)) - * provider/aws: Omit `aws_cloudfront_distribution` custom_error fields when not explicitly set ([#6382](https://github.com/hashicorp/terraform/issues/6382)) - * provider/aws: Refresh state on `aws_sqs_queue` not found ([#6381](https://github.com/hashicorp/terraform/issues/6381)) - * provider/aws: Respect `selection_pattern` in `aws_api_gateway_integration_response` (previously ignored field) ([#5893](https://github.com/hashicorp/terraform/issues/5893)) - * provider/aws: `aws_cloudfront_distribution` resources now require the `cookies` argument ([#6505](https://github.com/hashicorp/terraform/issues/6505)) - * provider/aws: `aws_route` crash when used with `aws_vpc_endpoint` ([#6338](https://github.com/hashicorp/terraform/issues/6338)) - * provider/aws: validate `cluster_id` length for `aws_elasticache_cluster` ([#6330](https://github.com/hashicorp/terraform/issues/6330)) - * provider/azurerm: `ssh_keys` can now be set for `azurerm_virtual_machine` resources, allowing provisioning ([#6541](https://github.com/hashicorp/terraform/issues/6541)) - * provider/azurerm: Fix issue that updating `azurerm_virtual_machine` was failing due to empty adminPassword ([#6528](https://github.com/hashicorp/terraform/issues/6528)) - * provider/azurerm: `storage_data_disk` settings now work correctly on `azurerm_virtual_machine` resources ([#6543](https://github.com/hashicorp/terraform/issues/6543)) - * provider/cloudflare: can manage apex records ([#6449](https://github.com/hashicorp/terraform/issues/6449)) - * provider/cloudflare: won't refresh with incorrect record if names match ([#6449](https://github.com/hashicorp/terraform/issues/6449)) - * provider/datadog: `notify_no_data` and `no_data_timeframe` are set correctly for `datadog_monitor` resources ([#6509](https://github.com/hashicorp/terraform/issues/6509)) - * provider/docker: Fix crash when using empty string in the `command` list in `docker_container` resources 
([#6424](https://github.com/hashicorp/terraform/issues/6424)) - * provider/vsphere: Memory reservations are now set correctly in `vsphere_virtual_machine` resources ([#6482](https://github.com/hashicorp/terraform/issues/6482)) - -## 0.6.15 (April 22, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * `aws_instance` - if you still use `security_groups` field for SG IDs - i.e. inside VPC, this will generate diffs during `plan` and `apply` will **recreate** the resource. Terraform expects IDs (VPC SGs) inside `vpc_security_group_ids`. - -FEATURES: - - * **New command:** `terraform fmt` to automatically normalize config file style ([#4955](https://github.com/hashicorp/terraform/issues/4955)) - * **New interpolation function:** `jsonencode` ([#5890](https://github.com/hashicorp/terraform/issues/5890)) - * **New provider:** `cobbler` ([#5969](https://github.com/hashicorp/terraform/issues/5969)) - * **New provider:** `fastly` ([#5814](https://github.com/hashicorp/terraform/issues/5814)) - * **New resource:** `aws_cloudfront_distribution` ([#5221](https://github.com/hashicorp/terraform/issues/5221)) - * **New resource:** `aws_cloudfront_origin_access_identity` ([#5221](https://github.com/hashicorp/terraform/issues/5221)) - * **New resource:** `aws_iam_user_ssh_key` ([#5774](https://github.com/hashicorp/terraform/issues/5774)) - * **New resource:** `aws_s3_bucket_notification` ([#5473](https://github.com/hashicorp/terraform/issues/5473)) - * **New resource:** `cloudstack_static_nat` ([#6004](https://github.com/hashicorp/terraform/issues/6004)) - * **New resource:** `consul_key_prefix` ([#5988](https://github.com/hashicorp/terraform/issues/5988)) - * **New resource:** `aws_default_network_acl` ([#6165](https://github.com/hashicorp/terraform/issues/6165)) - * **New resource:** `triton_fabric` ([#5920](https://github.com/hashicorp/terraform/issues/5920)) - * **New resource:** `triton_vlan` ([#5920](https://github.com/hashicorp/terraform/issues/5920)) - * **New resource:** 
`aws_opsworks_application` ([#4419](https://github.com/hashicorp/terraform/issues/4419)) - * **New resource:** `aws_opsworks_instance` ([#4276](https://github.com/hashicorp/terraform/issues/4276)) - * **New resource:** `aws_cloudwatch_log_subscription_filter` ([#5996](https://github.com/hashicorp/terraform/issues/5996)) - * **New resource:** `openstack_networking_router_route_v2` ([#6207](https://github.com/hashicorp/terraform/issues/6207)) - -IMPROVEMENTS: - - * command/apply: Output will now show periodic status updates of slow resources. ([#6163](https://github.com/hashicorp/terraform/issues/6163)) - * core: Variables passed between modules are now type checked ([#6185](https://github.com/hashicorp/terraform/issues/6185)) - * core: Smaller release binaries by stripping debug information ([#6238](https://github.com/hashicorp/terraform/issues/6238)) - * provider/aws: Add support for Step Scaling in `aws_autoscaling_policy` ([#4277](https://github.com/hashicorp/terraform/issues/4277)) - * provider/aws: Add support for `cname_prefix` to `aws_elastic_beanstalk_environment` resource ([#5966](https://github.com/hashicorp/terraform/issues/5966)) - * provider/aws: Add support for trigger_configuration to `aws_codedeploy_deployment_group` ([#5599](https://github.com/hashicorp/terraform/issues/5599)) - * provider/aws: Adding outputs for elastic_beanstalk_environment resource ([#5915](https://github.com/hashicorp/terraform/issues/5915)) - * provider/aws: Adds `wait_for_ready_timeout` option to `aws_elastic_beanstalk_environment` ([#5967](https://github.com/hashicorp/terraform/issues/5967)) - * provider/aws: Allow `aws_db_subnet_group` description to be updated ([#5921](https://github.com/hashicorp/terraform/issues/5921)) - * provider/aws: Allow multiple EIPs to associate to single ENI ([#6070](https://github.com/hashicorp/terraform/issues/6070)) - * provider/aws: Change `aws_elb` access_logs to list type ([#5065](https://github.com/hashicorp/terraform/issues/5065)) - * 
provider/aws: Check that InternetGateway exists before returning from creation ([#6105](https://github.com/hashicorp/terraform/issues/6105)) - * provider/aws: Don't Base64-encode EC2 userdata if it is already Base64 encoded ([#6140](https://github.com/hashicorp/terraform/issues/6140)) - * provider/aws: Making the Cloudwatch Event Rule Target `target_id` optional ([#5787](https://github.com/hashicorp/terraform/issues/5787)) - * provider/aws: Timeouts for `elasticsearch_domain` are increased ([#5910](https://github.com/hashicorp/terraform/issues/5910)) - * provider/aws: `aws_codecommit_repository` set `default_branch` only if defined ([#5904](https://github.com/hashicorp/terraform/issues/5904)) - * provider/aws: `aws_redshift_cluster` allows usernames with underscore in it ([#5935](https://github.com/hashicorp/terraform/issues/5935)) - * provider/aws: normalise json for `aws_sns_topic` ([#6089](https://github.com/hashicorp/terraform/issues/6089)) - * provider/aws: normalize json for `aws_cloudwatch_event_rule` ([#6025](https://github.com/hashicorp/terraform/issues/6025)) - * provider/aws: increase timeout for aws_redshift_cluster ([#6305](https://github.com/hashicorp/terraform/issues/6305)) - * provider/aws: Opsworks layers now support `custom_json` argument ([#4272](https://github.com/hashicorp/terraform/issues/4272)) - * provider/aws: Added migration for `tier` attribute in `aws_elastic_beanstalk_environment` ([#6167](https://github.com/hashicorp/terraform/issues/6167)) - * provider/aws: Use resource.Retry for route creation and deletion ([#6225](https://github.com/hashicorp/terraform/issues/6225)) - * provider/aws: Add support S3 Bucket Lifecycle Rule ([#6220](https://github.com/hashicorp/terraform/issues/6220)) - * provider/clc: Override default `account` alias in provider config ([#5785](https://github.com/hashicorp/terraform/issues/5785)) - * provider/cloudstack: Deprecate `ipaddress` in favour of `ip_address` in all resources 
([#6010](https://github.com/hashicorp/terraform/issues/6010)) - * provider/cloudstack: Deprecate allowing names (instead of IDs) for parameters that reference other resources ([#6123](https://github.com/hashicorp/terraform/issues/6123)) - * provider/datadog: Add heredoc support to message, escalation_message, and query ([#5788](https://github.com/hashicorp/terraform/issues/5788)) - * provider/docker: Add support for docker run --user option ([#5300](https://github.com/hashicorp/terraform/issues/5300)) - * provider/github: Add support for privacy to `github_team` ([#6116](https://github.com/hashicorp/terraform/issues/6116)) - * provider/google: Accept GOOGLE_CLOUD_KEYFILE_JSON env var for credentials ([#6007](https://github.com/hashicorp/terraform/issues/6007)) - * provider/google: Add "project" argument and attribute to all GCP compute resources which inherit from the provider's value ([#6112](https://github.com/hashicorp/terraform/issues/6112)) - * provider/google: Make "project" attribute on provider configuration optional ([#6112](https://github.com/hashicorp/terraform/issues/6112)) - * provider/google: Read more common configuration values from the environment and clarify precedence ordering ([#6114](https://github.com/hashicorp/terraform/issues/6114)) - * provider/google: `addons_config` and `subnetwork` added as attributes to `google_container_cluster` ([#5871](https://github.com/hashicorp/terraform/issues/5871)) - * provider/fastly: Add support for Request Headers ([#6197](https://github.com/hashicorp/terraform/issues/6197)) - * provider/fastly: Add support for Gzip rules ([#6247](https://github.com/hashicorp/terraform/issues/6247)) - * provider/openstack: Add value_specs argument and attribute for routers ([#4898](https://github.com/hashicorp/terraform/issues/4898)) - * provider/openstack: Allow subnets with no gateway ([#6060](https://github.com/hashicorp/terraform/issues/6060)) - * provider/openstack: Enable Token Authentication 
([#6081](https://github.com/hashicorp/terraform/issues/6081)) - * provider/postgresql: New `ssl_mode` argument allowing different SSL usage tradeoffs ([#6008](https://github.com/hashicorp/terraform/issues/6008)) - * provider/vsphere: Support for linked clones and Windows-specific guest config options ([#6087](https://github.com/hashicorp/terraform/issues/6087)) - * provider/vsphere: Checking for Powered Off State before `vsphere_virtual_machine` deletion ([#6283](https://github.com/hashicorp/terraform/issues/6283)) - * provider/vsphere: Support mounting ISO images to virtual cdrom drives ([#4243](https://github.com/hashicorp/terraform/issues/4243)) - * provider/vsphere: Fix missing ssh connection info ([#4283](https://github.com/hashicorp/terraform/issues/4283)) - * provider/google: Deprecate unused "region" attribute in `global_forwarding_rule`; this attribute was never used anywhere in the computation of the resource ([#6112](https://github.com/hashicorp/terraform/issues/6112)) - * provider/cloudstack: Add group attribute to `cloudstack_instance` resource ([#6023](https://github.com/hashicorp/terraform/issues/6023)) - * provider/azurerm: Provide a meaningful error message when credentials are not correct ([#6290](https://github.com/hashicorp/terraform/issues/6290)) - * provider/cloudstack: Improve support for using projects ([#6282](https://github.com/hashicorp/terraform/issues/6282)) - -BUG FIXES: - - * core: Providers are now correctly inherited down a nested module tree ([#6186](https://github.com/hashicorp/terraform/issues/6186)) - * provider/aws: Convert protocols to standard format for Security Groups ([#5881](https://github.com/hashicorp/terraform/issues/5881)) - * provider/aws: Fix Lambda VPC integration (missing `vpc_id` field in schema) ([#6157](https://github.com/hashicorp/terraform/issues/6157)) - * provider/aws: Fix `aws_route` panic when destination CIDR block is nil ([#5781](https://github.com/hashicorp/terraform/issues/5781)) - * provider/aws: Fix 
issue re-creating deleted VPC peering connections ([#5959](https://github.com/hashicorp/terraform/issues/5959)) - * provider/aws: Fix issue with changing iops when also changing storage type to io1 on RDS ([#5676](https://github.com/hashicorp/terraform/issues/5676)) - * provider/aws: Fix issue with retrying deletion of Network ACLs ([#5954](https://github.com/hashicorp/terraform/issues/5954)) - * provider/aws: Fix potential crash when receiving malformed `aws_route` API responses ([#5867](https://github.com/hashicorp/terraform/issues/5867)) - * provider/aws: Guard against empty responses from Lambda Permissions ([#5838](https://github.com/hashicorp/terraform/issues/5838)) - * provider/aws: Normalize and compact SQS Redrive, Policy JSON ([#5888](https://github.com/hashicorp/terraform/issues/5888)) - * provider/aws: Fix issue updating ElasticBeanstalk Configuration Templates ([#6307](https://github.com/hashicorp/terraform/issues/6307)) - * provider/aws: Remove CloudTrail Trail from state if not found ([#6024](https://github.com/hashicorp/terraform/issues/6024)) - * provider/aws: Fix crash in AWS S3 Bucket when website index/error is empty ([#6269](https://github.com/hashicorp/terraform/issues/6269)) - * provider/aws: Report better error message in `aws_route53_record` when `set_identifier` is required ([#5777](https://github.com/hashicorp/terraform/issues/5777)) - * provider/aws: Show human-readable error message when failing to read an EBS volume ([#6038](https://github.com/hashicorp/terraform/issues/6038)) - * provider/aws: set ASG `health_check_grace_period` default to 300 ([#5830](https://github.com/hashicorp/terraform/issues/5830)) - * provider/aws: Fix issue with Opsworks and empty Custom Cook Book sources ([#6078](https://github.com/hashicorp/terraform/issues/6078)) - * provider/aws: wait for IAM instance profile to propagate when creating Opsworks stacks ([#6049](https://github.com/hashicorp/terraform/issues/6049)) - * provider/aws: Don't read back 
`aws_opsworks_stack` cookbooks source password ([#6203](https://github.com/hashicorp/terraform/issues/6203)) - * provider/aws: Resolves DefaultOS and ConfigurationManager conflict on `aws_opsworks_stack` ([#6244](https://github.com/hashicorp/terraform/issues/6244)) - * provider/aws: Renaming `aws_elastic_beanstalk_configuration_template` `option_settings` to `setting` ([#6043](https://github.com/hashicorp/terraform/issues/6043)) - * provider/aws: `aws_customer_gateway` will properly populate `bgp_asn` on refresh. [no issue] - * provider/aws: Refresh state on `aws_directory_service_directory` not found ([#6294](https://github.com/hashicorp/terraform/issues/6294)) - * provider/aws: `aws_elb` `cross_zone_load_balancing` is not refreshed in the state file ([#6295](https://github.com/hashicorp/terraform/issues/6295)) - * provider/aws: `aws_autoscaling_group` will properly populate `tag` on refresh. [no issue] - * provider/azurerm: Fix detection of `azurerm_storage_account` resources removed manually ([#5878](https://github.com/hashicorp/terraform/issues/5878)) - * provider/docker: Docker Image will be deleted on destroy ([#5801](https://github.com/hashicorp/terraform/issues/5801)) - * provider/openstack: Fix Disabling DHCP on Subnets ([#6052](https://github.com/hashicorp/terraform/issues/6052)) - * provider/openstack: Fix resizing when Flavor Name changes ([#6020](https://github.com/hashicorp/terraform/issues/6020)) - * provider/openstack: Fix Access Address Detection ([#6181](https://github.com/hashicorp/terraform/issues/6181)) - * provider/openstack: Fix admin_state_up on openstack_lb_member_v1 ([#6267](https://github.com/hashicorp/terraform/issues/6267)) - * provider/triton: Firewall status on `triton_machine` resources is reflected correctly ([#6119](https://github.com/hashicorp/terraform/issues/6119)) - * provider/triton: Fix time out when applying updates to Triton machine metadata ([#6149](https://github.com/hashicorp/terraform/issues/6149)) - * 
provider/vsphere: Add error handling to `vsphere_folder` ([#6095](https://github.com/hashicorp/terraform/issues/6095)) - * provider/cloudstack: Fix marshalling errors when using CloudStack 4.7.x (or newer) [GH-#226] - -## 0.6.14 (March 21, 2016) - -FEATURES: - - * **New provider:** `triton` - Manage Joyent Triton public cloud or on-premise installations ([#5738](https://github.com/hashicorp/terraform/issues/5738)) - * **New provider:** `clc` - Manage CenturyLink Cloud resources ([#4893](https://github.com/hashicorp/terraform/issues/4893)) - * **New provider:** `github` - Manage GitHub Organization permissions with Terraform config ([#5194](https://github.com/hashicorp/terraform/issues/5194)) - * **New provider:** `influxdb` - Manage InfluxDB databases ([#3478](https://github.com/hashicorp/terraform/issues/3478)) - * **New provider:** `ultradns` - Manage UltraDNS records ([#5716](https://github.com/hashicorp/terraform/issues/5716)) - * **New resource:** `aws_cloudwatch_log_metric_filter` ([#5444](https://github.com/hashicorp/terraform/issues/5444)) - * **New resource:** `azurerm_virtual_machine` ([#5514](https://github.com/hashicorp/terraform/issues/5514)) - * **New resource:** `azurerm_template_deployment` ([#5758](https://github.com/hashicorp/terraform/issues/5758)) - * **New interpolation function:** `uuid` ([#5575](https://github.com/hashicorp/terraform/issues/5575)) - -IMPROVEMENTS: - - * core: provisioners connecting via WinRM now respect HTTPS settings ([#5761](https://github.com/hashicorp/terraform/issues/5761)) - * provider/aws: `aws_db_instance` now makes `identifier` optional and generates a unique ID when it is omitted ([#5723](https://github.com/hashicorp/terraform/issues/5723)) - * provider/aws: `aws_redshift_cluster` now allows `publicly_accessible` to be modified ([#5721](https://github.com/hashicorp/terraform/issues/5721)) - * provider/aws: `aws_kms_alias` now allows name to be auto-generated with a `name_prefix` 
([#5594](https://github.com/hashicorp/terraform/issues/5594)) - -BUG FIXES: - - * core: Color output is now shown correctly when running Terraform on Windows ([#5718](https://github.com/hashicorp/terraform/issues/5718)) - * core: HEREDOCs can now be indented in line with configuration using `<<-` and hanging indent is removed ([#5740](https://github.com/hashicorp/terraform/issues/5740)) - * core: Invalid HCL syntax of nested object blocks no longer causes a crash ([#5740](https://github.com/hashicorp/terraform/issues/5740)) - * core: Local directory-based modules now use junctions instead of symbolic links on Windows ([#5739](https://github.com/hashicorp/terraform/issues/5739)) - * core: Modules sourced from a Mercurial repository now work correctly on Windows ([#5739](https://github.com/hashicorp/terraform/issues/5739)) - * core: Address some issues with ignore_changes ([#5635](https://github.com/hashicorp/terraform/issues/5635)) - * core: Add a lock to fix an interpolation issue caught by the Go 1.6 concurrent map access detector ([#5772](https://github.com/hashicorp/terraform/issues/5772)) - * provider/aws: Fix crash when an `aws_rds_cluster_instance` is removed outside of Terraform ([#5717](https://github.com/hashicorp/terraform/issues/5717)) - * provider/aws: `aws_cloudformation_stack` use `timeout_in_minutes` for retry timeout to prevent unnecessary timeouts ([#5712](https://github.com/hashicorp/terraform/issues/5712)) - * provider/aws: `aws_lambda_function` resources no longer error on refresh if deleted externally to Terraform ([#5668](https://github.com/hashicorp/terraform/issues/5668)) - * provider/aws: `aws_vpn_connection` resources deleted via the console no longer cause a crash ([#5747](https://github.com/hashicorp/terraform/issues/5747)) - * provider/aws: Fix crasher in Elastic Beanstalk Configuration when using options ([#5756](https://github.com/hashicorp/terraform/issues/5756)) - * provider/aws: Fix issue preventing `aws_opsworks_stack` from working 
with Windows set as the OS ([#5724](https://github.com/hashicorp/terraform/issues/5724)) - * provider/digitalocean: `digitalocean_ssh_key` resources no longer cause a panic if there is no network connectivity ([#5748](https://github.com/hashicorp/terraform/issues/5748)) - * provider/google: Default description `google_dns_managed_zone` resources to "Managed By Terraform" ([#5428](https://github.com/hashicorp/terraform/issues/5428)) - * provider/google: Fix error message on invalid instance URL for `google_compute_instance_group` ([#5715](https://github.com/hashicorp/terraform/issues/5715)) - * provider/vsphere: provide `host` to provisioner connections ([#5558](https://github.com/hashicorp/terraform/issues/5558)) - * provisioner/remote-exec: Address race condition introduced with script cleanup step introduced in 0.6.13 ([#5751](https://github.com/hashicorp/terraform/issues/5751)) - -## 0.6.13 (March 16, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `aws_s3_bucket_object` field `etag` is now trimming off quotes (returns raw MD5 hash) ([#5305](https://github.com/hashicorp/terraform/issues/5305)) - * provider/aws: `aws_autoscaling_group` now supports metrics collection, so a diff installing the default value of `1Minute` for the `metrics_granularity` field is expected. This diff should resolve in the next `terraform apply` w/ no AWS API calls ([#4688](https://github.com/hashicorp/terraform/issues/4688)) - * provider/consul: `consul_keys` `key` blocks now respect `delete` flag for removing individual blocks. Previously keys would be deleted only when the entire resource was removed. 
- * provider/google: `next_hop_network` on `google_compute_route` is now read-only, to mirror the behavior in the official docs ([#5564](https://github.com/hashicorp/terraform/issues/5564)) - * state/remote/http: PUT requests for this backend will now have `Content-Type: application/json` instead of `application/octet-stream` ([#5499](https://github.com/hashicorp/terraform/issues/5499)) - -FEATURES: - - * **New command:** `terraform untaint` ([#5527](https://github.com/hashicorp/terraform/issues/5527)) - * **New resource:** `aws_api_gateway_api_key` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_deployment` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_integration_response` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_integration` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_method_response` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_method` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_model` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_resource` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_rest_api` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_elastic_beanstalk_application` ([#3157](https://github.com/hashicorp/terraform/issues/3157)) - * **New resource:** `aws_elastic_beanstalk_configuration_template` ([#3157](https://github.com/hashicorp/terraform/issues/3157)) - * **New resource:** `aws_elastic_beanstalk_environment` ([#3157](https://github.com/hashicorp/terraform/issues/3157)) - * **New resource:** `aws_iam_account_password_policy` 
([#5029](https://github.com/hashicorp/terraform/issues/5029)) - * **New resource:** `aws_kms_alias` ([#3928](https://github.com/hashicorp/terraform/issues/3928)) - * **New resource:** `aws_kms_key` ([#3928](https://github.com/hashicorp/terraform/issues/3928)) - * **New resource:** `google_compute_instance_group` ([#4087](https://github.com/hashicorp/terraform/issues/4087)) - -IMPROVEMENTS: - - * provider/aws: Add `repository_link` as a computed field for `aws_ecr_repository` ([#5524](https://github.com/hashicorp/terraform/issues/5524)) - * provider/aws: Add ability to update Route53 zone comments ([#5318](https://github.com/hashicorp/terraform/issues/5318)) - * provider/aws: Add support for Metrics Collection to `aws_autoscaling_group` ([#4688](https://github.com/hashicorp/terraform/issues/4688)) - * provider/aws: Add support for `description` to `aws_network_interface` ([#5523](https://github.com/hashicorp/terraform/issues/5523)) - * provider/aws: Add support for `storage_encrypted` to `aws_rds_cluster` ([#5520](https://github.com/hashicorp/terraform/issues/5520)) - * provider/aws: Add support for routing rules on `aws_s3_bucket` resources ([#5327](https://github.com/hashicorp/terraform/issues/5327)) - * provider/aws: Enable updates & versioning for `aws_s3_bucket_object` ([#5305](https://github.com/hashicorp/terraform/issues/5305)) - * provider/aws: Guard against Nil Reference in Redshift Endpoints ([#5593](https://github.com/hashicorp/terraform/issues/5593)) - * provider/aws: Lambda S3 object version defaults to `$LATEST` if unspecified ([#5370](https://github.com/hashicorp/terraform/issues/5370)) - * provider/aws: Retry DB Creation on IAM propagation error ([#5515](https://github.com/hashicorp/terraform/issues/5515)) - * provider/aws: Support KMS encryption of S3 objects ([#5453](https://github.com/hashicorp/terraform/issues/5453)) - * provider/aws: `aws_autoscaling_lifecycle_hook` now have `notification_target_arn` and `role_arn` as optional 
([#5616](https://github.com/hashicorp/terraform/issues/5616)) - * provider/aws: `aws_ecs_service` validates number of `load_balancer`s before creation/updates ([#5605](https://github.com/hashicorp/terraform/issues/5605)) - * provider/aws: send Terraform version in User-Agent ([#5621](https://github.com/hashicorp/terraform/issues/5621)) - * provider/cloudflare: Change `cloudflare_record` type to ForceNew ([#5353](https://github.com/hashicorp/terraform/issues/5353)) - * provider/consul: `consul_keys` now detects drift and supports deletion of individual `key` blocks ([#5210](https://github.com/hashicorp/terraform/issues/5210)) - * provider/digitalocean: Guard against Nil reference in `digitalocean_droplet` ([#5588](https://github.com/hashicorp/terraform/issues/5588)) - * provider/docker: Add support for `unless-stopped` to docker container `restart_policy` ([#5337](https://github.com/hashicorp/terraform/issues/5337)) - * provider/google: Mark `next_hop_network` as read-only on `google_compute_route` ([#5564](https://github.com/hashicorp/terraform/issues/5564)) - * provider/google: Validate VPN tunnel peer_ip at plan time ([#5501](https://github.com/hashicorp/terraform/issues/5501)) - * provider/openstack: Add Support for Domain ID and Domain Name environment variables ([#5355](https://github.com/hashicorp/terraform/issues/5355)) - * provider/openstack: Add support for instances to have multiple ephemeral disks. 
([#5131](https://github.com/hashicorp/terraform/issues/5131)) - * provider/openstack: Re-Add server.AccessIPv4 and server.AccessIPv6 ([#5366](https://github.com/hashicorp/terraform/issues/5366)) - * provider/vsphere: Add support for disk init types ([#4284](https://github.com/hashicorp/terraform/issues/4284)) - * provisioner/remote-exec: Clear out scripts after uploading ([#5577](https://github.com/hashicorp/terraform/issues/5577)) - * state/remote/http: Change content type of PUT requests to the more appropriate `application/json` ([#5499](https://github.com/hashicorp/terraform/issues/5499)) - -BUG FIXES: - - * core: Disallow negative indices in the element() interpolation function, preventing crash ([#5263](https://github.com/hashicorp/terraform/issues/5263)) - * core: Fix issue that caused tainted resource destroys to be improperly filtered out when using -target and a plan file ([#5516](https://github.com/hashicorp/terraform/issues/5516)) - * core: Fix several issues with retry logic causing spurious "timeout while waiting for state to become ..." 
errors and unnecessary retry loops ([#5460](https://github.com/hashicorp/terraform/issues/5460)), ([#5538](https://github.com/hashicorp/terraform/issues/5538)), ([#5543](https://github.com/hashicorp/terraform/issues/5543)), ([#5553](https://github.com/hashicorp/terraform/issues/5553)) - * core: Includes upstream HCL fix to properly detect unbalanced braces and throw an error ([#5400](https://github.com/hashicorp/terraform/issues/5400)) - * provider/aws: Allow recovering from failed CloudWatch Event Target creation ([#5395](https://github.com/hashicorp/terraform/issues/5395)) - * provider/aws: Fix EC2 Classic SG Rule issue when referencing rules by name ([#5533](https://github.com/hashicorp/terraform/issues/5533)) - * provider/aws: Fix `aws_cloudformation_stack` update for `parameters` & `capabilities` if unmodified ([#5603](https://github.com/hashicorp/terraform/issues/5603)) - * provider/aws: Fix a bug where AWS Kinesis Stream includes closed shards in the shard_count ([#5401](https://github.com/hashicorp/terraform/issues/5401)) - * provider/aws: Fix a bug where ElasticSearch Domain tags were not being set correctly ([#5361](https://github.com/hashicorp/terraform/issues/5361)) - * provider/aws: Fix a bug where `aws_route` would show continual changes in the plan when not computed ([#5321](https://github.com/hashicorp/terraform/issues/5321)) - * provider/aws: Fix a bug where `publicly_assessible` wasn't being set to state in `aws_db_instance` ([#5535](https://github.com/hashicorp/terraform/issues/5535)) - * provider/aws: Fix a bug where listener protocol on `aws_elb` resources was case insensitive ([#5376](https://github.com/hashicorp/terraform/issues/5376)) - * provider/aws: Fix a bug which caused panics creating rules on security groups in EC2 Classic ([#5329](https://github.com/hashicorp/terraform/issues/5329)) - * provider/aws: Fix crash when `aws_lambda_function` VpcId is nil ([#5182](https://github.com/hashicorp/terraform/issues/5182)) - * provider/aws: Fix 
error with parsing JSON in `aws_s3_bucket` policy attribute ([#5474](https://github.com/hashicorp/terraform/issues/5474)) - * provider/aws: `aws_lambda_function` can be properly updated, either via `s3_object_version` or via `filename` & `source_code_hash` as described in docs ([#5239](https://github.com/hashicorp/terraform/issues/5239)) - * provider/google: Fix managed instance group preemptible instance creation ([#4834](https://github.com/hashicorp/terraform/issues/4834)) - * provider/openstack: Account for a 403 reply when os-tenant-networks is disabled ([#5432](https://github.com/hashicorp/terraform/issues/5432)) - * provider/openstack: Fix crashing during certain network updates in instances ([#5365](https://github.com/hashicorp/terraform/issues/5365)) - * provider/openstack: Fix create/delete statuses in load balancing resources ([#5557](https://github.com/hashicorp/terraform/issues/5557)) - * provider/openstack: Fix race condition between instance deletion and volume detachment ([#5359](https://github.com/hashicorp/terraform/issues/5359)) - * provider/template: Warn when `template` attribute specified as path ([#5563](https://github.com/hashicorp/terraform/issues/5563)) - -INTERNAL IMPROVEMENTS: - - * helper/schema: `MaxItems` attribute on schema lists and sets ([#5218](https://github.com/hashicorp/terraform/issues/5218)) - -## 0.6.12 (February 24, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * The `publicly_accessible` attribute on `aws_redshift_cluster` resources now defaults to true - -FEATURES: - - * **New command:** `validate` to perform syntax validation ([#3783](https://github.com/hashicorp/terraform/issues/3783)) - * **New provider:** `datadog` ([#5251](https://github.com/hashicorp/terraform/issues/5251)) - * **New interpolation function:** `md5` ([#5267](https://github.com/hashicorp/terraform/issues/5267)) - * **New interpolation function:** `signum` ([#4854](https://github.com/hashicorp/terraform/issues/4854)) - * **New resource:** 
`aws_cloudwatch_event_rule` ([#4986](https://github.com/hashicorp/terraform/issues/4986)) - * **New resource:** `aws_cloudwatch_event_target` ([#4986](https://github.com/hashicorp/terraform/issues/4986)) - * **New resource:** `aws_lambda_permission` ([#4826](https://github.com/hashicorp/terraform/issues/4826)) - * **New resource:** `azurerm_dns_a_record` ([#5013](https://github.com/hashicorp/terraform/issues/5013)) - * **New resource:** `azurerm_dns_aaaa_record` ([#5013](https://github.com/hashicorp/terraform/issues/5013)) - * **New resource:** `azurerm_dns_cname_record` ([#5013](https://github.com/hashicorp/terraform/issues/5013)) - * **New resource:** `azurerm_dns_mx_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041)) - * **New resource:** `azurerm_dns_ns_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041)) - * **New resource:** `azurerm_dns_srv_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041)) - * **New resource:** `azurerm_dns_txt_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041)) - * **New resource:** `azurerm_dns_zone` ([#4979](https://github.com/hashicorp/terraform/issues/4979)) - * **New resource:** `azurerm_search_service` ([#5203](https://github.com/hashicorp/terraform/issues/5203)) - * **New resource:** `azurerm_sql_database` ([#5003](https://github.com/hashicorp/terraform/issues/5003)) - * **New resource:** `azurerm_sql_firewall_rule` ([#5057](https://github.com/hashicorp/terraform/issues/5057)) - * **New resource:** `azurerm_sql_server` ([#4991](https://github.com/hashicorp/terraform/issues/4991)) - * **New resource:** `google_compute_subnetwork` ([#5130](https://github.com/hashicorp/terraform/issues/5130)) - -IMPROVEMENTS: - - * core: Backend names are now down cased during `init` in the same manner as `remote config` ([#5012](https://github.com/hashicorp/terraform/issues/5012)) - * core: Upgrade resource name validation warning to an error as planned 
([#5272](https://github.com/hashicorp/terraform/issues/5272)) - * core: output "diffs didn't match" error details ([#5276](https://github.com/hashicorp/terraform/issues/5276)) - * provider/aws: Add `is_multi_region_trail` option to CloudTrail ([#4939](https://github.com/hashicorp/terraform/issues/4939)) - * provider/aws: Add support for HTTP(S) endpoints that auto confirm SNS subscription ([#4711](https://github.com/hashicorp/terraform/issues/4711)) - * provider/aws: Add support for Tags to CloudTrail ([#5135](https://github.com/hashicorp/terraform/issues/5135)) - * provider/aws: Add support for Tags to ElasticSearch ([#4973](https://github.com/hashicorp/terraform/issues/4973)) - * provider/aws: Add support for deployment configuration to `aws_ecs_service` ([#5220](https://github.com/hashicorp/terraform/issues/5220)) - * provider/aws: Add support for log validation + KMS encryption to `aws_cloudtrail` ([#5051](https://github.com/hashicorp/terraform/issues/5051)) - * provider/aws: Allow name-prefix and auto-generated names for IAM Server Cert ([#5178](https://github.com/hashicorp/terraform/issues/5178)) - * provider/aws: Expose additional VPN Connection attributes ([#5032](https://github.com/hashicorp/terraform/issues/5032)) - * provider/aws: Return an error if no matching route is found for an AWS Route ([#5155](https://github.com/hashicorp/terraform/issues/5155)) - * provider/aws: Support custom endpoints for AWS EC2 ELB and IAM ([#5114](https://github.com/hashicorp/terraform/issues/5114)) - * provider/aws: The `cluster_type` on `aws_redshift_cluster` resources is now computed ([#5238](https://github.com/hashicorp/terraform/issues/5238)) - * provider/aws: `aws_lambda_function` resources now support VPC configuration ([#5149](https://github.com/hashicorp/terraform/issues/5149)) - * provider/aws: Add support for Enhanced Monitoring to RDS Instances ([#4945](https://github.com/hashicorp/terraform/issues/4945)) - * provider/aws: Improve vpc cidr_block err message 
([#5255](https://github.com/hashicorp/terraform/issues/5255)) - * provider/aws: Implement Retention Period for `aws_kinesis_stream` ([#5223](https://github.com/hashicorp/terraform/issues/5223)) - * provider/aws: Enable `stream_arn` output for DynamoDB Table when streams are enabled ([#5271](https://github.com/hashicorp/terraform/issues/5271)) - * provider/digitalocean: `digitalocean_record` resources now export a computed `fqdn` attribute ([#5071](https://github.com/hashicorp/terraform/issues/5071)) - * provider/google: Add assigned IP Address to CloudSQL Instance `google_sql_database_instance` ([#5245](https://github.com/hashicorp/terraform/issues/5245)) - * provider/openstack: Add support for Distributed Routers ([#4878](https://github.com/hashicorp/terraform/issues/4878)) - * provider/openstack: Add support for optional cacert_file parameter ([#5106](https://github.com/hashicorp/terraform/issues/5106)) - -BUG FIXES: - - * core: Fix bug detecting deeply nested module orphans ([#5022](https://github.com/hashicorp/terraform/issues/5022)) - * core: Fix bug where `ignore_changes` could produce "diffs didn't match during apply" errors ([#4965](https://github.com/hashicorp/terraform/issues/4965)) - * core: Fix race condition when handling tainted resource destroys ([#5026](https://github.com/hashicorp/terraform/issues/5026)) - * core: Improve handling of Provisioners in the graph, fixing "Provisioner already initialized" errors ([#4877](https://github.com/hashicorp/terraform/issues/4877)) - * core: Skip `create_before_destroy` processing during a `terraform destroy`, solving several issues preventing `destroy` - from working properly with CBD resources ([#5096](https://github.com/hashicorp/terraform/issues/5096)) - * core: Error instead of panic on self var in wrong scope ([#5273](https://github.com/hashicorp/terraform/issues/5273)) - * provider/aws: Fix Copy of Tags to DB Instance when created from Snapshot ([#5197](https://github.com/hashicorp/terraform/issues/5197)) 
- * provider/aws: Fix DynamoDB Table Refresh to ensure deleted tables are removed from state ([#4943](https://github.com/hashicorp/terraform/issues/4943)) - * provider/aws: Fix ElasticSearch `domain_name` validation ([#4973](https://github.com/hashicorp/terraform/issues/4973)) - * provider/aws: Fix issue applying security group changes in EC2 Classic RDS for aws_db_instance ([#4969](https://github.com/hashicorp/terraform/issues/4969)) - * provider/aws: Fix reading auto scaling group availability zones ([#5044](https://github.com/hashicorp/terraform/issues/5044)) - * provider/aws: Fix reading auto scaling group load balancers ([#5045](https://github.com/hashicorp/terraform/issues/5045)) - * provider/aws: Fix `aws_redshift_cluster` to allow `publicly_accessible` to be false ([#5262](https://github.com/hashicorp/terraform/issues/5262)) - * provider/aws: Wait longer for internet gateways to detach ([#5120](https://github.com/hashicorp/terraform/issues/5120)) - * provider/aws: Fix issue reading auto scaling group termination policies ([#5101](https://github.com/hashicorp/terraform/issues/5101)) - * provider/cloudflare: `ttl` no longer shows a change on each plan on `cloudflare_record` resources ([#5042](https://github.com/hashicorp/terraform/issues/5042)) - * provider/docker: Fix the default docker_host value ([#5088](https://github.com/hashicorp/terraform/issues/5088)) - * provider/google: Fix backend service max_utilization attribute ([#5075](https://github.com/hashicorp/terraform/issues/5075)) - * provider/google: Fix reading of `google_compute_vpn_gateway` without an explicit ([#5125](https://github.com/hashicorp/terraform/issues/5125)) - * provider/google: Fix crash when setting `ack_deadline_seconds` on `google_pubsub_subscription` ([#5110](https://github.com/hashicorp/terraform/issues/5110)) - * provider/openstack: Fix crash when `access_network` was not defined in instances ([#4966](https://github.com/hashicorp/terraform/issues/4966)) - * provider/powerdns: Fix 
refresh of `powerdns_record` no longer fails if the record name contains a `-` ([#5228](https://github.com/hashicorp/terraform/issues/5228)) - * provider/vcd: Wait for DHCP assignment when creating `vcd_vapp` resources with no static IP assignment ([#5195](https://github.com/hashicorp/terraform/issues/5195)) - -## 0.6.11 (February 1, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * The `max_size`, `min_size` and `desired_capacity` attributes on `aws_autoscaling_schedule` resources now default to 0 - -FEATURES: - - * **New provider: `powerdns` - PowerDNS REST API** ([#4885](https://github.com/hashicorp/terraform/issues/4885)) - * **New builtin function:** `trimspace` for trimming whitespaces ([#4910](https://github.com/hashicorp/terraform/issues/4910)) - * **New builtin function:** `base64sha256` for base64 encoding raw sha256 sum of a given string ([#4899](https://github.com/hashicorp/terraform/issues/4899)) - * **New resource:** `openstack_lb_member_v1` ([#4359](https://github.com/hashicorp/terraform/issues/4359)) - -IMPROVEMENTS: - - * provider/template: Remove unnecessary mime-type validation from `template_cloudinit_config` resources ([#4873](https://github.com/hashicorp/terraform/issues/4873)) - * provider/template: Correct spelling of "Boundary" in the part separator of rendered `template_cloudinit_config` resources ([#4873](https://github.com/hashicorp/terraform/issues/4873)) - * provider/aws: Provide a better message if no AWS creds are found ([#4869](https://github.com/hashicorp/terraform/issues/4869)) - * provider/openstack: Ability to specify per-network Floating IPs ([#4812](https://github.com/hashicorp/terraform/issues/4812)) - -BUG FIXES: - - * provider/aws: `aws_autoscale_schedule` 0 values ([#4693](https://github.com/hashicorp/terraform/issues/4693)) - * provider/aws: Fix regression with VPCs and ClassicLink for regions that do not support it ([#4879](https://github.com/hashicorp/terraform/issues/4879)) - * provider/aws: Change VPC ClassicLink to 
be computed ([#4933](https://github.com/hashicorp/terraform/issues/4933)) - * provider/aws: Fix SNS Topic Refresh to ensure deleted topics are removed from state ([#4891](https://github.com/hashicorp/terraform/issues/4891)) - * provider/aws: Refactor Route53 record to fix regression in deleting records created in previous versions of Terraform ([#4892](https://github.com/hashicorp/terraform/issues/4892)) - * provider/azurerm: Fix panic if no creds supplied ([#4902](https://github.com/hashicorp/terraform/issues/4902)) - * provider/openstack: Changing the port resource to mark the ip_address as optional ([#4850](https://github.com/hashicorp/terraform/issues/4850)) - * provider/docker: Catch potential custom network errors in docker ([#4918](https://github.com/hashicorp/terraform/issues/4918)) - - - -## 0.6.10 (January 27, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * The `-module-depth` flag available on `plan`, `apply`, `show`, and `graph` now defaults to `-1`, causing - resources within modules to be expanded in command output. This is only a cosmetic change; it does not affect - any behavior. - * This release includes a bugfix for `$${}` interpolation escaping. These strings are now properly converted to `${}` - during interpolation. This may cause diffs on existing configurations in certain cases. - * Users of `consul_keys` should note that the `value` sub-attribute of `key` will no longer be updated with the remote value of the key. It should be only used to _set_ a key in Consul K/V. To reference key values, use the `var` attribute. - * The 0.6.9 release contained a regression in `aws_autoscaling_group` capacity waiting behavior for configs where `min_elb_capacity != desired_capacity` or `min_size != desired_capacity`. This release remedies that regression by un-deprecating `min_elb_capacity` and restoring the prior behavior. 
- * Users of `aws_security_group` may notice new diffs in initial plans with 0.6.10 due to a bugfix that fixes drift detection on nested security group rules. These new diffs should reflect the actual state of the resources, which Terraform previously was unable to see. - - -FEATURES: - - * **New resource: `aws_lambda_alias`** ([#4664](https://github.com/hashicorp/terraform/issues/4664)) - * **New resource: `aws_redshift_cluster`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `aws_redshift_parameter_group`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `aws_redshift_security_group`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `aws_redshift_subnet_group`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `azurerm_cdn_endpoint`** ([#4759](https://github.com/hashicorp/terraform/issues/4759)) - * **New resource: `azurerm_cdn_profile`** ([#4740](https://github.com/hashicorp/terraform/issues/4740)) - * **New resource: `azurerm_network_interface`** ([#4598](https://github.com/hashicorp/terraform/issues/4598)) - * **New resource: `azurerm_network_security_rule`** ([#4586](https://github.com/hashicorp/terraform/issues/4586)) - * **New resource: `azurerm_route_table`** ([#4602](https://github.com/hashicorp/terraform/issues/4602)) - * **New resource: `azurerm_route`** ([#4604](https://github.com/hashicorp/terraform/issues/4604)) - * **New resource: `azurerm_storage_account`** ([#4698](https://github.com/hashicorp/terraform/issues/4698)) - * **New resource: `azurerm_storage_blob`** ([#4862](https://github.com/hashicorp/terraform/issues/4862)) - * **New resource: `azurerm_storage_container`** ([#4862](https://github.com/hashicorp/terraform/issues/4862)) - * **New resource: `azurerm_storage_queue`** ([#4862](https://github.com/hashicorp/terraform/issues/4862)) - * **New resource: `azurerm_subnet`** 
([#4595](https://github.com/hashicorp/terraform/issues/4595)) - * **New resource: `docker_network`** ([#4483](https://github.com/hashicorp/terraform/issues/4483)) - * **New resource: `docker_volume`** ([#4483](https://github.com/hashicorp/terraform/issues/4483)) - * **New resource: `google_sql_user`** ([#4669](https://github.com/hashicorp/terraform/issues/4669)) - -IMPROVEMENTS: - - * core: Add `sha256()` interpolation function ([#4704](https://github.com/hashicorp/terraform/issues/4704)) - * core: Validate lifecycle keys to show helpful error messages when they are mistyped ([#4745](https://github.com/hashicorp/terraform/issues/4745)) - * core: Default `module-depth` parameter to `-1`, which expands resources within modules in command output ([#4763](https://github.com/hashicorp/terraform/issues/4763)) - * core: Variable types may now be specified explicitly using the `type` argument ([#4795](https://github.com/hashicorp/terraform/issues/4795)) - * provider/aws: Add new parameters `az_mode` and `availability_zone(s)` in ElastiCache ([#4631](https://github.com/hashicorp/terraform/issues/4631)) - * provider/aws: Allow ap-northeast-2 (Seoul) as valid region ([#4637](https://github.com/hashicorp/terraform/issues/4637)) - * provider/aws: Limit SNS Topic Subscription protocols ([#4639](https://github.com/hashicorp/terraform/issues/4639)) - * provider/aws: Add support for configuring logging on `aws_s3_bucket` resources ([#4482](https://github.com/hashicorp/terraform/issues/4482)) - * provider/aws: Add AWS Classiclink for AWS VPC resource ([#3994](https://github.com/hashicorp/terraform/issues/3994)) - * provider/aws: Supporting New AWS Route53 HealthCheck additions ([#4564](https://github.com/hashicorp/terraform/issues/4564)) - * provider/aws: Store instance state ([#3261](https://github.com/hashicorp/terraform/issues/3261)) - * provider/aws: Add support for updating ELB availability zones and subnets ([#4597](https://github.com/hashicorp/terraform/issues/4597)) - * 
provider/aws: Enable specifying aws s3 redirect protocol ([#4098](https://github.com/hashicorp/terraform/issues/4098)) - * provider/aws: Added support for `encrypted` on `ebs_block_devices` in Launch Configurations ([#4481](https://github.com/hashicorp/terraform/issues/4481)) - * provider/aws: Retry Listener Creation for ELBs ([#4825](https://github.com/hashicorp/terraform/issues/4825)) - * provider/aws: Add support for creating Managed Microsoft Active Directory - and Directory Connectors ([#4388](https://github.com/hashicorp/terraform/issues/4388)) - * provider/aws: Mark some `aws_db_instance` fields as optional ([#3138](https://github.com/hashicorp/terraform/issues/3138)) - * provider/digitalocean: Add support for reassigning `digitalocean_floating_ip` resources ([#4476](https://github.com/hashicorp/terraform/issues/4476)) - * provider/dme: Add support for Global Traffic Director locations on `dme_record` resources ([#4305](https://github.com/hashicorp/terraform/issues/4305)) - * provider/docker: Add support for adding host entries on `docker_container` resources ([#3463](https://github.com/hashicorp/terraform/issues/3463)) - * provider/docker: Add support for mounting named volumes on `docker_container` resources ([#4480](https://github.com/hashicorp/terraform/issues/4480)) - * provider/google: Add content field to bucket object ([#3893](https://github.com/hashicorp/terraform/issues/3893)) - * provider/google: Add support for `named_port` blocks on `google_compute_instance_group_manager` resources ([#4605](https://github.com/hashicorp/terraform/issues/4605)) - * provider/openstack: Add "personality" support to instance resource ([#4623](https://github.com/hashicorp/terraform/issues/4623)) - * provider/packet: Handle external state changes for Packet resources gracefully ([#4676](https://github.com/hashicorp/terraform/issues/4676)) - * provider/tls: `tls_private_key` now exports attributes with public key in both PEM and OpenSSH format 
([#4606](https://github.com/hashicorp/terraform/issues/4606)) - * provider/vdc: Add `allow_unverified_ssl` for connections to vCloud API ([#4811](https://github.com/hashicorp/terraform/issues/4811)) - * state/remote: Allow KMS Key Encryption to be used with S3 backend ([#2903](https://github.com/hashicorp/terraform/issues/2903)) - -BUG FIXES: - - * core: Fix handling of literals with escaped interpolations `$${var}` ([#4747](https://github.com/hashicorp/terraform/issues/4747)) - * core: Fix diff mismatch when RequiresNew field and list both change ([#4749](https://github.com/hashicorp/terraform/issues/4749)) - * core: Respect module target path argument on `terraform init` ([#4753](https://github.com/hashicorp/terraform/issues/4753)) - * core: Write planfile even on empty plans ([#4766](https://github.com/hashicorp/terraform/issues/4766)) - * core: Add validation error when output is missing value field ([#4762](https://github.com/hashicorp/terraform/issues/4762)) - * core: Fix improper handling of orphan resources when targeting ([#4574](https://github.com/hashicorp/terraform/issues/4574)) - * core: Properly handle references to computed set attributes ([#4840](https://github.com/hashicorp/terraform/issues/4840)) - * config: Detect a specific JSON edge case and show a helpful workaround ([#4746](https://github.com/hashicorp/terraform/issues/4746)) - * provider/openstack: Ensure valid Security Group Rule attribute combination ([#4466](https://github.com/hashicorp/terraform/issues/4466)) - * provider/openstack: Don't put fixed_ip in port creation request if not defined ([#4617](https://github.com/hashicorp/terraform/issues/4617)) - * provider/google: Clarify SQL Database Instance recent name restriction ([#4577](https://github.com/hashicorp/terraform/issues/4577)) - * provider/google: Split Instance network interface into two fields ([#4265](https://github.com/hashicorp/terraform/issues/4265)) - * provider/aws: Error with empty list item on security group 
([#4140](https://github.com/hashicorp/terraform/issues/4140)) - * provider/aws: Fix issue with detecting drift in AWS Security Groups rules ([#4779](https://github.com/hashicorp/terraform/issues/4779)) - * provider/aws: Trap Instance error from mismatched SG IDs and Names ([#4240](https://github.com/hashicorp/terraform/issues/4240)) - * provider/aws: EBS optimised to force new resource in AWS Instance ([#4627](https://github.com/hashicorp/terraform/issues/4627)) - * provider/aws: Wait for NACL rule to be visible ([#4734](https://github.com/hashicorp/terraform/issues/4734)) - * provider/aws: `default_result` on `aws_autoscaling_lifecycle_hook` resources is now computed ([#4695](https://github.com/hashicorp/terraform/issues/4695)) - * provider/aws: fix ASG capacity waiting regression by un-deprecating `min_elb_capacity` ([#4864](https://github.com/hashicorp/terraform/issues/4864)) - * provider/consul: fix several bugs surrounding update behavior ([#4787](https://github.com/hashicorp/terraform/issues/4787)) - * provider/mailgun: Handle the fact that the domain destroy API is eventually consistent ([#4777](https://github.com/hashicorp/terraform/issues/4777)) - * provider/template: Fix race causing sporadic crashes in template_file with count > 1 ([#4694](https://github.com/hashicorp/terraform/issues/4694)) - * provider/template: Add support for updating `template_cloudinit_config` resources ([#4757](https://github.com/hashicorp/terraform/issues/4757)) - * provisioner/chef: Add ENV['no_proxy'] to chef provisioner if no_proxy is detected ([#4661](https://github.com/hashicorp/terraform/issues/4661)) - -## 0.6.9 (January 8, 2016) - -FEATURES: - - * **New provider: `vcd` - VMware vCloud Director** ([#3785](https://github.com/hashicorp/terraform/issues/3785)) - * **New provider: `postgresql` - Create PostgreSQL databases and roles** ([#3653](https://github.com/hashicorp/terraform/issues/3653)) - * **New provider: `chef` - Create chef environments, roles, etc** 
([#3084](https://github.com/hashicorp/terraform/issues/3084)) - * **New provider: `azurerm` - Preliminary support for Azure Resource Manager** ([#4226](https://github.com/hashicorp/terraform/issues/4226)) - * **New provider: `mysql` - Create MySQL databases** ([#3122](https://github.com/hashicorp/terraform/issues/3122)) - * **New resource: `aws_autoscaling_schedule`** ([#4256](https://github.com/hashicorp/terraform/issues/4256)) - * **New resource: `aws_nat_gateway`** ([#4381](https://github.com/hashicorp/terraform/issues/4381)) - * **New resource: `aws_network_acl_rule`** ([#4286](https://github.com/hashicorp/terraform/issues/4286)) - * **New resources: `aws_ecr_repository` and `aws_ecr_repository_policy`** ([#4415](https://github.com/hashicorp/terraform/issues/4415)) - * **New resource: `google_pubsub_topic`** ([#3671](https://github.com/hashicorp/terraform/issues/3671)) - * **New resource: `google_pubsub_subscription`** ([#3671](https://github.com/hashicorp/terraform/issues/3671)) - * **New resource: `template_cloudinit_config`** ([#4095](https://github.com/hashicorp/terraform/issues/4095)) - * **New resource: `tls_locally_signed_cert`** ([#3930](https://github.com/hashicorp/terraform/issues/3930)) - * **New remote state backend: `artifactory`** ([#3684](https://github.com/hashicorp/terraform/issues/3684)) - -IMPROVEMENTS: - - * core: Change set internals for performance improvements ([#3992](https://github.com/hashicorp/terraform/issues/3992)) - * core: Support HTTP basic auth in consul remote state ([#4166](https://github.com/hashicorp/terraform/issues/4166)) - * core: Improve error message on resource arity mismatch ([#4244](https://github.com/hashicorp/terraform/issues/4244)) - * core: Add support for unary operators + and - to the interpolation syntax ([#3621](https://github.com/hashicorp/terraform/issues/3621)) - * core: Add SSH agent support for Windows ([#4323](https://github.com/hashicorp/terraform/issues/4323)) - * core: Add `sha1()` interpolation 
function ([#4450](https://github.com/hashicorp/terraform/issues/4450)) - * provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` ([#3704](https://github.com/hashicorp/terraform/issues/3704)) - * provider/aws: Add support for DynamoDB Table StreamSpecifications ([#4208](https://github.com/hashicorp/terraform/issues/4208)) - * provider/aws: Add `name_prefix` to Security Groups ([#4167](https://github.com/hashicorp/terraform/issues/4167)) - * provider/aws: Add support for removing nodes to `aws_elasticache_cluster` ([#3809](https://github.com/hashicorp/terraform/issues/3809)) - * provider/aws: Add support for `skip_final_snapshot` to `aws_db_instance` ([#3853](https://github.com/hashicorp/terraform/issues/3853)) - * provider/aws: Adding support for Tags to DB SecurityGroup ([#4260](https://github.com/hashicorp/terraform/issues/4260)) - * provider/aws: Adding Tag support for DB Param Groups ([#4259](https://github.com/hashicorp/terraform/issues/4259)) - * provider/aws: Fix issue with updated route ids for VPC Endpoints ([#4264](https://github.com/hashicorp/terraform/issues/4264)) - * provider/aws: Added measure_latency option to Route 53 Health Check resource ([#3688](https://github.com/hashicorp/terraform/issues/3688)) - * provider/aws: Validate IOPs for EBS Volumes ([#4146](https://github.com/hashicorp/terraform/issues/4146)) - * provider/aws: DB Subnet group arn output ([#4261](https://github.com/hashicorp/terraform/issues/4261)) - * provider/aws: Get full Kinesis streams view with pagination ([#4368](https://github.com/hashicorp/terraform/issues/4368)) - * provider/aws: Allow changing private IPs for ENIs ([#4307](https://github.com/hashicorp/terraform/issues/4307)) - * provider/aws: Retry MalformedPolicy errors due to newly created principals in S3 Buckets ([#4315](https://github.com/hashicorp/terraform/issues/4315)) - * provider/aws: Validate `name` on `db_subnet_group` against AWS requirements 
([#4340](https://github.com/hashicorp/terraform/issues/4340)) - * provider/aws: wait for ASG capacity on update ([#3947](https://github.com/hashicorp/terraform/issues/3947)) - * provider/aws: Add validation for ECR repository name ([#4431](https://github.com/hashicorp/terraform/issues/4431)) - * provider/cloudstack: performance improvements ([#4150](https://github.com/hashicorp/terraform/issues/4150)) - * provider/docker: Add support for setting the entry point on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting the restart policy on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting memory, swap and CPU shares on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting labels on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting log driver and options on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting network mode on `docker_container` resources ([#4475](https://github.com/hashicorp/terraform/issues/4475)) - * provider/heroku: Improve handling of Applications within an Organization ([#4495](https://github.com/hashicorp/terraform/issues/4495)) - * provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` ([#3867](https://github.com/hashicorp/terraform/issues/3867)) - * provider/vsphere: Rename vcenter_server config parameter to something clearer ([#3718](https://github.com/hashicorp/terraform/issues/3718)) - * provider/vsphere: Make allow_unverified_ssl configurable on the provider ([#3933](https://github.com/hashicorp/terraform/issues/3933)) - * provider/vsphere: Add folder handling for folder-qualified vm names 
([#3939](https://github.com/hashicorp/terraform/issues/3939)) - * provider/vsphere: Change ip_address parameter for ipv6 support ([#4035](https://github.com/hashicorp/terraform/issues/4035)) - * provider/openstack: Increase instance timeout from 10 to 30 minutes ([#4223](https://github.com/hashicorp/terraform/issues/4223)) - * provider/google: Add `restart_policy` attribute to `google_managed_instance_group` ([#3892](https://github.com/hashicorp/terraform/issues/3892)) - -BUG FIXES: - - * core: skip provider input for deprecated fields ([#4193](https://github.com/hashicorp/terraform/issues/4193)) - * core: Fix issue which could cause fields that become empty to retain old values in the state ([#3257](https://github.com/hashicorp/terraform/issues/3257)) - * provider/docker: Fix an issue running with Docker Swarm by looking up containers by ID instead of name ([#4148](https://github.com/hashicorp/terraform/issues/4148)) - * provider/openstack: Better handling of load balancing resource state changes ([#3926](https://github.com/hashicorp/terraform/issues/3926)) - * provider/aws: Treat `INACTIVE` ECS cluster as deleted ([#4364](https://github.com/hashicorp/terraform/issues/4364)) - * provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs ([#4075](https://github.com/hashicorp/terraform/issues/4075)) - * provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists ([#4198](https://github.com/hashicorp/terraform/issues/4198)) - * provider/aws: Fix issue force destroying a versioned S3 bucket ([#4168](https://github.com/hashicorp/terraform/issues/4168)) - * provider/aws: Update DB Replica to honor storage type ([#4155](https://github.com/hashicorp/terraform/issues/4155)) - * provider/aws: Fix issue creating AWS RDS replicas across regions ([#4215](https://github.com/hashicorp/terraform/issues/4215)) - * provider/aws: Fix issue with Route53 and zero weighted records ([#4427](https://github.com/hashicorp/terraform/issues/4427)) - * 
provider/aws: Fix issue with iam_profile in aws_instance when a path is specified ([#3663](https://github.com/hashicorp/terraform/issues/3663)) - * provider/aws: Refactor AWS Authentication chain to fix issue with authentication and IAM ([#4254](https://github.com/hashicorp/terraform/issues/4254)) - * provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region ([#4236](https://github.com/hashicorp/terraform/issues/4236)) - * provider/aws: Fix missing AMI issue with Launch Configurations ([#4242](https://github.com/hashicorp/terraform/issues/4242)) - * provider/aws: Opsworks stack SSH key is write-only ([#4241](https://github.com/hashicorp/terraform/issues/4241)) - * provider/aws: Update VPC Endpoint to correctly set route table ids ([#4392](https://github.com/hashicorp/terraform/issues/4392)) - * provider/aws: Fix issue with ElasticSearch Domain `access_policies` always appear changed ([#4245](https://github.com/hashicorp/terraform/issues/4245)) - * provider/aws: Fix issue with nil parameter group value causing panic in `aws_db_parameter_group` ([#4318](https://github.com/hashicorp/terraform/issues/4318)) - * provider/aws: Fix issue with Elastic IPs not recognizing when they have been unassigned manually ([#4387](https://github.com/hashicorp/terraform/issues/4387)) - * provider/aws: Use body or URL for all CloudFormation stack updates ([#4370](https://github.com/hashicorp/terraform/issues/4370)) - * provider/aws: Fix template_url/template_body conflict ([#4540](https://github.com/hashicorp/terraform/issues/4540)) - * provider/aws: Fix bug w/ changing ECS svc/ELB association ([#4366](https://github.com/hashicorp/terraform/issues/4366)) - * provider/aws: Fix RDS unexpected state config ([#4490](https://github.com/hashicorp/terraform/issues/4490)) - * provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic ([#4214](https://github.com/hashicorp/terraform/issues/4214)) - * provider/google: Fix project metadata 
sshKeys from showing up and causing unnecessary diffs ([#4512](https://github.com/hashicorp/terraform/issues/4512)) - * provider/heroku: Retry drain create until log channel is assigned ([#4823](https://github.com/hashicorp/terraform/issues/4823)) - * provider/openstack: Handle volumes in "deleting" state ([#4204](https://github.com/hashicorp/terraform/issues/4204)) - * provider/rundeck: Tolerate Rundeck server not returning project name when reading a job ([#4301](https://github.com/hashicorp/terraform/issues/4301)) - * provider/vsphere: Create and attach additional disks before bootup ([#4196](https://github.com/hashicorp/terraform/issues/4196)) - * provider/openstack: Convert block_device from a Set to a List ([#4288](https://github.com/hashicorp/terraform/issues/4288)) - * provider/google: Terraform identifies deleted resources and handles them appropriately on Read ([#3913](https://github.com/hashicorp/terraform/issues/3913)) - -## 0.6.8 (December 2, 2015) - -FEATURES: - - * **New provider: `statuscake`** ([#3340](https://github.com/hashicorp/terraform/issues/3340)) - * **New resource: `digitalocean_floating_ip`** ([#3748](https://github.com/hashicorp/terraform/issues/3748)) - * **New resource: `aws_lambda_event_source_mapping`** ([#4093](https://github.com/hashicorp/terraform/issues/4093)) - -IMPROVEMENTS: - - * provider/cloudstack: Reduce the number of network calls required for common operations ([#4051](https://github.com/hashicorp/terraform/issues/4051)) - * provider/aws: Make `publically_accessible` on an `aws_db_instance` update existing instances instead of forcing new ones ([#3895](https://github.com/hashicorp/terraform/issues/3895)) - * provider/aws: Allow `block_duration_minutes` to be set for spot instance requests ([#4071](https://github.com/hashicorp/terraform/issues/4071)) - * provider/aws: Make setting `acl` on S3 buckets update existing buckets instead of forcing new ones ([#4080](https://github.com/hashicorp/terraform/issues/4080)) - * 
provider/aws: Make updates to `assume_role_policy` modify existing IAM roles instead of forcing new ones ([#4107](https://github.com/hashicorp/terraform/issues/4107)) - -BUG FIXES: - - * core: Fix a bug which prevented HEREDOC syntax being used in lists ([#4078](https://github.com/hashicorp/terraform/issues/4078)) - * core: Fix a bug which prevented HEREDOC syntax where the anchor ends in a number ([#4128](https://github.com/hashicorp/terraform/issues/4128)) - * core: Fix a bug which prevented HEREDOC syntax being used with Windows line endings ([#4069](https://github.com/hashicorp/terraform/issues/4069)) - * provider/aws: Fix a bug which could result in a panic when reading EC2 metadata ([#4024](https://github.com/hashicorp/terraform/issues/4024)) - * provider/aws: Fix issue recreating security group rule if it has been destroyed ([#4050](https://github.com/hashicorp/terraform/issues/4050)) - * provider/aws: Fix issue with some attributes in Spot Instance Requests returning as nil ([#4132](https://github.com/hashicorp/terraform/issues/4132)) - * provider/aws: Fix issue where SPF records in Route 53 could show differences with no modification to the configuration ([#4108](https://github.com/hashicorp/terraform/issues/4108)) - * provisioner/chef: Fix issue with path separators breaking the Chef provisioner on Windows ([#4041](https://github.com/hashicorp/terraform/issues/4041)) - -## 0.6.7 (November 23, 2015) - -FEATURES: - - * **New provider: `tls`** - A utility provider for generating TLS keys/self-signed certificates for development and testing ([#2778](https://github.com/hashicorp/terraform/issues/2778)) - * **New provider: `dyn`** - Manage DNS records on Dyn - * **New resource: `aws_cloudformation_stack`** ([#2636](https://github.com/hashicorp/terraform/issues/2636)) - * **New resource: `aws_cloudtrail`** ([#3094](https://github.com/hashicorp/terraform/issues/3094)), ([#4010](https://github.com/hashicorp/terraform/issues/4010)) - * **New resource: `aws_route`** 
([#3548](https://github.com/hashicorp/terraform/issues/3548)) - * **New resource: `aws_codecommit_repository`** ([#3274](https://github.com/hashicorp/terraform/issues/3274)) - * **New resource: `aws_kinesis_firehose_delivery_stream`** ([#3833](https://github.com/hashicorp/terraform/issues/3833)) - * **New resource: `google_sql_database` and `google_sql_database_instance`** ([#3617](https://github.com/hashicorp/terraform/issues/3617)) - * **New resource: `google_compute_global_address`** ([#3701](https://github.com/hashicorp/terraform/issues/3701)) - * **New resource: `google_compute_https_health_check`** ([#3883](https://github.com/hashicorp/terraform/issues/3883)) - * **New resource: `google_compute_ssl_certificate`** ([#3723](https://github.com/hashicorp/terraform/issues/3723)) - * **New resource: `google_compute_url_map`** ([#3722](https://github.com/hashicorp/terraform/issues/3722)) - * **New resource: `google_compute_target_http_proxy`** ([#3727](https://github.com/hashicorp/terraform/issues/3727)) - * **New resource: `google_compute_target_https_proxy`** ([#3728](https://github.com/hashicorp/terraform/issues/3728)) - * **New resource: `google_compute_global_forwarding_rule`** ([#3702](https://github.com/hashicorp/terraform/issues/3702)) - * **New resource: `openstack_networking_port_v2`** ([#3731](https://github.com/hashicorp/terraform/issues/3731)) - * New interpolation function: `coalesce` ([#3814](https://github.com/hashicorp/terraform/issues/3814)) - -IMPROVEMENTS: - - * core: Improve message to list only resources which will be destroyed when using `--target` ([#3859](https://github.com/hashicorp/terraform/issues/3859)) - * connection/ssh: Accept `private_key` contents instead of paths ([#3846](https://github.com/hashicorp/terraform/issues/3846)) - * provider/google: `preemptible` option for instance_template ([#3667](https://github.com/hashicorp/terraform/issues/3667)) - * provider/google: Accurate Terraform Version 
([#3554](https://github.com/hashicorp/terraform/issues/3554)) - * provider/google: Simplified auth (DefaultClient support) ([#3553](https://github.com/hashicorp/terraform/issues/3553)) - * provider/google: `automatic_restart`, `preemptible`, `on_host_maintenance` options ([#3643](https://github.com/hashicorp/terraform/issues/3643)) - * provider/google: Read credentials as contents instead of path ([#3901](https://github.com/hashicorp/terraform/issues/3901)) - * null_resource: Enhance and document ([#3244](https://github.com/hashicorp/terraform/issues/3244), [#3659](https://github.com/hashicorp/terraform/issues/3659)) - * provider/aws: Add CORS settings to S3 bucket ([#3387](https://github.com/hashicorp/terraform/issues/3387)) - * provider/aws: Add notification topic ARN for ElastiCache clusters ([#3674](https://github.com/hashicorp/terraform/issues/3674)) - * provider/aws: Add `kinesis_endpoint` for configuring Kinesis ([#3255](https://github.com/hashicorp/terraform/issues/3255)) - * provider/aws: Add a computed ARN for S3 Buckets ([#3685](https://github.com/hashicorp/terraform/issues/3685)) - * provider/aws: Add S3 support for Lambda Function resource ([#3794](https://github.com/hashicorp/terraform/issues/3794)) - * provider/aws: Add `name_prefix` option to launch configurations ([#3802](https://github.com/hashicorp/terraform/issues/3802)) - * provider/aws: Add support for group name and path changes with IAM group update function ([#3237](https://github.com/hashicorp/terraform/issues/3237)) - * provider/aws: Provide `source_security_group_id` for ELBs inside a VPC ([#3780](https://github.com/hashicorp/terraform/issues/3780)) - * provider/aws: Add snapshot window and retention limits for ElastiCache (Redis) ([#3707](https://github.com/hashicorp/terraform/issues/3707)) - * provider/aws: Add username updates for `aws_iam_user` ([#3227](https://github.com/hashicorp/terraform/issues/3227)) - * provider/aws: Add AutoMinorVersionUpgrade to RDS Instances 
([#3677](https://github.com/hashicorp/terraform/issues/3677)) - * provider/aws: Add `access_logs` to ELB resource ([#3756](https://github.com/hashicorp/terraform/issues/3756)) - * provider/aws: Add a retry function to rescue an error in creating Autoscaling Lifecycle Hooks ([#3694](https://github.com/hashicorp/terraform/issues/3694)) - * provider/aws: `engine_version` is now optional for DB Instance ([#3744](https://github.com/hashicorp/terraform/issues/3744)) - * provider/aws: Add configuration to enable copying RDS tags to final snapshot ([#3529](https://github.com/hashicorp/terraform/issues/3529)) - * provider/aws: RDS Cluster additions (`backup_retention_period`, `preferred_backup_window`, `preferred_maintenance_window`) ([#3757](https://github.com/hashicorp/terraform/issues/3757)) - * provider/aws: Document and validate ELB `ssl_certificate_id` and protocol requirements ([#3887](https://github.com/hashicorp/terraform/issues/3887)) - * provider/azure: Read `publish_settings` as contents instead of path ([#3899](https://github.com/hashicorp/terraform/issues/3899)) - * provider/openstack: Use IPv4 as the default IP version for subnets ([#3091](https://github.com/hashicorp/terraform/issues/3091)) - * provider/aws: Apply security group after restoring `db_instance` from snapshot ([#3513](https://github.com/hashicorp/terraform/issues/3513)) - * provider/aws: Make the AutoScalingGroup `name` optional ([#3710](https://github.com/hashicorp/terraform/issues/3710)) - * provider/openstack: Add "delete on termination" boot-from-volume option ([#3232](https://github.com/hashicorp/terraform/issues/3232)) - * provider/digitalocean: Make `user_data` force a new droplet ([#3740](https://github.com/hashicorp/terraform/issues/3740)) - * provider/vsphere: Do not add network interfaces by default ([#3652](https://github.com/hashicorp/terraform/issues/3652)) - * provider/openstack: Configure Fixed IPs through ports ([#3772](https://github.com/hashicorp/terraform/issues/3772)) - * 
provider/openstack: Specify a port ID on a Router Interface ([#3903](https://github.com/hashicorp/terraform/issues/3903)) - * provider/openstack: Make LBaaS Virtual IP computed ([#3927](https://github.com/hashicorp/terraform/issues/3927)) - -BUG FIXES: - - * `terraform remote config`: update `--help` output ([#3632](https://github.com/hashicorp/terraform/issues/3632)) - * core: Modules on Git branches now update properly ([#1568](https://github.com/hashicorp/terraform/issues/1568)) - * core: Fix issue preventing input prompts for unset variables during plan ([#3843](https://github.com/hashicorp/terraform/issues/3843)) - * core: Fix issue preventing input prompts for unset variables during refresh ([#4017](https://github.com/hashicorp/terraform/issues/4017)) - * core: Orphan resources can now be targets ([#3912](https://github.com/hashicorp/terraform/issues/3912)) - * helper/schema: Skip StateFunc when value is nil ([#4002](https://github.com/hashicorp/terraform/issues/4002)) - * provider/google: Timeout when deleting large `instance_group_manager` ([#3591](https://github.com/hashicorp/terraform/issues/3591)) - * provider/aws: Fix issue with order of Termination Policies in AutoScaling Groups. 
- This will introduce plans on upgrade to this version, in order to correct the ordering ([#2890](https://github.com/hashicorp/terraform/issues/2890)) - * provider/aws: Allow cluster name, not only ARN for `aws_ecs_service` ([#3668](https://github.com/hashicorp/terraform/issues/3668)) - * provider/aws: Fix a bug where a non-lower-cased `maintenance_window` can cause unnecessary planned changes ([#4020](https://github.com/hashicorp/terraform/issues/4020)) - * provider/aws: Only set `weight` on an `aws_route53_record` if it has been set in configuration ([#3900](https://github.com/hashicorp/terraform/issues/3900)) - * provider/aws: Ignore association not existing on route table destroy ([#3615](https://github.com/hashicorp/terraform/issues/3615)) - * provider/aws: Fix policy encoding issue with SNS Topics ([#3700](https://github.com/hashicorp/terraform/issues/3700)) - * provider/aws: Correctly export ARN in `aws_iam_saml_provider` ([#3827](https://github.com/hashicorp/terraform/issues/3827)) - * provider/aws: Fix issue deleting users who are attached to a group ([#4005](https://github.com/hashicorp/terraform/issues/4005)) - * provider/aws: Fix crash in Route53 Record if Zone not found ([#3945](https://github.com/hashicorp/terraform/issues/3945)) - * provider/aws: Retry deleting IAM Server Cert on dependency violation ([#3898](https://github.com/hashicorp/terraform/issues/3898)) - * provider/aws: Update Spot Instance request to provide connection information ([#3940](https://github.com/hashicorp/terraform/issues/3940)) - * provider/aws: Fix typo in error checking for IAM Policy Attachments ([#3970](https://github.com/hashicorp/terraform/issues/3970)) - * provider/aws: Fix issue with LB Cookie Stickiness and empty expiration period ([#3908](https://github.com/hashicorp/terraform/issues/3908)) - * provider/aws: Tolerate ElastiCache clusters being deleted outside Terraform ([#3767](https://github.com/hashicorp/terraform/issues/3767)) - * provider/aws: Downcase Route 53 
record names in state file to match API output ([#3574](https://github.com/hashicorp/terraform/issues/3574)) - * provider/aws: Fix issue that could occur if no ECS Cluster was found for a given name ([#3829](https://github.com/hashicorp/terraform/issues/3829)) - * provider/aws: Fix issue with SNS topic policy if omitted ([#3777](https://github.com/hashicorp/terraform/issues/3777)) - * provider/aws: Support scratch volumes in `aws_ecs_task_definition` ([#3810](https://github.com/hashicorp/terraform/issues/3810)) - * provider/aws: Treat `aws_ecs_service` w/ Status==INACTIVE as deleted ([#3828](https://github.com/hashicorp/terraform/issues/3828)) - * provider/aws: Expand ~ to homedir in `aws_s3_bucket_object.source` ([#3910](https://github.com/hashicorp/terraform/issues/3910)) - * provider/aws: Fix issue with updating the `aws_ecs_task_definition` where `aws_ecs_service` didn't wait for a new computed ARN ([#3924](https://github.com/hashicorp/terraform/issues/3924)) - * provider/aws: Prevent crashing when deleting `aws_ecs_service` that is already gone ([#3914](https://github.com/hashicorp/terraform/issues/3914)) - * provider/aws: Allow spaces in `aws_db_subnet_group.name` (undocumented in the API) ([#3955](https://github.com/hashicorp/terraform/issues/3955)) - * provider/aws: Make VPC ID required on subnets ([#4021](https://github.com/hashicorp/terraform/issues/4021)) - * provider/azure: Various bug fixes ([#3695](https://github.com/hashicorp/terraform/issues/3695)) - * provider/digitalocean: Fix issue preventing SSH fingerprints from working ([#3633](https://github.com/hashicorp/terraform/issues/3633)) - * provider/digitalocean: Fix the DigitalOcean Droplet 404 potential on refresh of state ([#3768](https://github.com/hashicorp/terraform/issues/3768)) - * provider/openstack: Fix several issues causing unresolvable diffs ([#3440](https://github.com/hashicorp/terraform/issues/3440)) - * provider/openstack: Safely delete security groups 
([#3696](https://github.com/hashicorp/terraform/issues/3696)) - * provider/openstack: Ignore order of `security_groups` in instance ([#3651](https://github.com/hashicorp/terraform/issues/3651)) - * provider/vsphere: Fix d.SetConnInfo error in case of a missing IP address ([#3636](https://github.com/hashicorp/terraform/issues/3636)) - * provider/openstack: Fix boot from volume ([#3206](https://github.com/hashicorp/terraform/issues/3206)) - * provider/openstack: Fix crashing when image is no longer accessible ([#2189](https://github.com/hashicorp/terraform/issues/2189)) - * provider/openstack: Better handling of network resource state changes ([#3712](https://github.com/hashicorp/terraform/issues/3712)) - * provider/openstack: Fix crashing when no security group is specified ([#3801](https://github.com/hashicorp/terraform/issues/3801)) - * provider/packet: Fix issue that could cause errors when provisioning many devices at once ([#3847](https://github.com/hashicorp/terraform/issues/3847)) - * provider/packet: Fix connection information for devices, allowing provisioners to run ([#3948](https://github.com/hashicorp/terraform/issues/3948)) - * provider/openstack: Fix issue preventing security group rules from being removed ([#3796](https://github.com/hashicorp/terraform/issues/3796)) - * provider/template: `template_file`: source contents instead of path ([#3909](https://github.com/hashicorp/terraform/issues/3909)) - -## 0.6.6 (October 23, 2015) - -FEATURES: - - * New interpolation functions: `cidrhost`, `cidrnetmask` and `cidrsubnet` ([#3127](https://github.com/hashicorp/terraform/issues/3127)) - -IMPROVEMENTS: - - * "forces new resource" now highlighted in plan output ([#3136](https://github.com/hashicorp/terraform/issues/3136)) - -BUG FIXES: - - * helper/schema: Better error message for assigning list/map to string ([#3009](https://github.com/hashicorp/terraform/issues/3009)) - * remote/state/atlas: Additional remote state conflict handling for semantically neutral 
state changes ([#3603](https://github.com/hashicorp/terraform/issues/3603)) - -## 0.6.5 (October 21, 2015) - -FEATURES: - - * **New resources: `aws_codedeploy_app` and `aws_codedeploy_deployment_group`** ([#2783](https://github.com/hashicorp/terraform/issues/2783)) - * New remote state backend: `etcd` ([#3487](https://github.com/hashicorp/terraform/issues/3487)) - * New interpolation functions: `upper` and `lower` ([#3558](https://github.com/hashicorp/terraform/issues/3558)) - -BUG FIXES: - - * core: Fix remote state conflicts caused by ambiguity in ordering of deeply nested modules ([#3573](https://github.com/hashicorp/terraform/issues/3573)) - * core: Fix remote state conflicts caused by state metadata differences ([#3569](https://github.com/hashicorp/terraform/issues/3569)) - * core: Avoid using http.DefaultClient ([#3532](https://github.com/hashicorp/terraform/issues/3532)) - -INTERNAL IMPROVEMENTS: - - * provider/digitalocean: use official Go client ([#3333](https://github.com/hashicorp/terraform/issues/3333)) - * core: extract module fetching to external library ([#3516](https://github.com/hashicorp/terraform/issues/3516)) - -## 0.6.4 (October 15, 2015) - -FEATURES: - - * **New provider: `rundeck`** ([#2412](https://github.com/hashicorp/terraform/issues/2412)) - * **New provider: `packet`** ([#2260](https://github.com/hashicorp/terraform/issues/2260)), ([#3472](https://github.com/hashicorp/terraform/issues/3472)) - * **New provider: `vsphere`**: Initial support for a VM resource ([#3419](https://github.com/hashicorp/terraform/issues/3419)) - * **New resource: `cloudstack_loadbalancer_rule`** ([#2934](https://github.com/hashicorp/terraform/issues/2934)) - * **New resource: `google_compute_project_metadata`** ([#3065](https://github.com/hashicorp/terraform/issues/3065)) - * **New resources: `aws_ami`, `aws_ami_copy`, `aws_ami_from_instance`** ([#2784](https://github.com/hashicorp/terraform/issues/2784)) - * **New resources: `aws_cloudwatch_log_group`** 
([#2415](https://github.com/hashicorp/terraform/issues/2415)) - * **New resource: `google_storage_bucket_object`** ([#3192](https://github.com/hashicorp/terraform/issues/3192)) - * **New resources: `google_compute_vpn_gateway`, `google_compute_vpn_tunnel`** ([#3213](https://github.com/hashicorp/terraform/issues/3213)) - * **New resources: `google_storage_bucket_acl`, `google_storage_object_acl`** ([#3272](https://github.com/hashicorp/terraform/issues/3272)) - * **New resource: `aws_iam_saml_provider`** ([#3156](https://github.com/hashicorp/terraform/issues/3156)) - * **New resources: `aws_efs_file_system` and `aws_efs_mount_target`** ([#2196](https://github.com/hashicorp/terraform/issues/2196)) - * **New resources: `aws_opsworks_*`** ([#2162](https://github.com/hashicorp/terraform/issues/2162)) - * **New resource: `aws_elasticsearch_domain`** ([#3443](https://github.com/hashicorp/terraform/issues/3443)) - * **New resource: `aws_directory_service_directory`** ([#3228](https://github.com/hashicorp/terraform/issues/3228)) - * **New resource: `aws_autoscaling_lifecycle_hook`** ([#3351](https://github.com/hashicorp/terraform/issues/3351)) - * **New resource: `aws_placement_group`** ([#3457](https://github.com/hashicorp/terraform/issues/3457)) - * **New resource: `aws_glacier_vault`** ([#3491](https://github.com/hashicorp/terraform/issues/3491)) - * **New lifecycle flag: `ignore_changes`** ([#2525](https://github.com/hashicorp/terraform/issues/2525)) - -IMPROVEMENTS: - - * core: Add a function to find the index of an element in a list. ([#2704](https://github.com/hashicorp/terraform/issues/2704)) - * core: Print all outputs when `terraform output` is called with no arguments ([#2920](https://github.com/hashicorp/terraform/issues/2920)) - * core: In plan output summary, count resource replacement as Add/Remove instead of Change ([#3173](https://github.com/hashicorp/terraform/issues/3173)) - * core: Add interpolation functions for base64 encoding and decoding. 
([#3325](https://github.com/hashicorp/terraform/issues/3325)) - * core: Expose parallelism as a CLI option instead of hard-coding the default of 10 ([#3365](https://github.com/hashicorp/terraform/issues/3365)) - * core: Add interpolation function `compact`, to remove empty elements from a list. ([#3239](https://github.com/hashicorp/terraform/issues/3239)), ([#3479](https://github.com/hashicorp/terraform/issues/3479)) - * core: Allow filtering of log output by level, using e.g. ``TF_LOG=INFO`` ([#3380](https://github.com/hashicorp/terraform/issues/3380)) - * provider/aws: Add `instance_initiated_shutdown_behavior` to AWS Instance ([#2887](https://github.com/hashicorp/terraform/issues/2887)) - * provider/aws: Support IAM role names (previously just ARNs) in `aws_ecs_service.iam_role` ([#3061](https://github.com/hashicorp/terraform/issues/3061)) - * provider/aws: Add update method to RDS Subnet groups, can modify subnets without recreating ([#3053](https://github.com/hashicorp/terraform/issues/3053)) - * provider/aws: Paginate notifications returned for ASG Notifications ([#3043](https://github.com/hashicorp/terraform/issues/3043)) - * provider/aws: Adds additional S3 Bucket Object inputs ([#3265](https://github.com/hashicorp/terraform/issues/3265)) - * provider/aws: add `ses_smtp_password` to `aws_iam_access_key` ([#3165](https://github.com/hashicorp/terraform/issues/3165)) - * provider/aws: read `iam_instance_profile` for `aws_instance` and save to state ([#3167](https://github.com/hashicorp/terraform/issues/3167)) - * provider/aws: allow `instance` to be computed in `aws_eip` ([#3036](https://github.com/hashicorp/terraform/issues/3036)) - * provider/aws: Add `versioning` option to `aws_s3_bucket` ([#2942](https://github.com/hashicorp/terraform/issues/2942)) - * provider/aws: Add `configuration_endpoint` to `aws_elasticache_cluster` ([#3250](https://github.com/hashicorp/terraform/issues/3250)) - * provider/aws: Add validation for 
`app_cookie_stickiness_policy.name` ([#3277](https://github.com/hashicorp/terraform/issues/3277)) - * provider/aws: Add validation for `db_parameter_group.name` ([#3279](https://github.com/hashicorp/terraform/issues/3279)) - * provider/aws: Set DynamoDB Table ARN after creation ([#3500](https://github.com/hashicorp/terraform/issues/3500)) - * provider/aws: `aws_s3_bucket_object` allows interpolated content to be set with new `content` attribute. ([#3200](https://github.com/hashicorp/terraform/issues/3200)) - * provider/aws: Allow tags for `aws_kinesis_stream` resource. ([#3397](https://github.com/hashicorp/terraform/issues/3397)) - * provider/aws: Configurable capacity waiting duration for ASGs ([#3191](https://github.com/hashicorp/terraform/issues/3191)) - * provider/aws: Allow non-persistent Spot Requests ([#3311](https://github.com/hashicorp/terraform/issues/3311)) - * provider/aws: Support tags for AWS DB subnet group ([#3138](https://github.com/hashicorp/terraform/issues/3138)) - * provider/cloudstack: Add `project` parameter to `cloudstack_vpc`, `cloudstack_network`, `cloudstack_ipaddress` and `cloudstack_disk` ([#3035](https://github.com/hashicorp/terraform/issues/3035)) - * provider/openstack: add functionality to attach FloatingIP to Port ([#1788](https://github.com/hashicorp/terraform/issues/1788)) - * provider/google: Can now do multi-region deployments without using multiple providers ([#3258](https://github.com/hashicorp/terraform/issues/3258)) - * remote/s3: Allow canned ACLs to be set on state objects. 
([#3233](https://github.com/hashicorp/terraform/issues/3233)) - * remote/s3: Remote state is stored in S3 with `Content-Type: application/json` ([#3385](https://github.com/hashicorp/terraform/issues/3385)) - -BUG FIXES: - - * core: Fix problems referencing list attributes in interpolations ([#2157](https://github.com/hashicorp/terraform/issues/2157)) - * core: don't error on computed value during input walk ([#2988](https://github.com/hashicorp/terraform/issues/2988)) - * core: Ignore missing variables during destroy phase ([#3393](https://github.com/hashicorp/terraform/issues/3393)) - * provider/google: Crashes with interface conversion in GCE Instance Template ([#3027](https://github.com/hashicorp/terraform/issues/3027)) - * provider/google: Convert int to int64 when building the GKE cluster.NodeConfig struct ([#2978](https://github.com/hashicorp/terraform/issues/2978)) - * provider/google: google_compute_instance_template.network_interface.network should be a URL ([#3226](https://github.com/hashicorp/terraform/issues/3226)) - * provider/aws: Retry creation of `aws_ecs_service` if IAM policy isn't ready yet ([#3061](https://github.com/hashicorp/terraform/issues/3061)) - * provider/aws: Fix issue with mixed capitalization for RDS Instances ([#3053](https://github.com/hashicorp/terraform/issues/3053)) - * provider/aws: Fix issue with RDS to allow major version upgrades ([#3053](https://github.com/hashicorp/terraform/issues/3053)) - * provider/aws: Fix shard_count in `aws_kinesis_stream` ([#2986](https://github.com/hashicorp/terraform/issues/2986)) - * provider/aws: Fix issue with `key_name` and using VPCs with spot instance requests ([#2954](https://github.com/hashicorp/terraform/issues/2954)) - * provider/aws: Fix unresolvable diffs coming from `aws_elasticache_cluster` names being downcased - by AWS ([#3120](https://github.com/hashicorp/terraform/issues/3120)) - * provider/aws: Read instance source_dest_check and save to state 
([#3152](https://github.com/hashicorp/terraform/issues/3152)) - * provider/aws: Allow `weight = 0` in Route53 records ([#3196](https://github.com/hashicorp/terraform/issues/3196)) - * provider/aws: Normalize aws_elasticache_cluster id to lowercase, allowing convergence. ([#3235](https://github.com/hashicorp/terraform/issues/3235)) - * provider/aws: Fix ValidateAccountId for IAM Instance Profiles ([#3313](https://github.com/hashicorp/terraform/issues/3313)) - * provider/aws: Update Security Group Rules to Version 2 ([#3019](https://github.com/hashicorp/terraform/issues/3019)) - * provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` ([#3470](https://github.com/hashicorp/terraform/issues/3470)) - * provider/aws: Fix force_delete on autoscaling groups ([#3485](https://github.com/hashicorp/terraform/issues/3485)) - * provider/aws: Fix crash with VPC Peering connections ([#3490](https://github.com/hashicorp/terraform/issues/3490)) - * provider/aws: fix bug with reading GSIs from dynamodb ([#3300](https://github.com/hashicorp/terraform/issues/3300)) - * provider/docker: Fix issue preventing private images from being referenced ([#2619](https://github.com/hashicorp/terraform/issues/2619)) - * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case ([#3284](https://github.com/hashicorp/terraform/issues/3284)) - * provider/openstack: add state 'downloading' to list of expected states in - `blockstorage_volume_v1` creation ([#2866](https://github.com/hashicorp/terraform/issues/2866)) - * provider/openstack: remove security groups (by name) before adding security - groups (by id) ([#2008](https://github.com/hashicorp/terraform/issues/2008)) - -INTERNAL IMPROVEMENTS: - - * core: Makefile target "plugin-dev" for building just one plugin. ([#3229](https://github.com/hashicorp/terraform/issues/3229)) - * helper/schema: Don't allow ``Update`` func if no attributes can actually be updated, per schema. 
([#3288](https://github.com/hashicorp/terraform/issues/3288)) - * helper/schema: Default hashing function for sets ([#3018](https://github.com/hashicorp/terraform/issues/3018)) - * helper/multierror: Remove in favor of [github.com/hashicorp/go-multierror](http://github.com/hashicorp/go-multierror). ([#3336](https://github.com/hashicorp/terraform/issues/3336)) - -## 0.6.3 (August 11, 2015) - -BUG FIXES: - - * core: Skip all descendants after error, not just children; helps prevent confusing - additional errors/crashes after initial failure ([#2963](https://github.com/hashicorp/terraform/issues/2963)) - * core: fix deadlock possibility when both a module and a dependent resource are - removed in the same run ([#2968](https://github.com/hashicorp/terraform/issues/2968)) - * provider/aws: Fix issue with authenticating when using IAM profiles ([#2959](https://github.com/hashicorp/terraform/issues/2959)) - -## 0.6.2 (August 6, 2015) - -FEATURES: - - * **New resource: `google_compute_instance_group_manager`** ([#2868](https://github.com/hashicorp/terraform/issues/2868)) - * **New resource: `google_compute_autoscaler`** ([#2868](https://github.com/hashicorp/terraform/issues/2868)) - * **New resource: `aws_s3_bucket_object`** ([#2898](https://github.com/hashicorp/terraform/issues/2898)) - -IMPROVEMENTS: - - * core: Add resource IDs to errors coming from `apply`/`refresh` ([#2815](https://github.com/hashicorp/terraform/issues/2815)) - * provider/aws: Validate credentials before walking the graph ([#2730](https://github.com/hashicorp/terraform/issues/2730)) - * provider/aws: Added website_domain for S3 buckets ([#2210](https://github.com/hashicorp/terraform/issues/2210)) - * provider/aws: ELB names are now optional, and generated by Terraform if omitted ([#2571](https://github.com/hashicorp/terraform/issues/2571)) - * provider/aws: Downcase RDS engine names to prevent continuous diffs ([#2745](https://github.com/hashicorp/terraform/issues/2745)) - * provider/aws: Added 
`source_dest_check` attribute to the aws_network_interface ([#2741](https://github.com/hashicorp/terraform/issues/2741)) - * provider/aws: Clean up externally removed Launch Configurations ([#2806](https://github.com/hashicorp/terraform/issues/2806)) - * provider/aws: Allow configuration of the DynamoDB Endpoint ([#2825](https://github.com/hashicorp/terraform/issues/2825)) - * provider/aws: Compute private ip addresses of ENIs if they are not specified ([#2743](https://github.com/hashicorp/terraform/issues/2743)) - * provider/aws: Add `arn` attribute for DynamoDB tables ([#2924](https://github.com/hashicorp/terraform/issues/2924)) - * provider/aws: Fail silently when account validation fails while from instance profile ([#3001](https://github.com/hashicorp/terraform/issues/3001)) - * provider/azure: Allow `settings_file` to accept XML string ([#2922](https://github.com/hashicorp/terraform/issues/2922)) - * provider/azure: Provide a simpler error when using a Platform Image without a - Storage Service ([#2861](https://github.com/hashicorp/terraform/issues/2861)) - * provider/google: `account_file` is now expected to be JSON. Paths are still supported for - backwards compatibility. 
([#2839](https://github.com/hashicorp/terraform/issues/2839)) - -BUG FIXES: - - * core: Prevent error duplication in `apply` ([#2815](https://github.com/hashicorp/terraform/issues/2815)) - * core: Fix crash when a provider validation adds a warning ([#2878](https://github.com/hashicorp/terraform/issues/2878)) - * provider/aws: Fix issue with toggling monitoring in AWS Instances ([#2794](https://github.com/hashicorp/terraform/issues/2794)) - * provider/aws: Fix issue with Spot Instance Requests and cancellation ([#2805](https://github.com/hashicorp/terraform/issues/2805)) - * provider/aws: Fix issue with checking for ElastiCache cluster cache node status ([#2842](https://github.com/hashicorp/terraform/issues/2842)) - * provider/aws: Fix issue when unable to find a Root Block Device name of an Instance Backed - AMI ([#2646](https://github.com/hashicorp/terraform/issues/2646)) - * provider/dnsimple: Domain and type should force new records ([#2777](https://github.com/hashicorp/terraform/issues/2777)) - * provider/aws: Fix issue with IAM Server Certificates and Chains ([#2871](https://github.com/hashicorp/terraform/issues/2871)) - * provider/aws: Fix issue with IAM Server Certificates when using `path` ([#2871](https://github.com/hashicorp/terraform/issues/2871)) - * provider/aws: Fix issue in Security Group Rules when the Security Group is not found ([#2897](https://github.com/hashicorp/terraform/issues/2897)) - * provider/aws: allow external ENI attachments ([#2943](https://github.com/hashicorp/terraform/issues/2943)) - * provider/aws: Fix issue with S3 Buckets, and throwing an error when not found ([#2925](https://github.com/hashicorp/terraform/issues/2925)) - -## 0.6.1 (July 20, 2015) - -FEATURES: - - * **New resource: `google_container_cluster`** ([#2357](https://github.com/hashicorp/terraform/issues/2357)) - * **New resource: `aws_vpc_endpoint`** ([#2695](https://github.com/hashicorp/terraform/issues/2695)) - -IMPROVEMENTS: - - * connection/ssh: Print SSH bastion 
host details to output ([#2684](https://github.com/hashicorp/terraform/issues/2684)) - * provider/aws: Create RDS databases from snapshots ([#2062](https://github.com/hashicorp/terraform/issues/2062)) - * provider/aws: Add support for restoring from Redis backup stored in S3 ([#2634](https://github.com/hashicorp/terraform/issues/2634)) - * provider/aws: Add `maintenance_window` to ElastiCache cluster ([#2642](https://github.com/hashicorp/terraform/issues/2642)) - * provider/aws: Availability Zones are optional when specifying VPC Zone Identifiers in - Auto Scaling Groups updates ([#2724](https://github.com/hashicorp/terraform/issues/2724)) - * provider/google: Add metadata_startup_script to google_compute_instance ([#2375](https://github.com/hashicorp/terraform/issues/2375)) - -BUG FIXES: - - * core: Don't prompt for variables with defaults ([#2613](https://github.com/hashicorp/terraform/issues/2613)) - * core: Return correct number of planned updates ([#2620](https://github.com/hashicorp/terraform/issues/2620)) - * core: Fix "provider not found" error that can occur while running - a destroy plan with grandchildren modules ([#2755](https://github.com/hashicorp/terraform/issues/2755)) - * core: Fix UUID showing up in diff for computed splat (`foo.*.bar`) - variables. 
([#2788](https://github.com/hashicorp/terraform/issues/2788)) - * core: Orphan modules that contain no resources (only other modules) - are properly destroyed up to arbitrary depth ([#2786](https://github.com/hashicorp/terraform/issues/2786)) - * core: Fix "attribute not available" during destroy plans in - cases where the parameter is passed between modules ([#2775](https://github.com/hashicorp/terraform/issues/2775)) - * core: Record schema version when destroy fails ([#2923](https://github.com/hashicorp/terraform/issues/2923)) - * connection/ssh: fix issue on machines with an SSH Agent available - preventing `key_file` from being read without explicitly - setting `agent = false` ([#2615](https://github.com/hashicorp/terraform/issues/2615)) - * provider/aws: Allow uppercase characters in `aws_elb.name` ([#2580](https://github.com/hashicorp/terraform/issues/2580)) - * provider/aws: Allow underscores in `aws_db_subnet_group.name` (undocumented by AWS) ([#2604](https://github.com/hashicorp/terraform/issues/2604)) - * provider/aws: Allow dots in `aws_db_subnet_group.name` (undocumented by AWS) ([#2665](https://github.com/hashicorp/terraform/issues/2665)) - * provider/aws: Fix issue with pending Spot Instance requests ([#2640](https://github.com/hashicorp/terraform/issues/2640)) - * provider/aws: Fix issue in AWS Classic environment with referencing external - Security Groups ([#2644](https://github.com/hashicorp/terraform/issues/2644)) - * provider/aws: Bump internet gateway detach timeout ([#2669](https://github.com/hashicorp/terraform/issues/2669)) - * provider/aws: Fix issue with detecting differences in DB Parameters ([#2728](https://github.com/hashicorp/terraform/issues/2728)) - * provider/aws: `ecs_cluster` rename (recreation) and deletion is handled correctly ([#2698](https://github.com/hashicorp/terraform/issues/2698)) - * provider/aws: `aws_route_table` ignores routes generated for VPC endpoints ([#2695](https://github.com/hashicorp/terraform/issues/2695)) - 
* provider/aws: Fix issue with Launch Configurations and enable_monitoring ([#2735](https://github.com/hashicorp/terraform/issues/2735)) - * provider/openstack: allow empty api_key and endpoint_type ([#2626](https://github.com/hashicorp/terraform/issues/2626)) - * provisioner/chef: Fix permission denied error with ohai hints ([#2781](https://github.com/hashicorp/terraform/issues/2781)) - -## 0.6.0 (June 30, 2015) - -BACKWARDS INCOMPATIBILITIES: - - * command/push: If a variable is already set within Atlas, it won't be - updated unless the `-overwrite` flag is present ([#2373](https://github.com/hashicorp/terraform/issues/2373)) - * connection/ssh: The `agent` field now defaults to `true` if - the `SSH_AUTH_SOCK` environment variable is present. In other words, - `ssh-agent` support is now opt-out instead of opt-in functionality. ([#2408](https://github.com/hashicorp/terraform/issues/2408)) - * provider/aws: If you were setting access and secret key to blank ("") - to force Terraform to load credentials from another source such as the - EC2 role, this will now error. Remove the blank lines and Terraform - will load from other sources. 
- * `concat()` has been repurposed to combine lists instead of strings (old behavior - of joining strings is maintained in this version but is deprecated, strings - should be combined using interpolation syntax, like "${var.foo}${var.bar}") - ([#1790](https://github.com/hashicorp/terraform/issues/1790)) - -FEATURES: - - * **New provider: `azure`** ([#2052](https://github.com/hashicorp/terraform/issues/2052), [#2053](https://github.com/hashicorp/terraform/issues/2053), [#2372](https://github.com/hashicorp/terraform/issues/2372), [#2380](https://github.com/hashicorp/terraform/issues/2380), [#2394](https://github.com/hashicorp/terraform/issues/2394), [#2515](https://github.com/hashicorp/terraform/issues/2515), [#2530](https://github.com/hashicorp/terraform/issues/2530), [#2562](https://github.com/hashicorp/terraform/issues/2562)) - * **New resource: `aws_autoscaling_notification`** ([#2197](https://github.com/hashicorp/terraform/issues/2197)) - * **New resource: `aws_autoscaling_policy`** ([#2201](https://github.com/hashicorp/terraform/issues/2201)) - * **New resource: `aws_cloudwatch_metric_alarm`** ([#2201](https://github.com/hashicorp/terraform/issues/2201)) - * **New resource: `aws_dynamodb_table`** ([#2121](https://github.com/hashicorp/terraform/issues/2121)) - * **New resource: `aws_ecs_cluster`** ([#1803](https://github.com/hashicorp/terraform/issues/1803)) - * **New resource: `aws_ecs_service`** ([#1803](https://github.com/hashicorp/terraform/issues/1803)) - * **New resource: `aws_ecs_task_definition`** ([#1803](https://github.com/hashicorp/terraform/issues/1803), [#2402](https://github.com/hashicorp/terraform/issues/2402)) - * **New resource: `aws_elasticache_parameter_group`** ([#2276](https://github.com/hashicorp/terraform/issues/2276)) - * **New resource: `aws_flow_log`** ([#2384](https://github.com/hashicorp/terraform/issues/2384)) - * **New resource: `aws_iam_group_association`** ([#2273](https://github.com/hashicorp/terraform/issues/2273)) - * **New 
resource: `aws_iam_policy_attachment`** ([#2395](https://github.com/hashicorp/terraform/issues/2395)) - * **New resource: `aws_lambda_function`** ([#2170](https://github.com/hashicorp/terraform/issues/2170)) - * **New resource: `aws_route53_delegation_set`** ([#1999](https://github.com/hashicorp/terraform/issues/1999)) - * **New resource: `aws_route53_health_check`** ([#2226](https://github.com/hashicorp/terraform/issues/2226)) - * **New resource: `aws_spot_instance_request`** ([#2263](https://github.com/hashicorp/terraform/issues/2263)) - * **New resource: `cloudstack_ssh_keypair`** ([#2004](https://github.com/hashicorp/terraform/issues/2004)) - * **New remote state backend: `swift`**: You can now store remote state in - an OpenStack Swift. ([#2254](https://github.com/hashicorp/terraform/issues/2254)) - * command/output: support display of module outputs ([#2102](https://github.com/hashicorp/terraform/issues/2102)) - * core: `keys()` and `values()` funcs for map variables ([#2198](https://github.com/hashicorp/terraform/issues/2198)) - * connection/ssh: SSH bastion host support and ssh-agent forwarding ([#2425](https://github.com/hashicorp/terraform/issues/2425)) - -IMPROVEMENTS: - - * core: HTTP remote state now accepts `skip_cert_verification` - option to ignore TLS cert verification. 
([#2214](https://github.com/hashicorp/terraform/issues/2214)) - * core: S3 remote state now accepts the 'encrypt' option for SSE ([#2405](https://github.com/hashicorp/terraform/issues/2405)) - * core: `plan` now reports sum of resources to be changed/created/destroyed ([#2458](https://github.com/hashicorp/terraform/issues/2458)) - * core: Change string list representation so we can distinguish empty, single - element lists ([#2504](https://github.com/hashicorp/terraform/issues/2504)) - * core: Properly close provider and provisioner plugin connections ([#2406](https://github.com/hashicorp/terraform/issues/2406), [#2527](https://github.com/hashicorp/terraform/issues/2527)) - * provider/aws: AutoScaling groups now support updating Load Balancers without - recreation ([#2472](https://github.com/hashicorp/terraform/issues/2472)) - * provider/aws: Allow more in-place updates for ElastiCache cluster without recreating - ([#2469](https://github.com/hashicorp/terraform/issues/2469)) - * provider/aws: ElastiCache Subnet Groups can be updated - without destroying first ([#2191](https://github.com/hashicorp/terraform/issues/2191)) - * provider/aws: Normalize `certificate_chain` in `aws_iam_server_certificate` to - prevent unnecessary replacement. 
([#2411](https://github.com/hashicorp/terraform/issues/2411)) - * provider/aws: `aws_instance` supports `monitoring` ([#2489](https://github.com/hashicorp/terraform/issues/2489)) - * provider/aws: `aws_launch_configuration` now supports `enable_monitoring` ([#2410](https://github.com/hashicorp/terraform/issues/2410)) - * provider/aws: Show outputs after `terraform refresh` ([#2347](https://github.com/hashicorp/terraform/issues/2347)) - * provider/aws: Add backoff/throttling during DynamoDB creation ([#2462](https://github.com/hashicorp/terraform/issues/2462)) - * provider/aws: Add validation for aws_vpc.cidr_block ([#2514](https://github.com/hashicorp/terraform/issues/2514)) - * provider/aws: Add validation for aws_db_subnet_group.name ([#2513](https://github.com/hashicorp/terraform/issues/2513)) - * provider/aws: Add validation for aws_db_instance.identifier ([#2516](https://github.com/hashicorp/terraform/issues/2516)) - * provider/aws: Add validation for aws_elb.name ([#2517](https://github.com/hashicorp/terraform/issues/2517)) - * provider/aws: Add validation for aws_security_group (name+description) ([#2518](https://github.com/hashicorp/terraform/issues/2518)) - * provider/aws: Add validation for aws_launch_configuration ([#2519](https://github.com/hashicorp/terraform/issues/2519)) - * provider/aws: Add validation for aws_autoscaling_group.name ([#2520](https://github.com/hashicorp/terraform/issues/2520)) - * provider/aws: Add validation for aws_iam_role.name ([#2521](https://github.com/hashicorp/terraform/issues/2521)) - * provider/aws: Add validation for aws_iam_role_policy.name ([#2552](https://github.com/hashicorp/terraform/issues/2552)) - * provider/aws: Add validation for aws_iam_instance_profile.name ([#2553](https://github.com/hashicorp/terraform/issues/2553)) - * provider/aws: aws_auto_scaling_group.default_cooldown no longer requires - resource replacement ([#2510](https://github.com/hashicorp/terraform/issues/2510)) - * provider/aws: add AH and ESP 
protocol integers ([#2321](https://github.com/hashicorp/terraform/issues/2321)) - * provider/docker: `docker_container` has the `privileged` - option. ([#2227](https://github.com/hashicorp/terraform/issues/2227)) - * provider/openstack: allow `OS_AUTH_TOKEN` environment variable - to set the openstack `api_key` field ([#2234](https://github.com/hashicorp/terraform/issues/2234)) - * provider/openstack: Can now configure endpoint type (public, admin, - internal) ([#2262](https://github.com/hashicorp/terraform/issues/2262)) - * provider/cloudstack: `cloudstack_instance` now supports projects ([#2115](https://github.com/hashicorp/terraform/issues/2115)) - * provisioner/chef: Added a `os_type` to specifically specify the target OS ([#2483](https://github.com/hashicorp/terraform/issues/2483)) - * provisioner/chef: Added a `ohai_hints` option to upload hint files ([#2487](https://github.com/hashicorp/terraform/issues/2487)) - -BUG FIXES: - - * core: lifecycle `prevent_destroy` can be any value that can be - coerced into a bool ([#2268](https://github.com/hashicorp/terraform/issues/2268)) - * core: matching provider types in sibling modules won't override - each other's config. 
([#2464](https://github.com/hashicorp/terraform/issues/2464)) - * core: computed provider configurations now properly validate ([#2457](https://github.com/hashicorp/terraform/issues/2457)) - * core: orphan (commented out) resource dependencies are destroyed in - the correct order ([#2453](https://github.com/hashicorp/terraform/issues/2453)) - * core: validate object types in plugins are actually objects ([#2450](https://github.com/hashicorp/terraform/issues/2450)) - * core: fix `-no-color` flag in subcommands ([#2414](https://github.com/hashicorp/terraform/issues/2414)) - * core: Fix error of 'attribute not found for variable' when a computed - resource attribute is used as a parameter to a module ([#2477](https://github.com/hashicorp/terraform/issues/2477)) - * core: moduled orphans will properly inherit provider configs ([#2476](https://github.com/hashicorp/terraform/issues/2476)) - * core: modules with provider aliases work properly if the parent - doesn't implement those aliases ([#2475](https://github.com/hashicorp/terraform/issues/2475)) - * core: unknown resource attributes passed in as parameters to modules - now error ([#2478](https://github.com/hashicorp/terraform/issues/2478)) - * core: better error messages for missing variables ([#2479](https://github.com/hashicorp/terraform/issues/2479)) - * core: removed set items now properly appear in diffs and applies ([#2507](https://github.com/hashicorp/terraform/issues/2507)) - * core: '*' will not be added as part of the variable name when you - attempt multiplication without a space ([#2505](https://github.com/hashicorp/terraform/issues/2505)) - * core: fix target dependency calculation across module boundaries ([#2555](https://github.com/hashicorp/terraform/issues/2555)) - * command/*: fixed bug where variable input was not asked for unset - vars if terraform.tfvars existed ([#2502](https://github.com/hashicorp/terraform/issues/2502)) - * command/apply: prevent output duplication when reporting errors 
([#2267](https://github.com/hashicorp/terraform/issues/2267)) - * command/apply: destroyed orphan resources are properly counted ([#2506](https://github.com/hashicorp/terraform/issues/2506)) - * provider/aws: loading credentials from the environment (vars, EC2 role, - etc.) is more robust and will not ask for credentials from stdin ([#1841](https://github.com/hashicorp/terraform/issues/1841)) - * provider/aws: fix panic when route has no `cidr_block` ([#2215](https://github.com/hashicorp/terraform/issues/2215)) - * provider/aws: fix issue preventing destruction of IAM Roles ([#2177](https://github.com/hashicorp/terraform/issues/2177)) - * provider/aws: fix issue where Security Group Rules could collide and fail - to save to the state file correctly ([#2376](https://github.com/hashicorp/terraform/issues/2376)) - * provider/aws: fix issue preventing destruction of self-referencing Security - Group Rules ([#2305](https://github.com/hashicorp/terraform/issues/2305)) - * provider/aws: fix issue causing perpetual diff on ELB listeners - when non-lowercase protocol strings were used ([#2246](https://github.com/hashicorp/terraform/issues/2246)) - * provider/aws: corrected frankfurt S3 website region ([#2259](https://github.com/hashicorp/terraform/issues/2259)) - * provider/aws: `aws_elasticache_cluster` port is required ([#2160](https://github.com/hashicorp/terraform/issues/2160)) - * provider/aws: Handle AMIs where RootBlockDevice does not appear in the - BlockDeviceMapping, preventing root_block_device from working ([#2271](https://github.com/hashicorp/terraform/issues/2271)) - * provider/aws: fix `terraform show` with remote state ([#2371](https://github.com/hashicorp/terraform/issues/2371)) - * provider/aws: detect `instance_type` drift on `aws_instance` ([#2374](https://github.com/hashicorp/terraform/issues/2374)) - * provider/aws: fix crash when `security_group_rule` referenced non-existent - security group ([#2434](https://github.com/hashicorp/terraform/issues/2434)) 
- * provider/aws: `aws_launch_configuration` retries if IAM instance - profile is not ready yet. ([#2452](https://github.com/hashicorp/terraform/issues/2452)) - * provider/aws: `fqdn` is populated during creation for `aws_route53_record` ([#2528](https://github.com/hashicorp/terraform/issues/2528)) - * provider/aws: retry VPC delete on DependencyViolation due to eventual - consistency ([#2532](https://github.com/hashicorp/terraform/issues/2532)) - * provider/aws: VPC peering connections in "failed" state are deleted ([#2544](https://github.com/hashicorp/terraform/issues/2544)) - * provider/aws: EIP deletion works if it was manually disassociated ([#2543](https://github.com/hashicorp/terraform/issues/2543)) - * provider/aws: `elasticache_subnet_group.subnet_ids` is now a required argument ([#2534](https://github.com/hashicorp/terraform/issues/2534)) - * provider/aws: handle nil response from VPN connection describes ([#2533](https://github.com/hashicorp/terraform/issues/2533)) - * provider/cloudflare: manual record deletion doesn't cause error ([#2545](https://github.com/hashicorp/terraform/issues/2545)) - * provider/digitalocean: handle case where droplet is deleted outside of - terraform ([#2497](https://github.com/hashicorp/terraform/issues/2497)) - * provider/dme: No longer an error if record deleted manually ([#2546](https://github.com/hashicorp/terraform/issues/2546)) - * provider/docker: Fix issues when using containers with links ([#2327](https://github.com/hashicorp/terraform/issues/2327)) - * provider/openstack: fix panic case if API returns nil network ([#2448](https://github.com/hashicorp/terraform/issues/2448)) - * provider/template: fix issue causing "unknown variable" rendering errors - when an existing set of template variables is changed ([#2386](https://github.com/hashicorp/terraform/issues/2386)) - * provisioner/chef: improve the decoding logic to prevent parameter not found errors ([#2206](https://github.com/hashicorp/terraform/issues/2206)) - 
-## 0.5.3 (June 1, 2015) - -IMPROVEMENTS: - - * **New resource: `aws_kinesis_stream`** ([#2110](https://github.com/hashicorp/terraform/issues/2110)) - * **New resource: `aws_iam_server_certificate`** ([#2086](https://github.com/hashicorp/terraform/issues/2086)) - * **New resource: `aws_sqs_queue`** ([#1939](https://github.com/hashicorp/terraform/issues/1939)) - * **New resource: `aws_sns_topic`** ([#1974](https://github.com/hashicorp/terraform/issues/1974)) - * **New resource: `aws_sns_topic_subscription`** ([#1974](https://github.com/hashicorp/terraform/issues/1974)) - * **New resource: `aws_volume_attachment`** ([#2050](https://github.com/hashicorp/terraform/issues/2050)) - * **New resource: `google_storage_bucket`** ([#2060](https://github.com/hashicorp/terraform/issues/2060)) - * provider/aws: support ec2 termination protection ([#1988](https://github.com/hashicorp/terraform/issues/1988)) - * provider/aws: support for RDS Read Replicas ([#1946](https://github.com/hashicorp/terraform/issues/1946)) - * provider/aws: `aws_s3_bucket` add support for `policy` ([#1992](https://github.com/hashicorp/terraform/issues/1992)) - * provider/aws: `aws_ebs_volume` add support for `tags` ([#2135](https://github.com/hashicorp/terraform/issues/2135)) - * provider/aws: `aws_elasticache_cluster` Confirm node status before reporting - available - * provider/aws: `aws_network_acl` Add support for ICMP Protocol ([#2148](https://github.com/hashicorp/terraform/issues/2148)) - * provider/aws: New `force_destroy` parameter for S3 buckets, to destroy - Buckets that contain objects ([#2007](https://github.com/hashicorp/terraform/issues/2007)) - * provider/aws: switching `health_check_type` on ASGs no longer requires - resource refresh ([#2147](https://github.com/hashicorp/terraform/issues/2147)) - * provider/aws: ignore empty `vpc_security_group_ids` on `aws_instance` ([#2311](https://github.com/hashicorp/terraform/issues/2311)) - -BUG FIXES: - - * provider/aws: Correctly handle AWS 
keypairs which no longer exist ([#2032](https://github.com/hashicorp/terraform/issues/2032)) - * provider/aws: Fix issue with restoring an Instance from snapshot ID ([#2120](https://github.com/hashicorp/terraform/issues/2120)) - * provider/template: store relative path in the state ([#2038](https://github.com/hashicorp/terraform/issues/2038)) - * provisioner/chef: fix interpolation in the Chef provisioner ([#2168](https://github.com/hashicorp/terraform/issues/2168)) - * provisioner/remote-exec: Don't prepend shebang on scripts that already - have one ([#2041](https://github.com/hashicorp/terraform/issues/2041)) - -## 0.5.2 (May 15, 2015) - -FEATURES: - - * **Chef provisioning**: You can now provision new hosts (both Linux and - Windows) with [Chef](https://chef.io) using a native provisioner ([#1868](https://github.com/hashicorp/terraform/issues/1868)) - -IMPROVEMENTS: - - * **New config function: `formatlist`** - Format lists in a similar way to `format`. - Useful for creating URLs from a list of IPs. 
([#1829](https://github.com/hashicorp/terraform/issues/1829)) - * **New resource: `aws_route53_zone_association`** - * provider/aws: `aws_autoscaling_group` can wait for capacity in ELB - via `min_elb_capacity` ([#1970](https://github.com/hashicorp/terraform/issues/1970)) - * provider/aws: `aws_db_instances` supports `license_model` ([#1966](https://github.com/hashicorp/terraform/issues/1966)) - * provider/aws: `aws_elasticache_cluster` add support for Tags ([#1965](https://github.com/hashicorp/terraform/issues/1965)) - * provider/aws: `aws_network_acl` Network ACLs can be applied to multiple subnets ([#1931](https://github.com/hashicorp/terraform/issues/1931)) - * provider/aws: `aws_s3_bucket` exports `hosted_zone_id` and `region` ([#1865](https://github.com/hashicorp/terraform/issues/1865)) - * provider/aws: `aws_s3_bucket` add support for website `redirect_all_requests_to` ([#1909](https://github.com/hashicorp/terraform/issues/1909)) - * provider/aws: `aws_route53_record` exports `fqdn` ([#1847](https://github.com/hashicorp/terraform/issues/1847)) - * provider/aws: `aws_route53_zone` can create private hosted zones ([#1526](https://github.com/hashicorp/terraform/issues/1526)) - * provider/google: `google_compute_instance` `scratch` attribute added ([#1920](https://github.com/hashicorp/terraform/issues/1920)) - -BUG FIXES: - - * core: fix "resource not found" for interpolation issues with modules - * core: fix unflattenable error for orphans ([#1922](https://github.com/hashicorp/terraform/issues/1922)) - * core: fix deadlock with create-before-destroy + modules ([#1949](https://github.com/hashicorp/terraform/issues/1949)) - * core: fix "no roots found" error with create-before-destroy ([#1953](https://github.com/hashicorp/terraform/issues/1953)) - * core: variables set with environment variables won't validate as - not set without a default ([#1930](https://github.com/hashicorp/terraform/issues/1930)) - * core: resources with a blank ID in the state are now 
assumed to not exist ([#1905](https://github.com/hashicorp/terraform/issues/1905)) - * command/push: local vars override remote ones ([#1881](https://github.com/hashicorp/terraform/issues/1881)) - * provider/aws: Mark `aws_security_group` description as `ForceNew` ([#1871](https://github.com/hashicorp/terraform/issues/1871)) - * provider/aws: `aws_db_instance` ARN value is correct ([#1910](https://github.com/hashicorp/terraform/issues/1910)) - * provider/aws: `aws_db_instance` only submit modify request if there - is a change. ([#1906](https://github.com/hashicorp/terraform/issues/1906)) - * provider/aws: `aws_elasticache_cluster` export missing information on cluster nodes ([#1965](https://github.com/hashicorp/terraform/issues/1965)) - * provider/aws: bad AMI on a launch configuration won't block refresh ([#1901](https://github.com/hashicorp/terraform/issues/1901)) - * provider/aws: `aws_security_group` + `aws_subnet` - destroy timeout increased - to prevent DependencyViolation errors. ([#1886](https://github.com/hashicorp/terraform/issues/1886)) - * provider/google: `google_compute_instance` Local SSDs no-longer cause crash - ([#1088](https://github.com/hashicorp/terraform/issues/1088)) - * provider/google: `google_http_health_check` Defaults now driven from Terraform, - avoids errors on update ([#1894](https://github.com/hashicorp/terraform/issues/1894)) - * provider/google: `google_compute_template` Update Instance Template network - definition to match changes to Instance ([#980](https://github.com/hashicorp/terraform/issues/980)) - * provider/template: Fix infinite diff ([#1898](https://github.com/hashicorp/terraform/issues/1898)) - -## 0.5.1 (never released) - -This version was never released since we accidentally skipped it! - -## 0.5.0 (May 7, 2015) - -BACKWARDS INCOMPATIBILITIES: - - * provider/aws: Terraform now remove the default egress rule created by AWS in - a new security group. 
- -FEATURES: - - * **Multi-provider (a.k.a multi-region)**: Multiple instances of a single - provider can be configured so resources can apply to different settings. - As an example, this allows Terraform to manage multiple regions with AWS. - * **Environmental variables to set variables**: Environment variables can be - used to set variables. The environment variables must be in the format - `TF_VAR_name` and this will be checked last for a value. - * **New remote state backend: `s3`**: You can now store remote state in - an S3 bucket. ([#1723](https://github.com/hashicorp/terraform/issues/1723)) - * **Automatic AWS retries**: This release includes a lot of improvement - around automatic retries of transient errors in AWS. The number of - retry attempts is also configurable. - * **Templates**: A new `template_file` resource allows long strings needing - variable interpolation to be moved into files. ([#1778](https://github.com/hashicorp/terraform/issues/1778)) - * **Provision with WinRM**: Provisioners can now run remote commands on - Windows hosts. ([#1483](https://github.com/hashicorp/terraform/issues/1483)) - -IMPROVEMENTS: - - * **New config function: `length`** - Get the length of a string or a list. - Useful in conjunction with `split`. 
([#1495](https://github.com/hashicorp/terraform/issues/1495)) - * **New resource: `aws_app_cookie_stickiness_policy`** - * **New resource: `aws_customer_gateway`** - * **New resource: `aws_ebs_volume`** - * **New resource: `aws_elasticache_cluster`** - * **New resource: `aws_elasticache_security_group`** - * **New resource: `aws_elasticache_subnet_group`** - * **New resource: `aws_iam_access_key`** - * **New resource: `aws_iam_group_policy`** - * **New resource: `aws_iam_group`** - * **New resource: `aws_iam_instance_profile`** - * **New resource: `aws_iam_policy`** - * **New resource: `aws_iam_role_policy`** - * **New resource: `aws_iam_role`** - * **New resource: `aws_iam_user_policy`** - * **New resource: `aws_iam_user`** - * **New resource: `aws_lb_cookie_stickiness_policy`** - * **New resource: `aws_proxy_protocol_policy`** - * **New resource: `aws_security_group_rule`** - * **New resource: `aws_vpc_dhcp_options_association`** - * **New resource: `aws_vpc_dhcp_options`** - * **New resource: `aws_vpn_connection_route`** - * **New resource: `google_dns_managed_zone`** - * **New resource: `google_dns_record_set`** - * **Migrate to upstream AWS SDK:** Migrate the AWS provider to - [awslabs/aws-sdk-go](https://github.com/awslabs/aws-sdk-go), - the official `awslabs` library. Previously we had forked the library for - stability while `awslabs` refactored. Now that work has completed, and we've - migrated back to the upstream version. - * core: Improve error message on diff mismatch ([#1501](https://github.com/hashicorp/terraform/issues/1501)) - * provisioner/file: expand `~` in source path ([#1569](https://github.com/hashicorp/terraform/issues/1569)) - * provider/aws: Better retry logic, now retries up to 11 times by default - with exponentional backoff. This number is configurable. 
([#1787](https://github.com/hashicorp/terraform/issues/1787)) - * provider/aws: Improved credential detection ([#1470](https://github.com/hashicorp/terraform/issues/1470)) - * provider/aws: Can specify a `token` via the config file ([#1601](https://github.com/hashicorp/terraform/issues/1601)) - * provider/aws: Added new `vpc_security_group_ids` attribute for AWS - Instances. If using a VPC, you can now modify the security groups for that - Instance without destroying it ([#1539](https://github.com/hashicorp/terraform/issues/1539)) - * provider/aws: White or blacklist account IDs that can be used to - protect against accidents. ([#1595](https://github.com/hashicorp/terraform/issues/1595)) - * provider/aws: Add a subset of IAM resources ([#939](https://github.com/hashicorp/terraform/issues/939)) - * provider/aws: `aws_autoscaling_group` retries deletes through "in progress" - errors ([#1840](https://github.com/hashicorp/terraform/issues/1840)) - * provider/aws: `aws_autoscaling_group` waits for healthy capacity during - ASG creation ([#1839](https://github.com/hashicorp/terraform/issues/1839)) - * provider/aws: `aws_instance` supports placement groups ([#1358](https://github.com/hashicorp/terraform/issues/1358)) - * provider/aws: `aws_eip` supports network interface attachment ([#1681](https://github.com/hashicorp/terraform/issues/1681)) - * provider/aws: `aws_elb` supports in-place changing of listeners ([#1619](https://github.com/hashicorp/terraform/issues/1619)) - * provider/aws: `aws_elb` supports connection draining settings ([#1502](https://github.com/hashicorp/terraform/issues/1502)) - * provider/aws: `aws_elb` increase default idle timeout to 60s ([#1646](https://github.com/hashicorp/terraform/issues/1646)) - * provider/aws: `aws_key_pair` name can be omitted and generated ([#1751](https://github.com/hashicorp/terraform/issues/1751)) - * provider/aws: `aws_network_acl` improved validation for network ACL ports - and protocols 
([#1798](https://github.com/hashicorp/terraform/issues/1798)) ([#1808](https://github.com/hashicorp/terraform/issues/1808)) - * provider/aws: `aws_route_table` can target network interfaces ([#968](https://github.com/hashicorp/terraform/issues/968)) - * provider/aws: `aws_route_table` can specify propagating VGWs ([#1516](https://github.com/hashicorp/terraform/issues/1516)) - * provider/aws: `aws_route53_record` supports weighted sets ([#1578](https://github.com/hashicorp/terraform/issues/1578)) - * provider/aws: `aws_route53_zone` exports nameservers ([#1525](https://github.com/hashicorp/terraform/issues/1525)) - * provider/aws: `aws_s3_bucket` website support ([#1738](https://github.com/hashicorp/terraform/issues/1738)) - * provider/aws: `aws_security_group` name becomes optional and can be - automatically set to a unique identifier; this helps with - `create_before_destroy` scenarios ([#1632](https://github.com/hashicorp/terraform/issues/1632)) - * provider/aws: `aws_security_group` description becomes optional with a - static default value ([#1632](https://github.com/hashicorp/terraform/issues/1632)) - * provider/aws: automatically set the private IP as the SSH address - if not specified and no public IP is available ([#1623](https://github.com/hashicorp/terraform/issues/1623)) - * provider/aws: `aws_elb` exports `source_security_group` field ([#1708](https://github.com/hashicorp/terraform/issues/1708)) - * provider/aws: `aws_route53_record` supports alias targeting ([#1775](https://github.com/hashicorp/terraform/issues/1775)) - * provider/aws: Remove default AWS egress rule for newly created Security Groups ([#1765](https://github.com/hashicorp/terraform/issues/1765)) - * provider/consul: add `scheme` configuration argument ([#1838](https://github.com/hashicorp/terraform/issues/1838)) - * provider/docker: `docker_container` can specify links ([#1564](https://github.com/hashicorp/terraform/issues/1564)) - * provider/google: `resource_compute_disk` supports 
snapshots ([#1426](https://github.com/hashicorp/terraform/issues/1426)) - * provider/google: `resource_compute_instance` supports specifying the - device name ([#1426](https://github.com/hashicorp/terraform/issues/1426)) - * provider/openstack: Floating IP support for LBaaS ([#1550](https://github.com/hashicorp/terraform/issues/1550)) - * provider/openstack: Add AZ to `openstack_blockstorage_volume_v1` ([#1726](https://github.com/hashicorp/terraform/issues/1726)) - -BUG FIXES: - - * core: Fix graph cycle issues surrounding modules ([#1582](https://github.com/hashicorp/terraform/issues/1582)) ([#1637](https://github.com/hashicorp/terraform/issues/1637)) - * core: math on arbitrary variables works if first operand isn't a - numeric primitive. ([#1381](https://github.com/hashicorp/terraform/issues/1381)) - * core: avoid unnecessary cycles by pruning tainted destroys from - graph if there are no tainted resources ([#1475](https://github.com/hashicorp/terraform/issues/1475)) - * core: fix issue where destroy nodes weren't pruned in specific - edge cases around matching prefixes, which could cause cycles ([#1527](https://github.com/hashicorp/terraform/issues/1527)) - * core: fix issue causing diff mismatch errors in certain scenarios during - resource replacement ([#1515](https://github.com/hashicorp/terraform/issues/1515)) - * core: dependencies on resources with a different index work when - count > 1 ([#1540](https://github.com/hashicorp/terraform/issues/1540)) - * core: don't panic if variable default type is invalid ([#1344](https://github.com/hashicorp/terraform/issues/1344)) - * core: fix perpetual diff issue for computed maps that are empty ([#1607](https://github.com/hashicorp/terraform/issues/1607)) - * core: validation added to check for `self` variables in modules ([#1609](https://github.com/hashicorp/terraform/issues/1609)) - * core: fix edge case where validation didn't pick up unknown fields - if the value was computed 
([#1507](https://github.com/hashicorp/terraform/issues/1507)) - * core: Fix issue where values in sets on resources couldn't contain - hyphens. ([#1641](https://github.com/hashicorp/terraform/issues/1641)) - * core: Outputs removed from the config are removed from the state ([#1714](https://github.com/hashicorp/terraform/issues/1714)) - * core: Validate against the worst-case graph during plan phase to catch cycles - that would previously only show up during apply ([#1655](https://github.com/hashicorp/terraform/issues/1655)) - * core: Referencing invalid module output in module validates ([#1448](https://github.com/hashicorp/terraform/issues/1448)) - * command: remote states with uppercase types work ([#1356](https://github.com/hashicorp/terraform/issues/1356)) - * provider/aws: Support `AWS_SECURITY_TOKEN` env var again ([#1785](https://github.com/hashicorp/terraform/issues/1785)) - * provider/aws: Don't save "instance" for EIP if association fails ([#1776](https://github.com/hashicorp/terraform/issues/1776)) - * provider/aws: launch configuration ID set after create success ([#1518](https://github.com/hashicorp/terraform/issues/1518)) - * provider/aws: Fixed an issue with creating ELBs without any tags ([#1580](https://github.com/hashicorp/terraform/issues/1580)) - * provider/aws: Fix issue in Security Groups with empty IPRanges ([#1612](https://github.com/hashicorp/terraform/issues/1612)) - * provider/aws: manually deleted S3 buckets are refreshed properly ([#1574](https://github.com/hashicorp/terraform/issues/1574)) - * provider/aws: only check for EIP allocation ID in VPC ([#1555](https://github.com/hashicorp/terraform/issues/1555)) - * provider/aws: raw protocol numbers work in `aws_network_acl` ([#1435](https://github.com/hashicorp/terraform/issues/1435)) - * provider/aws: Block devices can be encrypted ([#1718](https://github.com/hashicorp/terraform/issues/1718)) - * provider/aws: ASG health check grace period can be updated in-place 
([#1682](https://github.com/hashicorp/terraform/issues/1682)) - * provider/aws: ELB security groups can be updated in-place ([#1662](https://github.com/hashicorp/terraform/issues/1662)) - * provider/aws: `aws_main_route_table diff --git a/vendor/github.com/hashicorp/terraform/Dockerfile b/vendor/github.com/hashicorp/terraform/Dockerfile deleted file mode 100644 index c5afc0d8015..00000000000 --- a/vendor/github.com/hashicorp/terraform/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -# This Dockerfile builds on golang:alpine by building Terraform from source -# using the current working directory. -# -# This produces a docker image that contains a working Terraform binary along -# with all of its source code, which is what gets released on hub.docker.com -# as terraform:full. The main releases (terraform:latest, terraform:light and -# the release tags) are lighter images including only the officially-released -# binary from releases.hashicorp.com; these are built instead from -# scripts/docker-release/Dockerfile-release. - -FROM golang:alpine -LABEL maintainer="HashiCorp Terraform Team " - -RUN apk add --update git bash openssh - -ENV TF_DEV=true -ENV TF_RELEASE=1 - -WORKDIR $GOPATH/src/github.com/hashicorp/terraform -COPY . . -RUN /bin/bash scripts/build.sh - -WORKDIR $GOPATH -ENTRYPOINT ["terraform"] diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/terraform/LICENSE deleted file mode 100644 index c33dcc7c928..00000000000 --- a/vendor/github.com/hashicorp/terraform/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. 
“Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. 
“Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. 
Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. 
Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. 
However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. 
- Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. 
- diff --git a/vendor/github.com/hashicorp/terraform/Makefile b/vendor/github.com/hashicorp/terraform/Makefile deleted file mode 100644 index 07897daacdc..00000000000 --- a/vendor/github.com/hashicorp/terraform/Makefile +++ /dev/null @@ -1,105 +0,0 @@ -TEST?=./... -GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor) - -default: test vet - -tools: - go get -u github.com/kardianos/govendor - go get -u golang.org/x/tools/cmd/stringer - go get -u golang.org/x/tools/cmd/cover - -# bin generates the releaseable binaries for Terraform -bin: fmtcheck generate - @TF_RELEASE=1 sh -c "'$(CURDIR)/scripts/build.sh'" - -# dev creates binaries for testing Terraform locally. These are put -# into ./bin/ as well as $GOPATH/bin -dev: fmtcheck generate - @TF_DEV=1 sh -c "'$(CURDIR)/scripts/build.sh'" - -quickdev: generate - @TF_DEV=1 sh -c "'$(CURDIR)/scripts/build.sh'" - -# Shorthand for building and installing just one plugin for local testing. -# Run as (for example): make plugin-dev PLUGIN=provider-aws -plugin-dev: generate - go install github.com/hashicorp/terraform/builtin/bins/$(PLUGIN) - mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN) - -# test runs the unit tests -# we run this one package at a time here because running the entire suite in -# one command creates memory usage issues when running in Travis-CI. -test: fmtcheck generate - go test -i $(TEST) || exit 1 - go list $(TEST) | xargs -t -n4 go test $(TESTARGS) -timeout=60s -parallel=4 - -# testacc runs acceptance tests -testacc: fmtcheck generate - @if [ "$(TEST)" = "./..." ]; then \ - echo "ERROR: Set TEST to a specific package. For example,"; \ - echo " make testacc TEST=./builtin/providers/aws"; \ - exit 1; \ - fi - TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m - -# e2etest runs the end-to-end tests against a generated Terraform binary -# The TF_ACC here allows network access, but does not require any special -# credentials since the e2etests use local-only providers such as "null". 
-e2etest: generate - TF_ACC=1 go test -v ./command/e2etest - -test-compile: fmtcheck generate - @if [ "$(TEST)" = "./..." ]; then \ - echo "ERROR: Set TEST to a specific package. For example,"; \ - echo " make test-compile TEST=./builtin/providers/aws"; \ - exit 1; \ - fi - go test -c $(TEST) $(TESTARGS) - -# testrace runs the race checker -testrace: fmtcheck generate - TF_ACC= go test -race $(TEST) $(TESTARGS) - -cover: - @go tool cover 2>/dev/null; if [ $$? -eq 3 ]; then \ - go get -u golang.org/x/tools/cmd/cover; \ - fi - go test $(TEST) -coverprofile=coverage.out - go tool cover -html=coverage.out - rm coverage.out - -# vet runs the Go source code static analysis tool `vet` to find -# any common errors. -vet: - @echo 'go vet ./...' - @go vet ./... ; if [ $$? -eq 1 ]; then \ - echo ""; \ - echo "Vet found suspicious constructs. Please check the reported constructs"; \ - echo "and fix them if necessary before submitting the code for review."; \ - exit 1; \ - fi - -# generate runs `go generate` to build the dynamically generated -# source files. -generate: - @which stringer > /dev/null; if [ $$? -ne 0 ]; then \ - go get -u golang.org/x/tools/cmd/stringer; \ - fi - go generate ./... - @go fmt command/internal_plugin_list.go > /dev/null - -fmt: - gofmt -w $(GOFMT_FILES) - -fmtcheck: - @sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" - -vendor-status: - @govendor status - -# disallow any parallelism (-j) for Make. This is necessary since some -# commands during the build process create temporary files that collide -# under parallel conditions. 
-.NOTPARALLEL: - -.PHONY: bin cover default dev e2etest fmt fmtcheck generate plugin-dev quickdev test-compile test testacc testrace tools vendor-status vet diff --git a/vendor/github.com/hashicorp/terraform/README.md b/vendor/github.com/hashicorp/terraform/README.md deleted file mode 100644 index eed7f74e3b0..00000000000 --- a/vendor/github.com/hashicorp/terraform/README.md +++ /dev/null @@ -1,170 +0,0 @@ -Terraform -========= - -- Website: https://www.terraform.io -- [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby) -- Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool) - -Terraform - -Terraform is a tool for building, changing, and versioning infrastructure safely and efficiently. Terraform can manage existing and popular service providers as well as custom in-house solutions. - -The key features of Terraform are: - -- **Infrastructure as Code**: Infrastructure is described using a high-level configuration syntax. This allows a blueprint of your datacenter to be versioned and treated as you would any other code. Additionally, infrastructure can be shared and re-used. - -- **Execution Plans**: Terraform has a "planning" step where it generates an *execution plan*. The execution plan shows what Terraform will do when you call apply. This lets you avoid any surprises when Terraform manipulates infrastructure. - -- **Resource Graph**: Terraform builds a graph of all your resources, and parallelizes the creation and modification of any non-dependent resources. Because of this, Terraform builds infrastructure as efficiently as possible, and operators get insight into dependencies in their infrastructure. - -- **Change Automation**: Complex changesets can be applied to your infrastructure with minimal human interaction. 
With the previously mentioned execution plan and resource graph, you know exactly what Terraform will change and in what order, avoiding many possible human errors. - -For more information, see the [introduction section](http://www.terraform.io/intro) of the Terraform website. - -Getting Started & Documentation -------------------------------- - -If you're new to Terraform and want to get started creating infrastructure, please checkout our [Getting Started](https://www.terraform.io/intro/getting-started/install.html) guide, available on the [Terraform website](http://www.terraform.io). - -All documentation is available on the [Terraform website](http://www.terraform.io): - - - [Intro](https://www.terraform.io/intro/index.html) - - [Docs](https://www.terraform.io/docs/index.html) - -Developing Terraform --------------------- - -If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.9+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you. - -This repository contains only Terraform core, which includes the command line interface and the main graph engine. Providers are implemented as plugins that each have their own repository in [the `terraform-providers` organization](https://github.com/terraform-providers) on GitHub. Instructions for developing each provider are in the associated README file. For more information, see [the provider development overview](https://www.terraform.io/docs/plugins/provider.html). - -For local development of Terraform core, first make sure Go is properly installed and that a -[GOPATH](http://golang.org/doc/code.html#GOPATH) has been set. You will also need to add `$GOPATH/bin` to your `$PATH`. - -Next, using [Git](https://git-scm.com/), clone this repository into `$GOPATH/src/github.com/hashicorp/terraform`. 
All the necessary dependencies are either vendored or automatically installed, so you just need to type `make`. This will compile the code and then run the tests. If this exits with exit status 0, then everything is working! - -```sh -$ cd "$GOPATH/src/github.com/hashicorp/terraform" -$ make -``` - -To compile a development version of Terraform and the built-in plugins, run `make dev`. This will build everything using [gox](https://github.com/mitchellh/gox) and put Terraform binaries in the `bin` and `$GOPATH/bin` folders: - -```sh -$ make dev -... -$ bin/terraform -... -``` - -If you're developing a specific package, you can run tests for just that package by specifying the `TEST` variable. For example below, only`terraform` package tests will be run. - -```sh -$ make test TEST=./terraform -... -``` - -If you're working on a specific provider which has not been separated into an individual repository and only wish to rebuild that provider, you can use the `plugin-dev` target. For example, to build only the Test provider: - -```sh -$ make plugin-dev PLUGIN=provider-test -``` - -### Dependencies - -Terraform stores its dependencies under `vendor/`, which [Go 1.6+ will automatically recognize and load](https://golang.org/cmd/go/#hdr-Vendor_Directories). We use [`govendor`](https://github.com/kardianos/govendor) to manage the vendored dependencies. - -If you're developing Terraform, there are a few tasks you might need to perform. - -#### Adding a dependency - -If you're adding a dependency, you'll need to vendor it in the same Pull Request as the code that depends on it. You should do this in a separate commit from your code, as makes PR review easier and Git history simpler to read in the future. - -To add a dependency: - -Assuming your work is on a branch called `my-feature-branch`, the steps look like this: - -1. Add the new package to your GOPATH: - - ```bash - go get github.com/hashicorp/my-project - ``` - -2. 
Add the new package to your `vendor/` directory: - - ```bash - govendor add github.com/hashicorp/my-project/package - ``` - -3. Review the changes in git and commit them. - -#### Updating a dependency - -To update a dependency: - -1. Fetch the dependency: - - ```bash - govendor fetch github.com/hashicorp/my-project - ``` - -2. Review the changes in git and commit them. - -### Acceptance Tests - -Terraform has a comprehensive [acceptance -test](http://en.wikipedia.org/wiki/Acceptance_testing) suite covering the -built-in providers. Our [Contributing Guide](https://github.com/hashicorp/terraform/blob/master/.github/CONTRIBUTING.md) includes details about how and when to write and run acceptance tests in order to help contributions get accepted quickly. - - -### Cross Compilation and Building for Distribution - -If you wish to cross-compile Terraform for another architecture, you can set the `XC_OS` and `XC_ARCH` environment variables to values representing the target operating system and architecture before calling `make`. The output is placed in the `pkg` subdirectory tree both expanded in a directory representing the OS/architecture combination and as a ZIP archive. - -For example, to compile 64-bit Linux binaries on Mac OS X, you can run: - -```sh -$ XC_OS=linux XC_ARCH=amd64 make bin -... -$ file pkg/linux_amd64/terraform -terraform: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), statically linked, not stripped -``` - -`XC_OS` and `XC_ARCH` can be space separated lists representing different combinations of operating system and architecture. For example, to compile for both Linux and Mac OS X, targeting both 32- and 64-bit architectures, you can run: - -```sh -$ XC_OS="linux darwin" XC_ARCH="386 amd64" make bin -... 
-$ tree ./pkg/ -P "terraform|*.zip" -./pkg/ -├── darwin_386 -│   └── terraform -├── darwin_386.zip -├── darwin_amd64 -│   └── terraform -├── darwin_amd64.zip -├── linux_386 -│   └── terraform -├── linux_386.zip -├── linux_amd64 -│   └── terraform -└── linux_amd64.zip - -4 directories, 8 files -``` - -_Note: Cross-compilation uses [gox](https://github.com/mitchellh/gox), which requires toolchains to be built with versions of Go prior to 1.5. In order to successfully cross-compile with older versions of Go, you will need to run `gox -build-toolchain` before running the commands detailed above._ - -#### Docker - -When using docker you don't need to have any of the Go development tools installed and you can clone terraform to any location on disk (doesn't have to be in your $GOPATH). This is useful for users who want to build `master` or a specific branch for testing without setting up a proper Go environment. - -For example, run the following command to build terraform in a linux-based container for macOS. - -```sh -docker run --rm -v $(pwd):/go/src/github.com/hashicorp/terraform -w /go/src/github.com/hashicorp/terraform -e XC_OS=darwin -e XC_ARCH=amd64 golang:latest bash -c "apt-get update && apt-get install -y zip && make bin" -``` - - -## License -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform.svg?type=large)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform?ref=badge_large) diff --git a/vendor/github.com/hashicorp/terraform/Vagrantfile b/vendor/github.com/hashicorp/terraform/Vagrantfile deleted file mode 100644 index d140efb9188..00000000000 --- a/vendor/github.com/hashicorp/terraform/Vagrantfile +++ /dev/null @@ -1,92 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
-VAGRANTFILE_API_VERSION = "2" - -# Software version variables -GOVERSION = "1.9" -UBUNTUVERSION = "16.04" - -# CPU and RAM can be adjusted depending on your system -CPUCOUNT = "2" -RAM = "4096" - -$script = <